Diffstat (limited to 'src/spdk/dpdk/lib/librte_eventdev')
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/Makefile                            46
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/meson.build                         27
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.c        1128
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.h         575
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_event_eth_rx_adapter.c        2430
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_event_eth_rx_adapter.h         513
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_event_ring.c                   184
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_event_ring.h                   278
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c         1299
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter.h          766
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter_pmd.h      114
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_eventdev.c                    1357
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_eventdev.h                    1920
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h                 901
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd_pci.h             137
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd_vdev.h            108
-rw-r--r--  src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_version.map           113
17 files changed, 11896 insertions, 0 deletions
diff --git a/src/spdk/dpdk/lib/librte_eventdev/Makefile b/src/spdk/dpdk/lib/librte_eventdev/Makefile
new file mode 100644
index 00000000..47f599a6
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_eventdev.a
+
+# library version
+LIBABIVER := 5
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+ifeq ($(CONFIG_RTE_EXEC_ENV_LINUXAPP),y)
+CFLAGS += -DLINUX
+else
+CFLAGS += -DBSD
+endif
+LDLIBS += -lrte_eal -lrte_ring -lrte_ethdev -lrte_hash -lrte_mempool -lrte_timer
+LDLIBS += -lrte_mbuf -lrte_cryptodev -lpthread
+
+# library source files
+SRCS-y += rte_eventdev.c
+SRCS-y += rte_event_ring.c
+SRCS-y += rte_event_eth_rx_adapter.c
+SRCS-y += rte_event_timer_adapter.c
+SRCS-y += rte_event_crypto_adapter.c
+
+# export include files
+SYMLINK-y-include += rte_eventdev.h
+SYMLINK-y-include += rte_eventdev_pmd.h
+SYMLINK-y-include += rte_eventdev_pmd_pci.h
+SYMLINK-y-include += rte_eventdev_pmd_vdev.h
+SYMLINK-y-include += rte_event_ring.h
+SYMLINK-y-include += rte_event_eth_rx_adapter.h
+SYMLINK-y-include += rte_event_timer_adapter.h
+SYMLINK-y-include += rte_event_timer_adapter_pmd.h
+SYMLINK-y-include += rte_event_crypto_adapter.h
+
+# versioning export map
+EXPORT_MAP := rte_eventdev_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/lib/librte_eventdev/meson.build b/src/spdk/dpdk/lib/librte_eventdev/meson.build
new file mode 100644
index 00000000..3cbaf298
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/meson.build
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+version = 5
+allow_experimental_apis = true
+
+if host_machine.system() == 'linux'
+ cflags += '-DLINUX'
+else
+ cflags += '-DBSD'
+endif
+
+sources = files('rte_eventdev.c',
+ 'rte_event_ring.c',
+ 'rte_event_eth_rx_adapter.c',
+ 'rte_event_timer_adapter.c',
+ 'rte_event_crypto_adapter.c')
+headers = files('rte_eventdev.h',
+ 'rte_eventdev_pmd.h',
+ 'rte_eventdev_pmd_pci.h',
+ 'rte_eventdev_pmd_vdev.h',
+ 'rte_event_ring.h',
+ 'rte_event_eth_rx_adapter.h',
+ 'rte_event_timer_adapter.h',
+ 'rte_event_timer_adapter_pmd.h',
+ 'rte_event_crypto_adapter.h')
+deps += ['ring', 'ethdev', 'hash', 'mempool', 'mbuf', 'timer', 'cryptodev']
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.c b/src/spdk/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.c
new file mode 100644
index 00000000..11b28ca9
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.c
@@ -0,0 +1,1128 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <stdbool.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_service_component.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+#include "rte_event_crypto_adapter.h"
+
+#define BATCH_SIZE 32
+#define DEFAULT_MAX_NB 128
+#define CRYPTO_ADAPTER_NAME_LEN 32
+#define CRYPTO_ADAPTER_MEM_NAME_LEN 32
+#define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100
+
+/* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD
+ * iterations of eca_crypto_adapter_enq_run()
+ */
+#define CRYPTO_ENQ_FLUSH_THRESHOLD 1024
+
+struct rte_event_crypto_adapter {
+ /* Event device identifier */
+ uint8_t eventdev_id;
+ /* Event port identifier */
+ uint8_t event_port_id;
+ /* Store event device's implicit release capability */
+ uint8_t implicit_release_disabled;
+ /* Max crypto ops processed in any service function invocation */
+ uint32_t max_nb;
+ /* Lock to serialize config updates with service function */
+ rte_spinlock_t lock;
+ /* Next crypto device to be processed */
+ uint16_t next_cdev_id;
+ /* Per crypto device structure */
+ struct crypto_device_info *cdevs;
+ /* Loop counter to flush crypto ops */
+ uint16_t transmit_loop_count;
+ /* Per instance stats structure */
+ struct rte_event_crypto_adapter_stats crypto_stats;
+ /* Configuration callback for rte_service configuration */
+ rte_event_crypto_adapter_conf_cb conf_cb;
+ /* Configuration callback argument */
+ void *conf_arg;
+ /* Set if default_cb is being used */
+ int default_cb_arg;
+ /* Service initialization state */
+ uint8_t service_inited;
+ /* Memory allocation name */
+ char mem_name[CRYPTO_ADAPTER_MEM_NAME_LEN];
+ /* Socket identifier cached from eventdev */
+ int socket_id;
+ /* Per adapter EAL service */
+ uint32_t service_id;
+ /* No. of queue pairs configured */
+ uint16_t nb_qps;
+ /* Adapter mode */
+ enum rte_event_crypto_adapter_mode mode;
+} __rte_cache_aligned;
+
+/* Per crypto device information */
+struct crypto_device_info {
+ /* Pointer to cryptodev */
+ struct rte_cryptodev *dev;
+ /* Pointer to queue pair info */
+ struct crypto_queue_pair_info *qpairs;
+ /* Next queue pair to be processed */
+ uint16_t next_queue_pair_id;
+ /* Set to indicate cryptodev->eventdev packet
+ * transfer uses a hardware mechanism
+ */
+ uint8_t internal_event_port;
+ /* Set to indicate processing has been started */
+ uint8_t dev_started;
+ /* If num_qpairs > 0, the start callback will
+ * be invoked if not already invoked
+ */
+ uint16_t num_qpairs;
+} __rte_cache_aligned;
+
+/* Per queue pair information */
+struct crypto_queue_pair_info {
+ /* Set to indicate queue pair is enabled */
+ bool qp_enabled;
+ /* Pointer to hold rte_crypto_ops for batching */
+ struct rte_crypto_op **op_buffer;
+	/* Number of crypto ops accumulated */
+ uint8_t len;
+} __rte_cache_aligned;
+
+static struct rte_event_crypto_adapter **event_crypto_adapter;
+
+/* Macros to check for valid adapter */
+#define EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
+ if (!eca_valid_id(id)) { \
+ RTE_EDEV_LOG_ERR("Invalid crypto adapter id = %d\n", id); \
+ return retval; \
+ } \
+} while (0)
+
+static inline int
+eca_valid_id(uint8_t id)
+{
+ return id < RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
+}
+
+static int
+eca_init(void)
+{
+ const char *name = "crypto_adapter_array";
+ const struct rte_memzone *mz;
+ unsigned int sz;
+
+ sz = sizeof(*event_crypto_adapter) *
+ RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE;
+ sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
+ RTE_CACHE_LINE_SIZE);
+ if (mz == NULL) {
+ RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
+ PRId32, rte_errno);
+ return -rte_errno;
+ }
+ }
+
+ event_crypto_adapter = mz->addr;
+ return 0;
+}
+
+static inline struct rte_event_crypto_adapter *
+eca_id_to_adapter(uint8_t id)
+{
+ return event_crypto_adapter ?
+ event_crypto_adapter[id] : NULL;
+}
+
+static int
+eca_default_config_cb(uint8_t id, uint8_t dev_id,
+ struct rte_event_crypto_adapter_conf *conf, void *arg)
+{
+ struct rte_event_dev_config dev_conf;
+ struct rte_eventdev *dev;
+ uint8_t port_id;
+ int started;
+ int ret;
+ struct rte_event_port_conf *port_conf = arg;
+ struct rte_event_crypto_adapter *adapter = eca_id_to_adapter(id);
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ dev_conf = dev->data->dev_conf;
+
+ started = dev->data->dev_started;
+ if (started)
+ rte_event_dev_stop(dev_id);
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports += 1;
+ ret = rte_event_dev_configure(dev_id, &dev_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to configure event dev %u\n", dev_id);
+ if (started) {
+ if (rte_event_dev_start(dev_id))
+ return -EIO;
+ }
+ return ret;
+ }
+
+ ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to setup event port %u\n", port_id);
+ return ret;
+ }
+
+ conf->event_port_id = port_id;
+ conf->max_nb = DEFAULT_MAX_NB;
+ if (started)
+ ret = rte_event_dev_start(dev_id);
+
+ adapter->default_cb_arg = 1;
+ return ret;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_crypto_adapter_conf_cb conf_cb,
+ enum rte_event_crypto_adapter_mode mode,
+ void *conf_arg)
+{
+ struct rte_event_crypto_adapter *adapter;
+ char mem_name[CRYPTO_ADAPTER_NAME_LEN];
+ struct rte_event_dev_info dev_info;
+ int socket_id;
+ uint8_t i;
+ int ret;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ if (conf_cb == NULL)
+ return -EINVAL;
+
+ if (event_crypto_adapter == NULL) {
+ ret = eca_init();
+ if (ret)
+ return ret;
+ }
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter != NULL) {
+ RTE_EDEV_LOG_ERR("Crypto adapter id %u already exists!", id);
+ return -EEXIST;
+ }
+
+ socket_id = rte_event_dev_socket_id(dev_id);
+ snprintf(mem_name, CRYPTO_ADAPTER_MEM_NAME_LEN,
+ "rte_event_crypto_adapter_%d", id);
+
+ adapter = rte_zmalloc_socket(mem_name, sizeof(*adapter),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (adapter == NULL) {
+ RTE_EDEV_LOG_ERR("Failed to get mem for event crypto adapter!");
+ return -ENOMEM;
+ }
+
+ ret = rte_event_dev_info_get(dev_id, &dev_info);
+ if (ret < 0) {
+ RTE_EDEV_LOG_ERR("Failed to get info for eventdev %d: %s!",
+ dev_id, dev_info.driver_name);
+ return ret;
+ }
+
+ adapter->implicit_release_disabled = (dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE);
+ adapter->eventdev_id = dev_id;
+ adapter->socket_id = socket_id;
+ adapter->conf_cb = conf_cb;
+ adapter->conf_arg = conf_arg;
+ adapter->mode = mode;
+ strcpy(adapter->mem_name, mem_name);
+ adapter->cdevs = rte_zmalloc_socket(adapter->mem_name,
+ rte_cryptodev_count() *
+ sizeof(struct crypto_device_info), 0,
+ socket_id);
+ if (adapter->cdevs == NULL) {
+ RTE_EDEV_LOG_ERR("Failed to get mem for crypto devices\n");
+ rte_free(adapter);
+ return -ENOMEM;
+ }
+
+ rte_spinlock_init(&adapter->lock);
+ for (i = 0; i < rte_cryptodev_count(); i++)
+ adapter->cdevs[i].dev = rte_cryptodev_pmd_get_dev(i);
+
+ event_crypto_adapter[id] = adapter;
+
+ return 0;
+}
+
+
+int __rte_experimental
+rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config,
+ enum rte_event_crypto_adapter_mode mode)
+{
+ struct rte_event_port_conf *pc;
+ int ret;
+
+ if (port_config == NULL)
+ return -EINVAL;
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ pc = rte_malloc(NULL, sizeof(*pc), 0);
+ if (pc == NULL)
+ return -ENOMEM;
+ *pc = *port_config;
+ ret = rte_event_crypto_adapter_create_ext(id, dev_id,
+ eca_default_config_cb,
+ mode,
+ pc);
+ if (ret)
+ rte_free(pc);
+
+ return ret;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_free(uint8_t id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ if (adapter->nb_qps) {
+ RTE_EDEV_LOG_ERR("%" PRIu16 "Queue pairs not deleted",
+ adapter->nb_qps);
+ return -EBUSY;
+ }
+
+ if (adapter->default_cb_arg)
+ rte_free(adapter->conf_arg);
+ rte_free(adapter->cdevs);
+ rte_free(adapter);
+ event_crypto_adapter[id] = NULL;
+
+ return 0;
+}
+
+static inline unsigned int
+eca_enq_to_cryptodev(struct rte_event_crypto_adapter *adapter,
+ struct rte_event *ev, unsigned int cnt)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ union rte_event_crypto_metadata *m_data = NULL;
+ struct crypto_queue_pair_info *qp_info = NULL;
+ struct rte_crypto_op *crypto_op;
+ unsigned int i, n;
+ uint16_t qp_id, len, ret;
+ uint8_t cdev_id;
+
+ len = 0;
+ ret = 0;
+ n = 0;
+ stats->event_deq_count += cnt;
+
+ for (i = 0; i < cnt; i++) {
+ crypto_op = ev[i].event_ptr;
+ if (crypto_op == NULL)
+ continue;
+ if (crypto_op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ m_data = rte_cryptodev_sym_session_get_user_data(
+ crypto_op->sym->session);
+ if (m_data == NULL) {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+
+ cdev_id = m_data->request_info.cdev_id;
+ qp_id = m_data->request_info.queue_pair_id;
+ qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
+ if (qp_info == NULL) {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+ len = qp_info->len;
+ qp_info->op_buffer[len] = crypto_op;
+ len++;
+ } else if (crypto_op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+ crypto_op->private_data_offset) {
+ m_data = (union rte_event_crypto_metadata *)
+ ((uint8_t *)crypto_op +
+ crypto_op->private_data_offset);
+ cdev_id = m_data->request_info.cdev_id;
+ qp_id = m_data->request_info.queue_pair_id;
+ qp_info = &adapter->cdevs[cdev_id].qpairs[qp_id];
+ if (qp_info == NULL) {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+ len = qp_info->len;
+ qp_info->op_buffer[len] = crypto_op;
+ len++;
+ } else {
+ rte_pktmbuf_free(crypto_op->sym->m_src);
+ rte_crypto_op_free(crypto_op);
+ continue;
+ }
+
+ if (len == BATCH_SIZE) {
+ struct rte_crypto_op **op_buffer = qp_info->op_buffer;
+ ret = rte_cryptodev_enqueue_burst(cdev_id,
+ qp_id,
+ op_buffer,
+ BATCH_SIZE);
+
+ stats->crypto_enq_count += ret;
+
+ while (ret < len) {
+ struct rte_crypto_op *op;
+ op = op_buffer[ret++];
+ stats->crypto_enq_fail++;
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ }
+
+ len = 0;
+ }
+
+ if (qp_info)
+ qp_info->len = len;
+ n += ret;
+ }
+
+ return n;
+}
+
+static unsigned int
+eca_crypto_enq_flush(struct rte_event_crypto_adapter *adapter)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ struct crypto_device_info *curr_dev;
+ struct crypto_queue_pair_info *curr_queue;
+ struct rte_crypto_op **op_buffer;
+ struct rte_cryptodev *dev;
+ uint8_t cdev_id;
+ uint16_t qp;
+ uint16_t ret;
+ uint16_t num_cdev = rte_cryptodev_count();
+
+ ret = 0;
+ for (cdev_id = 0; cdev_id < num_cdev; cdev_id++) {
+ curr_dev = &adapter->cdevs[cdev_id];
+ if (curr_dev == NULL)
+ continue;
+ dev = curr_dev->dev;
+
+ for (qp = 0; qp < dev->data->nb_queue_pairs; qp++) {
+
+ curr_queue = &curr_dev->qpairs[qp];
+ if (!curr_queue->qp_enabled)
+ continue;
+
+ op_buffer = curr_queue->op_buffer;
+ ret = rte_cryptodev_enqueue_burst(cdev_id,
+ qp,
+ op_buffer,
+ curr_queue->len);
+ stats->crypto_enq_count += ret;
+
+ while (ret < curr_queue->len) {
+ struct rte_crypto_op *op;
+ op = op_buffer[ret++];
+ stats->crypto_enq_fail++;
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ }
+ curr_queue->len = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int
+eca_crypto_adapter_enq_run(struct rte_event_crypto_adapter *adapter,
+ unsigned int max_enq)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ struct rte_event ev[BATCH_SIZE];
+ unsigned int nb_enq, nb_enqueued;
+ uint16_t n;
+ uint8_t event_dev_id = adapter->eventdev_id;
+ uint8_t event_port_id = adapter->event_port_id;
+
+ nb_enqueued = 0;
+ if (adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ return 0;
+
+ for (nb_enq = 0; nb_enq < max_enq; nb_enq += n) {
+ stats->event_poll_count++;
+ n = rte_event_dequeue_burst(event_dev_id,
+ event_port_id, ev, BATCH_SIZE, 0);
+
+ if (!n)
+ break;
+
+ nb_enqueued += eca_enq_to_cryptodev(adapter, ev, n);
+ }
+
+ if ((++adapter->transmit_loop_count &
+ (CRYPTO_ENQ_FLUSH_THRESHOLD - 1)) == 0) {
+ nb_enqueued += eca_crypto_enq_flush(adapter);
+ }
+
+ return nb_enqueued;
+}
+
+static inline void
+eca_ops_enqueue_burst(struct rte_event_crypto_adapter *adapter,
+ struct rte_crypto_op **ops, uint16_t num)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ union rte_event_crypto_metadata *m_data = NULL;
+ uint8_t event_dev_id = adapter->eventdev_id;
+ uint8_t event_port_id = adapter->event_port_id;
+ struct rte_event events[BATCH_SIZE];
+ uint16_t nb_enqueued, nb_ev;
+ uint8_t retry;
+ uint8_t i;
+
+ nb_ev = 0;
+ retry = 0;
+ nb_enqueued = 0;
+ num = RTE_MIN(num, BATCH_SIZE);
+	for (i = 0; i < num; i++) {
+		struct rte_event *ev;
+
+		/* Reset so a stale pointer from a previous iteration is
+		 * not mistaken for this op's metadata.
+		 */
+		m_data = NULL;
+		if (ops[i]->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+			m_data = rte_cryptodev_sym_session_get_user_data(
+					ops[i]->sym->session);
+		} else if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
+				ops[i]->private_data_offset) {
+			m_data = (union rte_event_crypto_metadata *)
+				 ((uint8_t *)ops[i] +
+				  ops[i]->private_data_offset);
+		}
+
+		if (unlikely(m_data == NULL)) {
+			rte_pktmbuf_free(ops[i]->sym->m_src);
+			rte_crypto_op_free(ops[i]);
+			continue;
+		}
+
+		/* Claim an event slot only for ops that carry metadata */
+		ev = &events[nb_ev++];
+		rte_memcpy(ev, &m_data->response_info, sizeof(*ev));
+ ev->event_ptr = ops[i];
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ if (adapter->implicit_release_disabled)
+ ev->op = RTE_EVENT_OP_FORWARD;
+ else
+ ev->op = RTE_EVENT_OP_NEW;
+ }
+
+ do {
+ nb_enqueued += rte_event_enqueue_burst(event_dev_id,
+ event_port_id,
+ &events[nb_enqueued],
+ nb_ev - nb_enqueued);
+ } while (retry++ < CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES &&
+ nb_enqueued < nb_ev);
+
+ /* Free mbufs and rte_crypto_ops for failed events */
+ for (i = nb_enqueued; i < nb_ev; i++) {
+ struct rte_crypto_op *op = events[i].event_ptr;
+ rte_pktmbuf_free(op->sym->m_src);
+ rte_crypto_op_free(op);
+ }
+
+ stats->event_enq_fail_count += nb_ev - nb_enqueued;
+ stats->event_enq_count += nb_enqueued;
+ stats->event_enq_retry_count += retry - 1;
+}
+
+static inline unsigned int
+eca_crypto_adapter_deq_run(struct rte_event_crypto_adapter *adapter,
+ unsigned int max_deq)
+{
+ struct rte_event_crypto_adapter_stats *stats = &adapter->crypto_stats;
+ struct crypto_device_info *curr_dev;
+ struct crypto_queue_pair_info *curr_queue;
+ struct rte_crypto_op *ops[BATCH_SIZE];
+ uint16_t n, nb_deq;
+ struct rte_cryptodev *dev;
+ uint8_t cdev_id;
+ uint16_t qp, dev_qps;
+ bool done;
+ uint16_t num_cdev = rte_cryptodev_count();
+
+ nb_deq = 0;
+ do {
+ uint16_t queues = 0;
+ done = true;
+
+ for (cdev_id = adapter->next_cdev_id;
+ cdev_id < num_cdev; cdev_id++) {
+ curr_dev = &adapter->cdevs[cdev_id];
+ if (curr_dev == NULL)
+ continue;
+ dev = curr_dev->dev;
+ dev_qps = dev->data->nb_queue_pairs;
+
+ for (qp = curr_dev->next_queue_pair_id;
+ queues < dev_qps; qp = (qp + 1) % dev_qps,
+ queues++) {
+
+ curr_queue = &curr_dev->qpairs[qp];
+ if (!curr_queue->qp_enabled)
+ continue;
+
+ n = rte_cryptodev_dequeue_burst(cdev_id, qp,
+ ops, BATCH_SIZE);
+ if (!n)
+ continue;
+
+ done = false;
+ stats->crypto_deq_count += n;
+ eca_ops_enqueue_burst(adapter, ops, n);
+ nb_deq += n;
+
+ if (nb_deq > max_deq) {
+ if ((qp + 1) == dev_qps) {
+ adapter->next_cdev_id =
+ (cdev_id + 1)
+ % num_cdev;
+ }
+ curr_dev->next_queue_pair_id = (qp + 1)
+ % dev->data->nb_queue_pairs;
+
+ return nb_deq;
+ }
+ }
+ }
+ } while (done == false);
+ return nb_deq;
+}
+
+static void
+eca_crypto_adapter_run(struct rte_event_crypto_adapter *adapter,
+ unsigned int max_ops)
+{
+ while (max_ops) {
+ unsigned int e_cnt, d_cnt;
+
+		d_cnt = eca_crypto_adapter_deq_run(adapter, max_ops);
+		max_ops -= RTE_MIN(max_ops, d_cnt);
+
+		e_cnt = eca_crypto_adapter_enq_run(adapter, max_ops);
+		max_ops -= RTE_MIN(max_ops, e_cnt);
+
+		if (d_cnt == 0 && e_cnt == 0)
+			break;
+	}
+}
+
+static int
+eca_service_func(void *args)
+{
+ struct rte_event_crypto_adapter *adapter = args;
+
+ if (rte_spinlock_trylock(&adapter->lock) == 0)
+ return 0;
+ eca_crypto_adapter_run(adapter, adapter->max_nb);
+ rte_spinlock_unlock(&adapter->lock);
+
+ return 0;
+}
+
+static int
+eca_init_service(struct rte_event_crypto_adapter *adapter, uint8_t id)
+{
+ struct rte_event_crypto_adapter_conf adapter_conf;
+ struct rte_service_spec service;
+ int ret;
+
+ if (adapter->service_inited)
+ return 0;
+
+ memset(&service, 0, sizeof(service));
+ snprintf(service.name, CRYPTO_ADAPTER_NAME_LEN,
+ "rte_event_crypto_adapter_%d", id);
+ service.socket_id = adapter->socket_id;
+ service.callback = eca_service_func;
+ service.callback_userdata = adapter;
+ /* Service function handles locking for queue add/del updates */
+ service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
+ ret = rte_service_component_register(&service, &adapter->service_id);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
+ service.name, ret);
+ return ret;
+ }
+
+ ret = adapter->conf_cb(id, adapter->eventdev_id,
+ &adapter_conf, adapter->conf_arg);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
+ ret);
+ return ret;
+ }
+
+ adapter->max_nb = adapter_conf.max_nb;
+ adapter->event_port_id = adapter_conf.event_port_id;
+ adapter->service_inited = 1;
+
+ return ret;
+}
+
+static void
+eca_update_qp_info(struct rte_event_crypto_adapter *adapter,
+ struct crypto_device_info *dev_info,
+ int32_t queue_pair_id,
+ uint8_t add)
+{
+ struct crypto_queue_pair_info *qp_info;
+ int enabled;
+ uint16_t i;
+
+ if (dev_info->qpairs == NULL)
+ return;
+
+ if (queue_pair_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
+ eca_update_qp_info(adapter, dev_info, i, add);
+ } else {
+ qp_info = &dev_info->qpairs[queue_pair_id];
+ enabled = qp_info->qp_enabled;
+ if (add) {
+ adapter->nb_qps += !enabled;
+ dev_info->num_qpairs += !enabled;
+ } else {
+ adapter->nb_qps -= enabled;
+ dev_info->num_qpairs -= enabled;
+ }
+ qp_info->qp_enabled = !!add;
+ }
+}
+
+static int
+eca_add_queue_pair(struct rte_event_crypto_adapter *adapter,
+ uint8_t cdev_id,
+ int queue_pair_id)
+{
+ struct crypto_device_info *dev_info = &adapter->cdevs[cdev_id];
+ struct crypto_queue_pair_info *qpairs;
+ uint32_t i;
+
+ if (dev_info->qpairs == NULL) {
+ dev_info->qpairs =
+ rte_zmalloc_socket(adapter->mem_name,
+ dev_info->dev->data->nb_queue_pairs *
+ sizeof(struct crypto_queue_pair_info),
+ 0, adapter->socket_id);
+ if (dev_info->qpairs == NULL)
+ return -ENOMEM;
+
+ qpairs = dev_info->qpairs;
+ qpairs->op_buffer = rte_zmalloc_socket(adapter->mem_name,
+ BATCH_SIZE *
+ sizeof(struct rte_crypto_op *),
+ 0, adapter->socket_id);
+ if (!qpairs->op_buffer) {
+ rte_free(qpairs);
+ return -ENOMEM;
+ }
+ }
+
+ if (queue_pair_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_queue_pairs; i++)
+ eca_update_qp_info(adapter, dev_info, i, 1);
+ } else
+ eca_update_qp_info(adapter, dev_info,
+ (uint16_t)queue_pair_id, 1);
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_add(uint8_t id,
+ uint8_t cdev_id,
+ int32_t queue_pair_id,
+ const struct rte_event *event)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct rte_eventdev *dev;
+ struct crypto_device_info *dev_info;
+ uint32_t cap;
+ int ret;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
+ return -EINVAL;
+ }
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
+ cdev_id,
+ &cap);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("Failed to get adapter caps dev %" PRIu8
+ " cdev %" PRIu8, id, cdev_id);
+ return ret;
+ }
+
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
+ (event == NULL)) {
+ RTE_EDEV_LOG_ERR("Conf value can not be NULL for dev_id=%u",
+ cdev_id);
+ return -EINVAL;
+ }
+
+ dev_info = &adapter->cdevs[cdev_id];
+
+ if (queue_pair_id != -1 &&
+ (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
+ RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
+ (uint16_t)queue_pair_id);
+ return -EINVAL;
+ }
+
+	/* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD,
+	 * no service core is needed as the HW supports event forwarding.
+	 */
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW) ||
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
+ RTE_FUNC_PTR_OR_ERR_RET(
+ *dev->dev_ops->crypto_adapter_queue_pair_add,
+ -ENOTSUP);
+ if (dev_info->qpairs == NULL) {
+ dev_info->qpairs =
+ rte_zmalloc_socket(adapter->mem_name,
+ dev_info->dev->data->nb_queue_pairs *
+ sizeof(struct crypto_queue_pair_info),
+ 0, adapter->socket_id);
+ if (dev_info->qpairs == NULL)
+ return -ENOMEM;
+ }
+
+ ret = (*dev->dev_ops->crypto_adapter_queue_pair_add)(dev,
+ dev_info->dev,
+ queue_pair_id,
+ event);
+		if (ret)
+			return ret;
+
+		eca_update_qp_info(adapter, &adapter->cdevs[cdev_id],
+					queue_pair_id, 1);
+ }
+
+	/* In case the HW cap is RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW,
+	 * or this is a SW adapter, initiate the service so the application
+	 * can choose whichever way it wants to use the adapter.
+	 * Case 1: RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW
+	 * The application may want to use one of the two modes below:
+	 * a. OP_FORWARD mode -> HW dequeue + SW enqueue
+	 * b. OP_NEW mode -> HW dequeue
+	 * Case 2: No HW caps, use the SW adapter
+	 * a. OP_FORWARD mode -> SW enqueue & dequeue
+	 * b. OP_NEW mode -> SW dequeue
+	 */
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) ||
+ (!(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) &&
+ !(cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND) &&
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA))) {
+ rte_spinlock_lock(&adapter->lock);
+ ret = eca_init_service(adapter, id);
+ if (ret == 0)
+ ret = eca_add_queue_pair(adapter, cdev_id,
+ queue_pair_id);
+ rte_spinlock_unlock(&adapter->lock);
+
+ if (ret)
+ return ret;
+
+ rte_service_component_runstate_set(adapter->service_id, 1);
+ }
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
+ int32_t queue_pair_id)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct crypto_device_info *dev_info;
+ struct rte_eventdev *dev;
+ int ret;
+ uint32_t cap;
+ uint16_t i;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) {
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%" PRIu8, cdev_id);
+ return -EINVAL;
+ }
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ ret = rte_event_crypto_adapter_caps_get(adapter->eventdev_id,
+ cdev_id,
+ &cap);
+ if (ret)
+ return ret;
+
+ dev_info = &adapter->cdevs[cdev_id];
+
+ if (queue_pair_id != -1 &&
+ (uint16_t)queue_pair_id >= dev_info->dev->data->nb_queue_pairs) {
+ RTE_EDEV_LOG_ERR("Invalid queue_pair_id %" PRIu16,
+ (uint16_t)queue_pair_id);
+ return -EINVAL;
+ }
+
+ if ((cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) ||
+ (cap & RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW &&
+ adapter->mode == RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)) {
+ RTE_FUNC_PTR_OR_ERR_RET(
+ *dev->dev_ops->crypto_adapter_queue_pair_del,
+ -ENOTSUP);
+ ret = (*dev->dev_ops->crypto_adapter_queue_pair_del)(dev,
+ dev_info->dev,
+ queue_pair_id);
+ if (ret == 0) {
+ eca_update_qp_info(adapter,
+ &adapter->cdevs[cdev_id],
+ queue_pair_id,
+ 0);
+ if (dev_info->num_qpairs == 0) {
+ rte_free(dev_info->qpairs);
+ dev_info->qpairs = NULL;
+ }
+ }
+ } else {
+ if (adapter->nb_qps == 0)
+ return 0;
+
+ rte_spinlock_lock(&adapter->lock);
+		if (queue_pair_id == -1) {
+			for (i = 0; i < dev_info->dev->data->nb_queue_pairs;
+				i++)
+				eca_update_qp_info(adapter, dev_info,
+					i, 0);
+ } else {
+ eca_update_qp_info(adapter, dev_info,
+ (uint16_t)queue_pair_id, 0);
+ }
+
+ if (dev_info->num_qpairs == 0) {
+ rte_free(dev_info->qpairs);
+ dev_info->qpairs = NULL;
+ }
+
+ rte_spinlock_unlock(&adapter->lock);
+ rte_service_component_runstate_set(adapter->service_id,
+ adapter->nb_qps);
+ }
+
+ return ret;
+}
+
+static int
+eca_adapter_ctrl(uint8_t id, int start)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct crypto_device_info *dev_info;
+ struct rte_eventdev *dev;
+ uint32_t i;
+ int use_service;
+ int stop = !start;
+
+ use_service = 0;
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+
+ for (i = 0; i < rte_cryptodev_count(); i++) {
+ dev_info = &adapter->cdevs[i];
+ /* if start check for num queue pairs */
+ if (start && !dev_info->num_qpairs)
+ continue;
+ /* if stop check if dev has been started */
+ if (stop && !dev_info->dev_started)
+ continue;
+ use_service |= !dev_info->internal_event_port;
+ dev_info->dev_started = start;
+ if (dev_info->internal_event_port == 0)
+ continue;
+		start ? (*dev->dev_ops->crypto_adapter_start)(dev,
+						dev_info->dev) :
+			(*dev->dev_ops->crypto_adapter_stop)(dev,
+						dev_info->dev);
+ }
+
+ if (use_service)
+ rte_service_runstate_set(adapter->service_id, start);
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_start(uint8_t id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ return eca_adapter_ctrl(id, 1);
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_stop(uint8_t id)
+{
+ return eca_adapter_ctrl(id, 0);
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_stats_get(uint8_t id,
+ struct rte_event_crypto_adapter_stats *stats)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct rte_event_crypto_adapter_stats dev_stats_sum = { 0 };
+ struct rte_event_crypto_adapter_stats dev_stats;
+ struct rte_eventdev *dev;
+ struct crypto_device_info *dev_info;
+ uint32_t i;
+ int ret;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL || stats == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ memset(stats, 0, sizeof(*stats));
+ for (i = 0; i < rte_cryptodev_count(); i++) {
+ dev_info = &adapter->cdevs[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->crypto_adapter_stats_get == NULL)
+ continue;
+ ret = (*dev->dev_ops->crypto_adapter_stats_get)(dev,
+ dev_info->dev,
+ &dev_stats);
+ if (ret)
+ continue;
+
+ dev_stats_sum.crypto_deq_count += dev_stats.crypto_deq_count;
+ dev_stats_sum.event_enq_count +=
+ dev_stats.event_enq_count;
+ }
+
+ if (adapter->service_inited)
+ *stats = adapter->crypto_stats;
+
+ stats->crypto_deq_count += dev_stats_sum.crypto_deq_count;
+ stats->event_enq_count += dev_stats_sum.event_enq_count;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_stats_reset(uint8_t id)
+{
+ struct rte_event_crypto_adapter *adapter;
+ struct crypto_device_info *dev_info;
+ struct rte_eventdev *dev;
+ uint32_t i;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[adapter->eventdev_id];
+ for (i = 0; i < rte_cryptodev_count(); i++) {
+ dev_info = &adapter->cdevs[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->crypto_adapter_stats_reset == NULL)
+ continue;
+ (*dev->dev_ops->crypto_adapter_stats_reset)(dev,
+ dev_info->dev);
+ }
+
+ memset(&adapter->crypto_stats, 0, sizeof(adapter->crypto_stats));
+ return 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL || service_id == NULL)
+ return -EINVAL;
+
+ if (adapter->service_inited)
+ *service_id = adapter->service_id;
+
+ return adapter->service_inited ? 0 : -ESRCH;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
+{
+ struct rte_event_crypto_adapter *adapter;
+
+ EVENT_CRYPTO_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ adapter = eca_id_to_adapter(id);
+ if (adapter == NULL || event_port_id == NULL)
+ return -EINVAL;
+
+ *event_port_id = adapter->event_port_id;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.h b/src/spdk/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.h
new file mode 100644
index 00000000..d367309c
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_event_crypto_adapter.h
@@ -0,0 +1,575 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENT_CRYPTO_ADAPTER_
+#define _RTE_EVENT_CRYPTO_ADAPTER_
+
+/**
+ * @file
+ *
+ * RTE Event crypto adapter
+ *
+ * The eventdev library provides a couple of adapters to bridge between
+ * various components and provide new event sources. The event crypto
+ * adapter is one of those adapters; it bridges between event devices and
+ * crypto devices.
+ *
+ * The crypto adapter adds support for enqueueing crypto operations to, and
+ * dequeueing them from, the event device. The packet flow between the
+ * crypto device and the event device can be accomplished using both SW and
+ * HW based transfer mechanisms. The adapter uses an EAL service core
+ * function for SW based packet transfer and uses the eventdev PMD functions
+ * to configure HW based packet transfer between the crypto device and the
+ * event device.
+ *
+ * The application can choose to submit a crypto operation directly to the
+ * crypto device or send it to the crypto adapter via the eventdev, based on
+ * the RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
+ * The first mode is known as the event new (RTE_EVENT_CRYPTO_ADAPTER_OP_NEW)
+ * mode and the second as the event forward
+ * (RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD) mode. The mode is specified when
+ * creating the adapter.
+ * In the former mode, it is the application's responsibility to enable
+ * ingress packet ordering. In the latter mode, it is the adapter's
+ * responsibility to enable ingress packet ordering.
+ *
+ *
+ * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode:
+ *
+ * +--------------+ +--------------+
+ * | | | Crypto stage |
+ * | Application |---[2]-->| + enqueue to |
+ * | | | cryptodev |
+ * +--------------+ +--------------+
+ * ^ ^ |
+ * | | [3]
+ * [6] [1] |
+ * | | |
+ * +--------------+ |
+ * | | |
+ * | Event device | |
+ * | | |
+ * +--------------+ |
+ * ^ |
+ * | |
+ * [5] |
+ * | v
+ * +--------------+ +--------------+
+ * | | | |
+ * |Crypto adapter|<--[4]---| Cryptodev |
+ * | | | |
+ * +--------------+ +--------------+
+ *
+ *
+ * [1] Application dequeues events from the previous stage.
+ * [2] Application prepares the crypto operations.
+ * [3] Crypto operations are submitted to cryptodev by application.
+ * [4] Crypto adapter dequeues crypto completions from cryptodev.
+ * [5] Crypto adapter enqueues events to the eventdev.
+ * [6] Application dequeues from eventdev and prepare for further
+ * processing.
+ *
+ * In the RTE_EVENT_CRYPTO_ADAPTER_OP_NEW mode, the application submits
+ * crypto operations directly to the crypto device. The adapter then dequeues
+ * crypto completions from the crypto device and enqueues events to the
+ * event device.
+ * This mode does not ensure ingress ordering if the application directly
+ * enqueues to the cryptodev without going through a crypto/atomic stage,
+ * i.e. removing items [1] and [2].
+ * Events dequeued from the adapter will be treated as new events.
+ * In this mode, the application needs to specify the event information
+ * (response information) which is needed to enqueue an event after the
+ * crypto operation is completed.
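+ *
+ * A minimal setup sketch for this mode (the identifiers evdev_id and
+ * cdev_id and the port configuration values are illustrative assumptions,
+ * not requirements of this API; error checks are omitted):
+ *
+ * @code
+ *	struct rte_event_port_conf pc = {
+ *		.new_event_threshold = 4096,
+ *		.dequeue_depth = 8,
+ *		.enqueue_depth = 8,
+ *	};
+ *
+ *	rte_event_crypto_adapter_create(id, evdev_id, &pc,
+ *					RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
+ *	rte_event_crypto_adapter_queue_pair_add(id, cdev_id, -1, NULL);
+ *	rte_event_crypto_adapter_start(id);
+ * @endcode
+ *
+ * The application then submits operations with
+ * rte_cryptodev_enqueue_burst() and receives the completions as events
+ * from its event ports.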
+ *
+ *
+ * Working model of RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode:
+ *
+ * +--------------+ +--------------+
+ * --[1]-->| |---[2]-->| Application |
+ * | Event device | | in |
+ * <--[8]--| |<--[3]---| Ordered stage|
+ * +--------------+ +--------------+
+ * ^ |
+ * | [4]
+ * [7] |
+ * | v
+ * +----------------+ +--------------+
+ * | |--[5]->| |
+ * | Crypto adapter | | Cryptodev |
+ * | |<-[6]--| |
+ * +----------------+ +--------------+
+ *
+ *
+ * [1] Events from the previous stage.
+ * [2] Application in ordered stage dequeues events from eventdev.
+ * [3] Application enqueues crypto operations as events to eventdev.
+ * [4] Crypto adapter dequeues event from eventdev.
+ * [5] Crypto adapter submits crypto operations to cryptodev
+ * (Atomic stage).
+ * [6] Crypto adapter dequeues crypto completions from cryptodev
+ * [7] Crypto adapter enqueues events to the eventdev
+ * [8] Events to the next stage
+ *
+ * In the RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode, if the HW supports the
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability, the
+ * application can directly submit the crypto operations to the cryptodev.
+ * If not, the application retrieves the crypto adapter's event port using
+ * the rte_event_crypto_adapter_event_port_get() API, links its event
+ * queue to this port and starts enqueuing crypto operations as events
+ * to the eventdev. The adapter then dequeues the events and submits the
+ * crypto operations to the cryptodev. After the crypto operations complete,
+ * the adapter enqueues events to the event device.
+ * The application can use this mode when ingress packet ordering is needed.
+ * Events dequeued from the adapter will be treated as forwarded events.
+ * In this mode, the application needs to specify the cryptodev ID
+ * and queue pair ID (request information) needed to enqueue a crypto
+ * operation in addition to the event information (response information)
+ * needed to enqueue an event after the crypto operation has completed.
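+ *
+ * A minimal sketch of the SW transfer path in this mode (the identifiers
+ * and the crypto_ev_queue being linked are illustrative assumptions;
+ * error checks are omitted):
+ *
+ * @code
+ *	uint8_t port_id;
+ *
+ *	rte_event_crypto_adapter_create(id, evdev_id, &pc,
+ *				RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
+ *	rte_event_crypto_adapter_queue_pair_add(id, cdev_id, -1, NULL);
+ *	rte_event_crypto_adapter_event_port_get(id, &port_id);
+ *	rte_event_port_link(evdev_id, port_id, &crypto_ev_queue, NULL, 1);
+ *	rte_event_crypto_adapter_start(id);
+ * @endcode
+ *
+ * The application then enqueues crypto operations as events (with
+ * ev.event_ptr pointing at the rte_crypto_op) to crypto_ev_queue.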
+ *
+ *
+ * The event crypto adapter provides common APIs to configure the packet flow
+ * from the crypto device to event devices for both SW and HW based transfers.
+ * The crypto event adapter's functions are:
+ * - rte_event_crypto_adapter_create_ext()
+ * - rte_event_crypto_adapter_create()
+ * - rte_event_crypto_adapter_free()
+ * - rte_event_crypto_adapter_queue_pair_add()
+ * - rte_event_crypto_adapter_queue_pair_del()
+ * - rte_event_crypto_adapter_start()
+ * - rte_event_crypto_adapter_stop()
+ * - rte_event_crypto_adapter_stats_get()
+ * - rte_event_crypto_adapter_stats_reset()
+ *
+ * The application creates an instance using rte_event_crypto_adapter_create()
+ * or rte_event_crypto_adapter_create_ext().
+ *
+ * Cryptodev queue pair addition/deletion is done using the
+ * rte_event_crypto_adapter_queue_pair_xxx() APIs. If HW supports
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability, event
+ * information must be passed to the add API.
+ *
+ * The SW adapter or HW PMD uses rte_crypto_op::sess_type to decide whether
+ * request/response(private) data is located in the crypto/security session
+ * or at an offset in the rte_crypto_op.
+ *
+ * For session-based operations, the set and get API provides a mechanism for
+ * an application to store and retrieve the request/response information
+ * along with the crypto session.
+ * The RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA capability indicates
+ * whether HW or SW supports this feature.
+ *
+ * For session-less mode, the adapter gets the private data information placed
+ * along with the ``struct rte_crypto_op``.
+ * The rte_crypto_op::private_data_offset provides an offset to locate the
+ * request/response information in the rte_crypto_op. This offset is counted
+ * from the start of the rte_crypto_op including initialization vector (IV).
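+ *
+ * A sketch of filling in the session-less metadata (op, iv_len,
+ * response_ev, cdev_id and qp_id are assumptions made for illustration;
+ * the offset must point at memory reserved in the op's mempool element):
+ *
+ * @code
+ *	union rte_event_crypto_metadata *md;
+ *
+ *	op->private_data_offset = sizeof(struct rte_crypto_op) +
+ *				  sizeof(struct rte_crypto_sym_op) + iv_len;
+ *	md = (union rte_event_crypto_metadata *)((uint8_t *)op +
+ *				  op->private_data_offset);
+ *	md->response_info = response_ev;
+ *	md->request_info.cdev_id = cdev_id;
+ *	md->request_info.queue_pair_id = qp_id;
+ * @endcode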
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include "rte_eventdev.h"
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this enum may change without prior notice
+ *
+ * Crypto event adapter mode
+ */
+enum rte_event_crypto_adapter_mode {
+ RTE_EVENT_CRYPTO_ADAPTER_OP_NEW,
+ /**< Start the crypto adapter in event new mode.
+ * @see RTE_EVENT_OP_NEW.
+ * Application submits crypto operations to the cryptodev.
+ * Adapter only dequeues the crypto completions from cryptodev
+ * and enqueue events to the eventdev.
+ */
+ RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD,
+ /**< Start the crypto adapter in event forward mode.
+ * @see RTE_EVENT_OP_FORWARD.
+ * Application submits crypto requests as events to the crypto
+ * adapter or crypto device based on
+ * RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD capability.
+ * Crypto completions are enqueued back to the eventdev by
+ * crypto adapter.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * The crypto event request structure is filled in by the application to
+ * provide event request information to the adapter.
+ */
+struct rte_event_crypto_request {
+ uint8_t resv[8];
+ /**< Overlaps with first 8 bytes of struct rte_event
+ * that encode the response event information. Application
+ * is expected to fill in struct rte_event response_info.
+ */
+ uint16_t cdev_id;
+ /**< cryptodev ID to be used */
+ uint16_t queue_pair_id;
+ /**< cryptodev queue pair ID to be used */
+ uint32_t resv1;
+ /**< Reserved bits */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * The crypto event metadata structure is filled in by the application
+ * to provide crypto request and event response information.
+ *
+ * If crypto events are enqueued using a HW mechanism, the cryptodev
+ * PMD will use the event response information to set up the event
+ * that is enqueued back to eventdev after completion of the crypto
+ * operation. If the transfer is done by SW, event response information
+ * will be used by the adapter.
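+ *
+ * For a session-based operation, a sketch of storing the same union with
+ * the session (sess, response_ev, cdev_id and qp_id are illustrative
+ * assumptions):
+ *
+ * @code
+ *	union rte_event_crypto_metadata md;
+ *
+ *	md.response_info = response_ev;
+ *	md.request_info.cdev_id = cdev_id;
+ *	md.request_info.queue_pair_id = qp_id;
+ *	rte_cryptodev_sym_session_set_user_data(sess, &md, sizeof(md));
+ * @endcode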
+ */
+union rte_event_crypto_metadata {
+ struct rte_event_crypto_request request_info;
+ /**< Request information to be filled in by application
+ * for RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ */
+ struct rte_event response_info;
+ /**< Response information to be filled in by application
+ * for RTE_EVENT_CRYPTO_ADAPTER_OP_NEW and
+ * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Adapter configuration structure that the adapter configuration callback
+ * function is expected to fill out
+ * @see rte_event_crypto_adapter_conf_cb
+ */
+struct rte_event_crypto_adapter_conf {
+ uint8_t event_port_id;
+ /**< Event port identifier, the adapter enqueues events to this
+ * port and dequeues crypto request events in
+ * RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ */
+ uint32_t max_nb;
+ /**< The adapter can return early if it has processed at least
+ * max_nb crypto ops. This isn't treated as a requirement; batching
+ * may cause the adapter to process more than max_nb crypto ops.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Function type used for adapter configuration callback. The callback is
+ * used to fill in members of the struct rte_event_crypto_adapter_conf, this
+ * callback is invoked when creating a SW service for packet transfer from
+ * cryptodev queue pair to the event device. The SW service is created within
+ * the rte_event_crypto_adapter_queue_pair_add() function if SW based packet
+ * transfers from cryptodev queue pair to the event device are required.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param conf
+ * Structure that needs to be populated by this callback.
+ *
+ * @param arg
+ * Argument to the callback. This is the same as the conf_arg passed to the
+ * rte_event_crypto_adapter_create_ext().
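+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure; adapter setup fails in that case.
+ *
+ * A sketch of a trivial callback that hands the adapter an event port the
+ * application set up beforehand (the app_ctx type and its
+ * crypto_adapter_port member are illustrative assumptions):
+ *
+ * @code
+ *	static int
+ *	app_conf_cb(uint8_t id, uint8_t dev_id,
+ *		    struct rte_event_crypto_adapter_conf *conf, void *arg)
+ *	{
+ *		struct app_ctx *ctx = arg;
+ *
+ *		RTE_SET_USED(id);
+ *		RTE_SET_USED(dev_id);
+ *		conf->event_port_id = ctx->crypto_adapter_port;
+ *		conf->max_nb = 128;
+ *		return 0;
+ *	}
+ * @endcode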
+ */
+typedef int (*rte_event_crypto_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
+ struct rte_event_crypto_adapter_conf *conf,
+ void *arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * A structure used to retrieve statistics for an event crypto adapter
+ * instance.
+ */
+struct rte_event_crypto_adapter_stats {
+ uint64_t event_poll_count;
+ /**< Event port poll count */
+ uint64_t event_deq_count;
+ /**< Event dequeue count */
+ uint64_t crypto_enq_count;
+ /**< Cryptodev enqueue count */
+ uint64_t crypto_enq_fail;
+ /**< Cryptodev enqueue failed count */
+ uint64_t crypto_deq_count;
+ /**< Cryptodev dequeue count */
+ uint64_t event_enq_count;
+ /**< Event enqueue count */
+ uint64_t event_enq_retry_count;
+ /**< Event enqueue retry count */
+ uint64_t event_enq_fail_count;
+ /**< Event enqueue fail count */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new event crypto adapter with the specified identifier.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param conf_cb
+ *  Callback function that fills in members of the
+ *  struct rte_event_crypto_adapter_conf passed into it.
+ *
+ * @param mode
+ * Flag to indicate the mode of the adapter.
+ * @see rte_event_crypto_adapter_mode
+ *
+ * @param conf_arg
+ * Argument that is passed to the conf_cb function.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int __rte_experimental
+rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_crypto_adapter_conf_cb conf_cb,
+ enum rte_event_crypto_adapter_mode mode,
+ void *conf_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new event crypto adapter with the specified identifier.
+ * This function uses an internal configuration function that creates an event
+ * port. This default function reconfigures the event device with an
+ * additional event port and sets up the event port using the port_config
+ * parameter passed into this function. If the application needs more
+ * control over the configuration of the service, it should use the
+ * rte_event_crypto_adapter_create_ext() version.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param port_config
+ * Argument of type *rte_event_port_conf* that is passed to the conf_cb
+ * function.
+ *
+ * @param mode
+ * Flag to indicate the mode of the adapter.
+ * @see rte_event_crypto_adapter_mode
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int __rte_experimental
+rte_event_crypto_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config,
+ enum rte_event_crypto_adapter_mode mode);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free an event crypto adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success
+ *  - <0: Error code on failure. If the adapter still has queue pairs
+ * added to it, the function returns -EBUSY.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_free(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Add a queue pair to an event crypto adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param cdev_id
+ * Cryptodev identifier.
+ *
+ * @param queue_pair_id
+ *  Cryptodev queue pair identifier. If queue_pair_id is set to -1,
+ *  the adapter adds all the pre-configured queue pairs to the instance.
+ *
+ * @param event
+ *  If HW supports cryptodev queue pair to event queue binding, the
+ *  application is expected to fill in event information; otherwise it is NULL.
+ * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND
+ *
+ * @return
+ * - 0: Success, queue pair added correctly.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_add(uint8_t id,
+ uint8_t cdev_id,
+ int32_t queue_pair_id,
+ const struct rte_event *event);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Delete a queue pair from an event crypto adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param cdev_id
+ * Cryptodev identifier.
+ *
+ * @param queue_pair_id
+ * Cryptodev queue pair identifier.
+ *
+ * @return
+ * - 0: Success, queue pair deleted successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_queue_pair_del(uint8_t id, uint8_t cdev_id,
+ int32_t queue_pair_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start event crypto adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ *
+ * @return
+ * - 0: Success, adapter started successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_start(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop event crypto adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, adapter stopped successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_stop(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve statistics for an adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] stats
+ * A pointer to structure used to retrieve statistics for an adapter.
+ *
+ * @return
+ * - 0: Success, retrieved successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_stats_get(uint8_t id,
+ struct rte_event_crypto_adapter_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset statistics for an adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, statistics reset successfully.
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_stats_reset(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the service ID of an adapter. If the adapter doesn't use
+ * a rte_service function, this function returns -ESRCH.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ *  - <0: Error code on failure. If the adapter doesn't use a rte_service
+ * function, this function returns -ESRCH.
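+ *
+ * A sketch of running the returned service on a dedicated service lcore
+ * (service_lcore_id is an illustrative assumption and must previously
+ * have been added with rte_service_lcore_add()):
+ *
+ * @code
+ *	uint32_t service_id;
+ *
+ *	if (rte_event_crypto_adapter_service_id_get(id, &service_id) == 0) {
+ *		rte_service_map_lcore_set(service_id, service_lcore_id, 1);
+ *		rte_service_lcore_start(service_lcore_id);
+ *	}
+ * @endcode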
+ */
+int __rte_experimental
+rte_event_crypto_adapter_service_id_get(uint8_t id, uint32_t *service_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the event port of an adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] event_port_id
+ *  A pointer to a uint8_t, to be filled in with the adapter's event port id.
+ *  The application links its event queues to this port in
+ *  RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD mode.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_crypto_adapter_event_port_get(uint8_t id, uint8_t *event_port_id);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_EVENT_CRYPTO_ADAPTER_ */
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/src/spdk/dpdk/lib/librte_eventdev/rte_event_eth_rx_adapter.c
new file mode 100644
index 00000000..f5e5a0b5
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -0,0 +1,2430 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ */
+#if defined(LINUX)
+#include <sys/epoll.h>
+#endif
+#include <unistd.h>
+
+#include <rte_cycles.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_service_component.h>
+#include <rte_thash.h>
+#include <rte_interrupts.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+#include "rte_event_eth_rx_adapter.h"
+
+#define BATCH_SIZE 32
+#define BLOCK_CNT_THRESHOLD 10
+#define ETH_EVENT_BUFFER_SIZE (4*BATCH_SIZE)
+
+#define ETH_RX_ADAPTER_SERVICE_NAME_LEN 32
+#define ETH_RX_ADAPTER_MEM_NAME_LEN 32
+
+#define RSS_KEY_SIZE 40
+/* value written to intr thread pipe to signal thread exit */
+#define ETH_BRIDGE_INTR_THREAD_EXIT 1
+/* Sentinel value to detect an uninitialized file descriptor */
+#define INIT_FD -1
+
+/*
+ * Used to store port and queue ID of interrupting Rx queue
+ */
+union queue_data {
+ RTE_STD_C11
+ void *ptr;
+ struct {
+ uint16_t port;
+ uint16_t queue;
+ };
+};
+
+/*
+ * There is an instance of this struct per polled Rx queue added to the
+ * adapter
+ */
+struct eth_rx_poll_entry {
+ /* Eth port to poll */
+ uint16_t eth_dev_id;
+ /* Eth rx queue to poll */
+ uint16_t eth_rx_qid;
+};
+
+/* Instance per adapter */
+struct rte_eth_event_enqueue_buffer {
+ /* Count of events in this buffer */
+ uint16_t count;
+ /* Array of events in this buffer */
+ struct rte_event events[ETH_EVENT_BUFFER_SIZE];
+};
+
+struct rte_event_eth_rx_adapter {
+ /* RSS key */
+ uint8_t rss_key_be[RSS_KEY_SIZE];
+ /* Event device identifier */
+ uint8_t eventdev_id;
+ /* Per ethernet device structure */
+ struct eth_device_info *eth_devices;
+ /* Event port identifier */
+ uint8_t event_port_id;
+ /* Lock to serialize config updates with service function */
+ rte_spinlock_t rx_lock;
+ /* Max mbufs processed in any service function invocation */
+ uint32_t max_nb_rx;
+ /* Receive queues that need to be polled */
+ struct eth_rx_poll_entry *eth_rx_poll;
+ /* Size of the eth_rx_poll array */
+ uint16_t num_rx_polled;
+ /* Weighted round robin schedule */
+ uint32_t *wrr_sched;
+ /* wrr_sched[] size */
+ uint32_t wrr_len;
+ /* Next entry in wrr[] to begin polling */
+ uint32_t wrr_pos;
+ /* Event burst buffer */
+ struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
+ /* Per adapter stats */
+ struct rte_event_eth_rx_adapter_stats stats;
+ /* Block count, counts up to BLOCK_CNT_THRESHOLD */
+ uint16_t enq_block_count;
+ /* Block start ts */
+ uint64_t rx_enq_block_start_ts;
+ /* epoll fd used to wait for Rx interrupts */
+ int epd;
+	/* Number of interrupt driven Rx queues */
+ uint32_t num_rx_intr;
+ /* Used to send <dev id, queue id> of interrupting Rx queues from
+ * the interrupt thread to the Rx thread
+ */
+ struct rte_ring *intr_ring;
+ /* Rx Queue data (dev id, queue id) for the last non-empty
+ * queue polled
+ */
+ union queue_data qd;
+ /* queue_data is valid */
+ int qd_valid;
+ /* Interrupt ring lock, synchronizes Rx thread
+ * and interrupt thread
+ */
+ rte_spinlock_t intr_ring_lock;
+	/* event array passed to rte_epoll_wait */
+ struct rte_epoll_event *epoll_events;
+ /* Count of interrupt vectors in use */
+ uint32_t num_intr_vec;
+ /* Thread blocked on Rx interrupts */
+ pthread_t rx_intr_thread;
+ /* Configuration callback for rte_service configuration */
+ rte_event_eth_rx_adapter_conf_cb conf_cb;
+ /* Configuration callback argument */
+ void *conf_arg;
+ /* Set if default_cb is being used */
+ int default_cb_arg;
+ /* Service initialization state */
+ uint8_t service_inited;
+ /* Total count of Rx queues in adapter */
+ uint32_t nb_queues;
+ /* Memory allocation name */
+ char mem_name[ETH_RX_ADAPTER_MEM_NAME_LEN];
+ /* Socket identifier cached from eventdev */
+ int socket_id;
+ /* Per adapter EAL service */
+ uint32_t service_id;
+ /* Adapter started flag */
+ uint8_t rxa_started;
+ /* Adapter ID */
+ uint8_t id;
+} __rte_cache_aligned;
+
+/* Per eth device */
+struct eth_device_info {
+ struct rte_eth_dev *dev;
+ struct eth_rx_queue_info *rx_queue;
+ /* Rx callback */
+ rte_event_eth_rx_adapter_cb_fn cb_fn;
+ /* Rx callback argument */
+ void *cb_arg;
+ /* Set if ethdev->eventdev packet transfer uses a
+ * hardware mechanism
+ */
+ uint8_t internal_event_port;
+ /* Set if the adapter is processing rx queues for
+ * this eth device and packet processing has been
+ * started, allows for the code to know if the PMD
+ * rx_adapter_stop callback needs to be invoked
+ */
+ uint8_t dev_rx_started;
+ /* Number of queues added for this device */
+ uint16_t nb_dev_queues;
+ /* Number of poll based queues
+ * If nb_rx_poll > 0, the start callback will
+ * be invoked if not already invoked
+ */
+ uint16_t nb_rx_poll;
+ /* Number of interrupt based queues
+ * If nb_rx_intr > 0, the start callback will
+ * be invoked if not already invoked.
+ */
+ uint16_t nb_rx_intr;
+ /* Number of queues that use the shared interrupt */
+ uint16_t nb_shared_intr;
+ /* sum(wrr(q)) for all queues within the device
+ * useful when deleting all device queues
+ */
+ uint32_t wrr_len;
+ /* Intr based queue index to start polling from, this is used
+ * if the number of shared interrupts is non-zero
+ */
+ uint16_t next_q_idx;
+ /* Intr based queue indices */
+ uint16_t *intr_queue;
+	/* Set if the device generates a per Rx queue interrupt; per queue
+	 * interrupts are only available for queue indices
+	 * < RTE_MAX_RXTX_INTR_VEC_ID - 1
+	 */
+ int multi_intr_cap;
+ /* shared interrupt enabled */
+ int shared_intr_enabled;
+};
+
+/* Per Rx queue */
+struct eth_rx_queue_info {
+ int queue_enabled; /* True if added */
+ int intr_enabled;
+ uint16_t wt; /* Polling weight */
+ uint8_t event_queue_id; /* Event queue to enqueue packets to */
+ uint8_t sched_type; /* Sched type for events */
+ uint8_t priority; /* Event priority */
+ uint32_t flow_id; /* App provided flow identifier */
+ uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */
+};
+
+static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
+
+static inline int
+rxa_validate_id(uint8_t id)
+{
+ return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
+}
+
+#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
+ if (!rxa_validate_id(id)) { \
+ RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
+ return retval; \
+ } \
+} while (0)
+
+static inline int
+rxa_sw_adapter_queue_count(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ return rx_adapter->num_rx_polled + rx_adapter->num_rx_intr;
+}
+
+/* Greatest common divisor */
+static uint16_t rxa_gcd_u16(uint16_t a, uint16_t b)
+{
+ uint16_t r = a % b;
+
+ return r ? rxa_gcd_u16(b, r) : b;
+}
+
+/* Returns the next queue in the polling sequence
+ *
+ * http://kb.linuxvirtualserver.org/wiki/Weighted_Round-Robin_Scheduling
+ */
+static int
+rxa_wrr_next(struct rte_event_eth_rx_adapter *rx_adapter,
+ unsigned int n, int *cw,
+ struct eth_rx_poll_entry *eth_rx_poll, uint16_t max_wt,
+ uint16_t gcd, int prev)
+{
+ int i = prev;
+ uint16_t w;
+
+ while (1) {
+ uint16_t q;
+ uint16_t d;
+
+ i = (i + 1) % n;
+ if (i == 0) {
+ *cw = *cw - gcd;
+ if (*cw <= 0)
+ *cw = max_wt;
+ }
+
+ q = eth_rx_poll[i].eth_rx_qid;
+ d = eth_rx_poll[i].eth_dev_id;
+ w = rx_adapter->eth_devices[d].rx_queue[q].wt;
+
+ if ((int)w >= *cw)
+ return i;
+ }
+}
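+
+/* Example (illustrative): two queues with weights w0 = 2 and w1 = 1 give
+ * max_wt = 2 and gcd = 1. Successive calls to rxa_wrr_next() then return
+ * the indices 0, 0, 1, 0, 0, 1, ... i.e. queue 0 is polled twice for each
+ * poll of queue 1, matching the 2:1 weight ratio.
+ */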
+
+static inline int
+rxa_shared_intr(struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ int multi_intr_cap;
+
+ if (dev_info->dev->intr_handle == NULL)
+ return 0;
+
+ multi_intr_cap = rte_intr_cap_multiple(dev_info->dev->intr_handle);
+ return !multi_intr_cap ||
+ rx_queue_id >= RTE_MAX_RXTX_INTR_VEC_ID - 1;
+}
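+
+/* Example (illustrative): if the interrupt handle lacks the multiple
+ * vector capability, one interrupt is shared by all Rx queues and
+ * rxa_shared_intr() is true for every queue id; with the capability, only
+ * queue ids >= RTE_MAX_RXTX_INTR_VEC_ID - 1, which exceed the per queue
+ * vector range, fall back to the shared interrupt.
+ */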
+
+static inline int
+rxa_intr_queue(struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ struct eth_rx_queue_info *queue_info;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ return dev_info->rx_queue &&
+ !dev_info->internal_event_port &&
+ queue_info->queue_enabled && queue_info->wt == 0;
+}
+
+static inline int
+rxa_polled_queue(struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ struct eth_rx_queue_info *queue_info;
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ return !dev_info->internal_event_port &&
+ dev_info->rx_queue &&
+ queue_info->queue_enabled && queue_info->wt != 0;
+}
+
+/* Calculate change in number of vectors after Rx queue ID is added/deleted */
+static int
+rxa_nb_intr_vect(struct eth_device_info *dev_info, int rx_queue_id, int add)
+{
+ uint16_t i;
+ int n, s;
+ uint16_t nbq;
+
+ nbq = dev_info->dev->data->nb_rx_queues;
+ n = 0; /* non shared count */
+ s = 0; /* shared count */
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < nbq; i++) {
+ if (!rxa_shared_intr(dev_info, i))
+ n += add ? !rxa_intr_queue(dev_info, i) :
+ rxa_intr_queue(dev_info, i);
+ else
+ s += add ? !rxa_intr_queue(dev_info, i) :
+ rxa_intr_queue(dev_info, i);
+ }
+
+ if (s > 0) {
+ if ((add && dev_info->nb_shared_intr == 0) ||
+ (!add && dev_info->nb_shared_intr))
+ n += 1;
+ }
+ } else {
+ if (!rxa_shared_intr(dev_info, rx_queue_id))
+ n = add ? !rxa_intr_queue(dev_info, rx_queue_id) :
+ rxa_intr_queue(dev_info, rx_queue_id);
+ else
+ n = add ? !dev_info->nb_shared_intr :
+ dev_info->nb_shared_intr == 1;
+ }
+
+ return add ? n : -n;
+}
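+
+/* Example (illustrative): consider a device with 3 Rx queues, none of them
+ * currently in interrupt mode, where queues 0-1 have dedicated vectors and
+ * queue 2 uses the shared vector. Adding all queues (rx_queue_id == -1,
+ * add == 1) returns 2 (dedicated) + 1 (first user of the shared vector) = 3.
+ */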
+
+/* Calculate nb_rx_intr after deleting interrupt mode rx queues
+ */
+static void
+rxa_calc_nb_post_intr_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_intr)
+{
+ uint32_t intr_diff;
+
+ if (rx_queue_id == -1)
+ intr_diff = dev_info->nb_rx_intr;
+ else
+ intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
+
+ *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
+}
+
+/* Calculate nb_rx_* after adding interrupt mode rx queues; the newly added
+ * interrupt queues may currently be poll mode Rx queues
+ */
+static void
+rxa_calc_nb_post_add_intr(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ uint32_t intr_diff;
+ uint32_t poll_diff;
+ uint32_t wrr_len_diff;
+
+ if (rx_queue_id == -1) {
+ intr_diff = dev_info->dev->data->nb_rx_queues -
+ dev_info->nb_rx_intr;
+ poll_diff = dev_info->nb_rx_poll;
+ wrr_len_diff = dev_info->wrr_len;
+ } else {
+ intr_diff = !rxa_intr_queue(dev_info, rx_queue_id);
+ poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
+ wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
+ 0;
+ }
+
+ *nb_rx_intr = rx_adapter->num_rx_intr + intr_diff;
+ *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
+ *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
+}
+
+/* Calculate size of the eth_rx_poll and wrr_sched arrays
+ * after deleting poll mode rx queues
+ */
+static void
+rxa_calc_nb_post_poll_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_wrr)
+{
+ uint32_t poll_diff;
+ uint32_t wrr_len_diff;
+
+ if (rx_queue_id == -1) {
+ poll_diff = dev_info->nb_rx_poll;
+ wrr_len_diff = dev_info->wrr_len;
+ } else {
+ poll_diff = rxa_polled_queue(dev_info, rx_queue_id);
+ wrr_len_diff = poll_diff ? dev_info->rx_queue[rx_queue_id].wt :
+ 0;
+ }
+
+ *nb_rx_poll = rx_adapter->num_rx_polled - poll_diff;
+ *nb_wrr = rx_adapter->wrr_len - wrr_len_diff;
+}
+
+/* Calculate nb_rx_* after adding poll mode rx queues
+ */
+static void
+rxa_calc_nb_post_add_poll(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint16_t wt,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ uint32_t intr_diff;
+ uint32_t poll_diff;
+ uint32_t wrr_len_diff;
+
+ if (rx_queue_id == -1) {
+ intr_diff = dev_info->nb_rx_intr;
+ poll_diff = dev_info->dev->data->nb_rx_queues -
+ dev_info->nb_rx_poll;
+ wrr_len_diff = wt*dev_info->dev->data->nb_rx_queues
+ - dev_info->wrr_len;
+ } else {
+ intr_diff = rxa_intr_queue(dev_info, rx_queue_id);
+ poll_diff = !rxa_polled_queue(dev_info, rx_queue_id);
+ wrr_len_diff = rxa_polled_queue(dev_info, rx_queue_id) ?
+ wt - dev_info->rx_queue[rx_queue_id].wt :
+ wt;
+ }
+
+ *nb_rx_poll = rx_adapter->num_rx_polled + poll_diff;
+ *nb_rx_intr = rx_adapter->num_rx_intr - intr_diff;
+ *nb_wrr = rx_adapter->wrr_len + wrr_len_diff;
+}
+
+/* Calculate nb_rx_* after adding rx_queue_id */
+static void
+rxa_calc_nb_post_add(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint16_t wt,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ if (wt != 0)
+ rxa_calc_nb_post_add_poll(rx_adapter, dev_info, rx_queue_id,
+ wt, nb_rx_poll, nb_rx_intr, nb_wrr);
+ else
+ rxa_calc_nb_post_add_intr(rx_adapter, dev_info, rx_queue_id,
+ nb_rx_poll, nb_rx_intr, nb_wrr);
+}
+
+/* Calculate nb_rx_* after deleting rx_queue_id */
+static void
+rxa_calc_nb_post_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id,
+ uint32_t *nb_rx_poll,
+ uint32_t *nb_rx_intr,
+ uint32_t *nb_wrr)
+{
+ rxa_calc_nb_post_poll_del(rx_adapter, dev_info, rx_queue_id, nb_rx_poll,
+ nb_wrr);
+ rxa_calc_nb_post_intr_del(rx_adapter, dev_info, rx_queue_id,
+ nb_rx_intr);
+}
+
+/*
+ * Allocate the rx_poll array
+ */
+static struct eth_rx_poll_entry *
+rxa_alloc_poll(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint32_t num_rx_polled)
+{
+ size_t len;
+
+ len = RTE_ALIGN(num_rx_polled * sizeof(*rx_adapter->eth_rx_poll),
+ RTE_CACHE_LINE_SIZE);
+ return rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+}
+
+/*
+ * Allocate the WRR array
+ */
+static uint32_t *
+rxa_alloc_wrr(struct rte_event_eth_rx_adapter *rx_adapter, int nb_wrr)
+{
+ size_t len;
+
+ len = RTE_ALIGN(nb_wrr * sizeof(*rx_adapter->wrr_sched),
+ RTE_CACHE_LINE_SIZE);
+ return rte_zmalloc_socket(rx_adapter->mem_name,
+ len,
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+}
+
+static int
+rxa_alloc_poll_arrays(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint32_t nb_poll,
+ uint32_t nb_wrr,
+ struct eth_rx_poll_entry **rx_poll,
+ uint32_t **wrr_sched)
+{
+
+ if (nb_poll == 0) {
+ *rx_poll = NULL;
+ *wrr_sched = NULL;
+ return 0;
+ }
+
+ *rx_poll = rxa_alloc_poll(rx_adapter, nb_poll);
+ if (*rx_poll == NULL) {
+ *wrr_sched = NULL;
+ return -ENOMEM;
+ }
+
+ *wrr_sched = rxa_alloc_wrr(rx_adapter, nb_wrr);
+ if (*wrr_sched == NULL) {
+ rte_free(*rx_poll);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Precalculate WRR polling sequence for all queues in rx_adapter */
+static void
+rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_rx_poll_entry *rx_poll,
+ uint32_t *rx_wrr)
+{
+ uint16_t d;
+ uint16_t q;
+ unsigned int i;
+ int prev = -1;
+ int cw = -1;
+
+ /* Initialize variables for calculation of wrr schedule */
+ uint16_t max_wrr_pos = 0;
+ unsigned int poll_q = 0;
+ uint16_t max_wt = 0;
+ uint16_t gcd = 0;
+
+ if (rx_poll == NULL)
+ return;
+
+	/* Generate the array of all queues to poll; the size of this
+	 * array is poll_q
+	 */
+ RTE_ETH_FOREACH_DEV(d) {
+ uint16_t nb_rx_queues;
+ struct eth_device_info *dev_info =
+ &rx_adapter->eth_devices[d];
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ if (dev_info->rx_queue == NULL)
+ continue;
+ if (dev_info->internal_event_port)
+ continue;
+ dev_info->wrr_len = 0;
+ for (q = 0; q < nb_rx_queues; q++) {
+ struct eth_rx_queue_info *queue_info =
+ &dev_info->rx_queue[q];
+ uint16_t wt;
+
+ if (!rxa_polled_queue(dev_info, q))
+ continue;
+ wt = queue_info->wt;
+ rx_poll[poll_q].eth_dev_id = d;
+ rx_poll[poll_q].eth_rx_qid = q;
+ max_wrr_pos += wt;
+ dev_info->wrr_len += wt;
+ max_wt = RTE_MAX(max_wt, wt);
+ gcd = (gcd) ? rxa_gcd_u16(gcd, wt) : wt;
+ poll_q++;
+ }
+ }
+
+ /* Generate polling sequence based on weights */
+ prev = -1;
+ cw = -1;
+ for (i = 0; i < max_wrr_pos; i++) {
+ rx_wrr[i] = rxa_wrr_next(rx_adapter, poll_q, &cw,
+ rx_poll, max_wt, gcd, prev);
+ prev = rx_wrr[i];
+ }
+}
+
+static inline void
+rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
+ struct ipv6_hdr **ipv6_hdr)
+{
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ struct vlan_hdr *vlan_hdr;
+
+ *ipv4_hdr = NULL;
+ *ipv6_hdr = NULL;
+
+ switch (eth_hdr->ether_type) {
+ case RTE_BE16(ETHER_TYPE_IPv4):
+ *ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ break;
+
+ case RTE_BE16(ETHER_TYPE_IPv6):
+ *ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+ break;
+
+ case RTE_BE16(ETHER_TYPE_VLAN):
+ vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+ switch (vlan_hdr->eth_proto) {
+ case RTE_BE16(ETHER_TYPE_IPv4):
+ *ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);
+ break;
+ case RTE_BE16(ETHER_TYPE_IPv6):
+ *ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);
+ break;
+ default:
+ break;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
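+
+/* Note: rxa_mtoip() handles at most one VLAN tag and assumes the IP header
+ * immediately follows the Ethernet (or VLAN) header; for any other frame
+ * both header pointers remain NULL and rxa_do_softrss() below returns 0.
+ */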
+
+/* Calculate RSS hash for IPv4/6 */
+static inline uint32_t
+rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
+{
+ uint32_t input_len;
+ void *tuple;
+ struct rte_ipv4_tuple ipv4_tuple;
+ struct rte_ipv6_tuple ipv6_tuple;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+
+ rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
+
+ if (ipv4_hdr) {
+ ipv4_tuple.src_addr = rte_be_to_cpu_32(ipv4_hdr->src_addr);
+ ipv4_tuple.dst_addr = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
+ tuple = &ipv4_tuple;
+ input_len = RTE_THASH_V4_L3_LEN;
+ } else if (ipv6_hdr) {
+ rte_thash_load_v6_addrs(ipv6_hdr,
+ (union rte_thash_tuple *)&ipv6_tuple);
+ tuple = &ipv6_tuple;
+ input_len = RTE_THASH_V6_L3_LEN;
+ } else
+ return 0;
+
+ return rte_softrss_be(tuple, input_len, rss_key_be);
+}
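+
+/* Note: rss_key_be is expected in the byte swapped format produced by
+ * rte_convert_rss_key(); see rte_event_eth_rx_adapter_create_ext(), which
+ * converts the default RSS key before it is used with rte_softrss_be().
+ */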
+
+static inline int
+rxa_enq_blocked(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ return !!rx_adapter->enq_block_count;
+}
+
+static inline void
+rxa_enq_block_start_ts(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ if (rx_adapter->rx_enq_block_start_ts)
+ return;
+
+ rx_adapter->enq_block_count++;
+ if (rx_adapter->enq_block_count < BLOCK_CNT_THRESHOLD)
+ return;
+
+ rx_adapter->rx_enq_block_start_ts = rte_get_tsc_cycles();
+}
+
+static inline void
+rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct rte_event_eth_rx_adapter_stats *stats)
+{
+ if (unlikely(!stats->rx_enq_start_ts))
+ stats->rx_enq_start_ts = rte_get_tsc_cycles();
+
+ if (likely(!rxa_enq_blocked(rx_adapter)))
+ return;
+
+ rx_adapter->enq_block_count = 0;
+ if (rx_adapter->rx_enq_block_start_ts) {
+ stats->rx_enq_end_ts = rte_get_tsc_cycles();
+ stats->rx_enq_block_cycles += stats->rx_enq_end_ts -
+ rx_adapter->rx_enq_block_start_ts;
+ rx_adapter->rx_enq_block_start_ts = 0;
+ }
+}
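+
+/* Example (illustrative): if rxa_flush_event_buffer() makes no progress
+ * for BLOCK_CNT_THRESHOLD consecutive invocations, the TSC is sampled once
+ * in rxa_enq_block_start_ts(); when an enqueue finally succeeds,
+ * rxa_enq_block_end_ts() adds the elapsed cycles to rx_enq_block_cycles,
+ * approximating the time the event device back pressured the adapter.
+ */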
+
+/* Add event to buffer; the free space check is done prior to calling
+ * this function
+ */
+static inline void
+rxa_buffer_event(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct rte_event *ev)
+{
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ rte_memcpy(&buf->events[buf->count++], ev, sizeof(struct rte_event));
+}
+
+/* Enqueue buffered events to event device */
+static inline uint16_t
+rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
+
+ uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
+ rx_adapter->event_port_id,
+ buf->events,
+ buf->count);
+ if (n != buf->count) {
+ memmove(buf->events,
+ &buf->events[n],
+ (buf->count - n) * sizeof(struct rte_event));
+ stats->rx_enq_retry++;
+ }
+
+ n ? rxa_enq_block_end_ts(rx_adapter, stats) :
+ rxa_enq_block_start_ts(rx_adapter);
+
+ buf->count -= n;
+ stats->rx_enq_count += n;
+
+ return n;
+}
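+
+/* Example (illustrative): with buf->count = 32 and only n = 20 events
+ * accepted by rte_event_enqueue_new_burst(), the 12 unsent events are
+ * moved to the front of the buffer, rx_enq_retry is incremented and
+ * buf->count drops to 12; the leftover events are retried on a later
+ * flush.
+ */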
+
+static inline void
+rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t eth_dev_id,
+ uint16_t rx_queue_id,
+ struct rte_mbuf **mbufs,
+ uint16_t num)
+{
+ uint32_t i;
+ struct eth_device_info *dev_info =
+ &rx_adapter->eth_devices[eth_dev_id];
+ struct eth_rx_queue_info *eth_rx_queue_info =
+ &dev_info->rx_queue[rx_queue_id];
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ int32_t qid = eth_rx_queue_info->event_queue_id;
+ uint8_t sched_type = eth_rx_queue_info->sched_type;
+ uint8_t priority = eth_rx_queue_info->priority;
+ uint32_t flow_id;
+ struct rte_event events[BATCH_SIZE];
+ struct rte_mbuf *m = mbufs[0];
+ uint32_t rss_mask;
+ uint32_t rss;
+ int do_rss;
+ uint64_t ts;
+ struct rte_mbuf *cb_mbufs[BATCH_SIZE];
+ uint16_t nb_cb;
+
+ /* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
+ rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
+ do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
+
+ if ((m->ol_flags & PKT_RX_TIMESTAMP) == 0) {
+ ts = rte_get_tsc_cycles();
+ for (i = 0; i < num; i++) {
+ m = mbufs[i];
+
+ m->timestamp = ts;
+ m->ol_flags |= PKT_RX_TIMESTAMP;
+ }
+ }
+
+
+ nb_cb = dev_info->cb_fn ? dev_info->cb_fn(eth_dev_id, rx_queue_id,
+ ETH_EVENT_BUFFER_SIZE,
+ buf->count, mbufs,
+ num,
+ dev_info->cb_arg,
+ cb_mbufs) :
+ num;
+ if (nb_cb < num) {
+ mbufs = cb_mbufs;
+ num = nb_cb;
+ }
+
+ for (i = 0; i < num; i++) {
+ m = mbufs[i];
+ struct rte_event *ev = &events[i];
+
+ rss = do_rss ?
+ rxa_do_softrss(m, rx_adapter->rss_key_be) :
+ m->hash.rss;
+ flow_id =
+ eth_rx_queue_info->flow_id &
+ eth_rx_queue_info->flow_id_mask;
+ flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
+ ev->flow_id = flow_id;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = sched_type;
+ ev->queue_id = qid;
+ ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
+ ev->sub_event_type = 0;
+ ev->priority = priority;
+ ev->mbuf = m;
+
+ rxa_buffer_event(rx_adapter, ev);
+ }
+}
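+
+/* Example (illustrative): with a queue configured with flow_id = 0x100 and
+ * flow_id_mask = ~0 (app provided flow id), every event is assigned
+ * ev->flow_id = 0x100; with flow_id_mask = 0, ev->flow_id comes entirely
+ * from the mbuf RSS hash, or from rxa_do_softrss() when the mbuf carries
+ * no RSS hash.
+ */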
+
+/* Enqueue packets from <port, q> to event buffer */
+static inline uint32_t
+rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t port_id,
+ uint16_t queue_id,
+ uint32_t rx_count,
+ uint32_t max_rx,
+ int *rxq_empty)
+{
+ struct rte_mbuf *mbufs[BATCH_SIZE];
+ struct rte_eth_event_enqueue_buffer *buf =
+ &rx_adapter->event_enqueue_buffer;
+ struct rte_event_eth_rx_adapter_stats *stats =
+ &rx_adapter->stats;
+ uint16_t n;
+ uint32_t nb_rx = 0;
+
+ if (rxq_empty)
+ *rxq_empty = 0;
+ /* Don't do a batch dequeue from the rx queue if there isn't
+ * enough space in the enqueue buffer.
+ */
+ while (BATCH_SIZE <= (RTE_DIM(buf->events) - buf->count)) {
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ stats->rx_poll_count++;
+ n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
+ if (unlikely(!n)) {
+ if (rxq_empty)
+ *rxq_empty = 1;
+ break;
+ }
+ rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
+ nb_rx += n;
+ if (rx_count + nb_rx > max_rx)
+ break;
+ }
+
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ return nb_rx;
+}
+
+static inline void
+rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
+ void *data)
+{
+ uint16_t port_id;
+ uint16_t queue;
+ int err;
+ union queue_data qd;
+ struct eth_device_info *dev_info;
+ struct eth_rx_queue_info *queue_info;
+ int *intr_enabled;
+
+ qd.ptr = data;
+ port_id = qd.port;
+ queue = qd.queue;
+
+ dev_info = &rx_adapter->eth_devices[port_id];
+ queue_info = &dev_info->rx_queue[queue];
+ rte_spinlock_lock(&rx_adapter->intr_ring_lock);
+ if (rxa_shared_intr(dev_info, queue))
+ intr_enabled = &dev_info->shared_intr_enabled;
+ else
+ intr_enabled = &queue_info->intr_enabled;
+
+ if (*intr_enabled) {
+ *intr_enabled = 0;
+ err = rte_ring_enqueue(rx_adapter->intr_ring, data);
+ /* Entry should always be available.
+ * The ring size equals the maximum number of interrupt
+ * vectors supported (an interrupt vector is shared in
+ * case of shared interrupts)
+ */
+ if (err)
+			RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
+				" to ring: %s", strerror(-err));
+ else
+ rte_eth_dev_rx_intr_disable(port_id, queue);
+ }
+ rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
+}
+
+static int
+rxa_intr_ring_check_avail(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint32_t num_intr_vec)
+{
+ if (rx_adapter->num_intr_vec + num_intr_vec >
+ RTE_EVENT_ETH_INTR_RING_SIZE) {
+ RTE_EDEV_LOG_ERR("Exceeded intr ring slots current"
+ " %d needed %d limit %d", rx_adapter->num_intr_vec,
+ num_intr_vec, RTE_EVENT_ETH_INTR_RING_SIZE);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+/* Delete entries for (dev, queue) from the interrupt ring */
+static void
+rxa_intr_ring_del_entries(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ int i, n;
+ union queue_data qd;
+
+ rte_spinlock_lock(&rx_adapter->intr_ring_lock);
+
+ n = rte_ring_count(rx_adapter->intr_ring);
+ for (i = 0; i < n; i++) {
+ rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
+ if (!rxa_shared_intr(dev_info, rx_queue_id)) {
+ if (qd.port == dev_info->dev->data->port_id &&
+ qd.queue == rx_queue_id)
+ continue;
+ } else {
+ if (qd.port == dev_info->dev->data->port_id)
+ continue;
+ }
+ rte_ring_enqueue(rx_adapter->intr_ring, qd.ptr);
+ }
+
+ rte_spinlock_unlock(&rx_adapter->intr_ring_lock);
+}
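+
+/* Note: the function above rotates through the ring exactly once; each
+ * entry is dequeued and re-enqueued unless it matches the deleted
+ * <port, queue> pair (or just the port, for shared interrupts), so
+ * matching entries are dropped and the order of the rest is preserved.
+ */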
+
+/* pthread callback handling interrupt mode receive queues
+ * After receiving an Rx interrupt, it enqueues the port id and queue id of the
+ * interrupting queue to the adapter's ring buffer for interrupt events.
+ * These events are picked up by rxa_intr_ring_dequeue() which is invoked from
+ * the adapter service function.
+ */
+static void *
+rxa_intr_thread(void *arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter = arg;
+ struct rte_epoll_event *epoll_events = rx_adapter->epoll_events;
+ int n, i;
+
+ while (1) {
+ n = rte_epoll_wait(rx_adapter->epd, epoll_events,
+ RTE_EVENT_ETH_INTR_RING_SIZE, -1);
+ if (unlikely(n < 0))
+ RTE_EDEV_LOG_ERR("rte_epoll_wait returned error %d",
+ n);
+ for (i = 0; i < n; i++) {
+ rxa_intr_ring_enqueue(rx_adapter,
+ epoll_events[i].epdata.data);
+ }
+ }
+
+ return NULL;
+}
+
+/* Dequeue <port, q> from interrupt ring and enqueue received
+ * mbufs to eventdev
+ */
+static inline uint32_t
+rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ uint32_t n;
+ uint32_t nb_rx = 0;
+ int rxq_empty;
+ struct rte_eth_event_enqueue_buffer *buf;
+ rte_spinlock_t *ring_lock;
+ uint8_t max_done = 0;
+
+ if (rx_adapter->num_rx_intr == 0)
+ return 0;
+
+ if (rte_ring_count(rx_adapter->intr_ring) == 0
+ && !rx_adapter->qd_valid)
+ return 0;
+
+ buf = &rx_adapter->event_enqueue_buffer;
+ ring_lock = &rx_adapter->intr_ring_lock;
+
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+
+ while (BATCH_SIZE <= (RTE_DIM(buf->events) - buf->count)) {
+ struct eth_device_info *dev_info;
+ uint16_t port;
+ uint16_t queue;
+ union queue_data qd = rx_adapter->qd;
+ int err;
+
+ if (!rx_adapter->qd_valid) {
+ struct eth_rx_queue_info *queue_info;
+
+ rte_spinlock_lock(ring_lock);
+ err = rte_ring_dequeue(rx_adapter->intr_ring, &qd.ptr);
+ if (err) {
+ rte_spinlock_unlock(ring_lock);
+ break;
+ }
+
+ port = qd.port;
+ queue = qd.queue;
+ rx_adapter->qd = qd;
+ rx_adapter->qd_valid = 1;
+ dev_info = &rx_adapter->eth_devices[port];
+ if (rxa_shared_intr(dev_info, queue))
+ dev_info->shared_intr_enabled = 1;
+ else {
+ queue_info = &dev_info->rx_queue[queue];
+ queue_info->intr_enabled = 1;
+ }
+ rte_eth_dev_rx_intr_enable(port, queue);
+ rte_spinlock_unlock(ring_lock);
+ } else {
+ port = qd.port;
+ queue = qd.queue;
+
+ dev_info = &rx_adapter->eth_devices[port];
+ }
+
+ if (rxa_shared_intr(dev_info, queue)) {
+ uint16_t i;
+ uint16_t nb_queues;
+
+ nb_queues = dev_info->dev->data->nb_rx_queues;
+ n = 0;
+ for (i = dev_info->next_q_idx; i < nb_queues; i++) {
+ uint8_t enq_buffer_full;
+
+ if (!rxa_intr_queue(dev_info, i))
+ continue;
+ n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
+ rx_adapter->max_nb_rx,
+ &rxq_empty);
+ nb_rx += n;
+
+ enq_buffer_full = !rxq_empty && n == 0;
+ max_done = nb_rx > rx_adapter->max_nb_rx;
+
+ if (enq_buffer_full || max_done) {
+ dev_info->next_q_idx = i;
+ goto done;
+ }
+ }
+
+ rx_adapter->qd_valid = 0;
+
+ /* Reinitialize for next interrupt */
+ dev_info->next_q_idx = dev_info->multi_intr_cap ?
+ RTE_MAX_RXTX_INTR_VEC_ID - 1 :
+ 0;
+ } else {
+ n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
+ rx_adapter->max_nb_rx,
+ &rxq_empty);
+ rx_adapter->qd_valid = !rxq_empty;
+ nb_rx += n;
+ if (nb_rx > rx_adapter->max_nb_rx)
+ break;
+ }
+ }
+
+done:
+ rx_adapter->stats.rx_intr_packets += nb_rx;
+ return nb_rx;
+}
+
+/*
+ * Polls receive queues added to the event adapter and enqueues received
+ * packets to the event device.
+ *
+ * The receive code enqueues initially to a temporary buffer; the buffer is
+ * drained any time it holds >= BATCH_SIZE packets.
+ *
+ * If there isn't enough space available in the temporary buffer, packets
+ * aren't dequeued from the eth device's Rx queue. This back pressures the
+ * eth device; in virtual device environments the back pressure is relayed
+ * to the hypervisor's switching layer, where adjustments can be made to
+ * deal with it.
+ */
+static inline uint32_t
+rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ uint32_t num_queue;
+ uint32_t nb_rx = 0;
+ struct rte_eth_event_enqueue_buffer *buf;
+ uint32_t wrr_pos;
+ uint32_t max_nb_rx;
+
+ wrr_pos = rx_adapter->wrr_pos;
+ max_nb_rx = rx_adapter->max_nb_rx;
+ buf = &rx_adapter->event_enqueue_buffer;
+
+ /* Iterate through a WRR sequence */
+ for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
+ unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
+ uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
+ uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
+
+ /* Don't do a batch dequeue from the rx queue if there isn't
+ * enough space in the enqueue buffer.
+ */
+ if (buf->count >= BATCH_SIZE)
+ rxa_flush_event_buffer(rx_adapter);
+ if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count)) {
+ rx_adapter->wrr_pos = wrr_pos;
+ return nb_rx;
+ }
+
+ nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
+ NULL);
+ if (nb_rx > max_nb_rx) {
+ rx_adapter->wrr_pos =
+ (wrr_pos + 1) % rx_adapter->wrr_len;
+ break;
+ }
+
+ if (++wrr_pos == rx_adapter->wrr_len)
+ wrr_pos = 0;
+ }
+ return nb_rx;
+}
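+
+/* Note: rxa_poll() stops dequeuing whenever fewer than BATCH_SIZE event
+ * slots remain in the enqueue buffer, so mbufs stay in the device Rx
+ * queue; this is the back pressure mechanism described in the comment
+ * above.
+ */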
+
+static int
+rxa_service_func(void *args)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter = args;
+ struct rte_event_eth_rx_adapter_stats *stats;
+
+ if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
+ return 0;
+	if (!rx_adapter->rxa_started) {
+		rte_spinlock_unlock(&rx_adapter->rx_lock);
+		return 0;
+	}
+
+ stats = &rx_adapter->stats;
+ stats->rx_packets += rxa_intr_ring_dequeue(rx_adapter);
+ stats->rx_packets += rxa_poll(rx_adapter);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ return 0;
+}
+
+static int
+rte_event_eth_rx_adapter_init(void)
+{
+ const char *name = "rte_event_eth_rx_adapter_array";
+ const struct rte_memzone *mz;
+ unsigned int sz;
+
+ sz = sizeof(*event_eth_rx_adapter) *
+ RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
+ sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
+
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ mz = rte_memzone_reserve_aligned(name, sz, rte_socket_id(), 0,
+ RTE_CACHE_LINE_SIZE);
+ if (mz == NULL) {
+ RTE_EDEV_LOG_ERR("failed to reserve memzone err = %"
+ PRId32, rte_errno);
+ return -rte_errno;
+ }
+ }
+
+ event_eth_rx_adapter = mz->addr;
+ return 0;
+}
+
+static inline struct rte_event_eth_rx_adapter *
+rxa_id_to_adapter(uint8_t id)
+{
+ return event_eth_rx_adapter ?
+ event_eth_rx_adapter[id] : NULL;
+}
+
+static int
+rxa_default_conf_cb(uint8_t id, uint8_t dev_id,
+ struct rte_event_eth_rx_adapter_conf *conf, void *arg)
+{
+ int ret;
+ struct rte_eventdev *dev;
+ struct rte_event_dev_config dev_conf;
+ int started;
+ uint8_t port_id;
+ struct rte_event_port_conf *port_conf = arg;
+ struct rte_event_eth_rx_adapter *rx_adapter = rxa_id_to_adapter(id);
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ dev_conf = dev->data->dev_conf;
+
+ started = dev->data->dev_started;
+ if (started)
+ rte_event_dev_stop(dev_id);
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports += 1;
+ ret = rte_event_dev_configure(dev_id, &dev_conf);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
+ dev_id);
+ if (started) {
+ if (rte_event_dev_start(dev_id))
+ return -EIO;
+ }
+ return ret;
+ }
+
+ ret = rte_event_port_setup(dev_id, port_id, port_conf);
+	if (ret) {
+		RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
+					port_id);
+		if (started) {
+			if (rte_event_dev_start(dev_id))
+				return -EIO;
+		}
+		return ret;
+	}
+
+ conf->event_port_id = port_id;
+ conf->max_nb_rx = 128;
+ if (started)
+ ret = rte_event_dev_start(dev_id);
+ rx_adapter->default_cb_arg = 1;
+ return ret;
+}
+
+static int
+rxa_epoll_create1(void)
+{
+#if defined(LINUX)
+ int fd;
+ fd = epoll_create1(EPOLL_CLOEXEC);
+ return fd < 0 ? -errno : fd;
+#elif defined(BSD)
+ return -ENOTSUP;
+#endif
+}
+
+static int
+rxa_init_epd(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ if (rx_adapter->epd != INIT_FD)
+ return 0;
+
+ rx_adapter->epd = rxa_epoll_create1();
+ if (rx_adapter->epd < 0) {
+ int err = rx_adapter->epd;
+ rx_adapter->epd = INIT_FD;
+ RTE_EDEV_LOG_ERR("epoll_create1() failed, err %d", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+rxa_create_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ int err;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
+
+ if (rx_adapter->intr_ring)
+ return 0;
+
+ rx_adapter->intr_ring = rte_ring_create("intr_ring",
+ RTE_EVENT_ETH_INTR_RING_SIZE,
+ rte_socket_id(), 0);
+ if (!rx_adapter->intr_ring)
+ return -ENOMEM;
+
+ rx_adapter->epoll_events = rte_zmalloc_socket(rx_adapter->mem_name,
+ RTE_EVENT_ETH_INTR_RING_SIZE *
+ sizeof(struct rte_epoll_event),
+ RTE_CACHE_LINE_SIZE,
+ rx_adapter->socket_id);
+ if (!rx_adapter->epoll_events) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ rte_spinlock_init(&rx_adapter->intr_ring_lock);
+
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
+ "rx-intr-thread-%d", rx_adapter->id);
+
+ err = rte_ctrl_thread_create(&rx_adapter->rx_intr_thread, thread_name,
+ NULL, rxa_intr_thread, rx_adapter);
+ if (!err) {
+ rte_thread_setname(rx_adapter->rx_intr_thread, thread_name);
+ return 0;
+ }
+
+ RTE_EDEV_LOG_ERR("Failed to create interrupt thread err = %d\n", err);
+error:
+ rte_ring_free(rx_adapter->intr_ring);
+ rx_adapter->intr_ring = NULL;
+ rx_adapter->epoll_events = NULL;
+ return err;
+}
+
+static int
+rxa_destroy_intr_thread(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ int err;
+
+ err = pthread_cancel(rx_adapter->rx_intr_thread);
+ if (err)
+ RTE_EDEV_LOG_ERR("Can't cancel interrupt thread err = %d\n",
+ err);
+
+ err = pthread_join(rx_adapter->rx_intr_thread, NULL);
+ if (err)
+ RTE_EDEV_LOG_ERR("Can't join interrupt thread err = %d\n", err);
+
+ rte_free(rx_adapter->epoll_events);
+ rte_ring_free(rx_adapter->intr_ring);
+ rx_adapter->intr_ring = NULL;
+ rx_adapter->epoll_events = NULL;
+ return 0;
+}
+
+static int
+rxa_free_intr_resources(struct rte_event_eth_rx_adapter *rx_adapter)
+{
+ int ret;
+
+ if (rx_adapter->num_rx_intr == 0)
+ return 0;
+
+ ret = rxa_destroy_intr_thread(rx_adapter);
+ if (ret)
+ return ret;
+
+ close(rx_adapter->epd);
+ rx_adapter->epd = INIT_FD;
+
+ return ret;
+}
+
+static int
+rxa_disable_intr(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ int err;
+ uint16_t eth_dev_id = dev_info->dev->data->port_id;
+ int sintr = rxa_shared_intr(dev_info, rx_queue_id);
+
+ err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
+ if (err) {
+ RTE_EDEV_LOG_ERR("Could not disable interrupt for Rx queue %u",
+ rx_queue_id);
+ return err;
+ }
+
+ err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
+ rx_adapter->epd,
+ RTE_INTR_EVENT_DEL,
+ 0);
+ if (err)
+ RTE_EDEV_LOG_ERR("Interrupt event deletion failed %d", err);
+
+	if (sintr)
+		dev_info->shared_intr_enabled = 0;
+	else
+		dev_info->rx_queue[rx_queue_id].intr_enabled = 0;
+ return err;
+}
+
+static int
+rxa_del_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ int err;
+ int i;
+ int s;
+
+ if (dev_info->nb_rx_intr == 0)
+ return 0;
+
+ err = 0;
+ if (rx_queue_id == -1) {
+ s = dev_info->nb_shared_intr;
+ for (i = 0; i < dev_info->nb_rx_intr; i++) {
+ int sintr;
+ uint16_t q;
+
+ q = dev_info->intr_queue[i];
+ sintr = rxa_shared_intr(dev_info, q);
+ s -= sintr;
+
+ if (!sintr || s == 0) {
+
+ err = rxa_disable_intr(rx_adapter, dev_info,
+ q);
+ if (err)
+ return err;
+ rxa_intr_ring_del_entries(rx_adapter, dev_info,
+ q);
+ }
+ }
+ } else {
+ if (!rxa_intr_queue(dev_info, rx_queue_id))
+ return 0;
+ if (!rxa_shared_intr(dev_info, rx_queue_id) ||
+ dev_info->nb_shared_intr == 1) {
+ err = rxa_disable_intr(rx_adapter, dev_info,
+ rx_queue_id);
+ if (err)
+ return err;
+ rxa_intr_ring_del_entries(rx_adapter, dev_info,
+ rx_queue_id);
+ }
+
+ for (i = 0; i < dev_info->nb_rx_intr; i++) {
+ if (dev_info->intr_queue[i] == rx_queue_id) {
+ for (; i < dev_info->nb_rx_intr - 1; i++)
+ dev_info->intr_queue[i] =
+ dev_info->intr_queue[i + 1];
+ break;
+ }
+ }
+ }
+
+ return err;
+}
+
+static int
+rxa_config_intr(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ uint16_t rx_queue_id)
+{
+ int err, err1;
+ uint16_t eth_dev_id = dev_info->dev->data->port_id;
+ union queue_data qd;
+ int init_fd;
+ uint16_t *intr_queue;
+ int sintr = rxa_shared_intr(dev_info, rx_queue_id);
+
+ if (rxa_intr_queue(dev_info, rx_queue_id))
+ return 0;
+
+ intr_queue = dev_info->intr_queue;
+ if (dev_info->intr_queue == NULL) {
+ size_t len =
+ dev_info->dev->data->nb_rx_queues * sizeof(uint16_t);
+ dev_info->intr_queue =
+ rte_zmalloc_socket(
+ rx_adapter->mem_name,
+ len,
+ 0,
+ rx_adapter->socket_id);
+ if (dev_info->intr_queue == NULL)
+ return -ENOMEM;
+ }
+
+ init_fd = rx_adapter->epd;
+ err = rxa_init_epd(rx_adapter);
+ if (err)
+ goto err_free_queue;
+
+ qd.port = eth_dev_id;
+ qd.queue = rx_queue_id;
+
+ err = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
+ rx_adapter->epd,
+ RTE_INTR_EVENT_ADD,
+ qd.ptr);
+ if (err) {
+ RTE_EDEV_LOG_ERR("Failed to add interrupt event for"
+ " Rx Queue %u err %d", rx_queue_id, err);
+ goto err_del_fd;
+ }
+
+ err = rte_eth_dev_rx_intr_enable(eth_dev_id, rx_queue_id);
+ if (err) {
+ RTE_EDEV_LOG_ERR("Could not enable interrupt for"
+ " Rx Queue %u err %d", rx_queue_id, err);
+
+ goto err_del_event;
+ }
+
+ err = rxa_create_intr_thread(rx_adapter);
+ if (!err) {
+ if (sintr)
+ dev_info->shared_intr_enabled = 1;
+ else
+ dev_info->rx_queue[rx_queue_id].intr_enabled = 1;
+ return 0;
+ }
+
+
+ err = rte_eth_dev_rx_intr_disable(eth_dev_id, rx_queue_id);
+ if (err)
+ RTE_EDEV_LOG_ERR("Could not disable interrupt for"
+ " Rx Queue %u err %d", rx_queue_id, err);
+err_del_event:
+ err1 = rte_eth_dev_rx_intr_ctl_q(eth_dev_id, rx_queue_id,
+ rx_adapter->epd,
+ RTE_INTR_EVENT_DEL,
+ 0);
+ if (err1) {
+ RTE_EDEV_LOG_ERR("Could not delete event for"
+ " Rx Queue %u err %d", rx_queue_id, err1);
+ }
+err_del_fd:
+ if (init_fd == INIT_FD) {
+ close(rx_adapter->epd);
+		rx_adapter->epd = INIT_FD;
+ }
+err_free_queue:
+	if (intr_queue == NULL) {
+		rte_free(dev_info->intr_queue);
+		dev_info->intr_queue = NULL;
+	}
+
+ return err;
+}
+
+static int
+rxa_add_intr_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int rx_queue_id)
+{
+ int i, j, err;
+ int si = -1;
+ int shared_done = (dev_info->nb_shared_intr > 0);
+
+ if (rx_queue_id != -1) {
+ if (rxa_shared_intr(dev_info, rx_queue_id) && shared_done)
+ return 0;
+ return rxa_config_intr(rx_adapter, dev_info, rx_queue_id);
+ }
+
+ err = 0;
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++) {
+
+ if (rxa_shared_intr(dev_info, i) && shared_done)
+ continue;
+
+ err = rxa_config_intr(rx_adapter, dev_info, i);
+
+ shared_done = err == 0 && rxa_shared_intr(dev_info, i);
+ if (shared_done) {
+ si = i;
+ dev_info->shared_intr_enabled = 1;
+ }
+ if (err)
+ break;
+ }
+
+ if (err == 0)
+ return 0;
+
+ shared_done = (dev_info->nb_shared_intr > 0);
+ for (j = 0; j < i; j++) {
+ if (rxa_intr_queue(dev_info, j))
+ continue;
+ if (rxa_shared_intr(dev_info, j) && si != j)
+ continue;
+ err = rxa_disable_intr(rx_adapter, dev_info, j);
+ if (err)
+ break;
+
+ }
+
+ return err;
+}
+
+
+static int
+rxa_init_service(struct rte_event_eth_rx_adapter *rx_adapter, uint8_t id)
+{
+ int ret;
+ struct rte_service_spec service;
+ struct rte_event_eth_rx_adapter_conf rx_adapter_conf;
+
+ if (rx_adapter->service_inited)
+ return 0;
+
+ memset(&service, 0, sizeof(service));
+ snprintf(service.name, ETH_RX_ADAPTER_SERVICE_NAME_LEN,
+ "rte_event_eth_rx_adapter_%d", id);
+ service.socket_id = rx_adapter->socket_id;
+ service.callback = rxa_service_func;
+ service.callback_userdata = rx_adapter;
+ /* Service function handles locking for queue add/del updates */
+ service.capabilities = RTE_SERVICE_CAP_MT_SAFE;
+ ret = rte_service_component_register(&service, &rx_adapter->service_id);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("failed to register service %s err = %" PRId32,
+ service.name, ret);
+ return ret;
+ }
+
+ ret = rx_adapter->conf_cb(id, rx_adapter->eventdev_id,
+ &rx_adapter_conf, rx_adapter->conf_arg);
+ if (ret) {
+ RTE_EDEV_LOG_ERR("configuration callback failed err = %" PRId32,
+ ret);
+ goto err_done;
+ }
+ rx_adapter->event_port_id = rx_adapter_conf.event_port_id;
+ rx_adapter->max_nb_rx = rx_adapter_conf.max_nb_rx;
+ rx_adapter->service_inited = 1;
+ rx_adapter->epd = INIT_FD;
+ return 0;
+
+err_done:
+ rte_service_component_unregister(rx_adapter->service_id);
+ return ret;
+}
+
+static void
+rxa_update_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int32_t rx_queue_id,
+ uint8_t add)
+{
+ struct eth_rx_queue_info *queue_info;
+ int enabled;
+ uint16_t i;
+
+ if (dev_info->rx_queue == NULL)
+ return;
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ rxa_update_queue(rx_adapter, dev_info, i, add);
+ } else {
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ enabled = queue_info->queue_enabled;
+ if (add) {
+ rx_adapter->nb_queues += !enabled;
+ dev_info->nb_dev_queues += !enabled;
+ } else {
+ rx_adapter->nb_queues -= enabled;
+ dev_info->nb_dev_queues -= enabled;
+ }
+ queue_info->queue_enabled = !!add;
+ }
+}
+
+static void
+rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int32_t rx_queue_id)
+{
+ int pollq;
+ int intrq;
+ int sintrq;
+
+
+ if (rx_adapter->nb_queues == 0)
+ return;
+
+ if (rx_queue_id == -1) {
+ uint16_t nb_rx_queues;
+ uint16_t i;
+
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ for (i = 0; i < nb_rx_queues; i++)
+ rxa_sw_del(rx_adapter, dev_info, i);
+ return;
+ }
+
+ pollq = rxa_polled_queue(dev_info, rx_queue_id);
+ intrq = rxa_intr_queue(dev_info, rx_queue_id);
+ sintrq = rxa_shared_intr(dev_info, rx_queue_id);
+ rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 0);
+ rx_adapter->num_rx_polled -= pollq;
+ dev_info->nb_rx_poll -= pollq;
+ rx_adapter->num_rx_intr -= intrq;
+ dev_info->nb_rx_intr -= intrq;
+ dev_info->nb_shared_intr -= intrq && sintrq;
+}
+
+static void
+rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct eth_device_info *dev_info,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *conf)
+{
+ struct eth_rx_queue_info *queue_info;
+ const struct rte_event *ev = &conf->ev;
+ int pollq;
+ int intrq;
+ int sintrq;
+
+ if (rx_queue_id == -1) {
+ uint16_t nb_rx_queues;
+ uint16_t i;
+
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ for (i = 0; i < nb_rx_queues; i++)
+ rxa_add_queue(rx_adapter, dev_info, i, conf);
+ return;
+ }
+
+ pollq = rxa_polled_queue(dev_info, rx_queue_id);
+ intrq = rxa_intr_queue(dev_info, rx_queue_id);
+ sintrq = rxa_shared_intr(dev_info, rx_queue_id);
+
+ queue_info = &dev_info->rx_queue[rx_queue_id];
+ queue_info->event_queue_id = ev->queue_id;
+ queue_info->sched_type = ev->sched_type;
+ queue_info->priority = ev->priority;
+ queue_info->wt = conf->servicing_weight;
+
+ if (conf->rx_queue_flags &
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
+ queue_info->flow_id = ev->flow_id;
+ queue_info->flow_id_mask = ~0;
+ }
+
+ rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
+ if (rxa_polled_queue(dev_info, rx_queue_id)) {
+ rx_adapter->num_rx_polled += !pollq;
+ dev_info->nb_rx_poll += !pollq;
+ rx_adapter->num_rx_intr -= intrq;
+ dev_info->nb_rx_intr -= intrq;
+ dev_info->nb_shared_intr -= intrq && sintrq;
+ }
+
+ if (rxa_intr_queue(dev_info, rx_queue_id)) {
+ rx_adapter->num_rx_polled -= pollq;
+ dev_info->nb_rx_poll -= pollq;
+ rx_adapter->num_rx_intr += !intrq;
+ dev_info->nb_rx_intr += !intrq;
+ dev_info->nb_shared_intr += !intrq && sintrq;
+ if (dev_info->nb_shared_intr == 1) {
+ if (dev_info->multi_intr_cap)
+ dev_info->next_q_idx =
+ RTE_MAX_RXTX_INTR_VEC_ID - 1;
+ else
+ dev_info->next_q_idx = 0;
+ }
+ }
+}
+
+static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t eth_dev_id,
+ int rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct eth_device_info *dev_info = &rx_adapter->eth_devices[eth_dev_id];
+ struct rte_event_eth_rx_adapter_queue_conf temp_conf;
+ int ret;
+ struct eth_rx_poll_entry *rx_poll;
+ struct eth_rx_queue_info *rx_queue;
+ uint32_t *rx_wrr;
+ uint16_t nb_rx_queues;
+ uint32_t nb_rx_poll, nb_wrr;
+ uint32_t nb_rx_intr;
+ int num_intr_vec;
+ uint16_t wt;
+
+ if (queue_conf->servicing_weight == 0) {
+ struct rte_eth_dev_data *data = dev_info->dev->data;
+
+ temp_conf = *queue_conf;
+ if (!data->dev_conf.intr_conf.rxq) {
+ /* If Rx interrupts are disabled set wt = 1 */
+ temp_conf.servicing_weight = 1;
+ }
+ queue_conf = &temp_conf;
+ }
+
+ nb_rx_queues = dev_info->dev->data->nb_rx_queues;
+ rx_queue = dev_info->rx_queue;
+ wt = queue_conf->servicing_weight;
+
+ if (dev_info->rx_queue == NULL) {
+ dev_info->rx_queue =
+ rte_zmalloc_socket(rx_adapter->mem_name,
+ nb_rx_queues *
+ sizeof(struct eth_rx_queue_info), 0,
+ rx_adapter->socket_id);
+ if (dev_info->rx_queue == NULL)
+ return -ENOMEM;
+ }
+ rx_wrr = NULL;
+ rx_poll = NULL;
+
+ rxa_calc_nb_post_add(rx_adapter, dev_info, rx_queue_id,
+ queue_conf->servicing_weight,
+ &nb_rx_poll, &nb_rx_intr, &nb_wrr);
+
+ if (dev_info->dev->intr_handle)
+ dev_info->multi_intr_cap =
+ rte_intr_cap_multiple(dev_info->dev->intr_handle);
+
+ ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
+ &rx_poll, &rx_wrr);
+ if (ret)
+ goto err_free_rxqueue;
+
+ if (wt == 0) {
+ num_intr_vec = rxa_nb_intr_vect(dev_info, rx_queue_id, 1);
+
+ ret = rxa_intr_ring_check_avail(rx_adapter, num_intr_vec);
+ if (ret)
+ goto err_free_rxqueue;
+
+ ret = rxa_add_intr_queue(rx_adapter, dev_info, rx_queue_id);
+ if (ret)
+ goto err_free_rxqueue;
+ } else {
+
+ num_intr_vec = 0;
+ if (rx_adapter->num_rx_intr > nb_rx_intr) {
+ num_intr_vec = rxa_nb_intr_vect(dev_info,
+ rx_queue_id, 0);
+ /* interrupt based queues are being converted to
+ * poll mode queues, delete the interrupt configuration
+ * for those.
+ */
+ ret = rxa_del_intr_queue(rx_adapter,
+ dev_info, rx_queue_id);
+ if (ret)
+ goto err_free_rxqueue;
+ }
+ }
+
+ if (nb_rx_intr == 0) {
+ ret = rxa_free_intr_resources(rx_adapter);
+ if (ret)
+ goto err_free_rxqueue;
+ }
+
+ if (wt == 0) {
+ uint16_t i;
+
+ if (rx_queue_id == -1) {
+ for (i = 0; i < dev_info->dev->data->nb_rx_queues; i++)
+ dev_info->intr_queue[i] = i;
+ } else {
+ if (!rxa_intr_queue(dev_info, rx_queue_id))
+ dev_info->intr_queue[nb_rx_intr - 1] =
+ rx_queue_id;
+ }
+ }
+
+ rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+ rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
+
+ rte_free(rx_adapter->eth_rx_poll);
+ rte_free(rx_adapter->wrr_sched);
+
+ rx_adapter->eth_rx_poll = rx_poll;
+ rx_adapter->wrr_sched = rx_wrr;
+ rx_adapter->wrr_len = nb_wrr;
+ rx_adapter->num_intr_vec += num_intr_vec;
+ return 0;
+
+err_free_rxqueue:
+ if (rx_queue == NULL) {
+ rte_free(dev_info->rx_queue);
+ dev_info->rx_queue = NULL;
+ }
+
+ rte_free(rx_poll);
+ rte_free(rx_wrr);
+
+	return ret;
+}
+
+static int
+rxa_ctrl(uint8_t id, int start)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ uint32_t i;
+ int use_service = 0;
+ int stop = !start;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+
+ RTE_ETH_FOREACH_DEV(i) {
+ dev_info = &rx_adapter->eth_devices[i];
+ /* if start check for num dev queues */
+ if (start && !dev_info->nb_dev_queues)
+ continue;
+ /* if stop check if dev has been started */
+ if (stop && !dev_info->dev_rx_started)
+ continue;
+ use_service |= !dev_info->internal_event_port;
+ dev_info->dev_rx_started = start;
+ if (dev_info->internal_event_port == 0)
+ continue;
+ start ? (*dev->dev_ops->eth_rx_adapter_start)(dev,
+ &rte_eth_devices[i]) :
+ (*dev->dev_ops->eth_rx_adapter_stop)(dev,
+ &rte_eth_devices[i]);
+ }
+
+ if (use_service) {
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ rx_adapter->rxa_started = start;
+ rte_service_runstate_set(rx_adapter->service_id, start);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ }
+
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_eth_rx_adapter_conf_cb conf_cb,
+ void *conf_arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ int ret;
+ int socket_id;
+ uint16_t i;
+ char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
+ const uint8_t default_rss_key[] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
+ };
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ if (conf_cb == NULL)
+ return -EINVAL;
+
+ if (event_eth_rx_adapter == NULL) {
+ ret = rte_event_eth_rx_adapter_init();
+ if (ret)
+ return ret;
+ }
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter != NULL) {
+ RTE_EDEV_LOG_ERR("Eth Rx adapter exists id = %" PRIu8, id);
+ return -EEXIST;
+ }
+
+ socket_id = rte_event_dev_socket_id(dev_id);
+ snprintf(mem_name, ETH_RX_ADAPTER_MEM_NAME_LEN,
+ "rte_event_eth_rx_adapter_%d",
+ id);
+
+ rx_adapter = rte_zmalloc_socket(mem_name, sizeof(*rx_adapter),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rx_adapter == NULL) {
+ RTE_EDEV_LOG_ERR("failed to get mem for rx adapter");
+ return -ENOMEM;
+ }
+
+ rx_adapter->eventdev_id = dev_id;
+ rx_adapter->socket_id = socket_id;
+ rx_adapter->conf_cb = conf_cb;
+ rx_adapter->conf_arg = conf_arg;
+ rx_adapter->id = id;
+ strcpy(rx_adapter->mem_name, mem_name);
+ rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
+ /* FIXME: incompatible with hotplug */
+ rte_eth_dev_count_total() *
+ sizeof(struct eth_device_info), 0,
+ socket_id);
+ rte_convert_rss_key((const uint32_t *)default_rss_key,
+ (uint32_t *)rx_adapter->rss_key_be,
+ RTE_DIM(default_rss_key));
+
+ if (rx_adapter->eth_devices == NULL) {
+ RTE_EDEV_LOG_ERR("failed to get mem for eth devices\n");
+ rte_free(rx_adapter);
+ return -ENOMEM;
+ }
+ rte_spinlock_init(&rx_adapter->rx_lock);
+ RTE_ETH_FOREACH_DEV(i)
+ rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
+
+ event_eth_rx_adapter[id] = rx_adapter;
+ if (conf_cb == rxa_default_conf_cb)
+ rx_adapter->default_cb_arg = 1;
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config)
+{
+ struct rte_event_port_conf *pc;
+ int ret;
+
+ if (port_config == NULL)
+ return -EINVAL;
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ pc = rte_malloc(NULL, sizeof(*pc), 0);
+ if (pc == NULL)
+ return -ENOMEM;
+ *pc = *port_config;
+ ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
+ rxa_default_conf_cb,
+ pc);
+ if (ret)
+ rte_free(pc);
+ return ret;
+}
+
+int
+rte_event_eth_rx_adapter_free(uint8_t id)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ if (rx_adapter->nb_queues) {
+		RTE_EDEV_LOG_ERR("%" PRIu32 " Rx queues not deleted",
+				rx_adapter->nb_queues);
+ return -EBUSY;
+ }
+
+ if (rx_adapter->default_cb_arg)
+ rte_free(rx_adapter->conf_arg);
+ rte_free(rx_adapter->eth_devices);
+ rte_free(rx_adapter);
+ event_eth_rx_adapter[id] = NULL;
+
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_add(uint8_t id,
+ uint16_t eth_dev_id,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ int ret;
+ uint32_t cap;
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if ((rx_adapter == NULL) || (queue_conf == NULL))
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret) {
+		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+			" eth port %" PRIu16, id, eth_dev_id);
+ return ret;
+ }
+
+ if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) == 0
+ && (queue_conf->rx_queue_flags &
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
+ RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
+ " eth port: %" PRIu16 " adapter id: %" PRIu8,
+ eth_dev_id, id);
+ return -EINVAL;
+ }
+
+ if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
+ (rx_queue_id != -1)) {
+ RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
+ "event queue, eth port: %" PRIu16 " adapter id: %"
+ PRIu8, eth_dev_id, id);
+ return -EINVAL;
+ }
+
+ if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
+ rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
+ (uint16_t)rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_add,
+ -ENOTSUP);
+ if (dev_info->rx_queue == NULL) {
+ dev_info->rx_queue =
+ rte_zmalloc_socket(rx_adapter->mem_name,
+ dev_info->dev->data->nb_rx_queues *
+ sizeof(struct eth_rx_queue_info), 0,
+ rx_adapter->socket_id);
+ if (dev_info->rx_queue == NULL)
+ return -ENOMEM;
+ }
+
+ ret = (*dev->dev_ops->eth_rx_adapter_queue_add)(dev,
+ &rte_eth_devices[eth_dev_id],
+ rx_queue_id, queue_conf);
+ if (ret == 0) {
+ dev_info->internal_event_port = 1;
+ rxa_update_queue(rx_adapter,
+ &rx_adapter->eth_devices[eth_dev_id],
+ rx_queue_id,
+ 1);
+ }
+ } else {
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ dev_info->internal_event_port = 0;
+ ret = rxa_init_service(rx_adapter, id);
+ if (ret == 0) {
+ uint32_t service_id = rx_adapter->service_id;
+ ret = rxa_sw_add(rx_adapter, eth_dev_id, rx_queue_id,
+ queue_conf);
+ rte_service_component_runstate_set(service_id,
+ rxa_sw_adapter_queue_count(rx_adapter));
+ }
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ }
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
+ int32_t rx_queue_id)
+{
+ int ret = 0;
+ struct rte_eventdev *dev;
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct eth_device_info *dev_info;
+ uint32_t cap;
+ uint32_t nb_rx_poll = 0;
+ uint32_t nb_wrr = 0;
+ uint32_t nb_rx_intr;
+ struct eth_rx_poll_entry *rx_poll = NULL;
+ uint32_t *rx_wrr = NULL;
+ int num_intr_vec;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret)
+ return ret;
+
+ if (rx_queue_id != -1 && (uint16_t)rx_queue_id >=
+ rte_eth_devices[eth_dev_id].data->nb_rx_queues) {
+ RTE_EDEV_LOG_ERR("Invalid rx queue_id %" PRIu16,
+ (uint16_t)rx_queue_id);
+ return -EINVAL;
+ }
+
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_rx_adapter_queue_del,
+ -ENOTSUP);
+ ret = (*dev->dev_ops->eth_rx_adapter_queue_del)(dev,
+ &rte_eth_devices[eth_dev_id],
+ rx_queue_id);
+ if (ret == 0) {
+ rxa_update_queue(rx_adapter,
+ &rx_adapter->eth_devices[eth_dev_id],
+ rx_queue_id,
+ 0);
+ if (dev_info->nb_dev_queues == 0) {
+ rte_free(dev_info->rx_queue);
+ dev_info->rx_queue = NULL;
+ }
+ }
+ } else {
+ rxa_calc_nb_post_del(rx_adapter, dev_info, rx_queue_id,
+ &nb_rx_poll, &nb_rx_intr, &nb_wrr);
+
+ ret = rxa_alloc_poll_arrays(rx_adapter, nb_rx_poll, nb_wrr,
+ &rx_poll, &rx_wrr);
+ if (ret)
+ return ret;
+
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+
+ num_intr_vec = 0;
+ if (rx_adapter->num_rx_intr > nb_rx_intr) {
+
+ num_intr_vec = rxa_nb_intr_vect(dev_info,
+ rx_queue_id, 0);
+ ret = rxa_del_intr_queue(rx_adapter, dev_info,
+ rx_queue_id);
+ if (ret)
+ goto unlock_ret;
+ }
+
+ if (nb_rx_intr == 0) {
+ ret = rxa_free_intr_resources(rx_adapter);
+ if (ret)
+ goto unlock_ret;
+ }
+
+ rxa_sw_del(rx_adapter, dev_info, rx_queue_id);
+ rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
+
+ rte_free(rx_adapter->eth_rx_poll);
+ rte_free(rx_adapter->wrr_sched);
+
+ if (nb_rx_intr == 0) {
+ rte_free(dev_info->intr_queue);
+ dev_info->intr_queue = NULL;
+ }
+
+ rx_adapter->eth_rx_poll = rx_poll;
+ rx_adapter->wrr_sched = rx_wrr;
+ rx_adapter->wrr_len = nb_wrr;
+ rx_adapter->num_intr_vec += num_intr_vec;
+
+ if (dev_info->nb_dev_queues == 0) {
+ rte_free(dev_info->rx_queue);
+ dev_info->rx_queue = NULL;
+ }
+unlock_ret:
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ if (ret) {
+ rte_free(rx_poll);
+ rte_free(rx_wrr);
+ return ret;
+ }
+
+ rte_service_component_runstate_set(rx_adapter->service_id,
+ rxa_sw_adapter_queue_count(rx_adapter));
+ }
+
+ return ret;
+}
+
+int
+rte_event_eth_rx_adapter_start(uint8_t id)
+{
+ return rxa_ctrl(id, 1);
+}
+
+int
+rte_event_eth_rx_adapter_stop(uint8_t id)
+{
+ return rxa_ctrl(id, 0);
+}
+
+int
+rte_event_eth_rx_adapter_stats_get(uint8_t id,
+ struct rte_event_eth_rx_adapter_stats *stats)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_event_eth_rx_adapter_stats dev_stats_sum = { 0 };
+ struct rte_event_eth_rx_adapter_stats dev_stats;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ uint32_t i;
+ int ret;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL || stats == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ memset(stats, 0, sizeof(*stats));
+ RTE_ETH_FOREACH_DEV(i) {
+ dev_info = &rx_adapter->eth_devices[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->eth_rx_adapter_stats_get == NULL)
+ continue;
+ ret = (*dev->dev_ops->eth_rx_adapter_stats_get)(dev,
+ &rte_eth_devices[i],
+ &dev_stats);
+ if (ret)
+ continue;
+ dev_stats_sum.rx_packets += dev_stats.rx_packets;
+ dev_stats_sum.rx_enq_count += dev_stats.rx_enq_count;
+ }
+
+ if (rx_adapter->service_inited)
+ *stats = rx_adapter->stats;
+
+ stats->rx_packets += dev_stats_sum.rx_packets;
+ stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_stats_reset(uint8_t id)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eventdev *dev;
+ struct eth_device_info *dev_info;
+ uint32_t i;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev = &rte_eventdevs[rx_adapter->eventdev_id];
+ RTE_ETH_FOREACH_DEV(i) {
+ dev_info = &rx_adapter->eth_devices[i];
+ if (dev_info->internal_event_port == 0 ||
+ dev->dev_ops->eth_rx_adapter_stats_reset == NULL)
+ continue;
+ (*dev->dev_ops->eth_rx_adapter_stats_reset)(dev,
+ &rte_eth_devices[i]);
+ }
+
+ memset(&rx_adapter->stats, 0, sizeof(rx_adapter->stats));
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL || service_id == NULL)
+ return -EINVAL;
+
+ if (rx_adapter->service_inited)
+ *service_id = rx_adapter->service_id;
+
+ return rx_adapter->service_inited ? 0 : -ESRCH;
+}
+
+int rte_event_eth_rx_adapter_cb_register(uint8_t id,
+ uint16_t eth_dev_id,
+ rte_event_eth_rx_adapter_cb_fn cb_fn,
+ void *cb_arg)
+{
+ struct rte_event_eth_rx_adapter *rx_adapter;
+ struct eth_device_info *dev_info;
+ uint32_t cap;
+ int ret;
+
+ RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+ rx_adapter = rxa_id_to_adapter(id);
+ if (rx_adapter == NULL)
+ return -EINVAL;
+
+ dev_info = &rx_adapter->eth_devices[eth_dev_id];
+ if (dev_info->rx_queue == NULL)
+ return -EINVAL;
+
+ ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+ eth_dev_id,
+ &cap);
+ if (ret) {
+		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+			" eth port %" PRIu16, id, eth_dev_id);
+ return ret;
+ }
+
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+ RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
+ PRIu16, eth_dev_id);
+ return -EINVAL;
+ }
+
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ dev_info->cb_fn = cb_fn;
+ dev_info->cb_arg = cb_arg;
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_event_eth_rx_adapter.h b/src/spdk/dpdk/lib/librte_eventdev/rte_event_eth_rx_adapter.h
new file mode 100644
index 00000000..332ee216
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_event_eth_rx_adapter.h
@@ -0,0 +1,513 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENT_ETH_RX_ADAPTER_
+#define _RTE_EVENT_ETH_RX_ADAPTER_
+
+/**
+ * @file
+ *
+ * RTE Event Ethernet Rx Adapter
+ *
+ * An eventdev-based packet processing application enqueues/dequeues mbufs
+ * to/from the event device. Packet flow from the ethernet device to the event
+ * device can be accomplished using either HW or SW mechanisms depending on the
+ * platform and the particular combination of ethernet and event devices. The
+ * event ethernet Rx adapter provides common APIs to configure the packet flow
+ * from the ethernet devices to event devices across both these transfer
+ * mechanisms.
+ *
+ * The adapter uses an EAL service core function for SW based packet transfer
+ * and uses the eventdev PMD functions to configure HW based packet transfer
+ * between the ethernet device and the event device. For SW based packet
+ * transfer, if the mbuf does not have a timestamp set, the adapter adds a
+ * timestamp to the mbuf using rte_get_tsc_cycles(). This is more accurate
+ * than a timestamp set by the application after event dequeue, since it
+ * avoids the event device scheduling latency.
+ *
+ * The ethernet Rx event adapter's functions are:
+ * - rte_event_eth_rx_adapter_create_ext()
+ * - rte_event_eth_rx_adapter_create()
+ * - rte_event_eth_rx_adapter_free()
+ * - rte_event_eth_rx_adapter_queue_add()
+ * - rte_event_eth_rx_adapter_queue_del()
+ * - rte_event_eth_rx_adapter_start()
+ * - rte_event_eth_rx_adapter_stop()
+ * - rte_event_eth_rx_adapter_stats_get()
+ * - rte_event_eth_rx_adapter_stats_reset()
+ *
+ * The application creates an ethernet to event adapter using
+ * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
+ * functions.
+ * The adapter needs to know which ethernet rx queues to poll for mbufs as well
+ * as event device parameters such as the event queue identifier, event
+ * priority and scheduling type that the adapter should use when constructing
+ * events. The rte_event_eth_rx_adapter_queue_add() function is provided for
+ * this purpose.
+ * The servicing weight parameter in the rte_event_eth_rx_adapter_queue_conf
+ * applies when the Rx adapter uses a service core function. It gives the
+ * application control over how often each ethernet device receive queue is
+ * polled; for example, the application may want to poll higher priority
+ * queues more frequently without completely starving lower priority queues.
+ * If this parameter is zero and the receive interrupt is enabled when
+ * configuring the device, the receive queue is interrupt driven; otherwise,
+ * the queue is assigned a servicing weight of one.
+ *
+ * The application can start/stop the adapter using the
+ * rte_event_eth_rx_adapter_start() and the rte_event_eth_rx_adapter_stop()
+ * functions. If the adapter uses a rte_service function, then the application
+ * is also required to assign a core to the service function and control the
+ * service core using the rte_service APIs. The
+ * rte_event_eth_rx_adapter_service_id_get() function can be used to retrieve
+ * the service function ID of the adapter in this case.
+ *
+ * For SW based packet transfers, i.e., when the
+ * RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is not set in the adapter's
+ * capabilities flags for a particular ethernet device, the service function
+ * temporarily enqueues mbufs to an event buffer before batch enqueueing these
+ * to the event device. If the buffer fills up, the service function stops
+ * dequeueing packets from the ethernet device. The application may want to
+ * monitor the buffer fill level and instruct the service function to
+ * selectively buffer packets. The application may also use some other
+ * criteria to decide which packets should enter the event device even when
+ * the event buffer fill level is low. The
+ * rte_event_eth_rx_adapter_cb_register() function allows the
+ * application to register a callback that selects which packets to enqueue
+ * to the event device.
+ *
+ * Note:
+ * 1) Ethernet devices created after an instance of
+ * rte_event_eth_rx_adapter_create() should be added to a new instance
+ * of the Rx adapter.
+ */
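+
+/*
+ * A minimal end-to-end sketch (illustrative only, not part of the original
+ * source). It assumes event device 0 has already been configured and that
+ * ethdev port 0 is started; all identifiers and configuration values below
+ * are placeholders.
+ *
+ *	struct rte_event_port_conf port_config = {
+ *		.new_event_threshold = 1024,
+ *		.dequeue_depth = 32,
+ *		.enqueue_depth = 32,
+ *	};
+ *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
+ *		.ev.queue_id = 0,
+ *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ *		.servicing_weight = 1,
+ *	};
+ *
+ *	rte_event_eth_rx_adapter_create(0, 0, &port_config);
+ *	rte_event_eth_rx_adapter_queue_add(0, 0, -1, &qconf);
+ *	rte_event_eth_rx_adapter_start(0);
+ */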
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_service.h>
+
+#include "rte_eventdev.h"
+
+#define RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE 32
+
+/* struct rte_event_eth_rx_adapter_queue_conf flags definitions */
+#define RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID 0x1
+/**< This flag indicates the flow identifier is valid
+ * @see rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Adapter configuration structure that the adapter configuration callback
+ * function is expected to fill out
+ * @see rte_event_eth_rx_adapter_conf_cb
+ */
+struct rte_event_eth_rx_adapter_conf {
+ uint8_t event_port_id;
+ /**< Event port identifier, the adapter enqueues mbuf events to this
+ * port.
+ */
+ uint32_t max_nb_rx;
+ /**< The adapter can return early if it has processed at least
+ * max_nb_rx mbufs. This isn't treated as a requirement; batching may
+ * cause the adapter to process more than max_nb_rx mbufs.
+ */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Function type used for adapter configuration callback. The callback is
+ * used to fill in members of the struct rte_event_eth_rx_adapter_conf. This
+ * callback is invoked when creating a SW service for packet transfer from
+ * ethdev queues to the event device. The SW service is created within the
+ * rte_event_eth_rx_adapter_queue_add() function if SW based packet transfers
+ * from ethdev queues to the event device are required.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @param [out] conf
+ * Structure that needs to be populated by this callback.
+ *
+ * @param arg
+ *  Argument to the callback. This is the same as the conf_arg passed to
+ *  rte_event_eth_rx_adapter_create_ext().
+ */
+typedef int (*rte_event_eth_rx_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
+ struct rte_event_eth_rx_adapter_conf *conf,
+ void *arg);
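+
+/*
+ * A minimal conf_cb sketch (illustrative only, not part of the original
+ * source). It assumes the application created an event port beforehand and
+ * passes its id through conf_arg; app_rx_adapter_conf_cb and the max_nb_rx
+ * value are hypothetical.
+ *
+ *	static int
+ *	app_rx_adapter_conf_cb(uint8_t id, uint8_t dev_id,
+ *		struct rte_event_eth_rx_adapter_conf *conf, void *arg)
+ *	{
+ *		RTE_SET_USED(id);
+ *		RTE_SET_USED(dev_id);
+ *		conf->event_port_id = *(uint8_t *)arg;
+ *		conf->max_nb_rx = 128;
+ *		return 0;
+ *	}
+ */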
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Rx queue configuration structure
+ */
+struct rte_event_eth_rx_adapter_queue_conf {
+ uint32_t rx_queue_flags;
+ /**< Flags for handling received packets
+ * @see RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID
+ */
+ uint16_t servicing_weight;
+	/**< Relative polling frequency of the ethernet receive queue when the
+	 * adapter uses a service core function for ethernet to event device
+	 * transfers. If set to zero and Rx queue interrupts are enabled for
+	 * the ethernet device, the Rx queue is interrupt driven; otherwise
+	 * the queue is assigned a servicing weight of one.
+ */
+ struct rte_event ev;
+ /**<
+ * The values from the following event fields will be used when
+ * queuing mbuf events:
+ * - event_queue_id: Targeted event queue ID for received packets.
+ * - event_priority: Event priority of packets from this Rx queue in
+ * the event queue relative to other events.
+ * - sched_type: Scheduling type for packets from this Rx queue.
+	 * - flow_id: If the RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID bit
+	 *	is set in rx_queue_flags, this flow_id is used for all
+	 *	packets received from this queue. Otherwise the flow ID
+	 *	is set to the RSS hash of the source and destination
+	 *	IPv4/IPv6 addresses.
+ *
+ * The event adapter sets ev.event_type to RTE_EVENT_TYPE_ETHDEV in the
+ * enqueued event.
+ */
+};
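+
+/*
+ * Example initialization (illustrative only; the queue id, sched type and
+ * flow id values are placeholders): direct all packets from a queue to
+ * event queue 1 with a fixed flow id instead of the RSS hash.
+ *
+ *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
+ *		.rx_queue_flags =
+ *			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID,
+ *		.servicing_weight = 2,
+ *		.ev.queue_id = 1,
+ *		.ev.sched_type = RTE_SCHED_TYPE_ORDERED,
+ *		.ev.flow_id = 42,
+ *	};
+ */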
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * A structure used to retrieve statistics for an eth rx adapter instance.
+ */
+struct rte_event_eth_rx_adapter_stats {
+ uint64_t rx_poll_count;
+ /**< Receive queue poll count */
+ uint64_t rx_packets;
+ /**< Received packet count */
+ uint64_t rx_enq_count;
+ /**< Eventdev enqueue count */
+ uint64_t rx_enq_retry;
+ /**< Eventdev enqueue retry count */
+ uint64_t rx_enq_start_ts;
+ /**< Rx enqueue start timestamp */
+ uint64_t rx_enq_block_cycles;
+ /**< Cycles for which the service is blocked by the event device,
+ * i.e, the service fails to enqueue to the event device.
+ */
+ uint64_t rx_enq_end_ts;
+ /**< Latest timestamp at which the service is unblocked
+ * by the event device. The start, end timestamps and
+ * block cycles can be used to compute the percentage of
+ * cycles the service is blocked by the event device.
+ */
+ uint64_t rx_intr_packets;
+ /**< Received packet count for interrupt mode Rx queues */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Callback function invoked by the SW adapter before it continues
+ * to process packets. The callback is passed the size of the enqueue
+ * buffer in the SW adapter and the occupancy of the buffer. The
+ * callback can use these values to decide which mbufs should be
+ * enqueued to the event device. If the return value of the callback
+ * is less than nb_mbuf then the SW adapter uses the return value to
+ * enqueue enq_buf[] to the event device.
+ *
+ * @param eth_dev_id
+ * Port identifier of the Ethernet device.
+ * @param queue_id
+ * Receive queue index.
+ * @param enqueue_buf_size
+ * Total enqueue buffer size.
+ * @param enqueue_buf_count
+ * mbuf count in enqueue buffer.
+ * @param mbuf
+ * mbuf array.
+ * @param nb_mbuf
+ * mbuf count.
+ * @param cb_arg
+ * Callback argument.
+ * @param[out] enq_buf
+ *  The adapter enqueues enq_buf[] if the return value of the
+ *  callback is less than nb_mbuf
+ * @return
+ *  Returns the number of mbufs that should be enqueued to the eventdev
+ */
+typedef uint16_t (*rte_event_eth_rx_adapter_cb_fn)(uint16_t eth_dev_id,
+ uint16_t queue_id,
+ uint32_t enqueue_buf_size,
+ uint32_t enqueue_buf_count,
+ struct rte_mbuf **mbuf,
+ uint16_t nb_mbuf,
+ void *cb_arg,
+ struct rte_mbuf **enq_buf);
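+
+/*
+ * A callback sketch (illustrative only; app_rx_cb and its drop-when-half-full
+ * policy are assumptions, not part of the original source): keep all mbufs
+ * while the enqueue buffer is less than half full, otherwise drop them.
+ *
+ *	static uint16_t
+ *	app_rx_cb(uint16_t eth_dev_id, uint16_t queue_id,
+ *		uint32_t enqueue_buf_size, uint32_t enqueue_buf_count,
+ *		struct rte_mbuf **mbuf, uint16_t nb_mbuf, void *cb_arg,
+ *		struct rte_mbuf **enq_buf)
+ *	{
+ *		uint16_t i, n = 0;
+ *
+ *		RTE_SET_USED(eth_dev_id);
+ *		RTE_SET_USED(queue_id);
+ *		RTE_SET_USED(cb_arg);
+ *		if (enqueue_buf_count < enqueue_buf_size / 2)
+ *			n = nb_mbuf;
+ *		for (i = 0; i < n; i++)
+ *			enq_buf[i] = mbuf[i];
+ *		for (; i < nb_mbuf; i++)
+ *			rte_pktmbuf_free(mbuf[i]);
+ *		return n;
+ *	}
+ */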
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new ethernet Rx event adapter with the specified identifier.
+ *
+ * @param id
+ * The identifier of the ethernet Rx event adapter.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ *
+ * @param conf_cb
+ * Callback function that fills in members of a
+ * struct rte_event_eth_rx_adapter_conf struct passed into
+ * it.
+ *
+ * @param conf_arg
+ * Argument that is passed to the conf_cb function.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
+ rte_event_eth_rx_adapter_conf_cb conf_cb,
+ void *conf_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a new ethernet Rx event adapter with the specified identifier.
+ * This function uses an internal configuration function that creates an event
+ * port. This default function reconfigures the event device with an
+ * additional event port and sets up the event port using the port_config
+ * parameter passed into this function. If the application needs more
+ * control over the configuration of the service, it should use the
+ * rte_event_eth_rx_adapter_create_ext() version.
+ *
+ * @param id
+ * The identifier of the ethernet Rx event adapter.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ *
+ * @param port_config
+ * Argument of type *rte_event_port_conf* that is passed to the conf_cb
+ * function.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ */
+int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_port_conf *port_config);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free an event adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success
+ *   - <0: Error code on failure. If the adapter still has Rx queues
+ *     added to it, the function returns -EBUSY.
+ */
+int rte_event_eth_rx_adapter_free(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Add receive queue to an event adapter. After a queue has been
+ * added to the event adapter, the result of the application calling
+ * rte_eth_rx_burst(eth_dev_id, rx_queue_id, ..) is undefined.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index.
+ * If rx_queue_id is -1, then all Rx queues configured for
+ * the device are added. If the ethdev Rx queues can only be
+ * connected to a single event queue then rx_queue_id is
+ * required to be -1.
+ * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
+ *
+ * @param conf
+ *  Additional configuration structure of type
+ *  *rte_event_eth_rx_adapter_queue_conf*
+ *
+ * @return
+ * - 0: Success, Receive queue added correctly.
+ * - <0: Error code on failure.
+ * - (-EIO) device reconfiguration and restart error. The adapter reconfigures
+ * the event device with an additional port if it is required to use a service
+ * function for packet transfer from the ethernet device to the event device.
+ * If the device had been started before this call, this error code indicates
+ * an error in restart following an error in reconfiguration, i.e., a
+ * combination of the two error codes.
+ */
+int rte_event_eth_rx_adapter_queue_add(uint8_t id,
+ uint16_t eth_dev_id,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Delete receive queue from an event adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index.
+ * If rx_queue_id is -1, then all Rx queues configured for
+ * the device are deleted. If the ethdev Rx queues can only be
+ * connected to a single event queue then rx_queue_id is
+ * required to be -1.
+ * @see RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ
+ *
+ * @return
+ * - 0: Success, Receive queue deleted correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
+ int32_t rx_queue_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start ethernet Rx event adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, Adapter started correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_start(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop ethernet Rx event adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ *  - 0: Success, Adapter stopped correctly.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_stop(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve statistics for an adapter
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] stats
+ * A pointer to structure used to retrieve statistics for an adapter.
+ *
+ * @return
+ * - 0: Success, retrieved successfully.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_stats_get(uint8_t id,
+ struct rte_event_eth_rx_adapter_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset statistics for an adapter.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @return
+ * - 0: Success, statistics reset successfully.
+ * - <0: Error code on failure.
+ */
+int rte_event_eth_rx_adapter_stats_reset(uint8_t id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the service ID of an adapter. If the adapter doesn't use
+ * a rte_service function, this function returns -ESRCH.
+ *
+ * @param id
+ * Adapter identifier.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure, if the adapter doesn't use a rte_service
+ * function, this function returns -ESRCH.
+ */
+int rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id);
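+
+/*
+ * When the adapter uses a service core, the application must map the
+ * service to an lcore before starting it. A minimal sketch (illustrative
+ * only; adapter id 0 and lcore 1 are placeholders):
+ *
+ *	uint32_t service_id;
+ *
+ *	if (rte_event_eth_rx_adapter_service_id_get(0, &service_id) == 0) {
+ *		rte_service_lcore_add(1);
+ *		rte_service_map_lcore_set(service_id, 1, 1);
+ *		rte_service_lcore_start(1);
+ *		rte_service_runstate_set(service_id, 1);
+ *	}
+ */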
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Register a callback to process Rx packets. This is supported only for
+ * SW based packet transfers.
+ * @see rte_event_eth_rx_adapter_cb_fn
+ *
+ * @param id
+ * Adapter identifier.
+ * @param eth_dev_id
+ * Port identifier of Ethernet device.
+ * @param cb_fn
+ * Callback function.
+ * @param cb_arg
+ * Callback arg.
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure.
+ */
+int __rte_experimental
+rte_event_eth_rx_adapter_cb_register(uint8_t id,
+ uint16_t eth_dev_id,
+ rte_event_eth_rx_adapter_cb_fn cb_fn,
+ void *cb_arg);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_EVENT_ETH_RX_ADAPTER_ */
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_event_ring.c b/src/spdk/dpdk/lib/librte_eventdev/rte_event_ring.c
new file mode 100644
index 00000000..16d02a95
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_event_ring.c
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+
+#include <rte_tailq.h>
+#include <rte_memzone.h>
+#include <rte_rwlock.h>
+#include <rte_eal_memconfig.h>
+#include "rte_event_ring.h"
+
+TAILQ_HEAD(rte_event_ring_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_event_ring_tailq = {
+ .name = RTE_TAILQ_EVENT_RING_NAME,
+};
+EAL_REGISTER_TAILQ(rte_event_ring_tailq)
+
+int
+rte_event_ring_init(struct rte_event_ring *r, const char *name,
+ unsigned int count, unsigned int flags)
+{
+ /* compilation-time checks */
+ RTE_BUILD_BUG_ON((sizeof(struct rte_event_ring) &
+ RTE_CACHE_LINE_MASK) != 0);
+
+ /* init the ring structure */
+ return rte_ring_init(&r->r, name, count, flags);
+}
+
+/* create the ring */
+struct rte_event_ring *
+rte_event_ring_create(const char *name, unsigned int count, int socket_id,
+ unsigned int flags)
+{
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ struct rte_event_ring *r;
+ struct rte_tailq_entry *te;
+ const struct rte_memzone *mz;
+ ssize_t ring_size;
+ int mz_flags = 0;
+ struct rte_event_ring_list *ring_list = NULL;
+ const unsigned int requested_count = count;
+ int ret;
+
+ ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+ rte_event_ring_list);
+
+ /* for an exact size ring, round up from count to a power of two */
+ if (flags & RING_F_EXACT_SZ)
+ count = rte_align32pow2(count + 1);
+ else if (!rte_is_power_of_2(count)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ ring_size = sizeof(*r) + (count * sizeof(struct rte_event));
+
+ ret = snprintf(mz_name, sizeof(mz_name), "%s%s",
+ RTE_RING_MZ_PREFIX, name);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ rte_errno = ENAMETOOLONG;
+ return NULL;
+ }
+
+ te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
+ if (te == NULL) {
+ RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /*
+ * reserve a memory zone for this ring. If we can't get rte_config or
+	 * we are a secondary process, the memzone_reserve function will set
+	 * rte_errno for us appropriately - hence no check in this function
+ */
+ mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
+ if (mz != NULL) {
+ r = mz->addr;
+ /* Check return value in case rte_ring_init() fails on size */
+ int err = rte_event_ring_init(r, name, requested_count, flags);
+ if (err) {
+ RTE_LOG(ERR, RING, "Ring init failed\n");
+ if (rte_memzone_free(mz) != 0)
+ RTE_LOG(ERR, RING, "Cannot free memzone\n");
+ rte_free(te);
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return NULL;
+ }
+
+ te->data = (void *) r;
+ r->r.memzone = mz;
+
+ TAILQ_INSERT_TAIL(ring_list, te, next);
+ } else {
+ r = NULL;
+ RTE_LOG(ERR, RING, "Cannot reserve memory\n");
+ rte_free(te);
+ }
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ return r;
+}
+
+
+struct rte_event_ring *
+rte_event_ring_lookup(const char *name)
+{
+ struct rte_tailq_entry *te;
+ struct rte_event_ring *r = NULL;
+ struct rte_event_ring_list *ring_list;
+
+ ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+ rte_event_ring_list);
+
+ rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ TAILQ_FOREACH(te, ring_list, next) {
+ r = (struct rte_event_ring *) te->data;
+ if (strncmp(name, r->r.name, RTE_RING_NAMESIZE) == 0)
+ break;
+ }
+
+ rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ if (te == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ return r;
+}
+
+/* free the ring */
+void
+rte_event_ring_free(struct rte_event_ring *r)
+{
+ struct rte_event_ring_list *ring_list = NULL;
+ struct rte_tailq_entry *te;
+
+ if (r == NULL)
+ return;
+
+ /*
+ * Ring was not created with rte_event_ring_create,
+ * therefore, there is no memzone to free.
+ */
+ if (r->r.memzone == NULL) {
+ RTE_LOG(ERR, RING,
+ "Cannot free ring (not created with rte_event_ring_create()");
+ return;
+ }
+
+ if (rte_memzone_free(r->r.memzone) != 0) {
+ RTE_LOG(ERR, RING, "Cannot free memory\n");
+ return;
+ }
+
+ ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+ rte_event_ring_list);
+ rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+ /* find out tailq entry */
+ TAILQ_FOREACH(te, ring_list, next) {
+ if (te->data == (void *) r)
+ break;
+ }
+
+ if (te == NULL) {
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ return;
+ }
+
+ TAILQ_REMOVE(ring_list, te, next);
+
+ rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+ rte_free(te);
+}
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_event_ring.h b/src/spdk/dpdk/lib/librte_eventdev/rte_event_ring.h
new file mode 100644
index 00000000..827a3209
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_event_ring.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Intel Corporation
+ */
+
+/**
+ * @file
+ * RTE Event Ring
+ *
+ * This provides a ring implementation for passing rte_event structures
+ * from one core to another.
+ */
+
+#ifndef _RTE_EVENT_RING_
+#define _RTE_EVENT_RING_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include "rte_eventdev.h"
+
+#define RTE_TAILQ_EVENT_RING_NAME "RTE_EVENT_RING"
+
+/**
+ * Generic ring structure for passing rte_event objects from core to core.
+ *
+ * Based on the primitives given in the rte_ring library. Designed to be
+ * used inside software eventdev implementations and by applications
+ * directly as needed.
+ */
+struct rte_event_ring {
+ struct rte_ring r;
+};
+
+/**
+ * Returns the number of events in the ring
+ *
+ * @param r
+ * pointer to the event ring
+ * @return
+ * the number of events in the ring
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_count(const struct rte_event_ring *r)
+{
+ return rte_ring_count(&r->r);
+}
+
+/**
+ * Returns the amount of free space in the ring
+ *
+ * @param r
+ * pointer to the event ring
+ * @return
+ * the number of free slots in the ring, i.e. the number of events that
+ * can be successfully enqueued before dequeue must be called
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_free_count(const struct rte_event_ring *r)
+{
+ return rte_ring_free_count(&r->r);
+}
+
+/**
+ * Enqueue a set of events onto a ring
+ *
+ * Note: this API enqueues by copying the events themselves onto the ring,
+ * rather than just placing a pointer to each event onto the ring. This
+ * means that statically-allocated events can safely be enqueued by this
+ * API.
+ *
+ * @param r
+ * pointer to the event ring
+ * @param events
+ * pointer to an array of struct rte_event objects
+ * @param n
+ * number of events in the array to enqueue
+ * @param free_space
+ * if non-null, is updated to indicate the amount of free space in the
+ * ring once the enqueue has completed.
+ * @return
+ * the number of elements, n', enqueued to the ring, 0 <= n' <= n
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_enqueue_burst(struct rte_event_ring *r,
+ const struct rte_event *events,
+ unsigned int n, uint16_t *free_space)
+{
+ uint32_t prod_head, prod_next;
+ uint32_t free_entries;
+
+ n = __rte_ring_move_prod_head(&r->r, r->r.prod.single, n,
+ RTE_RING_QUEUE_VARIABLE,
+ &prod_head, &prod_next, &free_entries);
+ if (n == 0)
+ goto end;
+
+ ENQUEUE_PTRS(&r->r, &r[1], prod_head, events, n, struct rte_event);
+
+ update_tail(&r->r.prod, prod_head, prod_next, r->r.prod.single, 1);
+end:
+ if (free_space != NULL)
+ *free_space = free_entries - n;
+ return n;
+}
+
+/**
+ * Dequeue a set of events from a ring
+ *
+ * Note: this API does not work with pointers to events, rather it copies
+ * the events themselves to the destination ``events`` buffer.
+ *
+ * @param r
+ * pointer to the event ring
+ * @param events
+ * pointer to an array to hold the struct rte_event objects
+ * @param n
+ * number of events that can be held in the ``events`` array
+ * @param available
+ * if non-null, is updated to indicate the number of events remaining in
+ * the ring once the dequeue has completed
+ * @return
+ * the number of elements, n', dequeued from the ring, 0 <= n' <= n
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_dequeue_burst(struct rte_event_ring *r,
+ struct rte_event *events,
+ unsigned int n, uint16_t *available)
+{
+ uint32_t cons_head, cons_next;
+ uint32_t entries;
+
+ n = __rte_ring_move_cons_head(&r->r, r->r.cons.single, n,
+ RTE_RING_QUEUE_VARIABLE,
+ &cons_head, &cons_next, &entries);
+ if (n == 0)
+ goto end;
+
+ DEQUEUE_PTRS(&r->r, &r[1], cons_head, events, n, struct rte_event);
+
+ update_tail(&r->r.cons, cons_head, cons_next, r->r.cons.single, 0);
+
+end:
+ if (available != NULL)
+ *available = entries - n;
+ return n;
+}
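+
+/*
+ * Typical single-producer/single-consumer usage (illustrative only; the
+ * ring name, size and flags are assumptions):
+ *
+ *	struct rte_event_ring *ring;
+ *	struct rte_event ev[32];
+ *	unsigned int n;
+ *
+ *	ring = rte_event_ring_create("app_evring", 4096, rte_socket_id(),
+ *			RING_F_SP_ENQ | RING_F_SC_DEQ);
+ *	// on the producer core:
+ *	n = rte_event_ring_enqueue_burst(ring, ev, 32, NULL);
+ *	// on the consumer core:
+ *	n = rte_event_ring_dequeue_burst(ring, ev, 32, NULL);
+ */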
+
+/**
+ * Initializes an already-allocated ring structure
+ *
+ * @param r
+ * pointer to the ring memory to be initialized
+ * @param name
+ * name to be given to the ring
+ * @param count
+ * the number of elements to be stored in the ring. If the flag
+ * ``RING_F_EXACT_SZ`` is not set, this must be a power of 2, and the actual
+ * usable space in the ring will be ``count - 1`` entries. If the flag
+ *   ``RING_F_EXACT_SZ`` is set, this can be any value up to the ring size
+ * limit - 1, and the usable space will be exactly that requested.
+ * @param flags
+ * An OR of the following:
+ * - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * is "single-producer". Otherwise, it is "multi-producers".
+ * - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * is "single-consumer". Otherwise, it is "multi-consumers".
+ * - RING_F_EXACT_SZ: If this flag is set, the ``count`` parameter is to
+ * be taken as the exact usable size of the ring, and as such does not
+ * need to be a power of 2. The underlying ring memory should be a
+ * power-of-2 size greater than the count value.
+ * @return
+ * 0 on success, or a negative value on error.
+ */
+int
+rte_event_ring_init(struct rte_event_ring *r, const char *name,
+ unsigned int count, unsigned int flags);
+
+/**
+ * Create an event ring structure
+ *
+ * This function allocates memory and initializes an event ring inside that
+ * memory.
+ *
+ * @param name
+ * name to be given to the ring
+ * @param count
+ * the number of elements to be stored in the ring. If the flag
+ * ``RING_F_EXACT_SZ`` is not set, this must be a power of 2, and the actual
+ * usable space in the ring will be ``count - 1`` entries. If the flag
+ *   ``RING_F_EXACT_SZ`` is set, this can be any value up to the ring size
+ * limit - 1, and the usable space will be exactly that requested.
+ * @param socket_id
+ * The *socket_id* argument is the socket identifier in case of
+ * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ * constraint for the reserved zone.
+ * @param flags
+ * An OR of the following:
+ * - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * is "single-producer". Otherwise, it is "multi-producers".
+ * - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * is "single-consumer". Otherwise, it is "multi-consumers".
+ * - RING_F_EXACT_SZ: If this flag is set, the ``count`` parameter is to
+ * be taken as the exact usable size of the ring, and as such does not
+ * need to be a power of 2. The underlying ring memory should be a
+ * power-of-2 size greater than the count value.
+ * @return
+ * On success, the pointer to the new allocated ring. NULL on error with
+ * rte_errno set appropriately. Possible errno values include:
+ * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ * - E_RTE_SECONDARY - function was called from a secondary process instance
+ * - EINVAL - count provided is not a power of 2
+ * - ENOSPC - the maximum number of memzones has already been allocated
+ * - EEXIST - a memzone with the same name already exists
+ * - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_event_ring *
+rte_event_ring_create(const char *name, unsigned int count, int socket_id,
+ unsigned int flags);
+
+/**
+ * Search for an event ring based on its name
+ *
+ * @param name
+ * The name of the ring.
+ * @return
+ * The pointer to the ring matching the name, or NULL if not found,
+ * with rte_errno set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ */
+struct rte_event_ring *
+rte_event_ring_lookup(const char *name);
+
+/**
+ * De-allocate all memory used by the ring.
+ *
+ * @param r
+ * Ring to free
+ */
+void
+rte_event_ring_free(struct rte_event_ring *r);
+
+/**
+ * Return the size of the event ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The size of the data store used by the ring.
+ * NOTE: this is not the same as the usable space in the ring. To query that
+ * use ``rte_ring_get_capacity()``.
+ */
+static inline unsigned int
+rte_event_ring_get_size(const struct rte_event_ring *r)
+{
+ return rte_ring_get_size(&r->r);
+}
+
+/**
+ * Return the number of elements which can be stored in the event ring.
+ *
+ * @param r
+ * A pointer to the ring structure.
+ * @return
+ * The usable size of the ring.
+ */
+static inline unsigned int
+rte_event_ring_get_capacity(const struct rte_event_ring *r)
+{
+ return rte_ring_get_capacity(&r->r);
+}
+#endif
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c b/src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c
new file mode 100644
index 00000000..79070d48
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter.c
@@ -0,0 +1,1299 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#include <string.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_memzone.h>
+#include <rte_memory.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_common.h>
+#include <rte_timer.h>
+#include <rte_service_component.h>
+#include <rte_cycles.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+#include "rte_event_timer_adapter.h"
+#include "rte_event_timer_adapter_pmd.h"
+
+#define DATA_MZ_NAME_MAX_LEN 64
+#define DATA_MZ_NAME_FORMAT "rte_event_timer_adapter_data_%d"
+
+static int evtim_logtype;
+static int evtim_svc_logtype;
+static int evtim_buffer_logtype;
+
+static struct rte_event_timer_adapter adapters[RTE_EVENT_TIMER_ADAPTER_NUM_MAX];
+
+static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops;
+
+#define EVTIM_LOG(level, logtype, ...) \
+ rte_log(RTE_LOG_ ## level, logtype, \
+ RTE_FMT("EVTIMER: %s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) \
+ "\n", __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#define EVTIM_LOG_ERR(...) EVTIM_LOG(ERR, evtim_logtype, __VA_ARGS__)
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+#define EVTIM_LOG_DBG(...) \
+ EVTIM_LOG(DEBUG, evtim_logtype, __VA_ARGS__)
+#define EVTIM_BUF_LOG_DBG(...) \
+ EVTIM_LOG(DEBUG, evtim_buffer_logtype, __VA_ARGS__)
+#define EVTIM_SVC_LOG_DBG(...) \
+ EVTIM_LOG(DEBUG, evtim_svc_logtype, __VA_ARGS__)
+#else
+#define EVTIM_LOG_DBG(...) (void)0
+#define EVTIM_BUF_LOG_DBG(...) (void)0
+#define EVTIM_SVC_LOG_DBG(...) (void)0
+#endif
+
+static int
+default_port_conf_cb(uint16_t id, uint8_t event_dev_id, uint8_t *event_port_id,
+ void *conf_arg)
+{
+ struct rte_event_timer_adapter *adapter;
+ struct rte_eventdev *dev;
+ struct rte_event_dev_config dev_conf;
+ struct rte_event_port_conf *port_conf, def_port_conf = {0};
+ int started;
+ uint8_t port_id;
+ uint8_t dev_id;
+ int ret;
+
+ RTE_SET_USED(event_dev_id);
+
+ adapter = &adapters[id];
+ dev = &rte_eventdevs[adapter->data->event_dev_id];
+ dev_id = dev->data->dev_id;
+ dev_conf = dev->data->dev_conf;
+
+ started = dev->data->dev_started;
+ if (started)
+ rte_event_dev_stop(dev_id);
+
+ port_id = dev_conf.nb_event_ports;
+ dev_conf.nb_event_ports += 1;
+ ret = rte_event_dev_configure(dev_id, &dev_conf);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to configure event dev %u\n", dev_id);
+ if (started)
+ if (rte_event_dev_start(dev_id))
+ return -EIO;
+
+ return ret;
+ }
+
+ if (conf_arg != NULL)
+ port_conf = conf_arg;
+ else {
+ port_conf = &def_port_conf;
+ ret = rte_event_port_default_conf_get(dev_id, port_id,
+ port_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rte_event_port_setup(dev_id, port_id, port_conf);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to setup event port %u on event dev %u\n",
+ port_id, dev_id);
+ return ret;
+ }
+
+ *event_port_id = port_id;
+
+ if (started)
+ ret = rte_event_dev_start(dev_id);
+
+ return ret;
+}
+
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf)
+{
+ return rte_event_timer_adapter_create_ext(conf, default_port_conf_cb,
+ NULL);
+}
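+
+/*
+ * Illustrative configuration sketch (not part of the original source; all
+ * values below are placeholders): create an adapter on event device 0 with
+ * a 1 ms tick and a 10 s maximum timeout, then start it.
+ *
+ *	struct rte_event_timer_adapter_conf conf = {
+ *		.event_dev_id = 0,
+ *		.timer_adapter_id = 0,
+ *		.socket_id = rte_socket_id(),
+ *		.timer_tick_ns = 1000000,
+ *		.max_tmo_ns = 10000000000ULL,
+ *		.nb_timers = 1024,
+ *		.flags = 0,
+ *	};
+ *	struct rte_event_timer_adapter *adapter =
+ *		rte_event_timer_adapter_create(&conf);
+ *
+ *	if (adapter != NULL)
+ *		rte_event_timer_adapter_start(adapter);
+ */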
+
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create_ext(
+ const struct rte_event_timer_adapter_conf *conf,
+ rte_event_timer_adapter_port_conf_cb_t conf_cb,
+ void *conf_arg)
+{
+ uint16_t adapter_id;
+ struct rte_event_timer_adapter *adapter;
+ const struct rte_memzone *mz;
+ char mz_name[DATA_MZ_NAME_MAX_LEN];
+ int n, ret;
+ struct rte_eventdev *dev;
+
+ if (conf == NULL) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Check eventdev ID */
+ if (!rte_event_pmd_is_valid_dev(conf->event_dev_id)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ dev = &rte_eventdevs[conf->event_dev_id];
+
+ adapter_id = conf->timer_adapter_id;
+
+ /* Check that adapter_id is in range */
+ if (adapter_id >= RTE_EVENT_TIMER_ADAPTER_NUM_MAX) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* Check adapter ID not already allocated */
+ adapter = &adapters[adapter_id];
+ if (adapter->allocated) {
+ rte_errno = EEXIST;
+ return NULL;
+ }
+
+ /* Create shared data area. */
+ n = snprintf(mz_name, sizeof(mz_name), DATA_MZ_NAME_FORMAT, adapter_id);
+ if (n >= (int)sizeof(mz_name)) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+ mz = rte_memzone_reserve(mz_name,
+ sizeof(struct rte_event_timer_adapter_data),
+ conf->socket_id, 0);
+ if (mz == NULL)
+ /* rte_errno set by rte_memzone_reserve */
+ return NULL;
+
+ adapter->data = mz->addr;
+ memset(adapter->data, 0, sizeof(struct rte_event_timer_adapter_data));
+
+ adapter->data->mz = mz;
+ adapter->data->event_dev_id = conf->event_dev_id;
+ adapter->data->id = adapter_id;
+ adapter->data->socket_id = conf->socket_id;
+ adapter->data->conf = *conf; /* copy conf structure */
+
+ /* Query eventdev PMD for timer adapter capabilities and ops */
+ ret = dev->dev_ops->timer_adapter_caps_get(dev,
+ adapter->data->conf.flags,
+ &adapter->data->caps,
+ &adapter->ops);
+ if (ret < 0) {
+ rte_errno = ret;
+ goto free_memzone;
+ }
+
+ if (!(adapter->data->caps &
+ RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+ FUNC_PTR_OR_NULL_RET_WITH_ERRNO(conf_cb, -EINVAL);
+ ret = conf_cb(adapter->data->id, adapter->data->event_dev_id,
+ &adapter->data->event_port_id, conf_arg);
+ if (ret < 0) {
+ rte_errno = ret;
+ goto free_memzone;
+ }
+ }
+
+ /* If eventdev PMD did not provide ops, use default software
+ * implementation.
+ */
+ if (adapter->ops == NULL)
+ adapter->ops = &sw_event_adapter_timer_ops;
+
+ /* Allow driver to do some setup */
+ FUNC_PTR_OR_NULL_RET_WITH_ERRNO(adapter->ops->init, -ENOTSUP);
+ ret = adapter->ops->init(adapter);
+ if (ret < 0) {
+ rte_errno = ret;
+ goto free_memzone;
+ }
+
+ /* Set fast-path function pointers */
+ adapter->arm_burst = adapter->ops->arm_burst;
+ adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
+ adapter->cancel_burst = adapter->ops->cancel_burst;
+
+ adapter->allocated = 1;
+
+ return adapter;
+
+free_memzone:
+ rte_memzone_free(adapter->data->mz);
+ return NULL;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+
+ if (adapter->ops->get_info)
+ /* let driver set values it knows */
+ adapter->ops->get_info(adapter, adapter_info);
+
+ /* Set common values */
+ adapter_info->conf = adapter->data->conf;
+ adapter_info->event_dev_port_id = adapter->data->event_port_id;
+ adapter_info->caps = adapter->data->caps;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->start, -EINVAL);
+
+ ret = adapter->ops->start(adapter);
+ if (ret < 0)
+ return ret;
+
+ adapter->data->started = 1;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->stop, -EINVAL);
+
+ if (adapter->data->started == 0) {
+ EVTIM_LOG_ERR("event timer adapter %"PRIu8" already stopped",
+ adapter->data->id);
+ return 0;
+ }
+
+ ret = adapter->ops->stop(adapter);
+ if (ret < 0)
+ return ret;
+
+ adapter->data->started = 0;
+
+ return 0;
+}
+
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_lookup(uint16_t adapter_id)
+{
+ char name[DATA_MZ_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+ struct rte_event_timer_adapter_data *data;
+ struct rte_event_timer_adapter *adapter;
+ int ret;
+ struct rte_eventdev *dev;
+
+ if (adapters[adapter_id].allocated)
+ return &adapters[adapter_id]; /* Adapter is already loaded */
+
+ snprintf(name, DATA_MZ_NAME_MAX_LEN, DATA_MZ_NAME_FORMAT, adapter_id);
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ rte_errno = ENOENT;
+ return NULL;
+ }
+
+ data = mz->addr;
+
+ adapter = &adapters[data->id];
+ adapter->data = data;
+
+ dev = &rte_eventdevs[adapter->data->event_dev_id];
+
+ /* Query eventdev PMD for timer adapter capabilities and ops */
+ ret = dev->dev_ops->timer_adapter_caps_get(dev,
+ adapter->data->conf.flags,
+ &adapter->data->caps,
+ &adapter->ops);
+ if (ret < 0) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ /* If eventdev PMD did not provide ops, use default software
+ * implementation.
+ */
+ if (adapter->ops == NULL)
+ adapter->ops = &sw_event_adapter_timer_ops;
+
+ /* Set fast-path function pointers */
+ adapter->arm_burst = adapter->ops->arm_burst;
+ adapter->arm_tmo_tick_burst = adapter->ops->arm_tmo_tick_burst;
+ adapter->cancel_burst = adapter->ops->cancel_burst;
+
+ adapter->allocated = 1;
+
+ return adapter;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->uninit, -EINVAL);
+
+ if (adapter->data->started == 1) {
+ EVTIM_LOG_ERR("event timer adapter %"PRIu8" must be stopped "
+ "before freeing", adapter->data->id);
+ return -EBUSY;
+ }
+
+ /* free impl priv data */
+ ret = adapter->ops->uninit(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* free shared data area */
+ ret = rte_memzone_free(adapter->data->mz);
+ if (ret < 0)
+ return ret;
+
+ adapter->data = NULL;
+ adapter->allocated = 0;
+
+ return 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
+ uint32_t *service_id)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+
+ if (adapter->data->service_inited && service_id != NULL)
+ *service_id = adapter->data->service_id;
+
+ return adapter->data->service_inited ? 0 : -ESRCH;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->stats_get, -EINVAL);
+ if (stats == NULL)
+ return -EINVAL;
+
+ return adapter->ops->stats_get(adapter, stats);
+}
+
+int __rte_experimental
+rte_event_timer_adapter_stats_reset(struct rte_event_timer_adapter *adapter)
+{
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->ops->stats_reset, -EINVAL);
+ return adapter->ops->stats_reset(adapter);
+}
+
+/*
+ * Software event timer adapter buffer helper functions
+ */
+
+#define NSECPERSEC 1E9
+
+/* Optimizations used to index into the buffer require that the buffer size
+ * be a power of 2.
+ */
+#define EVENT_BUFFER_SZ 4096
+#define EVENT_BUFFER_BATCHSZ 32
+#define EVENT_BUFFER_MASK (EVENT_BUFFER_SZ - 1)
+
+struct event_buffer {
+ uint16_t head;
+ uint16_t tail;
+ struct rte_event events[EVENT_BUFFER_SZ];
+} __rte_cache_aligned;
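+
+/* head and tail are free-running 16-bit counters; only their difference
+ * and their masked values are used. For example, with head == 4100 and
+ * tail == 4095 the buffer holds 5 events, and the next slot to fill is
+ * events[4100 & EVENT_BUFFER_MASK] == events[4].
+ */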
+
+static inline bool
+event_buffer_full(struct event_buffer *bufp)
+{
+ return (bufp->head - bufp->tail) == EVENT_BUFFER_SZ;
+}
+
+static inline bool
+event_buffer_batch_ready(struct event_buffer *bufp)
+{
+ return (bufp->head - bufp->tail) >= EVENT_BUFFER_BATCHSZ;
+}
+
+static void
+event_buffer_init(struct event_buffer *bufp)
+{
+ bufp->head = bufp->tail = 0;
+ memset(&bufp->events, 0, sizeof(struct rte_event) * EVENT_BUFFER_SZ);
+}
+
+static int
+event_buffer_add(struct event_buffer *bufp, struct rte_event *eventp)
+{
+ uint16_t head_idx;
+ struct rte_event *buf_eventp;
+
+ if (event_buffer_full(bufp))
+ return -1;
+
+ /* Instead of modulus, bitwise AND with mask to get head_idx. */
+ head_idx = bufp->head & EVENT_BUFFER_MASK;
+ buf_eventp = &bufp->events[head_idx];
+ rte_memcpy(buf_eventp, eventp, sizeof(struct rte_event));
+
+ /* Wrap automatically when overflow occurs. */
+ bufp->head++;
+
+ return 0;
+}
+
+static void
+event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,
+ uint16_t *nb_events_flushed,
+ uint16_t *nb_events_inv)
+{
+ uint16_t head_idx, tail_idx, n = 0;
+ struct rte_event *events = bufp->events;
+
+ /* Instead of modulus, bitwise AND with mask to get index. */
+ head_idx = bufp->head & EVENT_BUFFER_MASK;
+ tail_idx = bufp->tail & EVENT_BUFFER_MASK;
+
+	/* Determine the largest contiguous run we can attempt to enqueue to the
+ * event device.
+ */
+ if (head_idx > tail_idx)
+ n = head_idx - tail_idx;
+ else if (head_idx < tail_idx)
+ n = EVENT_BUFFER_SZ - tail_idx;
+ else {
+ *nb_events_flushed = 0;
+ return;
+ }
+
+ *nb_events_inv = 0;
+ *nb_events_flushed = rte_event_enqueue_burst(dev_id, port_id,
+ &events[tail_idx], n);
+	if (*nb_events_flushed != n && rte_errno == EINVAL) {
+ EVTIM_LOG_ERR("failed to enqueue invalid event - dropping it");
+ (*nb_events_inv)++;
+ }
+
+ bufp->tail = bufp->tail + *nb_events_flushed + *nb_events_inv;
+}
+
+/*
+ * Software event timer adapter implementation
+ */
+
+struct rte_event_timer_adapter_sw_data {
+ /* List of messages for outstanding timers */
+ TAILQ_HEAD(, msg) msgs_tailq_head;
+ /* Lock to guard tailq and armed count */
+ rte_spinlock_t msgs_tailq_sl;
+ /* Identifier of service executing timer management logic. */
+ uint32_t service_id;
+ /* The cycle count at which the adapter should next tick */
+ uint64_t next_tick_cycles;
+ /* Incremented as the service moves through phases of an iteration */
+ volatile int service_phase;
+ /* The tick resolution used by adapter instance. May have been
+ * adjusted from what user requested
+ */
+ uint64_t timer_tick_ns;
+ /* Maximum timeout in nanoseconds allowed by adapter instance. */
+ uint64_t max_tmo_ns;
+ /* Ring containing messages to arm or cancel event timers */
+ struct rte_ring *msg_ring;
+ /* Mempool containing msg objects */
+ struct rte_mempool *msg_pool;
+ /* Buffered timer expiry events to be enqueued to an event device. */
+ struct event_buffer buffer;
+ /* Statistics */
+ struct rte_event_timer_adapter_stats stats;
+ /* The number of threads currently adding to the message ring */
+ rte_atomic16_t message_producer_count;
+};
+
+enum msg_type {MSG_TYPE_ARM, MSG_TYPE_CANCEL};
+
+struct msg {
+ enum msg_type type;
+ struct rte_event_timer *evtim;
+ struct rte_timer tim;
+ TAILQ_ENTRY(msg) msgs;
+};
+
+static void
+sw_event_timer_cb(struct rte_timer *tim, void *arg)
+{
+ int ret;
+ uint16_t nb_evs_flushed = 0;
+ uint16_t nb_evs_invalid = 0;
+ uint64_t opaque;
+ struct rte_event_timer *evtim;
+ struct rte_event_timer_adapter *adapter;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ evtim = arg;
+ opaque = evtim->impl_opaque[1];
+ adapter = (struct rte_event_timer_adapter *)(uintptr_t)opaque;
+ sw_data = adapter->data->adapter_priv;
+
+ ret = event_buffer_add(&sw_data->buffer, &evtim->ev);
+ if (ret < 0) {
+ /* If event buffer is full, put timer back in list with
+ * immediate expiry value, so that we process it again on the
+ * next iteration.
+ */
+ rte_timer_reset_sync(tim, 0, SINGLE, rte_lcore_id(),
+ sw_event_timer_cb, evtim);
+
+ sw_data->stats.evtim_retry_count++;
+ EVTIM_LOG_DBG("event buffer full, resetting rte_timer with "
+ "immediate expiry value");
+ } else {
+ struct msg *m = container_of(tim, struct msg, tim);
+ TAILQ_REMOVE(&sw_data->msgs_tailq_head, m, msgs);
+ EVTIM_BUF_LOG_DBG("buffered an event timer expiry event");
+ evtim->state = RTE_EVENT_TIMER_NOT_ARMED;
+
+ /* Free the msg object containing the rte_timer now that
+ * we've buffered its event successfully.
+ */
+ rte_mempool_put(sw_data->msg_pool, m);
+
+ /* Bump the count when we successfully add an expiry event to
+ * the buffer.
+ */
+ sw_data->stats.evtim_exp_count++;
+ }
+
+ if (event_buffer_batch_ready(&sw_data->buffer)) {
+ event_buffer_flush(&sw_data->buffer,
+ adapter->data->event_dev_id,
+ adapter->data->event_port_id,
+ &nb_evs_flushed,
+ &nb_evs_invalid);
+
+ sw_data->stats.ev_enq_count += nb_evs_flushed;
+ sw_data->stats.ev_inv_count += nb_evs_invalid;
+ }
+}
+
+static __rte_always_inline uint64_t
+get_timeout_cycles(struct rte_event_timer *evtim,
+ struct rte_event_timer_adapter *adapter)
+{
+ uint64_t timeout_ns;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+ timeout_ns = evtim->timeout_ticks * sw_data->timer_tick_ns;
+ return timeout_ns * rte_get_timer_hz() / NSECPERSEC;
+}
+
+/* This function returns true if one or more (adapter) ticks have occurred since
+ * the last time it was called.
+ */
+static inline bool
+adapter_did_tick(struct rte_event_timer_adapter *adapter)
+{
+ uint64_t cycles_per_adapter_tick, start_cycles;
+ uint64_t *next_tick_cyclesp;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+ next_tick_cyclesp = &sw_data->next_tick_cycles;
+
+ cycles_per_adapter_tick = sw_data->timer_tick_ns *
+ (rte_get_timer_hz() / NSECPERSEC);
+
+ start_cycles = rte_get_timer_cycles();
+
+ /* Note: initially, *next_tick_cyclesp == 0, so the clause below will
+ * execute, and set things going.
+ */
+
+ if (start_cycles >= *next_tick_cyclesp) {
+ /* Snap the current cycle count to the preceding adapter tick
+ * boundary.
+ */
+ start_cycles -= start_cycles % cycles_per_adapter_tick;
+
+ *next_tick_cyclesp = start_cycles + cycles_per_adapter_tick;
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Check that event timer timeout value is in range */
+static __rte_always_inline int
+check_timeout(struct rte_event_timer *evtim,
+ const struct rte_event_timer_adapter *adapter)
+{
+ uint64_t tmo_nsec;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+ tmo_nsec = evtim->timeout_ticks * sw_data->timer_tick_ns;
+
+ if (tmo_nsec > sw_data->max_tmo_ns)
+ return -1;
+
+ if (tmo_nsec < sw_data->timer_tick_ns)
+ return -2;
+
+ return 0;
+}
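+
+/* For example, with timer_tick_ns == 1000000 (1 ms) and max_tmo_ns ==
+ * 10000000000 (10 s), timeout_ticks must lie in [1, 10000]: larger values
+ * fail with -1 and zero fails with -2.
+ */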
+
+/* Check that the event timer's sched type matches the sched type of its
+ * destination event queue
+ */
+static __rte_always_inline int
+check_destination_event_queue(struct rte_event_timer *evtim,
+ const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ uint32_t sched_type;
+
+ ret = rte_event_queue_attr_get(adapter->data->event_dev_id,
+ evtim->ev.queue_id,
+ RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE,
+ &sched_type);
+
+ if ((ret < 0 && ret != -EOVERFLOW) ||
+ evtim->ev.sched_type != sched_type)
+ return -1;
+
+ return 0;
+}
+
+#define NB_OBJS 32
+static int
+sw_event_timer_adapter_service_func(void *arg)
+{
+ int i, num_msgs;
+ uint64_t cycles, opaque;
+ uint16_t nb_evs_flushed = 0;
+ uint16_t nb_evs_invalid = 0;
+ struct rte_event_timer_adapter *adapter;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ struct rte_event_timer *evtim = NULL;
+ struct rte_timer *tim = NULL;
+ struct msg *msg, *msgs[NB_OBJS];
+
+ adapter = arg;
+ sw_data = adapter->data->adapter_priv;
+
+ sw_data->service_phase = 1;
+ rte_smp_wmb();
+
+ while (rte_atomic16_read(&sw_data->message_producer_count) > 0 ||
+ !rte_ring_empty(sw_data->msg_ring)) {
+
+ num_msgs = rte_ring_dequeue_burst(sw_data->msg_ring,
+ (void **)msgs, NB_OBJS, NULL);
+
+ for (i = 0; i < num_msgs; i++) {
+ int ret = 0;
+
+ RTE_SET_USED(ret);
+
+ msg = msgs[i];
+ evtim = msg->evtim;
+
+ switch (msg->type) {
+ case MSG_TYPE_ARM:
+ EVTIM_SVC_LOG_DBG("dequeued ARM message from "
+ "ring");
+ tim = &msg->tim;
+ rte_timer_init(tim);
+ cycles = get_timeout_cycles(evtim,
+ adapter);
+ ret = rte_timer_reset(tim, cycles, SINGLE,
+ rte_lcore_id(),
+ sw_event_timer_cb,
+ evtim);
+ RTE_ASSERT(ret == 0);
+
+ evtim->impl_opaque[0] = (uintptr_t)tim;
+ evtim->impl_opaque[1] = (uintptr_t)adapter;
+
+ TAILQ_INSERT_TAIL(&sw_data->msgs_tailq_head,
+ msg,
+ msgs);
+ break;
+ case MSG_TYPE_CANCEL:
+ EVTIM_SVC_LOG_DBG("dequeued CANCEL message "
+ "from ring");
+ opaque = evtim->impl_opaque[0];
+ tim = (struct rte_timer *)(uintptr_t)opaque;
+ RTE_ASSERT(tim != NULL);
+
+ ret = rte_timer_stop(tim);
+ RTE_ASSERT(ret == 0);
+
+ /* Free the msg object for the original arm
+ * request.
+ */
+ struct msg *m;
+ m = container_of(tim, struct msg, tim);
+ TAILQ_REMOVE(&sw_data->msgs_tailq_head, m,
+ msgs);
+ rte_mempool_put(sw_data->msg_pool, m);
+
+ /* Free the msg object for the current msg */
+ rte_mempool_put(sw_data->msg_pool, msg);
+
+ evtim->impl_opaque[0] = 0;
+ evtim->impl_opaque[1] = 0;
+
+ break;
+ }
+ }
+ }
+
+ sw_data->service_phase = 2;
+ rte_smp_wmb();
+
+ if (adapter_did_tick(adapter)) {
+ rte_timer_manage();
+
+ event_buffer_flush(&sw_data->buffer,
+ adapter->data->event_dev_id,
+ adapter->data->event_port_id,
+ &nb_evs_flushed, &nb_evs_invalid);
+
+ sw_data->stats.ev_enq_count += nb_evs_flushed;
+ sw_data->stats.ev_inv_count += nb_evs_invalid;
+ sw_data->stats.adapter_tick_count++;
+ }
+
+ sw_data->service_phase = 0;
+ rte_smp_wmb();
+
+ return 0;
+}
+
+/* The adapter initialization function rounds the mempool size up to the next
+ * power of 2, so we can take the difference between that value and what the
+ * user requested, and use the space for caches. This avoids a scenario where a
+ * user can't arm the number of timers the adapter was configured with because
+ * mempool objects have been lost to caches.
+ *
+ * nb_actual should always be a power of 2, so we can iterate over the powers
+ * of 2 to find the largest cache size we can use.
+ */
+static int
+compute_msg_mempool_cache_size(uint64_t nb_requested, uint64_t nb_actual)
+{
+ int i;
+ int size;
+ int cache_size = 0;
+
+ for (i = 0; ; i++) {
+ size = 1 << i;
+
+ if (RTE_MAX_LCORE * size < (int)(nb_actual - nb_requested) &&
+ size < RTE_MEMPOOL_CACHE_MAX_SIZE &&
+ size <= nb_actual / 1.5)
+ cache_size = size;
+ else
+ break;
+ }
+
+ return cache_size;
+}
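+
+/* Worked example (illustrative): if the user requested 1000 timers and the
+ * pool was rounded up to 1024, the slack is only 24 objects, so with
+ * RTE_MAX_LCORE == 128 even a cache size of 1 would need 128 objects and
+ * no cache is used. A larger round-up (e.g. 70000 -> 131072) leaves enough
+ * slack for a non-zero cache size.
+ */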
+
+#define SW_MIN_INTERVAL 1E5
+
+static int
+sw_event_timer_adapter_init(struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ uint64_t nb_timers;
+ unsigned int flags;
+ struct rte_service_spec service;
+	static bool timer_subsystem_inited; /* static initialized to false */
+
+	/* Validate the requested tick interval before allocating anything, so
+	 * an invalid interval does not leak the private data allocation.
+	 */
+	if (adapter->data->conf.timer_tick_ns < SW_MIN_INTERVAL) {
+		EVTIM_LOG_ERR("failed to create adapter with requested tick "
+			      "interval");
+		rte_errno = EINVAL;
+		return -1;
+	}
+
+	/* Allocate storage for SW implementation data */
+	char priv_data_name[RTE_RING_NAMESIZE];
+	snprintf(priv_data_name, RTE_RING_NAMESIZE, "sw_evtim_adap_priv_%"PRIu8,
+		adapter->data->id);
+	adapter->data->adapter_priv = rte_zmalloc_socket(
+		priv_data_name,
+		sizeof(struct rte_event_timer_adapter_sw_data),
+		RTE_CACHE_LINE_SIZE,
+		adapter->data->socket_id);
+	if (adapter->data->adapter_priv == NULL) {
+		EVTIM_LOG_ERR("failed to allocate space for private data");
+		rte_errno = ENOMEM;
+		return -1;
+	}
+
+	sw_data = adapter->data->adapter_priv;
+
+ sw_data->timer_tick_ns = adapter->data->conf.timer_tick_ns;
+ sw_data->max_tmo_ns = adapter->data->conf.max_tmo_ns;
+
+ TAILQ_INIT(&sw_data->msgs_tailq_head);
+ rte_spinlock_init(&sw_data->msgs_tailq_sl);
+ rte_atomic16_init(&sw_data->message_producer_count);
+
+ /* Rings require power of 2, so round up to next such value */
+ nb_timers = rte_align64pow2(adapter->data->conf.nb_timers);
+
+ char msg_ring_name[RTE_RING_NAMESIZE];
+ snprintf(msg_ring_name, RTE_RING_NAMESIZE,
+ "sw_evtim_adap_msg_ring_%"PRIu8, adapter->data->id);
+ flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
+ RING_F_SP_ENQ | RING_F_SC_DEQ :
+ RING_F_SC_DEQ;
+ sw_data->msg_ring = rte_ring_create(msg_ring_name, nb_timers,
+ adapter->data->socket_id, flags);
+ if (sw_data->msg_ring == NULL) {
+ EVTIM_LOG_ERR("failed to create message ring");
+ rte_errno = ENOMEM;
+ goto free_priv_data;
+ }
+
+ char pool_name[RTE_RING_NAMESIZE];
+ snprintf(pool_name, RTE_RING_NAMESIZE, "sw_evtim_adap_msg_pool_%"PRIu8,
+ adapter->data->id);
+
+ /* Both the arming/canceling thread and the service thread will do puts
+ * to the mempool, but if the SP_PUT flag is enabled, we can specify
+ * single-consumer get for the mempool.
+ */
+ flags = adapter->data->conf.flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT ?
+ MEMPOOL_F_SC_GET : 0;
+
+ /* The usable size of a ring is count - 1, so subtract one here to
+ * make the counts agree.
+ */
+ int pool_size = nb_timers - 1;
+ int cache_size = compute_msg_mempool_cache_size(
+ adapter->data->conf.nb_timers, nb_timers);
+ sw_data->msg_pool = rte_mempool_create(pool_name, pool_size,
+ sizeof(struct msg), cache_size,
+ 0, NULL, NULL, NULL, NULL,
+ adapter->data->socket_id, flags);
+ if (sw_data->msg_pool == NULL) {
+ EVTIM_LOG_ERR("failed to create message object mempool");
+ rte_errno = ENOMEM;
+ goto free_msg_ring;
+ }
+
+ event_buffer_init(&sw_data->buffer);
+
+ /* Register a service component to run adapter logic */
+ memset(&service, 0, sizeof(service));
+ snprintf(service.name, RTE_SERVICE_NAME_MAX,
+ "sw_evimer_adap_svc_%"PRIu8, adapter->data->id);
+ service.socket_id = adapter->data->socket_id;
+ service.callback = sw_event_timer_adapter_service_func;
+ service.callback_userdata = adapter;
+ service.capabilities &= ~(RTE_SERVICE_CAP_MT_SAFE);
+ ret = rte_service_component_register(&service, &sw_data->service_id);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to register service %s with id %"PRIu32
+ ": err = %d", service.name, sw_data->service_id,
+ ret);
+
+ rte_errno = ENOSPC;
+ goto free_msg_pool;
+ }
+
+ EVTIM_LOG_DBG("registered service %s with id %"PRIu32, service.name,
+ sw_data->service_id);
+
+ adapter->data->service_id = sw_data->service_id;
+ adapter->data->service_inited = 1;
+
+ if (!timer_subsystem_inited) {
+ rte_timer_subsystem_init();
+ timer_subsystem_inited = true;
+ }
+
+ return 0;
+
+free_msg_pool:
+ rte_mempool_free(sw_data->msg_pool);
+free_msg_ring:
+ rte_ring_free(sw_data->msg_ring);
+free_priv_data:
+ rte_free(sw_data);
+ return -1;
+}
+
+static int
+sw_event_timer_adapter_uninit(struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ struct msg *m1, *m2;
+ struct rte_event_timer_adapter_sw_data *sw_data =
+ adapter->data->adapter_priv;
+
+ rte_spinlock_lock(&sw_data->msgs_tailq_sl);
+
+ /* Cancel outstanding rte_timers and free msg objects */
+ m1 = TAILQ_FIRST(&sw_data->msgs_tailq_head);
+ while (m1 != NULL) {
+ EVTIM_LOG_DBG("freeing outstanding timer");
+ m2 = TAILQ_NEXT(m1, msgs);
+
+ rte_timer_stop_sync(&m1->tim);
+ rte_mempool_put(sw_data->msg_pool, m1);
+
+ m1 = m2;
+ }
+
+ rte_spinlock_unlock(&sw_data->msgs_tailq_sl);
+
+ ret = rte_service_component_unregister(sw_data->service_id);
+ if (ret < 0) {
+ EVTIM_LOG_ERR("failed to unregister service component");
+ return ret;
+ }
+
+ rte_ring_free(sw_data->msg_ring);
+ rte_mempool_free(sw_data->msg_pool);
+ rte_free(adapter->data->adapter_priv);
+
+ return 0;
+}
+
+static inline int32_t
+get_mapped_count_for_service(uint32_t service_id)
+{
+ int32_t core_count, i, mapped_count = 0;
+ uint32_t lcore_arr[RTE_MAX_LCORE];
+
+ core_count = rte_service_lcore_list(lcore_arr, RTE_MAX_LCORE);
+
+ for (i = 0; i < core_count; i++)
+ if (rte_service_map_lcore_get(service_id, lcore_arr[i]) == 1)
+ mapped_count++;
+
+ return mapped_count;
+}
+
+static int
+sw_event_timer_adapter_start(const struct rte_event_timer_adapter *adapter)
+{
+ int mapped_count;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+
+ sw_data = adapter->data->adapter_priv;
+
+ /* Mapping the service to more than one service core can introduce
+ * delays while one thread is waiting to acquire a lock, so only allow
+ * one core to be mapped to the service.
+ */
+ mapped_count = get_mapped_count_for_service(sw_data->service_id);
+
+ if (mapped_count == 1)
+ return rte_service_component_runstate_set(sw_data->service_id,
+ 1);
+
+ return mapped_count < 1 ? -ENOENT : -ENOTSUP;
+}
+
+static int
+sw_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter)
+{
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data =
+ adapter->data->adapter_priv;
+
+ ret = rte_service_component_runstate_set(sw_data->service_id, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the service to complete its final iteration before
+ * stopping.
+ */
+ while (sw_data->service_phase != 0)
+ rte_pause();
+
+ rte_smp_rmb();
+
+ return 0;
+}
+
+static void
+sw_event_timer_adapter_get_info(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info)
+{
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ sw_data = adapter->data->adapter_priv;
+
+ adapter_info->min_resolution_ns = sw_data->timer_tick_ns;
+ adapter_info->max_tmo_ns = sw_data->max_tmo_ns;
+}
+
+static int
+sw_event_timer_adapter_stats_get(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats)
+{
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ sw_data = adapter->data->adapter_priv;
+ *stats = sw_data->stats;
+ return 0;
+}
+
+static int
+sw_event_timer_adapter_stats_reset(
+ const struct rte_event_timer_adapter *adapter)
+{
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ sw_data = adapter->data->adapter_priv;
+ memset(&sw_data->stats, 0, sizeof(sw_data->stats));
+ return 0;
+}
+
+static __rte_always_inline uint16_t
+__sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+ uint16_t i;
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ struct msg *msgs[nb_evtims];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ /* Check that the service is running. */
+ if (rte_service_runstate_get(adapter->data->service_id) != 1) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+#endif
+
+ sw_data = adapter->data->adapter_priv;
+
+ ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
+ if (ret < 0) {
+ rte_errno = ENOSPC;
+ return 0;
+ }
+
+ /* Let the service know we're producing messages for it to process */
+ rte_atomic16_inc(&sw_data->message_producer_count);
+
+ /* If the service is managing timers, wait for it to finish */
+ while (sw_data->service_phase == 2)
+ rte_pause();
+
+ rte_smp_rmb();
+
+ for (i = 0; i < nb_evtims; i++) {
+ /* Don't modify the event timer state in these cases */
+ if (evtims[i]->state == RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EALREADY;
+ break;
+ } else if (!(evtims[i]->state == RTE_EVENT_TIMER_NOT_ARMED ||
+ evtims[i]->state == RTE_EVENT_TIMER_CANCELED)) {
+ rte_errno = EINVAL;
+ break;
+ }
+
+ ret = check_timeout(evtims[i], adapter);
+ if (ret == -1) {
+ evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOLATE;
+ rte_errno = EINVAL;
+ break;
+ }
+ if (ret == -2) {
+ evtims[i]->state = RTE_EVENT_TIMER_ERROR_TOOEARLY;
+ rte_errno = EINVAL;
+ break;
+ }
+
+ if (check_destination_event_queue(evtims[i], adapter) < 0) {
+ evtims[i]->state = RTE_EVENT_TIMER_ERROR;
+ rte_errno = EINVAL;
+ break;
+ }
+
+ /* Checks passed, set up a message to enqueue */
+ msgs[i]->type = MSG_TYPE_ARM;
+ msgs[i]->evtim = evtims[i];
+
+ /* Set the payload pointer if not set. */
+ if (evtims[i]->ev.event_ptr == NULL)
+ evtims[i]->ev.event_ptr = evtims[i];
+
+ /* msg objects that get enqueued successfully will be freed
+ * either by a future cancel operation or by the timer
+ * expiration callback.
+ */
+ if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
+ rte_errno = ENOSPC;
+ break;
+ }
+
+ EVTIM_LOG_DBG("enqueued ARM message to ring");
+
+ evtims[i]->state = RTE_EVENT_TIMER_ARMED;
+ }
+
+ /* Let the service know we're done producing messages */
+ rte_atomic16_dec(&sw_data->message_producer_count);
+
+ if (i < nb_evtims)
+ rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
+ nb_evtims - i);
+
+ return i;
+}
+
+static uint16_t
+sw_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+ return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
+}
+
+static uint16_t
+sw_event_timer_cancel_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+ uint16_t i;
+ int ret;
+ struct rte_event_timer_adapter_sw_data *sw_data;
+ struct msg *msgs[nb_evtims];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ /* Check that the service is running. */
+ if (rte_service_runstate_get(adapter->data->service_id) != 1) {
+ rte_errno = EINVAL;
+ return 0;
+ }
+#endif
+
+ sw_data = adapter->data->adapter_priv;
+
+ ret = rte_mempool_get_bulk(sw_data->msg_pool, (void **)msgs, nb_evtims);
+ if (ret < 0) {
+ rte_errno = ENOSPC;
+ return 0;
+ }
+
+ /* Let the service know we're producing messages for it to process */
+ rte_atomic16_inc(&sw_data->message_producer_count);
+
+ /* If the service could be modifying event timer states, wait */
+ while (sw_data->service_phase == 2)
+ rte_pause();
+
+ rte_smp_rmb();
+
+ for (i = 0; i < nb_evtims; i++) {
+ /* Don't modify the event timer state in these cases */
+ if (evtims[i]->state == RTE_EVENT_TIMER_CANCELED) {
+ rte_errno = EALREADY;
+ break;
+ } else if (evtims[i]->state != RTE_EVENT_TIMER_ARMED) {
+ rte_errno = EINVAL;
+ break;
+ }
+
+ msgs[i]->type = MSG_TYPE_CANCEL;
+ msgs[i]->evtim = evtims[i];
+
+ if (rte_ring_enqueue(sw_data->msg_ring, msgs[i]) < 0) {
+ rte_errno = ENOSPC;
+ break;
+ }
+
+ EVTIM_LOG_DBG("enqueued CANCEL message to ring");
+
+ evtims[i]->state = RTE_EVENT_TIMER_CANCELED;
+ }
+
+ /* Let the service know we're done producing messages */
+ rte_atomic16_dec(&sw_data->message_producer_count);
+
+ if (i < nb_evtims)
+ rte_mempool_put_bulk(sw_data->msg_pool, (void **)&msgs[i],
+ nb_evtims - i);
+
+ return i;
+}
+
+static uint16_t
+sw_event_timer_arm_tmo_tick_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint64_t timeout_ticks,
+ uint16_t nb_evtims)
+{
+ int i;
+
+ for (i = 0; i < nb_evtims; i++)
+ evtims[i]->timeout_ticks = timeout_ticks;
+
+ return __sw_event_timer_arm_burst(adapter, evtims, nb_evtims);
+}
+
+static const struct rte_event_timer_adapter_ops sw_event_adapter_timer_ops = {
+ .init = sw_event_timer_adapter_init,
+ .uninit = sw_event_timer_adapter_uninit,
+ .start = sw_event_timer_adapter_start,
+ .stop = sw_event_timer_adapter_stop,
+ .get_info = sw_event_timer_adapter_get_info,
+ .stats_get = sw_event_timer_adapter_stats_get,
+ .stats_reset = sw_event_timer_adapter_stats_reset,
+ .arm_burst = sw_event_timer_arm_burst,
+ .arm_tmo_tick_burst = sw_event_timer_arm_tmo_tick_burst,
+ .cancel_burst = sw_event_timer_cancel_burst,
+};
+
+RTE_INIT(event_timer_adapter_init_log)
+{
+ evtim_logtype = rte_log_register("lib.eventdev.adapter.timer");
+ if (evtim_logtype >= 0)
+ rte_log_set_level(evtim_logtype, RTE_LOG_NOTICE);
+
+ evtim_buffer_logtype = rte_log_register("lib.eventdev.adapter.timer."
+ "buffer");
+ if (evtim_buffer_logtype >= 0)
+ rte_log_set_level(evtim_buffer_logtype, RTE_LOG_NOTICE);
+
+ evtim_svc_logtype = rte_log_register("lib.eventdev.adapter.timer.svc");
+ if (evtim_svc_logtype >= 0)
+ rte_log_set_level(evtim_svc_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter.h b/src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter.h
new file mode 100644
index 00000000..d4ea6f17
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter.h
@@ -0,0 +1,766 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc.
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef __RTE_EVENT_TIMER_ADAPTER_H__
+#define __RTE_EVENT_TIMER_ADAPTER_H__
+
+/**
+ * @file
+ *
+ * RTE Event Timer Adapter
+ *
+ * An event timer adapter has the following abstract working model:
+ *
+ * timer_tick_ns
+ * +
+ * +-------+ |
+ * | | |
+ * +-------+ bkt 0 +----v---+
+ * | | | |
+ * | +-------+ |
+ * +---+---+ +---+---+ +---+---+---+---+
+ * | | | | | | | | |
+ * | bkt n | | bkt 1 |<-> t0| t1| t2| tn|
+ * | | | | | | | | |
+ * +---+---+ +---+---+ +---+---+---+---+
+ * | Timer adapter |
+ * +---+---+ +---+---+
+ * | | | |
+ * | bkt 4 | | bkt 2 |<--- Current bucket
+ * | | | |
+ * +---+---+ +---+---+
+ * | +-------+ |
+ * | | | |
+ * +------+ bkt 3 +-------+
+ * | |
+ * +-------+
+ *
+ * - It has a virtual monotonically increasing 64-bit timer adapter clock
+ *   based on an *enum rte_event_timer_adapter_clk_src* clock source. The
+ *   clock source could be a CPU clock or a platform-dependent external
+ *   clock.
+ *
+ * - The application creates a timer adapter instance with the given clock
+ *   source, the total number of event timers, and a resolution (expressed in
+ *   ns) used to traverse between the buckets.
+ *
+ * - Each timer adapter may have 0 to n buckets based on the configured
+ *   max timeout (max_tmo_ns) and resolution (timer_tick_ns). Upon starting
+ *   the timer adapter, the adapter starts ticking at *timer_tick_ns*
+ *   resolution.
+ *
+ * - The application arms an event timer that will expire a given multiple of
+ *   *timer_tick_ns* from now.
+ *
+ * - The application can cancel an armed timer and no timer expiry event will be
+ * generated.
+ *
+ * - If a timer expires then the library injects the timer expiry event in
+ * the designated event queue.
+ *
+ * - The timer expiry event will be received through *rte_event_dequeue_burst*.
+ *
+ * - The application frees the timer adapter instance.
+ *
+ * Multiple timer adapters can be created with a varying level of resolution
+ * for various expiry use cases that run in parallel.
+ *
+ * Before using the timer adapter, the application has to create and configure
+ * an event device along with the event port. Based on the event device
+ * capability it might require creating an additional event port to be used
+ * by the timer adapter.
+ *
+ * The application creates the event timer adapter using
+ * ``rte_event_timer_adapter_create()``. The event device id is passed to
+ * this function; inside it, the event device capability is checked, and if
+ * an in-built port is absent, a default callback is used to create a new
+ * producer port.
+ *
+ * The application may also use the function
+ * ``rte_event_timer_adapter_create_ext()`` to have granular control over
+ * producer port creation in a case where the in-built port is absent.
+ *
+ * After creating the timer adapter, the application has to start it
+ * using ``rte_event_timer_adapter_start()``. The buckets are traversed from
+ * 0 to n; when the adapter ticks, the next bucket is visited. Each time,
+ * the list per bucket is processed, and timer expiry events are sent to the
+ * designated event queue.
+ *
+ * The application can arm one or more event timers using
+ * ``rte_event_timer_arm_burst()``. The *timeout_ticks* value represents the
+ * number of *timer_tick_ns* after which the timer has to expire. The timeout
+ * at which the timers expire can be common to a group of timers or
+ * independent for each event timer instance;
+ * ``rte_event_timer_arm_tmo_tick_burst()`` addresses the former case and
+ * ``rte_event_timer_arm_burst()`` the latter.
+ *
+ * The application can prevent armed timers from expiring using
+ * ``rte_event_timer_cancel_burst()``.
+ *
+ * In a secondary process, ``rte_event_timer_adapter_lookup()`` can be used
+ * to get the timer adapter pointer from its id; the pointer can then be used
+ * to invoke fast-path operations such as arm and cancel.
+ *
+ * Some of the use cases of event timer adapter are Beacon Timers,
+ * Generic SW Timeout, Wireless MAC Scheduling, 3G Frame Protocols,
+ * Packet Scheduling, Protocol Retransmission Timers, Supervision Timers.
+ * All these use cases require high resolution and low time drift.
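+ *
+ * A minimal usage sketch follows (an illustrative assumption added for
+ * documentation, not part of the original API description; *adapter_conf*,
+ * *dev_id*, *port_id* and the array sizes are placeholders, and error
+ * handling is omitted):
+ *
+ * @code
+ *	struct rte_event_timer_adapter *adapter;
+ *	struct rte_event_timer *evtims[16];
+ *	struct rte_event ev[16];
+ *	uint16_t n;
+ *
+ *	adapter = rte_event_timer_adapter_create(&adapter_conf);
+ *	rte_event_timer_adapter_start(adapter);
+ *
+ *	// ... allocate and fill in the evtims[] objects ...
+ *	n = rte_event_timer_arm_burst(adapter, evtims, 16);
+ *
+ *	// On a worker core, expiry events arrive like any other event:
+ *	n = rte_event_dequeue_burst(dev_id, port_id, ev, 16, 0);
+ * @endcode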
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_spinlock.h>
+#include <rte_memory.h>
+
+#include "rte_eventdev.h"
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this enum may change without prior notice
+ *
+ * Timer adapter clock source
+ */
+enum rte_event_timer_adapter_clk_src {
+ RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
+ /**< Use CPU clock as the clock source. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK0,
+ /**< Platform dependent external clock source 0. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK1,
+ /**< Platform dependent external clock source 1. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK2,
+ /**< Platform dependent external clock source 2. */
+ RTE_EVENT_TIMER_ADAPTER_EXT_CLK3,
+ /**< Platform dependent external clock source 3. */
+};
+
+#define RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES (1ULL << 0)
+/**< The event timer adapter implementation may have constraints on the
+ * resolution (timer_tick_ns) and maximum timer expiry timeout(max_tmo_ns)
+ * based on the given timer adapter or system. If this flag is set, the
+ * implementation adjusts the resolution and maximum timeout to the best
+ * possible configuration. On successful timer adapter creation, the
+ * application can get the configured resolution and max timeout with
+ * ``rte_event_timer_adapter_get_info()``.
+ *
+ * @see struct rte_event_timer_adapter_info::min_resolution_ns
+ * @see struct rte_event_timer_adapter_info::max_tmo_ns
+ */
+#define RTE_EVENT_TIMER_ADAPTER_F_SP_PUT (1ULL << 1)
+/**< ``rte_event_timer_arm_burst()`` API to be used in single producer mode.
+ *
+ * @see struct rte_event_timer_adapter_conf::flags
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Timer adapter configuration structure
+ */
+struct rte_event_timer_adapter_conf {
+ uint8_t event_dev_id;
+ /**< Event device identifier */
+ uint16_t timer_adapter_id;
+ /**< Event timer adapter identifier */
+ uint32_t socket_id;
+ /**< Identifier of socket from which to allocate memory for adapter */
+ enum rte_event_timer_adapter_clk_src clk_src;
+ /**< Clock source for timer adapter */
+ uint64_t timer_tick_ns;
+ /**< Timer adapter resolution in ns */
+ uint64_t max_tmo_ns;
+ /**< Maximum timer timeout(expiry) in ns */
+ uint64_t nb_timers;
+ /**< Total number of timers per adapter */
+ uint64_t flags;
+ /**< Timer adapter config flags (RTE_EVENT_TIMER_ADAPTER_F_*) */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Event timer adapter stats structure
+ */
+struct rte_event_timer_adapter_stats {
+ uint64_t evtim_exp_count;
+ /**< Number of event timers that have expired. */
+ uint64_t ev_enq_count;
+ /**< Eventdev enqueue count */
+ uint64_t ev_inv_count;
+ /**< Invalid expiry event count */
+ uint64_t evtim_retry_count;
+ /**< Event timer retry count */
+ uint64_t adapter_tick_count;
+ /**< Tick count for the adapter, at its resolution */
+};
+
+struct rte_event_timer_adapter;
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Callback function type for producer port creation.
+ */
+typedef int (*rte_event_timer_adapter_port_conf_cb_t)(uint16_t id,
+ uint8_t event_dev_id,
+ uint8_t *event_port_id,
+ void *conf_arg);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create an event timer adapter.
+ *
+ * This function must be invoked first before any other function in the API.
+ *
+ * @param conf
+ * The event timer adapter configuration structure.
+ *
+ * @return
+ * A pointer to the new allocated event timer adapter on success.
+ * NULL on error with rte_errno set appropriately.
+ * Possible rte_errno values include:
+ * - ERANGE: timer_tick_ns is not in supported range.
+ * - ENOMEM: unable to allocate sufficient memory for adapter instances
+ * - EINVAL: invalid event device identifier specified in config
+ * - ENOSPC: maximum number of adapters already created
+ * - EIO: event device reconfiguration and restart error. The adapter
+ * reconfigures the event device with an additional port by default if it is
+ * required to use a service to manage timers. If the device had been started
+ * before this call, this error code indicates an error in restart following
+ * an error in reconfiguration, i.e., a combination of the two error codes.
+ */
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create(const struct rte_event_timer_adapter_conf *conf);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Create a timer adapter with the supplied callback.
+ *
+ * This function can be used to have more granular control over the timer
+ * adapter creation. If a built-in port is absent, the function uses the
+ * provided callback to create and obtain the port id to be used as a
+ * producer port.
+ *
+ * @param conf
+ * The timer adapter configuration structure
+ * @param conf_cb
+ * The port config callback function.
+ * @param conf_arg
+ * Opaque pointer to the argument for the callback function
+ *
+ * @return
+ * A pointer to the new allocated event timer adapter on success.
+ * NULL on error with rte_errno set appropriately.
+ * Possible rte_errno values include:
+ * - ERANGE: timer_tick_ns is not in supported range.
+ * - ENOMEM: unable to allocate sufficient memory for adapter instances
+ * - EINVAL: invalid event device identifier specified in config
+ * - ENOSPC: maximum number of adapters already created
+ */
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_create_ext(
+ const struct rte_event_timer_adapter_conf *conf,
+ rte_event_timer_adapter_port_conf_cb_t conf_cb,
+ void *conf_arg);
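+
+/* An illustrative sketch (an assumption, not part of the original header) of
+ * a producer port configuration callback suitable for
+ * rte_event_timer_adapter_create_ext(). Here conf_arg is assumed to carry a
+ * struct rte_event_port_conf, and prod_port_id is a placeholder for a free
+ * port on the device:
+ *
+ *	static int
+ *	example_port_conf_cb(uint16_t id, uint8_t event_dev_id,
+ *			     uint8_t *event_port_id, void *conf_arg)
+ *	{
+ *		struct rte_event_port_conf *port_conf = conf_arg;
+ *
+ *		RTE_SET_USED(id);
+ *		if (rte_event_port_setup(event_dev_id, prod_port_id,
+ *					 port_conf) < 0)
+ *			return -1;
+ *		*event_port_id = prod_port_id;
+ *		return 0;
+ *	}
+ */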
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Timer adapter info structure.
+ */
+struct rte_event_timer_adapter_info {
+ uint64_t min_resolution_ns;
+ /**< Minimum timer adapter resolution in ns */
+ uint64_t max_tmo_ns;
+ /**< Maximum timer timeout(expire) in ns */
+ struct rte_event_timer_adapter_conf conf;
+ /**< Configured timer adapter attributes */
+ uint32_t caps;
+ /**< Event timer adapter capabilities */
+ int16_t event_dev_port_id;
+ /**< Event device port ID, if applicable */
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the contextual information of an event timer adapter.
+ *
+ * @param adapter
+ * A pointer to the event timer adapter structure.
+ *
+ * @param[out] adapter_info
+ * A pointer to a structure of type *rte_event_timer_adapter_info* to be
+ * filled with the contextual information of the adapter.
+ *
+ * @return
+ * - 0: Success, driver updates the contextual information of the
+ * timer adapter
+ * - <0: Error code returned by the driver info get function.
+ * - -EINVAL: adapter identifier invalid
+ *
+ * @see RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES,
+ * struct rte_event_timer_adapter_info
+ *
+ */
+int __rte_experimental
+rte_event_timer_adapter_get_info(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Start a timer adapter.
+ *
+ * The adapter start step is the last one in the setup sequence; it sets the
+ * timer adapter to start accepting timers and scheduling expiry events to
+ * the event queues.
+ *
+ * On success, all basic functions exported by the API (timer arm,
+ * timer cancel and so on) can be invoked.
+ *
+ * @param adapter
+ * A pointer to the event timer adapter structure.
+ *
+ * @return
+ * - 0: Success, adapter started.
+ * - <0: Error code returned by the driver start function.
+ * - -EINVAL if adapter identifier invalid
+ * - -ENOENT if software adapter but no service core mapped
+ * - -ENOTSUP if software adapter and more than one service core mapped
+ */
+int __rte_experimental
+rte_event_timer_adapter_start(
+ const struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Stop an event timer adapter.
+ *
+ * The adapter can be restarted with a call to
+ * ``rte_event_timer_adapter_start()``.
+ *
+ * @param adapter
+ * A pointer to the event timer adapter structure.
+ *
+ * @return
+ * - 0: Success, adapter stopped.
+ * - <0: Error code returned by the driver stop function.
+ * - -EINVAL if adapter identifier invalid
+ */
+int __rte_experimental
+rte_event_timer_adapter_stop(const struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Lookup an event timer adapter using its identifier.
+ *
+ * If an event timer adapter was created in another process with the same
+ * identifier, this function will locate its state and set up access to it
+ * so that it can be used in this process.
+ *
+ * @param adapter_id
+ * The event timer adapter identifier.
+ *
+ * @return
+ * A pointer to the event timer adapter matching the identifier on success.
+ * NULL on error with rte_errno set appropriately.
+ * Possible rte_errno values include:
+ * - ENOENT - requested entry not available to return.
+ */
+struct rte_event_timer_adapter * __rte_experimental
+rte_event_timer_adapter_lookup(uint16_t adapter_id);
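+
+/* An illustrative secondary-process sketch (an assumption, not part of the
+ * original header): recover an adapter created by the primary process before
+ * invoking fast-path operations on it.
+ *
+ *	struct rte_event_timer_adapter *adapter;
+ *
+ *	adapter = rte_event_timer_adapter_lookup(adapter_id);
+ *	if (adapter == NULL && rte_errno == ENOENT)
+ *		rte_exit(EXIT_FAILURE, "event timer adapter not found\n");
+ */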
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Free an event timer adapter.
+ *
+ * Destroy an event timer adapter, freeing all resources.
+ *
+ * Before invoking this function, the application must wait for all the
+ * armed timers to expire or cancel the outstanding armed timers.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ *
+ * @return
+ * - 0: Successfully freed the event timer adapter resources.
+ * - <0: Failed to free the event timer adapter resources.
+ * - -EAGAIN: adapter is busy; timers outstanding
+ * - -EBUSY: stop hasn't been called for this adapter yet
+ * - -EINVAL: adapter id invalid, or adapter invalid
+ */
+int __rte_experimental
+rte_event_timer_adapter_free(struct rte_event_timer_adapter *adapter);
+
+/**
+ * Retrieve the service ID of the event timer adapter. If the adapter doesn't
+ * use an rte_service function, this function returns -ESRCH.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure
+ * - -ESRCH: the adapter does not require a service to operate
+ */
+int __rte_experimental
+rte_event_timer_adapter_service_id_get(struct rte_event_timer_adapter *adapter,
+ uint32_t *service_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve statistics for an event timer adapter instance.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param[out] stats
+ * A pointer to a structure to fill with statistics.
+ *
+ * @return
+ * - 0: Successfully retrieved.
+ * - <0: Failure; error code returned.
+ */
+int __rte_experimental
+rte_event_timer_adapter_stats_get(struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Reset statistics for an event timer adapter instance.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ *
+ * @return
+ * - 0: Successfully reset;
+ * - <0: Failure; error code returned.
+ */
+int __rte_experimental rte_event_timer_adapter_stats_reset(
+ struct rte_event_timer_adapter *adapter);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * Event timer state.
+ */
+enum rte_event_timer_state {
+ RTE_EVENT_TIMER_NOT_ARMED = 0,
+ /**< Event timer not armed. */
+ RTE_EVENT_TIMER_ARMED = 1,
+ /**< Event timer successfully armed. */
+ RTE_EVENT_TIMER_CANCELED = 2,
+ /**< Event timer successfully canceled. */
+ RTE_EVENT_TIMER_ERROR = -1,
+ /**< Generic event timer error. */
+ RTE_EVENT_TIMER_ERROR_TOOEARLY = -2,
+ /**< Event timer timeout tick value is too small for the adapter to
+ * handle, given its configured resolution.
+ */
+ RTE_EVENT_TIMER_ERROR_TOOLATE = -3,
+ /**< Event timer timeout tick is greater than the maximum timeout.*/
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this structure may change without prior notice
+ *
+ * The generic *rte_event_timer* structure to hold the event timer attributes
+ * for arm and cancel operations.
+ */
+RTE_STD_C11
+struct rte_event_timer {
+ struct rte_event ev;
+ /**<
+ * Expiry event attributes. On successful event timer timeout,
+ * the following attributes will be used to inject the expiry event to
+ * the eventdev:
+ * - event_queue_id: Targeted event queue id for expiry events.
+ * - event_priority: Event priority of the event expiry event in the
+ * event queue relative to other events.
+ * - sched_type: Scheduling type of the expiry event.
+ * - flow_id: Flow id of the expiry event.
+ * - op: RTE_EVENT_OP_NEW
+ * - event_type: RTE_EVENT_TYPE_TIMER
+ */
+ volatile enum rte_event_timer_state state;
+ /**< State of the event timer. */
+ uint64_t timeout_ticks;
+	/**< Expiry timer ticks expressed in number of *timer_tick_ns* from
+	 * now.
+	 * @see struct rte_event_timer_adapter_info::conf::timer_tick_ns
+	 */
+ uint64_t impl_opaque[2];
+	/**< Implementation-specific opaque data.
+	 * An event timer adapter implementation uses this field to hold
+	 * implementation-specific values shared between the arm and cancel
+	 * operations. The application should not modify this field.
+	 */
+ uint8_t user_meta[0];
+ /**< Memory to store user specific metadata.
+ * The event timer adapter implementation should not modify this area.
+ */
+} __rte_cache_aligned;
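+
+/* An illustrative initialization sketch (an assumption, not part of the
+ * original header); the queue id, scheduling type and tick count below are
+ * placeholders:
+ *
+ *	const struct rte_event_timer tim = {
+ *		.ev.op = RTE_EVENT_OP_NEW,
+ *		.ev.queue_id = event_queue_id,
+ *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+ *		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ *		.ev.event_type = RTE_EVENT_TYPE_TIMER,
+ *		.state = RTE_EVENT_TIMER_NOT_ARMED,
+ *		.timeout_ticks = 30,
+ *	};
+ */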
+
+typedef uint16_t (*rte_event_timer_arm_burst_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **tims,
+ uint16_t nb_tims);
+/**< @internal Enable event timers to enqueue timer events upon expiry */
+typedef uint16_t (*rte_event_timer_arm_tmo_tick_burst_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **tims,
+ uint64_t timeout_tick,
+ uint16_t nb_tims);
+/**< @internal Enable event timers with common expiration time */
+typedef uint16_t (*rte_event_timer_cancel_burst_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **tims,
+ uint16_t nb_tims);
+/**< @internal Prevent event timers from enqueuing timer events */
+
+/**
+ * @internal Data structure associated with each event timer adapter.
+ */
+struct rte_event_timer_adapter {
+ rte_event_timer_arm_burst_t arm_burst;
+ /**< Pointer to driver arm_burst function. */
+ rte_event_timer_arm_tmo_tick_burst_t arm_tmo_tick_burst;
+ /**< Pointer to driver arm_tmo_tick_burst function. */
+ rte_event_timer_cancel_burst_t cancel_burst;
+ /**< Pointer to driver cancel function. */
+ struct rte_event_timer_adapter_data *data;
+ /**< Pointer to shared adapter data */
+ const struct rte_event_timer_adapter_ops *ops;
+ /**< Functions exported by adapter driver */
+
+ RTE_STD_C11
+ uint8_t allocated : 1;
+ /**< Flag to indicate that this adapter has been allocated */
+} __rte_cache_aligned;
+
+#define ADAPTER_VALID_OR_ERR_RET(adapter, retval) do { \
+ if (adapter == NULL || !adapter->allocated) \
+ return retval; \
+} while (0)
+
+#define FUNC_PTR_OR_ERR_RET(func, errval) do { \
+ if ((func) == NULL) \
+ return errval; \
+} while (0)
+
+#define FUNC_PTR_OR_NULL_RET_WITH_ERRNO(func, errval) do { \
+ if ((func) == NULL) { \
+ rte_errno = errval; \
+ return NULL; \
+ } \
+} while (0)
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Arm a burst of event timers with separate expiration timeout tick for each
+ * event timer.
+ *
+ * Before calling this function, the application allocates
+ * ``struct rte_event_timer`` objects from a mempool or from huge-page backed
+ * application buffers of the desired size. On successful allocation, the
+ * application updates the ``struct rte_event_timer`` attributes, such as the
+ * expiry event attributes and the timeout ticks from now.
+ * This function submits the event timer arm requests to the event timer
+ * adapter, and on expiry the events will be injected into the designated
+ * event queue.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param evtims
+ * Pointer to an array of objects of type *rte_event_timer* structure.
+ * @param nb_evtims
+ * Number of event timers in the supplied array.
+ *
+ * @return
+ * The number of successfully armed event timers. The return value can be less
+ * than the value of the *nb_evtims* parameter. If the return value is less
+ * than *nb_evtims*, the remaining event timers at the end of *evtims*
+ * are not consumed, and the caller has to take care of them, and rte_errno
+ * is set accordingly. Possible errno values include:
+ * - EINVAL Invalid timer adapter, expiry event queue ID is invalid, or an
+ * expiry event's sched type doesn't match the capabilities of the
+ * destination event queue.
+ * - EAGAIN Specified timer adapter is not running
+ * - EALREADY A timer was encountered that was already armed
+ */
+static inline uint16_t __rte_experimental
+rte_event_timer_arm_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->arm_burst, -EINVAL);
+#endif
+ return adapter->arm_burst(adapter, evtims, nb_evtims);
+}
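+
+/* An illustrative sketch (an assumption, not part of the original header):
+ * arming a burst and handling partial success through rte_errno;
+ * handle_arm_failure() is a hypothetical application helper.
+ *
+ *	uint16_t armed = rte_event_timer_arm_burst(adapter, evtims, nb);
+ *
+ *	if (armed < nb)
+ *		// evtims[armed..nb-1] were not consumed; rte_errno explains
+ *		// why evtims[armed] failed (e.g. EALREADY, EINVAL).
+ *		handle_arm_failure(&evtims[armed], nb - armed, rte_errno);
+ */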
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Arm a burst of event timers with same expiration timeout tick.
+ *
+ * Provides the same functionality as ``rte_event_timer_arm_burst()``, except
+ * that the application can use this API when all the event timers share the
+ * same timeout expiration tick. This specialized function gives an
+ * additional hint to the adapter implementation, which may optimize for it
+ * where possible.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param evtims
+ * Points to an array of objects of type *rte_event_timer* structure.
+ * @param timeout_ticks
+ * The number of ticks in which the timers should expire.
+ * @param nb_evtims
+ * Number of event timers in the supplied array.
+ *
+ * @return
+ * The number of successfully armed event timers. The return value can be less
+ * than the value of the *nb_evtims* parameter. If the return value is less
+ * than *nb_evtims*, the remaining event timers at the end of *evtims*
+ * are not consumed, and the caller has to take care of them, and rte_errno
+ * is set accordingly. Possible errno values include:
+ * - EINVAL Invalid timer adapter, expiry event queue ID is invalid, or an
+ * expiry event's sched type doesn't match the capabilities of the
+ * destination event queue.
+ * - EAGAIN Specified event timer adapter is not running
+ * - EALREADY A timer was encountered that was already armed
+ */
+static inline uint16_t __rte_experimental
+rte_event_timer_arm_tmo_tick_burst(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ const uint64_t timeout_ticks,
+ const uint16_t nb_evtims)
+{
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->arm_tmo_tick_burst, -EINVAL);
+#endif
+ return adapter->arm_tmo_tick_burst(adapter, evtims, timeout_ticks,
+ nb_evtims);
+}
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Cancel a burst of event timers from being scheduled to the event device.
+ *
+ * @param adapter
+ * A pointer to an event timer adapter structure.
+ * @param evtims
+ * Points to an array of objects of type *rte_event_timer* structure
+ * @param nb_evtims
+ * Number of event timer instances in the supplied array.
+ *
+ * @return
+ * The number of successfully canceled event timers. The return value can be
+ * less than the value of the *nb_evtims* parameter. If the return value is
+ * less than *nb_evtims*, the remaining event timers at the end of *evtims*
+ * are not consumed, and the caller has to take care of them, and rte_errno
+ * is set accordingly. Possible errno values include:
+ * - EINVAL Invalid timer adapter identifier
+ * - EAGAIN Specified timer adapter is not running
+ * - EALREADY A timer was encountered that was already canceled
+ */
+static inline uint16_t __rte_experimental
+rte_event_timer_cancel_burst(const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer **evtims,
+ uint16_t nb_evtims)
+{
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ ADAPTER_VALID_OR_ERR_RET(adapter, -EINVAL);
+ FUNC_PTR_OR_ERR_RET(adapter->cancel_burst, -EINVAL);
+#endif
+ return adapter->cancel_burst(adapter, evtims, nb_evtims);
+}
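+
+/* An illustrative follow-up sketch (an assumption): canceling the timers
+ * that were successfully armed in the earlier arm_burst example.
+ *
+ *	uint16_t canceled = rte_event_timer_cancel_burst(adapter, evtims,
+ *							 armed);
+ */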
+
+#endif /* __RTE_EVENT_TIMER_ADAPTER_H__ */
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter_pmd.h b/src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter_pmd.h
new file mode 100644
index 00000000..cf3509dc
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_event_timer_adapter_pmd.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * All rights reserved.
+ */
+
+#ifndef __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+#define __RTE_EVENT_TIMER_ADAPTER_PMD_H__
+
+/**
+ * @file
+ * RTE Event Timer Adapter API (PMD Side)
+ *
+ * @note
+ * This file provides implementation helpers for internal use by PMDs. They
+ * are not intended to be exposed to applications and are not subject to ABI
+ * versioning.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_event_timer_adapter.h"
+
+/*
+ * Definitions of functions exported by an event timer adapter implementation
+ * through *rte_event_timer_adapter_ops* structure supplied in the
+ * *rte_event_timer_adapter* structure associated with an event timer adapter.
+ */
+
+typedef int (*rte_event_timer_adapter_init_t)(
+ struct rte_event_timer_adapter *adapter);
+/**< @internal Event timer adapter implementation setup */
+typedef int (*rte_event_timer_adapter_uninit_t)(
+ struct rte_event_timer_adapter *adapter);
+/**< @internal Event timer adapter implementation teardown */
+typedef int (*rte_event_timer_adapter_start_t)(
+ const struct rte_event_timer_adapter *adapter);
+/**< @internal Start running event timer adapter */
+typedef int (*rte_event_timer_adapter_stop_t)(
+ const struct rte_event_timer_adapter *adapter);
+/**< @internal Stop running event timer adapter */
+typedef void (*rte_event_timer_adapter_get_info_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_info *adapter_info);
+/**< @internal Get contextual information for event timer adapter */
+typedef int (*rte_event_timer_adapter_stats_get_t)(
+ const struct rte_event_timer_adapter *adapter,
+ struct rte_event_timer_adapter_stats *stats);
+/**< @internal Get statistics for event timer adapter */
+typedef int (*rte_event_timer_adapter_stats_reset_t)(
+ const struct rte_event_timer_adapter *adapter);
+/**< @internal Reset statistics for event timer adapter */
+
+/**
+ * @internal Structure containing the functions exported by an event timer
+ * adapter implementation.
+ */
+struct rte_event_timer_adapter_ops {
+ rte_event_timer_adapter_init_t init; /**< Set up adapter */
+ rte_event_timer_adapter_uninit_t uninit;/**< Tear down adapter */
+ rte_event_timer_adapter_start_t start; /**< Start adapter */
+ rte_event_timer_adapter_stop_t stop; /**< Stop adapter */
+ rte_event_timer_adapter_get_info_t get_info;
+ /**< Get info from driver */
+ rte_event_timer_adapter_stats_get_t stats_get;
+ /**< Get adapter statistics */
+ rte_event_timer_adapter_stats_reset_t stats_reset;
+ /**< Reset adapter statistics */
+ rte_event_timer_arm_burst_t arm_burst;
+ /**< Arm one or more event timers */
+ rte_event_timer_arm_tmo_tick_burst_t arm_tmo_tick_burst;
+ /**< Arm event timers with same expiration time */
+ rte_event_timer_cancel_burst_t cancel_burst;
+ /**< Cancel one or more event timers */
+};
+
+/**
+ * @internal Adapter data; structure to be placed in shared memory to be
+ * accessible by various processes in a multi-process configuration.
+ */
+struct rte_event_timer_adapter_data {
+ uint8_t id;
+ /**< Event timer adapter ID */
+ uint8_t event_dev_id;
+ /**< Event device ID */
+ uint32_t socket_id;
+ /**< Socket ID where memory is allocated */
+ uint8_t event_port_id;
+ /**< Optional: event port ID used when the inbuilt port is absent */
+ const struct rte_memzone *mz;
+ /**< Event timer adapter memzone pointer */
+ struct rte_event_timer_adapter_conf conf;
+ /**< Configuration used to configure the adapter. */
+ uint32_t caps;
+ /**< Adapter capabilities */
+	void *adapter_priv;
+	/**< Timer adapter private data */
+	uint8_t service_inited;
+	/**< Service initialization state */
+	uint32_t service_id;
+	/**< Service ID */
+
+ RTE_STD_C11
+ uint8_t started : 1;
+ /**< Flag to indicate adapter started. */
+} __rte_cache_aligned;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __RTE_EVENT_TIMER_ADAPTER_PMD_H__ */
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev.c b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev.c
new file mode 100644
index 00000000..801810ed
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev.c
@@ -0,0 +1,1357 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_ethdev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_eventdev.h"
+#include "rte_eventdev_pmd.h"
+
+struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
+
+struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
+
+static struct rte_eventdev_global eventdev_globals = {
+ .nb_devs = 0
+};
+
+struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
+
+/* Event dev north bound API implementation */
+
+uint8_t
+rte_event_dev_count(void)
+{
+ return rte_eventdev_globals->nb_devs;
+}
+
+int
+rte_event_dev_get_dev_id(const char *name)
+{
+ int i;
+ uint8_t cmp;
+
+ if (!name)
+ return -EINVAL;
+
+ for (i = 0; i < rte_eventdev_globals->nb_devs; i++) {
+ cmp = (strncmp(rte_event_devices[i].data->name, name,
+ RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
+ (rte_event_devices[i].dev ? (strncmp(
+ rte_event_devices[i].dev->driver->name, name,
+ RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
+ if (cmp && (rte_event_devices[i].attached ==
+ RTE_EVENTDEV_ATTACHED))
+ return i;
+ }
+ return -ENODEV;
+}
+
+int
+rte_event_dev_socket_id(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ return dev->data->socket_id;
+}
+
+int
+rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (dev_info == NULL)
+ return -EINVAL;
+
+ memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+ dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
+
+ dev_info->dev = dev->dev;
+ return 0;
+}
+
+int
+rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
+ uint32_t *caps)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
+
+ dev = &rte_eventdevs[dev_id];
+
+ if (caps == NULL)
+ return -EINVAL;
+ *caps = 0;
+
+ return dev->dev_ops->eth_rx_adapter_caps_get ?
+ (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
+ &rte_eth_devices[eth_port_id],
+ caps)
+ : 0;
+}
+
+int __rte_experimental
+rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
+{
+ struct rte_eventdev *dev;
+ const struct rte_event_timer_adapter_ops *ops;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
+ dev = &rte_eventdevs[dev_id];
+
+ if (caps == NULL)
+ return -EINVAL;
+ *caps = 0;
+
+ return dev->dev_ops->timer_adapter_caps_get ?
+ (*dev->dev_ops->timer_adapter_caps_get)(dev,
+ 0,
+ caps,
+ &ops)
+ : 0;
+}
+
+int __rte_experimental
+rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
+ uint32_t *caps)
+{
+ struct rte_eventdev *dev;
+ struct rte_cryptodev *cdev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
+ return -EINVAL;
+
+ dev = &rte_eventdevs[dev_id];
+ cdev = rte_cryptodev_pmd_get_dev(cdev_id);
+
+ if (caps == NULL)
+ return -EINVAL;
+ *caps = 0;
+
+ return dev->dev_ops->crypto_adapter_caps_get ?
+ (*dev->dev_ops->crypto_adapter_caps_get)
+ (dev, cdev, caps) : -ENOTSUP;
+}
+
+static inline int
+rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
+{
+ uint8_t old_nb_queues = dev->data->nb_queues;
+ struct rte_event_queue_conf *queues_cfg;
+ unsigned int i;
+
+ RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
+ dev->data->dev_id);
+
+ /* First time configuration */
+ if (dev->data->queues_cfg == NULL && nb_queues != 0) {
+ /* Allocate memory to store queue configuration */
+ dev->data->queues_cfg = rte_zmalloc_socket(
+ "eventdev->data->queues_cfg",
+ sizeof(dev->data->queues_cfg[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->queues_cfg == NULL) {
+ dev->data->nb_queues = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg, "
+					"nb_queues %u", nb_queues);
+ return -(ENOMEM);
+ }
+ /* Re-configure */
+ } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->queue_release)(dev, i);
+
+ /* Re allocate memory to store queue configuration */
+ queues_cfg = dev->data->queues_cfg;
+ queues_cfg = rte_realloc(queues_cfg,
+ sizeof(queues_cfg[0]) * nb_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (queues_cfg == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
+ " nb_queues %u", nb_queues);
+ return -(ENOMEM);
+ }
+ dev->data->queues_cfg = queues_cfg;
+
+ if (nb_queues > old_nb_queues) {
+ uint8_t new_qs = nb_queues - old_nb_queues;
+
+ memset(queues_cfg + old_nb_queues, 0,
+ sizeof(queues_cfg[0]) * new_qs);
+ }
+ } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->queue_release)(dev, i);
+ }
+
+ dev->data->nb_queues = nb_queues;
+ return 0;
+}
+
+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
+
+static inline int
+rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
+{
+ uint8_t old_nb_ports = dev->data->nb_ports;
+ void **ports;
+ uint16_t *links_map;
+ struct rte_event_port_conf *ports_cfg;
+ unsigned int i;
+
+ RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
+ dev->data->dev_id);
+
+ /* First time configuration */
+ if (dev->data->ports == NULL && nb_ports != 0) {
+ dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
+ sizeof(dev->data->ports[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->ports == NULL) {
+ dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port meta data, "
+					"nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Allocate memory to store port configurations */
+ dev->data->ports_cfg =
+ rte_zmalloc_socket("eventdev->ports_cfg",
+ sizeof(dev->data->ports_cfg[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->ports_cfg == NULL) {
+ dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port cfg, "
+					"nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Allocate memory to store queue to port link connection */
+ dev->data->links_map =
+ rte_zmalloc_socket("eventdev->links_map",
+ sizeof(dev->data->links_map[0]) * nb_ports *
+ RTE_EVENT_MAX_QUEUES_PER_DEV,
+ RTE_CACHE_LINE_SIZE, dev->data->socket_id);
+ if (dev->data->links_map == NULL) {
+ dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to get mem for port_map area, "
+					"nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+ for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
+ dev->data->links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+ } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
+
+ ports = dev->data->ports;
+ ports_cfg = dev->data->ports_cfg;
+ links_map = dev->data->links_map;
+
+ for (i = nb_ports; i < old_nb_ports; i++)
+ (*dev->dev_ops->port_release)(ports[i]);
+
+ /* Realloc memory for ports */
+ ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE);
+ if (ports == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
+ " nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Realloc memory for ports_cfg */
+ ports_cfg = rte_realloc(ports_cfg,
+ sizeof(ports_cfg[0]) * nb_ports,
+ RTE_CACHE_LINE_SIZE);
+ if (ports_cfg == NULL) {
+ RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
+ " nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ /* Realloc memory to store queue to port link connection */
+ links_map = rte_realloc(links_map,
+ sizeof(dev->data->links_map[0]) * nb_ports *
+ RTE_EVENT_MAX_QUEUES_PER_DEV,
+ RTE_CACHE_LINE_SIZE);
+ if (links_map == NULL) {
+ dev->data->nb_ports = 0;
+			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map, "
+					"nb_ports %u", nb_ports);
+ return -(ENOMEM);
+ }
+
+ if (nb_ports > old_nb_ports) {
+ uint8_t new_ps = nb_ports - old_nb_ports;
+ unsigned int old_links_map_end =
+ old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+ unsigned int links_map_end =
+ nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+
+ memset(ports + old_nb_ports, 0,
+ sizeof(ports[0]) * new_ps);
+ memset(ports_cfg + old_nb_ports, 0,
+ sizeof(ports_cfg[0]) * new_ps);
+ for (i = old_links_map_end; i < links_map_end; i++)
+ links_map[i] =
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+ }
+
+ dev->data->ports = ports;
+ dev->data->ports_cfg = ports_cfg;
+ dev->data->links_map = links_map;
+ } else if (dev->data->ports != NULL && nb_ports == 0) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
+
+ ports = dev->data->ports;
+ for (i = nb_ports; i < old_nb_ports; i++)
+ (*dev->dev_ops->port_release)(ports[i]);
+ }
+
+ dev->data->nb_ports = nb_ports;
+ return 0;
+}
+
+int
+rte_event_dev_configure(uint8_t dev_id,
+ const struct rte_event_dev_config *dev_conf)
+{
+ struct rte_eventdev *dev;
+ struct rte_event_dev_info info;
+ int diag;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+ if (dev->data->dev_started) {
+ RTE_EDEV_LOG_ERR(
+ "device %d must be stopped to allow configuration", dev_id);
+ return -EBUSY;
+ }
+
+ if (dev_conf == NULL)
+ return -EINVAL;
+
+ (*dev->dev_ops->dev_infos_get)(dev, &info);
+
+ /* Check dequeue_timeout_ns value is in limit */
+ if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
+ if (dev_conf->dequeue_timeout_ns &&
+ (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+ || dev_conf->dequeue_timeout_ns >
+ info.max_dequeue_timeout_ns)) {
+ RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
+ " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
+ dev_id, dev_conf->dequeue_timeout_ns,
+ info.min_dequeue_timeout_ns,
+ info.max_dequeue_timeout_ns);
+ return -EINVAL;
+ }
+ }
+
+ /* Check nb_events_limit is in limit */
+ if (dev_conf->nb_events_limit > info.max_num_events) {
+ RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
+ dev_id, dev_conf->nb_events_limit, info.max_num_events);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_queues is in limit */
+ if (!dev_conf->nb_event_queues) {
+ RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
+ dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_queues > info.max_event_queues) {
+		RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
+ dev_id, dev_conf->nb_event_queues, info.max_event_queues);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_ports is in limit */
+ if (!dev_conf->nb_event_ports) {
+ RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_ports > info.max_event_ports) {
+		RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
+ dev_id, dev_conf->nb_event_ports, info.max_event_ports);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_queue_flows is in limit */
+ if (!dev_conf->nb_event_queue_flows) {
+ RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
+ return -EINVAL;
+ }
+ if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
+ RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
+ dev_id, dev_conf->nb_event_queue_flows,
+ info.max_event_queue_flows);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_port_dequeue_depth is in limit */
+ if (!dev_conf->nb_event_port_dequeue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
+ dev_id);
+ return -EINVAL;
+ }
+ if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+ (dev_conf->nb_event_port_dequeue_depth >
+ info.max_event_port_dequeue_depth)) {
+ RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
+ dev_id, dev_conf->nb_event_port_dequeue_depth,
+ info.max_event_port_dequeue_depth);
+ return -EINVAL;
+ }
+
+ /* Check nb_event_port_enqueue_depth is in limit */
+ if (!dev_conf->nb_event_port_enqueue_depth) {
+ RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
+ dev_id);
+ return -EINVAL;
+ }
+ if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+ (dev_conf->nb_event_port_enqueue_depth >
+ info.max_event_port_enqueue_depth)) {
+ RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
+ dev_id, dev_conf->nb_event_port_enqueue_depth,
+ info.max_event_port_enqueue_depth);
+ return -EINVAL;
+ }
+
+ /* Copy the dev_conf parameter into the dev structure */
+ memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
+ /* Setup new number of queues and reconfigure device. */
+ diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
+ if (diag != 0) {
+ RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
+ dev_id, diag);
+ return diag;
+ }
+
+ /* Setup new number of ports and reconfigure device. */
+ diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
+ if (diag != 0) {
+ rte_event_dev_queue_config(dev, 0);
+ RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
+ dev_id, diag);
+ return diag;
+ }
+
+ /* Configure the device */
+ diag = (*dev->dev_ops->dev_configure)(dev);
+ if (diag != 0) {
+ RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
+ rte_event_dev_queue_config(dev, 0);
+ rte_event_dev_port_config(dev, 0);
+ }
+
+ dev->data->event_dev_cap = info.event_dev_cap;
+ return diag;
+}
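+
+/* An illustrative configuration sketch (an assumption for documentation, not
+ * part of the original file): derive a working configuration from the
+ * device's advertised limits before configuring it.
+ *
+ *	struct rte_event_dev_info info;
+ *	struct rte_event_dev_config conf = {0};
+ *
+ *	rte_event_dev_info_get(dev_id, &info);
+ *	conf.nb_event_queues = 1;
+ *	conf.nb_event_ports = 1;
+ *	conf.nb_events_limit = info.max_num_events;
+ *	conf.nb_event_queue_flows = info.max_event_queue_flows;
+ *	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
+ *	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
+ *	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
+ *	rte_event_dev_configure(dev_id, &conf);
+ */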
+
+static inline int
+is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ if (queue_id < dev->data->nb_queues && queue_id <
+ RTE_EVENT_MAX_QUEUES_PER_DEV)
+ return 1;
+ else
+ return 0;
+}
+
+int
+rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (queue_conf == NULL)
+ return -EINVAL;
+
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
+ memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
+ (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
+ return 0;
+}
+
+static inline int
+is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
+{
+ if (queue_conf &&
+ !(queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ (queue_conf->schedule_type
+ == RTE_SCHED_TYPE_ATOMIC)
+ ))
+ return 1;
+ else
+ return 0;
+}
+
+static inline int
+is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
+{
+ if (queue_conf &&
+ !(queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
+ ((queue_conf->event_queue_cfg &
+ RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
+ (queue_conf->schedule_type
+ == RTE_SCHED_TYPE_ORDERED)
+ ))
+ return 1;
+ else
+ return 0;
+}
+
+
+int
+rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct rte_eventdev *dev;
+ struct rte_event_queue_conf def_conf;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ /* Check nb_atomic_flows limit */
+ if (is_valid_atomic_queue_conf(queue_conf)) {
+ if (queue_conf->nb_atomic_flows == 0 ||
+ queue_conf->nb_atomic_flows >
+ dev->data->dev_conf.nb_event_queue_flows) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
+ dev_id, queue_id, queue_conf->nb_atomic_flows,
+ dev->data->dev_conf.nb_event_queue_flows);
+ return -EINVAL;
+ }
+ }
+
+ /* Check nb_atomic_order_sequences limit */
+ if (is_valid_ordered_queue_conf(queue_conf)) {
+ if (queue_conf->nb_atomic_order_sequences == 0 ||
+ queue_conf->nb_atomic_order_sequences >
+ dev->data->dev_conf.nb_event_queue_flows) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
+ dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
+ dev->data->dev_conf.nb_event_queue_flows);
+ return -EINVAL;
+ }
+ }
+
+ if (dev->data->dev_started) {
+ RTE_EDEV_LOG_ERR(
+ "device %d must be stopped to allow queue setup", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
+
+ if (queue_conf == NULL) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
+ -ENOTSUP);
+ (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
+ queue_conf = &def_conf;
+ }
+
+ dev->data->queues_cfg[queue_id] = *queue_conf;
+ return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
+}
+
+static inline int
+is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
+{
+ if (port_id < dev->data->nb_ports)
+ return 1;
+ else
+ return 0;
+}
+
+int
+rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (port_conf == NULL)
+ return -EINVAL;
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
+ memset(port_conf, 0, sizeof(struct rte_event_port_conf));
+ (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
+ return 0;
+}
+
+int
+rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct rte_eventdev *dev;
+ struct rte_event_port_conf def_conf;
+ int diag;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ /* Check new_event_threshold limit */
+ if ((port_conf && !port_conf->new_event_threshold) ||
+ (port_conf && port_conf->new_event_threshold >
+ dev->data->dev_conf.nb_events_limit)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
+ dev_id, port_id, port_conf->new_event_threshold,
+ dev->data->dev_conf.nb_events_limit);
+ return -EINVAL;
+ }
+
+ /* Check dequeue_depth limit */
+ if ((port_conf && !port_conf->dequeue_depth) ||
+ (port_conf && port_conf->dequeue_depth >
+ dev->data->dev_conf.nb_event_port_dequeue_depth)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
+ dev_id, port_id, port_conf->dequeue_depth,
+ dev->data->dev_conf.nb_event_port_dequeue_depth);
+ return -EINVAL;
+ }
+
+ /* Check enqueue_depth limit */
+ if ((port_conf && !port_conf->enqueue_depth) ||
+ (port_conf && port_conf->enqueue_depth >
+ dev->data->dev_conf.nb_event_port_enqueue_depth)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
+ dev_id, port_id, port_conf->enqueue_depth,
+ dev->data->dev_conf.nb_event_port_enqueue_depth);
+ return -EINVAL;
+ }
+
+ if (port_conf && port_conf->disable_implicit_release &&
+ !(dev->data->event_dev_cap &
+ RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
+ RTE_EDEV_LOG_ERR(
+ "dev%d port%d Implicit release disable not supported",
+ dev_id, port_id);
+ return -EINVAL;
+ }
+
+ if (dev->data->dev_started) {
+ RTE_EDEV_LOG_ERR(
+ "device %d must be stopped to allow port setup", dev_id);
+ return -EBUSY;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
+
+ if (port_conf == NULL) {
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
+ -ENOTSUP);
+ (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
+ port_conf = &def_conf;
+ }
+
+ dev->data->ports_cfg[port_id] = *port_conf;
+
+ diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
+
+	/* Unlink all the queues from this port (default state after setup) */
+ if (!diag)
+ diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
+
+ if (diag < 0)
+ return diag;
+
+ return 0;
+}
+
+int
+rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+ uint32_t *attr_value)
+{
+ struct rte_eventdev *dev;
+
+ if (!attr_value)
+ return -EINVAL;
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ switch (attr_id) {
+ case RTE_EVENT_DEV_ATTR_PORT_COUNT:
+ *attr_value = dev->data->nb_ports;
+ break;
+ case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
+ *attr_value = dev->data->nb_queues;
+ break;
+ case RTE_EVENT_DEV_ATTR_STARTED:
+ *attr_value = dev->data->dev_started;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
+ uint32_t *attr_value)
+{
+ struct rte_eventdev *dev;
+
+ if (!attr_value)
+ return -EINVAL;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ switch (attr_id) {
+ case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
+ *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
+ break;
+ case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
+ *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
+ break;
+ case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
+ *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
+ break;
+ default:
+ return -EINVAL;
+	}
+ return 0;
+}
+
+int
+rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+ uint32_t *attr_value)
+{
+ struct rte_event_queue_conf *conf;
+ struct rte_eventdev *dev;
+
+ if (!attr_value)
+ return -EINVAL;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ if (!is_valid_queue(dev, queue_id)) {
+ RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
+ return -EINVAL;
+ }
+
+ conf = &dev->data->queues_cfg[queue_id];
+
+ switch (attr_id) {
+ case RTE_EVENT_QUEUE_ATTR_PRIORITY:
+ *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
+ *attr_value = conf->priority;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
+ *attr_value = conf->nb_atomic_flows;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
+ *attr_value = conf->nb_atomic_order_sequences;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
+ *attr_value = conf->event_queue_cfg;
+ break;
+ case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
+ if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
+ return -EOVERFLOW;
+
+ *attr_value = conf->schedule_type;
+ break;
+ default:
+ return -EINVAL;
+	}
+ return 0;
+}
+
+int
+rte_event_port_link(uint8_t dev_id, uint8_t port_id,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct rte_eventdev *dev;
+ uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint16_t *links_map;
+ int i, diag;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
+ dev = &rte_eventdevs[dev_id];
+
+ if (*dev->dev_ops->port_link == NULL) {
+ RTE_PMD_DEBUG_TRACE("Function not supported\n");
+ rte_errno = -ENOTSUP;
+ return 0;
+ }
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (queues == NULL) {
+ for (i = 0; i < dev->data->nb_queues; i++)
+ queues_list[i] = i;
+
+ queues = queues_list;
+ nb_links = dev->data->nb_queues;
+ }
+
+ if (priorities == NULL) {
+ for (i = 0; i < nb_links; i++)
+ priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
+ priorities = priorities_list;
+ }
+
+ for (i = 0; i < nb_links; i++)
+ if (queues[i] >= dev->data->nb_queues) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
+ queues, priorities, nb_links);
+ if (diag < 0)
+ return diag;
+
+ links_map = dev->data->links_map;
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ for (i = 0; i < diag; i++)
+ links_map[queues[i]] = (uint8_t)priorities[i];
+
+ return diag;
+}
+
+int
+rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct rte_eventdev *dev;
+ uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ int i, diag, j;
+ uint16_t *links_map;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
+ dev = &rte_eventdevs[dev_id];
+
+ if (*dev->dev_ops->port_unlink == NULL) {
+ RTE_PMD_DEBUG_TRACE("Function not supported\n");
+ rte_errno = -ENOTSUP;
+ return 0;
+ }
+
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ links_map = dev->data->links_map;
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+
+ if (queues == NULL) {
+ j = 0;
+ for (i = 0; i < dev->data->nb_queues; i++) {
+ if (links_map[i] !=
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
+ all_queues[j] = i;
+ j++;
+ }
+ }
+ queues = all_queues;
+ } else {
+ for (j = 0; j < nb_unlinks; j++) {
+ if (links_map[queues[j]] ==
+ EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
+ break;
+ }
+ }
+
+ nb_unlinks = j;
+ for (i = 0; i < nb_unlinks; i++)
+ if (queues[i] >= dev->data->nb_queues) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
+ queues, nb_unlinks);
+
+ if (diag < 0)
+ return diag;
+
+ for (i = 0; i < diag; i++)
+ links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
+
+ return diag;
+}
+
+int
+rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint8_t priorities[])
+{
+ struct rte_eventdev *dev;
+ uint16_t *links_map;
+ int i, count = 0;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ if (!is_valid_port(dev, port_id)) {
+ RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
+ return -EINVAL;
+ }
+
+ links_map = dev->data->links_map;
+ /* Point links_map to this port specific area */
+ links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ for (i = 0; i < dev->data->nb_queues; i++) {
+ if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
+ queues[count] = i;
+ priorities[count] = (uint8_t)links_map[i];
+ ++count;
+ }
+ }
+ return count;
+}
+
+int
+rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
+
+ if (timeout_ticks == NULL)
+ return -EINVAL;
+
+ return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
+}
+
+int
+rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ if (service_id == NULL)
+ return -EINVAL;
+
+ if (dev->data->service_inited)
+ *service_id = dev->data->service_id;
+
+ return dev->data->service_inited ? 0 : -ESRCH;
+}
+
+int
+rte_event_dev_dump(uint8_t dev_id, FILE *f)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
+
+ (*dev->dev_ops->dump)(dev, f);
+ return 0;
+
+}
+
+static int
+xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ if (dev->dev_ops->xstats_get_names != NULL)
+ return (*dev->dev_ops->xstats_get_names)(dev, mode,
+ queue_port_id,
+ NULL, NULL, 0);
+ return 0;
+}
+
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+ const int cnt_expected_entries = xstats_get_count(dev_id, mode,
+ queue_port_id);
+ if (xstats_names == NULL || cnt_expected_entries < 0 ||
+ (int)size < cnt_expected_entries)
+ return cnt_expected_entries;
+
+ /* dev_id checked above */
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ if (dev->dev_ops->xstats_get_names != NULL)
+ return (*dev->dev_ops->xstats_get_names)(dev, mode,
+ queue_port_id, xstats_names, ids, size);
+
+ return -ENOTSUP;
+}
+
+/* retrieve eventdev extended statistics */
+int
+rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id, const unsigned int ids[],
+ uint64_t values[], unsigned int n)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_get != NULL)
+ return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
+ ids, values, n);
+ return -ENOTSUP;
+}
+
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+ unsigned int *id)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+ unsigned int temp = -1;
+
+ if (id != NULL)
+ *id = (unsigned int)-1;
+ else
+ id = &temp; /* ensure driver never gets a NULL value */
+
+ /* implemented by driver */
+ if (dev->dev_ops->xstats_get_by_name != NULL)
+ return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
+ return -ENOTSUP;
+}
+
+int rte_event_dev_xstats_reset(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ if (dev->dev_ops->xstats_reset != NULL)
+ return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
+ ids, nb_ids);
+ return -ENOTSUP;
+}
+
+int rte_event_dev_selftest(uint8_t dev_id)
+{
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ if (dev->dev_ops->dev_selftest != NULL)
+ return (*dev->dev_ops->dev_selftest)();
+ return -ENOTSUP;
+}
+
+int
+rte_event_dev_start(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+ int diag;
+
+ RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
+
+ if (dev->data->dev_started != 0) {
+ RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already started",
+ dev_id);
+ return 0;
+ }
+
+ diag = (*dev->dev_ops->dev_start)(dev);
+ if (diag == 0)
+ dev->data->dev_started = 1;
+ else
+ return diag;
+
+ return 0;
+}
+
+int
+rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
+ eventdev_stop_flush_t callback, void *userdata)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+
+ dev->dev_ops->dev_stop_flush = callback;
+ dev->data->dev_stop_flush_arg = userdata;
+
+ return 0;
+}
+
+void
+rte_event_dev_stop(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
+
+ RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
+
+ if (dev->data->dev_started == 0) {
+ RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 "already stopped",
+ dev_id);
+ return;
+ }
+
+ dev->data->dev_started = 0;
+ (*dev->dev_ops->dev_stop)(dev);
+}
+
+int
+rte_event_dev_close(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+ dev = &rte_eventdevs[dev_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
+
+ /* Device must be stopped before it can be closed */
+ if (dev->data->dev_started == 1) {
+ RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
+ dev_id);
+ return -EBUSY;
+ }
+
+ return (*dev->dev_ops->dev_close)(dev);
+}
+
+static inline int
+rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
+ int socket_id)
+{
+ char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+ int n;
+
+ /* Generate memzone name */
+ n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
+ if (n >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ mz = rte_memzone_reserve(mz_name,
+ sizeof(struct rte_eventdev_data),
+ socket_id, 0);
+ } else
+ mz = rte_memzone_lookup(mz_name);
+
+ if (mz == NULL)
+ return -ENOMEM;
+
+ *data = mz->addr;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ memset(*data, 0, sizeof(struct rte_eventdev_data));
+
+ return 0;
+}
+
+static inline uint8_t
+rte_eventdev_find_free_device_index(void)
+{
+ uint8_t dev_id;
+
+ for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
+ if (rte_eventdevs[dev_id].attached ==
+ RTE_EVENTDEV_DETACHED)
+ return dev_id;
+ }
+ return RTE_EVENT_MAX_DEVS;
+}
+
+struct rte_eventdev *
+rte_event_pmd_allocate(const char *name, int socket_id)
+{
+ struct rte_eventdev *eventdev;
+ uint8_t dev_id;
+
+ if (rte_event_pmd_get_named_dev(name) != NULL) {
+ RTE_EDEV_LOG_ERR("Event device with name %s already "
+ "allocated!", name);
+ return NULL;
+ }
+
+ dev_id = rte_eventdev_find_free_device_index();
+ if (dev_id == RTE_EVENT_MAX_DEVS) {
+ RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
+ return NULL;
+ }
+
+ eventdev = &rte_eventdevs[dev_id];
+
+ if (eventdev->data == NULL) {
+ struct rte_eventdev_data *eventdev_data = NULL;
+
+ int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
+ socket_id);
+
+ if (retval < 0 || eventdev_data == NULL)
+ return NULL;
+
+ eventdev->data = eventdev_data;
+
+ snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
+ "%s", name);
+
+ eventdev->data->dev_id = dev_id;
+ eventdev->data->socket_id = socket_id;
+ eventdev->data->dev_started = 0;
+
+ eventdev->attached = RTE_EVENTDEV_ATTACHED;
+
+ eventdev_globals.nb_devs++;
+ }
+
+ return eventdev;
+}
+
+int
+rte_event_pmd_release(struct rte_eventdev *eventdev)
+{
+ int ret;
+ char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ const struct rte_memzone *mz;
+
+ if (eventdev == NULL)
+ return -EINVAL;
+
+ eventdev->attached = RTE_EVENTDEV_DETACHED;
+ eventdev_globals.nb_devs--;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rte_free(eventdev->data->dev_private);
+
+ /* Generate memzone name */
+ ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
+ eventdev->data->dev_id);
+ if (ret >= (int)sizeof(mz_name))
+ return -EINVAL;
+
+ mz = rte_memzone_lookup(mz_name);
+ if (mz == NULL)
+ return -ENOMEM;
+
+ ret = rte_memzone_free(mz);
+ if (ret)
+ return ret;
+ }
+
+ eventdev->data = NULL;
+ return 0;
+}
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev.h b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev.h
new file mode 100644
index 00000000..b6fd6ee7
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev.h
@@ -0,0 +1,1920 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc.
+ * Copyright(c) 2016-2018 Intel Corporation.
+ * Copyright 2016 NXP
+ * All rights reserved.
+ */
+
+#ifndef _RTE_EVENTDEV_H_
+#define _RTE_EVENTDEV_H_
+
+/**
+ * @file
+ *
+ * RTE Event Device API
+ *
+ * In a polling model, lcores poll ethdev ports and associated Rx queues
+ * directly to look for packets. In an event driven model, by contrast, lcores
+ * call the scheduler that selects packets for them based on programmer
+ * specified criteria. The eventdev library adds support for the event driven
+ * programming model, which offers applications automatic multicore scaling,
+ * dynamic load balancing, pipelining, packet ingress order maintenance and
+ * synchronization services to simplify application packet processing.
+ *
+ * The Event Device API is composed of two parts:
+ *
+ * - The application-oriented Event API that includes functions to set up
+ *   an event device (configure it, set up its queues and ports, and start
+ *   it), to establish the links between queues and ports, to receive events,
+ *   and so on.
+ *
+ * - The driver-oriented Event API that exports a function allowing
+ *   an event Poll Mode Driver (PMD) to register itself as
+ *   an event device driver.
+ *
+ * Event device components:
+ *
+ * +-----------------+
+ * | +-------------+ |
+ * +-------+ | | flow 0 | |
+ * |Packet | | +-------------+ |
+ * |event | | +-------------+ |
+ * | | | | flow 1 | |port_link(port0, queue0)
+ * +-------+ | +-------------+ | | +--------+
+ * +-------+ | +-------------+ o-----v-----o |dequeue +------+
+ * |Crypto | | | flow n | | | event +------->|Core 0|
+ * |work | | +-------------+ o----+ | port 0 | | |
+ * |done ev| | event queue 0 | | +--------+ +------+
+ * +-------+ +-----------------+ |
+ * +-------+ |
+ * |Timer | +-----------------+ | +--------+
+ * |expiry | | +-------------+ | +------o |dequeue +------+
+ * |event | | | flow 0 | o-----------o event +------->|Core 1|
+ * +-------+ | +-------------+ | +----o port 1 | | |
+ * Event enqueue | +-------------+ | | +--------+ +------+
+ * o-------------> | | flow 1 | | |
+ * enqueue( | +-------------+ | |
+ * queue_id, | | | +--------+ +------+
+ * flow_id, | +-------------+ | | | |dequeue |Core 2|
+ * sched_type, | | flow n | o-----------o event +------->| |
+ * event_type, | +-------------+ | | | port 2 | +------+
+ * subev_type, | event queue 1 | | +--------+
+ * event) +-----------------+ | +--------+
+ * | | |dequeue +------+
+ * +-------+ +-----------------+ | | event +------->|Core n|
+ * |Core | | +-------------+ o-----------o port n | | |
+ * |(SW) | | | flow 0 | | | +--------+ +--+---+
+ * |event | | +-------------+ | | |
+ * +-------+ | +-------------+ | | |
+ * ^ | | flow 1 | | | |
+ * | | +-------------+ o------+ |
+ * | | +-------------+ | |
+ * | | | flow n | | |
+ * | | +-------------+ | |
+ * | | event queue n | |
+ * | +-----------------+ |
+ * | |
+ * +-----------------------------------------------------------+
+ *
+ * Event device: A hardware or software-based event scheduler.
+ *
+ * Event: A unit of scheduling that encapsulates a packet or another data
+ * type, such as a SW generated event from the CPU, a crypto work completion
+ * notification, a timer expiry notification, etc., as well as metadata.
+ * The metadata includes the flow ID, scheduling type, event priority,
+ * event_type, sub_event_type, etc.
+ *
+ * Event queue: A queue containing events that are scheduled by the event dev.
+ * An event queue contains events of different flows associated with scheduling
+ * types, such as atomic, ordered, or parallel.
+ *
+ * Event port: An application's interface into the event dev for enqueue and
+ * dequeue operations. Each event port can be linked with one or more
+ * event queues for dequeue operations.
+ *
+ * By default, all the functions of the Event Device API exported by a PMD
+ * are lock-free functions which are assumed not to be invoked in parallel on
+ * different logical cores to work on the same target object. For instance,
+ * the dequeue function of a PMD cannot be invoked in parallel on two logical
+ * cores to operate on the same event port. Of course, this function
+ * can be invoked in parallel by different logical cores on different ports.
+ * It is the responsibility of the upper level application to enforce this rule.
+ *
+ * In all functions of the Event API, the Event device is
+ * designated by an integer >= 0 named the device identifier *dev_id*.
+ *
+ * At the Event driver level, Event devices are represented by a generic
+ * data structure of type *rte_event_dev*.
+ *
+ * Event devices are dynamically registered during the PCI/SoC device probing
+ * phase performed at EAL initialization time.
+ * When an Event device is being probed, a *rte_event_dev* structure and
+ * a new device identifier are allocated for that device. Then, the
+ * event_dev_init() function supplied by the Event driver matching the probed
+ * device is invoked to properly initialize the device.
+ *
+ * The role of the device init function consists of resetting the hardware or
+ * software event driver implementations.
+ *
+ * If the device init operation is successful, the correspondence between
+ * the device identifier assigned to the new device and its associated
+ * *rte_event_dev* structure is effectively registered.
+ * Otherwise, both the *rte_event_dev* structure and the device identifier are
+ * freed.
+ *
+ * The functions exported by the application Event API to set up a device
+ * designated by its device identifier must be invoked in the following order
+ * (a short sketch follows the list):
+ * - rte_event_dev_configure()
+ * - rte_event_queue_setup()
+ * - rte_event_port_setup()
+ * - rte_event_port_link()
+ * - rte_event_dev_start()
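+ *
+ * For example, a minimal bring-up sketch for device 0 with one queue and one
+ * port, using the default queue/port configuration (illustrative values,
+ * error checks omitted):
+ * \code{.c}
+ *	struct rte_event_dev_info info;
+ *	struct rte_event_dev_config cfg = {0};
+ *
+ *	rte_event_dev_info_get(0, &info);
+ *	cfg.nb_event_queues = 1;
+ *	cfg.nb_event_ports = 1;
+ *	cfg.nb_events_limit = info.max_num_events;
+ *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
+ *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
+ *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
+ *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
+ *	rte_event_dev_configure(0, &cfg);
+ *	rte_event_queue_setup(0, 0, NULL);
+ *	rte_event_port_setup(0, 0, NULL);
+ *	rte_event_port_link(0, 0, NULL, NULL, 0);
+ *	rte_event_dev_start(0);
+ * \endcode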
+ *
+ * Then, the application can invoke, in any order, the functions
+ * exported by the Event API to schedule events, dequeue events, enqueue
+ * events, establish or break event queue to event port links, and so on.
+ *
+ * The application may use rte_event_[queue/port]_default_conf_get() to get
+ * the default configuration and then set up an event queue or event port by
+ * overriding a few default values.
+ *
+ * If the application wants to change the configuration (i.e. call
+ * rte_event_dev_configure(), rte_event_queue_setup(), or
+ * rte_event_port_setup()), it must call rte_event_dev_stop() first to stop the
+ * device and then do the reconfiguration before calling rte_event_dev_start()
+ * again. The schedule, enqueue and dequeue functions should not be invoked
+ * when the device is stopped.
+ *
+ * Finally, an application can close an Event device by invoking the
+ * rte_event_dev_close() function.
+ *
+ * Each function of the application Event API invokes a specific function
+ * of the PMD that controls the target device designated by its device
+ * identifier.
+ *
+ * For this purpose, all device-specific functions of an Event driver are
+ * supplied through a set of pointers contained in a generic structure of type
+ * *event_dev_ops*.
+ * The address of the *event_dev_ops* structure is stored in the *rte_event_dev*
+ * structure by the device init function of the Event driver, which is
+ * invoked during the PCI/SoC device probing phase, as explained earlier.
+ *
+ * In other words, each function of the Event API simply retrieves the
+ * *rte_event_dev* structure associated with the device identifier and
+ * performs an indirect invocation of the corresponding driver function
+ * supplied in the *event_dev_ops* structure of the *rte_event_dev* structure.
+ *
+ * For performance reasons, the address of the fast-path functions of the
+ * Event driver is not contained in the *event_dev_ops* structure.
+ * Instead, they are directly stored at the beginning of the *rte_event_dev*
+ * structure to avoid an extra indirect memory access during their invocation.
+ *
+ * RTE event device drivers do not use interrupts for enqueue or dequeue
+ * operation. Instead, Event drivers export Poll-Mode enqueue and dequeue
+ * functions to applications.
+ *
+ * Events are injected into the event device through the *enqueue* operation
+ * by event producers in the system. The typical event producers are the
+ * ethdev subsystem generating packet events, the CPU (SW) generating events
+ * based on different stages of application processing, the cryptodev
+ * generating crypto work completion notifications, etc.
+ *
+ * The *dequeue* operation gets one or more events from the event ports.
+ * The application processes the events and, at an intermediate stage of event
+ * processing, sends them to a downstream event queue through
+ * rte_event_enqueue_burst(). At the final stage, the application may hand
+ * them to a different subsystem, for example using the ethdev
+ * rte_eth_tx_burst() API to send the packet/event on the wire.
+ *
+ * The point at which events are scheduled to ports depends on the device.
+ * For hardware devices, scheduling occurs asynchronously without any software
+ * intervention. Software schedulers can either be distributed
+ * (each worker thread schedules events to its own port) or centralized
+ * (a dedicated thread schedules to all ports). Distributed software schedulers
+ * perform the scheduling in rte_event_dequeue_burst(), whereas centralized
+ * scheduler logic needs a dedicated service core for scheduling.
+ * If the RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED capability flag is not set,
+ * the device is centralized and thus needs a dedicated scheduling
+ * thread that repeatedly calls a software specific scheduling function.
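+ *
+ * For a centralized eventdev, a minimal sketch to start the scheduling
+ * service might look as follows (uses the rte_service.h API; mapping the
+ * service to a service lcore is omitted, illustrative only):
+ * \code{.c}
+ *	uint32_t service_id;
+ *
+ *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0)
+ *		rte_service_runstate_set(service_id, 1);
+ * \endcode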
+ *
+ * An event driven worker thread has the following typical workflow on the
+ * fastpath:
+ * \code{.c}
+ * while (1) {
+ * rte_event_dequeue_burst(...);
+ * (event processing)
+ * rte_event_enqueue_burst(...);
+ * }
+ * \endcode
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_memory.h>
+#include <rte_errno.h>
+
+struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
+struct rte_event;
+
+/* Event device capability bitmap flags */
+#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
+/**< Event scheduling prioritization is based on the priority associated with
+ * each event queue.
+ *
+ * @see rte_event_queue_setup()
+ */
+#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
+/**< Event scheduling prioritization is based on the priority associated with
+ * each event. Priority of each event is supplied in *rte_event* structure
+ * on each enqueue operation.
+ *
+ * @see rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
+/**< Event device operates in distributed scheduling mode.
+ * In distributed scheduling mode, event scheduling happens in HW or
+ * rte_event_dequeue_burst() or the combination of these two.
+ * If the flag is not set then the eventdev is centralized and thus needs a
+ * dedicated service core that acts as a scheduling thread.
+ *
+ * @see rte_event_dequeue_burst()
+ */
+#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
+/**< Event device is capable of enqueuing events of any type to any queue.
+ * If this capability is not set, the queue only supports events of the
+ * *RTE_SCHED_TYPE_* type that it was created with.
+ *
+ * @see RTE_SCHED_TYPE_* values
+ */
+#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
+/**< Event device is capable of operating in burst mode for enqueue (forward,
+ * release) and dequeue operations. If this capability is not set, the
+ * application can still use rte_event_dequeue_burst() and
+ * rte_event_enqueue_burst(), but the PMD accepts only one event at a time.
+ *
+ * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
+/**< Event device ports support disabling the implicit release feature, in
+ * which the port will release all unreleased events in its dequeue operation.
+ * If this capability is set and the port is configured with implicit release
+ * disabled, the application is responsible for explicitly releasing events
+ * using either the RTE_EVENT_OP_FORWARD or the RTE_EVENT_OP_RELEASE event
+ * enqueue operations.
+ *
+ * @see rte_event_dequeue_burst() rte_event_enqueue_burst()
+ */
+
+#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
+/**< Event device is capable of operating in non-sequential mode. The path
+ * of the event need not be sequential; the application can change
+ * the path of an event at runtime. If the flag is not set, then each event
+ * will follow a path from queue 0 to queue 1 to queue 2, etc. If the flag is
+ * set, events may be sent to queues in any order. If the flag is not set, the
+ * eventdev will return an error when the application enqueues an event for a
+ * qid which is not the next in the sequence.
+ */
+
+#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
+/**< Event device is capable of configuring the queue/port link at runtime.
+ * If the flag is not set, the eventdev queue/port link can only be
+ * configured during initialization.
+ */
+
+#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
+/**< Event device is capable of setting up links between multiple queues
+ * and a single port. If the flag is not set, the eventdev can only map a
+ * single queue to each port, or map a single queue to many ports.
+ */
+
+/* Event device priority levels */
+#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
+/**< Highest priority expressed across eventdev subsystem
+ * @see rte_event_queue_setup(), rte_event_enqueue_burst()
+ * @see rte_event_port_link()
+ */
+#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
+/**< Normal priority expressed across eventdev subsystem
+ * @see rte_event_queue_setup(), rte_event_enqueue_burst()
+ * @see rte_event_port_link()
+ */
+#define RTE_EVENT_DEV_PRIORITY_LOWEST 255
+/**< Lowest priority expressed across eventdev subsystem
+ * @see rte_event_queue_setup(), rte_event_enqueue_burst()
+ * @see rte_event_port_link()
+ */
+
+/**
+ * Get the total number of event devices that have been successfully
+ * initialised.
+ *
+ * @return
+ * The total number of usable event devices.
+ */
+uint8_t
+rte_event_dev_count(void);
+
+/**
+ * Get the device identifier for the named event device.
+ *
+ * @param name
+ * Event device name to select the event device identifier.
+ *
+ * @return
+ * Returns event device identifier on success.
+ * - <0: Failure to find named event device.
+ */
+int
+rte_event_dev_get_dev_id(const char *name);
+
+/**
+ * Return the NUMA socket to which a device is connected.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @return
+ * The NUMA socket id to which the device is connected or
+ * a default of zero if the socket could not be determined.
+ *   - -EINVAL: dev_id value is out of range.
+ */
+int
+rte_event_dev_socket_id(uint8_t dev_id);
+
+/**
+ * Event device information
+ */
+struct rte_event_dev_info {
+ const char *driver_name; /**< Event driver name */
+ struct rte_device *dev; /**< Device information */
+ uint32_t min_dequeue_timeout_ns;
+ /**< Minimum supported global dequeue timeout(ns) by this device */
+ uint32_t max_dequeue_timeout_ns;
+ /**< Maximum supported global dequeue timeout(ns) by this device */
+ uint32_t dequeue_timeout_ns;
+ /**< Configured global dequeue timeout(ns) for this device */
+ uint8_t max_event_queues;
+ /**< Maximum event_queues supported by this device */
+ uint32_t max_event_queue_flows;
+ /**< Maximum supported flows in an event queue by this device*/
+ uint8_t max_event_queue_priority_levels;
+ /**< Maximum number of event queue priority levels by this device.
+ * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
+ */
+ uint8_t max_event_priority_levels;
+ /**< Maximum number of event priority levels by this device.
+ * Valid when the device has RTE_EVENT_DEV_CAP_EVENT_QOS capability
+ */
+ uint8_t max_event_ports;
+ /**< Maximum number of event ports supported by this device */
+ uint8_t max_event_port_dequeue_depth;
+	/**< Maximum number of events that can be dequeued at a time from an
+ * event port by this device.
+ * A device that does not support bulk dequeue will set this as 1.
+ */
+ uint32_t max_event_port_enqueue_depth;
+	/**< Maximum number of events that can be enqueued at a time from an
+ * event port by this device.
+ * A device that does not support bulk enqueue will set this as 1.
+ */
+ int32_t max_num_events;
+ /**< A *closed system* event dev has a limit on the number of events it
+ * can manage at a time. An *open system* event dev does not have a
+ * limit and will specify this as -1.
+ */
+ uint32_t event_dev_cap;
+ /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+};
+
+/**
+ * Retrieve the contextual information of an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param[out] dev_info
+ * A pointer to a structure of type *rte_event_dev_info* to be filled with the
+ * contextual information of the device.
+ *
+ * @return
+ * - 0: Success, driver updates the contextual information of the event device
+ * - <0: Error code returned by the driver info get function.
+ *
+ */
+int
+rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
+
+/**
+ * The count of ports.
+ */
+#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
+/**
+ * The count of queues.
+ */
+#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
+/**
+ * The status of the device, zero for stopped, non-zero for started.
+ */
+#define RTE_EVENT_DEV_ATTR_STARTED 2
+
+/**
+ * Get an attribute from a device.
+ *
+ * @param dev_id Eventdev id
+ * @param attr_id The attribute ID to retrieve
+ * @param[out] attr_value A pointer that will be filled in with the attribute
+ * value if successful.
+ *
+ * @return
+ * - 0: Successfully retrieved attribute value
+ * - -EINVAL: Invalid device or *attr_id* provided, or *attr_value* is NULL
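+ *
+ * A short usage sketch, reading the configured port count:
+ * \code{.c}
+ *	uint32_t nb_ports;
+ *	int ret = rte_event_dev_attr_get(dev_id,
+ *			RTE_EVENT_DEV_ATTR_PORT_COUNT, &nb_ports);
+ * \endcode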
+ */
+int
+rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
+ uint32_t *attr_value);
+
+
+/* Event device configuration bitmap flags */
+#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
+/**< Override the global *dequeue_timeout_ns* and use per dequeue timeout in ns.
+ * @see rte_event_dequeue_timeout_ticks(), rte_event_dequeue_burst()
+ */
+
+/** Event device configuration structure */
+struct rte_event_dev_config {
+ uint32_t dequeue_timeout_ns;
+ /**< rte_event_dequeue_burst() timeout on this device.
+	 * This value should be in the range of *min_dequeue_timeout_ns* and
+	 * *max_dequeue_timeout_ns*, which were previously provided by
+	 * rte_event_dev_info_get().
+	 * The value 0 is allowed, in which case the default dequeue timeout is used.
+ * @see RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ */
+ int32_t nb_events_limit;
+ /**< In a *closed system* this field is the limit on maximum number of
+ * events that can be inflight in the eventdev at a given time. The
+ * limit is required to ensure that the finite space in a closed system
+ * is not overwhelmed. The value cannot exceed the *max_num_events*
+ * as provided by rte_event_dev_info_get().
+ * This value should be set to -1 for *open system*.
+ */
+ uint8_t nb_event_queues;
+ /**< Number of event queues to configure on this device.
+	 * This value cannot exceed the *max_event_queues* which was previously
+	 * provided in rte_event_dev_info_get().
+ */
+ uint8_t nb_event_ports;
+ /**< Number of event ports to configure on this device.
+	 * This value cannot exceed the *max_event_ports* which was previously
+	 * provided in rte_event_dev_info_get().
+ */
+ uint32_t nb_event_queue_flows;
+ /**< Number of flows for any event queue on this device.
+	 * This value cannot exceed the *max_event_queue_flows* which was
+	 * previously provided in rte_event_dev_info_get().
+ */
+ uint32_t nb_event_port_dequeue_depth;
+	/**< Maximum number of events that can be dequeued at a time from an
+	 * event port by this device.
+	 * This value cannot exceed the *max_event_port_dequeue_depth*
+	 * which was previously provided in rte_event_dev_info_get().
+ * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
+ * @see rte_event_port_setup()
+ */
+ uint32_t nb_event_port_enqueue_depth;
+	/**< Maximum number of events that can be enqueued at a time from an
+	 * event port by this device.
+	 * This value cannot exceed the *max_event_port_enqueue_depth*
+	 * which was previously provided in rte_event_dev_info_get().
+ * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
+ * @see rte_event_port_setup()
+ */
+ uint32_t event_dev_cfg;
+ /**< Event device config flags(RTE_EVENT_DEV_CFG_)*/
+};
+
+/**
+ * Configure an event device.
+ *
+ * This function must be invoked before any other function in the
+ * API. It can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * The caller may use rte_event_dev_info_get() to get the capabilities and
+ * resources available for this event device.
+ *
+ * @param dev_id
+ * The identifier of the device to configure.
+ * @param dev_conf
+ * The event device configuration structure.
+ *
+ * @return
+ * - 0: Success, device configured.
+ * - <0: Error code returned by the driver configuration function.
+ */
+int
+rte_event_dev_configure(uint8_t dev_id,
+ const struct rte_event_dev_config *dev_conf);
+
+
+/* Event queue specific APIs */
+
+/* Event queue configuration bitmap flags */
+#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
+/**< Allow ATOMIC,ORDERED,PARALLEL schedule type enqueue
+ *
+ * @see RTE_SCHED_TYPE_ORDERED, RTE_SCHED_TYPE_ATOMIC, RTE_SCHED_TYPE_PARALLEL
+ * @see rte_event_enqueue_burst()
+ */
+#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
+/**< This event queue links only to a single event port.
+ *
+ * @see rte_event_port_setup(), rte_event_port_link()
+ */
+
+/** Event queue configuration structure */
+struct rte_event_queue_conf {
+ uint32_t nb_atomic_flows;
+ /**< The maximum number of active flows this queue can track at any
+ * given time. If the queue is configured for atomic scheduling (by
+ * applying the RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg
+ * or RTE_SCHED_TYPE_ATOMIC flag to schedule_type), then the
+ * value must be in the range of [1, nb_event_queue_flows], which was
+ * previously provided in rte_event_dev_configure().
+ */
+ uint32_t nb_atomic_order_sequences;
+ /**< The maximum number of outstanding events waiting to be
+ * reordered by this queue. In other words, the number of entries in
+	 * this queue's reorder buffer. When the number of events in the
+	 * reorder buffer reaches *nb_atomic_order_sequences*, the
+	 * scheduler cannot schedule further events from this queue, and an
+	 * invalid event will be returned from dequeue until one or more
+	 * entries are freed up/released.
+ * If the queue is configured for ordered scheduling (by applying the
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES flag to event_queue_cfg or
+ * RTE_SCHED_TYPE_ORDERED flag to schedule_type), then the value must
+ * be in the range of [1, nb_event_queue_flows], which was
+ * previously supplied to rte_event_dev_configure().
+ */
+ uint32_t event_queue_cfg;
+ /**< Queue cfg flags(EVENT_QUEUE_CFG_) */
+ uint8_t schedule_type;
+ /**< Queue schedule type(RTE_SCHED_TYPE_*).
+ * Valid when RTE_EVENT_QUEUE_CFG_ALL_TYPES bit is not set in
+ * event_queue_cfg.
+ */
+ uint8_t priority;
+ /**< Priority for this event queue relative to other event queues.
+	 * The requested priority should be in the range of
+ * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested priority to
+ * event device supported priority value.
+ * Valid when the device has RTE_EVENT_DEV_CAP_QUEUE_QOS capability
+ */
+};
+
+/**
+ * Retrieve the default configuration information of an event queue designated
+ * by its *queue_id* from the event driver for an event device.
+ *
+ * This function is intended to be used in conjunction with
+ * rte_event_queue_setup(), where the caller needs to set up the queue by
+ * overriding a few default values.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the event queue to get the configuration information.
+ * The value must be in the range [0, nb_event_queues - 1]
+ * previously supplied to rte_event_dev_configure().
+ * @param[out] queue_conf
+ * The pointer to the default event queue configuration data.
+ * @return
+ * - 0: Success, driver updates the default event queue configuration data.
+ * - <0: Error code returned by the driver info get function.
+ *
+ * @see rte_event_queue_setup()
+ *
+ */
+int
+rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Allocate and set up an event queue for an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param queue_id
+ * The index of the event queue to setup. The value must be in the range
+ * [0, nb_event_queues - 1] previously supplied to rte_event_dev_configure().
+ * @param queue_conf
+ * The pointer to the configuration data to be used for the event queue.
+ *   NULL value is allowed, in which case the default configuration is used.
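+ *
+ *   A sketch overriding a single default value (error checks omitted):
+ *   \code{.c}
+ *	struct rte_event_queue_conf conf;
+ *
+ *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
+ *	conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+ *	rte_event_queue_setup(dev_id, queue_id, &conf);
+ *   \endcode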
+ *
+ * @see rte_event_queue_default_conf_get()
+ *
+ * @return
+ * - 0: Success, event queue correctly set up.
+ * - <0: event queue configuration failed
+ */
+int
+rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf);
+
+/**
+ * The priority of the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
+/**
+ * The number of atomic flows configured for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
+/**
+ * The number of atomic order sequences configured for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
+/**
+ * The cfg flags for the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
+/**
+ * The schedule type of the queue.
+ */
+#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
+
+/**
+ * Get an attribute from a queue.
+ *
+ * @param dev_id
+ * Eventdev id
+ * @param queue_id
+ * Eventdev queue id
+ * @param attr_id
+ * The attribute ID to retrieve
+ * @param[out] attr_value
+ * A pointer that will be filled in with the attribute value if successful
+ *
+ * @return
+ * - 0: Successfully returned value
+ * - -EINVAL: invalid device, queue or attr_id provided, or attr_value was
+ * NULL
+ * - -EOVERFLOW: returned when attr_id is set to
+ * RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE and event_queue_cfg is set to
+ * RTE_EVENT_QUEUE_CFG_ALL_TYPES
+ */
+int
+rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
+ uint32_t *attr_value);
+
+/* Event port specific APIs */
+
+/** Event port configuration structure */
+struct rte_event_port_conf {
+ int32_t new_event_threshold;
+ /**< A backpressure threshold for new event enqueues on this port.
+ * Use for *closed system* event dev where event capacity is limited,
+ * and cannot exceed the capacity of the event dev.
+ * Configuring ports with different thresholds can make higher priority
+ * traffic less likely to be backpressured.
+ * For example, a port used to inject NIC Rx packets into the event dev
+ * can have a lower threshold so as not to overwhelm the device,
+ * while ports used for worker pools can have a higher threshold.
+ * This value cannot exceed the *nb_events_limit*
+ * which was previously supplied to rte_event_dev_configure().
+ * This should be set to '-1' for *open system*.
+ */
+ uint16_t dequeue_depth;
+ /**< Configure number of bulk dequeues for this event port.
+ * This value cannot exceed the *nb_event_port_dequeue_depth*
+	 * which was previously supplied to rte_event_dev_configure().
+ * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
+ */
+ uint16_t enqueue_depth;
+ /**< Configure number of bulk enqueues for this event port.
+ * This value cannot exceed the *nb_event_port_enqueue_depth*
+	 * which was previously supplied to rte_event_dev_configure().
+ * Ignored when device is not RTE_EVENT_DEV_CAP_BURST_MODE capable.
+ */
+ uint8_t disable_implicit_release;
+ /**< Configure the port not to release outstanding events in
+	 * rte_event_dequeue_burst(). If true, all events received through
+ * the port must be explicitly released with RTE_EVENT_OP_RELEASE or
+ * RTE_EVENT_OP_FORWARD. Must be false when the device is not
+ * RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE capable.
+ */
+};
+
+/**
+ * Retrieve the default configuration information of an event port designated
+ * by its *port_id* from the event driver for an event device.
+ *
+ * This function is intended to be used in conjunction with
+ * rte_event_port_setup(), where the caller needs to set up the port by
+ * overriding a few default values.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The index of the event port to get the configuration information.
+ * The value must be in the range [0, nb_event_ports - 1]
+ * previously supplied to rte_event_dev_configure().
+ * @param[out] port_conf
+ * The pointer to the default event port configuration data
+ * @return
+ * - 0: Success, driver updates the default event port configuration data.
+ * - <0: Error code returned by the driver info get function.
+ *
+ * @see rte_event_port_setup()
+ *
+ */
+int
+rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
+ struct rte_event_port_conf *port_conf);
+
+/**
+ * Allocate and set up an event port for an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The index of the event port to setup. The value must be in the range
+ * [0, nb_event_ports - 1] previously supplied to rte_event_dev_configure().
+ * @param port_conf
+ * The pointer to the configuration data to be used for the queue.
+ *   NULL value is allowed, in which case the default configuration is used.
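+ *
+ *   A sketch overriding the default new event threshold (value illustrative,
+ *   error checks omitted):
+ *   \code{.c}
+ *	struct rte_event_port_conf conf;
+ *
+ *	rte_event_port_default_conf_get(dev_id, port_id, &conf);
+ *	conf.new_event_threshold = 1024;
+ *	rte_event_port_setup(dev_id, port_id, &conf);
+ *   \endcode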
+ *
+ * @see rte_event_port_default_conf_get()
+ *
+ * @return
+ * - 0: Success, event port correctly set up.
+ * - <0: Port configuration failed
+ *   - (-EDQUOT) Quota exceeded (the application tried to link a queue
+ *     configured with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one
+ *     event port)
+ */
+int
+rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf);
+
+/**
+ * The queue depth of the port on the enqueue side
+ */
+#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
+/**
+ * The queue depth of the port on the dequeue side
+ */
+#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
+/**
+ * The new event threshold of the port
+ */
+#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
+
+/**
+ * Get an attribute from a port.
+ *
+ * @param dev_id
+ * Eventdev id
+ * @param port_id
+ * Eventdev port id
+ * @param attr_id
+ * The attribute ID to retrieve
+ * @param[out] attr_value
+ * A pointer that will be filled in with the attribute value if successful
+ *
+ * @return
+ * - 0: Successfully returned value
+ * - (-EINVAL) Invalid device, port or attr_id, or attr_value was NULL
+ */
+int
+rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
+ uint32_t *attr_value);
+
+/**
+ * Start an event device.
+ *
+ * The device start step is the last one and consists of setting the event
+ * queues to start accepting events and scheduling them to event ports.
+ *
+ * On success, all basic functions exported by the API (event enqueue,
+ * event dequeue and so on) can be invoked.
+ *
+ * @param dev_id
+ * Event device identifier
+ * @return
+ * - 0: Success, device started.
+ *   - -ESTALE: Not all ports of the device are configured.
+ * - -ENOLINK: Not all queues are linked, which could lead to deadlock.
+ */
+int
+rte_event_dev_start(uint8_t dev_id);
+
+/**
+ * Stop an event device.
+ *
+ * This function causes all queued events to be drained, including those
+ * residing in event ports. While draining events out of the device, this
+ * function calls the user-provided flush callback (if one was registered) once
+ * per event.
+ *
+ * The device can be restarted with a call to rte_event_dev_start(). Threads
+ * that continue to enqueue/dequeue while the device is stopped, or being
+ * stopped, will result in undefined behavior. This includes event adapters,
+ * which must be stopped prior to stopping the eventdev.
+ *
+ * @param dev_id
+ * Event device identifier.
+ *
+ * @see rte_event_dev_stop_flush_callback_register()
+ */
+void
+rte_event_dev_stop(uint8_t dev_id);
+
+typedef void (*eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event,
+ void *arg);
+/**< Callback function called during rte_event_dev_stop(), invoked once per
+ * flushed event.
+ */
+
+/**
+ * Registers a callback function to be invoked during rte_event_dev_stop() for
+ * each flushed event. This function can be used to properly dispose of queued
+ * events, for example events containing memory pointers.
+ *
+ * The callback function is only registered for the calling process. The
+ * callback function must be registered in every process that can call
+ * rte_event_dev_stop().
+ *
+ * To unregister a callback, call this function with a NULL callback pointer.
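+ *
+ * A minimal sketch, assuming the queued events carry mbufs (requires
+ * rte_mbuf.h for rte_pktmbuf_free()):
+ * \code{.c}
+ *	static void
+ *	flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
+ *	{
+ *		RTE_SET_USED(dev_id);
+ *		RTE_SET_USED(arg);
+ *		rte_pktmbuf_free(event.mbuf);
+ *	}
+ *
+ *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
+ * \endcode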
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param callback
+ * Callback function invoked once per flushed event.
+ * @param userdata
+ * Argument supplied to callback.
+ *
+ * @return
+ * - 0 on success.
+ * - -EINVAL if *dev_id* is invalid
+ *
+ * @see rte_event_dev_stop()
+ */
+int
+rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
+ eventdev_stop_flush_t callback, void *userdata);
+
+/**
+ * Close an event device. The device cannot be restarted!
+ *
+ * @param dev_id
+ * Event device identifier
+ *
+ * @return
+ * - 0 on successfully closing device
+ * - <0 on failure to close device
+ * - (-EAGAIN) if device is busy
+ */
+int
+rte_event_dev_close(uint8_t dev_id);
+
+/* Scheduler type definitions */
+#define RTE_SCHED_TYPE_ORDERED 0
+/**< Ordered scheduling
+ *
+ * Events from an ordered flow of an event queue can be scheduled to multiple
+ * ports for concurrent processing while maintaining the original event order.
+ * This scheme enables the user to achieve high single flow throughput by
+ * avoiding SW synchronization for ordering between ports bound to cores.
+ *
+ * The source flow ordering from an event queue is maintained when events are
+ * enqueued to their destination queue within the same ordered flow context.
+ * An event port holds the context until the application calls
+ * rte_event_dequeue_burst() from the same port, which implicitly releases
+ * the context.
+ * The user may allow the scheduler to release the context earlier than that
+ * by invoking rte_event_enqueue_burst() with the RTE_EVENT_OP_RELEASE operation.
+ *
+ * Events from the source queue appear in their original order when dequeued
+ * from a destination queue.
+ * Event ordering is based on the received event(s), but also other
+ * (newly allocated or stored) events are ordered when enqueued within the same
+ * ordered context. Events not enqueued (e.g. released or stored) within the
+ * context are considered missing from reordering and are skipped at this time
+ * (but can be ordered again within another context).
+ *
+ * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
+ */
+
+#define RTE_SCHED_TYPE_ATOMIC 1
+/**< Atomic scheduling
+ *
+ * Events from an atomic flow of an event queue can be scheduled only to a
+ * single port at a time. The port is guaranteed to have exclusive (atomic)
+ * access to the associated flow context, which enables the user to avoid SW
+ * synchronization. Atomic flows also help to maintain event ordering
+ * since only one port at a time can process events from a flow of an
+ * event queue.
+ *
+ * The atomic queue synchronization context is dedicated to the port until
+ * the application calls rte_event_dequeue_burst() from the same port,
+ * which implicitly releases the context. The user may allow the scheduler to
+ * release the context earlier than that by invoking rte_event_enqueue_burst()
+ * with the RTE_EVENT_OP_RELEASE operation.
+ *
+ * @see rte_event_queue_setup(), rte_event_dequeue_burst(), RTE_EVENT_OP_RELEASE
+ */
+
+#define RTE_SCHED_TYPE_PARALLEL 2
+/**< Parallel scheduling
+ *
+ * The scheduler performs priority scheduling, load balancing and other
+ * functions, but does not provide additional event synchronization or ordering.
+ * It is free to schedule events from a single parallel flow of an event queue
+ * to multiple event ports for concurrent processing.
+ * The application is responsible for flow context synchronization and
+ * event ordering (SW synchronization).
+ *
+ * @see rte_event_queue_setup(), rte_event_dequeue_burst()
+ */
+
+/* Event types to classify the event source */
+#define RTE_EVENT_TYPE_ETHDEV 0x0
+/**< The event generated from ethdev subsystem */
+#define RTE_EVENT_TYPE_CRYPTODEV 0x1
+/**< The event generated from the cryptodev subsystem */
+#define RTE_EVENT_TYPE_TIMER 0x2
+/**< The event generated from event timer adapter */
+#define RTE_EVENT_TYPE_CPU 0x3
+/**< The event generated from the CPU for pipelining.
+ * Application may use *sub_event_type* to further classify the event
+ */
+#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
+/**< The event generated from event eth Rx adapter */
+#define RTE_EVENT_TYPE_MAX 0x10
+/**< Maximum number of event types */
+
+/* Event enqueue operations */
+#define RTE_EVENT_OP_NEW 0
+/**< The event producers use this operation to inject a new event to the
+ * event device.
+ */
+#define RTE_EVENT_OP_FORWARD 1
+/**< The CPU uses this operation to forward the event to a different event
+ * queue or to change to a new application-specific flow or schedule type to
+ * enable pipelining.
+ *
+ * This operation must only be enqueued to the same port that the
+ * event to be forwarded was dequeued from.
+ */
+#define RTE_EVENT_OP_RELEASE 2
+/**< Release the flow context associated with the schedule type.
+ *
+ * If the current flow's schedule type is *RTE_SCHED_TYPE_ATOMIC*
+ * then this operation hints the scheduler that the user has completed critical
+ * section processing in the current atomic context.
+ * The scheduler is now allowed to schedule events from the same flow from
+ * an event queue to another port. However, the context may still be held
+ * until the next rte_event_dequeue_burst() call; this operation allows but
+ * does not force the scheduler to release the context early.
+ *
+ * Early atomic context release may increase parallelism and thus system
+ * performance, but the user needs to design carefully the split into critical
+ * vs non-critical sections.
+ *
+ * If the current flow's schedule type is *RTE_SCHED_TYPE_ORDERED*
+ * then this operation hints the scheduler that the user has done all that is
+ * needed to maintain event order in the current ordered context.
+ * The scheduler is allowed to release the ordered context of this port and
+ * avoid reordering any following enqueues.
+ *
+ * Early ordered context release may increase parallelism and thus system
+ * performance.
+ *
+ * If the current flow's schedule type is *RTE_SCHED_TYPE_PARALLEL*
+ * or no scheduling context is held then this operation may be a NOOP,
+ * depending on the implementation.
+ *
+ * This operation must only be enqueued to the same port that the
+ * event to be released was dequeued from.
+ *
+ */
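+
+/* Illustrative sketch (not part of the original header): a worker loop that
+ * forwards processed events and releases the scheduling context when an
+ * event is dropped. The NEXT_QUEUE id and the process_event() helper are
+ * hypothetical.
+ *
+ * @code
+ * struct rte_event ev;
+ * while (!done) {
+ *     uint16_t n = rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0);
+ *     if (n == 0)
+ *         continue;
+ *     if (process_event(&ev) == 0) {
+ *         ev.op = RTE_EVENT_OP_FORWARD; // keep the event in the pipeline
+ *         ev.queue_id = NEXT_QUEUE;
+ *     } else {
+ *         ev.op = RTE_EVENT_OP_RELEASE; // drop the event, free the context
+ *     }
+ *     rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
+ * }
+ * @endcode
+ */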
+
+/**
+ * The generic *rte_event* structure to hold the event attributes
+ * for dequeue and enqueue operation
+ */
+RTE_STD_C11
+struct rte_event {
+ /** WORD0 */
+ union {
+ uint64_t event;
+ /** Event attributes for dequeue or enqueue operation */
+ struct {
+ uint32_t flow_id:20;
+ /**< Targeted flow identifier for the enqueue and
+ * dequeue operation.
+ * The value must be in the range of
+ * [0, nb_event_queue_flows - 1] which was
+ * previously supplied to rte_event_dev_configure().
+ */
+ uint32_t sub_event_type:8;
+ /**< Sub-event types based on the event source.
+ * @see RTE_EVENT_TYPE_CPU
+ */
+ uint32_t event_type:4;
+ /**< Event type to classify the event source.
+ * @see RTE_EVENT_TYPE_ETHDEV, (RTE_EVENT_TYPE_*)
+ */
+ uint8_t op:2;
+ /**< The type of event enqueue operation - new/forward/
+ * etc. This field is not preserved across an instance
+ * and is undefined on dequeue.
+ * @see RTE_EVENT_OP_NEW, (RTE_EVENT_OP_*)
+ */
+ uint8_t rsvd:4;
+ /**< Reserved for future use */
+ uint8_t sched_type:2;
+ /**< Scheduler synchronization type (RTE_SCHED_TYPE_*)
+ * associated with flow id on a given event queue
+ * for the enqueue and dequeue operation.
+ */
+ uint8_t queue_id;
+ /**< Targeted event queue identifier for the enqueue or
+ * dequeue operation.
+ * The value must be in the range of
+ * [0, nb_event_queues - 1] which was previously supplied to
+ * rte_event_dev_configure().
+ */
+ uint8_t priority;
+ /**< Event priority relative to other events in the
+ * event queue. The requested priority should be in the
+ * range of [RTE_EVENT_DEV_PRIORITY_HIGHEST,
+ * RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested
+ * priority to a supported priority value.
+ * Valid when the device has
+ * RTE_EVENT_DEV_CAP_EVENT_QOS capability.
+ */
+ uint8_t impl_opaque;
+ /**< Implementation specific opaque value.
+ * An implementation may use this field to hold
+ * implementation specific value to share between
+ * dequeue and enqueue operation.
+ * The application should not modify this field.
+ */
+ };
+ };
+ /** WORD1 */
+ union {
+ uint64_t u64;
+ /**< Opaque 64-bit value */
+ void *event_ptr;
+ /**< Opaque event pointer */
+ struct rte_mbuf *mbuf;
+ /**< mbuf pointer if dequeued event is associated with mbuf */
+ };
+};
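+
+/* Illustrative sketch (not part of the original header): populating an event
+ * around an mbuf before injecting it with RTE_EVENT_OP_NEW. The pkt_hash
+ * value and the target queue are hypothetical.
+ *
+ * @code
+ * struct rte_event ev = {0};
+ *
+ * ev.op = RTE_EVENT_OP_NEW;
+ * ev.queue_id = 0;                         // must be < nb_event_queues
+ * ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ * ev.event_type = RTE_EVENT_TYPE_CPU;
+ * ev.flow_id = pkt_hash & 0xfffff;         // 20-bit flow identifier
+ * ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ * ev.mbuf = m;                             // WORD1 carries the payload
+ * @endcode
+ */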
+
+/* Ethdev Rx adapter capability bitmap flags */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
+/**< This flag is set when the packet transfer mechanism is in HW.
+ * Ethdev can send packets to the event device using internal event port.
+ */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
+/**< Adapter supports multiple event queues per ethdev. Every ethdev
+ * Rx queue can be connected to a unique event queue.
+ */
+#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
+/**< The application can override the adapter generated flow ID in the
+ * event. This flow ID can be specified when adding an ethdev Rx queue
+ * to the adapter using the ev member of
+ * struct rte_event_eth_rx_adapter_queue_conf.
+ * @see struct rte_event_eth_rx_adapter_queue_conf::ev
+ * @see struct rte_event_eth_rx_adapter_queue_conf::rx_queue_flags
+ */
+
+/**
+ * Retrieve the event device's ethdev Rx adapter capabilities for the
+ * specified ethernet port
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param eth_port_id
+ * The identifier of the ethernet device.
+ *
+ * @param[out] caps
+ * A pointer to memory filled with Rx event adapter capabilities.
+ *
+ * @return
+ * - 0: Success, driver provides Rx event adapter capabilities for the
+ * ethernet device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+int
+rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
+ uint32_t *caps);
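+
+/* Illustrative sketch (not part of the original header): checking whether an
+ * eventdev/ethdev pair supports an internal port before choosing between HW
+ * and SW packet transfer. use_hw_transfer is a hypothetical application flag.
+ *
+ * @code
+ * uint32_t caps = 0;
+ *
+ * if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) == 0 &&
+ *     (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+ *     use_hw_transfer = 1; // ethdev can inject events directly
+ * @endcode
+ */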
+
+#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
+/**< This flag is set when the timer mechanism is in HW. */
+
+/**
+ * Retrieve the event device's timer adapter capabilities.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param[out] caps
+ * A pointer to memory to be filled with event timer adapter capabilities.
+ *
+ * @return
+ * - 0: Success, driver provided event timer adapter capabilities.
+ * - <0: Error code returned by the driver function.
+ */
+int __rte_experimental
+rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
+
+/* Crypto adapter capability bitmap flag */
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
+/**< Flag indicates HW is capable of generating events in
+ * RTE_EVENT_OP_NEW enqueue operation. Cryptodev will send
+ * packets to the event device as new events using an internal
+ * event port.
+ */
+
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
+/**< Flag indicates HW is capable of generating events in
+ * RTE_EVENT_OP_FORWARD enqueue operation. Cryptodev will send
+ * packets to the event device as forwarded event using an
+ * internal event port.
+ */
+
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
+/**< Flag indicates HW is capable of mapping crypto queue pair to
+ * event queue.
+ */
+
+#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
+/**< Flag indicates HW/SW supports a mechanism to store and retrieve
+ * the private data information along with the crypto session.
+ */
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Retrieve the event device's crypto adapter capabilities for the
+ * specified cryptodev device
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param cdev_id
+ * The identifier of the cryptodev device.
+ *
+ * @param[out] caps
+ * A pointer to memory filled with event adapter capabilities.
+ * It is expected to be pre-allocated & initialized by caller.
+ *
+ * @return
+ * - 0: Success, driver provides event adapter capabilities for the
+ * cryptodev device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+int __rte_experimental
+rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
+ uint32_t *caps);
+
+struct rte_eventdev_ops;
+struct rte_eventdev;
+
+typedef uint16_t (*event_enqueue_t)(void *port, const struct rte_event *ev);
+/**< @internal Enqueue event on port of a device */
+
+typedef uint16_t (*event_enqueue_burst_t)(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+/**< @internal Enqueue burst of events on port of a device */
+
+typedef uint16_t (*event_dequeue_t)(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks);
+/**< @internal Dequeue event from port of a device */
+
+typedef uint16_t (*event_dequeue_burst_t)(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+/**< @internal Dequeue burst of events from port of a device */
+
+#define RTE_EVENTDEV_NAME_MAX_LEN (64)
+/**< @internal Max length of name of event PMD */
+
+/**
+ * @internal
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_eventdev_data {
+ int socket_id;
+ /**< Socket ID where memory is allocated */
+ uint8_t dev_id;
+ /**< Device ID for this instance */
+ uint8_t nb_queues;
+ /**< Number of event queues. */
+ uint8_t nb_ports;
+ /**< Number of event ports. */
+ void **ports;
+ /**< Array of pointers to ports. */
+ struct rte_event_port_conf *ports_cfg;
+ /**< Array of port configuration structures. */
+ struct rte_event_queue_conf *queues_cfg;
+ /**< Array of queue configuration structures. */
+ uint16_t *links_map;
+ /**< Memory to store queues to port connections. */
+ void *dev_private;
+ /**< PMD-specific private data */
+ uint32_t event_dev_cap;
+ /**< Event device capabilities(RTE_EVENT_DEV_CAP_)*/
+ struct rte_event_dev_config dev_conf;
+ /**< Configuration applied to device. */
+ uint8_t service_inited;
+ /**< Service initialization state */
+ uint32_t service_id;
+ /**< Service ID */
+ void *dev_stop_flush_arg;
+ /**< User-provided argument for event flush function */
+
+ RTE_STD_C11
+ uint8_t dev_started : 1;
+ /**< Device state: STARTED(1)/STOPPED(0) */
+
+ char name[RTE_EVENTDEV_NAME_MAX_LEN];
+ /**< Unique identifier name */
+} __rte_cache_aligned;
+
+/** @internal The data structure associated with each event device. */
+struct rte_eventdev {
+ event_enqueue_t enqueue;
+ /**< Pointer to PMD enqueue function. */
+ event_enqueue_burst_t enqueue_burst;
+ /**< Pointer to PMD enqueue burst function. */
+ event_enqueue_burst_t enqueue_new_burst;
+ /**< Pointer to PMD enqueue burst function(op new variant) */
+ event_enqueue_burst_t enqueue_forward_burst;
+ /**< Pointer to PMD enqueue burst function(op forward variant) */
+ event_dequeue_t dequeue;
+ /**< Pointer to PMD dequeue function. */
+ event_dequeue_burst_t dequeue_burst;
+ /**< Pointer to PMD dequeue burst function. */
+
+ struct rte_eventdev_data *data;
+ /**< Pointer to device data */
+ struct rte_eventdev_ops *dev_ops;
+ /**< Functions exported by PMD */
+ struct rte_device *dev;
+ /**< Device info. supplied by probing */
+
+ RTE_STD_C11
+ uint8_t attached : 1;
+ /**< Flag indicating the device is attached */
+} __rte_cache_aligned;
+
+extern struct rte_eventdev *rte_eventdevs;
+/** @internal The pool of rte_eventdev structures. */
+
+static __rte_always_inline uint16_t
+__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events,
+ const event_enqueue_burst_t fn)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (port_id >= dev->data->nb_ports) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+ /*
+ * Allow zero-cost non-burst mode routine invocation if the application
+ * requests nb_events as a compile-time constant of one.
+ */
+ if (nb_events == 1)
+ return (*dev->enqueue)(dev->data->ports[port_id], ev);
+ else
+ return fn(dev->data->ports[port_id], ev, nb_events);
+}
+
+/**
+ * Enqueue a burst of event objects or an event object supplied in *rte_event*
+ * structure on an event device designated by its *dev_id* through the event
+ * port specified by *port_id*. Each event object specifies the event queue on
+ * which it will be enqueued.
+ *
+ * The *nb_events* parameter is the number of event objects to enqueue which are
+ * supplied in the *ev* array of *rte_event* structure.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * The rte_event_enqueue_burst() function returns the number of
+ * event objects it actually enqueued. A return value equal to *nb_events*
+ * means that all event objects have been enqueued.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically number of
+ * rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event device's queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - -ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_enqueue_depth()
+ */
+static inline uint16_t
+rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ dev->enqueue_burst);
+}
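+
+/* Illustrative sketch (not part of the original header): draining a burst
+ * into a possibly backpressured port, retrying on -ENOSPC as the return
+ * value semantics above describe.
+ *
+ * @code
+ * uint16_t sent = 0;
+ *
+ * while (sent < nb_events) {
+ *     sent += rte_event_enqueue_burst(dev_id, port_id,
+ *                                     &ev[sent], nb_events - sent);
+ *     if (sent < nb_events && rte_errno != -ENOSPC)
+ *         break; // hard error (e.g. -EINVAL), stop retrying
+ * }
+ * @endcode
+ */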
+
+/**
+ * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_NEW* on
+ * an event device designated by its *dev_id* through the event port specified
+ * by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of type *RTE_EVENT_OP_NEW*. This specialized
+ * function provides an additional hint to the PMD to optimize if possible.
+ *
+ * The rte_event_enqueue_new_burst() result is undefined if the enqueue burst
+ * has an event object of operation type != RTE_EVENT_OP_NEW.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically number of
+ * rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event device's queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - -ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ dev->enqueue_new_burst);
+}
+
+/**
+ * Enqueue a burst of event objects of operation type *RTE_EVENT_OP_FORWARD*
+ * on an event device designated by its *dev_id* through the event port
+ * specified by *port_id*.
+ *
+ * Provides the same functionality as rte_event_enqueue_burst(), except that
+ * the application can use this API when all objects in the burst contain
+ * the enqueue operation of type *RTE_EVENT_OP_FORWARD*. This specialized
+ * function provides an additional hint to the PMD to optimize if possible.
+ *
+ * The rte_event_enqueue_forward_burst() result is undefined if the enqueue
+ * burst has an event object of operation type != RTE_EVENT_OP_FORWARD.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * which contain the event object enqueue operations to be processed.
+ * @param nb_events
+ * The number of event objects to enqueue, typically number of
+ * rte_event_port_enqueue_depth() available for this port.
+ *
+ * @return
+ * The number of event objects actually enqueued on the event device. The
+ * return value can be less than the value of the *nb_events* parameter when
+ * the event device's queue is full or if invalid parameters are specified in a
+ * *rte_event*. If the return value is less than *nb_events*, the remaining
+ * events at the end of ev[] are not consumed and the caller has to take care
+ * of them, and rte_errno is set accordingly. Possible errno values include:
+ * - -EINVAL The port ID is invalid, device ID is invalid, an event's queue
+ * ID is invalid, or an event's sched type doesn't match the
+ * capabilities of the destination queue.
+ * - -ENOSPC The event port was backpressured and unable to enqueue
+ * one or more events. This error code is only applicable to
+ * closed systems.
+ * @see rte_event_port_enqueue_depth() rte_event_enqueue_burst()
+ */
+static inline uint16_t
+rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
+ const struct rte_event ev[], uint16_t nb_events)
+{
+ const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+ return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
+ dev->enqueue_forward_burst);
+}
+
+/**
+ * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
+ *
+ * If the device is configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT flag
+ * then the application can use this function to convert a wait time in
+ * nanoseconds to the implementation-specific *timeout_ticks* value supplied
+ * to rte_event_dequeue_burst()
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param ns
+ * Wait time in nanosecond
+ * @param[out] timeout_ticks
+ * Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
+ *
+ * @return
+ * - 0 on success.
+ * - -ENOTSUP if the device doesn't support timeouts
+ * - -EINVAL if *dev_id* is invalid or *timeout_ticks* is NULL
+ * - other values < 0 on failure.
+ *
+ * @see rte_event_dequeue_burst(), RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ * @see rte_event_dev_configure()
+ *
+ */
+int
+rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
+ uint64_t *timeout_ticks);
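+
+/* Illustrative sketch (not part of the original header): converting a 100 us
+ * wait budget into device ticks, falling back to no-wait when the device has
+ * no timeout support.
+ *
+ * @code
+ * uint64_t timeout_ticks = 0;
+ * int ret = rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000,
+ *                                           &timeout_ticks);
+ *
+ * if (ret == -ENOTSUP)
+ *     timeout_ticks = 0; // device cannot wait, poll instead
+ * else if (ret < 0)
+ *     return ret;
+ * @endcode
+ */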
+
+/**
+ * Dequeue a burst of event objects or an event object from the event port
+ * designated by its *event_port_id*, on an event device designated
+ * by its *dev_id*.
+ *
+ * rte_event_dequeue_burst() does not dictate the specifics of the scheduling
+ * algorithm, as each eventdev driver may have different criteria for
+ * scheduling an event. However, in general, from an application perspective
+ * the scheduler may use the following scheme to dispatch an event to the port.
+ *
+ * 1) Selection of an event queue based on
+ * a) The list of event queues linked to the event port.
+ * b) If the device has the RTE_EVENT_DEV_CAP_QUEUE_QOS capability, event
+ * queue selection from the list is based on event queue priority relative to
+ * other event queues, supplied as *priority* in rte_event_queue_setup()
+ * c) If the device has the RTE_EVENT_DEV_CAP_EVENT_QOS capability, event
+ * queue selection from the list is based on event priority supplied as
+ * *priority* in rte_event_enqueue_burst()
+ * 2) Selection of an event based on
+ * a) The number of flows available in the selected event queue.
+ * b) The schedule type method associated with the event
+ *
+ * The *nb_events* parameter is the maximum number of event objects to dequeue
+ * which are returned in the *ev* array of *rte_event* structure.
+ *
+ * The rte_event_dequeue_burst() function returns the number of event objects
+ * it actually dequeued. A return value equal to *nb_events* means that all
+ * event objects have been dequeued.
+ *
+ * The number of events dequeued is the number of scheduler contexts held by
+ * this port. These contexts are automatically released in the next
+ * rte_event_dequeue_burst() invocation if the port supports implicit
+ * releases; alternatively, rte_event_enqueue_burst() with the
+ * RTE_EVENT_OP_RELEASE operation can be used to release the contexts early.
+ *
+ * Event operations RTE_EVENT_OP_FORWARD and RTE_EVENT_OP_RELEASE must only be
+ * enqueued to the same port that their associated events were dequeued from.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param port_id
+ * The identifier of the event port.
+ * @param[out] ev
+ * Points to an array of *nb_events* objects of type *rte_event* structure
+ * for output to be populated with the dequeued event objects.
+ * @param nb_events
+ * The maximum number of event objects to dequeue, typically number of
+ * rte_event_port_dequeue_depth() available for this port.
+ *
+ * @param timeout_ticks
+ * - 0 no-wait, returns immediately if there is no event.
+ * - >0 wait for the event. If the device is configured with
+ * RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT then this function will wait until
+ * at least one event is available or *timeout_ticks* time has elapsed.
+ * If the device is not configured with RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
+ * then this function will wait until an event is available or the
+ * *dequeue_timeout_ns* ns which was previously supplied to
+ * rte_event_dev_configure() has elapsed.
+ *
+ * @return
+ * The number of event objects actually dequeued from the port. The return
+ * value can be less than the value of the *nb_events* parameter when the
+ * event port's queue is not full.
+ *
+ * @see rte_event_port_dequeue_depth()
+ */
+static inline uint16_t
+rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+ if (dev_id >= RTE_EVENT_MAX_DEVS || !rte_eventdevs[dev_id].attached) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+
+ if (port_id >= dev->data->nb_ports) {
+ rte_errno = -EINVAL;
+ return 0;
+ }
+#endif
+
+ /*
+ * Allow zero-cost non-burst mode routine invocation if the application
+ * requests nb_events as a compile-time constant of one.
+ */
+ if (nb_events == 1)
+ return (*dev->dequeue)(
+ dev->data->ports[port_id], ev, timeout_ticks);
+ else
+ return (*dev->dequeue_burst)(
+ dev->data->ports[port_id], ev, nb_events,
+ timeout_ticks);
+}
+
+/**
+ * Link multiple source event queues supplied in *queues* to the destination
+ * event port designated by its *port_id* with associated service priority
+ * supplied in *priorities* on the event device designated by its *dev_id*.
+ *
+ * The link establishment shall enable the event port *port_id* to receive
+ * events from the specified event queue(s) supplied in *queues*
+ *
+ * An event queue may link to one or more event ports.
+ * The number of links that can be established from an event queue to an
+ * event port is implementation defined.
+ *
+ * Event queue(s) to event port link establishment can be changed at runtime
+ * without re-configuring the device to support scaling and to reduce the
+ * latency of critical work by establishing the link with more event ports
+ * at runtime.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier to select the destination port to link.
+ *
+ * @param queues
+ * Points to an array of *nb_links* event queues to be linked
+ * to the event port.
+ * NULL value is allowed, in which case this function links all the configured
+ * event queues *nb_event_queues* which were previously supplied to
+ * rte_event_dev_configure() to the event port *port_id*
+ *
+ * @param priorities
+ * Points to an array of *nb_links* service priorities associated with each
+ * event queue link to event port.
+ * The priority defines the event port's servicing priority for
+ * event queue, which may be ignored by an implementation.
+ * The requested priority should be in the range of
+ * [RTE_EVENT_DEV_PRIORITY_HIGHEST, RTE_EVENT_DEV_PRIORITY_LOWEST].
+ * The implementation shall normalize the requested priority to
+ * implementation supported priority value.
+ * NULL value is allowed, in which case this function links the event queues
+ * with RTE_EVENT_DEV_PRIORITY_NORMAL servicing priority
+ *
+ * @param nb_links
+ * The number of links to establish. This parameter is ignored if queues is
+ * NULL.
+ *
+ * @return
+ * The number of links actually established. The return value can be less than
+ * the value of the *nb_links* parameter when the implementation has a
+ * limitation on specific queue-to-port link establishment or if invalid
+ * parameters are specified in *queues*.
+ * If the return value is less than *nb_links*, the remaining links at the end
+ * of queues[] are not established, and the caller has to take care of them.
+ * If the return value is less than *nb_links* then the implementation shall
+ * update rte_errno accordingly. Possible rte_errno values are
+ * (-EDQUOT) Quota exceeded (the application tried to link a queue configured
+ * with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
+ * (-EINVAL) Invalid parameter
+ *
+ */
+int
+rte_event_port_link(uint8_t dev_id, uint8_t port_id,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links);
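+
+/* Illustrative sketch (not part of the original header): linking two already
+ * configured event queues (ids 0 and 1 are assumed) to a worker port with
+ * distinct servicing priorities.
+ *
+ * @code
+ * const uint8_t queues[] = {0, 1};
+ * const uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_HIGHEST,
+ *                          RTE_EVENT_DEV_PRIORITY_NORMAL};
+ *
+ * if (rte_event_port_link(dev_id, port_id, queues, prios, 2) != 2)
+ *     rte_exit(EXIT_FAILURE, "queue to port link failed\n");
+ * @endcode
+ */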
+
+/**
+ * Unlink multiple source event queues supplied in *queues* from the destination
+ * event port designated by its *port_id* on the event device designated
+ * by its *dev_id*.
+ *
+ * The unlink call shall prevent the event port *port_id* from receiving
+ * events from the specified event queue(s) supplied in *queues*
+ *
+ * Event queue(s) to event port unlinking can be done at runtime
+ * without re-configuring the device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier to select the destination port to unlink.
+ *
+ * @param queues
+ * Points to an array of *nb_unlinks* event queues to be unlinked
+ * from the event port.
+ * NULL value is allowed, in which case this function unlinks all the
+ * event queue(s) from the event port *port_id*.
+ *
+ * @param nb_unlinks
+ * The number of unlinks to establish. This parameter is ignored if queues is
+ * NULL.
+ *
+ * @return
+ * The number of unlinks actually established. The return value can be less
+ * than the value of the *nb_unlinks* parameter when the implementation has a
+ * limitation on specific queue-to-port unlink establishment or
+ * if invalid parameters are specified.
+ * If the return value is less than *nb_unlinks*, the remaining queues at the
+ * end of queues[] are not unlinked, and the caller has to take care of them.
+ * If the return value is less than *nb_unlinks* then the implementation shall
+ * update rte_errno accordingly. Possible rte_errno values are
+ * (-EINVAL) Invalid parameter
+ *
+ */
+int
+rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint16_t nb_unlinks);
+
+/**
+ * Retrieve the list of source event queues and their associated service
+ * priorities linked to the destination event port designated by its *port_id*
+ * on the event device designated by its *dev_id*.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param port_id
+ * Event port identifier.
+ *
+ * @param[out] queues
+ * Points to an array of *queues* for output.
+ * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ * store the event queue(s) linked with event port *port_id*
+ *
+ * @param[out] priorities
+ * Points to an array of *priorities* for output.
+ * The caller has to allocate *RTE_EVENT_MAX_QUEUES_PER_DEV* bytes to
+ * store the service priority associated with each event queue linked
+ *
+ * @return
+ * The number of links established on the event port designated by its
+ * *port_id*.
+ * - <0 on failure.
+ *
+ */
+int
+rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
+ uint8_t queues[], uint8_t priorities[]);
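+
+/* Illustrative sketch (not part of the original header): dumping the links
+ * currently established on a port.
+ *
+ * @code
+ * uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ * uint8_t prios[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ * int i, n;
+ *
+ * n = rte_event_port_links_get(dev_id, port_id, queues, prios);
+ * for (i = 0; i < n; i++)
+ *     printf("queue %u -> port %u prio %u\n", queues[i], port_id, prios[i]);
+ * @endcode
+ */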
+
+/**
+ * Retrieve the service ID of the event dev. If the event dev doesn't use
+ * a rte_service function, this function returns -ESRCH.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param [out] service_id
+ * A pointer to a uint32_t, to be filled in with the service id.
+ *
+ * @return
+ * - 0: Success
+ * - <0: Error code on failure. If the event dev doesn't use a rte_service
+ * function, this function returns -ESRCH.
+ */
+int
+rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
+
+/**
+ * Dump internal information about *dev_id* to the FILE* provided in *f*.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ *
+ * @param f
+ * A pointer to a file for output
+ *
+ * @return
+ * - 0: on success
+ * - <0: on failure.
+ */
+int
+rte_event_dev_dump(uint8_t dev_id, FILE *f);
+
+/** Maximum name length for extended statistics counters */
+#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
+
+/**
+ * Selects the component of the eventdev to retrieve statistics from.
+ */
+enum rte_event_dev_xstats_mode {
+ RTE_EVENT_DEV_XSTATS_DEVICE,
+ RTE_EVENT_DEV_XSTATS_PORT,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+};
+
+/**
+ * A name-key lookup element for extended statistics.
+ *
+ * This structure is used to map between names and ID numbers
+ * for extended eventdev statistics.
+ */
+struct rte_event_dev_xstats_name {
+ char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
+};
+
+/**
+ * Retrieve names of extended statistics of an event device.
+ *
+ * @param dev_id
+ * The identifier of the event device.
+ * @param mode
+ * The mode of statistics to retrieve. Choices include the device statistics,
+ * port statistics or queue statistics.
+ * @param queue_port_id
+ * Used to specify the port or queue number in queue or port mode, and is
+ * ignored in device mode.
+ * @param[out] xstats_names
+ * Block of memory to insert names into. Must have a capacity of at least
+ * *size*. If set to NULL, the function returns the required capacity.
+ * @param[out] ids
+ * Block of memory to insert ids into. Must have a capacity of at least
+ * *size*. If set to NULL, the function returns the required capacity. The id
+ * values returned can be passed to *rte_event_dev_xstats_get* to select
+ * statistics.
+ * @param size
+ * Capacity of xstats_names (number of names).
+ * @return
+ * - positive value lower or equal to size: success. The return value
+ * is the number of entries filled in the stats table.
+ * - positive value higher than size: error, the given statistics table
+ * is too small. The return value corresponds to the size that should
+ * be given to succeed. The entries in the table are not valid and
+ * shall not be used by the caller.
+ * - negative value on error:
+ * -ENODEV for invalid *dev_id*
+ * -EINVAL for invalid mode, queue port or id parameters
+ * -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids,
+ unsigned int size);
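+
+/* Illustrative sketch (not part of the original header): the two-call
+ * pattern, first probing the required capacity with NULL buffers, then
+ * fetching the names and ids.
+ *
+ * @code
+ * int n = rte_event_dev_xstats_names_get(dev_id,
+ *                 RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
+ * if (n > 0) {
+ *     struct rte_event_dev_xstats_name *names =
+ *             malloc(n * sizeof(*names));
+ *     unsigned int *ids = malloc(n * sizeof(*ids));
+ *
+ *     rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
+ *                                    0, names, ids, n);
+ * }
+ * @endcode
+ */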
+
+/**
+ * Retrieve extended statistics of an event device.
+ *
+ * @param dev_id
+ * The identifier of the device.
+ * @param mode
+ * The mode of statistics to retrieve. Choices include the device statistics,
+ * port statistics or queue statistics.
+ * @param queue_port_id
+ * Used to specify the port or queue number in queue or port mode, and is
+ * ignored in device mode.
+ * @param ids
+ * The id numbers of the stats to get. The ids can be taken from the stat
+ * position in the stat list from rte_event_dev_xstats_names_get(), or
+ * by using rte_event_dev_xstats_by_name_get()
+ * @param[out] values
+ * The values for each stat requested by ID.
+ * @param n
+ * The number of stats requested
+ * @return
+ * - positive value: number of stat entries filled into the values array
+ * - negative value on error:
+ * -ENODEV for invalid *dev_id*
+ * -EINVAL for invalid mode, queue port or id parameters
+ * -ENOTSUP if the device doesn't support this function.
+ */
+int
+rte_event_dev_xstats_get(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id,
+ const unsigned int ids[],
+ uint64_t values[], unsigned int n);
+
+/**
+ * Retrieve the value of a single stat by requesting it by name.
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @param name
+ * The stat name to retrieve
+ * @param[out] id
+ * If non-NULL, the numerical id of the stat will be returned, so that further
+ * requests for the stat can be made using rte_event_dev_xstats_get(), which
+ * will be faster as it doesn't need to scan a list of names for the stat.
+ * If the stat cannot be found, the id returned will be (unsigned)-1.
+ * @return
+ * - positive value or zero: the stat value
+ * - negative value: -EINVAL if stat not found, -ENOTSUP if not supported.
+ */
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+ unsigned int *id);
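+
+/* Illustrative sketch (not part of the original header): a one-shot lookup
+ * of a single counter by name; the "dev_sched_calls" name is hypothetical
+ * and PMD specific.
+ *
+ * @code
+ * unsigned int id;
+ * uint64_t calls = rte_event_dev_xstats_by_name_get(dev_id,
+ *                                                   "dev_sched_calls", &id);
+ * @endcode
+ */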
+
+/**
+ * Reset the values of the xstats of the selected component in the device.
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @param mode
+ * The mode of the statistics to reset. Choose from device, queue or port.
+ * @param queue_port_id
+ * The queue or port to reset. 0 and positive values select ports and queues,
+ * while -1 indicates all ports or queues.
+ * @param ids
+ * Selects specific statistics to be reset. When NULL, all statistics selected
+ * by *mode* will be reset. If non-NULL, must point to array of at least
+ * *nb_ids* size.
+ * @param nb_ids
+ * The number of ids available from the *ids* array. Ignored when ids is NULL.
+ * @return
+ * - zero: successfully reset the statistics to zero
+ * - negative value: -EINVAL invalid parameters, -ENOTSUP if not supported.
+ */
+int
+rte_event_dev_xstats_reset(uint8_t dev_id,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+/**
+ * Trigger the eventdev self test.
+ *
+ * @param dev_id
+ * The identifier of the device
+ * @return
+ * - 0: Selftest successful
+ * - -ENOTSUP if the device doesn't support selftest
+ * - other values < 0 on failure.
+ */
+int rte_event_dev_selftest(uint8_t dev_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h
new file mode 100644
index 00000000..3fbb4d2b
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd.h
@@ -0,0 +1,901 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
+ */
+
+#ifndef _RTE_EVENTDEV_PMD_H_
+#define _RTE_EVENTDEV_PMD_H_
+
+/** @file
+ * RTE Event PMD APIs
+ *
+ * @note
+ * These API are from event PMD only and user applications should not call
+ * them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_dev.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "rte_eventdev.h"
+#include "rte_event_timer_adapter_pmd.h"
+
+/* Logging Macros */
+#define RTE_EDEV_LOG_ERR(...) \
+ RTE_LOG(ERR, EVENTDEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
+#define RTE_EDEV_LOG_DEBUG(...) \
+ RTE_LOG(DEBUG, EVENTDEV, \
+ RTE_FMT("%s() line %u: " RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ __func__, __LINE__, RTE_FMT_TAIL(__VA_ARGS__,)))
+#else
+#define RTE_EDEV_LOG_DEBUG(...) (void)0
+#endif
+
+/* Macros to check for valid device */
+#define RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, retval) do { \
+ if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
+ return retval; \
+ } \
+} while (0)
+
+#define RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, errno, retval) do { \
+ if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
+ rte_errno = errno; \
+ return retval; \
+ } \
+} while (0)
+
+#define RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id) do { \
+ if (!rte_event_pmd_is_valid_dev((dev_id))) { \
+ RTE_EDEV_LOG_ERR("Invalid dev_id=%d\n", dev_id); \
+ return; \
+ } \
+} while (0)
+
+#define RTE_EVENT_ETH_RX_ADAPTER_SW_CAP \
+ ((RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) | \
+ (RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ))
+
+#define RTE_EVENT_CRYPTO_ADAPTER_SW_CAP \
+ RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA
+
+/**< Ethernet Rx adapter capabilities to return if the packet transfers from
+ * the ethdev to the eventdev use a SW service function
+ */
+
+#define RTE_EVENTDEV_DETACHED (0)
+#define RTE_EVENTDEV_ATTACHED (1)
+
+struct rte_eth_dev;
+
+/** Global structure used for maintaining state of allocated event devices */
+struct rte_eventdev_global {
+ uint8_t nb_devs; /**< Number of devices found */
+};
+
+extern struct rte_eventdev_global *rte_eventdev_globals;
+/** Pointer to global event devices data structure. */
+extern struct rte_eventdev *rte_eventdevs;
+/** The pool of rte_eventdev structures. */
+
+/**
+ * Get the rte_eventdev structure device pointer for the named device.
+ *
+ * @param name
+ * device name to select the device structure.
+ *
+ * @return
+ * - The rte_eventdev structure pointer for the given device ID.
+ */
+static inline struct rte_eventdev *
+rte_event_pmd_get_named_dev(const char *name)
+{
+ struct rte_eventdev *dev;
+ unsigned int i;
+
+ if (name == NULL)
+ return NULL;
+
+ for (i = 0; i < RTE_EVENT_MAX_DEVS; i++) {
+ dev = &rte_eventdevs[i];
+ if ((dev->attached == RTE_EVENTDEV_ATTACHED) &&
+ (strcmp(dev->data->name, name) == 0))
+ return dev;
+ }
+
+ return NULL;
+}
+
+/**
+ * Validate if the event device index refers to a valid, attached event device.
+ *
+ * @param dev_id
+ * Event device index.
+ *
+ * @return
+ * - If the device index is valid (1) or not (0).
+ */
+static inline unsigned
+rte_event_pmd_is_valid_dev(uint8_t dev_id)
+{
+ struct rte_eventdev *dev;
+
+ if (dev_id >= RTE_EVENT_MAX_DEVS)
+ return 0;
+
+ dev = &rte_eventdevs[dev_id];
+ if (dev->attached != RTE_EVENTDEV_ATTACHED)
+ return 0;
+ else
+ return 1;
+}
+
+/**
+ * Definitions of all functions exported by a driver through the
+ * generic structure of type *event_dev_ops* supplied in the
+ * *rte_eventdev* structure associated with a device.
+ */
+
+/**
+ * Get device information of a device.
+ *
+ * @param dev
+ * Event device pointer
+ * @param dev_info
+ * Event device information structure
+ */
+typedef void (*eventdev_info_get_t)(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info);
+
+/**
+ * Configure a device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @return
+ * Returns 0 on success
+ */
+typedef int (*eventdev_configure_t)(const struct rte_eventdev *dev);
+
+/**
+ * Start a configured device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @return
+ * Returns 0 on success
+ */
+typedef int (*eventdev_start_t)(struct rte_eventdev *dev);
+
+/**
+ * Stop a configured device.
+ *
+ * @param dev
+ * Event device pointer
+ */
+typedef void (*eventdev_stop_t)(struct rte_eventdev *dev);
+
+/**
+ * Close a configured device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @return
+ * - 0 on success
+ * - (-EAGAIN) if can't close as device is busy
+ */
+typedef int (*eventdev_close_t)(struct rte_eventdev *dev);
+
+/**
+ * Retrieve the default event queue configuration.
+ *
+ * @param dev
+ * Event device pointer
+ * @param queue_id
+ * Event queue index
+ * @param[out] queue_conf
+ * Event queue configuration structure
+ *
+ */
+typedef void (*eventdev_queue_default_conf_get_t)(struct rte_eventdev *dev,
+ uint8_t queue_id, struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Setup an event queue.
+ *
+ * @param dev
+ * Event device pointer
+ * @param queue_id
+ * Event queue index
+ * @param queue_conf
+ * Event queue configuration structure
+ *
+ * @return
+ * Returns 0 on success.
+ */
+typedef int (*eventdev_queue_setup_t)(struct rte_eventdev *dev,
+ uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf);
+
+/**
+ * Release resources allocated by given event queue.
+ *
+ * @param dev
+ * Event device pointer
+ * @param queue_id
+ * Event queue index
+ *
+ */
+typedef void (*eventdev_queue_release_t)(struct rte_eventdev *dev,
+ uint8_t queue_id);
+
+/**
+ * Retrieve the default event port configuration.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port_id
+ * Event port index
+ * @param[out] port_conf
+ * Event port configuration structure
+ *
+ */
+typedef void (*eventdev_port_default_conf_get_t)(struct rte_eventdev *dev,
+ uint8_t port_id, struct rte_event_port_conf *port_conf);
+
+/**
+ * Setup an event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port_id
+ * Event port index
+ * @param port_conf
+ * Event port configuration structure
+ *
+ * @return
+ * Returns 0 on success.
+ */
+typedef int (*eventdev_port_setup_t)(struct rte_eventdev *dev,
+ uint8_t port_id,
+ const struct rte_event_port_conf *port_conf);
+
+/**
+ * Release memory resources allocated by given event port.
+ *
+ * @param port
+ * Event port pointer
+ *
+ */
+typedef void (*eventdev_port_release_t)(void *port);
+
+/**
+ * Link multiple source event queues to destination event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port
+ * Event port pointer
+ * @param queues
+ * Points to an array of *nb_links* event queues to be linked
+ * to the event port.
+ * @param priorities
+ * Points to an array of *nb_links* service priorities associated with each
+ * event queue link to event port.
+ * @param nb_links
+ * The number of links to establish
+ *
+ * @return
+ * Returns 0 on success.
+ *
+ */
+typedef int (*eventdev_port_link_t)(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links);
+
+/**
+ * Unlink multiple source event queues from destination event port.
+ *
+ * @param dev
+ * Event device pointer
+ * @param port
+ * Event port pointer
+ * @param queues
+ * An array of *nb_unlinks* event queues to be unlinked from the event port.
+ * @param nb_unlinks
+ * The number of unlinks to establish
+ *
+ * @return
+ * Returns 0 on success.
+ *
+ */
+typedef int (*eventdev_port_unlink_t)(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks);
+
+/**
+ * Converts nanoseconds to *timeout_ticks* value for rte_event_dequeue_burst()
+ *
+ * @param dev
+ * Event device pointer
+ * @param ns
+ * Wait time in nanosecond
+ * @param[out] timeout_ticks
+ * Value for the *timeout_ticks* parameter in rte_event_dequeue_burst()
+ *
+ * @return
+ * Returns 0 on success.
+ *
+ */
+typedef int (*eventdev_dequeue_timeout_ticks_t)(struct rte_eventdev *dev,
+ uint64_t ns, uint64_t *timeout_ticks);
+
+/**
+ * Dump internal information
+ *
+ * @param dev
+ * Event device pointer
+ * @param f
+ * A pointer to a file for output
+ *
+ */
+typedef void (*eventdev_dump_t)(struct rte_eventdev *dev, FILE *f);
+
+/**
+ * Retrieve a set of statistics from device
+ *
+ * @param dev
+ * Event device pointer
+ * @param ids
+ * The stat ids to retrieve
+ * @param values
+ * The returned stat values
+ * @param n
+ * The number of id values and entries in the values array
+ * @return
+ * The number of stat values successfully filled into the values array
+ */
+typedef int (*eventdev_xstats_get_t)(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+
+/**
+ * Resets the statistic values in xstats for the device, based on mode.
+ */
+typedef int (*eventdev_xstats_reset_t)(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+/**
+ * Get names of extended stats of an event device
+ *
+ * @param dev
+ * Event device pointer
+ * @param xstats_names
+ * Array of name values to be filled in
+ * @param size
+ * Number of values in the xstats_names array
+ * @return
+ * When size >= the number of stats, return the number of stat values filled
+ * into the array.
+ * When size < the number of available stats, return the number of stat
+ * values, and do not fill in any data into xstats_names.
+ */
+typedef int (*eventdev_xstats_get_names_t)(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+
+/**
+ * Get the value of one stat and optionally return its id
+ *
+ * @param dev
+ * Event device pointer
+ * @param name
+ * The name of the stat to retrieve
+ * @param id
+ * Pointer to an unsigned int where we store the stat-id for future reference.
+ * This pointer may be null if the id is not required.
+ * @return
+ * The value of the stat, or (uint64_t)-1 if the stat is not found.
+ * If the stat is not found, the id value will be returned as (unsigned)-1,
+ * if the id pointer is non-NULL
+ */
+typedef uint64_t (*eventdev_xstats_get_by_name)(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+
+
+/**
+ * Retrieve the event device's ethdev Rx adapter capabilities for the
+ * specified ethernet port
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param[out] caps
+ * A pointer to memory filled with Rx event adapter capabilities.
+ *
+ * @return
+ * - 0: Success, driver provides Rx event adapter capabilities for the
+ * ethernet device.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_eth_rx_adapter_caps_get_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps);
+
+struct rte_event_eth_rx_adapter_queue_conf;
+
+/**
+ * Retrieve the event device's timer adapter capabilities, as well as the ops
+ * structure that an event timer adapter should call through to enter the
+ * driver
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param flags
+ * Flags that can be used to determine how to select an event timer
+ * adapter ops structure
+ *
+ * @param[out] caps
+ * A pointer to memory filled with event timer adapter capabilities.
+ *
+ * @param[out] ops
+ * A pointer to the ops pointer to set with the address of the desired ops
+ * structure
+ *
+ * @return
+ * - 0: Success, driver provides event timer adapter capabilities.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_timer_adapter_caps_get_t)(
+ const struct rte_eventdev *dev,
+ uint64_t flags,
+ uint32_t *caps,
+ const struct rte_event_timer_adapter_ops **ops);
+
+/**
+ * Add ethernet Rx queues to event device. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get(, eth_port_id)
+ * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index
+ *
+ * @param queue_conf
+ * Additional configuration structure
+ *
+ * @return
+ * - 0: Success, ethernet receive queue added successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_eth_rx_adapter_queue_add_t)(
+ const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+
+/**
+ * Delete ethernet Rx queues from event device. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get(, eth_port_id)
+ * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param rx_queue_id
+ * Ethernet device receive queue index
+ *
+ * @return
+ * - 0: Success, ethernet receive queue deleted successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_eth_rx_adapter_queue_del_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id);
+
+/**
+ * Start ethernet Rx adapter. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get(.., eth_port_id)
+ * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set and Rx queues
+ * from eth_port_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @return
+ * - 0: Success, ethernet Rx adapter started successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_eth_rx_adapter_start_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev);
+
+/**
+ * Stop ethernet Rx adapter. This callback is invoked if
+ * the caps returned from eventdev_eth_rx_adapter_caps_get(..,eth_port_id)
+ * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set and Rx queues
+ * from eth_port_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @return
+ * - 0: Success, ethernet Rx adapter stopped successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_eth_rx_adapter_stop_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev);
+
+struct rte_event_eth_rx_adapter_stats;
+
+/**
+ * Retrieve ethernet Rx adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @param[out] stats
+ * Pointer to stats structure
+ *
+ * @return
+ * Return 0 on success.
+ */
+
+typedef int (*eventdev_eth_rx_adapter_stats_get)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ struct rte_event_eth_rx_adapter_stats *stats);
+/**
+ * Reset ethernet Rx adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param eth_dev
+ * Ethernet device pointer
+ *
+ * @return
+ * Return 0 on success.
+ */
+typedef int (*eventdev_eth_rx_adapter_stats_reset)
+ (const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev);
+/**
+ * Start eventdev selftest.
+ *
+ * @return
+ * Return 0 on success.
+ */
+typedef int (*eventdev_selftest)(void);
+
+
+struct rte_cryptodev;
+
+/**
+ * This API may change without prior notice
+ *
+ * Retrieve the event device's crypto adapter capabilities for the
+ * specified cryptodev
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * cryptodev pointer
+ *
+ * @param[out] caps
+ * A pointer to memory filled with event adapter capabilities.
+ * It is expected to be pre-allocated & initialized by caller.
+ *
+ * @return
+ * - 0: Success, driver provides event adapter capabilities for the
+ * cryptodev.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_crypto_adapter_caps_get_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps);
+
+/**
+ * This API may change without prior notice
+ *
+ * Add crypto queue pair to event device. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(, cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * cryptodev pointer
+ *
+ * @param queue_pair_id
+ * cryptodev queue pair identifier.
+ *
+ * @param event
+ * Event information required for binding cryptodev queue pair to event queue.
+ * This structure will have a valid value for only those HW PMDs supporting
+ * @see RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND capability.
+ *
+ * @return
+ * - 0: Success, cryptodev queue pair added successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_crypto_adapter_queue_pair_add_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id,
+ const struct rte_event *event);
+
+
+/**
+ * This API may change without prior notice
+ *
+ * Delete crypto queue pair from event device. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(, cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * cryptodev pointer
+ *
+ * @param queue_pair_id
+ * cryptodev queue pair identifier.
+ *
+ * @return
+ * - 0: Success, cryptodev queue pair deleted successfully.
+ * - <0: Error code returned by the driver function.
+ *
+ */
+typedef int (*eventdev_crypto_adapter_queue_pair_del_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ int32_t queue_pair_id);
+
+/**
+ * Start crypto adapter. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(.., cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set and queue pairs
+ * from cdev_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @return
+ * - 0: Success, crypto adapter started successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_crypto_adapter_start_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev);
+
+/**
+ * Stop crypto adapter. This callback is invoked if
+ * the caps returned from rte_event_crypto_adapter_caps_get(.., cdev_id)
+ * has RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_* set and queue pairs
+ * from cdev_id have been added to the event device.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @return
+ * - 0: Success, crypto adapter stopped successfully.
+ * - <0: Error code returned by the driver function.
+ */
+typedef int (*eventdev_crypto_adapter_stop_t)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev);
+
+struct rte_event_crypto_adapter_stats;
+
+/**
+ * Retrieve crypto adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @param[out] stats
+ * Pointer to stats structure
+ *
+ * @return
+ * Return 0 on success.
+ */
+
+typedef int (*eventdev_crypto_adapter_stats_get)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ struct rte_event_crypto_adapter_stats *stats);
+
+/**
+ * Reset crypto adapter statistics.
+ *
+ * @param dev
+ * Event device pointer
+ *
+ * @param cdev
+ * Crypto device pointer
+ *
+ * @return
+ * Return 0 on success.
+ */
+
+typedef int (*eventdev_crypto_adapter_stats_reset)
+ (const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev);
+
+/** Event device operations function pointer table */
+struct rte_eventdev_ops {
+ eventdev_info_get_t dev_infos_get; /**< Get device info. */
+ eventdev_configure_t dev_configure; /**< Configure device. */
+ eventdev_start_t dev_start; /**< Start device. */
+ eventdev_stop_t dev_stop; /**< Stop device. */
+ eventdev_close_t dev_close; /**< Close device. */
+
+ eventdev_queue_default_conf_get_t queue_def_conf;
+ /**< Get default queue configuration. */
+ eventdev_queue_setup_t queue_setup;
+ /**< Set up an event queue. */
+ eventdev_queue_release_t queue_release;
+ /**< Release an event queue. */
+
+ eventdev_port_default_conf_get_t port_def_conf;
+ /**< Get default port configuration. */
+ eventdev_port_setup_t port_setup;
+ /**< Set up an event port. */
+ eventdev_port_release_t port_release;
+ /**< Release an event port. */
+
+ eventdev_port_link_t port_link;
+ /**< Link event queues to an event port. */
+ eventdev_port_unlink_t port_unlink;
+ /**< Unlink event queues from an event port. */
+ eventdev_dequeue_timeout_ticks_t timeout_ticks;
+	/**< Converts ns to *timeout_ticks* value for rte_event_dequeue_burst() */
+ eventdev_dump_t dump;
+	/**< Dump internal information */
+
+ eventdev_xstats_get_t xstats_get;
+ /**< Get extended device statistics. */
+ eventdev_xstats_get_names_t xstats_get_names;
+ /**< Get names of extended stats. */
+ eventdev_xstats_get_by_name xstats_get_by_name;
+ /**< Get one value by name. */
+ eventdev_xstats_reset_t xstats_reset;
+ /**< Reset the statistics values in xstats. */
+
+ eventdev_eth_rx_adapter_caps_get_t eth_rx_adapter_caps_get;
+ /**< Get ethernet Rx adapter capabilities */
+ eventdev_eth_rx_adapter_queue_add_t eth_rx_adapter_queue_add;
+ /**< Add Rx queues to ethernet Rx adapter */
+ eventdev_eth_rx_adapter_queue_del_t eth_rx_adapter_queue_del;
+ /**< Delete Rx queues from ethernet Rx adapter */
+ eventdev_eth_rx_adapter_start_t eth_rx_adapter_start;
+ /**< Start ethernet Rx adapter */
+ eventdev_eth_rx_adapter_stop_t eth_rx_adapter_stop;
+ /**< Stop ethernet Rx adapter */
+ eventdev_eth_rx_adapter_stats_get eth_rx_adapter_stats_get;
+ /**< Get ethernet Rx stats */
+ eventdev_eth_rx_adapter_stats_reset eth_rx_adapter_stats_reset;
+ /**< Reset ethernet Rx stats */
+
+ eventdev_timer_adapter_caps_get_t timer_adapter_caps_get;
+ /**< Get timer adapter capabilities */
+
+ eventdev_crypto_adapter_caps_get_t crypto_adapter_caps_get;
+ /**< Get crypto adapter capabilities */
+ eventdev_crypto_adapter_queue_pair_add_t crypto_adapter_queue_pair_add;
+ /**< Add queue pair to crypto adapter */
+ eventdev_crypto_adapter_queue_pair_del_t crypto_adapter_queue_pair_del;
+ /**< Delete queue pair from crypto adapter */
+ eventdev_crypto_adapter_start_t crypto_adapter_start;
+ /**< Start crypto adapter */
+ eventdev_crypto_adapter_stop_t crypto_adapter_stop;
+ /**< Stop crypto adapter */
+ eventdev_crypto_adapter_stats_get crypto_adapter_stats_get;
+ /**< Get crypto stats */
+ eventdev_crypto_adapter_stats_reset crypto_adapter_stats_reset;
+ /**< Reset crypto stats */
+
+ eventdev_selftest dev_selftest;
+ /**< Start eventdev Selftest */
+
+ eventdev_stop_flush_t dev_stop_flush;
+ /**< User-provided event flush function */
+};
+
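A PMD publishes its implementations through a single static instance of this table, assigned to `rte_eventdev.dev_ops` during probe. A sketch continuing the hypothetical `my_` PMD above (the core lifecycle callbacks are assumed to be defined elsewhere):

```c
static struct rte_eventdev_ops my_eventdev_ops = {
	/* Core lifecycle hooks, assumed implemented elsewhere. */
	.dev_infos_get = my_info_get,
	.dev_configure = my_configure,
	.dev_start = my_dev_start,
	.dev_stop = my_dev_stop,
	.dev_close = my_dev_close,

	/* Crypto adapter hooks from the earlier sketch. Members left
	 * unset stay NULL and are treated as unsupported.
	 */
	.crypto_adapter_queue_pair_del = my_crypto_adapter_qp_del,
	.crypto_adapter_start = my_crypto_adapter_start,
	.crypto_adapter_stats_get = my_crypto_adapter_stats_get,
};
```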
+/**
+ * Allocates a new eventdev slot for an event device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param name
+ * Unique identifier name for each device
+ * @param socket_id
+ * Socket to allocate resources on.
+ * @return
+ *   - Slot in the *rte_eventdevs* array for a new device.
+ *   - NULL if no free slot is available or allocation fails.
+ */
+struct rte_eventdev *
+rte_event_pmd_allocate(const char *name, int socket_id);
+
+/**
+ * Release the specified eventdev device.
+ *
+ * @param eventdev
+ * The *eventdev* pointer is the address of the *rte_eventdev* structure.
+ * @return
+ * - 0 on success, negative on error
+ */
+int
+rte_event_pmd_release(struct rte_eventdev *eventdev);
+
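These two calls bracket a PMD's lifetime, and the PCI and vdev wrappers below are thin layers over them. The bare pattern looks roughly as follows; the device name, the ops table, and both functions are hypothetical, with error handling reduced to the essentials:

```c
#include <errno.h>
#include <rte_lcore.h>
#include "rte_eventdev_pmd.h"

static int
my_bare_probe(void)
{
	struct rte_eventdev *dev;

	/* Claim a slot in the eventdev array on the caller's socket. */
	dev = rte_event_pmd_allocate("event_my0", rte_socket_id());
	if (dev == NULL)
		return -ENOMEM;

	dev->dev_ops = &my_eventdev_ops;	/* table from the sketch above */
	return 0;
}

static int
my_bare_remove(struct rte_eventdev *dev)
{
	/* Give the slot back; fails if dev is NULL or not attached. */
	return rte_event_pmd_release(dev);
}
```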
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_PMD_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd_pci.h b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd_pci.h
new file mode 100644
index 00000000..8fb61386
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd_pci.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Cavium, Inc
+ */
+
+#ifndef _RTE_EVENTDEV_PMD_PCI_H_
+#define _RTE_EVENTDEV_PMD_PCI_H_
+
+/** @file
+ * RTE Eventdev PCI PMD APIs
+ *
+ * @note
+ * These APIs are for use by event PCI PMDs only; user applications should
+ * not call them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_config.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include "rte_eventdev_pmd.h"
+
+typedef int (*eventdev_pmd_pci_callback_t)(struct rte_eventdev *dev);
+
+/**
+ * @internal
+ * Wrapper for use by PCI drivers as a .probe function to attach to an event
+ * interface.
+ */
+static inline int
+rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev,
+ size_t private_data_size,
+ eventdev_pmd_pci_callback_t devinit)
+{
+ struct rte_eventdev *eventdev;
+
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+
+ int retval;
+
+ if (devinit == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_allocate(eventdev_name,
+ pci_dev->device.numa_node);
+ if (eventdev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket(
+ "eventdev private structure",
+ private_data_size,
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (eventdev->data->dev_private == NULL)
+			rte_panic("Cannot allocate memory for private "
+				"device data");
+ }
+
+ eventdev->dev = &pci_dev->device;
+
+ /* Invoke PMD device initialization function */
+ retval = devinit(eventdev);
+ if (retval == 0)
+ return 0;
+
+ RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
+ " failed", pci_drv->driver.name,
+ (unsigned int) pci_dev->id.vendor_id,
+ (unsigned int) pci_dev->id.device_id);
+
+ rte_event_pmd_release(eventdev);
+
+ return -ENXIO;
+}
+
+/**
+ * @internal
+ * Wrapper for use by PCI drivers as a .remove function to detach an event
+ * interface.
+ */
+static inline int
+rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev,
+ eventdev_pmd_pci_callback_t devuninit)
+{
+ struct rte_eventdev *eventdev;
+ char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
+ int ret = 0;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, eventdev_name,
+ sizeof(eventdev_name));
+
+ eventdev = rte_event_pmd_get_named_dev(eventdev_name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ ret = rte_event_dev_close(eventdev->data->dev_id);
+ if (ret < 0)
+ return ret;
+
+ /* Invoke PMD device un-init function */
+ if (devuninit)
+ ret = devuninit(eventdev);
+ if (ret)
+ return ret;
+
+	/* Clear the PCI back-reference before the slot is released */
+	eventdev->dev = NULL;
+
+	/* Free event device */
+	rte_event_pmd_release(eventdev);
+
+ return 0;
+}
+
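A PCI event PMD typically uses these two wrappers directly as its bus callbacks. A sketch, reusing the hypothetical `my_` symbols from the earlier examples; the vendor and device IDs are placeholders:

```c
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include "rte_eventdev_pmd_pci.h"

#define MY_PCI_VENDOR_ID 0x0001	/* hypothetical */
#define MY_PCI_DEVICE_ID 0x0002	/* hypothetical */

static const struct rte_pci_id my_pci_ids[] = {
	{ RTE_PCI_DEVICE(MY_PCI_VENDOR_ID, MY_PCI_DEVICE_ID) },
	{ .vendor_id = 0 },	/* sentinel */
};

/* devinit callback: runs after the common code has allocated the device
 * slot and private data; wire in the ops table here.
 */
static int
my_eventdev_init(struct rte_eventdev *dev)
{
	dev->dev_ops = &my_eventdev_ops;
	return 0;
}

static int
my_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_probe(pci_drv, pci_dev,
				       sizeof(struct my_evdev_priv),
				       my_eventdev_init);
}

static int
my_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, NULL);
}

static struct rte_pci_driver my_pci_driver = {
	.id_table = my_pci_ids,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = my_pci_probe,
	.remove = my_pci_remove,
};

RTE_PMD_REGISTER_PCI(event_my_pci, my_pci_driver);
RTE_PMD_REGISTER_PCI_TABLE(event_my_pci, my_pci_ids);
```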
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_PMD_PCI_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd_vdev.h b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
new file mode 100644
index 00000000..8c64a067
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_pmd_vdev.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016-2017 Cavium, Inc
+ */
+
+#ifndef _RTE_EVENTDEV_PMD_VDEV_H_
+#define _RTE_EVENTDEV_PMD_VDEV_H_
+
+/** @file
+ * RTE Eventdev VDEV PMD APIs
+ *
+ * @note
+ * These APIs are for use by event VDEV PMDs only; user applications should
+ * not call them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_config.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_bus_vdev.h>
+
+#include "rte_eventdev_pmd.h"
+
+/**
+ * @internal
+ * Creates a new virtual event device and returns the pointer to that device.
+ *
+ * @param name
+ * PMD type name
+ * @param dev_private_size
+ *   Size of the event PMD's private data
+ * @param socket_id
+ * Socket to allocate resources on.
+ *
+ * @return
+ * - Eventdev pointer if device is successfully created.
+ * - NULL if device cannot be created.
+ */
+static inline struct rte_eventdev *
+rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
+ int socket_id)
+{
+ struct rte_eventdev *eventdev;
+
+ /* Allocate device structure */
+ eventdev = rte_event_pmd_allocate(name, socket_id);
+ if (eventdev == NULL)
+ return NULL;
+
+ /* Allocate private device structure */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eventdev->data->dev_private =
+ rte_zmalloc_socket("eventdev device private",
+ dev_private_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (eventdev->data->dev_private == NULL)
+			rte_panic("Cannot allocate memory for private device"
+					" data");
+ }
+
+ return eventdev;
+}
+
+/**
+ * @internal
+ * Destroy the given virtual event device
+ *
+ * @param name
+ * PMD type name
+ * @return
+ * - 0 on success, negative on error
+ */
+static inline int
+rte_event_pmd_vdev_uninit(const char *name)
+{
+ int ret;
+ struct rte_eventdev *eventdev;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ eventdev = rte_event_pmd_get_named_dev(name);
+ if (eventdev == NULL)
+ return -ENODEV;
+
+ ret = rte_event_dev_close(eventdev->data->dev_id);
+ if (ret < 0)
+ return ret;
+
+ /* Free the event device */
+ rte_event_pmd_release(eventdev);
+
+ return 0;
+}
+
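A virtual event PMD is built the same way on the two helpers above. A sketch with a hypothetical driver name, again reusing the invented `my_` ops table and private-data struct:

```c
#include <errno.h>
#include <rte_bus_vdev.h>
#include <rte_lcore.h>
#include "rte_eventdev_pmd_vdev.h"

static int
my_vdev_probe(struct rte_vdev_device *vdev)
{
	struct rte_eventdev *dev;

	/* Allocate the device slot and private data in one call. */
	dev = rte_event_pmd_vdev_init(rte_vdev_device_name(vdev),
				      sizeof(struct my_evdev_priv),
				      rte_socket_id());
	if (dev == NULL)
		return -ENOMEM;

	dev->dev_ops = &my_eventdev_ops;	/* hypothetical ops table */
	return 0;
}

static int
my_vdev_remove(struct rte_vdev_device *vdev)
{
	return rte_event_pmd_vdev_uninit(rte_vdev_device_name(vdev));
}

static struct rte_vdev_driver my_vdev_driver = {
	.probe = my_vdev_probe,
	.remove = my_vdev_remove,
};

RTE_PMD_REGISTER_VDEV(event_my_sw, my_vdev_driver);
```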
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EVENTDEV_PMD_VDEV_H_ */
diff --git a/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_version.map b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_version.map
new file mode 100644
index 00000000..12835e9f
--- /dev/null
+++ b/src/spdk/dpdk/lib/librte_eventdev/rte_eventdev_version.map
@@ -0,0 +1,113 @@
+DPDK_17.05 {
+ global:
+
+ rte_eventdevs;
+
+ rte_event_dev_count;
+ rte_event_dev_get_dev_id;
+ rte_event_dev_socket_id;
+ rte_event_dev_info_get;
+ rte_event_dev_configure;
+ rte_event_dev_start;
+ rte_event_dev_stop;
+ rte_event_dev_close;
+ rte_event_dev_dump;
+ rte_event_dev_xstats_by_name_get;
+ rte_event_dev_xstats_get;
+ rte_event_dev_xstats_names_get;
+ rte_event_dev_xstats_reset;
+
+ rte_event_port_default_conf_get;
+ rte_event_port_setup;
+ rte_event_port_link;
+ rte_event_port_unlink;
+ rte_event_port_links_get;
+
+ rte_event_queue_default_conf_get;
+ rte_event_queue_setup;
+
+ rte_event_dequeue_timeout_ticks;
+
+ rte_event_pmd_allocate;
+ rte_event_pmd_release;
+ rte_event_pmd_vdev_init;
+ rte_event_pmd_vdev_uninit;
+ rte_event_pmd_pci_probe;
+ rte_event_pmd_pci_remove;
+
+ local: *;
+};
+
+DPDK_17.08 {
+ global:
+
+ rte_event_ring_create;
+ rte_event_ring_free;
+ rte_event_ring_init;
+ rte_event_ring_lookup;
+} DPDK_17.05;
+
+DPDK_17.11 {
+ global:
+
+ rte_event_dev_attr_get;
+ rte_event_dev_service_id_get;
+ rte_event_port_attr_get;
+ rte_event_queue_attr_get;
+
+ rte_event_eth_rx_adapter_caps_get;
+ rte_event_eth_rx_adapter_create;
+ rte_event_eth_rx_adapter_create_ext;
+ rte_event_eth_rx_adapter_free;
+ rte_event_eth_rx_adapter_queue_add;
+ rte_event_eth_rx_adapter_queue_del;
+ rte_event_eth_rx_adapter_service_id_get;
+ rte_event_eth_rx_adapter_start;
+ rte_event_eth_rx_adapter_stats_get;
+ rte_event_eth_rx_adapter_stats_reset;
+ rte_event_eth_rx_adapter_stop;
+} DPDK_17.08;
+
+DPDK_18.02 {
+ global:
+
+ rte_event_dev_selftest;
+} DPDK_17.11;
+
+DPDK_18.05 {
+ global:
+
+ rte_event_dev_stop_flush_callback_register;
+} DPDK_18.02;
+
+EXPERIMENTAL {
+ global:
+
+ rte_event_crypto_adapter_caps_get;
+ rte_event_crypto_adapter_create;
+ rte_event_crypto_adapter_create_ext;
+ rte_event_crypto_adapter_event_port_get;
+ rte_event_crypto_adapter_free;
+ rte_event_crypto_adapter_queue_pair_add;
+ rte_event_crypto_adapter_queue_pair_del;
+ rte_event_crypto_adapter_service_id_get;
+ rte_event_crypto_adapter_start;
+ rte_event_crypto_adapter_stats_get;
+ rte_event_crypto_adapter_stats_reset;
+ rte_event_crypto_adapter_stop;
+ rte_event_eth_rx_adapter_cb_register;
+ rte_event_timer_adapter_caps_get;
+ rte_event_timer_adapter_create;
+ rte_event_timer_adapter_create_ext;
+ rte_event_timer_adapter_free;
+ rte_event_timer_adapter_get_info;
+ rte_event_timer_adapter_lookup;
+ rte_event_timer_adapter_service_id_get;
+ rte_event_timer_adapter_start;
+ rte_event_timer_adapter_stats_get;
+ rte_event_timer_adapter_stats_reset;
+ rte_event_timer_adapter_stop;
+ rte_event_timer_arm_burst;
+ rte_event_timer_arm_tmo_tick_burst;
+ rte_event_timer_cancel_burst;
+};
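The EXPERIMENTAL block at the end carries no release tag: those symbols may change or disappear between releases, and callers must opt in at build time (the library itself does this via -DALLOW_EXPERIMENTAL_API in its Makefile; without the define, the compiler flags each call as using an unstable API). A minimal application-side sketch, assuming an already-configured event device 0 and an arbitrary adapter id of 0:

```c
/* Opt in to experimental symbols before including DPDK headers. */
#define ALLOW_EXPERIMENTAL_API
#include <rte_event_crypto_adapter.h>

static int
setup_crypto_adapter(void)
{
	struct rte_event_port_conf conf = {
		.new_event_threshold = 4096,
		.dequeue_depth = 16,
		.enqueue_depth = 16,
	};

	/* Adapter 0 on event device 0; crypto completions are injected
	 * as new events (OP_NEW mode). Queue pairs would be bound
	 * afterwards with rte_event_crypto_adapter_queue_pair_add().
	 */
	return rte_event_crypto_adapter_create(0, 0, &conf,
			RTE_EVENT_CRYPTO_ADAPTER_OP_NEW);
}
```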