path: root/src/seastar/dpdk/drivers/event

Diffstat:
-rw-r--r--  src/seastar/dpdk/drivers/event/Makefile | 43
-rw-r--r--  src/seastar/dpdk/drivers/event/octeontx/Makefile | 70
-rw-r--r--  src/seastar/dpdk/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h | 61
-rw-r--r--  src/seastar/dpdk/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map | 9
-rw-r--r--  src/seastar/dpdk/drivers/event/octeontx/ssovf_evdev.c | 580
-rw-r--r--  src/seastar/dpdk/drivers/event/octeontx/ssovf_evdev.h | 203
-rw-r--r--  src/seastar/dpdk/drivers/event/octeontx/ssovf_mbox.c | 232
-rw-r--r--  src/seastar/dpdk/drivers/event/octeontx/ssovf_probe.c | 288
-rw-r--r--  src/seastar/dpdk/drivers/event/octeontx/ssovf_worker.c | 252
-rw-r--r--  src/seastar/dpdk/drivers/event/octeontx/ssovf_worker.h | 139
-rw-r--r--  src/seastar/dpdk/drivers/event/skeleton/Makefile | 51
-rw-r--r--  src/seastar/dpdk/drivers/event/skeleton/rte_pmd_skeleton_event_version.map | 4
-rw-r--r--  src/seastar/dpdk/drivers/event/skeleton/skeleton_eventdev.c | 497
-rw-r--r--  src/seastar/dpdk/drivers/event/skeleton/skeleton_eventdev.h | 68
-rw-r--r--  src/seastar/dpdk/drivers/event/sw/Makefile | 62
-rw-r--r--  src/seastar/dpdk/drivers/event/sw/event_ring.h | 185
-rw-r--r--  src/seastar/dpdk/drivers/event/sw/iq_ring.h | 176
-rw-r--r--  src/seastar/dpdk/drivers/event/sw/rte_pmd_evdev_sw_version.map | 3
-rw-r--r--  src/seastar/dpdk/drivers/event/sw/sw_evdev.c | 833
-rw-r--r--  src/seastar/dpdk/drivers/event/sw/sw_evdev.h | 318
-rw-r--r--  src/seastar/dpdk/drivers/event/sw/sw_evdev_scheduler.c | 601
-rw-r--r--  src/seastar/dpdk/drivers/event/sw/sw_evdev_worker.c | 185
-rw-r--r--  src/seastar/dpdk/drivers/event/sw/sw_evdev_xstats.c | 674
23 files changed, 5534 insertions, 0 deletions
diff --git a/src/seastar/dpdk/drivers/event/Makefile b/src/seastar/dpdk/drivers/event/Makefile
new file mode 100644
index 00000000..1cf389e8
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/Makefile
@@ -0,0 +1,43 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Cavium networks. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+core-libs := librte_eal librte_eventdev
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton
+DEPDIRS-skeleton = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw
+DEPDIRS-sw = $(core-libs) librte_kvargs librte_ring
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += octeontx
+DEPDIRS-octeontx = $(core-libs)
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/src/seastar/dpdk/drivers/event/octeontx/Makefile b/src/seastar/dpdk/drivers/event/octeontx/Makefile
new file mode 100644
index 00000000..aca3d095
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/octeontx/Makefile
@@ -0,0 +1,70 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium Networks. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium Networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_octeontx_ssovf.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_probe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF) += ssovf_mbox.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_ssovf_worker.o += -fno-prefetch-loop-arrays
+
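+# -Ofast was introduced in GCC 4.6 and is roughly -O3 plus -ffast-math,
+# so older GCC versions get the equivalent flags spelled out below.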
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_ssovf_worker.o += -Ofast
+else
+CFLAGS_ssovf_worker.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_ssovf_worker.o += -Ofast
+endif
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF)-include := rte_pmd_octeontx_ssovf.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h b/src/seastar/dpdk/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
new file mode 100644
index 00000000..3da7cfdd
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/octeontx/rte_pmd_octeontx_ssovf.h
@@ -0,0 +1,61 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RTE_PMD_OCTEONTX_SSOVF_H__
+#define __RTE_PMD_OCTEONTX_SSOVF_H__
+
+#include <rte_common.h>
+
+struct octeontx_ssovf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_ssovfs; /* Total sso groups available in domain */
+ uint8_t total_ssowvfs;/* Total sso hws available in domain */
+};
+
+enum octeontx_ssovf_type {
+ OCTEONTX_SSO_GROUP, /* SSO group vf */
+ OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
+};
+
+struct octeontx_mbox_hdr {
+ uint16_t vfid; /* VF index or pf resource index local to the domain */
+ uint8_t coproc; /* Coprocessor id */
+ uint8_t msg; /* Message id */
+ uint8_t res_code; /* Functional layer response code */
+};
+
+int octeontx_ssovf_info(struct octeontx_ssovf_info *info);
+void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar);
+int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr,
+ void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
+
+#endif /* __RTE_PMD_OCTEONTX_SSOVF_H__ */
diff --git a/src/seastar/dpdk/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map b/src/seastar/dpdk/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
new file mode 100644
index 00000000..3810a03f
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/octeontx/rte_pmd_octeontx_ssovf_version.map
@@ -0,0 +1,9 @@
+DPDK_17.05 {
+ global:
+
+ octeontx_ssovf_info;
+ octeontx_ssovf_bar;
+ octeontx_ssovf_mbox_send;
+
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/event/octeontx/ssovf_evdev.c b/src/seastar/dpdk/drivers/event/octeontx/ssovf_evdev.c
new file mode 100644
index 00000000..c80a4437
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/octeontx/ssovf_evdev.c
@@ -0,0 +1,580 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_vdev.h>
+
+#include "ssovf_evdev.h"
+
+/* SSOPF Mailbox messages */
+
+struct ssovf_mbox_dev_info {
+ uint64_t min_deq_timeout_ns;
+ uint64_t max_deq_timeout_ns;
+ uint32_t max_num_events;
+};
+
+static int
+ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ uint16_t len = sizeof(struct ssovf_mbox_dev_info);
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_GET_DEV_INFO;
+ hdr.vfid = 0;
+
+ memset(info, 0, len);
+ return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
+}
+
+struct ssovf_mbox_getwork_wait {
+ uint64_t wait_ns;
+};
+
+static int
+ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_getwork_wait tmo_set;
+ uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_SET_GETWORK_WAIT;
+ hdr.vfid = 0;
+
+ tmo_set.wait_ns = timeout_ns;
+ ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
+ if (ret)
+ ssovf_log_err("Failed to set getwork timeout(%d)", ret);
+
+ return ret;
+}
+
+struct ssovf_mbox_grp_pri {
+ uint8_t wgt_left; /* Read only */
+ uint8_t weight;
+ uint8_t affinity;
+ uint8_t priority;
+};
+
+static int
+ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_grp_pri grp;
+ uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_GRP_SET_PRIORITY;
+ hdr.vfid = queue;
+
+ grp.weight = 0xff;
+ grp.affinity = 0xff;
+ grp.priority = prio / 32; /* Normalize to 0 to 7 */
+
+ ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
+ if (ret)
+ ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);
+
+ return ret;
+}
+
+struct ssovf_mbox_convert_ns_getworks_iter {
+ uint64_t wait_ns;
+ uint32_t getwork_iter;/* Get_work iterations for the given wait_ns */
+};
+
+static int
+ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
+{
+ struct octeontx_mbox_hdr hdr = {0};
+ struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
+ uint16_t len = sizeof(ns2iter);
+ int ret;
+
+ hdr.coproc = SSO_COPROC;
+ hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
+ hdr.vfid = 0;
+
+ memset(&ns2iter, 0, len);
+ ns2iter.wait_ns = ns;
+ ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
+ if (ret < 0 || (ret != len)) {
+ ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
+ return -EIO;
+ }
+
+ *tmo_ticks = ns2iter.getwork_iter;
+ return 0;
+}
+
+static void
+ssovf_fastpath_fns_set(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev->schedule = NULL;
+ dev->enqueue = ssows_enq;
+ dev->enqueue_burst = ssows_enq_burst;
+ dev->dequeue = ssows_deq;
+ dev->dequeue_burst = ssows_deq_burst;
+
+ if (edev->is_timeout_deq) {
+ dev->dequeue = ssows_deq_timeout;
+ dev->dequeue_burst = ssows_deq_timeout_burst;
+ }
+}
+
+static void
+ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
+ dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
+ dev_info->max_event_queues = edev->max_event_queues;
+ dev_info->max_event_queue_flows = (1ULL << 20);
+ dev_info->max_event_queue_priority_levels = 8;
+ dev_info->max_event_priority_levels = 1;
+ dev_info->max_event_ports = edev->max_event_ports;
+ dev_info->max_event_port_dequeue_depth = 1;
+ dev_info->max_event_port_enqueue_depth = 1;
+ dev_info->max_num_events = edev->max_num_events;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
+}
+
+static int
+ssovf_configure(const struct rte_eventdev *dev)
+{
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint64_t deq_tmo_ns;
+
+ ssovf_func_trace();
+ deq_tmo_ns = conf->dequeue_timeout_ns;
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ edev->is_timeout_deq = 1;
+ deq_tmo_ns = edev->min_deq_timeout_ns;
+ }
+ edev->nb_event_queues = conf->nb_event_queues;
+ edev->nb_event_ports = conf->nb_event_ports;
+
+ return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
+}
+
+static void
+ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = (1ULL << 20);
+ queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ RTE_SET_USED(dev);
+ ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);
+
+ return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
+}
+
+static void
+ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ RTE_SET_USED(port_id);
+ port_conf->new_event_threshold = edev->max_num_events;
+ port_conf->dequeue_depth = 1;
+ port_conf->enqueue_depth = 1;
+}
+
+static void
+ssovf_port_release(void *port)
+{
+ rte_free(port);
+}
+
+static int
+ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct ssows *ws;
+ uint32_t reg_off;
+ uint8_t q;
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+
+ ssovf_func_trace("port=%d", port_id);
+ RTE_SET_USED(port_conf);
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->ports[port_id] != NULL) {
+ ssovf_port_release(dev->data->ports[port_id]);
+ dev->data->ports[port_id] = NULL;
+ }
+
+ /* Allocate event port memory */
+ ws = rte_zmalloc_socket("eventdev ssows",
+ sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
+ dev->data->socket_id);
+ if (ws == NULL) {
+ ssovf_log_err("Failed to alloc memory for port=%d", port_id);
+ return -ENOMEM;
+ }
+
+ ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
+ if (ws->base == NULL) {
+ rte_free(ws);
+ ssovf_log_err("Failed to get hws base addr port=%d", port_id);
+ return -EINVAL;
+ }
+
+ reg_off = SSOW_VHWS_OP_GET_WORK0;
+ reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
+ reg_off |= 1 << 16; /* Wait */
+ ws->getwork = ws->base + reg_off;
+ ws->port = port_id;
+
+ for (q = 0; q < edev->nb_event_queues; q++) {
+ ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
+ if (ws->grps[q] == NULL) {
+ rte_free(ws);
+ ssovf_log_err("Failed to get grp%d base addr", q);
+ return -EINVAL;
+ }
+ }
+
+ dev->data->ports[port_id] = ws;
+ ssovf_log_dbg("port=%d ws=%p", port_id, ws);
+ return 0;
+}
+
+static int
+ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t nb_links)
+{
+ uint16_t link;
+ uint64_t val;
+ struct ssows *ws = port;
+
+ ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
+ RTE_SET_USED(dev);
+ RTE_SET_USED(priorities);
+
+ for (link = 0; link < nb_links; link++) {
+ val = queues[link];
+ val |= (1ULL << 24); /* Set membership */
+ ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
+ }
+ return (int)nb_links;
+}
+
+static int
+ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ uint16_t unlink;
+ uint64_t val;
+ struct ssows *ws = port;
+
+ ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
+ RTE_SET_USED(dev);
+
+ for (unlink = 0; unlink < nb_unlinks; unlink++) {
+ val = queues[unlink];
+ val &= ~(1ULL << 24); /* Clear membership */
+ ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
+ }
+ return (int)nb_unlinks;
+}
+
+static int
+ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
+{
+ RTE_SET_USED(dev);
+
+ return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
+}
+
+static void
+ssows_dump(struct ssows *ws, FILE *f)
+{
+ uint8_t *base = ws->base;
+ uint64_t val;
+
+ fprintf(f, "\t---------------port%d---------------\n", ws->port);
+ val = ssovf_read64(base + SSOW_VHWS_TAG);
+ fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
+ (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
+ (int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
+ (int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
+ (int)(val >> 63) & 0x1);
+
+ val = ssovf_read64(base + SSOW_VHWS_WQP);
+ fprintf(f, "\twqp=0x%"PRIx64"\n", val);
+
+ val = ssovf_read64(base + SSOW_VHWS_LINKS);
+ fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
+ (int)(val & 0x3ff), (int)(val >> 10) & 0x1,
+ (int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
+ (int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);
+
+ val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
+ fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
+ (uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
+ (int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
+ (int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
+ (int)(val >> 63) & 0x1);
+
+ val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
+ fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
+}
+
+static void
+ssovf_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint8_t port;
+
+ /* Dump SSOWVF debug registers */
+ for (port = 0; port < edev->nb_event_ports; port++)
+ ssows_dump(dev->data->ports[port], f);
+}
+
+static int
+ssovf_start(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ struct ssows *ws;
+ uint8_t *base;
+ uint8_t i;
+
+ ssovf_func_trace();
+ for (i = 0; i < edev->nb_event_ports; i++) {
+ ws = dev->data->ports[i];
+ ssows_reset(ws);
+ ws->swtag_req = 0;
+ }
+
+ for (i = 0; i < edev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssows_flush_events(dev->data->ports[0], i);
+
+ base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base += SSO_VHGRP_QCTL;
+ ssovf_write64(1, base); /* Enable SSO group */
+ }
+
+ ssovf_fastpath_fns_set(dev);
+ return 0;
+}
+
+static void
+ssovf_stop(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ struct ssows *ws;
+ uint8_t *base;
+ uint8_t i;
+
+ ssovf_func_trace();
+ for (i = 0; i < edev->nb_event_ports; i++) {
+ ws = dev->data->ports[i];
+ ssows_reset(ws);
+ ws->swtag_req = 0;
+ }
+
+ for (i = 0; i < edev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ ssows_flush_events(dev->data->ports[0], i);
+
+ base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
+ base += SSO_VHGRP_QCTL;
+ ssovf_write64(0, base); /* Disable SSO group */
+ }
+}
+
+static int
+ssovf_close(struct rte_eventdev *dev)
+{
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t i;
+
+ for (i = 0; i < edev->nb_event_queues; i++)
+ all_queues[i] = i;
+
+ for (i = 0; i < edev->nb_event_ports; i++)
+ ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
+ edev->nb_event_queues);
+ return 0;
+}
+
+/* Initialize and register event driver with DPDK Application */
+static const struct rte_eventdev_ops ssovf_ops = {
+ .dev_infos_get = ssovf_info_get,
+ .dev_configure = ssovf_configure,
+ .queue_def_conf = ssovf_queue_def_conf,
+ .queue_setup = ssovf_queue_setup,
+ .queue_release = ssovf_queue_release,
+ .port_def_conf = ssovf_port_def_conf,
+ .port_setup = ssovf_port_setup,
+ .port_release = ssovf_port_release,
+ .port_link = ssovf_port_link,
+ .port_unlink = ssovf_port_unlink,
+ .timeout_ticks = ssovf_timeout_ticks,
+ .dump = ssovf_dump,
+ .dev_start = ssovf_start,
+ .dev_stop = ssovf_stop,
+ .dev_close = ssovf_close
+};
+
+static int
+ssovf_vdev_probe(struct rte_vdev_device *vdev)
+{
+ struct octeontx_ssovf_info oinfo;
+ struct ssovf_mbox_dev_info info;
+ struct ssovf_evdev *edev;
+ struct rte_eventdev *eventdev;
+ static int ssovf_init_once;
+ const char *name;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ /* More than one instance is not supported */
+ if (ssovf_init_once) {
+ ssovf_log_err("Request to create >1 %s instance", name);
+ return -EINVAL;
+ }
+
+ eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
+ rte_socket_id());
+ if (eventdev == NULL) {
+ ssovf_log_err("Failed to create eventdev vdev %s", name);
+ return -ENOMEM;
+ }
+ eventdev->dev_ops = &ssovf_ops;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ssovf_fastpath_fns_set(eventdev);
+ return 0;
+ }
+
+ ret = octeontx_ssovf_info(&oinfo);
+ if (ret) {
+ ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
+ goto error;
+ }
+
+ edev = ssovf_pmd_priv(eventdev);
+ edev->max_event_ports = oinfo.total_ssowvfs;
+ edev->max_event_queues = oinfo.total_ssovfs;
+ edev->is_timeout_deq = 0;
+
+ ret = ssovf_mbox_dev_info(&info);
+ if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
+ ssovf_log_err("Failed to get mbox devinfo %d", ret);
+ goto error;
+ }
+
+ edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
+ edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
+ edev->max_num_events = info.max_num_events;
+ ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
+ info.min_deq_timeout_ns, info.max_deq_timeout_ns,
+ info.max_num_events);
+
+ if (!edev->max_event_ports || !edev->max_event_queues) {
+ ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
+ edev->max_event_queues, edev->max_event_ports);
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
+ name, oinfo.domain, edev->max_event_queues,
+ edev->max_event_ports);
+
+ ssovf_init_once = 1;
+ return 0;
+
+error:
+ rte_event_pmd_vdev_uninit(name);
+ return ret;
+}
+
+static int
+ssovf_vdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ ssovf_log_info("Closing %s", name);
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_ssovf_pmd = {
+ .probe = ssovf_vdev_probe,
+ .remove = ssovf_vdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
diff --git a/src/seastar/dpdk/drivers/event/octeontx/ssovf_evdev.h b/src/seastar/dpdk/drivers/event/octeontx/ssovf_evdev.h
new file mode 100644
index 00000000..6e0a3521
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/octeontx/ssovf_evdev.h
@@ -0,0 +1,203 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SSOVF_EVDEV_H__
+#define __SSOVF_EVDEV_H__
+
+#include <rte_config.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_io.h>
+
+#include "rte_pmd_octeontx_ssovf.h"
+
+#define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
+
+#ifdef RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG
+#define ssovf_log_info(fmt, args...) \
+ RTE_LOG(INFO, EVENTDEV, "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+#define ssovf_log_dbg(fmt, args...) \
+ RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+#else
+#define ssovf_log_info(fmt, args...)
+#define ssovf_log_dbg(fmt, args...)
+#endif
+
+#define ssovf_func_trace ssovf_log_dbg
+#define ssovf_log_err(fmt, args...) \
+ RTE_LOG(ERR, EVENTDEV, "[%s] %s() " fmt "\n", \
+ RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
+#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
+
+#define SSO_MAX_VHGRP (64)
+#define SSO_MAX_VHWS (32)
+
+/* SSO VF register offsets */
+#define SSO_VHGRP_QCTL (0x010ULL)
+#define SSO_VHGRP_INT (0x100ULL)
+#define SSO_VHGRP_INT_W1S (0x108ULL)
+#define SSO_VHGRP_INT_ENA_W1S (0x110ULL)
+#define SSO_VHGRP_INT_ENA_W1C (0x118ULL)
+#define SSO_VHGRP_INT_THR (0x140ULL)
+#define SSO_VHGRP_INT_CNT (0x180ULL)
+#define SSO_VHGRP_XAQ_CNT (0x1B0ULL)
+#define SSO_VHGRP_AQ_CNT (0x1C0ULL)
+#define SSO_VHGRP_AQ_THR (0x1E0ULL)
+#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+
+/* BAR2 */
+#define SSO_VHGRP_OP_ADD_WORK0 (0x00ULL)
+#define SSO_VHGRP_OP_ADD_WORK1 (0x08ULL)
+
+/* SSOW VF register offsets (BAR0) */
+#define SSOW_VHWS_GRPMSK_CHGX(x) (0x080ULL | ((x) << 3))
+#define SSOW_VHWS_TAG (0x300ULL)
+#define SSOW_VHWS_WQP (0x308ULL)
+#define SSOW_VHWS_LINKS (0x310ULL)
+#define SSOW_VHWS_PENDTAG (0x340ULL)
+#define SSOW_VHWS_PENDWQP (0x348ULL)
+#define SSOW_VHWS_SWTP (0x400ULL)
+#define SSOW_VHWS_OP_ALLOC_WE (0x410ULL)
+#define SSOW_VHWS_OP_UPD_WQP_GRP0 (0x440ULL)
+#define SSOW_VHWS_OP_UPD_WQP_GRP1 (0x448ULL)
+#define SSOW_VHWS_OP_SWTAG_UNTAG (0x490ULL)
+#define SSOW_VHWS_OP_SWTAG_CLR (0x820ULL)
+#define SSOW_VHWS_OP_DESCHED (0x860ULL)
+#define SSOW_VHWS_OP_DESCHED_NOSCH (0x870ULL)
+#define SSOW_VHWS_OP_SWTAG_DESCHED (0x8C0ULL)
+#define SSOW_VHWS_OP_SWTAG_NOSCHED (0x8D0ULL)
+#define SSOW_VHWS_OP_SWTP_SET (0xC20ULL)
+#define SSOW_VHWS_OP_SWTAG_NORM (0xC80ULL)
+#define SSOW_VHWS_OP_SWTAG_FULL0 (0xCA0UL)
+#define SSOW_VHWS_OP_SWTAG_FULL1 (0xCA8ULL)
+#define SSOW_VHWS_OP_CLR_NSCHED (0x10000ULL)
+#define SSOW_VHWS_OP_GET_WORK0 (0x80000ULL)
+#define SSOW_VHWS_OP_GET_WORK1 (0x80008ULL)
+
+#define SSOW_BAR4_LEN (64 * 1024)
+
+/* Mailbox message constants */
+#define SSO_COPROC 0x2
+
+#define SSO_GETDOMAINCFG 0x1
+#define SSO_IDENTIFY 0x2
+#define SSO_GET_DEV_INFO 0x3
+#define SSO_GET_GETWORK_WAIT 0x4
+#define SSO_SET_GETWORK_WAIT 0x5
+#define SSO_CONVERT_NS_GETWORK_ITER 0x6
+#define SSO_GRP_GET_PRIORITY 0x7
+#define SSO_GRP_SET_PRIORITY 0x8
+
+/*
+ * In Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed versions of the IO operations
+ * are safe to use without any IO memory barriers.
+ */
+#define ssovf_read64 rte_read64_relaxed
+#define ssovf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define ssovf_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define ssovf_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Unoptimized functions for building on non-arm64 arch */
+
+#define ssovf_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define ssovf_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+
+struct ssovf_evdev {
+ uint8_t max_event_queues;
+ uint8_t max_event_ports;
+ uint8_t is_timeout_deq;
+ uint8_t nb_event_queues;
+ uint8_t nb_event_ports;
+ uint32_t min_deq_timeout_ns;
+ uint32_t max_deq_timeout_ns;
+ int32_t max_num_events;
+} __rte_cache_aligned;
+
+/* Event port aka HWS */
+struct ssows {
+ uint8_t cur_tt;
+ uint8_t cur_grp;
+ uint8_t swtag_req;
+ uint8_t *base;
+ uint8_t *getwork;
+ uint8_t *grps[SSO_MAX_VHGRP];
+ uint8_t port;
+} __rte_cache_aligned;
+
+static inline struct ssovf_evdev *
+ssovf_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+uint16_t ssows_enq(void *port, const struct rte_event *ev);
+uint16_t ssows_enq_burst(void *port,
+ const struct rte_event ev[], uint16_t nb_events);
+uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
+uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks);
+uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks);
+void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
+void ssows_reset(struct ssows *ws);
+
+#endif /* __SSOVF_EVDEV_H__ */
diff --git a/src/seastar/dpdk/drivers/event/octeontx/ssovf_mbox.c b/src/seastar/dpdk/drivers/event/octeontx/ssovf_mbox.c
new file mode 100644
index 00000000..7394a3a9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/octeontx/ssovf_mbox.c
@@ -0,0 +1,232 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_spinlock.h>
+
+#include "ssovf_evdev.h"
+
+/* Mbox operation timeout in seconds */
+#define MBOX_WAIT_TIME_SEC 3
+#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
+
+/* Mbox channel state */
+enum {
+ MBOX_CHAN_STATE_REQ = 1,
+ MBOX_CHAN_STATE_RES = 0,
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+struct mbox {
+ int init_once;
+ uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
+ uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
+ uint16_t tag_own; /* Last tag which was written to own channel */
+ rte_spinlock_t lock;
+};
+
+static struct mbox octeontx_mbox;
+
+/*
+ * Structure used for mbox synchronization
+ * This structure sits at the begin of Mbox RAM and used as main
+ * synchronization point for channel communication
+ */
+struct mbox_ram_hdr {
+ union {
+ uint64_t u64;
+ struct {
+ uint8_t chan_state : 1;
+ uint8_t coproc : 7;
+ uint8_t msg;
+ uint8_t vfid;
+ uint8_t res_code;
+ uint16_t tag;
+ uint16_t len;
+ };
+ };
+};
+
+static inline void
+mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ const void *txmsg, uint16_t txsize)
+{
+ struct mbox_ram_hdr old_hdr;
+ struct mbox_ram_hdr new_hdr = { {0} };
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /*
+	 * Initialize the channel with the tag left by the last send.
+	 * On successful completion of a mbox send, the PF increments the tag
+	 * by one. The sender can validate the integrity of the PF message
+	 * with this scheme.
+ */
+ old_hdr.u64 = rte_read64(ram_mbox_hdr);
+ m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
+
+ /* Copy msg body */
+ if (txmsg)
+ memcpy(ram_mbox_msg, txmsg, txsize);
+
+ /* Prepare new hdr */
+ new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
+ new_hdr.coproc = hdr->coproc;
+ new_hdr.msg = hdr->msg;
+ new_hdr.vfid = hdr->vfid;
+ new_hdr.tag = m->tag_own;
+ new_hdr.len = txsize;
+
+ /* Write the msg header */
+ rte_write64(new_hdr.u64, ram_mbox_hdr);
+ rte_io_wmb();
+ /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
+ rte_write64(0, m->reg);
+}
+
+static inline int
+mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ void *rxmsg, uint16_t rxsize)
+{
+ int res = 0, wait;
+ uint16_t len;
+ struct mbox_ram_hdr rx_hdr;
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /* Wait for response */
+ wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
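+	/* Poll in 100us steps, i.e. 10000 iterations per second of timeout */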
+ while (wait > 0) {
+ rte_delay_us(100);
+ rx_hdr.u64 = rte_read64(ram_mbox_hdr);
+ if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
+ break;
+ --wait;
+ }
+
+ hdr->res_code = rx_hdr.res_code;
+ m->tag_own++;
+
+ /* Timeout */
+ if (wait <= 0) {
+ res = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Tag mismatch */
+ if (m->tag_own != rx_hdr.tag) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ /* PF nacked the msg */
+ if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
+ res = -EBADMSG;
+ goto error;
+ }
+
+ len = RTE_MIN(rx_hdr.len, rxsize);
+ if (rxmsg)
+ memcpy(rxmsg, ram_mbox_msg, len);
+
+ return len;
+
+error:
+ ssovf_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
+			m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
+ hdr->res_code);
+ return res;
+}
+
+static inline int
+mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
+ uint16_t txsize, void *rxmsg, uint16_t rxsize)
+{
+ int res = -EINVAL;
+
+ if (m->init_once == 0 || hdr == NULL ||
+ txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
+ ssovf_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
+ m->init_once, hdr, txsize, rxsize);
+ return res;
+ }
+
+ rte_spinlock_lock(&m->lock);
+
+ mbox_send_request(m, hdr, txmsg, txsize);
+ res = mbox_wait_response(m, hdr, rxmsg, rxsize);
+
+ rte_spinlock_unlock(&m->lock);
+ return res;
+}
+
+static inline int
+mbox_setup(struct mbox *m)
+{
+ if (unlikely(m->init_once == 0)) {
+ rte_spinlock_init(&m->lock);
+ m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
+ m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
+ m->reg += SSO_VHGRP_PF_MBOX(1);
+
+ if (m->ram_mbox_base == NULL || m->reg == NULL) {
+ ssovf_log_err("Invalid ram_mbox_base=%p or reg=%p",
+ m->ram_mbox_base, m->reg);
+ return -EINVAL;
+ }
+ m->init_once = 1;
+ }
+ return 0;
+}
+
+int
+octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
+ uint16_t txlen, void *rxdata, uint16_t rxlen)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m))
+ return -EINVAL;
+
+ return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
+}
diff --git a/src/seastar/dpdk/drivers/event/octeontx/ssovf_probe.c b/src/seastar/dpdk/drivers/event/octeontx/ssovf_probe.c
new file mode 100644
index 00000000..b644ebde
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/octeontx/ssovf_probe.c
@@ -0,0 +1,288 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+
+#include "ssovf_evdev.h"
+
+struct ssovf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+};
+
+struct ssowvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct ssowvf_identify {
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct ssodev {
+ uint8_t total_ssovfs;
+ uint8_t total_ssowvfs;
+ struct ssovf_res grp[SSO_MAX_VHGRP];
+ struct ssowvf_res hws[SSO_MAX_VHWS];
+};
+
+static struct ssodev sdev;
+
+/* Interface functions */
+int
+octeontx_ssovf_info(struct octeontx_ssovf_info *info)
+{
+ uint8_t i;
+ uint16_t domain;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL)
+ return -EINVAL;
+
+ if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0)
+ return -ENODEV;
+
+ domain = sdev.grp[0].domain;
+ for (i = 0; i < sdev.total_ssovfs; i++) {
+		/* Check vfids are contiguous and belong to the same domain */
+ if (sdev.grp[i].vfid != i ||
+ sdev.grp[i].bar0 == NULL ||
+ sdev.grp[i].domain != domain) {
+ ssovf_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.grp[i].vfid,
+ domain, sdev.grp[i].domain,
+ sdev.grp[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < sdev.total_ssowvfs; i++) {
+		/* Check vfids are contiguous and belong to the same domain */
+ if (sdev.hws[i].vfid != i ||
+ sdev.hws[i].bar0 == NULL ||
+ sdev.hws[i].domain != domain) {
+ ssovf_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.hws[i].vfid,
+ domain, sdev.hws[i].domain,
+ sdev.hws[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ info->domain = domain;
+ info->total_ssovfs = sdev.total_ssovfs;
+ info->total_ssowvfs = sdev.total_ssowvfs;
+ return 0;
+}
+
+void*
+octeontx_ssovf_bar(enum octeontx_ssovf_type type, uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ type > OCTEONTX_SSO_HWS)
+ return NULL;
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ if (id >= sdev.total_ssovfs)
+ return NULL;
+ } else {
+ if (id >= sdev.total_ssowvfs)
+ return NULL;
+ }
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ switch (bar) {
+ case 0:
+ return sdev.grp[id].bar0;
+ case 2:
+ return sdev.grp[id].bar2;
+ default:
+ return NULL;
+ }
+ } else {
+ switch (bar) {
+ case 0:
+ return sdev.hws[id].bar0;
+ case 2:
+ return sdev.hws[id].bar2;
+ case 4:
+ return sdev.hws[id].bar4;
+ default:
+ return NULL;
+ }
+ }
+}
+
+/* SSOWVF pcie device aka event port probe */
+
+static int
+ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint16_t vfid;
+ struct ssowvf_res *res;
+ struct ssowvf_identify *id;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ ssovf_log_err("Empty bars %p %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
+ if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
+ ssovf_log_err("Bar4 len mismatch %d != %d",
+ SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
+ return -EINVAL;
+ }
+
+ id = pci_dev->mem_resource[4].addr;
+ vfid = id->vfid;
+ if (vfid >= SSO_MAX_VHWS) {
+ ssovf_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
+ return -EINVAL;
+ }
+
+ res = &sdev.hws[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = id->domain;
+
+ sdev.total_ssowvfs++;
+ rte_wmb();
+ ssovf_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
+ res->vfid, sdev.total_ssowvfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssowvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOWS_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssowvf = {
+ .id_table = pci_ssowvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssowvf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf);
+
+/* SSOVF pcie device aka event queue probe */
+
+static int
+ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ uint8_t *idreg;
+ struct ssovf_res *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL) {
+ ssovf_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+ idreg += SSO_VHGRP_AQ_THR;
+ val = rte_read64(idreg);
+
+ /* Write back the default value of aq_thr */
+ rte_write64((1ULL << 33) - 1, idreg);
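+	/*
+	 * The value read back from AQ_THR encodes the VF identity:
+	 * vfid in bits [31:16], domain in bits [15:0].
+	 */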
+ vfid = (val >> 16) & 0xffff;
+ if (vfid >= SSO_MAX_VHGRP) {
+ ssovf_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
+ return -EINVAL;
+ }
+
+ res = &sdev.grp[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->domain = val & 0xffff;
+
+ sdev.total_ssovfs++;
+ rte_wmb();
+ ssovf_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
+ res->vfid, sdev.total_ssovfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssovf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssovf = {
+ .id_table = pci_ssovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf);
diff --git a/src/seastar/dpdk/drivers/event/octeontx/ssovf_worker.c b/src/seastar/dpdk/drivers/event/octeontx/ssovf_worker.c
new file mode 100644
index 00000000..ad3fe684
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/octeontx/ssovf_worker.c
@@ -0,0 +1,252 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ssovf_worker.h"
+
+static force_inline void
+ssows_new_event(struct ssows *ws, const struct rte_event *ev)
+{
+ const uint64_t event_ptr = ev->u64;
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t new_tt = ev->sched_type;
+ const uint8_t grp = ev->queue_id;
+
+ ssows_add_work(ws, event_ptr, tag, new_tt, grp);
+}
+
+static force_inline void
+ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
+{
+ const uint8_t cur_tt = ws->cur_tt;
+ const uint8_t new_tt = ev->sched_type;
+ const uint32_t tag = (uint32_t)ev->event;
+ /*
+ * cur_tt/new_tt SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
+ *
+ * SSO_SYNC_ORDERED norm norm untag
+ * SSO_SYNC_ATOMIC norm norm untag
+ * SSO_SYNC_UNTAGGED full full NOOP
+ */
+ if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
+ if (new_tt != SSO_SYNC_UNTAGGED) {
+ ssows_swtag_full(ws, ev->u64, tag,
+ new_tt, grp);
+ }
+ } else {
+ if (likely(new_tt != SSO_SYNC_UNTAGGED))
+ ssows_swtag_norm(ws, tag, new_tt);
+ else
+ ssows_swtag_untag(ws);
+ }
+ ws->swtag_req = 1;
+}
+
+#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)
+
+static force_inline void
+ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
+{
+ const uint64_t event_ptr = ev->u64;
+ const uint32_t tag = (uint32_t)ev->event;
+ const uint8_t cur_tt = ws->cur_tt;
+ const uint8_t new_tt = ev->sched_type;
+
+ if (cur_tt == SSO_SYNC_ORDERED) {
+ /* Create unique tag based on custom event type and new grp */
+ uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;
+
+ newtag |= grp << 20;
+ newtag |= tag;
+ ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
+ rte_smp_wmb();
+ ssows_swtag_wait(ws);
+ } else {
+ rte_smp_wmb();
+ }
+ ssows_add_work(ws, event_ptr, tag, new_tt, grp);
+}
+
+static force_inline void
+ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
+{
+ const uint8_t grp = ev->queue_id;
+
+	/* Group hasn't changed; use SWTAG to forward the event */
+ if (ws->cur_grp == grp)
+ ssows_fwd_swtag(ws, ev, grp);
+ else
+ /*
+		 * Group has changed for group-based work pipelining;
+		 * use the deschedule/add_work operation to transfer the
+		 * event to the new group/core.
+ */
+ ssows_fwd_group(ws, ev, grp);
+}
+
+static force_inline void
+ssows_release_event(struct ssows *ws)
+{
+ if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
+ ssows_swtag_untag(ws);
+}
+
+force_inline uint16_t __hot
+ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ struct ssows *ws = port;
+
+ RTE_SET_USED(timeout_ticks);
+
+ ssows_swtag_wait(ws);
+ if (ws->swtag_req) {
+ ws->swtag_req = 0;
+ return 1;
+ } else {
+ return ssows_get_work(ws, ev);
+ }
+}
+
+force_inline uint16_t __hot
+ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
+{
+ struct ssows *ws = port;
+ uint64_t iter;
+ uint16_t ret = 1;
+
+ ssows_swtag_wait(ws);
+ if (ws->swtag_req) {
+ ws->swtag_req = 0;
+ } else {
+ ret = ssows_get_work(ws, ev);
+ for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
+ ret = ssows_get_work(ws, ev);
+ }
+ return ret;
+}
+
+uint16_t __hot
+ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
+ uint64_t timeout_ticks)
+{
+ RTE_SET_USED(nb_events);
+
+ return ssows_deq(port, ev, timeout_ticks);
+}
+
+uint16_t __hot
+ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
+ uint64_t timeout_ticks)
+{
+ RTE_SET_USED(nb_events);
+
+ return ssows_deq_timeout(port, ev, timeout_ticks);
+}
+
+force_inline uint16_t __hot
+ssows_enq(void *port, const struct rte_event *ev)
+{
+ struct ssows *ws = port;
+ uint16_t ret = 1;
+
+ switch (ev->op) {
+ case RTE_EVENT_OP_NEW:
+ ssows_new_event(ws, ev);
+ break;
+ case RTE_EVENT_OP_FORWARD:
+ ssows_forward_event(ws, ev);
+ break;
+ case RTE_EVENT_OP_RELEASE:
+ ssows_release_event(ws);
+ break;
+ default:
+ ret = 0;
+ }
+ return ret;
+}
+
+uint16_t __hot
+ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
+{
+ RTE_SET_USED(nb_events);
+ return ssows_enq(port, ev);
+}
+
+void
+ssows_flush_events(struct ssows *ws, uint8_t queue_id)
+{
+ uint32_t reg_off;
+ uint64_t aq_cnt = 1;
+ uint64_t cq_ds_cnt = 1;
+ uint64_t enable, get_work0, get_work1;
+ uint8_t *base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);
+
+ enable = ssovf_read64(base + SSO_VHGRP_QCTL);
+ if (!enable)
+ return;
+
+ reg_off = SSOW_VHWS_OP_GET_WORK0;
+ reg_off |= 1 << 17; /* Grouped */
+ reg_off |= 1 << 16; /* WAIT */
+ reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
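+	/*
+	 * Drain the group: keep issuing GET_WORK on this queue until both
+	 * the AQ count and the CQ/DS counts read back as zero.
+	 */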
+ while (aq_cnt || cq_ds_cnt) {
+ aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
+ cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
+ /* Extract cq and ds count */
+ cq_ds_cnt &= 0x1FFF1FFF0000;
+ ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);
+ }
+
+ RTE_SET_USED(get_work0);
+ RTE_SET_USED(get_work1);
+}
+
+void
+ssows_reset(struct ssows *ws)
+{
+ uint64_t tag;
+ uint64_t pend_tag;
+ uint8_t pend_tt;
+ uint8_t tt;
+
+ tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
+ pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);
+
+ if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
+ pend_tt = (pend_tag >> 32) & 0x3;
+ if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
+ ssows_desched(ws);
+ } else {
+ tt = (tag >> 32) & 0x3;
+ if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
+ ssows_swtag_untag(ws);
+ }
+}
diff --git a/src/seastar/dpdk/drivers/event/octeontx/ssovf_worker.h b/src/seastar/dpdk/drivers/event/octeontx/ssovf_worker.h
new file mode 100644
index 00000000..300dfae8
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/octeontx/ssovf_worker.h
@@ -0,0 +1,139 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <rte_common.h>
+
+#include "ssovf_evdev.h"
+
+enum {
+ SSO_SYNC_ORDERED,
+ SSO_SYNC_ATOMIC,
+ SSO_SYNC_UNTAGGED,
+ SSO_SYNC_EMPTY
+};
+
+#ifndef force_inline
+#define force_inline inline __attribute__((always_inline))
+#endif
+
+#ifndef __hot
+#define __hot __attribute__((hot))
+#endif
+
+/* SSO Operations */
+
+static force_inline uint16_t
+ssows_get_work(struct ssows *ws, struct rte_event *ev)
+{
+ uint64_t get_work0, get_work1;
+ uint64_t sched_type_queue;
+
+ ssovf_load_pair(get_work0, get_work1, ws->getwork);
+
+ sched_type_queue = (get_work0 >> 32) & 0xfff;
+ ws->cur_tt = sched_type_queue & 0x3;
+ ws->cur_grp = sched_type_queue >> 2;
+ sched_type_queue = sched_type_queue << 38;
+
+ ev->event = sched_type_queue | (get_work0 & 0xffffffff);
+ ev->u64 = get_work1;
+ return !!get_work1;
+}
+
+static force_inline void
+ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
+ const uint8_t new_tt, const uint8_t grp)
+{
+ uint64_t add_work0;
+
+ add_work0 = tag | ((uint64_t)(new_tt) << 32);
+ ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
+}
+
+static force_inline void
+ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
+ const uint8_t new_tt, const uint8_t grp)
+{
+ uint64_t swtag_full0;
+
+ swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
+ ((uint64_t)grp << 34);
+ ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
+ SSOW_VHWS_OP_SWTAG_FULL0));
+}
+
+static force_inline void
+ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
+ ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
+}
+
+static force_inline void
+ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
+{
+ uint64_t val;
+
+ val = tag | ((uint64_t)(new_tt & 0x3) << 32);
+ ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
+}
+
+static force_inline void
+ssows_swtag_untag(struct ssows *ws)
+{
+ ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
+ ws->cur_tt = SSO_SYNC_UNTAGGED;
+}
+
+static force_inline void
+ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
+{
+ ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
+ SSOW_VHWS_OP_UPD_WQP_GRP0));
+}
+
+static force_inline void
+ssows_desched(struct ssows *ws)
+{
+ ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
+}
+
+static force_inline void
+ssows_swtag_wait(struct ssows *ws)
+{
+ /* Wait for the SWTAG/SWTAG_FULL operation */
+ while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
+ ;
+}
diff --git a/src/seastar/dpdk/drivers/event/skeleton/Makefile b/src/seastar/dpdk/drivers/event/skeleton/Makefile
new file mode 100644
index 00000000..c2b2456b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/skeleton/Makefile
@@ -0,0 +1,51 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Cavium Networks. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium Networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_skeleton_event.a
+
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_skeleton_event_version.map
+
+LIBABIVER := 1
+
+#
+# all source files are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SKELETON_EVENTDEV) += skeleton_eventdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/event/skeleton/rte_pmd_skeleton_event_version.map b/src/seastar/dpdk/drivers/event/skeleton/rte_pmd_skeleton_event_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/skeleton/rte_pmd_skeleton_event_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/event/skeleton/skeleton_eventdev.c b/src/seastar/dpdk/drivers/event/skeleton/skeleton_eventdev.c
new file mode 100644
index 00000000..800bd76e
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/skeleton/skeleton_eventdev.c
@@ -0,0 +1,497 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_lcore.h>
+#include <rte_vdev.h>
+
+#include "skeleton_eventdev.h"
+
+#define EVENTDEV_NAME_SKELETON_PMD event_skeleton
+/**< Skeleton event device PMD name */
+
+static uint16_t
+skeleton_eventdev_enqueue(void *port, const struct rte_event *ev)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(port);
+
+ return 0;
+}
+
+static uint16_t
+skeleton_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(port);
+ RTE_SET_USED(nb_events);
+
+ return 0;
+}
+
+static uint16_t
+skeleton_eventdev_dequeue(void *port, struct rte_event *ev,
+ uint64_t timeout_ticks)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(timeout_ticks);
+
+ return 0;
+}
+
+static uint16_t
+skeleton_eventdev_dequeue_burst(void *port, struct rte_event ev[],
+ uint16_t nb_events, uint64_t timeout_ticks)
+{
+ struct skeleton_port *sp = port;
+
+ RTE_SET_USED(sp);
+ RTE_SET_USED(ev);
+ RTE_SET_USED(nb_events);
+ RTE_SET_USED(timeout_ticks);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+
+ dev_info->min_dequeue_timeout_ns = 1;
+ dev_info->max_dequeue_timeout_ns = 10000;
+ dev_info->dequeue_timeout_ns = 25;
+ dev_info->max_event_queues = 64;
+ dev_info->max_event_queue_flows = (1ULL << 20);
+ dev_info->max_event_queue_priority_levels = 8;
+ dev_info->max_event_priority_levels = 8;
+ dev_info->max_event_ports = 32;
+ dev_info->max_event_port_dequeue_depth = 16;
+ dev_info->max_event_port_enqueue_depth = 16;
+ dev_info->max_num_events = (1ULL << 20);
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_EVENT_QOS;
+}
+
+static int
+skeleton_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct rte_eventdev_data *data = dev->data;
+ struct rte_event_dev_config *conf = &data->dev_conf;
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(conf);
+ RTE_SET_USED(skel);
+
+ PMD_DRV_LOG(DEBUG, "Configured eventdev devid=%d", dev->data->dev_id);
+ return 0;
+}
+
+static int
+skeleton_eventdev_start(struct rte_eventdev *dev)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_stop(struct rte_eventdev *dev)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+}
+
+static int
+skeleton_eventdev_close(struct rte_eventdev *dev)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = (1ULL << 20);
+ queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+ queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static void
+skeleton_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static int
+skeleton_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(queue_conf);
+ RTE_SET_USED(queue_id);
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = 32 * 1024;
+ port_conf->dequeue_depth = 16;
+ port_conf->enqueue_depth = 16;
+}
+
+static void
+skeleton_eventdev_port_release(void *port)
+{
+ struct skeleton_port *sp = port;
+ PMD_DRV_FUNC_TRACE();
+
+ rte_free(sp);
+}
+
+static int
+skeleton_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct skeleton_port *sp;
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(port_conf);
+
+ /* Free memory prior to re-allocation if needed */
+ if (dev->data->ports[port_id] != NULL) {
+ PMD_DRV_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+ port_id);
+ skeleton_eventdev_port_release(dev->data->ports[port_id]);
+ dev->data->ports[port_id] = NULL;
+ }
+
+ /* Allocate event port memory */
+ sp = rte_zmalloc_socket("eventdev port",
+ sizeof(struct skeleton_port), RTE_CACHE_LINE_SIZE,
+ dev->data->socket_id);
+ if (sp == NULL) {
+ PMD_DRV_ERR("Failed to allocate sp port_id=%d", port_id);
+ return -ENOMEM;
+ }
+
+ sp->port_id = port_id;
+
+ PMD_DRV_LOG(DEBUG, "[%d] sp=%p", port_id, sp);
+
+ dev->data->ports[port_id] = sp;
+ return 0;
+}
+
+static int
+skeleton_eventdev_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct skeleton_port *sp = port;
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(sp);
+ RTE_SET_USED(queues);
+ RTE_SET_USED(priorities);
+
+ /* Linked all the queues */
+ return (int)nb_links;
+}
+
+static int
+skeleton_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct skeleton_port *sp = port;
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(sp);
+ RTE_SET_USED(queues);
+
+ /* Unlinked all the queues */
+ return (int)nb_unlinks;
+
+}
+
+static int
+skeleton_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+ uint32_t scale = 1;
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ *timeout_ticks = ns * scale;
+
+ return 0;
+}
+
+static void
+skeleton_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(dev);
+
+ PMD_DRV_FUNC_TRACE();
+
+ RTE_SET_USED(skel);
+ RTE_SET_USED(f);
+}
+
+
+/* Initialize and register event driver with DPDK Application */
+static const struct rte_eventdev_ops skeleton_eventdev_ops = {
+ .dev_infos_get = skeleton_eventdev_info_get,
+ .dev_configure = skeleton_eventdev_configure,
+ .dev_start = skeleton_eventdev_start,
+ .dev_stop = skeleton_eventdev_stop,
+ .dev_close = skeleton_eventdev_close,
+ .queue_def_conf = skeleton_eventdev_queue_def_conf,
+ .queue_setup = skeleton_eventdev_queue_setup,
+ .queue_release = skeleton_eventdev_queue_release,
+ .port_def_conf = skeleton_eventdev_port_def_conf,
+ .port_setup = skeleton_eventdev_port_setup,
+ .port_release = skeleton_eventdev_port_release,
+ .port_link = skeleton_eventdev_port_link,
+ .port_unlink = skeleton_eventdev_port_unlink,
+ .timeout_ticks = skeleton_eventdev_timeout_ticks,
+ .dump = skeleton_eventdev_dump
+};
+
+static int
+skeleton_eventdev_init(struct rte_eventdev *eventdev)
+{
+ struct rte_pci_device *pci_dev;
+ struct skeleton_eventdev *skel = skeleton_pmd_priv(eventdev);
+ int ret = 0;
+
+ PMD_DRV_FUNC_TRACE();
+
+ eventdev->dev_ops = &skeleton_eventdev_ops;
+ eventdev->schedule = NULL;
+ eventdev->enqueue = skeleton_eventdev_enqueue;
+ eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
+ eventdev->dequeue = skeleton_eventdev_dequeue;
+ eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = RTE_DEV_TO_PCI(eventdev->dev);
+
+ skel->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
+ if (!skel->reg_base) {
+ PMD_DRV_ERR("Failed to map BAR0");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ skel->device_id = pci_dev->id.device_id;
+ skel->vendor_id = pci_dev->id.vendor_id;
+ skel->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ skel->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+
+ PMD_DRV_LOG(DEBUG, "pci device (%x:%x) %u:%u:%u:%u",
+ pci_dev->id.vendor_id, pci_dev->id.device_id,
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
+
+ PMD_DRV_LOG(INFO, "dev_id=%d socket_id=%d (%x:%x)",
+ eventdev->data->dev_id, eventdev->data->socket_id,
+ skel->vendor_id, skel->device_id);
+
+fail:
+ return ret;
+}
+
+/* PCI based event device */
+
+#define EVENTDEV_SKEL_VENDOR_ID 0x177d
+#define EVENTDEV_SKEL_PRODUCT_ID 0x0001
+
+static const struct rte_pci_id pci_id_skeleton_map[] = {
+ {
+ RTE_PCI_DEVICE(EVENTDEV_SKEL_VENDOR_ID,
+ EVENTDEV_SKEL_PRODUCT_ID)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_eventdev_driver pci_eventdev_skeleton_pmd = {
+ .pci_drv = {
+ .id_table = pci_id_skeleton_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_event_pmd_pci_probe,
+ .remove = rte_event_pmd_pci_remove,
+ },
+ .eventdev_init = skeleton_eventdev_init,
+ .dev_private_size = sizeof(struct skeleton_eventdev),
+};
+
+RTE_PMD_REGISTER_PCI(event_skeleton_pci, pci_eventdev_skeleton_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(event_skeleton_pci, pci_id_skeleton_map);
+
+/* VDEV based event device */
+
+static int
+skeleton_eventdev_create(const char *name, int socket_id)
+{
+ struct rte_eventdev *eventdev;
+
+ eventdev = rte_event_pmd_vdev_init(name,
+ sizeof(struct skeleton_eventdev), socket_id);
+ if (eventdev == NULL) {
+ PMD_DRV_ERR("Failed to create eventdev vdev %s", name);
+ goto fail;
+ }
+
+ eventdev->dev_ops = &skeleton_eventdev_ops;
+ eventdev->schedule = NULL;
+ eventdev->enqueue = skeleton_eventdev_enqueue;
+ eventdev->enqueue_burst = skeleton_eventdev_enqueue_burst;
+ eventdev->dequeue = skeleton_eventdev_dequeue;
+ eventdev->dequeue_burst = skeleton_eventdev_dequeue_burst;
+
+ return 0;
+fail:
+ return -EFAULT;
+}
+
+static int
+skeleton_eventdev_probe(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ RTE_LOG(INFO, PMD, "Initializing %s on NUMA node %d\n", name,
+ rte_socket_id());
+ return skeleton_eventdev_create(name, rte_socket_id());
+}
+
+static int
+skeleton_eventdev_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver vdev_eventdev_skeleton_pmd = {
+ .probe = skeleton_eventdev_probe,
+ .remove = skeleton_eventdev_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SKELETON_PMD, vdev_eventdev_skeleton_pmd);
diff --git a/src/seastar/dpdk/drivers/event/skeleton/skeleton_eventdev.h b/src/seastar/dpdk/drivers/event/skeleton/skeleton_eventdev.h
new file mode 100644
index 00000000..1ce62da7
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/skeleton/skeleton_eventdev.h
@@ -0,0 +1,68 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium networks Ltd. 2016.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SKELETON_EVENTDEV_H__
+#define __SKELETON_EVENTDEV_H__
+
+#include <rte_eventdev_pmd.h>
+
+#ifdef RTE_LIBRTE_PMD_SKELETON_EVENTDEV_DEBUG
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>")
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#define PMD_DRV_FUNC_TRACE() do { } while (0)
+#endif
+
+#define PMD_DRV_ERR(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s(): " fmt "\n", __func__, ## args)
+
+struct skeleton_eventdev {
+ uintptr_t reg_base;
+ uint16_t device_id;
+ uint16_t vendor_id;
+ uint16_t subsystem_device_id;
+ uint16_t subsystem_vendor_id;
+} __rte_cache_aligned;
+
+struct skeleton_port {
+ uint8_t port_id;
+} __rte_cache_aligned;
+
+static inline struct skeleton_eventdev *
+skeleton_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+#endif /* __SKELETON_EVENTDEV_H__ */
diff --git a/src/seastar/dpdk/drivers/event/sw/Makefile b/src/seastar/dpdk/drivers/event/sw/Makefile
new file mode 100644
index 00000000..857a87cc
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/sw/Makefile
@@ -0,0 +1,62 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_sw_event.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# for older GCC versions, allow us to initialize an event using
+# designated initializers.
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -le 50 && echo 1), 1)
+CFLAGS += -Wno-missing-field-initializers
+endif
+endif
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_evdev_sw_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_worker.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_scheduler.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV) += sw_evdev_xstats.c
+
+# export include files
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/drivers/event/sw/event_ring.h b/src/seastar/dpdk/drivers/event/sw/event_ring.h
new file mode 100644
index 00000000..cdaee95d
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/sw/event_ring.h
@@ -0,0 +1,185 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Generic ring structure for passing events from one core to another.
+ *
+ * Used by the software scheduler for the producer and consumer rings for
+ * each port, i.e. for passing events from worker cores to the scheduler and
+ * vice-versa. Designed for single-producer, single-consumer use with two
+ * cores working on each ring.
+ */
+
+#ifndef _EVENT_RING_
+#define _EVENT_RING_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+
+#define QE_RING_NAMESIZE 32
+
+struct qe_ring {
+ char name[QE_RING_NAMESIZE] __rte_cache_aligned;
+ uint32_t ring_size; /* size of memory block allocated to the ring */
+ uint32_t mask; /* mask for read/write values == ring_size -1 */
+ uint32_t size; /* actual usable space in the ring */
+ volatile uint32_t write_idx __rte_cache_aligned;
+ volatile uint32_t read_idx __rte_cache_aligned;
+
+ struct rte_event ring[0] __rte_cache_aligned;
+};
+
+#ifndef force_inline
+#define force_inline inline __attribute__((always_inline))
+#endif
+
+static inline struct qe_ring *
+qe_ring_create(const char *name, unsigned int size, unsigned int socket_id)
+{
+ struct qe_ring *retval;
+ const uint32_t ring_size = rte_align32pow2(size + 1);
+ size_t memsize = sizeof(*retval) +
+ (ring_size * sizeof(retval->ring[0]));
+
+ retval = rte_zmalloc_socket(NULL, memsize, 0, socket_id);
+ if (retval == NULL)
+ goto end;
+
+ snprintf(retval->name, sizeof(retval->name), "EVDEV_RG_%s", name);
+ retval->ring_size = ring_size;
+ retval->mask = ring_size - 1;
+ retval->size = size;
+end:
+ return retval;
+}
+
+static inline void
+qe_ring_destroy(struct qe_ring *r)
+{
+ rte_free(r);
+}
+
+static force_inline unsigned int
+qe_ring_count(const struct qe_ring *r)
+{
+ return r->write_idx - r->read_idx;
+}
+
+static force_inline unsigned int
+qe_ring_free_count(const struct qe_ring *r)
+{
+ return r->size - qe_ring_count(r);
+}
+
+static force_inline unsigned int
+qe_ring_enqueue_burst(struct qe_ring *r, const struct rte_event *qes,
+ unsigned int nb_qes, uint16_t *free_count)
+{
+ const uint32_t size = r->size;
+ const uint32_t mask = r->mask;
+ const uint32_t read = r->read_idx;
+ uint32_t write = r->write_idx;
+ const uint32_t space = read + size - write;
+ uint32_t i;
+
+ if (space < nb_qes)
+ nb_qes = space;
+
+ for (i = 0; i < nb_qes; i++, write++)
+ r->ring[write & mask] = qes[i];
+
+ rte_smp_wmb();
+
+ if (nb_qes != 0)
+ r->write_idx = write;
+
+ *free_count = space - nb_qes;
+
+ return nb_qes;
+}
+
+static force_inline unsigned int
+qe_ring_enqueue_burst_with_ops(struct qe_ring *r, const struct rte_event *qes,
+ unsigned int nb_qes, uint8_t *ops)
+{
+ const uint32_t size = r->size;
+ const uint32_t mask = r->mask;
+ const uint32_t read = r->read_idx;
+ uint32_t write = r->write_idx;
+ const uint32_t space = read + size - write;
+ uint32_t i;
+
+ if (space < nb_qes)
+ nb_qes = space;
+
+ for (i = 0; i < nb_qes; i++, write++) {
+ r->ring[write & mask] = qes[i];
+ r->ring[write & mask].op = ops[i];
+ }
+
+ rte_smp_wmb();
+
+ if (nb_qes != 0)
+ r->write_idx = write;
+
+ return nb_qes;
+}
+
+static force_inline unsigned int
+qe_ring_dequeue_burst(struct qe_ring *r, struct rte_event *qes,
+ unsigned int nb_qes)
+{
+ const uint32_t mask = r->mask;
+ uint32_t read = r->read_idx;
+ const uint32_t write = r->write_idx;
+ const uint32_t items = write - read;
+ uint32_t i;
+
+ if (items < nb_qes)
+ nb_qes = items;
+
+ for (i = 0; i < nb_qes; i++, read++)
+ qes[i] = r->ring[read & mask];
+
+ rte_smp_rmb();
+
+ if (nb_qes != 0)
+ r->read_idx += nb_qes;
+
+ return nb_qes;
+}
+
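+/*
+ * Illustrative usage sketch (not part of the original driver; the name
+ * "qe_ring_demo_usage" and its values are hypothetical): a single producer
+ * core enqueues a burst of events and a single consumer core drains them.
+ * The ring is single-producer/single-consumer, so each side must be one core.
+ */
+static inline void
+qe_ring_demo_usage(unsigned int socket_id)
+{
+	struct rte_event in[4], out[4];
+	uint16_t space;
+	unsigned int i;
+	struct qe_ring *r = qe_ring_create("demo", 128, socket_id);
+
+	if (r == NULL)
+		return;
+
+	for (i = 0; i < 4; i++) {
+		in[i].event = 0;	/* metadata word */
+		in[i].u64 = 0;		/* payload word */
+	}
+
+	/* producer side: enqueue up to 4 events; space reports free slots */
+	unsigned int nb_in = qe_ring_enqueue_burst(r, in, 4, &space);
+
+	/* consumer side: dequeue whatever is available, up to 4 events */
+	unsigned int nb_out = qe_ring_dequeue_burst(r, out, 4);
+
+	RTE_SET_USED(nb_in);
+	RTE_SET_USED(nb_out);
+	qe_ring_destroy(r);
+}
+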
+#endif
diff --git a/src/seastar/dpdk/drivers/event/sw/iq_ring.h b/src/seastar/dpdk/drivers/event/sw/iq_ring.h
new file mode 100644
index 00000000..d480d156
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/sw/iq_ring.h
@@ -0,0 +1,176 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Ring structure definitions used for the internal ring buffers of the
+ * SW eventdev implementation. These are designed for single-core use only.
+ */
+#ifndef _IQ_RING_
+#define _IQ_RING_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_eventdev.h>
+
+#define IQ_RING_NAMESIZE 12
+#define QID_IQ_DEPTH 512
+#define QID_IQ_MASK (uint16_t)(QID_IQ_DEPTH - 1)
+
+struct iq_ring {
+ char name[IQ_RING_NAMESIZE] __rte_cache_aligned;
+ uint16_t write_idx;
+ uint16_t read_idx;
+
+ struct rte_event ring[QID_IQ_DEPTH];
+};
+
+#ifndef force_inline
+#define force_inline inline __attribute__((always_inline))
+#endif
+
+static inline struct iq_ring *
+iq_ring_create(const char *name, unsigned int socket_id)
+{
+ struct iq_ring *retval;
+
+ retval = rte_malloc_socket(NULL, sizeof(*retval), 0, socket_id);
+ if (retval == NULL)
+ goto end;
+
+ snprintf(retval->name, sizeof(retval->name), "%s", name);
+ retval->write_idx = retval->read_idx = 0;
+end:
+ return retval;
+}
+
+static inline void
+iq_ring_destroy(struct iq_ring *r)
+{
+ rte_free(r);
+}
+
+static force_inline uint16_t
+iq_ring_count(const struct iq_ring *r)
+{
+ return r->write_idx - r->read_idx;
+}
+
+static force_inline uint16_t
+iq_ring_free_count(const struct iq_ring *r)
+{
+ return QID_IQ_MASK - iq_ring_count(r);
+}
+
+static force_inline uint16_t
+iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
+{
+ const uint16_t read = r->read_idx;
+ uint16_t write = r->write_idx;
+ const uint16_t space = read + QID_IQ_MASK - write;
+ uint16_t i;
+
+ if (space < nb_qes)
+ nb_qes = space;
+
+ for (i = 0; i < nb_qes; i++, write++)
+ r->ring[write & QID_IQ_MASK] = qes[i];
+
+ r->write_idx = write;
+
+ return nb_qes;
+}
+
+static force_inline uint16_t
+iq_ring_dequeue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
+{
+ uint16_t read = r->read_idx;
+ const uint16_t write = r->write_idx;
+ const uint16_t items = write - read;
+ uint16_t i;
+
+ for (i = 0; i < nb_qes; i++, read++)
+ qes[i] = r->ring[read & QID_IQ_MASK];
+
+ if (items < nb_qes)
+ nb_qes = items;
+
+ r->read_idx += nb_qes;
+
+ return nb_qes;
+}
+
+/* assumes there is space, from a previous dequeue_burst */
+static force_inline uint16_t
+iq_ring_put_back(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
+{
+ uint16_t i, read = r->read_idx;
+
+ for (i = nb_qes; i-- > 0; )
+ r->ring[--read & QID_IQ_MASK] = qes[i];
+
+ r->read_idx = read;
+ return nb_qes;
+}
+
+static force_inline const struct rte_event *
+iq_ring_peek(const struct iq_ring *r)
+{
+ return &r->ring[r->read_idx & QID_IQ_MASK];
+}
+
+static force_inline void
+iq_ring_pop(struct iq_ring *r)
+{
+ r->read_idx++;
+}
+
+static force_inline int
+iq_ring_enqueue(struct iq_ring *r, const struct rte_event *qe)
+{
+ const uint16_t read = r->read_idx;
+ const uint16_t write = r->write_idx;
+ const uint16_t space = read + QID_IQ_MASK - write;
+
+ if (space == 0)
+ return -1;
+
+ r->ring[write & QID_IQ_MASK] = *qe;
+
+ r->write_idx = write + 1;
+
+ return 0;
+}
+
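+/*
+ * Illustrative usage sketch (not part of the original driver; the name
+ * "iq_ring_demo_usage" is hypothetical): the scheduler core parks a burst
+ * of events in an IQ and later drains it in FIFO order. All accesses must
+ * come from the same core.
+ */
+static inline void
+iq_ring_demo_usage(unsigned int socket_id)
+{
+	struct rte_event in[8], out[8];
+	uint16_t i, nb_in, nb_out;
+	struct iq_ring *r = iq_ring_create("demo_iq", socket_id);
+
+	if (r == NULL)
+		return;
+
+	for (i = 0; i < 8; i++) {
+		in[i].event = 0;	/* metadata word */
+		in[i].u64 = 0;		/* payload word */
+	}
+
+	/* stash a burst, then pull it back out; both calls return counts */
+	nb_in = iq_ring_enqueue_burst(r, in, 8);
+	nb_out = iq_ring_dequeue_burst(r, out, nb_in);
+
+	RTE_SET_USED(nb_out);
+	iq_ring_destroy(r);
+}
+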
+#endif
diff --git a/src/seastar/dpdk/drivers/event/sw/rte_pmd_evdev_sw_version.map b/src/seastar/dpdk/drivers/event/sw/rte_pmd_evdev_sw_version.map
new file mode 100644
index 00000000..5352e7e3
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/sw/rte_pmd_evdev_sw_version.map
@@ -0,0 +1,3 @@
+DPDK_17.05 {
+ local: *;
+};
diff --git a/src/seastar/dpdk/drivers/event/sw/sw_evdev.c b/src/seastar/dpdk/drivers/event/sw/sw_evdev.c
new file mode 100644
index 00000000..a31aaa66
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/sw/sw_evdev.c
@@ -0,0 +1,833 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_vdev.h>
+#include <rte_memzone.h>
+#include <rte_kvargs.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+
+#include "sw_evdev.h"
+#include "iq_ring.h"
+#include "event_ring.h"
+
+#define EVENTDEV_NAME_SW_PMD event_sw
+#define NUMA_NODE_ARG "numa_node"
+#define SCHED_QUANTA_ARG "sched_quanta"
+#define CREDIT_QUANTA_ARG "credit_quanta"
+
+static void
+sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
+
+static int
+sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t num)
+{
+ struct sw_port *p = port;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ int i;
+
+ RTE_SET_USED(priorities);
+ for (i = 0; i < num; i++) {
+ struct sw_qid *q = &sw->qids[queues[i]];
+
+ /* check for qid map overflow */
+ if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ if (p->is_directed && p->num_qids_mapped > 0) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ if (q->type == SW_SCHED_TYPE_DIRECT) {
+ /* check directed qids only map to one port */
+ if (p->num_qids_mapped > 0) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+ /* check port only takes a directed flow */
+ if (num > 1) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ p->is_directed = 1;
+ p->num_qids_mapped = 1;
+ } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
+ p->num_ordered_qids++;
+ p->num_qids_mapped++;
+ } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
+ p->num_qids_mapped++;
+ }
+
+ q->cq_map[q->cq_num_mapped_cqs] = p->id;
+ rte_smp_wmb();
+ q->cq_num_mapped_cqs++;
+ }
+ return i;
+}
+
+static int
+sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ struct sw_port *p = port;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ unsigned int i, j;
+
+ int unlinked = 0;
+ for (i = 0; i < nb_unlinks; i++) {
+ struct sw_qid *q = &sw->qids[queues[i]];
+ for (j = 0; j < q->cq_num_mapped_cqs; j++) {
+ if (q->cq_map[j] == p->id) {
+ q->cq_map[j] =
+ q->cq_map[q->cq_num_mapped_cqs - 1];
+ rte_smp_wmb();
+ q->cq_num_mapped_cqs--;
+ unlinked++;
+
+ p->num_qids_mapped--;
+
+ if (q->type == RTE_SCHED_TYPE_ORDERED)
+ p->num_ordered_qids--;
+
+ continue;
+ }
+ }
+ }
+ return unlinked;
+}
+
+static int
+sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *conf)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ struct sw_port *p = &sw->ports[port_id];
+ char buf[QE_RING_NAMESIZE];
+ unsigned int i;
+
+ struct rte_event_dev_info info;
+ sw_info_get(dev, &info);
+
+ /* detect re-configuring and return credits to instance if needed */
+ if (p->initialized) {
+		/* Taking credits from the pool is done one quantum at a time,
+		 * and credits may be spent (counted in p->inflights) or still
+		 * available in the port (p->inflight_credits). We must return
+		 * the sum so as not to leak credits.
+ */
+ int possible_inflights = p->inflight_credits + p->inflights;
+ rte_atomic32_sub(&sw->inflights, possible_inflights);
+ }
+
+ *p = (struct sw_port){0}; /* zero entire structure */
+ p->id = port_id;
+ p->sw = sw;
+
+ snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
+ "rx_worker_ring");
+ p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+ dev->data->socket_id);
+ if (p->rx_worker_ring == NULL) {
+ SW_LOG_ERR("Error creating RX worker ring for port %d\n",
+ port_id);
+ return -1;
+ }
+
+ p->inflight_max = conf->new_event_threshold;
+
+ snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
+ "cq_worker_ring");
+ p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
+ dev->data->socket_id);
+ if (p->cq_worker_ring == NULL) {
+ qe_ring_destroy(p->rx_worker_ring);
+ SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
+ port_id);
+ return -1;
+ }
+ sw->cq_ring_space[port_id] = conf->dequeue_depth;
+
+ /* set hist list contents to empty */
+ for (i = 0; i < SW_PORT_HIST_LIST; i++) {
+ p->hist_list[i].fid = -1;
+ p->hist_list[i].qid = -1;
+ }
+ dev->data->ports[port_id] = p;
+
+ rte_smp_wmb();
+ p->initialized = 1;
+ return 0;
+}
+
+static void
+sw_port_release(void *port)
+{
+ struct sw_port *p = (void *)port;
+ if (p == NULL)
+ return;
+
+ qe_ring_destroy(p->rx_worker_ring);
+ qe_ring_destroy(p->cq_worker_ring);
+ memset(p, 0, sizeof(*p));
+}
+
+static int32_t
+qid_init(struct sw_evdev *sw, unsigned int idx, int type,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ unsigned int i;
+ int dev_id = sw->data->dev_id;
+ int socket_id = sw->data->socket_id;
+ char buf[IQ_RING_NAMESIZE];
+ struct sw_qid *qid = &sw->qids[idx];
+
+ for (i = 0; i < SW_IQS_MAX; i++) {
+ snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
+ qid->iq[i] = iq_ring_create(buf, socket_id);
+ if (!qid->iq[i]) {
+ SW_LOG_DBG("ring create failed");
+ goto cleanup;
+ }
+ }
+
+ /* Initialize the FID structures to no pinning (-1), and zero packets */
+ const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
+ for (i = 0; i < RTE_DIM(qid->fids); i++)
+ qid->fids[i] = fid;
+
+ qid->id = idx;
+ qid->type = type;
+ qid->priority = queue_conf->priority;
+
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ char ring_name[RTE_RING_NAMESIZE];
+ uint32_t window_size;
+
+		/* rte_ring and window_size_mask require window_size to
+ * be a power-of-2.
+ */
+ window_size = rte_align32pow2(
+ queue_conf->nb_atomic_order_sequences);
+
+ qid->window_size = window_size - 1;
+
+ if (!window_size) {
+ SW_LOG_DBG(
+ "invalid reorder_window_size for ordered queue\n"
+ );
+ goto cleanup;
+ }
+
+ snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
+ qid->reorder_buffer = rte_zmalloc_socket(buf,
+ window_size * sizeof(qid->reorder_buffer[0]),
+ 0, socket_id);
+ if (!qid->reorder_buffer) {
+ SW_LOG_DBG("reorder_buffer malloc failed\n");
+ goto cleanup;
+ }
+
+ memset(&qid->reorder_buffer[0],
+ 0,
+ window_size * sizeof(qid->reorder_buffer[0]));
+
+ snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
+ dev_id, idx);
+
+ /* lookup the ring, and if it already exists, free it */
+ struct rte_ring *cleanup = rte_ring_lookup(ring_name);
+ if (cleanup)
+ rte_ring_free(cleanup);
+
+ qid->reorder_buffer_freelist = rte_ring_create(ring_name,
+ window_size,
+ socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (!qid->reorder_buffer_freelist) {
+ SW_LOG_DBG("freelist ring create failed");
+ goto cleanup;
+ }
+
+ /* Populate the freelist with reorder buffer entries. Enqueue
+ * 'window_size - 1' entries because the rte_ring holds only
+ * that many.
+ */
+ for (i = 0; i < window_size - 1; i++) {
+ if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
+ &qid->reorder_buffer[i]) < 0)
+ goto cleanup;
+ }
+
+ qid->reorder_buffer_index = 0;
+ qid->cq_next_tx = 0;
+ }
+
+ qid->initialized = 1;
+
+ return 0;
+
+cleanup:
+ for (i = 0; i < SW_IQS_MAX; i++) {
+ if (qid->iq[i])
+ iq_ring_destroy(qid->iq[i]);
+ }
+
+ if (qid->reorder_buffer) {
+ rte_free(qid->reorder_buffer);
+ qid->reorder_buffer = NULL;
+ }
+
+ if (qid->reorder_buffer_freelist) {
+ rte_ring_free(qid->reorder_buffer_freelist);
+ qid->reorder_buffer_freelist = NULL;
+ }
+
+ return -EINVAL;
+}
+
+static int
+sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *conf)
+{
+ int type;
+
+ /* SINGLE_LINK can be OR-ed with other types, so handle first */
+ if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
+ type = SW_SCHED_TYPE_DIRECT;
+ } else {
+ switch (conf->event_queue_cfg) {
+ case RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:
+ type = RTE_SCHED_TYPE_ATOMIC;
+ break;
+ case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
+ type = RTE_SCHED_TYPE_ORDERED;
+ break;
+ case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
+ type = RTE_SCHED_TYPE_PARALLEL;
+ break;
+ case RTE_EVENT_QUEUE_CFG_ALL_TYPES:
+ SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
+ return -ENOTSUP;
+ default:
+ SW_LOG_ERR("Unknown queue type %d requested\n",
+ conf->event_queue_cfg);
+ return -EINVAL;
+ }
+ }
+
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ return qid_init(sw, queue_id, type, conf);
+}
+
+static void
+sw_queue_release(struct rte_eventdev *dev, uint8_t id)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ struct sw_qid *qid = &sw->qids[id];
+ uint32_t i;
+
+ for (i = 0; i < SW_IQS_MAX; i++)
+ iq_ring_destroy(qid->iq[i]);
+
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ rte_free(qid->reorder_buffer);
+ rte_ring_free(qid->reorder_buffer_freelist);
+ }
+ memset(qid, 0, sizeof(*qid));
+}
+
+static void
+sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ static const struct rte_event_queue_conf default_conf = {
+ .nb_atomic_flows = 4096,
+ .nb_atomic_order_sequences = 1,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ };
+
+ *conf = default_conf;
+}
+
+static void
+sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold = 1024;
+ port_conf->dequeue_depth = 16;
+ port_conf->enqueue_depth = 16;
+}
+
+static int
+sw_dev_configure(const struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ const struct rte_eventdev_data *data = dev->data;
+ const struct rte_event_dev_config *conf = &data->dev_conf;
+
+ sw->qid_count = conf->nb_event_queues;
+ sw->port_count = conf->nb_event_ports;
+ sw->nb_events_limit = conf->nb_events_limit;
+ rte_atomic32_set(&sw->inflights, 0);
+
+ if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+static void
+sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
+{
+ RTE_SET_USED(dev);
+
+ static const struct rte_event_dev_info evdev_sw_info = {
+ .driver_name = SW_PMD_NAME,
+ .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
+ .max_event_queue_flows = SW_QID_NUM_FIDS,
+ .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
+ .max_event_priority_levels = SW_IQS_MAX,
+ .max_event_ports = SW_PORTS_MAX,
+ .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
+ .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
+ .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
+ .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
+ RTE_EVENT_DEV_CAP_EVENT_QOS),
+ };
+
+ *info = evdev_sw_info;
+}
+
+static void
+sw_dump(struct rte_eventdev *dev, FILE *f)
+{
+ const struct sw_evdev *sw = sw_pmd_priv(dev);
+
+ static const char * const q_type_strings[] = {
+ "Ordered", "Atomic", "Parallel", "Directed"
+ };
+ uint32_t i;
+ fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
+ sw->port_count, sw->qid_count);
+
+ fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n",
+ sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
+ fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
+ fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
+ fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
+ fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
+ uint32_t inflights = rte_atomic32_read(&sw->inflights);
+ uint32_t credits = sw->nb_events_limit - inflights;
+ fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);
+
+#define COL_RED "\x1b[31m"
+#define COL_RESET "\x1b[0m"
+
+ for (i = 0; i < sw->port_count; i++) {
+ int max, j;
+ const struct sw_port *p = &sw->ports[i];
+ if (!p->initialized) {
+ fprintf(f, " %sPort %d not initialized.%s\n",
+ COL_RED, i, COL_RESET);
+ continue;
+ }
+ fprintf(f, " Port %d %s\n", i,
+ p->is_directed ? " (SingleCons)" : "");
+ fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64
+ "\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
+ sw->ports[i].stats.rx_dropped,
+ sw->ports[i].stats.tx_pkts,
+ (p->inflights == p->inflight_max) ?
+ COL_RED : COL_RESET,
+ sw->ports[i].inflights, COL_RESET);
+
+ fprintf(f, "\tMax New: %u"
+ "\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
+ sw->ports[i].inflight_max,
+ sw->ports[i].avg_pkt_ticks,
+ sw->ports[i].inflight_credits);
+ fprintf(f, "\tReceive burst distribution:\n");
+ float zp_percent = p->zero_polls * 100.0 / p->total_polls;
+ fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
+ zp_percent);
+ for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
+ if (p->poll_buckets[max] != 0)
+ break;
+ for (j = 0; j <= max; j++) {
+ if (p->poll_buckets[j] != 0) {
+ float poll_pc = p->poll_buckets[j] * 100.0 /
+ p->total_polls;
+ fprintf(f, "%u-%u:%.02f%% ",
+ ((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
+ ((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
+ poll_pc);
+ }
+ }
+ fprintf(f, "\n");
+
+ if (p->rx_worker_ring) {
+ uint64_t used = qe_ring_count(p->rx_worker_ring);
+ uint64_t space = qe_ring_free_count(p->rx_worker_ring);
+ const char *col = (space == 0) ? COL_RED : COL_RESET;
+ fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
+ PRIu64 COL_RESET"\n", col, used, space);
+ } else
+ fprintf(f, "\trx ring not initialized.\n");
+
+ if (p->cq_worker_ring) {
+ uint64_t used = qe_ring_count(p->cq_worker_ring);
+ uint64_t space = qe_ring_free_count(p->cq_worker_ring);
+ const char *col = (space == 0) ? COL_RED : COL_RESET;
+ fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
+ PRIu64 COL_RESET"\n", col, used, space);
+ } else
+ fprintf(f, "\tcq ring not initialized.\n");
+ }
+
+ for (i = 0; i < sw->qid_count; i++) {
+ const struct sw_qid *qid = &sw->qids[i];
+ if (!qid->initialized) {
+ fprintf(f, " %sQueue %d not initialized.%s\n",
+ COL_RED, i, COL_RESET);
+ continue;
+ }
+ int affinities_per_port[SW_PORTS_MAX] = {0};
+ uint32_t inflights = 0;
+
+ fprintf(f, " Queue %d (%s)\n", i, q_type_strings[qid->type]);
+ fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64"\n",
+ qid->stats.rx_pkts, qid->stats.rx_dropped,
+ qid->stats.tx_pkts);
+ if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+ struct rte_ring *rob_buf_free =
+ qid->reorder_buffer_freelist;
+ if (rob_buf_free)
+ fprintf(f, "\tReorder entries in use: %u\n",
+ rte_ring_free_count(rob_buf_free));
+ else
+ fprintf(f,
+ "\tReorder buffer not initialized\n");
+ }
+
+ uint32_t flow;
+ for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
+ if (qid->fids[flow].cq != -1) {
+ affinities_per_port[qid->fids[flow].cq]++;
+ inflights += qid->fids[flow].pcount;
+ }
+
+ uint32_t cq;
+ fprintf(f, "\tInflights: %u\tFlows pinned per port: ",
+ inflights);
+ for (cq = 0; cq < sw->port_count; cq++)
+ fprintf(f, "%d ", affinities_per_port[cq]);
+ fprintf(f, "\n");
+
+ uint32_t iq;
+ uint32_t iq_printed = 0;
+ for (iq = 0; iq < SW_IQS_MAX; iq++) {
+ if (!qid->iq[iq]) {
+ fprintf(f, "\tiq %d is not initialized.\n", iq);
+ iq_printed = 1;
+ continue;
+ }
+ uint32_t used = iq_ring_count(qid->iq[iq]);
+ uint32_t free = iq_ring_free_count(qid->iq[iq]);
+ const char *col = (free == 0) ? COL_RED : COL_RESET;
+ if (used > 0) {
+ fprintf(f, "\t%siq %d: Used %d\tFree %d"
+ COL_RESET"\n", col, iq, used, free);
+ iq_printed = 1;
+ }
+ }
+ if (iq_printed == 0)
+ fprintf(f, "\t-- iqs empty --\n");
+ }
+}
+
+static int
+sw_start(struct rte_eventdev *dev)
+{
+ unsigned int i, j;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ /* check all ports are set up */
+ for (i = 0; i < sw->port_count; i++)
+ if (sw->ports[i].rx_worker_ring == NULL) {
+ SW_LOG_ERR("Port %d not configured\n", i);
+ return -ESTALE;
+ }
+
+	/* check all queues are configured and mapped to ports */
+ for (i = 0; i < sw->qid_count; i++)
+ if (sw->qids[i].iq[0] == NULL ||
+ sw->qids[i].cq_num_mapped_cqs == 0) {
+ SW_LOG_ERR("Queue %d not configured\n", i);
+ return -ENOLINK;
+ }
+
+ /* build up our prioritized array of qids */
+ /* We don't use qsort here, as if all/multiple entries have the same
+	/* We don't use qsort here because, if multiple entries have the same
+	 * priority, the result is non-deterministic. From "man 3 qsort":
+ * array is undefined."
+ */
+ uint32_t qidx = 0;
+ for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
+ for (i = 0; i < sw->qid_count; i++) {
+ if (sw->qids[i].priority == j) {
+ sw->qids_prioritized[qidx] = &sw->qids[i];
+ qidx++;
+ }
+ }
+ }
+
+ if (sw_xstats_init(sw) < 0)
+ return -EINVAL;
+
+ rte_smp_wmb();
+ sw->started = 1;
+
+ return 0;
+}
+
+static void
+sw_stop(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ sw_xstats_uninit(sw);
+ sw->started = 0;
+ rte_smp_wmb();
+}
+
+static int
+sw_close(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ uint32_t i;
+
+ for (i = 0; i < sw->qid_count; i++)
+ sw_queue_release(dev, i);
+ sw->qid_count = 0;
+
+ for (i = 0; i < sw->port_count; i++)
+ sw_port_release(&sw->ports[i]);
+ sw->port_count = 0;
+
+ memset(&sw->stats, 0, sizeof(sw->stats));
+ sw->sched_called = 0;
+ sw->sched_no_iq_enqueues = 0;
+ sw->sched_no_cq_enqueues = 0;
+ sw->sched_cq_qid_called = 0;
+
+ return 0;
+}
+
+static int
+assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *socket_id = opaque;
+ *socket_id = atoi(value);
+ if (*socket_id >= RTE_MAX_NUMA_NODES)
+ return -1;
+ return 0;
+}
+
+static int
+set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *quanta = opaque;
+ *quanta = atoi(value);
+ if (*quanta < 0 || *quanta >= 4096)
+ return -1;
+ return 0;
+}
+
+static int
+set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
+{
+ int *credit = opaque;
+ *credit = atoi(value);
+ if (*credit < 0 || *credit >= 128)
+ return -1;
+ return 0;
+}
+
+static int
+sw_probe(struct rte_vdev_device *vdev)
+{
+ static const struct rte_eventdev_ops evdev_sw_ops = {
+ .dev_configure = sw_dev_configure,
+ .dev_infos_get = sw_info_get,
+ .dev_close = sw_close,
+ .dev_start = sw_start,
+ .dev_stop = sw_stop,
+ .dump = sw_dump,
+
+ .queue_def_conf = sw_queue_def_conf,
+ .queue_setup = sw_queue_setup,
+ .queue_release = sw_queue_release,
+ .port_def_conf = sw_port_def_conf,
+ .port_setup = sw_port_setup,
+ .port_release = sw_port_release,
+ .port_link = sw_port_link,
+ .port_unlink = sw_port_unlink,
+
+ .xstats_get = sw_xstats_get,
+ .xstats_get_names = sw_xstats_get_names,
+ .xstats_get_by_name = sw_xstats_get_by_name,
+ .xstats_reset = sw_xstats_reset,
+ };
+
+ static const char *const args[] = {
+ NUMA_NODE_ARG,
+ SCHED_QUANTA_ARG,
+ CREDIT_QUANTA_ARG,
+ NULL
+ };
+ const char *name;
+ const char *params;
+ struct rte_eventdev *dev;
+ struct sw_evdev *sw;
+ int socket_id = rte_socket_id();
+ int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
+ int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
+
+ name = rte_vdev_device_name(vdev);
+ params = rte_vdev_device_args(vdev);
+ if (params != NULL && params[0] != '\0') {
+ struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
+
+ if (!kvlist) {
+ SW_LOG_INFO(
+ "Ignoring unsupported parameters when creating device '%s'\n",
+ name);
+ } else {
+ int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
+ assign_numa_node, &socket_id);
+ if (ret != 0) {
+ SW_LOG_ERR(
+ "%s: Error parsing numa node parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
+ set_sched_quanta, &sched_quanta);
+ if (ret != 0) {
+ SW_LOG_ERR(
+ "%s: Error parsing sched quanta parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
+ set_credit_quanta, &credit_quanta);
+ if (ret != 0) {
+ SW_LOG_ERR(
+ "%s: Error parsing credit quanta parameter",
+ name);
+ rte_kvargs_free(kvlist);
+ return ret;
+ }
+
+ rte_kvargs_free(kvlist);
+ }
+ }
+
+ SW_LOG_INFO(
+ "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
+ name, socket_id, sched_quanta, credit_quanta);
+
+ dev = rte_event_pmd_vdev_init(name,
+ sizeof(struct sw_evdev), socket_id);
+ if (dev == NULL) {
+ SW_LOG_ERR("eventdev vdev init() failed");
+ return -EFAULT;
+ }
+ dev->dev_ops = &evdev_sw_ops;
+ dev->enqueue = sw_event_enqueue;
+ dev->enqueue_burst = sw_event_enqueue_burst;
+ dev->dequeue = sw_event_dequeue;
+ dev->dequeue_burst = sw_event_dequeue_burst;
+ dev->schedule = sw_event_schedule;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ sw = dev->data->dev_private;
+ sw->data = dev->data;
+
+ /* copy values passed from vdev command line to instance */
+ sw->credit_update_quanta = credit_quanta;
+ sw->sched_quanta = sched_quanta;
+
+ return 0;
+}
+
+static int
+sw_remove(struct rte_vdev_device *vdev)
+{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ SW_LOG_INFO("Closing eventdev sw device %s\n", name);
+
+ return rte_event_pmd_vdev_uninit(name);
+}
+
+static struct rte_vdev_driver evdev_sw_pmd_drv = {
+ .probe = sw_probe,
+ .remove = sw_remove
+};
+
+RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
+	SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");
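
The registration above exposes the PMD to the vdev bus along with the three optional arguments parsed in sw_probe(). As a minimal sketch (not part of the patch), an application built against this eventdev API could locate an instance created on the EAL command line; the device name and argument values are illustrative only:

    #include <rte_eventdev.h>

    /* Assumes EAL was started with:
     *   --vdev="event_sw0,sched_quanta=64,credit_quanta=32,numa_node=0"
     * which triggers sw_probe() with those kvargs.
     */
    static int
    get_sw_evdev_id(void)
    {
            return rte_event_dev_get_dev_id("event_sw0"); /* negative if absent */
    }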
diff --git a/src/seastar/dpdk/drivers/event/sw/sw_evdev.h b/src/seastar/dpdk/drivers/event/sw/sw_evdev.h
new file mode 100644
index 00000000..61c671d6
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/sw/sw_evdev.h
@@ -0,0 +1,318 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SW_EVDEV_H_
+#define _SW_EVDEV_H_
+
+#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_atomic.h>
+
+#define SW_DEFAULT_CREDIT_QUANTA 32
+#define SW_DEFAULT_SCHED_QUANTA 128
+#define SW_QID_NUM_FIDS 16384
+#define SW_IQS_MAX 4
+#define SW_Q_PRIORITY_MAX 255
+#define SW_PORTS_MAX 64
+#define MAX_SW_CONS_Q_DEPTH 128
+#define SW_INFLIGHT_EVENTS_TOTAL 4096
+/* allow for lots of over-provisioning */
+#define MAX_SW_PROD_Q_DEPTH 4096
+#define SW_FRAGMENTS_MAX 16
+
+/* report dequeue burst sizes in buckets */
+#define SW_DEQ_STAT_BUCKET_SHIFT 2
+/* how many packets pulled from port by sched */
+#define SCHED_DEQUEUE_BURST_SIZE 32
+
+#define SW_PORT_HIST_LIST (MAX_SW_PROD_Q_DEPTH) /* size of our history list */
+#define NUM_SAMPLES 64 /* how many data points to use for average stats */
+
+#define EVENTDEV_NAME_SW_PMD event_sw
+#define SW_PMD_NAME RTE_STR(event_sw)
+
+#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)
+
+#define SW_NUM_POLL_BUCKETS (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT)
+
+enum {
+ QE_FLAG_VALID_SHIFT = 0,
+ QE_FLAG_COMPLETE_SHIFT,
+ QE_FLAG_NOT_EOP_SHIFT,
+ _QE_FLAG_COUNT
+};
+
+#define QE_FLAG_VALID (1 << QE_FLAG_VALID_SHIFT) /* for NEW FWD, FRAG */
+#define QE_FLAG_COMPLETE (1 << QE_FLAG_COMPLETE_SHIFT) /* set for FWD, DROP */
+#define QE_FLAG_NOT_EOP (1 << QE_FLAG_NOT_EOP_SHIFT) /* set for FRAG only */
+
+static const uint8_t sw_qe_flag_map[] = {
+ QE_FLAG_VALID /* NEW Event */,
+ QE_FLAG_VALID | QE_FLAG_COMPLETE /* FWD Event */,
+ QE_FLAG_COMPLETE /* RELEASE Event */,
+
+	/* Values which can be used for future support for partial
+	 * events, i.e. where one event comes back to the scheduler
+	 * as multiple events which need to be tracked together
+	 */
+ QE_FLAG_VALID | QE_FLAG_COMPLETE | QE_FLAG_NOT_EOP,
+};
+
+#ifdef RTE_LIBRTE_PMD_EVDEV_SW_DEBUG
+#define SW_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
+ SW_PMD_NAME, \
+ __func__, __LINE__, ## args)
+
+#define SW_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
+ SW_PMD_NAME, \
+ __func__, __LINE__, ## args)
+#else
+#define SW_LOG_INFO(fmt, args...)
+#define SW_LOG_DBG(fmt, args...)
+#endif
+
+#define SW_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, EVENTDEV, "[%s] %s() line %u: " fmt "\n", \
+ SW_PMD_NAME, \
+ __func__, __LINE__, ## args)
+
+/* Records basic event stats at a given point. Used in port and qid structs */
+struct sw_point_stats {
+ uint64_t rx_pkts;
+ uint64_t rx_dropped;
+ uint64_t tx_pkts;
+};
+
+/* structure used to track what port a flow (FID) is pinned to */
+struct sw_fid_t {
+ /* which CQ this FID is currently pinned to */
+ int32_t cq;
+ /* number of packets gone to the CQ with this FID */
+ uint32_t pcount;
+};
+
+struct reorder_buffer_entry {
+ uint16_t num_fragments; /**< Number of packet fragments */
+ uint16_t fragment_index; /**< Points to the oldest valid frag */
+ uint8_t ready; /**< Entry is ready to be reordered */
+ struct rte_event fragments[SW_FRAGMENTS_MAX];
+};
+
+struct sw_qid {
+ /* set when the QID has been initialized */
+ uint8_t initialized;
+ /* The type of this QID */
+ int8_t type;
+ /* Integer ID representing the queue. This is used in history lists,
+ * to identify the stage of processing.
+ */
+ uint32_t id;
+ struct sw_point_stats stats;
+
+ /* Internal priority rings for packets */
+ struct iq_ring *iq[SW_IQS_MAX];
+	uint32_t iq_pkt_mask; /* bitmask of IQs that currently hold packets */
+ uint64_t iq_pkt_count[SW_IQS_MAX];
+
+ /* Information on what CQs are polling this IQ */
+ uint32_t cq_num_mapped_cqs;
+ uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
+ uint32_t cq_map[SW_PORTS_MAX];
+
+ /* Track flow ids for atomic load balancing */
+ struct sw_fid_t fids[SW_QID_NUM_FIDS];
+
+ /* Track packet order for reordering when needed */
+	struct reorder_buffer_entry *reorder_buffer; /* pkts awaiting reorder */
+ struct rte_ring *reorder_buffer_freelist; /* available reorder slots */
+ uint32_t reorder_buffer_index; /* oldest valid reorder buffer entry */
+ uint32_t window_size; /* Used to wrap reorder_buffer_index */
+
+ uint8_t priority;
+};
+
+struct sw_hist_list_entry {
+ int32_t qid;
+ int32_t fid;
+ struct reorder_buffer_entry *rob_entry;
+};
+
+struct sw_evdev;
+
+struct sw_port {
+ /* new enqueue / dequeue API doesn't have an instance pointer, only the
+ * pointer to the port being enqueue/dequeued from
+ */
+ struct sw_evdev *sw;
+
+ /* set when the port is initialized */
+ uint8_t initialized;
+ /* A numeric ID for the port */
+ uint8_t id;
+
+	int16_t is_directed; /**< Takes events from a single directed QID */
+	/**
+	 * For load-balanced ports we can optimise pulling packets from
+	 * producers if there is no reordering involved
+	 */
+	int16_t num_ordered_qids;
+
+ /** Ring and buffer for pulling events from workers for scheduling */
+ struct qe_ring *rx_worker_ring __rte_cache_aligned;
+ /** Ring and buffer for pushing packets to workers after scheduling */
+ struct qe_ring *cq_worker_ring;
+
+ /* hole */
+
+ /* num releases yet to be completed on this port */
+ uint16_t outstanding_releases __rte_cache_aligned;
+ uint16_t inflight_max; /* app requested max inflights for this port */
+ uint16_t inflight_credits; /* num credits this port has right now */
+
+ uint16_t last_dequeue_burst_sz; /* how big the burst was */
+ uint64_t last_dequeue_ticks; /* used to track burst processing time */
+	uint64_t avg_pkt_ticks; /* tracks average over NUM_SAMPLES bursts */
+ uint64_t total_polls; /* how many polls were counted in stats */
+ uint64_t zero_polls; /* tracks polls returning nothing */
+ uint32_t poll_buckets[SW_NUM_POLL_BUCKETS];
+		/* poll counts bucketed in groups of 4 for shorter reporting */
+
+ /* History list structs, containing info on pkts egressed to worker */
+ uint16_t hist_head __rte_cache_aligned;
+ uint16_t hist_tail;
+ uint16_t inflights;
+ struct sw_hist_list_entry hist_list[SW_PORT_HIST_LIST];
+
+ /* track packets in and out of this port */
+ struct sw_point_stats stats;
+
+
+ uint32_t pp_buf_start;
+ uint32_t pp_buf_count;
+ uint16_t cq_buf_count;
+ struct rte_event pp_buf[SCHED_DEQUEUE_BURST_SIZE];
+ struct rte_event cq_buf[MAX_SW_CONS_Q_DEPTH];
+
+ uint8_t num_qids_mapped;
+};
+
+struct sw_evdev {
+ struct rte_eventdev_data *data;
+
+ uint32_t port_count;
+ uint32_t qid_count;
+ uint32_t xstats_count;
+ struct sw_xstats_entry *xstats;
+ uint32_t xstats_count_mode_dev;
+ uint32_t xstats_count_mode_port;
+ uint32_t xstats_count_mode_queue;
+
+ /* Contains all ports - load balanced and directed */
+ struct sw_port ports[SW_PORTS_MAX] __rte_cache_aligned;
+
+ rte_atomic32_t inflights __rte_cache_aligned;
+
+ /*
+ * max events in this instance. Cached here for performance.
+ * (also available in data->conf.nb_events_limit)
+ */
+ uint32_t nb_events_limit;
+
+ /* Internal queues - one per logical queue */
+ struct sw_qid qids[RTE_EVENT_MAX_QUEUES_PER_DEV] __rte_cache_aligned;
+
+	/* Cached free-space count for each port's cq ring */
+ uint16_t cq_ring_space[SW_PORTS_MAX] __rte_cache_aligned;
+
+ /* Array of pointers to load-balanced QIDs sorted by priority level */
+ struct sw_qid *qids_prioritized[RTE_EVENT_MAX_QUEUES_PER_DEV];
+
+ /* Stats */
+ struct sw_point_stats stats __rte_cache_aligned;
+ uint64_t sched_called;
+ int32_t sched_quanta;
+ uint64_t sched_no_iq_enqueues;
+ uint64_t sched_no_cq_enqueues;
+ uint64_t sched_cq_qid_called;
+
+ uint8_t started;
+ uint32_t credit_update_quanta;
+
+ /* store num stats and offset of the stats for each port */
+ uint16_t xstats_count_per_port[SW_PORTS_MAX];
+ uint16_t xstats_offset_for_port[SW_PORTS_MAX];
+ /* store num stats and offset of the stats for each queue */
+ uint16_t xstats_count_per_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint16_t xstats_offset_for_qid[RTE_EVENT_MAX_QUEUES_PER_DEV];
+};
+
+static inline struct sw_evdev *
+sw_pmd_priv(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+static inline const struct sw_evdev *
+sw_pmd_priv_const(const struct rte_eventdev *eventdev)
+{
+ return eventdev->data->dev_private;
+}
+
+uint16_t sw_event_enqueue(void *port, const struct rte_event *ev);
+uint16_t sw_event_enqueue_burst(void *port, const struct rte_event ev[],
+ uint16_t num);
+
+uint16_t sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait);
+uint16_t sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
+ uint64_t wait);
+void sw_event_schedule(struct rte_eventdev *dev);
+int sw_xstats_init(struct sw_evdev *dev);
+int sw_xstats_uninit(struct sw_evdev *dev);
+int sw_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size);
+int sw_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n);
+uint64_t sw_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id);
+int sw_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids);
+
+
+#endif /* _SW_EVDEV_H_ */
diff --git a/src/seastar/dpdk/drivers/event/sw/sw_evdev_scheduler.c b/src/seastar/dpdk/drivers/event/sw/sw_evdev_scheduler.c
new file mode 100644
index 00000000..a333a6f0
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/sw/sw_evdev_scheduler.c
@@ -0,0 +1,601 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ring.h>
+#include <rte_hash_crc.h>
+#include "sw_evdev.h"
+#include "iq_ring.h"
+#include "event_ring.h"
+
+#define SW_IQS_MASK (SW_IQS_MAX-1)
+
+/* Retrieve the highest-priority IQ with packets, or SW_IQS_MAX if no pkts are
+ * available. Recomputing the CTZ is faster than caching the value due to
+ * data dependencies
+ */
+#define PKT_MASK_TO_IQ(pkts) \
+ (__builtin_ctz(pkts | (1 << SW_IQS_MAX)))
+
+#if SW_IQS_MAX != 4
+#error Misconfigured PRIO_TO_IQ caused by SW_IQS_MAX value change
+#endif
+#define PRIO_TO_IQ(prio) (prio >> 6)
+
+#define MAX_PER_IQ_DEQUEUE 48
+#define FLOWID_MASK (SW_QID_NUM_FIDS-1)
+/* use cheap bit mixing, we only need to lose a few bits */
+#define SW_HASH_FLOWID(f) (((f) ^ (f >> 10)) & FLOWID_MASK)
+
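A quick worked example of PRIO_TO_IQ() and SW_HASH_FLOWID() above, assuming SW_IQS_MAX == 4 and SW_QID_NUM_FIDS == 16384 as defined in sw_evdev.h:

    /* PRIO_TO_IQ(0)   == 0  -> highest-priority internal queue */
    /* PRIO_TO_IQ(255) == 3  -> lowest-priority internal queue  */
    /* SW_HASH_FLOWID(0x12345) == ((0x12345 ^ 0x48) & 0x3fff) == 0x230d */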
+static inline uint32_t
+sw_schedule_atomic_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
+ uint32_t iq_num, unsigned int count)
+{
+ struct rte_event qes[MAX_PER_IQ_DEQUEUE]; /* count <= MAX */
+ struct rte_event blocked_qes[MAX_PER_IQ_DEQUEUE];
+ uint32_t nb_blocked = 0;
+ uint32_t i;
+
+ if (count > MAX_PER_IQ_DEQUEUE)
+ count = MAX_PER_IQ_DEQUEUE;
+
+ /* This is the QID ID. The QID ID is static, hence it can be
+ * used to identify the stage of processing in history lists etc
+ */
+ uint32_t qid_id = qid->id;
+
+ iq_ring_dequeue_burst(qid->iq[iq_num], qes, count);
+ for (i = 0; i < count; i++) {
+ const struct rte_event *qe = &qes[i];
+ const uint16_t flow_id = SW_HASH_FLOWID(qes[i].flow_id);
+ struct sw_fid_t *fid = &qid->fids[flow_id];
+ int cq = fid->cq;
+
+ if (cq < 0) {
+ uint32_t cq_idx = qid->cq_next_tx++;
+ if (qid->cq_next_tx == qid->cq_num_mapped_cqs)
+ qid->cq_next_tx = 0;
+ cq = qid->cq_map[cq_idx];
+
+ /* find least used */
+ int cq_free_cnt = sw->cq_ring_space[cq];
+ for (cq_idx = 0; cq_idx < qid->cq_num_mapped_cqs;
+ cq_idx++) {
+ int test_cq = qid->cq_map[cq_idx];
+ int test_cq_free = sw->cq_ring_space[test_cq];
+ if (test_cq_free > cq_free_cnt) {
+ cq = test_cq;
+ cq_free_cnt = test_cq_free;
+ }
+ }
+
+			fid->cq = cq; /* pin the flow to this CQ early, even if the event blocks below */
+ }
+
+ if (sw->cq_ring_space[cq] == 0 ||
+ sw->ports[cq].inflights == SW_PORT_HIST_LIST) {
+ blocked_qes[nb_blocked++] = *qe;
+ continue;
+ }
+
+ struct sw_port *p = &sw->ports[cq];
+
+ /* at this point we can queue up the packet on the cq_buf */
+ fid->pcount++;
+ p->cq_buf[p->cq_buf_count++] = *qe;
+ p->inflights++;
+ sw->cq_ring_space[cq]--;
+
+ int head = (p->hist_head++ & (SW_PORT_HIST_LIST-1));
+ p->hist_list[head].fid = flow_id;
+ p->hist_list[head].qid = qid_id;
+
+ p->stats.tx_pkts++;
+ qid->stats.tx_pkts++;
+
+ /* if we just filled in the last slot, flush the buffer */
+ if (sw->cq_ring_space[cq] == 0) {
+ struct qe_ring *worker = p->cq_worker_ring;
+ qe_ring_enqueue_burst(worker, p->cq_buf,
+ p->cq_buf_count,
+ &sw->cq_ring_space[cq]);
+ p->cq_buf_count = 0;
+ }
+ }
+ iq_ring_put_back(qid->iq[iq_num], blocked_qes, nb_blocked);
+
+ return count - nb_blocked;
+}
+
+static inline uint32_t
+sw_schedule_parallel_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
+ uint32_t iq_num, unsigned int count, int keep_order)
+{
+ uint32_t i;
+ uint32_t cq_idx = qid->cq_next_tx;
+
+ /* This is the QID ID. The QID ID is static, hence it can be
+ * used to identify the stage of processing in history lists etc
+ */
+ uint32_t qid_id = qid->id;
+
+ if (count > MAX_PER_IQ_DEQUEUE)
+ count = MAX_PER_IQ_DEQUEUE;
+
+ if (keep_order)
+ /* only schedule as many as we have reorder buffer entries */
+ count = RTE_MIN(count,
+ rte_ring_count(qid->reorder_buffer_freelist));
+
+ for (i = 0; i < count; i++) {
+ const struct rte_event *qe = iq_ring_peek(qid->iq[iq_num]);
+ uint32_t cq_check_count = 0;
+ uint32_t cq;
+
+ /*
+ * for parallel, just send to next available CQ in round-robin
+ * fashion. So scan for an available CQ. If all CQs are full
+ * just return and move on to next QID
+ */
+ do {
+ if (++cq_check_count > qid->cq_num_mapped_cqs)
+ goto exit;
+ cq = qid->cq_map[cq_idx];
+ if (++cq_idx == qid->cq_num_mapped_cqs)
+ cq_idx = 0;
+ } while (qe_ring_free_count(sw->ports[cq].cq_worker_ring) == 0 ||
+ sw->ports[cq].inflights == SW_PORT_HIST_LIST);
+
+ struct sw_port *p = &sw->ports[cq];
+ if (sw->cq_ring_space[cq] == 0 ||
+ p->inflights == SW_PORT_HIST_LIST)
+ break;
+
+ sw->cq_ring_space[cq]--;
+
+ qid->stats.tx_pkts++;
+
+ const int head = (p->hist_head & (SW_PORT_HIST_LIST-1));
+ p->hist_list[head].fid = SW_HASH_FLOWID(qe->flow_id);
+ p->hist_list[head].qid = qid_id;
+
+ if (keep_order)
+ rte_ring_sc_dequeue(qid->reorder_buffer_freelist,
+ (void *)&p->hist_list[head].rob_entry);
+
+ sw->ports[cq].cq_buf[sw->ports[cq].cq_buf_count++] = *qe;
+ iq_ring_pop(qid->iq[iq_num]);
+
+ rte_compiler_barrier();
+ p->inflights++;
+ p->stats.tx_pkts++;
+ p->hist_head++;
+ }
+exit:
+ qid->cq_next_tx = cq_idx;
+ return i;
+}
+
+static uint32_t
+sw_schedule_dir_to_cq(struct sw_evdev *sw, struct sw_qid * const qid,
+ uint32_t iq_num, unsigned int count __rte_unused)
+{
+ uint32_t cq_id = qid->cq_map[0];
+ struct sw_port *port = &sw->ports[cq_id];
+
+ /* get max burst enq size for cq_ring */
+ uint32_t count_free = sw->cq_ring_space[cq_id];
+ if (count_free == 0)
+ return 0;
+
+ /* burst dequeue from the QID IQ ring */
+ struct iq_ring *ring = qid->iq[iq_num];
+ uint32_t ret = iq_ring_dequeue_burst(ring,
+ &port->cq_buf[port->cq_buf_count], count_free);
+ port->cq_buf_count += ret;
+
+ /* Update QID, Port and Total TX stats */
+ qid->stats.tx_pkts += ret;
+ port->stats.tx_pkts += ret;
+
+ /* Subtract credits from cached value */
+ sw->cq_ring_space[cq_id] -= ret;
+
+ return ret;
+}
+
+static uint32_t
+sw_schedule_qid_to_cq(struct sw_evdev *sw)
+{
+ uint32_t pkts = 0;
+ uint32_t qid_idx;
+
+ sw->sched_cq_qid_called++;
+
+ for (qid_idx = 0; qid_idx < sw->qid_count; qid_idx++) {
+ struct sw_qid *qid = sw->qids_prioritized[qid_idx];
+
+ int type = qid->type;
+ int iq_num = PKT_MASK_TO_IQ(qid->iq_pkt_mask);
+
+ /* zero mapped CQs indicates directed */
+ if (iq_num >= SW_IQS_MAX)
+ continue;
+
+ uint32_t pkts_done = 0;
+ uint32_t count = iq_ring_count(qid->iq[iq_num]);
+
+ if (count > 0) {
+ if (type == SW_SCHED_TYPE_DIRECT)
+ pkts_done += sw_schedule_dir_to_cq(sw, qid,
+ iq_num, count);
+ else if (type == RTE_SCHED_TYPE_ATOMIC)
+ pkts_done += sw_schedule_atomic_to_cq(sw, qid,
+ iq_num, count);
+ else
+ pkts_done += sw_schedule_parallel_to_cq(sw, qid,
+ iq_num, count,
+ type == RTE_SCHED_TYPE_ORDERED);
+ }
+
+		/* Check if the IQ that was polled is now empty, and if so
+		 * unset its bit in the IQ mask.
+		 */
+ int all_done = (pkts_done == count);
+
+ qid->iq_pkt_mask &= ~(all_done << (iq_num));
+ pkts += pkts_done;
+ }
+
+ return pkts;
+}
+
+/* This function performs re-ordering of packets and injects them into the
+ * appropriate QID IQ. As LB and DIR QIDs are in the same array, but *NOT*
+ * contiguous in that array, this function accepts a "range" of QIDs to scan.
+ */
+static uint16_t
+sw_schedule_reorder(struct sw_evdev *sw, int qid_start, int qid_end)
+{
+ /* Perform egress reordering */
+ struct rte_event *qe;
+ uint32_t pkts_iter = 0;
+
+ for (; qid_start < qid_end; qid_start++) {
+ struct sw_qid *qid = &sw->qids[qid_start];
+ int i, num_entries_in_use;
+
+ if (qid->type != RTE_SCHED_TYPE_ORDERED)
+ continue;
+
+ num_entries_in_use = rte_ring_free_count(
+ qid->reorder_buffer_freelist);
+
+ for (i = 0; i < num_entries_in_use; i++) {
+ struct reorder_buffer_entry *entry;
+ int j;
+
+ entry = &qid->reorder_buffer[qid->reorder_buffer_index];
+
+ if (!entry->ready)
+ break;
+
+ for (j = 0; j < entry->num_fragments; j++) {
+ uint16_t dest_qid;
+ uint16_t dest_iq;
+
+ int idx = entry->fragment_index + j;
+ qe = &entry->fragments[idx];
+
+ dest_qid = qe->queue_id;
+ dest_iq = PRIO_TO_IQ(qe->priority);
+
+ if (dest_qid >= sw->qid_count) {
+ sw->stats.rx_dropped++;
+ continue;
+ }
+
+ struct sw_qid *dest_qid_ptr =
+ &sw->qids[dest_qid];
+ const struct iq_ring *dest_iq_ptr =
+ dest_qid_ptr->iq[dest_iq];
+ if (iq_ring_free_count(dest_iq_ptr) == 0)
+ break;
+
+ pkts_iter++;
+
+ struct sw_qid *q = &sw->qids[dest_qid];
+ struct iq_ring *r = q->iq[dest_iq];
+
+ /* we checked for space above, so enqueue must
+ * succeed
+ */
+ iq_ring_enqueue(r, qe);
+ q->iq_pkt_mask |= (1 << (dest_iq));
+ q->iq_pkt_count[dest_iq]++;
+ q->stats.rx_pkts++;
+ }
+
+ entry->ready = (j != entry->num_fragments);
+ entry->num_fragments -= j;
+ entry->fragment_index += j;
+
+ if (!entry->ready) {
+ entry->fragment_index = 0;
+
+ rte_ring_sp_enqueue(
+ qid->reorder_buffer_freelist,
+ entry);
+
+ qid->reorder_buffer_index++;
+ qid->reorder_buffer_index %= qid->window_size;
+ }
+ }
+ }
+ return pkts_iter;
+}
+
+static inline void __attribute__((always_inline))
+sw_refill_pp_buf(struct sw_evdev *sw, struct sw_port *port)
+{
+ RTE_SET_USED(sw);
+ struct qe_ring *worker = port->rx_worker_ring;
+ port->pp_buf_start = 0;
+ port->pp_buf_count = qe_ring_dequeue_burst(worker, port->pp_buf,
+ RTE_DIM(port->pp_buf));
+}
+
+static inline uint32_t __attribute__((always_inline))
+__pull_port_lb(struct sw_evdev *sw, uint32_t port_id, int allow_reorder)
+{
+ static struct reorder_buffer_entry dummy_rob;
+ uint32_t pkts_iter = 0;
+ struct sw_port *port = &sw->ports[port_id];
+
+ /* If shadow ring has 0 pkts, pull from worker ring */
+ if (port->pp_buf_count == 0)
+ sw_refill_pp_buf(sw, port);
+
+ while (port->pp_buf_count) {
+ const struct rte_event *qe = &port->pp_buf[port->pp_buf_start];
+ struct sw_hist_list_entry *hist_entry = NULL;
+ uint8_t flags = qe->op;
+ const uint16_t eop = !(flags & QE_FLAG_NOT_EOP);
+ int needs_reorder = 0;
+		/* if not reordering, treat a PARTIAL event the same as NEW */
+ if (!allow_reorder && !eop)
+ flags = QE_FLAG_VALID;
+
+		/*
+		 * if we don't have space for this packet in an IQ,
+		 * then move on to the next port. Technically, for a
+		 * packet that needs reordering, we don't need to check
+		 * here, but it simplifies things not to special-case it
+		 */
+ uint32_t iq_num = PRIO_TO_IQ(qe->priority);
+ struct sw_qid *qid = &sw->qids[qe->queue_id];
+
+ if ((flags & QE_FLAG_VALID) &&
+ iq_ring_free_count(qid->iq[iq_num]) == 0)
+ break;
+
+ /* now process based on flags. Note that for directed
+ * queues, the enqueue_flush masks off all but the
+ * valid flag. This makes FWD and PARTIAL enqueues just
+ * NEW type, and makes DROPS no-op calls.
+ */
+ if ((flags & QE_FLAG_COMPLETE) && port->inflights > 0) {
+ const uint32_t hist_tail = port->hist_tail &
+ (SW_PORT_HIST_LIST - 1);
+
+ hist_entry = &port->hist_list[hist_tail];
+ const uint32_t hist_qid = hist_entry->qid;
+ const uint32_t hist_fid = hist_entry->fid;
+
+ struct sw_fid_t *fid =
+ &sw->qids[hist_qid].fids[hist_fid];
+ fid->pcount -= eop;
+ if (fid->pcount == 0)
+ fid->cq = -1;
+
+ if (allow_reorder) {
+ /* set reorder ready if an ordered QID */
+ uintptr_t rob_ptr =
+ (uintptr_t)hist_entry->rob_entry;
+ const uintptr_t valid = (rob_ptr != 0);
+ needs_reorder = valid;
+ rob_ptr |=
+ ((valid - 1) & (uintptr_t)&dummy_rob);
+ struct reorder_buffer_entry *tmp_rob_ptr =
+ (struct reorder_buffer_entry *)rob_ptr;
+ tmp_rob_ptr->ready = eop * needs_reorder;
+ }
+
+ port->inflights -= eop;
+ port->hist_tail += eop;
+ }
+ if (flags & QE_FLAG_VALID) {
+ port->stats.rx_pkts++;
+
+ if (allow_reorder && needs_reorder) {
+ struct reorder_buffer_entry *rob_entry =
+ hist_entry->rob_entry;
+
+ hist_entry->rob_entry = NULL;
+				/* Although fragmentation is not currently
+				 * supported by the eventdev API, we support it
+				 * here. Open question: how do we alert the
+				 * user that they've exceeded the maximum
+				 * number of fragments?
+				 */
+ int num_frag = rob_entry->num_fragments;
+ if (num_frag == SW_FRAGMENTS_MAX)
+ sw->stats.rx_dropped++;
+ else {
+ int idx = rob_entry->num_fragments++;
+ rob_entry->fragments[idx] = *qe;
+ }
+ goto end_qe;
+ }
+
+ /* Use the iq_num from above to push the QE
+ * into the qid at the right priority
+ */
+
+ qid->iq_pkt_mask |= (1 << (iq_num));
+ iq_ring_enqueue(qid->iq[iq_num], qe);
+ qid->iq_pkt_count[iq_num]++;
+ qid->stats.rx_pkts++;
+ pkts_iter++;
+ }
+
+end_qe:
+ port->pp_buf_start++;
+ port->pp_buf_count--;
+ } /* while (avail_qes) */
+
+ return pkts_iter;
+}
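
The completion path above marks reorder-buffer entries ready without branching: when a history entry has no ROB pointer, the write is steered to a dummy entry so the store is unconditional. A self-contained sketch of that branchless-select idiom, using hypothetical names rather than the driver's structures:

    #include <stdint.h>

    struct rob { uint8_t ready; };

    /* Store 'val' to ent->ready when ent is non-NULL, otherwise to a scratch
     * object, so no branch is needed on the hot path.
     */
    static inline void
    rob_mark_ready(struct rob *ent, uint8_t val)
    {
            static struct rob scratch;
            uintptr_t p = (uintptr_t)ent;
            const uintptr_t valid = (p != 0);          /* 1 for a real entry */
            p |= ((valid - 1) & (uintptr_t)&scratch);  /* valid-1 is 0 or ~0 */
            ((struct rob *)p)->ready = val * (uint8_t)valid;
    }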
+
+static uint32_t
+sw_schedule_pull_port_lb(struct sw_evdev *sw, uint32_t port_id)
+{
+ return __pull_port_lb(sw, port_id, 1);
+}
+
+static uint32_t
+sw_schedule_pull_port_no_reorder(struct sw_evdev *sw, uint32_t port_id)
+{
+ return __pull_port_lb(sw, port_id, 0);
+}
+
+static uint32_t
+sw_schedule_pull_port_dir(struct sw_evdev *sw, uint32_t port_id)
+{
+ uint32_t pkts_iter = 0;
+ struct sw_port *port = &sw->ports[port_id];
+
+ /* If shadow ring has 0 pkts, pull from worker ring */
+ if (port->pp_buf_count == 0)
+ sw_refill_pp_buf(sw, port);
+
+ while (port->pp_buf_count) {
+ const struct rte_event *qe = &port->pp_buf[port->pp_buf_start];
+ uint8_t flags = qe->op;
+
+ if ((flags & QE_FLAG_VALID) == 0)
+ goto end_qe;
+
+ uint32_t iq_num = PRIO_TO_IQ(qe->priority);
+ struct sw_qid *qid = &sw->qids[qe->queue_id];
+ struct iq_ring *iq_ring = qid->iq[iq_num];
+
+ if (iq_ring_free_count(iq_ring) == 0)
+ break; /* move to next port */
+
+ port->stats.rx_pkts++;
+
+ /* Use the iq_num from above to push the QE
+ * into the qid at the right priority
+ */
+ qid->iq_pkt_mask |= (1 << (iq_num));
+ iq_ring_enqueue(iq_ring, qe);
+ qid->iq_pkt_count[iq_num]++;
+ qid->stats.rx_pkts++;
+ pkts_iter++;
+
+end_qe:
+ port->pp_buf_start++;
+ port->pp_buf_count--;
+ } /* while port->pp_buf_count */
+
+ return pkts_iter;
+}
+
+void
+sw_event_schedule(struct rte_eventdev *dev)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ uint32_t in_pkts, out_pkts;
+ uint32_t out_pkts_total = 0, in_pkts_total = 0;
+ int32_t sched_quanta = sw->sched_quanta;
+ uint32_t i;
+
+ sw->sched_called++;
+ if (!sw->started)
+ return;
+
+ do {
+ uint32_t in_pkts_this_iteration = 0;
+
+ /* Pull from rx_ring for ports */
+ do {
+ in_pkts = 0;
+ for (i = 0; i < sw->port_count; i++)
+ if (sw->ports[i].is_directed)
+ in_pkts += sw_schedule_pull_port_dir(sw, i);
+ else if (sw->ports[i].num_ordered_qids > 0)
+ in_pkts += sw_schedule_pull_port_lb(sw, i);
+ else
+ in_pkts += sw_schedule_pull_port_no_reorder(sw, i);
+
+ /* QID scan for re-ordered */
+ in_pkts += sw_schedule_reorder(sw, 0,
+ sw->qid_count);
+ in_pkts_this_iteration += in_pkts;
+ } while (in_pkts > 4 &&
+ (int)in_pkts_this_iteration < sched_quanta);
+
+ out_pkts = 0;
+ out_pkts += sw_schedule_qid_to_cq(sw);
+ out_pkts_total += out_pkts;
+ in_pkts_total += in_pkts_this_iteration;
+
+ if (in_pkts == 0 && out_pkts == 0)
+ break;
+ } while ((int)out_pkts_total < sched_quanta);
+
+ /* push all the internal buffered QEs in port->cq_ring to the
+ * worker cores: aka, do the ring transfers batched.
+ */
+ for (i = 0; i < sw->port_count; i++) {
+ struct qe_ring *worker = sw->ports[i].cq_worker_ring;
+ qe_ring_enqueue_burst(worker, sw->ports[i].cq_buf,
+ sw->ports[i].cq_buf_count,
+ &sw->cq_ring_space[i]);
+ sw->ports[i].cq_buf_count = 0;
+ }
+
+ sw->stats.tx_pkts += out_pkts_total;
+ sw->stats.rx_pkts += in_pkts_total;
+
+ sw->sched_no_iq_enqueues += (in_pkts_total == 0);
+ sw->sched_no_cq_enqueues += (out_pkts_total == 0);
+
+}
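
sw_event_schedule() only moves events when called repeatedly, so one lcore must be dedicated to driving it. A minimal sketch of such a loop, assuming the eventdev API revision this driver targets (where the per-device schedule hook registered in sw_probe() is reached through rte_event_schedule()); dev_id and the run flag are application state:

    #include <stdbool.h>
    #include <rte_eventdev.h>

    static volatile bool run = true; /* cleared by the application on shutdown */

    /* Hypothetical scheduling-core body, launched via rte_eal_remote_launch(). */
    static int
    sched_loop(void *arg)
    {
            const uint8_t dev_id = *(const uint8_t *)arg;

            while (run)
                    rte_event_schedule(dev_id); /* dispatches to sw_event_schedule() */
            return 0;
    }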
diff --git a/src/seastar/dpdk/drivers/event/sw/sw_evdev_worker.c b/src/seastar/dpdk/drivers/event/sw/sw_evdev_worker.c
new file mode 100644
index 00000000..9cb6bef5
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/sw/sw_evdev_worker.c
@@ -0,0 +1,185 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+
+#include "sw_evdev.h"
+#include "event_ring.h"
+
+#define PORT_ENQUEUE_MAX_BURST_SIZE 64
+
+static inline void
+sw_event_release(struct sw_port *p, uint8_t index)
+{
+ /*
+ * Drops the next outstanding event in our history. Used on dequeue
+ * to clear any history before dequeuing more events.
+ */
+ RTE_SET_USED(index);
+
+ /* create drop message */
+ struct rte_event ev;
+ ev.op = sw_qe_flag_map[RTE_EVENT_OP_RELEASE];
+
+ uint16_t free_count;
+ qe_ring_enqueue_burst(p->rx_worker_ring, &ev, 1, &free_count);
+
+ /* each release returns one credit */
+ p->outstanding_releases--;
+ p->inflight_credits++;
+}
+
+uint16_t
+sw_event_enqueue_burst(void *port, const struct rte_event ev[], uint16_t num)
+{
+ int32_t i;
+ uint8_t new_ops[PORT_ENQUEUE_MAX_BURST_SIZE];
+ struct sw_port *p = port;
+ struct sw_evdev *sw = (void *)p->sw;
+ uint32_t sw_inflights = rte_atomic32_read(&sw->inflights);
+
+ if (unlikely(p->inflight_max < sw_inflights))
+ return 0;
+
+ if (num > PORT_ENQUEUE_MAX_BURST_SIZE)
+ num = PORT_ENQUEUE_MAX_BURST_SIZE;
+
+ if (p->inflight_credits < num) {
+ /* check if event enqueue brings port over max threshold */
+ uint32_t credit_update_quanta = sw->credit_update_quanta;
+ if (sw_inflights + credit_update_quanta > sw->nb_events_limit)
+ return 0;
+
+ rte_atomic32_add(&sw->inflights, credit_update_quanta);
+ p->inflight_credits += (credit_update_quanta);
+
+ if (p->inflight_credits < num)
+ return 0;
+ }
+
+ for (i = 0; i < num; i++) {
+ int op = ev[i].op;
+ int outstanding = p->outstanding_releases > 0;
+ const uint8_t invalid_qid = (ev[i].queue_id >= sw->qid_count);
+
+ p->inflight_credits -= (op == RTE_EVENT_OP_NEW);
+ p->inflight_credits += (op == RTE_EVENT_OP_RELEASE) *
+ outstanding;
+
+ new_ops[i] = sw_qe_flag_map[op];
+ new_ops[i] &= ~(invalid_qid << QE_FLAG_VALID_SHIFT);
+
+		/* The branch below resolves to taken for both FWD and RELEASE
+		 * packets (assuming correct usage of the API), providing a
+		 * very high correct-prediction rate.
+		 */
+ if ((new_ops[i] & QE_FLAG_COMPLETE) && outstanding)
+ p->outstanding_releases--;
+
+ /* error case: branch to avoid touching p->stats */
+ if (unlikely(invalid_qid)) {
+ p->stats.rx_dropped++;
+ p->inflight_credits++;
+ }
+ }
+
+ /* returns number of events actually enqueued */
+ uint32_t enq = qe_ring_enqueue_burst_with_ops(p->rx_worker_ring, ev, i,
+ new_ops);
+ if (p->outstanding_releases == 0 && p->last_dequeue_burst_sz != 0) {
+ uint64_t burst_ticks = rte_get_timer_cycles() -
+ p->last_dequeue_ticks;
+ uint64_t burst_pkt_ticks =
+ burst_ticks / p->last_dequeue_burst_sz;
+ p->avg_pkt_ticks -= p->avg_pkt_ticks / NUM_SAMPLES;
+ p->avg_pkt_ticks += burst_pkt_ticks / NUM_SAMPLES;
+ p->last_dequeue_ticks = 0;
+ }
+ return enq;
+}
+
+uint16_t
+sw_event_enqueue(void *port, const struct rte_event *ev)
+{
+ return sw_event_enqueue_burst(port, ev, 1);
+}
+
+uint16_t
+sw_event_dequeue_burst(void *port, struct rte_event *ev, uint16_t num,
+ uint64_t wait)
+{
+ RTE_SET_USED(wait);
+ struct sw_port *p = (void *)port;
+ struct sw_evdev *sw = (void *)p->sw;
+ struct qe_ring *ring = p->cq_worker_ring;
+ uint32_t credit_update_quanta = sw->credit_update_quanta;
+
+ /* check that all previous dequeues have been released */
+ if (!p->is_directed) {
+ uint16_t out_rels = p->outstanding_releases;
+ uint16_t i;
+ for (i = 0; i < out_rels; i++)
+ sw_event_release(p, i);
+ }
+
+ /* returns number of events actually dequeued */
+ uint16_t ndeq = qe_ring_dequeue_burst(ring, ev, num);
+ if (unlikely(ndeq == 0)) {
+ p->outstanding_releases = 0;
+ p->zero_polls++;
+ p->total_polls++;
+ goto end;
+ }
+
+ /* only add credits for directed ports - LB ports send RELEASEs */
+ p->inflight_credits += ndeq * p->is_directed;
+ p->outstanding_releases = ndeq;
+ p->last_dequeue_burst_sz = ndeq;
+ p->last_dequeue_ticks = rte_get_timer_cycles();
+ p->poll_buckets[(ndeq - 1) >> SW_DEQ_STAT_BUCKET_SHIFT]++;
+ p->total_polls++;
+
+end:
+ if (p->inflight_credits >= credit_update_quanta * 2 &&
+ p->inflight_credits > credit_update_quanta + ndeq) {
+ rte_atomic32_sub(&sw->inflights, credit_update_quanta);
+ p->inflight_credits -= credit_update_quanta;
+ }
+ return ndeq;
+}
+
+uint16_t
+sw_event_dequeue(void *port, struct rte_event *ev, uint64_t wait)
+{
+ return sw_event_dequeue_burst(port, ev, 1, wait);
+}
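
Taken together, the dequeue path above hands out events and records outstanding releases, and the enqueue path returns them as FWD/RELEASE operations. A minimal worker-loop sketch over the public burst API; dev_id, port_id and process_event() are application-side placeholders:

    #include <rte_common.h>
    #include <rte_eventdev.h>

    void process_event(struct rte_event *ev); /* hypothetical application stage */

    static void
    worker_loop(uint8_t dev_id, uint8_t port_id)
    {
            struct rte_event evs[32];

            for (;;) {
                    uint16_t i;
                    uint16_t n = rte_event_dequeue_burst(dev_id, port_id, evs,
                                    RTE_DIM(evs), 0);
                    for (i = 0; i < n; i++) {
                            process_event(&evs[i]);
                            /* a real pipeline would also update queue_id here */
                            evs[i].op = RTE_EVENT_OP_FORWARD; /* releases the CQ slot */
                    }
                    if (n)
                            rte_event_enqueue_burst(dev_id, port_id, evs, n);
            }
    }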
diff --git a/src/seastar/dpdk/drivers/event/sw/sw_evdev_xstats.c b/src/seastar/dpdk/drivers/event/sw/sw_evdev_xstats.c
new file mode 100644
index 00000000..c7b1abe8
--- /dev/null
+++ b/src/seastar/dpdk/drivers/event/sw/sw_evdev_xstats.c
@@ -0,0 +1,674 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "sw_evdev.h"
+#include "iq_ring.h"
+#include "event_ring.h"
+
+enum xstats_type {
+ /* common stats */
+ rx,
+ tx,
+ dropped,
+ inflight,
+ calls,
+ credits,
+ /* device instance specific */
+ no_iq_enq,
+ no_cq_enq,
+ /* port_specific */
+ rx_used,
+ rx_free,
+ tx_used,
+ tx_free,
+ pkt_cycles,
+	poll_return, /* for zero-count polls; also used for the port bucket loop */
+ /* qid_specific */
+ iq_size,
+ iq_used,
+ /* qid port mapping specific */
+ pinned,
+};
+
+typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
+ uint16_t obj_idx, /* port or queue id */
+ enum xstats_type stat, int extra_arg);
+
+struct sw_xstats_entry {
+ struct rte_event_dev_xstats_name name;
+ xstats_fn fn;
+ uint16_t obj_idx;
+ enum xstats_type stat;
+ enum rte_event_dev_xstats_mode mode;
+ int extra_arg;
+ uint8_t reset_allowed; /* when set, this value can be reset */
+	uint64_t reset_value; /* an offset subtracted from the raw value to emulate resets */
+};
+
+static uint64_t
+get_dev_stat(const struct sw_evdev *sw, uint16_t obj_idx __rte_unused,
+ enum xstats_type type, int extra_arg __rte_unused)
+{
+ switch (type) {
+ case rx: return sw->stats.rx_pkts;
+ case tx: return sw->stats.tx_pkts;
+ case dropped: return sw->stats.rx_dropped;
+ case calls: return sw->sched_called;
+ case no_iq_enq: return sw->sched_no_iq_enqueues;
+ case no_cq_enq: return sw->sched_no_cq_enqueues;
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg __rte_unused)
+{
+ const struct sw_port *p = &sw->ports[obj_idx];
+
+ switch (type) {
+ case rx: return p->stats.rx_pkts;
+ case tx: return p->stats.tx_pkts;
+ case dropped: return p->stats.rx_dropped;
+ case inflight: return p->inflights;
+ case pkt_cycles: return p->avg_pkt_ticks;
+ case calls: return p->total_polls;
+ case credits: return p->inflight_credits;
+ case poll_return: return p->zero_polls;
+ case rx_used: return qe_ring_count(p->rx_worker_ring);
+ case rx_free: return qe_ring_free_count(p->rx_worker_ring);
+ case tx_used: return qe_ring_count(p->cq_worker_ring);
+ case tx_free: return qe_ring_free_count(p->cq_worker_ring);
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_port_bucket_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg)
+{
+ const struct sw_port *p = &sw->ports[obj_idx];
+
+ switch (type) {
+ case poll_return: return p->poll_buckets[extra_arg];
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_qid_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg __rte_unused)
+{
+ const struct sw_qid *qid = &sw->qids[obj_idx];
+
+ switch (type) {
+ case rx: return qid->stats.rx_pkts;
+ case tx: return qid->stats.tx_pkts;
+ case dropped: return qid->stats.rx_dropped;
+ case inflight:
+ do {
+ uint64_t infl = 0;
+ unsigned int i;
+ for (i = 0; i < RTE_DIM(qid->fids); i++)
+ infl += qid->fids[i].pcount;
+ return infl;
+ } while (0);
+ break;
+ case iq_size: return RTE_DIM(qid->iq[0]->ring);
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_qid_iq_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg)
+{
+ const struct sw_qid *qid = &sw->qids[obj_idx];
+ const int iq_idx = extra_arg;
+
+ switch (type) {
+ case iq_used: return iq_ring_count(qid->iq[iq_idx]);
+ default: return -1;
+ }
+}
+
+static uint64_t
+get_qid_port_stat(const struct sw_evdev *sw, uint16_t obj_idx,
+ enum xstats_type type, int extra_arg)
+{
+ const struct sw_qid *qid = &sw->qids[obj_idx];
+ uint16_t port = extra_arg;
+
+ switch (type) {
+ case pinned:
+ do {
+ uint64_t pin = 0;
+ unsigned int i;
+ for (i = 0; i < RTE_DIM(qid->fids); i++)
+ if (qid->fids[i].cq == port)
+ pin++;
+ return pin;
+ } while (0);
+ break;
+ default: return -1;
+ }
+}
+
+int
+sw_xstats_init(struct sw_evdev *sw)
+{
+	/*
+	 * Define the stats names and types. Used to build up the device
+	 * xstats array.
+	 * There are multiple sets of stats:
+	 *  - device-level,
+	 *  - per-port,
+	 *  - per-port-dequeue-burst-sizes,
+	 *  - per-qid,
+	 *  - per-iq,
+	 *  - per-port-per-qid
+	 *
+	 * For each of these sets, we have up to three parallel arrays: one for
+	 * the names, one for the stat type parameter to be passed in the fn
+	 * call to get that stat, and one marking whether the stat may be
+	 * reset. All these arrays must be kept in sync
+	 */
+ static const char * const dev_stats[] = { "rx", "tx", "drop",
+ "sched_calls", "sched_no_iq_enq", "sched_no_cq_enq",
+ };
+ static const enum xstats_type dev_types[] = { rx, tx, dropped,
+ calls, no_iq_enq, no_cq_enq,
+ };
+ /* all device stats are allowed to be reset */
+
+ static const char * const port_stats[] = {"rx", "tx", "drop",
+ "inflight", "avg_pkt_cycles", "credits",
+ "rx_ring_used", "rx_ring_free",
+ "cq_ring_used", "cq_ring_free",
+ "dequeue_calls", "dequeues_returning_0",
+ };
+ static const enum xstats_type port_types[] = { rx, tx, dropped,
+ inflight, pkt_cycles, credits,
+ rx_used, rx_free, tx_used, tx_free,
+ calls, poll_return,
+ };
+ static const uint8_t port_reset_allowed[] = {1, 1, 1,
+ 0, 1, 0,
+ 0, 0, 0, 0,
+ 1, 1,
+ };
+
+ static const char * const port_bucket_stats[] = {
+ "dequeues_returning" };
+ static const enum xstats_type port_bucket_types[] = { poll_return };
+ /* all bucket dequeues are allowed to be reset, handled in loop below */
+
+ static const char * const qid_stats[] = {"rx", "tx", "drop",
+ "inflight", "iq_size"
+ };
+ static const enum xstats_type qid_types[] = { rx, tx, dropped,
+ inflight, iq_size
+ };
+ static const uint8_t qid_reset_allowed[] = {1, 1, 1,
+ 0, 0
+ };
+
+ static const char * const qid_iq_stats[] = { "used" };
+ static const enum xstats_type qid_iq_types[] = { iq_used };
+	/* iq stats are not allowed to be reset, set in loop below */
+
+ static const char * const qid_port_stats[] = { "pinned_flows" };
+ static const enum xstats_type qid_port_types[] = { pinned };
+	/* pinned_flows is not allowed to be reset, set in loop below */
+ /* ---- end of stat definitions ---- */
+
+ /* check sizes, since a missed comma can lead to strings being
+ * joined by the compiler.
+ */
+ RTE_BUILD_BUG_ON(RTE_DIM(dev_stats) != RTE_DIM(dev_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_iq_stats) != RTE_DIM(qid_iq_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_port_stats) != RTE_DIM(qid_port_types));
+ RTE_BUILD_BUG_ON(RTE_DIM(port_bucket_stats) !=
+ RTE_DIM(port_bucket_types));
+
+ RTE_BUILD_BUG_ON(RTE_DIM(port_stats) != RTE_DIM(port_reset_allowed));
+ RTE_BUILD_BUG_ON(RTE_DIM(qid_stats) != RTE_DIM(qid_reset_allowed));
+
+ /* other vars */
+ const uint32_t cons_bkt_shift =
+ (MAX_SW_CONS_Q_DEPTH >> SW_DEQ_STAT_BUCKET_SHIFT);
+ const unsigned int count = RTE_DIM(dev_stats) +
+ sw->port_count * RTE_DIM(port_stats) +
+ sw->port_count * RTE_DIM(port_bucket_stats) *
+ (cons_bkt_shift + 1) +
+ sw->qid_count * RTE_DIM(qid_stats) +
+ sw->qid_count * SW_IQS_MAX * RTE_DIM(qid_iq_stats) +
+ sw->qid_count * sw->port_count *
+ RTE_DIM(qid_port_stats);
+ unsigned int i, port, qid, iq, bkt, stat = 0;
+
+ sw->xstats = rte_zmalloc_socket(NULL, sizeof(sw->xstats[0]) * count, 0,
+ sw->data->socket_id);
+ if (sw->xstats == NULL)
+ return -ENOMEM;
+
+#define sname sw->xstats[stat].name.name
+ for (i = 0; i < RTE_DIM(dev_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_dev_stat,
+ .stat = dev_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_DEVICE,
+ .reset_allowed = 1,
+ };
+ snprintf(sname, sizeof(sname), "dev_%s", dev_stats[i]);
+ }
+ sw->xstats_count_mode_dev = stat;
+
+ for (port = 0; port < sw->port_count; port++) {
+ sw->xstats_offset_for_port[port] = stat;
+
+ uint32_t count_offset = stat;
+
+ for (i = 0; i < RTE_DIM(port_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_port_stat,
+ .obj_idx = port,
+ .stat = port_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_PORT,
+ .reset_allowed = port_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname), "port_%u_%s",
+ port, port_stats[i]);
+ }
+
+ for (bkt = 0; bkt < (sw->ports[port].cq_worker_ring->size >>
+ SW_DEQ_STAT_BUCKET_SHIFT) + 1; bkt++) {
+ for (i = 0; i < RTE_DIM(port_bucket_stats); i++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_port_bucket_stat,
+ .obj_idx = port,
+ .stat = port_bucket_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_PORT,
+ .extra_arg = bkt,
+ .reset_allowed = 1,
+ };
+ snprintf(sname, sizeof(sname),
+ "port_%u_%s_%u-%u",
+ port, port_bucket_stats[i],
+ (bkt << SW_DEQ_STAT_BUCKET_SHIFT) + 1,
+ (bkt + 1) << SW_DEQ_STAT_BUCKET_SHIFT);
+ stat++;
+ }
+ }
+
+ sw->xstats_count_per_port[port] = stat - count_offset;
+ }
+
+ sw->xstats_count_mode_port = stat - sw->xstats_count_mode_dev;
+
+ for (qid = 0; qid < sw->qid_count; qid++) {
+ uint32_t count_offset = stat;
+ sw->xstats_offset_for_qid[qid] = stat;
+
+ for (i = 0; i < RTE_DIM(qid_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_qid_stat,
+ .obj_idx = qid,
+ .stat = qid_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .reset_allowed = qid_reset_allowed[i],
+ };
+ snprintf(sname, sizeof(sname), "qid_%u_%s",
+ qid, qid_stats[i]);
+ }
+ for (iq = 0; iq < SW_IQS_MAX; iq++)
+ for (i = 0; i < RTE_DIM(qid_iq_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_qid_iq_stat,
+ .obj_idx = qid,
+ .stat = qid_iq_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .extra_arg = iq,
+ .reset_allowed = 0,
+ };
+ snprintf(sname, sizeof(sname),
+ "qid_%u_iq_%u_%s",
+ qid, iq,
+ qid_iq_stats[i]);
+ }
+
+ for (port = 0; port < sw->port_count; port++)
+ for (i = 0; i < RTE_DIM(qid_port_stats); i++, stat++) {
+ sw->xstats[stat] = (struct sw_xstats_entry){
+ .fn = get_qid_port_stat,
+ .obj_idx = qid,
+ .stat = qid_port_types[i],
+ .mode = RTE_EVENT_DEV_XSTATS_QUEUE,
+ .extra_arg = port,
+ .reset_allowed = 0,
+ };
+ snprintf(sname, sizeof(sname),
+ "qid_%u_port_%u_%s",
+ qid, port,
+ qid_port_stats[i]);
+ }
+
+ sw->xstats_count_per_qid[qid] = stat - count_offset;
+ }
+
+ sw->xstats_count_mode_queue = stat -
+ (sw->xstats_count_mode_dev + sw->xstats_count_mode_port);
+#undef sname
+
+ sw->xstats_count = stat;
+
+ return stat;
+}
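
For reference, the snprintf() formats above generate statistic names with fixed prefixes; the indices below are illustrative only:

    dev_rx, dev_sched_calls
    port_0_rx, port_0_dequeues_returning_1-4
    qid_0_inflight, qid_0_iq_1_used, qid_0_port_2_pinned_flows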
+
+int
+sw_xstats_uninit(struct sw_evdev *sw)
+{
+ rte_free(sw->xstats);
+ sw->xstats_count = 0;
+ return 0;
+}
+
+int
+sw_xstats_get_names(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ struct rte_event_dev_xstats_name *xstats_names,
+ unsigned int *ids, unsigned int size)
+{
+ const struct sw_evdev *sw = sw_pmd_priv_const(dev);
+ unsigned int i;
+ unsigned int xidx = 0;
+ RTE_SET_USED(mode);
+ RTE_SET_USED(queue_port_id);
+
+ uint32_t xstats_mode_count = 0;
+ uint32_t start_offset = 0;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ xstats_mode_count = sw->xstats_count_mode_dev;
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= (signed int)sw->port_count)
+ break;
+ xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
+ start_offset = sw->xstats_offset_for_port[queue_port_id];
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id >= (signed int)sw->qid_count)
+ break;
+ xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
+ start_offset = sw->xstats_offset_for_qid[queue_port_id];
+ break;
+ default:
+ SW_LOG_ERR("Invalid mode received in sw_xstats_get_names()\n");
+ return -EINVAL;
+ };
+
+ if (xstats_mode_count > size || !ids || !xstats_names)
+ return xstats_mode_count;
+
+ for (i = 0; i < sw->xstats_count && xidx < size; i++) {
+ if (sw->xstats[i].mode != mode)
+ continue;
+
+ if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+ queue_port_id != sw->xstats[i].obj_idx)
+ continue;
+
+ xstats_names[xidx] = sw->xstats[i].name;
+ if (ids)
+ ids[xidx] = start_offset + xidx;
+ xidx++;
+ }
+ return xidx;
+}
+
+static int
+sw_xstats_update(struct sw_evdev *sw, enum rte_event_dev_xstats_mode mode,
+ uint8_t queue_port_id, const unsigned int ids[],
+ uint64_t values[], unsigned int n, const uint32_t reset,
+ const uint32_t ret_if_n_lt_nstats)
+{
+ unsigned int i;
+ unsigned int xidx = 0;
+ RTE_SET_USED(mode);
+ RTE_SET_USED(queue_port_id);
+
+ uint32_t xstats_mode_count = 0;
+
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ xstats_mode_count = sw->xstats_count_mode_dev;
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id >= (signed int)sw->port_count)
+ goto invalid_value;
+ xstats_mode_count = sw->xstats_count_per_port[queue_port_id];
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id >= (signed int)sw->qid_count)
+ goto invalid_value;
+ xstats_mode_count = sw->xstats_count_per_qid[queue_port_id];
+ break;
+ default:
+ SW_LOG_ERR("Invalid mode received in sw_xstats_get()\n");
+ goto invalid_value;
+ };
+
+	/* this function can check the number of stats and return it if n is
+	 * too small (xstats_get()-style behaviour), or ignore n when resetting
+	 * individual stats (xstats_reset()-style behaviour).
+	 */
+ if (ret_if_n_lt_nstats && xstats_mode_count > n)
+ return xstats_mode_count;
+
+ for (i = 0; i < n && xidx < xstats_mode_count; i++) {
+ struct sw_xstats_entry *xs = &sw->xstats[ids[i]];
+		if (ids[i] >= sw->xstats_count || xs->mode != mode)
+ continue;
+
+ if (mode != RTE_EVENT_DEV_XSTATS_DEVICE &&
+ queue_port_id != xs->obj_idx)
+ continue;
+
+ uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
+ - xs->reset_value;
+
+ if (values)
+ values[xidx] = val;
+
+ if (xs->reset_allowed && reset)
+ xs->reset_value = val;
+
+ xidx++;
+ }
+
+ return xidx;
+invalid_value:
+ return -EINVAL;
+}
+
+int
+sw_xstats_get(const struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+ const unsigned int ids[], uint64_t values[], unsigned int n)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ const uint32_t reset = 0;
+ const uint32_t ret_n_lt_stats = 1;
+ return sw_xstats_update(sw, mode, queue_port_id, ids, values, n,
+ reset, ret_n_lt_stats);
+}
+
+uint64_t
+sw_xstats_get_by_name(const struct rte_eventdev *dev,
+ const char *name, unsigned int *id)
+{
+ const struct sw_evdev *sw = sw_pmd_priv_const(dev);
+ unsigned int i;
+
+ for (i = 0; i < sw->xstats_count; i++) {
+ struct sw_xstats_entry *xs = &sw->xstats[i];
+ if (strncmp(xs->name.name, name,
+ RTE_EVENT_DEV_XSTATS_NAME_SIZE) == 0){
+ if (id != NULL)
+ *id = i;
+ return xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
+ - xs->reset_value;
+ }
+ }
+ if (id != NULL)
+ *id = (uint32_t)-1;
+ return (uint64_t)-1;
+}
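
These callbacks are reached through the generic rte_event_dev_xstats_*() wrappers. A brief usage sketch, assuming dev_id refers to an event_sw instance and using the device-level "dev_rx" counter produced by sw_xstats_init():

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_eventdev.h>

    /* Read one device-level counter by name, then reset it by id. */
    static void
    show_and_reset_dev_rx(uint8_t dev_id)
    {
            unsigned int id;
            uint64_t val = rte_event_dev_xstats_by_name_get(dev_id, "dev_rx", &id);
            const uint32_t reset_id = id;

            printf("dev_rx = %" PRIu64 "\n", val);
            rte_event_dev_xstats_reset(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
                            &reset_id, 1);
    }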
+
+static void
+sw_xstats_reset_range(struct sw_evdev *sw, uint32_t start, uint32_t num)
+{
+ uint32_t i;
+ for (i = start; i < start + num; i++) {
+ struct sw_xstats_entry *xs = &sw->xstats[i];
+ if (!xs->reset_allowed)
+ continue;
+
+ uint64_t val = xs->fn(sw, xs->obj_idx, xs->stat, xs->extra_arg)
+ - xs->reset_value;
+ xs->reset_value = val;
+ }
+}
+
+static int
+sw_xstats_reset_queue(struct sw_evdev *sw, uint8_t queue_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ const uint32_t reset = 1;
+ const uint32_t ret_n_lt_stats = 0;
+ if (ids) {
+ uint32_t nb_reset = sw_xstats_update(sw,
+ RTE_EVENT_DEV_XSTATS_QUEUE,
+ queue_id, ids, NULL, nb_ids,
+ reset, ret_n_lt_stats);
+ return nb_reset == nb_ids ? 0 : -EINVAL;
+ }
+
+ if (ids == NULL)
+ sw_xstats_reset_range(sw, sw->xstats_offset_for_qid[queue_id],
+ sw->xstats_count_per_qid[queue_id]);
+
+ return 0;
+}
+
+static int
+sw_xstats_reset_port(struct sw_evdev *sw, uint8_t port_id,
+ const uint32_t ids[], uint32_t nb_ids)
+{
+ const uint32_t reset = 1;
+ const uint32_t ret_n_lt_stats = 0;
+ int offset = sw->xstats_offset_for_port[port_id];
+ int nb_stat = sw->xstats_count_per_port[port_id];
+
+ if (ids) {
+ uint32_t nb_reset = sw_xstats_update(sw,
+ RTE_EVENT_DEV_XSTATS_PORT, port_id,
+ ids, NULL, nb_ids,
+ reset, ret_n_lt_stats);
+ return nb_reset == nb_ids ? 0 : -EINVAL;
+ }
+
+ sw_xstats_reset_range(sw, offset, nb_stat);
+ return 0;
+}
+
+static int
+sw_xstats_reset_dev(struct sw_evdev *sw, const uint32_t ids[], uint32_t nb_ids)
+{
+ uint32_t i;
+ if (ids) {
+ for (i = 0; i < nb_ids; i++) {
+ uint32_t id = ids[i];
+ if (id >= sw->xstats_count_mode_dev)
+ return -EINVAL;
+ sw_xstats_reset_range(sw, id, 1);
+ }
+ } else {
+ for (i = 0; i < sw->xstats_count_mode_dev; i++)
+ sw_xstats_reset_range(sw, i, 1);
+ }
+
+ return 0;
+}
+
+int
+sw_xstats_reset(struct rte_eventdev *dev,
+ enum rte_event_dev_xstats_mode mode,
+ int16_t queue_port_id,
+ const uint32_t ids[],
+ uint32_t nb_ids)
+{
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ uint32_t i, err;
+
+ /* handle -1 for queue_port_id here, looping over all ports/queues */
+ switch (mode) {
+ case RTE_EVENT_DEV_XSTATS_DEVICE:
+ sw_xstats_reset_dev(sw, ids, nb_ids);
+ break;
+ case RTE_EVENT_DEV_XSTATS_PORT:
+ if (queue_port_id == -1) {
+ for (i = 0; i < sw->port_count; i++) {
+ err = sw_xstats_reset_port(sw, i, ids, nb_ids);
+ if (err)
+ return -EINVAL;
+ }
+ } else if (queue_port_id < (int16_t)sw->port_count)
+ sw_xstats_reset_port(sw, queue_port_id, ids, nb_ids);
+ break;
+ case RTE_EVENT_DEV_XSTATS_QUEUE:
+ if (queue_port_id == -1) {
+ for (i = 0; i < sw->qid_count; i++) {
+ err = sw_xstats_reset_queue(sw, i, ids, nb_ids);
+ if (err)
+ return -EINVAL;
+ }
+ } else if (queue_port_id < (int16_t)sw->qid_count)
+ sw_xstats_reset_queue(sw, queue_port_id, ids, nb_ids);
+ break;
+ };
+
+ return 0;
+}