Diffstat (limited to 'src/spdk/dpdk/app/test-eventdev')
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/Makefile                    32
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/evt_common.h               183
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/evt_main.c                 214
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/evt_options.c              450
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/evt_options.h              289
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/evt_test.c                  42
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/evt_test.h                  96
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/meson.build                 17
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/parser.c                   359
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/parser.h                    50
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_order_atq.c           205
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_order_common.c        361
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_order_common.h        125
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_order_queue.c         215
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_perf_atq.c            308
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_perf_common.c         839
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_perf_common.h         159
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_perf_queue.c          323
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_pipeline_atq.c        518
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_pipeline_common.c     534
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_pipeline_common.h     173
-rw-r--r--  src/spdk/dpdk/app/test-eventdev/test_pipeline_queue.c      533
22 files changed, 6025 insertions, 0 deletions
diff --git a/src/spdk/dpdk/app/test-eventdev/Makefile b/src/spdk/dpdk/app/test-eventdev/Makefile
new file mode 100644
index 000000000..e600e21c4
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+APP = dpdk-test-eventdev
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y := evt_main.c
+SRCS-y += evt_options.c
+SRCS-y += evt_test.c
+SRCS-y += parser.c
+
+SRCS-y += test_order_common.c
+SRCS-y += test_order_queue.c
+SRCS-y += test_order_atq.c
+
+SRCS-y += test_perf_common.c
+SRCS-y += test_perf_queue.c
+SRCS-y += test_perf_atq.c
+
+SRCS-y += test_pipeline_common.c
+SRCS-y += test_pipeline_queue.c
+SRCS-y += test_pipeline_atq.c
+
+include $(RTE_SDK)/mk/rte.app.mk
diff --git a/src/spdk/dpdk/app/test-eventdev/evt_common.h b/src/spdk/dpdk/app/test-eventdev/evt_common.h
new file mode 100644
index 000000000..f9d7378d3
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/evt_common.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _EVT_COMMON_
+#define _EVT_COMMON_
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_eventdev.h>
+#include <rte_service.h>
+
+#define CLNRM "\x1b[0m"
+#define CLRED "\x1b[31m"
+#define CLGRN "\x1b[32m"
+#define CLYEL "\x1b[33m"
+
+#define evt_err(fmt, args...) \
+ fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)
+
+#define evt_info(fmt, args...) \
+ fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)
+
+#define EVT_STR_FMT 20
+
+#define evt_dump(str, fmt, val...) \
+ printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)
+
+#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)
+
+#define evt_dump_end printf("\b}\n")
+
+#define EVT_MAX_STAGES 64
+#define EVT_MAX_PORTS 256
+#define EVT_MAX_QUEUES 256
+
+enum evt_prod_type {
+ EVT_PROD_TYPE_NONE,
+ EVT_PROD_TYPE_SYNT, /* Producer type Synthetic i.e. CPU. */
+ EVT_PROD_TYPE_ETH_RX_ADPTR, /* Producer type Eth Rx Adapter. */
+ EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */
+ EVT_PROD_TYPE_MAX,
+};
+
+struct evt_options {
+#define EVT_TEST_NAME_MAX_LEN 32
+ char test_name[EVT_TEST_NAME_MAX_LEN];
+ bool plcores[RTE_MAX_LCORE];
+ bool wlcores[RTE_MAX_LCORE];
+ int pool_sz;
+ int socket_id;
+ int nb_stages;
+ int verbose_level;
+ uint8_t dev_id;
+ uint8_t timdev_cnt;
+ uint8_t nb_timer_adptrs;
+ uint8_t timdev_use_burst;
+ uint8_t sched_type_list[EVT_MAX_STAGES];
+ uint16_t mbuf_sz;
+ uint16_t wkr_deq_dep;
+ uint32_t nb_flows;
+ uint32_t tx_first;
+ uint32_t max_pkt_sz;
+ uint32_t deq_tmo_nsec;
+ uint32_t q_priority:1;
+ uint32_t fwd_latency:1;
+ uint64_t nb_pkts;
+ uint64_t nb_timers;
+ uint64_t expiry_nsec;
+ uint64_t max_tmo_nsec;
+ uint64_t timer_tick_nsec;
+ uint64_t optm_timer_tick_nsec;
+ enum evt_prod_type prod_type;
+};
+
+static inline bool
+evt_has_distributed_sched(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
+ true : false;
+}
+
+static inline bool
+evt_has_burst_mode(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
+ true : false;
+}
+
+
+static inline bool
+evt_has_all_types_queue(uint8_t dev_id)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(dev_id, &dev_info);
+ return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
+ true : false;
+}
+
+static inline int
+evt_service_setup(uint32_t service_id)
+{
+ int32_t core_cnt;
+ unsigned int lcore = 0;
+ uint32_t core_array[RTE_MAX_LCORE];
+ uint8_t cnt;
+ uint8_t min_cnt = UINT8_MAX;
+
+ if (!rte_service_lcore_count())
+ return -ENOENT;
+
+ core_cnt = rte_service_lcore_list(core_array,
+ RTE_MAX_LCORE);
+ if (core_cnt < 0)
+ return -ENOENT;
+ /* Get the core which has least number of services running. */
+ while (core_cnt--) {
+ /* Reset default mapping */
+ rte_service_map_lcore_set(service_id,
+ core_array[core_cnt], 0);
+ cnt = rte_service_lcore_count_services(
+ core_array[core_cnt]);
+ if (cnt < min_cnt) {
+ lcore = core_array[core_cnt];
+ min_cnt = cnt;
+ }
+ }
+ if (rte_service_map_lcore_set(service_id, lcore, 1))
+ return -ENOENT;
+
+ return 0;
+}
+
+static inline int
+evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
+ uint8_t nb_ports)
+{
+ struct rte_event_dev_info info;
+ int ret;
+
+ memset(&info, 0, sizeof(struct rte_event_dev_info));
+ ret = rte_event_dev_info_get(opt->dev_id, &info);
+ if (ret) {
+ evt_err("failed to get eventdev info %d", opt->dev_id);
+ return ret;
+ }
+
+ if (opt->deq_tmo_nsec) {
+ if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too low, using %d",
+ opt->deq_tmo_nsec);
+ }
+ if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
+ opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
+ evt_info("dequeue_timeout_ns too high, using %d",
+ opt->deq_tmo_nsec);
+ }
+ }
+
+ const struct rte_event_dev_config config = {
+ .dequeue_timeout_ns = opt->deq_tmo_nsec,
+ .nb_event_queues = nb_queues,
+ .nb_event_ports = nb_ports,
+ .nb_events_limit = info.max_num_events,
+ .nb_event_queue_flows = opt->nb_flows,
+ .nb_event_port_dequeue_depth =
+ info.max_event_port_dequeue_depth,
+ .nb_event_port_enqueue_depth =
+ info.max_event_port_enqueue_depth,
+ };
+
+ return rte_event_dev_configure(opt->dev_id, &config);
+}
+
+#endif /* _EVT_COMMON_*/
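
The helpers above wrap the common eventdev bring-up steps: evt_configure_eventdev() clamps the dequeue timeout to the device limits and configures the device with its maximum port depths, while evt_service_setup() maps the scheduler service to the least-loaded service lcore for drivers without distributed scheduling. A minimal sketch of how a test's eventdev_setup callback might string these together (hypothetical function name, error messages and queue/port details trimmed; the real setups in the test_*.c files later in this patch follow the same pattern):

	static int
	example_eventdev_setup(struct evt_test *test, struct evt_options *opt)
	{
		int ret;

		RTE_SET_USED(test);

		/* configure the device with one queue and two ports */
		ret = evt_configure_eventdev(opt, 1, 2);
		if (ret)
			return ret;

		/* rte_event_queue_setup()/rte_event_port_setup() and queue
		 * linking would go here, as in the tests below.
		 */

		/* map the scheduler to a service core if the PMD needs one */
		if (!evt_has_distributed_sched(opt->dev_id)) {
			uint32_t service_id;

			rte_event_dev_service_id_get(opt->dev_id, &service_id);
			ret = evt_service_setup(service_id);
			if (ret)
				return ret;
		}

		return rte_event_dev_start(opt->dev_id);
	}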
diff --git a/src/spdk/dpdk/app/test-eventdev/evt_main.c b/src/spdk/dpdk/app/test-eventdev/evt_main.c
new file mode 100644
index 000000000..a8d304bab
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/evt_main.c
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <rte_atomic.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_eventdev.h>
+
+#include "evt_options.h"
+#include "evt_test.h"
+
+struct evt_options opt;
+struct evt_test *test;
+
+static void
+signal_handler(int signum)
+{
+ int i;
+ static uint8_t once;
+
+ if ((signum == SIGINT || signum == SIGTERM) && !once) {
+ once = true;
+ printf("\nSignal %d received, preparing to exit...\n",
+ signum);
+
+ if (test != NULL) {
+ /* request all lcores to exit from the main loop */
+ *(int *)test->test_priv = true;
+ rte_wmb();
+
+ if (test->ops.ethdev_destroy)
+ test->ops.ethdev_destroy(test, &opt);
+
+ rte_eal_mp_wait_lcore();
+
+ if (test->ops.test_result)
+ test->ops.test_result(test, &opt);
+
+ if (opt.prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(i)
+ rte_eth_dev_close(i);
+ }
+
+ if (test->ops.eventdev_destroy)
+ test->ops.eventdev_destroy(test, &opt);
+
+ if (test->ops.mempool_destroy)
+ test->ops.mempool_destroy(test, &opt);
+
+ if (test->ops.test_destroy)
+ test->ops.test_destroy(test, &opt);
+ }
+
+ /* exit with the expected status */
+ signal(signum, SIG_DFL);
+ kill(getpid(), signum);
+ }
+}
+
+static inline void
+evt_options_dump_all(struct evt_test *test, struct evt_options *opts)
+{
+ evt_options_dump(opts);
+ if (test->ops.opt_dump)
+ test->ops.opt_dump(opts);
+}
+
+int
+main(int argc, char **argv)
+{
+ uint8_t evdevs;
+ int ret;
+
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_panic("invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ evdevs = rte_event_dev_count();
+ if (!evdevs)
+ rte_panic("no eventdev devices found\n");
+
+ /* Populate the default values of the options */
+ evt_options_default(&opt);
+
+ /* Parse the command line arguments */
+ ret = evt_options_parse(&opt, argc, argv);
+ if (ret) {
+		evt_err("parsing one or more user options failed");
+ goto error;
+ }
+
+ /* Get struct evt_test *test from name */
+ test = evt_test_get(opt.test_name);
+ if (test == NULL) {
+ evt_err("failed to find requested test: %s", opt.test_name);
+ goto error;
+ }
+
+ if (test->ops.test_result == NULL) {
+ evt_err("%s: ops.test_result not found", opt.test_name);
+ goto error;
+ }
+
+ /* Verify the command line options */
+ if (opt.dev_id >= rte_event_dev_count()) {
+ evt_err("invalid event device %d", opt.dev_id);
+ goto error;
+ }
+ if (test->ops.opt_check) {
+ if (test->ops.opt_check(&opt)) {
+ evt_err("invalid command line argument");
+ evt_options_dump_all(test, &opt);
+ goto error;
+ }
+ }
+
+ /* Check the eventdev capability before proceeding */
+ if (test->ops.cap_check) {
+ if (test->ops.cap_check(&opt) == false) {
+ evt_info("unsupported test: %s", opt.test_name);
+ evt_options_dump_all(test, &opt);
+ ret = EVT_TEST_UNSUPPORTED;
+ goto nocap;
+ }
+ }
+
+ /* Dump the options */
+ if (opt.verbose_level)
+ evt_options_dump_all(test, &opt);
+
+ /* Test specific setup */
+ if (test->ops.test_setup) {
+ if (test->ops.test_setup(test, &opt)) {
+ evt_err("failed to setup test: %s", opt.test_name);
+ goto error;
+
+ }
+ }
+
+ /* Test specific mempool setup */
+ if (test->ops.mempool_setup) {
+ if (test->ops.mempool_setup(test, &opt)) {
+ evt_err("%s: mempool setup failed", opt.test_name);
+ goto test_destroy;
+ }
+ }
+
+ /* Test specific ethdev setup */
+ if (test->ops.ethdev_setup) {
+ if (test->ops.ethdev_setup(test, &opt)) {
+ evt_err("%s: ethdev setup failed", opt.test_name);
+ goto mempool_destroy;
+ }
+ }
+
+ /* Test specific eventdev setup */
+ if (test->ops.eventdev_setup) {
+ if (test->ops.eventdev_setup(test, &opt)) {
+ evt_err("%s: eventdev setup failed", opt.test_name);
+ goto ethdev_destroy;
+ }
+ }
+
+ /* Launch lcores */
+ if (test->ops.launch_lcores) {
+ if (test->ops.launch_lcores(test, &opt)) {
+ evt_err("%s: failed to launch lcores", opt.test_name);
+ goto eventdev_destroy;
+ }
+ }
+
+ rte_eal_mp_wait_lcore();
+
+ /* Print the test result */
+ ret = test->ops.test_result(test, &opt);
+nocap:
+ if (ret == EVT_TEST_SUCCESS) {
+ printf("Result: "CLGRN"%s"CLNRM"\n", "Success");
+ } else if (ret == EVT_TEST_FAILED) {
+ printf("Result: "CLRED"%s"CLNRM"\n", "Failed");
+ return EXIT_FAILURE;
+ } else if (ret == EVT_TEST_UNSUPPORTED) {
+ printf("Result: "CLYEL"%s"CLNRM"\n", "Unsupported");
+ }
+
+ return 0;
+eventdev_destroy:
+ if (test->ops.eventdev_destroy)
+ test->ops.eventdev_destroy(test, &opt);
+
+ethdev_destroy:
+ if (test->ops.ethdev_destroy)
+ test->ops.ethdev_destroy(test, &opt);
+
+mempool_destroy:
+ if (test->ops.mempool_destroy)
+ test->ops.mempool_destroy(test, &opt);
+
+test_destroy:
+ if (test->ops.test_destroy)
+ test->ops.test_destroy(test, &opt);
+error:
+ return EXIT_FAILURE;
+}
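
Note that signal_handler() stops the data-path lcores by writing through test->test_priv as if it were an int, so every test has to keep its stop/error flag as the first member of its private state. A hypothetical layout illustrating that contract (struct test_order later in this patch documents the same rule):

	struct example_test_priv {
		/* must stay at offset 0: signal_handler() does
		 * "*(int *)test->test_priv = true;" to stop the lcores
		 */
		int err;
		/* ... remaining per-test state ... */
	};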
diff --git a/src/spdk/dpdk/app/test-eventdev/evt_options.c b/src/spdk/dpdk/app/test-eventdev/evt_options.c
new file mode 100644
index 000000000..c60b61a90
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/evt_options.c
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <getopt.h>
+
+#include <rte_string_fns.h>
+#include <rte_common.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+
+#include "evt_options.h"
+#include "evt_test.h"
+#include "parser.h"
+
+void
+evt_options_default(struct evt_options *opt)
+{
+ memset(opt, 0, sizeof(*opt));
+ opt->verbose_level = 1; /* Enable minimal prints */
+ opt->dev_id = 0;
+ strncpy(opt->test_name, "order_queue", EVT_TEST_NAME_MAX_LEN);
+ opt->nb_flows = 1024;
+ opt->socket_id = SOCKET_ID_ANY;
+ opt->pool_sz = 16 * 1024;
+ opt->wkr_deq_dep = 16;
+ opt->nb_pkts = (1ULL << 26); /* do ~64M packets */
+ opt->nb_timers = 1E8;
+ opt->nb_timer_adptrs = 1;
+ opt->timer_tick_nsec = 1E3; /* 1000ns ~ 1us */
+ opt->max_tmo_nsec = 1E5; /* 100000ns ~100us */
+ opt->expiry_nsec = 1E4; /* 10000ns ~10us */
+ opt->prod_type = EVT_PROD_TYPE_SYNT;
+}
+
+typedef int (*option_parser_t)(struct evt_options *opt,
+ const char *arg);
+
+struct long_opt_parser {
+ const char *lgopt_name;
+ option_parser_t parser_fn;
+};
+
+static int
+evt_parse_nb_flows(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint32(&(opt->nb_flows), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_dev_id(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint8(&(opt->dev_id), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_verbose(struct evt_options *opt, const char *arg)
+{
+ opt->verbose_level = atoi(arg);
+ return 0;
+}
+
+static int
+evt_parse_fwd_latency(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->fwd_latency = 1;
+ return 0;
+}
+
+static int
+evt_parse_queue_priority(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->q_priority = 1;
+ return 0;
+}
+
+static int
+evt_parse_deq_tmo_nsec(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint32(&(opt->deq_tmo_nsec), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR;
+ return 0;
+}
+
+static int
+evt_parse_timer_prod_type(struct evt_options *opt, const char *arg __rte_unused)
+{
+ opt->prod_type = EVT_PROD_TYPE_EVENT_TIMER_ADPTR;
+ return 0;
+}
+
+static int
+evt_parse_timer_prod_type_burst(struct evt_options *opt,
+ const char *arg __rte_unused)
+{
+ opt->prod_type = EVT_PROD_TYPE_EVENT_TIMER_ADPTR;
+ opt->timdev_use_burst = 1;
+ return 0;
+}
+
+static int
+evt_parse_test_name(struct evt_options *opt, const char *arg)
+{
+ strlcpy(opt->test_name, arg, EVT_TEST_NAME_MAX_LEN);
+ return 0;
+}
+
+static int
+evt_parse_socket_id(struct evt_options *opt, const char *arg)
+{
+ opt->socket_id = atoi(arg);
+ return 0;
+}
+
+static int
+evt_parse_wkr_deq_dep(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint16(&(opt->wkr_deq_dep), arg);
+ return ret;
+}
+
+static int
+evt_parse_nb_pkts(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->nb_pkts), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_nb_timers(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->nb_timers), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_timer_tick_nsec(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->timer_tick_nsec), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_max_tmo_nsec(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->max_tmo_nsec), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_expiry_nsec(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint64(&(opt->expiry_nsec), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_nb_timer_adptrs(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint8(&(opt->nb_timer_adptrs), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_pool_sz(struct evt_options *opt, const char *arg)
+{
+ opt->pool_sz = atoi(arg);
+
+ return 0;
+}
+
+static int
+evt_parse_plcores(struct evt_options *opt, const char *corelist)
+{
+ int ret;
+
+ ret = parse_lcores_list(opt->plcores, corelist);
+ if (ret == -E2BIG)
+ evt_err("duplicate lcores in plcores");
+
+ return ret;
+}
+
+static int
+evt_parse_work_lcores(struct evt_options *opt, const char *corelist)
+{
+ int ret;
+
+ ret = parse_lcores_list(opt->wlcores, corelist);
+ if (ret == -E2BIG)
+ evt_err("duplicate lcores in wlcores");
+
+ return ret;
+}
+
+static int
+evt_parse_mbuf_sz(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint16(&(opt->mbuf_sz), arg);
+
+ return ret;
+}
+
+static int
+evt_parse_max_pkt_sz(struct evt_options *opt, const char *arg)
+{
+ int ret;
+
+ ret = parser_read_uint32(&(opt->max_pkt_sz), arg);
+
+ return ret;
+}
+
+static void
+usage(char *program)
+{
+ printf("usage : %s [EAL options] -- [application options]\n", program);
+ printf("application options:\n");
+ printf("\t--verbose : verbose level\n"
+ "\t--dev : device id of the event device\n"
+ "\t--test : name of the test application to run\n"
+ "\t--socket_id : socket_id of application resources\n"
+ "\t--pool_sz : pool size of the mempool\n"
+ "\t--plcores : list of lcore ids for producers\n"
+ "\t--wlcores : list of lcore ids for workers\n"
+ "\t--stlist : list of scheduled types of the stages\n"
+ "\t--nb_flows : number of flows to produce\n"
+ "\t--nb_pkts : number of packets to produce\n"
+ "\t--worker_deq_depth : dequeue depth of the worker\n"
+ "\t--fwd_latency : perform fwd_latency measurement\n"
+ "\t--queue_priority : enable queue priority\n"
+ "\t--deq_tmo_nsec : global dequeue timeout\n"
+ "\t--prod_type_ethdev : use ethernet device as producer.\n"
+ "\t--prod_type_timerdev : use event timer device as producer.\n"
+		"\t                      expiry_nsec would be the timeout\n"
+ "\t in ns.\n"
+ "\t--prod_type_timerdev_burst : use timer device as producer\n"
+ "\t burst mode.\n"
+ "\t--nb_timers : number of timers to arm.\n"
+ "\t--nb_timer_adptrs : number of timer adapters to use.\n"
+ "\t--timer_tick_nsec : timer tick interval in ns.\n"
+ "\t--max_tmo_nsec : max timeout interval in ns.\n"
+ "\t--expiry_nsec : event timer expiry ns.\n"
+ "\t--mbuf_sz : packet mbuf size.\n"
+ "\t--max_pkt_sz : max packet size.\n"
+ );
+ printf("available tests:\n");
+ evt_test_dump_names();
+}
+
+static int
+evt_parse_sched_type_list(struct evt_options *opt, const char *arg)
+{
+ char c;
+ int i = 0, j = -1;
+
+ for (i = 0; i < EVT_MAX_STAGES; i++)
+ opt->sched_type_list[i] = (uint8_t)-1;
+
+ i = 0;
+
+ do {
+ c = arg[++j];
+
+ switch (c) {
+ case 'o':
+ case 'O':
+ opt->sched_type_list[i++] = RTE_SCHED_TYPE_ORDERED;
+ break;
+ case 'a':
+ case 'A':
+ opt->sched_type_list[i++] = RTE_SCHED_TYPE_ATOMIC;
+ break;
+ case 'p':
+ case 'P':
+ opt->sched_type_list[i++] = RTE_SCHED_TYPE_PARALLEL;
+ break;
+ case ',':
+ break;
+ default:
+ if (c != '\0') {
+ evt_err("invalid sched_type %c", c);
+ return -EINVAL;
+ }
+ }
+ } while (c != '\0');
+
+ opt->nb_stages = i;
+ return 0;
+}
+
+static struct option lgopts[] = {
+ { EVT_NB_FLOWS, 1, 0, 0 },
+ { EVT_DEVICE, 1, 0, 0 },
+ { EVT_VERBOSE, 1, 0, 0 },
+ { EVT_TEST, 1, 0, 0 },
+ { EVT_PROD_LCORES, 1, 0, 0 },
+ { EVT_WORK_LCORES, 1, 0, 0 },
+ { EVT_SOCKET_ID, 1, 0, 0 },
+ { EVT_POOL_SZ, 1, 0, 0 },
+ { EVT_NB_PKTS, 1, 0, 0 },
+ { EVT_WKR_DEQ_DEP, 1, 0, 0 },
+ { EVT_SCHED_TYPE_LIST, 1, 0, 0 },
+ { EVT_FWD_LATENCY, 0, 0, 0 },
+ { EVT_QUEUE_PRIORITY, 0, 0, 0 },
+ { EVT_DEQ_TMO_NSEC, 1, 0, 0 },
+ { EVT_PROD_ETHDEV, 0, 0, 0 },
+ { EVT_PROD_TIMERDEV, 0, 0, 0 },
+ { EVT_PROD_TIMERDEV_BURST, 0, 0, 0 },
+ { EVT_NB_TIMERS, 1, 0, 0 },
+ { EVT_NB_TIMER_ADPTRS, 1, 0, 0 },
+ { EVT_TIMER_TICK_NSEC, 1, 0, 0 },
+ { EVT_MAX_TMO_NSEC, 1, 0, 0 },
+ { EVT_EXPIRY_NSEC, 1, 0, 0 },
+ { EVT_MBUF_SZ, 1, 0, 0 },
+ { EVT_MAX_PKT_SZ, 1, 0, 0 },
+ { EVT_HELP, 0, 0, 0 },
+ { NULL, 0, 0, 0 }
+};
+
+static int
+evt_opts_parse_long(int opt_idx, struct evt_options *opt)
+{
+ unsigned int i;
+
+ struct long_opt_parser parsermap[] = {
+ { EVT_NB_FLOWS, evt_parse_nb_flows},
+ { EVT_DEVICE, evt_parse_dev_id},
+ { EVT_VERBOSE, evt_parse_verbose},
+ { EVT_TEST, evt_parse_test_name},
+ { EVT_PROD_LCORES, evt_parse_plcores},
+ { EVT_WORK_LCORES, evt_parse_work_lcores},
+ { EVT_SOCKET_ID, evt_parse_socket_id},
+ { EVT_POOL_SZ, evt_parse_pool_sz},
+ { EVT_NB_PKTS, evt_parse_nb_pkts},
+ { EVT_WKR_DEQ_DEP, evt_parse_wkr_deq_dep},
+ { EVT_SCHED_TYPE_LIST, evt_parse_sched_type_list},
+ { EVT_FWD_LATENCY, evt_parse_fwd_latency},
+ { EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
+ { EVT_DEQ_TMO_NSEC, evt_parse_deq_tmo_nsec},
+ { EVT_PROD_ETHDEV, evt_parse_eth_prod_type},
+ { EVT_PROD_TIMERDEV, evt_parse_timer_prod_type},
+ { EVT_PROD_TIMERDEV_BURST, evt_parse_timer_prod_type_burst},
+ { EVT_NB_TIMERS, evt_parse_nb_timers},
+ { EVT_NB_TIMER_ADPTRS, evt_parse_nb_timer_adptrs},
+ { EVT_TIMER_TICK_NSEC, evt_parse_timer_tick_nsec},
+ { EVT_MAX_TMO_NSEC, evt_parse_max_tmo_nsec},
+ { EVT_EXPIRY_NSEC, evt_parse_expiry_nsec},
+ { EVT_MBUF_SZ, evt_parse_mbuf_sz},
+ { EVT_MAX_PKT_SZ, evt_parse_max_pkt_sz},
+ };
+
+ for (i = 0; i < RTE_DIM(parsermap); i++) {
+ if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name,
+ strlen(lgopts[opt_idx].name)) == 0)
+ return parsermap[i].parser_fn(opt, optarg);
+ }
+
+ return -EINVAL;
+}
+
+int
+evt_options_parse(struct evt_options *opt, int argc, char **argv)
+{
+ int opts, retval, opt_idx;
+
+ while ((opts = getopt_long(argc, argv, "", lgopts, &opt_idx)) != EOF) {
+ switch (opts) {
+ case 0: /* long options */
+ if (!strcmp(lgopts[opt_idx].name, "help")) {
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ }
+
+ retval = evt_opts_parse_long(opt_idx, opt);
+ if (retval != 0)
+ return retval;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void
+evt_options_dump(struct evt_options *opt)
+{
+ int lcore_id;
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ evt_dump("driver", "%s", dev_info.driver_name);
+ evt_dump("test", "%s", opt->test_name);
+ evt_dump("dev", "%d", opt->dev_id);
+ evt_dump("verbose_level", "%d", opt->verbose_level);
+ evt_dump("socket_id", "%d", opt->socket_id);
+ evt_dump("pool_sz", "%d", opt->pool_sz);
+ evt_dump("master lcore", "%d", rte_get_master_lcore());
+ evt_dump("nb_pkts", "%"PRIu64, opt->nb_pkts);
+ evt_dump("nb_timers", "%"PRIu64, opt->nb_timers);
+ evt_dump_begin("available lcores");
+ RTE_LCORE_FOREACH(lcore_id)
+ printf("%d ", lcore_id);
+ evt_dump_end;
+ evt_dump_nb_flows(opt);
+ evt_dump_worker_dequeue_depth(opt);
+}
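
For reference, a possible invocation assembled from these options could look like the following (illustrative only: the binary name comes from the Makefile above, -l is the usual EAL core list, and the lcore numbers must satisfy the per-test checks, e.g. the order tests need a master lcore plus one producer and at least one worker):

	./dpdk-test-eventdev -l 0-2 -- --test=order_queue --plcores=1 \
		--wlcores=2 --nb_flows=64 --nb_pkts=10000000 --verbose=2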
diff --git a/src/spdk/dpdk/app/test-eventdev/evt_options.h b/src/spdk/dpdk/app/test-eventdev/evt_options.h
new file mode 100644
index 000000000..748e54fae
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/evt_options.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _EVT_OPTIONS_
+#define _EVT_OPTIONS_
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+
+#include "evt_common.h"
+
+#define EVT_BOOL_FMT(x) ((x) ? "true" : "false")
+
+#define EVT_VERBOSE ("verbose")
+#define EVT_DEVICE ("dev")
+#define EVT_TEST ("test")
+#define EVT_PROD_LCORES ("plcores")
+#define EVT_WORK_LCORES ("wlcores")
+#define EVT_NB_FLOWS ("nb_flows")
+#define EVT_SOCKET_ID ("socket_id")
+#define EVT_POOL_SZ ("pool_sz")
+#define EVT_WKR_DEQ_DEP ("worker_deq_depth")
+#define EVT_NB_PKTS ("nb_pkts")
+#define EVT_NB_STAGES ("nb_stages")
+#define EVT_SCHED_TYPE_LIST ("stlist")
+#define EVT_FWD_LATENCY ("fwd_latency")
+#define EVT_QUEUE_PRIORITY ("queue_priority")
+#define EVT_DEQ_TMO_NSEC ("deq_tmo_nsec")
+#define EVT_PROD_ETHDEV ("prod_type_ethdev")
+#define EVT_PROD_TIMERDEV ("prod_type_timerdev")
+#define EVT_PROD_TIMERDEV_BURST ("prod_type_timerdev_burst")
+#define EVT_NB_TIMERS ("nb_timers")
+#define EVT_NB_TIMER_ADPTRS ("nb_timer_adptrs")
+#define EVT_TIMER_TICK_NSEC ("timer_tick_nsec")
+#define EVT_MAX_TMO_NSEC ("max_tmo_nsec")
+#define EVT_EXPIRY_NSEC ("expiry_nsec")
+#define EVT_MBUF_SZ ("mbuf_sz")
+#define EVT_MAX_PKT_SZ ("max_pkt_sz")
+#define EVT_HELP ("help")
+
+void evt_options_default(struct evt_options *opt);
+int evt_options_parse(struct evt_options *opt, int argc, char **argv);
+void evt_options_dump(struct evt_options *opt);
+
+/* options check helpers */
+static inline bool
+evt_lcores_has_overlap(bool lcores[], int lcore)
+{
+ if (lcores[lcore] == true) {
+ evt_err("lcore overlaps at %d", lcore);
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool
+evt_lcores_has_overlap_multi(bool lcoresx[], bool lcoresy[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (lcoresx[i] && lcoresy[i]) {
+			evt_err("lcores overlap at %d", i);
+ return true;
+ }
+ }
+ return false;
+}
+
+static inline bool
+evt_has_active_lcore(bool lcores[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (lcores[i])
+ return true;
+ return false;
+}
+
+static inline int
+evt_nr_active_lcores(bool lcores[])
+{
+ int i;
+ int c = 0;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (lcores[i])
+ c++;
+ return c;
+}
+
+static inline int
+evt_get_first_active_lcore(bool lcores[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if (lcores[i])
+ return i;
+ return -1;
+}
+
+static inline bool
+evt_has_disabled_lcore(bool lcores[])
+{
+ int i;
+
+ for (i = 0; i < RTE_MAX_LCORE; i++)
+ if ((lcores[i] == true) && !(rte_lcore_is_enabled(i)))
+ return true;
+ return false;
+}
+
+static inline bool
+evt_has_invalid_stage(struct evt_options *opt)
+{
+ if (!opt->nb_stages) {
+ evt_err("need minimum one stage, check --stlist");
+ return true;
+ }
+ if (opt->nb_stages > EVT_MAX_STAGES) {
+		evt_err("requested number of stages exceeds EVT_MAX_STAGES=%d",
+ EVT_MAX_STAGES);
+ return true;
+ }
+ return false;
+}
+
+static inline bool
+evt_has_invalid_sched_type(struct evt_options *opt)
+{
+ int i;
+
+ for (i = 0; i < opt->nb_stages; i++) {
+ if (opt->sched_type_list[i] > RTE_SCHED_TYPE_PARALLEL) {
+ evt_err("invalid sched_type %d at %d",
+ opt->sched_type_list[i], i);
+ return true;
+ }
+ }
+ return false;
+}
+
+/* option dump helpers */
+static inline void
+evt_dump_worker_lcores(struct evt_options *opt)
+{
+ int c;
+
+ evt_dump_begin("worker lcores");
+ for (c = 0; c < RTE_MAX_LCORE; c++) {
+ if (opt->wlcores[c])
+ printf("%d ", c);
+ }
+ evt_dump_end;
+}
+
+static inline void
+evt_dump_producer_lcores(struct evt_options *opt)
+{
+ int c;
+
+ evt_dump_begin("producer lcores");
+ for (c = 0; c < RTE_MAX_LCORE; c++) {
+ if (opt->plcores[c])
+ printf("%d ", c);
+ }
+ evt_dump_end;
+}
+
+static inline void
+evt_dump_nb_flows(struct evt_options *opt)
+{
+ evt_dump("nb_flows", "%d", opt->nb_flows);
+}
+
+static inline void
+evt_dump_worker_dequeue_depth(struct evt_options *opt)
+{
+ evt_dump("worker deq depth", "%d", opt->wkr_deq_dep);
+}
+
+static inline void
+evt_dump_nb_stages(struct evt_options *opt)
+{
+ evt_dump("nb_stages", "%d", opt->nb_stages);
+}
+
+static inline void
+evt_dump_fwd_latency(struct evt_options *opt)
+{
+ evt_dump("fwd_latency", "%s", EVT_BOOL_FMT(opt->fwd_latency));
+}
+
+static inline void
+evt_dump_queue_priority(struct evt_options *opt)
+{
+ evt_dump("queue_priority", "%s", EVT_BOOL_FMT(opt->q_priority));
+}
+
+static inline const char*
+evt_sched_type_2_str(uint8_t sched_type)
+{
+
+ if (sched_type == RTE_SCHED_TYPE_ORDERED)
+ return "O";
+ else if (sched_type == RTE_SCHED_TYPE_ATOMIC)
+ return "A";
+ else if (sched_type == RTE_SCHED_TYPE_PARALLEL)
+ return "P";
+ else
+ return "I";
+}
+
+static inline void
+evt_dump_sched_type_list(struct evt_options *opt)
+{
+ int i;
+
+ evt_dump_begin("sched_type_list");
+ for (i = 0; i < opt->nb_stages; i++)
+ printf("%s ", evt_sched_type_2_str(opt->sched_type_list[i]));
+
+ evt_dump_end;
+}
+
+static inline const char *
+evt_prod_id_to_name(enum evt_prod_type prod_type)
+{
+ switch (prod_type) {
+ default:
+ case EVT_PROD_TYPE_SYNT:
+ return "Synthetic producer lcores";
+ case EVT_PROD_TYPE_ETH_RX_ADPTR:
+ return "Ethdev Rx Adapter";
+ case EVT_PROD_TYPE_EVENT_TIMER_ADPTR:
+ return "Event timer adapter";
+ }
+
+ return "";
+}
+
+#define EVT_PROD_MAX_NAME_LEN 50
+static inline void
+evt_dump_producer_type(struct evt_options *opt)
+{
+ char name[EVT_PROD_MAX_NAME_LEN];
+
+ switch (opt->prod_type) {
+ default:
+ case EVT_PROD_TYPE_SYNT:
+ snprintf(name, EVT_PROD_MAX_NAME_LEN,
+ "Synthetic producer lcores");
+ break;
+ case EVT_PROD_TYPE_ETH_RX_ADPTR:
+ snprintf(name, EVT_PROD_MAX_NAME_LEN,
+ "Ethdev Rx Adapter producers");
+ evt_dump("nb_ethdev", "%d", rte_eth_dev_count_avail());
+ break;
+ case EVT_PROD_TYPE_EVENT_TIMER_ADPTR:
+ if (opt->timdev_use_burst)
+ snprintf(name, EVT_PROD_MAX_NAME_LEN,
+ "Event timer adapter burst mode producer");
+ else
+ snprintf(name, EVT_PROD_MAX_NAME_LEN,
+ "Event timer adapter producer");
+ evt_dump("nb_timer_adapters", "%d", opt->nb_timer_adptrs);
+ evt_dump("max_tmo_nsec", "%"PRIu64"", opt->max_tmo_nsec);
+ evt_dump("expiry_nsec", "%"PRIu64"", opt->expiry_nsec);
+ if (opt->optm_timer_tick_nsec)
+ evt_dump("optm_timer_tick_nsec", "%"PRIu64"",
+ opt->optm_timer_tick_nsec);
+ else
+ evt_dump("timer_tick_nsec", "%"PRIu64"",
+ opt->timer_tick_nsec);
+ break;
+ }
+ evt_dump("prod_type", "%s", name);
+}
+
+#endif /* _EVT_OPTIONS_ */
diff --git a/src/spdk/dpdk/app/test-eventdev/evt_test.c b/src/spdk/dpdk/app/test-eventdev/evt_test.c
new file mode 100644
index 000000000..72d6228bc
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/evt_test.c
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include "evt_test.h"
+
+static STAILQ_HEAD(, evt_test_entry) head = STAILQ_HEAD_INITIALIZER(head);
+
+void
+evt_test_register(struct evt_test_entry *entry)
+{
+ STAILQ_INSERT_TAIL(&head, entry, next);
+}
+
+struct evt_test*
+evt_test_get(const char *name)
+{
+ struct evt_test_entry *entry;
+
+ if (!name)
+ return NULL;
+
+ STAILQ_FOREACH(entry, &head, next)
+ if (!strncmp(entry->test.name, name, strlen(name)))
+ return &entry->test;
+
+ return NULL;
+}
+
+void
+evt_test_dump_names(void)
+{
+ struct evt_test_entry *entry;
+
+ STAILQ_FOREACH(entry, &head, next)
+ if (entry->test.name)
+ printf("\t %s\n", entry->test.name);
+}
diff --git a/src/spdk/dpdk/app/test-eventdev/evt_test.h b/src/spdk/dpdk/app/test-eventdev/evt_test.h
new file mode 100644
index 000000000..f07d2c333
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/evt_test.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _EVT_TEST_
+#define _EVT_TEST_
+
+#include <string.h>
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_eal.h>
+
+enum evt_test_result {
+ EVT_TEST_SUCCESS,
+ EVT_TEST_FAILED,
+ EVT_TEST_UNSUPPORTED,
+};
+
+struct evt_test;
+struct evt_options;
+
+typedef bool (*evt_test_capability_check_t)(struct evt_options *opt);
+typedef int (*evt_test_options_check_t)(struct evt_options *opt);
+typedef void (*evt_test_options_dump_t)(struct evt_options *opt);
+typedef int (*evt_test_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_mempool_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_ethdev_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_eventdev_setup_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_launch_lcores_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef int (*evt_test_result_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_eventdev_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_ethdev_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_mempool_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+typedef void (*evt_test_destroy_t)
+ (struct evt_test *test, struct evt_options *opt);
+
+struct evt_test_ops {
+ evt_test_capability_check_t cap_check;
+ evt_test_options_check_t opt_check;
+ evt_test_options_dump_t opt_dump;
+ evt_test_setup_t test_setup;
+ evt_test_mempool_setup_t mempool_setup;
+ evt_test_ethdev_setup_t ethdev_setup;
+ evt_test_eventdev_setup_t eventdev_setup;
+ evt_test_launch_lcores_t launch_lcores;
+ evt_test_result_t test_result;
+ evt_test_eventdev_destroy_t eventdev_destroy;
+ evt_test_ethdev_destroy_t ethdev_destroy;
+ evt_test_mempool_destroy_t mempool_destroy;
+ evt_test_destroy_t test_destroy;
+};
+
+struct evt_test {
+ const char *name;
+ void *test_priv;
+ struct evt_test_ops ops;
+};
+
+struct evt_test_entry {
+ struct evt_test test;
+
+ STAILQ_ENTRY(evt_test_entry) next;
+};
+
+void evt_test_register(struct evt_test_entry *test);
+void evt_test_dump_names(void);
+
+#define EVT_TEST_REGISTER(nm) \
+static struct evt_test_entry _evt_test_entry_ ##nm; \
+RTE_INIT(evt_test_ ##nm) \
+{ \
+ _evt_test_entry_ ##nm.test.name = RTE_STR(nm);\
+ memcpy(&_evt_test_entry_ ##nm.test.ops, &nm, \
+ sizeof(struct evt_test_ops)); \
+ evt_test_register(&_evt_test_entry_ ##nm); \
+}
+
+struct evt_test *evt_test_get(const char *name);
+
+static inline void *
+evt_test_priv(struct evt_test *test)
+{
+ return test->test_priv;
+}
+
+#endif /* _EVT_TEST_ */
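
EVT_TEST_REGISTER() relies on an RTE_INIT constructor: each test file defines a static struct evt_test_ops and registers it at startup, and evt_main.c later resolves it by name through evt_test_get(). A sketch with hypothetical callback names (the real registrations, e.g. EVT_TEST_REGISTER(order_atq), appear at the end of each test_*.c file below):

	static const struct evt_test_ops my_test = {
		.opt_check      = my_test_opt_check,
		.test_setup     = my_test_setup,
		.eventdev_setup = my_test_eventdev_setup,
		.launch_lcores  = my_test_launch_lcores,
		.test_result    = my_test_result, /* mandatory, checked in main() */
		.test_destroy   = my_test_destroy,
	};

	EVT_TEST_REGISTER(my_test); /* registered under the name "my_test" */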
diff --git a/src/spdk/dpdk/app/test-eventdev/meson.build b/src/spdk/dpdk/app/test-eventdev/meson.build
new file mode 100644
index 000000000..9e588d9ec
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Cavium, Inc
+
+sources = files('evt_main.c',
+ 'evt_options.c',
+ 'evt_test.c',
+ 'parser.c',
+ 'test_order_common.c',
+ 'test_order_atq.c',
+ 'test_order_queue.c',
+ 'test_perf_common.c',
+ 'test_perf_atq.c',
+ 'test_perf_queue.c',
+ 'test_pipeline_common.c',
+ 'test_pipeline_atq.c',
+ 'test_pipeline_queue.c')
+deps += 'eventdev'
diff --git a/src/spdk/dpdk/app/test-eventdev/parser.c b/src/spdk/dpdk/app/test-eventdev/parser.c
new file mode 100644
index 000000000..24f1855e9
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/parser.c
@@ -0,0 +1,359 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2017 Cavium, Inc.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+#include <stdbool.h>
+
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "parser.h"
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+int
+parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
+
+int
+parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoul(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_int32(int32_t *value, const char *p)
+{
+ char *next;
+ int32_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtol(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint64_hex(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+
+ val = strtoul(p, &next, 16);
+ if (p == next)
+ return -EINVAL;
+
+ p = skip_white_spaces(next);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32_hex(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16_hex(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8_hex(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens)
+{
+ uint32_t i;
+
+ if ((string == NULL) ||
+ (tokens == NULL) ||
+ (*n_tokens < 1))
+ return -EINVAL;
+
+ for (i = 0; i < *n_tokens; i++) {
+ tokens[i] = strtok_r(string, PARSE_DELIMITER, &string);
+ if (tokens[i] == NULL)
+ break;
+ }
+
+ if ((i == *n_tokens) &&
+ (strtok_r(string, PARSE_DELIMITER, &string) != NULL))
+ return -E2BIG;
+
+ *n_tokens = i;
+ return 0;
+}
+
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
+
+int
+parse_lcores_list(bool lcores[], const char *corelist)
+{
+ int i, idx = 0;
+ int min, max;
+ char *end = NULL;
+
+ if (corelist == NULL)
+ return -1;
+ while (isblank(*corelist))
+ corelist++;
+ i = strlen(corelist);
+ while ((i > 0) && isblank(corelist[i - 1]))
+ i--;
+
+ /* Get list of lcores */
+ min = RTE_MAX_LCORE;
+ do {
+ while (isblank(*corelist))
+ corelist++;
+ if (*corelist == '\0')
+ return -1;
+ idx = strtoul(corelist, &end, 10);
+
+ if (end == NULL)
+ return -1;
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ min = idx;
+ } else if ((*end == ',') || (*end == '\0')) {
+ max = idx;
+ if (min == RTE_MAX_LCORE)
+ min = idx;
+ for (idx = min; idx <= max; idx++) {
+ if (lcores[idx] == 1)
+ return -E2BIG;
+ lcores[idx] = 1;
+ }
+
+ min = RTE_MAX_LCORE;
+ } else
+ return -1;
+ corelist = end + 1;
+ } while (*end != '\0');
+
+ return 0;
+}
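
As a quick illustration of the two most used entry points (assumed inputs): parser_read_uint64() honours the k/K/M/G/T binary suffixes, and parse_lcores_list() accepts comma-separated ids and ranges while rejecting duplicates with -E2BIG:

	uint64_t pkts;
	bool lcores[RTE_MAX_LCORE] = { false };

	parser_read_uint64(&pkts, "64M");   /* pkts == 64 * 1024 * 1024 */
	parse_lcores_list(lcores, "1,4-6"); /* marks lcores 1, 4, 5 and 6 */
	parse_lcores_list(lcores, "4");     /* -E2BIG: lcore 4 already set */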
diff --git a/src/spdk/dpdk/app/test-eventdev/parser.h b/src/spdk/dpdk/app/test-eventdev/parser.h
new file mode 100644
index 000000000..673ff22d7
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/parser.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#ifndef __INCLUDE_PARSER_H__
+#define __INCLUDE_PARSER_H__
+
+#include <stdint.h>
+
+#define PARSE_DELIMITER " \f\n\r\t\v"
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
+
+static inline size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++)
+ ;
+
+ return i;
+}
+
+int parser_read_arg_bool(const char *p);
+
+int parser_read_uint64(uint64_t *value, const char *p);
+int parser_read_uint32(uint32_t *value, const char *p);
+int parser_read_uint16(uint16_t *value, const char *p);
+int parser_read_uint8(uint8_t *value, const char *p);
+
+int parser_read_uint64_hex(uint64_t *value, const char *p);
+int parser_read_uint32_hex(uint32_t *value, const char *p);
+int parser_read_uint16_hex(uint16_t *value, const char *p);
+int parser_read_uint8_hex(uint8_t *value, const char *p);
+
+int parser_read_int32(int32_t *value, const char *p);
+
+int parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
+
+int parse_lcores_list(bool lcores[], const char *corelist);
+#endif
diff --git a/src/spdk/dpdk/app/test-eventdev/test_order_atq.c b/src/spdk/dpdk/app/test-eventdev/test_order_atq.c
new file mode 100644
index 000000000..3366cfce9
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_order_atq.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include "test_order_common.h"
+
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
+
+static __rte_always_inline void
+order_atq_process_stage_0(struct rte_event *const ev)
+{
+ ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
+
+static int
+order_atq_worker(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->err == false) {
+ uint16_t event = rte_event_dequeue_burst(dev_id, port,
+ &ev, 1, 0);
+ if (!event) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ if (ev.sub_event_type == 0) { /* stage 0 from producer */
+ order_atq_process_stage_0(&ev);
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
+ != 1)
+ rte_pause();
+ } else if (ev.sub_event_type == 1) { /* stage 1 */
+ order_process_stage_1(t, &ev, nb_flows,
+ expected_flow_seq, outstand_pkts);
+ } else {
+ order_process_stage_invalid(t, &ev);
+ }
+ }
+ return 0;
+}
+
+static int
+order_atq_worker_burst(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev[BURST_SIZE];
+ uint16_t i;
+
+ while (t->err == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
+ BURST_SIZE, 0);
+
+ if (nb_rx == 0) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (ev[i].sub_event_type == 0) { /*stage 0 */
+ order_atq_process_stage_0(&ev[i]);
+ } else if (ev[i].sub_event_type == 1) { /* stage 1 */
+ order_process_stage_1(t, &ev[i], nb_flows,
+ expected_flow_seq, outstand_pkts);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ order_process_stage_invalid(t, &ev[i]);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev_id, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ const bool burst = evt_has_burst_mode(w->dev_id);
+
+ if (burst)
+ return order_atq_worker_burst(arg);
+ else
+ return order_atq_worker(arg);
+}
+
+static int
+order_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return order_launch_lcores(test, opt, worker_wrapper);
+}
+
+#define NB_QUEUES 1
+static int
+order_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+
+ const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
+ /* number of active worker cores + 1 producer */
+ const uint8_t nb_ports = nb_workers + 1;
+
+ ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* q0 all types queue configuration */
+ struct rte_event_queue_conf q0_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ ret = rte_event_queue_setup(opt->dev_id, 0, &q0_conf);
+ if (ret) {
+ evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* setup one port per worker, linking to all queues */
+ ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
+ if (ret)
+ return ret;
+
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ uint32_t service_id;
+ rte_event_dev_service_id_get(opt->dev_id, &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+ }
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+order_atq_opt_dump(struct evt_options *opt)
+{
+ order_opt_dump(opt);
+ evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
+}
+
+static bool
+order_atq_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
+ order_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ NB_QUEUES, dev_info.max_event_queues,
+ order_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+
+ if (!evt_has_all_types_queue(opt->dev_id))
+ return false;
+
+ return true;
+}
+
+static const struct evt_test_ops order_atq = {
+ .cap_check = order_atq_capability_check,
+ .opt_check = order_opt_check,
+ .opt_dump = order_atq_opt_dump,
+ .test_setup = order_test_setup,
+ .mempool_setup = order_mempool_setup,
+ .eventdev_setup = order_atq_eventdev_setup,
+ .launch_lcores = order_atq_launch_lcores,
+ .eventdev_destroy = order_eventdev_destroy,
+ .mempool_destroy = order_mempool_destroy,
+ .test_result = order_test_result,
+ .test_destroy = order_test_destroy,
+};
+
+EVT_TEST_REGISTER(order_atq);
diff --git a/src/spdk/dpdk/app/test-eventdev/test_order_common.c b/src/spdk/dpdk/app/test-eventdev/test_order_common.c
new file mode 100644
index 000000000..4190f9ade
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_order_common.c
@@ -0,0 +1,361 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "test_order_common.h"
+
+int
+order_test_result(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ return t->result;
+}
+
+static inline int
+order_producer(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_order *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ uint32_t *producer_flow_seq = t->producer_flow_seq;
+ const uint32_t nb_flows = t->nb_flows;
+ uint64_t count = 0;
+ struct rte_mbuf *m;
+ struct rte_event ev;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+ __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 0; /* stage 0 */
+
+ while (count < nb_pkts && t->err == false) {
+ m = rte_pktmbuf_alloc(pool);
+ if (m == NULL)
+ continue;
+
+ const uint32_t flow = (uintptr_t)m % nb_flows;
+ /* Maintain seq number per flow */
+ m->seqn = producer_flow_seq[flow]++;
+
+ ev.flow_id = flow;
+ ev.mbuf = m;
+
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+ if (t->err)
+ break;
+ rte_pause();
+ }
+
+ count++;
+ }
+ return 0;
+}
+
+int
+order_opt_check(struct evt_options *opt)
+{
+ if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
+ evt_err("Invalid producer type '%s' valid producer '%s'",
+ evt_prod_id_to_name(opt->prod_type),
+ evt_prod_id_to_name(EVT_PROD_TYPE_SYNT));
+ return -1;
+ }
+
+ /* 1 producer + N workers + 1 master */
+ if (rte_lcore_count() < 3) {
+		evt_err("test needs a minimum of 3 lcores");
+ return -1;
+ }
+
+ /* Validate worker lcores */
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
+		evt_err("worker lcores overlap with master lcore");
+ return -1;
+ }
+
+ if (evt_nr_active_lcores(opt->plcores) == 0) {
+ evt_err("missing the producer lcore");
+ return -1;
+ }
+
+ if (evt_nr_active_lcores(opt->plcores) != 1) {
+ evt_err("only one producer lcore must be selected");
+ return -1;
+ }
+
+ int plcore = evt_get_first_active_lcore(opt->plcores);
+
+ if (plcore < 0) {
+ evt_err("failed to find active producer");
+ return plcore;
+ }
+
+ if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
+		evt_err("worker lcores overlap with producer lcore");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->wlcores)) {
+		evt_err("one or more worker lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->wlcores)) {
+ evt_err("minimum one worker is required");
+ return -1;
+ }
+
+ /* Validate producer lcore */
+ if (plcore == (int)rte_get_master_lcore()) {
+ evt_err("producer lcore and master lcore should be different");
+ return -1;
+ }
+ if (!rte_lcore_is_enabled(plcore)) {
+ evt_err("producer lcore is not enabled");
+ return -1;
+ }
+
+ /* Fixups */
+ if (opt->nb_pkts == 0)
+ opt->nb_pkts = INT64_MAX;
+
+ return 0;
+}
+
+int
+order_test_setup(struct evt_test *test, struct evt_options *opt)
+{
+ void *test_order;
+
+ test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+ if (test_order == NULL) {
+ evt_err("failed to allocate test_order memory");
+ goto nomem;
+ }
+ test->test_priv = test_order;
+
+ struct test_order *t = evt_test_priv(test);
+
+ t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
+ sizeof(*t->producer_flow_seq) * opt->nb_flows,
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+
+ if (t->producer_flow_seq == NULL) {
+ evt_err("failed to allocate t->producer_flow_seq memory");
+ goto prod_nomem;
+ }
+
+ t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
+ sizeof(*t->expected_flow_seq) * opt->nb_flows,
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+
+ if (t->expected_flow_seq == NULL) {
+ evt_err("failed to allocate t->expected_flow_seq memory");
+ goto exp_nomem;
+ }
+ rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
+ t->err = false;
+ t->nb_pkts = opt->nb_pkts;
+ t->nb_flows = opt->nb_flows;
+ t->result = EVT_TEST_FAILED;
+ t->opt = opt;
+ return 0;
+
+exp_nomem:
+ rte_free(t->producer_flow_seq);
+prod_nomem:
+ rte_free(test->test_priv);
+nomem:
+ return -ENOMEM;
+}
+
+void
+order_test_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ rte_free(t->expected_flow_seq);
+ rte_free(t->producer_flow_seq);
+ rte_free(test->test_priv);
+}
+
+int
+order_mempool_setup(struct evt_test *test, struct evt_options *opt)
+{
+ struct test_order *t = evt_test_priv(test);
+
+ t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
+ 256 /* Cache */, 0,
+ 512, /* Use very small mbufs */
+ opt->socket_id);
+ if (t->pool == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_order *t = evt_test_priv(test);
+
+ rte_mempool_free(t->pool);
+}
+
+void
+order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(test);
+
+ rte_event_dev_stop(opt->dev_id);
+ rte_event_dev_close(opt->dev_id);
+}
+
+void
+order_opt_dump(struct evt_options *opt)
+{
+ evt_dump_producer_lcores(opt);
+	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump_worker_lcores(opt);
+ evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
+}
+
+int
+order_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_order *t = evt_test_priv(test);
+
+ int wkr_idx = 0;
+ /* launch workers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
+ lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ wkr_idx++;
+ }
+
+ /* launch producer */
+ int plcore = evt_get_first_active_lcore(opt->plcores);
+
+ ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
+ if (ret) {
+ evt_err("failed to launch order_producer %d", plcore);
+ return ret;
+ }
+
+ uint64_t cycles = rte_get_timer_cycles();
+ int64_t old_remaining = -1;
+
+ while (t->err == false) {
+ uint64_t new_cycles = rte_get_timer_cycles();
+ int64_t remaining = rte_atomic64_read(&t->outstand_pkts);
+
+ if (remaining <= 0) {
+ t->result = EVT_TEST_SUCCESS;
+ break;
+ }
+
+ if (new_cycles - cycles > rte_get_timer_hz() * 1) {
+ printf(CLGRN"\r%"PRId64""CLNRM, remaining);
+ fflush(stdout);
+ if (old_remaining == remaining) {
+ rte_event_dev_dump(opt->dev_id, stdout);
+				evt_err("No schedule progress for a second, deadlock");
+ t->err = true;
+ rte_smp_wmb();
+ break;
+ }
+ old_remaining = remaining;
+ cycles = new_cycles;
+ }
+ }
+ printf("\r");
+
+ return 0;
+}
+
+int
+order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t nb_workers, uint8_t nb_queues)
+{
+ int ret;
+ uint8_t port;
+ struct test_order *t = evt_test_priv(test);
+ struct rte_event_dev_info dev_info;
+
+ memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
+ ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (ret) {
+ evt_err("failed to get eventdev info %d", opt->dev_id);
+ return ret;
+ }
+
+ if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+ /* port configuration */
+ const struct rte_event_port_conf p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = dev_info.max_event_port_dequeue_depth,
+ .new_event_threshold = dev_info.max_num_events,
+ };
+
+ /* setup one port per worker, linking to all queues */
+ for (port = 0; port < nb_workers; port++) {
+ struct worker_data *w = &t->worker[port];
+
+ w->dev_id = opt->dev_id;
+ w->port_id = port;
+ w->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
+ if (ret != nb_queues) {
+ evt_err("failed to link all queues to port %d", port);
+ return -EINVAL;
+ }
+ }
+ struct prod_data *p = &t->prod;
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port; /* last port */
+ p->queue_id = 0;
+ p->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
+ if (ret) {
+ evt_err("failed to setup producer port %d", port);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/src/spdk/dpdk/app/test-eventdev/test_order_common.h b/src/spdk/dpdk/app/test-eventdev/test_order_common.h
new file mode 100644
index 000000000..e0fe9c968
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_order_common.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _TEST_ORDER_COMMON_
+#define _TEST_ORDER_COMMON_
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <rte_cycles.h>
+#include <rte_eventdev.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "evt_common.h"
+#include "evt_options.h"
+#include "evt_test.h"
+
+#define BURST_SIZE 16
+
+struct test_order;
+
+struct worker_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ struct test_order *t;
+};
+
+struct prod_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ uint8_t queue_id;
+ struct test_order *t;
+};
+
+struct test_order {
+ /* Don't change the offset of "err". The signal handler uses this memory
+ * to terminate the work on all lcores.
+ */
+ int err;
+ /*
+ * Atomic operations are expensive, but since this is a functional test
+ * we use them to keep the code simple.
+ */
+ rte_atomic64_t outstand_pkts;
+ enum evt_test_result result;
+ uint32_t nb_flows;
+ uint64_t nb_pkts;
+ struct rte_mempool *pool;
+ struct prod_data prod;
+ struct worker_data worker[EVT_MAX_PORTS];
+ uint32_t *producer_flow_seq;
+ uint32_t *expected_flow_seq;
+ struct evt_options *opt;
+} __rte_cache_aligned;
+
+static inline int
+order_nb_event_ports(struct evt_options *opt)
+{
+ return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
+}
+
+static __rte_always_inline void
+order_process_stage_1(struct test_order *const t,
+ struct rte_event *const ev, const uint32_t nb_flows,
+ uint32_t *const expected_flow_seq,
+ rte_atomic64_t *const outstand_pkts)
+{
+ const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
+ /* compare the seqn against expected value */
+ if (ev->mbuf->seqn != expected_flow_seq[flow]) {
+ evt_err("flow=%x seqn mismatch got=%x expected=%x",
+ flow, ev->mbuf->seqn, expected_flow_seq[flow]);
+ t->err = true;
+ rte_smp_wmb();
+ }
+ /*
+ * Events from an atomic flow of an event queue can be scheduled only to
+ * a single port at a time. The port is guaranteed to have exclusive
+ * (atomic) access to the given atomic flow, so we don't need to update
+ * expected_flow_seq inside a critical section.
+ */
+ expected_flow_seq[flow]++;
+ rte_pktmbuf_free(ev->mbuf);
+ rte_atomic64_sub(outstand_pkts, 1);
+}
+
+static __rte_always_inline void
+order_process_stage_invalid(struct test_order *const t,
+ struct rte_event *const ev)
+{
+ evt_err("invalid queue %d", ev->queue_id);
+ t->err = true;
+ rte_smp_wmb();
+}
+
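+/*
+ * Common prologue for the order test workers: unpack the worker context
+ * handed to rte_eal_remote_launch() and cache the hot fields in locals.
+ */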
+#define ORDER_WORKER_INIT\
+ struct worker_data *w = arg;\
+ struct test_order *t = w->t;\
+ struct evt_options *opt = t->opt;\
+ const uint8_t dev_id = w->dev_id;\
+ const uint8_t port = w->port_id;\
+ const uint32_t nb_flows = t->nb_flows;\
+ uint32_t *expected_flow_seq = t->expected_flow_seq;\
+ rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
+ if (opt->verbose_level > 1)\
+ printf("%s(): lcore %d dev_id %d port=%d\n",\
+ __func__, rte_lcore_id(), dev_id, port)
+
+int order_test_result(struct evt_test *test, struct evt_options *opt);
+int order_opt_check(struct evt_options *opt);
+int order_test_setup(struct evt_test *test, struct evt_options *opt);
+int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
+int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *));
+int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t nb_workers, uint8_t nb_queues);
+void order_test_destroy(struct evt_test *test, struct evt_options *opt);
+void order_opt_dump(struct evt_options *opt);
+void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
+
+#endif /* _TEST_ORDER_COMMON_ */
diff --git a/src/spdk/dpdk/app/test-eventdev/test_order_queue.c b/src/spdk/dpdk/app/test-eventdev/test_order_queue.c
new file mode 100644
index 000000000..495efd92f
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_order_queue.c
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include "test_order_common.h"
+
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
+
+static __rte_always_inline void
+order_queue_process_stage_0(struct rte_event *const ev)
+{
+ ev->queue_id = 1; /* q1 atomic queue */
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
+
+static int
+order_queue_worker(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->err == false) {
+ uint16_t event = rte_event_dequeue_burst(dev_id, port,
+ &ev, 1, 0);
+ if (!event) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ if (ev.queue_id == 0) { /* from ordered queue */
+ order_queue_process_stage_0(&ev);
+ while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
+ != 1)
+ rte_pause();
+ } else if (ev.queue_id == 1) { /* from atomic queue */
+ order_process_stage_1(t, &ev, nb_flows,
+ expected_flow_seq, outstand_pkts);
+ } else {
+ order_process_stage_invalid(t, &ev);
+ }
+ }
+ return 0;
+}
+
+static int
+order_queue_worker_burst(void *arg)
+{
+ ORDER_WORKER_INIT;
+ struct rte_event ev[BURST_SIZE];
+ uint16_t i;
+
+ while (t->err == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
+ BURST_SIZE, 0);
+
+ if (nb_rx == 0) {
+ if (rte_atomic64_read(outstand_pkts) <= 0)
+ break;
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (ev[i].queue_id == 0) { /* from ordered queue */
+ order_queue_process_stage_0(&ev[i]);
+ } else if (ev[i].queue_id == 1) {/* from atomic queue */
+ order_process_stage_1(t, &ev[i], nb_flows,
+ expected_flow_seq, outstand_pkts);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ order_process_stage_invalid(t, &ev[i]);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev_id, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ const bool burst = evt_has_burst_mode(w->dev_id);
+
+ if (burst)
+ return order_queue_worker_burst(arg);
+ else
+ return order_queue_worker(arg);
+}
+
+static int
+order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return order_launch_lcores(test, opt, worker_wrapper);
+}
+
+#define NB_QUEUES 2
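+/*
+ * Queue topology: producer -> q0 (ORDERED) -> workers -> q1 (ATOMIC) ->
+ * workers, where stage 1 checks that the per-flow sequence numbers are
+ * still in order.
+ */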
+static int
+order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+
+ const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
+ /* number of active worker cores + 1 producer */
+ const uint8_t nb_ports = nb_workers + 1;
+
+ ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* q0 (ordered queue) configuration */
+ struct rte_event_queue_conf q0_ordered_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .schedule_type = RTE_SCHED_TYPE_ORDERED,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
+ if (ret) {
+ evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* q1 (atomic queue) configuration */
+ struct rte_event_queue_conf q1_atomic_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .schedule_type = RTE_SCHED_TYPE_ATOMIC,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
+ if (ret) {
+ evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ /* setup one port per worker, linking to all queues */
+ ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
+ if (ret)
+ return ret;
+
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ uint32_t service_id;
+ rte_event_dev_service_id_get(opt->dev_id, &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+ }
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+order_queue_opt_dump(struct evt_options *opt)
+{
+ order_opt_dump(opt);
+ evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
+}
+
+static bool
+order_queue_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
+ order_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ NB_QUEUES, dev_info.max_event_queues,
+ order_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct evt_test_ops order_queue = {
+ .cap_check = order_queue_capability_check,
+ .opt_check = order_opt_check,
+ .opt_dump = order_queue_opt_dump,
+ .test_setup = order_test_setup,
+ .mempool_setup = order_mempool_setup,
+ .eventdev_setup = order_queue_eventdev_setup,
+ .launch_lcores = order_queue_launch_lcores,
+ .eventdev_destroy = order_eventdev_destroy,
+ .mempool_destroy = order_mempool_destroy,
+ .test_result = order_test_result,
+ .test_destroy = order_test_destroy,
+};
+
+EVT_TEST_REGISTER(order_queue);
diff --git a/src/spdk/dpdk/app/test-eventdev/test_perf_atq.c b/src/spdk/dpdk/app/test-eventdev/test_perf_atq.c
new file mode 100644
index 000000000..8fd51004e
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_perf_atq.c
@@ -0,0 +1,308 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "test_perf_common.h"
+
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
+
+static inline int
+atq_nb_event_queues(struct evt_options *opt)
+{
+ /* nb_queues = number of producers */
+ return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
+ rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
+}
+
+static __rte_always_inline void
+atq_mark_fwd_latency(struct rte_event *const ev)
+{
+ if (unlikely(ev->sub_event_type == 0)) {
+ struct perf_elt *const m = ev->event_ptr;
+
+ m->timestamp = rte_get_timer_cycles();
+ }
+}
+
+static __rte_always_inline void
+atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
+ const uint8_t nb_stages)
+{
+ ev->sub_event_type++;
+ ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
+
+static int
+perf_atq_worker(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ if (enable_fwd_latency && !prod_timer_type)
+ /* first stage in pipeline, mark ts to compute fwd latency */
+ atq_mark_fwd_latency(&ev);
+
+ /* last stage in pipeline */
+ if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(pool,
+ &ev, w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool, &ev, w,
+ bufs, sz, cnt);
+ } else {
+ atq_fwd_event(&ev, sched_type_list, nb_stages);
+ while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
+ rte_pause();
+ }
+ }
+ return 0;
+}
+
+static int
+perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ uint16_t i;
+ /* +1 so the prefetch of ev[i + 1] never reads past the array */
+ struct rte_event ev[BURST_SIZE + 1];
+
+ while (t->done == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (enable_fwd_latency && !prod_timer_type) {
+ rte_prefetch0(ev[i+1].event_ptr);
+ /* first stage in pipeline.
+ * mark time stamp to compute fwd latency
+ */
+ atq_mark_fwd_latency(&ev[i]);
+ }
+ /* last stage in pipeline */
+ if (unlikely((ev[i].sub_event_type % nb_stages)
+ == laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(
+ pool, &ev[i], w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool,
+ &ev[i], w, bufs, sz, cnt);
+
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ atq_fwd_event(&ev[i], sched_type_list,
+ nb_stages);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ struct evt_options *opt = w->t->opt;
+
+ const bool burst = evt_has_burst_mode(w->dev_id);
+ const int fwd_latency = opt->fwd_latency;
+
+ /* allow compiler to optimize */
+ if (!burst && !fwd_latency)
+ return perf_atq_worker(arg, 0);
+ else if (!burst && fwd_latency)
+ return perf_atq_worker(arg, 1);
+ else if (burst && !fwd_latency)
+ return perf_atq_worker_burst(arg, 0);
+ else if (burst && fwd_latency)
+ return perf_atq_worker_burst(arg, 1);
+
+ rte_panic("invalid worker\n");
+}
+
+static int
+perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return perf_launch_lcores(test, opt, worker_wrapper);
+}
+
+static int
+perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+ uint8_t queue;
+ uint8_t nb_queues;
+ uint8_t nb_ports;
+ uint16_t prod;
+ struct rte_event_dev_info dev_info;
+ struct test_perf *t = evt_test_priv(test);
+
+ nb_ports = evt_nr_active_lcores(opt->wlcores);
+ nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 :
+ evt_nr_active_lcores(opt->plcores);
+
+ nb_queues = atq_nb_event_queues(opt);
+
+ memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
+ ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (ret) {
+ evt_err("failed to get eventdev info %d", opt->dev_id);
+ return ret;
+ }
+
+ ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ struct rte_event_queue_conf q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ /* queue configurations */
+ for (queue = 0; queue < nb_queues; queue++) {
+ ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
+ if (ret) {
+ evt_err("failed to setup queue=%d", queue);
+ return ret;
+ }
+ }
+
+ if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+ /* port configuration */
+ const struct rte_event_port_conf p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = dev_info.max_event_port_dequeue_depth,
+ .new_event_threshold = dev_info.max_num_events,
+ };
+
+ ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
+ &p_conf);
+ if (ret)
+ return ret;
+
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ uint32_t service_id;
+ rte_event_dev_service_id_get(opt->dev_id, &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+ }
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_eth_dev_start(prod);
+ if (ret) {
+ evt_err("Ethernet dev [%d] failed to start. Using synthetic producer",
+ prod);
+ return ret;
+ }
+
+ ret = rte_event_eth_rx_adapter_start(prod);
+ if (ret) {
+ evt_err("Rx adapter[%d] start failed", prod);
+ return ret;
+ }
+ printf("%s: Port[%d] using Rx adapter[%d] started\n",
+ __func__, prod, prod);
+ }
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
+ ret = rte_event_timer_adapter_start(
+ t->timer_adptr[prod]);
+ if (ret) {
+ evt_err("failed to Start event timer adapter %d"
+ , prod);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void
+perf_atq_opt_dump(struct evt_options *opt)
+{
+ perf_opt_dump(opt, atq_nb_event_queues(opt));
+}
+
+static int
+perf_atq_opt_check(struct evt_options *opt)
+{
+ return perf_opt_check(opt, atq_nb_event_queues(opt));
+}
+
+static bool
+perf_atq_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
+ dev_info.max_event_ports < perf_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ atq_nb_event_queues(opt), dev_info.max_event_queues,
+ perf_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+ if (!evt_has_all_types_queue(opt->dev_id))
+ return false;
+
+ return true;
+}
+
+static const struct evt_test_ops perf_atq = {
+ .cap_check = perf_atq_capability_check,
+ .opt_check = perf_atq_opt_check,
+ .opt_dump = perf_atq_opt_dump,
+ .test_setup = perf_test_setup,
+ .ethdev_setup = perf_ethdev_setup,
+ .mempool_setup = perf_mempool_setup,
+ .eventdev_setup = perf_atq_eventdev_setup,
+ .launch_lcores = perf_atq_launch_lcores,
+ .eventdev_destroy = perf_eventdev_destroy,
+ .mempool_destroy = perf_mempool_destroy,
+ .ethdev_destroy = perf_ethdev_destroy,
+ .test_result = perf_test_result,
+ .test_destroy = perf_test_destroy,
+};
+
+EVT_TEST_REGISTER(perf_atq);
diff --git a/src/spdk/dpdk/app/test-eventdev/test_perf_common.c b/src/spdk/dpdk/app/test-eventdev/test_perf_common.c
new file mode 100644
index 000000000..b3af4bfec
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_perf_common.c
@@ -0,0 +1,839 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "test_perf_common.h"
+
+int
+perf_test_result(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ int i;
+ uint64_t total = 0;
+ struct test_perf *t = evt_test_priv(test);
+
+ printf("Packet distribution across worker cores :\n");
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].processed_pkts;
+ for (i = 0; i < t->nb_workers; i++)
+ printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
+ CLGRN" %3.2f\n"CLNRM, i,
+ t->worker[i].processed_pkts,
+ (((double)t->worker[i].processed_pkts)/total)
+ * 100);
+
+ return t->result;
+}
+
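+/*
+ * Synthetic producer: allocate perf_elt objects in bursts of BURST_SIZE,
+ * timestamp each one and inject it as a NEW event on the stage-0 queue,
+ * cycling the flow id across nb_flows.
+ */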
+static inline int
+perf_producer(void *arg)
+{
+ int i;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ const uint8_t dev_id = p->dev_id;
+ const uint8_t port = p->port_id;
+ struct rte_mempool *pool = t->pool;
+ const uint64_t nb_pkts = t->nb_pkts;
+ const uint32_t nb_flows = t->nb_flows;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+ struct rte_event ev;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
+ rte_lcore_id(), dev_id, port, p->queue_id);
+
+ ev.event = 0;
+ ev.op = RTE_EVENT_OP_NEW;
+ ev.queue_id = p->queue_id;
+ ev.sched_type = t->opt->sched_type_list[0];
+ ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ ev.event_type = RTE_EVENT_TYPE_CPU;
+ ev.sub_event_type = 0; /* stage 0 */
+
+ while (count < nb_pkts && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+ continue;
+ for (i = 0; i < BURST_SIZE; i++) {
+ ev.flow_id = flow_counter++ % nb_flows;
+ ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ while (rte_event_enqueue_burst(dev_id,
+ port, &ev, 1) != 1) {
+ if (t->done)
+ break;
+ rte_pause();
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
+ }
+ count += BURST_SIZE;
+ }
+
+ return 0;
+}
+
+static inline int
+perf_event_timer_producer(void *arg)
+{
+ int i;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ uint64_t arm_latency = 0;
+ const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_timers = opt->nb_timers;
+ struct rte_mempool *pool = t->pool;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+ struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
+ uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+ memset(&tim, 0, sizeof(struct rte_event_timer));
+ timeout_ticks = opt->optm_timer_tick_nsec ?
+ (timeout_ticks * opt->timer_tick_nsec)
+ / opt->optm_timer_tick_nsec : timeout_ticks;
+ timeout_ticks += timeout_ticks ? 0 : 1;
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = t->opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+ while (count < nb_timers && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+ continue;
+ for (i = 0; i < BURST_SIZE; i++) {
+ rte_prefetch0(m[i + 1]);
+ m[i]->tim = tim;
+ m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+ m[i]->tim.ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ while (rte_event_timer_arm_burst(
+ adptr[flow_counter % nb_timer_adptrs],
+ (struct rte_event_timer **)&m[i], 1) != 1) {
+ if (t->done)
+ break;
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
+ arm_latency += rte_get_timer_cycles() - m[i]->timestamp;
+ }
+ count += BURST_SIZE;
+ }
+ fflush(stdout);
+ rte_delay_ms(1000);
+ printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+ __func__, rte_lcore_id(),
+ count ? (float)(arm_latency / count) /
+ (rte_get_timer_hz() / 1000000) : 0);
+ return 0;
+}
+
+static inline int
+perf_event_timer_producer_burst(void *arg)
+{
+ int i;
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ struct evt_options *opt = t->opt;
+ uint32_t flow_counter = 0;
+ uint64_t count = 0;
+ uint64_t arm_latency = 0;
+ const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+ const uint32_t nb_flows = t->nb_flows;
+ const uint64_t nb_timers = opt->nb_timers;
+ struct rte_mempool *pool = t->pool;
+ struct perf_elt *m[BURST_SIZE + 1] = {NULL};
+ struct rte_event_timer_adapter **adptr = t->timer_adptr;
+ struct rte_event_timer tim;
+ uint64_t timeout_ticks = opt->expiry_nsec / opt->timer_tick_nsec;
+
+ memset(&tim, 0, sizeof(struct rte_event_timer));
+ timeout_ticks = opt->optm_timer_tick_nsec ?
+ (timeout_ticks * opt->timer_tick_nsec)
+ / opt->optm_timer_tick_nsec : timeout_ticks;
+ timeout_ticks += timeout_ticks ? 0 : 1;
+ tim.ev.event_type = RTE_EVENT_TYPE_TIMER;
+ tim.ev.op = RTE_EVENT_OP_NEW;
+ tim.ev.sched_type = t->opt->sched_type_list[0];
+ tim.ev.queue_id = p->queue_id;
+ tim.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+ tim.state = RTE_EVENT_TIMER_NOT_ARMED;
+ tim.timeout_ticks = timeout_ticks;
+
+ if (opt->verbose_level > 1)
+ printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+ while (count < nb_timers && t->done == false) {
+ if (rte_mempool_get_bulk(pool, (void **)m, BURST_SIZE) < 0)
+ continue;
+ for (i = 0; i < BURST_SIZE; i++) {
+ rte_prefetch0(m[i + 1]);
+ m[i]->tim = tim;
+ m[i]->tim.ev.flow_id = flow_counter++ % nb_flows;
+ m[i]->tim.ev.event_ptr = m[i];
+ m[i]->timestamp = rte_get_timer_cycles();
+ }
+ rte_event_timer_arm_tmo_tick_burst(
+ adptr[flow_counter % nb_timer_adptrs],
+ (struct rte_event_timer **)m,
+ tim.timeout_ticks,
+ BURST_SIZE);
+ arm_latency += rte_get_timer_cycles() - m[i - 1]->timestamp;
+ count += BURST_SIZE;
+ }
+ fflush(stdout);
+ rte_delay_ms(1000);
+ printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+ __func__, rte_lcore_id(),
+ count ? (float)(arm_latency / count) /
+ (rte_get_timer_hz() / 1000000) : 0);
+ return 0;
+}
+
+static int
+perf_producer_wrapper(void *arg)
+{
+ struct prod_data *p = arg;
+ struct test_perf *t = p->t;
+ /* Launch the producer function only in case of synthetic producer. */
+ if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
+ return perf_producer(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+ !t->opt->timdev_use_burst)
+ return perf_event_timer_producer(arg);
+ else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR &&
+ t->opt->timdev_use_burst)
+ return perf_event_timer_producer_burst(arg);
+ return 0;
+}
+
+static inline uint64_t
+processed_pkts(struct test_perf *t)
+{
+ uint8_t i;
+ uint64_t total = 0;
+
+ rte_smp_rmb();
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].processed_pkts;
+
+ return total;
+}
+
+static inline uint64_t
+total_latency(struct test_perf *t)
+{
+ uint8_t i;
+ uint64_t total = 0;
+
+ rte_smp_rmb();
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].latency;
+
+ return total;
+}
+
+
+int
+perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_perf *t = evt_test_priv(test);
+
+ int port_idx = 0;
+ /* launch workers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker,
+ &t->worker[port_idx], lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ port_idx++;
+ }
+
+ /* launch producers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->plcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(perf_producer_wrapper,
+ &t->prod[port_idx], lcore_id);
+ if (ret) {
+ evt_err("failed to launch perf_producer %d", lcore_id);
+ return ret;
+ }
+ port_idx++;
+ }
+
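+ /*
+ * Monitoring loop: every perf_sample (one second) print the current and
+ * average mpps, plus the average forward latency when enabled; every
+ * dead_lock_sample (five seconds) check that the outstanding packet
+ * count is still dropping for synthetic/timer producers, otherwise dump
+ * the eventdev state and stop the test.
+ */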
+ const uint64_t total_pkts = t->outstand_pkts;
+
+ uint64_t dead_lock_cycles = rte_get_timer_cycles();
+ int64_t dead_lock_remaining = total_pkts;
+ const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;
+
+ uint64_t perf_cycles = rte_get_timer_cycles();
+ int64_t perf_remaining = total_pkts;
+ const uint64_t perf_sample = rte_get_timer_hz();
+
+ static float total_mpps;
+ static uint64_t samples;
+
+ const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
+ int64_t remaining = t->outstand_pkts - processed_pkts(t);
+
+ while (t->done == false) {
+ const uint64_t new_cycles = rte_get_timer_cycles();
+
+ if ((new_cycles - perf_cycles) > perf_sample) {
+ const uint64_t latency = total_latency(t);
+ const uint64_t pkts = processed_pkts(t);
+
+ remaining = t->outstand_pkts - pkts;
+ float mpps = (float)(perf_remaining-remaining)/1000000;
+
+ perf_remaining = remaining;
+ perf_cycles = new_cycles;
+ total_mpps += mpps;
+ ++samples;
+ if (opt->fwd_latency && pkts > 0) {
+ printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
+ mpps, total_mpps/samples,
+ (float)(latency/pkts)/freq_mhz);
+ } else {
+ printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
+ mpps, total_mpps/samples);
+ }
+ fflush(stdout);
+
+ if (remaining <= 0) {
+ t->result = EVT_TEST_SUCCESS;
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type ==
+ EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ t->done = true;
+ rte_smp_wmb();
+ break;
+ }
+ }
+ }
+
+ if (new_cycles - dead_lock_cycles > dead_lock_sample &&
+ (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)) {
+ remaining = t->outstand_pkts - processed_pkts(t);
+ if (dead_lock_remaining == remaining) {
+ rte_event_dev_dump(opt->dev_id, stdout);
+ evt_err("No schedules for seconds, deadlock");
+ t->done = true;
+ rte_smp_wmb();
+ break;
+ }
+ dead_lock_remaining = remaining;
+ dead_lock_cycles = new_cycles;
+ }
+ }
+ printf("\n");
+ return 0;
+}
+
+static int
+perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
+ struct rte_event_port_conf prod_conf)
+{
+ int ret = 0;
+ uint16_t prod;
+ struct rte_event_eth_rx_adapter_queue_conf queue_conf;
+
+ memset(&queue_conf, 0,
+ sizeof(struct rte_event_eth_rx_adapter_queue_conf));
+ queue_conf.ev.sched_type = opt->sched_type_list[0];
+ RTE_ETH_FOREACH_DEV(prod) {
+ uint32_t cap;
+
+ ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
+ prod, &cap);
+ if (ret) {
+ evt_err("failed to get event rx adapter[%d]"
+ " capabilities",
+ opt->dev_id);
+ return ret;
+ }
+ queue_conf.ev.queue_id = prod * stride;
+ ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
+ &prod_conf);
+ if (ret) {
+ evt_err("failed to create rx adapter[%d]", prod);
+ return ret;
+ }
+ ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
+ &queue_conf);
+ if (ret) {
+ evt_err("failed to add rx queues to adapter[%d]", prod);
+ return ret;
+ }
+
+ if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+ uint32_t service_id;
+
+ rte_event_eth_rx_adapter_service_id_get(prod,
+ &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("Failed to setup service core"
+ " for Rx adapter\n");
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int
+perf_event_timer_adapter_setup(struct test_perf *t)
+{
+ int i;
+ int ret;
+ struct rte_event_timer_adapter_info adapter_info;
+ struct rte_event_timer_adapter *wl;
+ uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
+ uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
+
+ if (nb_producers == 1)
+ flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;
+
+ for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
+ struct rte_event_timer_adapter_conf config = {
+ .event_dev_id = t->opt->dev_id,
+ .timer_adapter_id = i,
+ .timer_tick_ns = t->opt->timer_tick_nsec,
+ .max_tmo_ns = t->opt->max_tmo_nsec,
+ .nb_timers = t->opt->pool_sz,
+ .flags = flags,
+ };
+
+ wl = rte_event_timer_adapter_create(&config);
+ if (wl == NULL) {
+ evt_err("failed to create event timer ring %d", i);
+ return rte_errno;
+ }
+
+ memset(&adapter_info, 0,
+ sizeof(struct rte_event_timer_adapter_info));
+ rte_event_timer_adapter_get_info(wl, &adapter_info);
+ t->opt->optm_timer_tick_nsec = adapter_info.min_resolution_ns;
+
+ if (!(adapter_info.caps &
+ RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+ uint32_t service_id = -1U;
+
+ rte_event_timer_adapter_service_id_get(wl,
+ &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("Failed to setup service core"
+ " for timer adapter\n");
+ return ret;
+ }
+ rte_service_runstate_set(service_id, 1);
+ }
+ t->timer_adptr[i] = wl;
+ }
+ return 0;
+}
+
+int
+perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t stride, uint8_t nb_queues,
+ const struct rte_event_port_conf *port_conf)
+{
+ struct test_perf *t = evt_test_priv(test);
+ uint16_t port, prod;
+ int ret = -1;
+
+ /* setup one port per worker, linking to all queues */
+ for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
+ port++) {
+ struct worker_data *w = &t->worker[port];
+
+ w->dev_id = opt->dev_id;
+ w->port_id = port;
+ w->t = t;
+ w->processed_pkts = 0;
+ w->latency = 0;
+
+ ret = rte_event_port_setup(opt->dev_id, port, port_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
+ if (ret != nb_queues) {
+ evt_err("failed to link all queues to port %d", port);
+ return -EINVAL;
+ }
+ }
+
+ /* port for producers, no links */
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+ p->t = t;
+ }
+
+ ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
+ if (ret)
+ return ret;
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ prod = 0;
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+ p->queue_id = prod * stride;
+ p->t = t;
+ prod++;
+ }
+
+ ret = perf_event_timer_adapter_setup(t);
+ if (ret)
+ return ret;
+ } else {
+ prod = 0;
+ for ( ; port < perf_nb_event_ports(opt); port++) {
+ struct prod_data *p = &t->prod[port];
+
+ p->dev_id = opt->dev_id;
+ p->port_id = port;
+ p->queue_id = prod * stride;
+ p->t = t;
+
+ ret = rte_event_port_setup(opt->dev_id, port,
+ port_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+ prod++;
+ }
+ }
+
+ return ret;
+}
+
+int
+perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
+{
+ unsigned int lcores;
+
+ /* Minimum lcores: 1 producer + 1 worker + 1 master when producer cores
+ * are used, else 1 worker + 1 master when the Rx adapter is used.
+ */
+ lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;
+
+ if (rte_lcore_count() < lcores) {
+ evt_err("test need minimum %d lcores", lcores);
+ return -1;
+ }
+
+ /* Validate worker lcores */
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
+ evt_err("worker lcores overlaps with master lcore");
+ return -1;
+ }
+ if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
+ evt_err("worker lcores overlaps producer lcores");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->wlcores)) {
+ evt_err("one or more workers lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->wlcores)) {
+ evt_err("minimum one worker is required");
+ return -1;
+ }
+
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ /* Validate producer lcores */
+ if (evt_lcores_has_overlap(opt->plcores,
+ rte_get_master_lcore())) {
+ evt_err("producer lcores overlaps with master lcore");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->plcores)) {
+ evt_err("one or more producer lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->plcores)) {
+ evt_err("minimum one producer is required");
+ return -1;
+ }
+ }
+
+ if (evt_has_invalid_stage(opt))
+ return -1;
+
+ if (evt_has_invalid_sched_type(opt))
+ return -1;
+
+ if (nb_queues > EVT_MAX_QUEUES) {
+ evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
+ return -1;
+ }
+ if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
+ evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
+ return -1;
+ }
+
+ /* Fixups */
+ if ((opt->nb_stages == 1 &&
+ opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
+ opt->fwd_latency) {
+ evt_info("fwd_latency is valid when nb_stages > 1, disabling");
+ opt->fwd_latency = 0;
+ }
+
+ if (opt->fwd_latency && !opt->q_priority) {
+ evt_info("enabled queue priority for latency measurement");
+ opt->q_priority = 1;
+ }
+ if (opt->nb_pkts == 0)
+ opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);
+
+ return 0;
+}
+
+void
+perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
+{
+ evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
+ evt_dump_producer_lcores(opt);
+ evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump_worker_lcores(opt);
+ evt_dump_nb_stages(opt);
+ evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
+ evt_dump("nb_evdev_queues", "%d", nb_queues);
+ evt_dump_queue_priority(opt);
+ evt_dump_sched_type_list(opt);
+ evt_dump_producer_type(opt);
+}
+
+void
+perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ int i;
+ struct test_perf *t = evt_test_priv(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ for (i = 0; i < opt->nb_timer_adptrs; i++)
+ rte_event_timer_adapter_stop(t->timer_adptr[i]);
+ }
+ rte_event_dev_stop(opt->dev_id);
+ rte_event_dev_close(opt->dev_id);
+}
+
+static inline void
+perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
+ void *obj, unsigned i __rte_unused)
+{
+ memset(obj, 0, mp->elt_size);
+}
+
+#define NB_RX_DESC 128
+#define NB_TX_DESC 512
+int
+perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i;
+ int ret;
+ struct test_perf *t = evt_test_priv(test);
+ struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ .split_hdr_size = 0,
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_hf = ETH_RSS_IP,
+ },
+ },
+ };
+
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
+ return 0;
+
+ if (!rte_eth_dev_count_avail()) {
+ evt_err("No ethernet ports found.");
+ return -ENODEV;
+ }
+
+ RTE_ETH_FOREACH_DEV(i) {
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_conf local_port_conf = port_conf;
+
+ ret = rte_eth_dev_info_get(i, &dev_info);
+ if (ret != 0) {
+ evt_err("Error during getting device (port %u) info: %s\n",
+ i, strerror(-ret));
+ return ret;
+ }
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ evt_info("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"\n",
+ i,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
+ if (rte_eth_dev_configure(i, 1, 1, &local_port_conf) < 0) {
+ evt_err("Failed to configure eth port [%d]", i);
+ return -EINVAL;
+ }
+
+ if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
+ rte_socket_id(), NULL, t->pool) < 0) {
+ evt_err("Failed to setup eth port [%d] rx_queue: %d.",
+ i, 0);
+ return -EINVAL;
+ }
+
+ if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
+ rte_socket_id(), NULL) < 0) {
+ evt_err("Failed to setup eth port [%d] tx_queue: %d.",
+ i, 0);
+ return -EINVAL;
+ }
+
+ ret = rte_eth_promiscuous_enable(i);
+ if (ret != 0) {
+ evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
+ i, rte_strerror(-ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i;
+ RTE_SET_USED(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(i) {
+ rte_event_eth_rx_adapter_stop(i);
+ rte_eth_dev_stop(i);
+ }
+ }
+}
+
+int
+perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
+{
+ struct test_perf *t = evt_test_priv(test);
+
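+ /* Synthetic and timer producers carry perf_elt objects, so a raw
+ * mempool is enough; the ethdev Rx adapter path needs real mbufs and
+ * therefore a pktmbuf pool.
+ */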
+ if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ t->pool = rte_mempool_create(test->name, /* mempool name */
+ opt->pool_sz, /* number of elements*/
+ sizeof(struct perf_elt), /* element size*/
+ 512, /* cache size*/
+ 0, NULL, NULL,
+ perf_elt_init, /* obj constructor */
+ NULL, opt->socket_id, 0); /* flags */
+ } else {
+ t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
+ opt->pool_sz, /* number of elements*/
+ 512, /* cache size*/
+ 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ opt->socket_id); /* flags */
+
+ }
+
+ if (t->pool == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_perf *t = evt_test_priv(test);
+
+ rte_mempool_free(t->pool);
+}
+
+int
+perf_test_setup(struct evt_test *test, struct evt_options *opt)
+{
+ void *test_perf;
+
+ test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
+ RTE_CACHE_LINE_SIZE, opt->socket_id);
+ if (test_perf == NULL) {
+ evt_err("failed to allocate test_perf memory");
+ goto nomem;
+ }
+ test->test_priv = test_perf;
+
+ struct test_perf *t = evt_test_priv(test);
+
+ if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ t->outstand_pkts = opt->nb_timers *
+ evt_nr_active_lcores(opt->plcores);
+ t->nb_pkts = opt->nb_timers;
+ } else {
+ t->outstand_pkts = opt->nb_pkts *
+ evt_nr_active_lcores(opt->plcores);
+ t->nb_pkts = opt->nb_pkts;
+ }
+
+ t->nb_workers = evt_nr_active_lcores(opt->wlcores);
+ t->done = false;
+ t->nb_flows = opt->nb_flows;
+ t->result = EVT_TEST_FAILED;
+ t->opt = opt;
+ memcpy(t->sched_type_list, opt->sched_type_list,
+ sizeof(opt->sched_type_list));
+ return 0;
+nomem:
+ return -ENOMEM;
+}
+
+void
+perf_test_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+
+ rte_free(test->test_priv);
+}
diff --git a/src/spdk/dpdk/app/test-eventdev/test_perf_common.h b/src/spdk/dpdk/app/test-eventdev/test_perf_common.h
new file mode 100644
index 000000000..ff9705df8
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_perf_common.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#ifndef _TEST_PERF_COMMON_
+#define _TEST_PERF_COMMON_
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_timer_adapter.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+
+#include "evt_common.h"
+#include "evt_options.h"
+#include "evt_test.h"
+
+struct test_perf;
+
+struct worker_data {
+ uint64_t processed_pkts;
+ uint64_t latency;
+ uint8_t dev_id;
+ uint8_t port_id;
+ struct test_perf *t;
+} __rte_cache_aligned;
+
+struct prod_data {
+ uint8_t dev_id;
+ uint8_t port_id;
+ uint8_t queue_id;
+ struct test_perf *t;
+} __rte_cache_aligned;
+
+
+struct test_perf {
+ /* Don't change the offset of "done". The signal handler uses this memory
+ * to terminate the work on all lcores.
+ */
+ int done;
+ uint64_t outstand_pkts;
+ uint8_t nb_workers;
+ enum evt_test_result result;
+ uint32_t nb_flows;
+ uint64_t nb_pkts;
+ struct rte_mempool *pool;
+ struct prod_data prod[EVT_MAX_PORTS];
+ struct worker_data worker[EVT_MAX_PORTS];
+ struct evt_options *opt;
+ uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
+ struct rte_event_timer_adapter *timer_adptr[
+ RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
+} __rte_cache_aligned;
+
+struct perf_elt {
+ union {
+ struct rte_event_timer tim;
+ struct {
+ char pad[offsetof(struct rte_event_timer, user_meta)];
+ uint64_t timestamp;
+ };
+ };
+} __rte_cache_aligned;
+
+#define BURST_SIZE 16
+
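+/*
+ * Common prologue for the perf test workers: unpack the per-worker context,
+ * cache the test parameters in locals and reserve a small on-stack buffer
+ * used to batch rte_mempool_put_bulk() calls.
+ */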
+#define PERF_WORKER_INIT\
+ struct worker_data *w = arg;\
+ struct test_perf *t = w->t;\
+ struct evt_options *opt = t->opt;\
+ const uint8_t dev = w->dev_id;\
+ const uint8_t port = w->port_id;\
+ const uint8_t prod_timer_type = \
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
+ uint8_t *const sched_type_list = &t->sched_type_list[0];\
+ struct rte_mempool *const pool = t->pool;\
+ const uint8_t nb_stages = t->opt->nb_stages;\
+ const uint8_t laststage = nb_stages - 1;\
+ uint8_t cnt = 0;\
+ void *bufs[16] __rte_cache_aligned;\
+ int const sz = RTE_DIM(bufs);\
+ if (opt->verbose_level > 1)\
+ printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
+ rte_lcore_id(), dev, port)
+
+static __rte_always_inline int
+perf_process_last_stage(struct rte_mempool *const pool,
+ struct rte_event *const ev, struct worker_data *const w,
+ void *bufs[], int const buf_sz, uint8_t count)
+{
+ bufs[count++] = ev->event_ptr;
+ w->processed_pkts++;
+ rte_smp_wmb();
+
+ if (unlikely(count == buf_sz)) {
+ count = 0;
+ rte_mempool_put_bulk(pool, bufs, buf_sz);
+ }
+ return count;
+}
+
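+/* Same as perf_process_last_stage(), but also accumulate the forward latency
+ * measured against the timestamp recorded at the first stage.
+ */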
+static __rte_always_inline uint8_t
+perf_process_last_stage_latency(struct rte_mempool *const pool,
+ struct rte_event *const ev, struct worker_data *const w,
+ void *bufs[], int const buf_sz, uint8_t count)
+{
+ uint64_t latency;
+ struct perf_elt *const m = ev->event_ptr;
+
+ bufs[count++] = ev->event_ptr;
+ w->processed_pkts++;
+
+ if (unlikely(count == buf_sz)) {
+ count = 0;
+ latency = rte_get_timer_cycles() - m->timestamp;
+ rte_mempool_put_bulk(pool, bufs, buf_sz);
+ } else {
+ latency = rte_get_timer_cycles() - m->timestamp;
+ }
+
+ w->latency += latency;
+ rte_smp_wmb();
+ return count;
+}
+
+
+static inline int
+perf_nb_event_ports(struct evt_options *opt)
+{
+ return evt_nr_active_lcores(opt->wlcores) +
+ evt_nr_active_lcores(opt->plcores);
+}
+
+int perf_test_result(struct evt_test *test, struct evt_options *opt);
+int perf_opt_check(struct evt_options *opt, uint64_t nb_queues);
+int perf_test_setup(struct evt_test *test, struct evt_options *opt);
+int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
+int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
+int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t stride, uint8_t nb_queues,
+ const struct rte_event_port_conf *port_conf);
+int perf_event_dev_service_setup(uint8_t dev_id);
+int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *));
+void perf_opt_dump(struct evt_options *opt, uint8_t nb_queues);
+void perf_test_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
+void perf_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+
+#endif /* _TEST_PERF_COMMON_ */
diff --git a/src/spdk/dpdk/app/test-eventdev/test_perf_queue.c b/src/spdk/dpdk/app/test-eventdev/test_perf_queue.c
new file mode 100644
index 000000000..f4ea3a795
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_perf_queue.c
@@ -0,0 +1,323 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
+ */
+
+#include "test_perf_common.h"
+
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
+
+static inline int
+perf_queue_nb_event_queues(struct evt_options *opt)
+{
+ /* nb_queues = number of producers * number of stages */
+ uint8_t nb_prod = opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
+ rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
+ return nb_prod * opt->nb_stages;
+}
+
+static __rte_always_inline void
+mark_fwd_latency(struct rte_event *const ev,
+ const uint8_t nb_stages)
+{
+ if (unlikely((ev->queue_id % nb_stages) == 0)) {
+ struct perf_elt *const m = ev->event_ptr;
+
+ m->timestamp = rte_get_timer_cycles();
+ }
+}
+
+static __rte_always_inline void
+fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
+ const uint8_t nb_stages)
+{
+ ev->queue_id++;
+ ev->sched_type = sched_type_list[ev->queue_id % nb_stages];
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+}
+
+static int
+perf_queue_worker(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ struct rte_event ev;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+ if (enable_fwd_latency && !prod_timer_type)
+ /* first q in pipeline, mark timestamp to compute fwd latency */
+ mark_fwd_latency(&ev, nb_stages);
+
+ /* last stage in pipeline */
+ if (unlikely((ev.queue_id % nb_stages) == laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(pool,
+ &ev, w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool,
+ &ev, w, bufs, sz, cnt);
+ } else {
+ fwd_event(&ev, sched_type_list, nb_stages);
+ while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
+ rte_pause();
+ }
+ }
+ return 0;
+}
+
+static int
+perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
+{
+ PERF_WORKER_INIT;
+ uint16_t i;
+ /* +1 so the prefetch of ev[i + 1] never reads past the array */
+ struct rte_event ev[BURST_SIZE + 1];
+
+ while (t->done == false) {
+ uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ if (enable_fwd_latency && !prod_timer_type) {
+ rte_prefetch0(ev[i+1].event_ptr);
+ /* first queue in pipeline.
+ * mark time stamp to compute fwd latency
+ */
+ mark_fwd_latency(&ev[i], nb_stages);
+ }
+ /* last stage in pipeline */
+ if (unlikely((ev[i].queue_id % nb_stages) ==
+ laststage)) {
+ if (enable_fwd_latency)
+ cnt = perf_process_last_stage_latency(
+ pool, &ev[i], w, bufs, sz, cnt);
+ else
+ cnt = perf_process_last_stage(pool,
+ &ev[i], w, bufs, sz, cnt);
+
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ } else {
+ fwd_event(&ev[i], sched_type_list, nb_stages);
+ }
+ }
+
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+ }
+ return 0;
+}
+
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ struct evt_options *opt = w->t->opt;
+
+ const bool burst = evt_has_burst_mode(w->dev_id);
+ const int fwd_latency = opt->fwd_latency;
+
+ /* allow compiler to optimize */
+ if (!burst && !fwd_latency)
+ return perf_queue_worker(arg, 0);
+ else if (!burst && fwd_latency)
+ return perf_queue_worker(arg, 1);
+ else if (burst && !fwd_latency)
+ return perf_queue_worker_burst(arg, 0);
+ else if (burst && fwd_latency)
+ return perf_queue_worker_burst(arg, 1);
+
+ rte_panic("invalid worker\n");
+}
+
+static int
+perf_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return perf_launch_lcores(test, opt, worker_wrapper);
+}
+
+static int
+perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ uint8_t queue;
+ int nb_stages = opt->nb_stages;
+ int ret;
+ int nb_ports;
+ int nb_queues;
+ uint16_t prod;
+ struct rte_event_dev_info dev_info;
+ struct test_perf *t = evt_test_priv(test);
+
+ nb_ports = evt_nr_active_lcores(opt->wlcores);
+ nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
+ opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
+ evt_nr_active_lcores(opt->plcores);
+
+ nb_queues = perf_queue_nb_event_queues(opt);
+
+ memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
+ ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (ret) {
+ evt_err("failed to get eventdev info %d", opt->dev_id);
+ return ret;
+ }
+
+ ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ struct rte_event_queue_conf q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ /* queue configurations */
+ for (queue = 0; queue < nb_queues; queue++) {
+ q_conf.schedule_type =
+ (opt->sched_type_list[queue % nb_stages]);
+
+ if (opt->q_priority) {
+ uint8_t stage_pos = queue % nb_stages;
+ /* Configure event queues(stage 0 to stage n) with
+ * RTE_EVENT_DEV_PRIORITY_LOWEST to
+ * RTE_EVENT_DEV_PRIORITY_HIGHEST.
+ */
+ uint8_t step = RTE_EVENT_DEV_PRIORITY_LOWEST /
+ (nb_stages - 1);
+ /* Higher prio for the queues closer to last stage */
+ q_conf.priority = RTE_EVENT_DEV_PRIORITY_LOWEST -
+ (step * stage_pos);
+ }
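+ /* For example, with nb_stages == 3 and the default
+ * RTE_EVENT_DEV_PRIORITY_LOWEST of 255 this yields
+ * priorities 255, 128 and 1 for stages 0, 1 and 2.
+ */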
+ ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
+ if (ret) {
+ evt_err("failed to setup queue=%d", queue);
+ return ret;
+ }
+ }
+
+ if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+ /* port configuration */
+ const struct rte_event_port_conf p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = dev_info.max_event_port_dequeue_depth,
+ .new_event_threshold = dev_info.max_num_events,
+ };
+
+ ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
+ nb_queues, &p_conf);
+ if (ret)
+ return ret;
+
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ uint32_t service_id;
+ rte_event_dev_service_id_get(opt->dev_id, &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+ }
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_eth_dev_start(prod);
+ if (ret) {
+ evt_err("Ethernet dev [%d] failed to start. Using synthetic producer",
+ prod);
+ return ret;
+ }
+
+ ret = rte_event_eth_rx_adapter_start(prod);
+ if (ret) {
+ evt_err("Rx adapter[%d] start failed", prod);
+ return ret;
+ }
+ printf("%s: Port[%d] using Rx adapter[%d] started\n",
+ __func__, prod, prod);
+ }
+ } else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+ for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
+ ret = rte_event_timer_adapter_start(
+ t->timer_adptr[prod]);
+ if (ret) {
+ evt_err("failed to Start event timer adapter %d"
+ , prod);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void
+perf_queue_opt_dump(struct evt_options *opt)
+{
+ evt_dump_fwd_latency(opt);
+ perf_opt_dump(opt, perf_queue_nb_event_queues(opt));
+}
+
+static int
+perf_queue_opt_check(struct evt_options *opt)
+{
+ return perf_opt_check(opt, perf_queue_nb_event_queues(opt));
+}
+
+static bool
+perf_queue_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < perf_queue_nb_event_queues(opt) ||
+ dev_info.max_event_ports < perf_nb_event_ports(opt)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ perf_queue_nb_event_queues(opt),
+ dev_info.max_event_queues,
+ perf_nb_event_ports(opt), dev_info.max_event_ports);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct evt_test_ops perf_queue = {
+ .cap_check = perf_queue_capability_check,
+ .opt_check = perf_queue_opt_check,
+ .opt_dump = perf_queue_opt_dump,
+ .test_setup = perf_test_setup,
+ .mempool_setup = perf_mempool_setup,
+ .ethdev_setup = perf_ethdev_setup,
+ .eventdev_setup = perf_queue_eventdev_setup,
+ .launch_lcores = perf_queue_launch_lcores,
+ .eventdev_destroy = perf_eventdev_destroy,
+ .mempool_destroy = perf_mempool_destroy,
+ .ethdev_destroy = perf_ethdev_destroy,
+ .test_result = perf_test_result,
+ .test_destroy = perf_test_destroy,
+};
+
+EVT_TEST_REGISTER(perf_queue);
diff --git a/src/spdk/dpdk/app/test-eventdev/test_pipeline_atq.c b/src/spdk/dpdk/app/test-eventdev/test_pipeline_atq.c
new file mode 100644
index 000000000..8e8686c14
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_pipeline_atq.c
@@ -0,0 +1,518 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Cavium, Inc.
+ */
+
+#include "test_pipeline_common.h"
+
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
+
+static __rte_always_inline int
+pipeline_atq_nb_event_queues(struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+
+ return rte_eth_dev_count_avail();
+}
+
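+/*
+ * Worker variants: the *_tx workers transmit directly through the event
+ * device (internal TX port capability), while the *_fwd workers re-enqueue
+ * the event to the per-ethdev TX event queue recorded in tx_evqueue_id.
+ * The *_burst_* variants do the same on dequeue bursts of BURST_SIZE events.
+ */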
+static __rte_noinline int
+pipeline_atq_worker_single_stage_tx(void *arg)
+{
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ pipeline_event_tx(dev, port, &ev);
+ w->processed_pkts++;
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_atq_worker_single_stage_fwd(void *arg)
+{
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ ev.queue_id = tx_queue[ev.mbuf->port];
+ pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ pipeline_event_enqueue(dev, port, &ev);
+ w->processed_pkts++;
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_atq_worker_single_stage_burst_tx(void *arg)
+{
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
+ }
+
+ pipeline_event_tx_burst(dev, port, ev, nb_rx);
+ w->processed_pkts += nb_rx;
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_atq_worker_single_stage_burst_fwd(void *arg)
+{
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
+ pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ w->processed_pkts += nb_rx;
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_atq_worker_multi_stage_tx(void *arg)
+{
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ cq_id = ev.sub_event_type % nb_stages;
+
+ if (cq_id == last_queue) {
+ pipeline_event_tx(dev, port, &ev);
+ w->processed_pkts++;
+ continue;
+ }
+
+ ev.sub_event_type++;
+ pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+ pipeline_event_enqueue(dev, port, &ev);
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_atq_worker_multi_stage_fwd(void *arg)
+{
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ cq_id = ev.sub_event_type % nb_stages;
+
+ if (cq_id == last_queue) {
+ ev.queue_id = tx_queue[ev.mbuf->port];
+ pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ w->processed_pkts++;
+ } else {
+ ev.sub_event_type++;
+ pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+ }
+
+ pipeline_event_enqueue(dev, port, &ev);
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_atq_worker_multi_stage_burst_tx(void *arg)
+{
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ cq_id = ev[i].sub_event_type % nb_stages;
+
+ if (cq_id == last_queue) {
+ pipeline_event_tx(dev, port, &ev[i]);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ continue;
+ }
+
+ ev[i].sub_event_type++;
+ pipeline_fwd_event(&ev[i], sched_type_list[cq_id]);
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
+{
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ cq_id = ev[i].sub_event_type % nb_stages;
+
+ if (cq_id == last_queue) {
+ w->processed_pkts++;
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
+ pipeline_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ } else {
+ ev[i].sub_event_type++;
+ pipeline_fwd_event(&ev[i],
+ sched_type_list[cq_id]);
+ }
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ }
+
+ return 0;
+}
+
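+/*
+ * Select the worker variant from the device's burst-dequeue capability and
+ * from whether every ethdev has Tx adapter internal port capability: with an
+ * internal port the worker transmits directly (tx variants), otherwise it
+ * forwards the event to the Tx adapter's single-link queue (fwd variants).
+ */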
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ struct evt_options *opt = w->t->opt;
+ const bool burst = evt_has_burst_mode(w->dev_id);
+ const bool internal_port = w->t->internal_port;
+ const uint8_t nb_stages = opt->nb_stages;
+ RTE_SET_USED(opt);
+
+ if (nb_stages == 1) {
+ if (!burst && internal_port)
+ return pipeline_atq_worker_single_stage_tx(arg);
+ else if (!burst && !internal_port)
+ return pipeline_atq_worker_single_stage_fwd(arg);
+ else if (burst && internal_port)
+ return pipeline_atq_worker_single_stage_burst_tx(arg);
+ else if (burst && !internal_port)
+ return pipeline_atq_worker_single_stage_burst_fwd(arg);
+ } else {
+ if (!burst && internal_port)
+ return pipeline_atq_worker_multi_stage_tx(arg);
+ else if (!burst && !internal_port)
+ return pipeline_atq_worker_multi_stage_fwd(arg);
+ if (burst && internal_port)
+ return pipeline_atq_worker_multi_stage_burst_tx(arg);
+ else if (burst && !internal_port)
+ return pipeline_atq_worker_multi_stage_burst_fwd(arg);
+ }
+
+ rte_panic("invalid worker\n");
+}
+
+static int
+pipeline_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return pipeline_launch_lcores(test, opt, worker_wrapper);
+}
+
+static int
+pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+ int nb_ports;
+ int nb_queues;
+ uint8_t queue;
+ uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
+ uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t nb_worker_queues = 0;
+ uint8_t tx_evport_id = 0;
+ uint16_t prod = 0;
+ struct rte_event_dev_info info;
+ struct test_pipeline *t = evt_test_priv(test);
+
+ nb_ports = evt_nr_active_lcores(opt->wlcores);
+ nb_queues = rte_eth_dev_count_avail();
+
+ memset(tx_evqueue_id, 0, sizeof(uint8_t) * RTE_MAX_ETHPORTS);
+ memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);
+ /* One queue for Tx adapter per port */
+ if (!t->internal_port) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ tx_evqueue_id[prod] = nb_queues;
+ nb_queues++;
+ }
+ }
+
+ rte_event_dev_info_get(opt->dev_id, &info);
+
+ ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ struct rte_event_queue_conf q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ /* queue configurations */
+ for (queue = 0; queue < nb_queues; queue++) {
+ q_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+ if (!t->internal_port) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ if (queue == tx_evqueue_id[prod]) {
+ q_conf.event_queue_cfg =
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+ } else {
+ queue_arr[nb_worker_queues] = queue;
+ nb_worker_queues++;
+ }
+ }
+ }
+
+ ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
+ if (ret) {
+ evt_err("failed to setup queue=%d", queue);
+ return ret;
+ }
+ }
+
+ if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = info.max_event_port_dequeue_depth;
+
+ /* port configuration */
+ const struct rte_event_port_conf p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = info.max_event_port_dequeue_depth,
+ .new_event_threshold = info.max_num_events,
+ };
+
+ if (!t->internal_port)
+ ret = pipeline_event_port_setup(test, opt, queue_arr,
+ nb_worker_queues, p_conf);
+ else
+ ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
+ p_conf);
+
+ if (ret)
+ return ret;
+
+ /*
+ * The pipelines are set up in the following manner:
+ *
+ * eth_dev_count = 2, nb_stages = 2, atq mode
+ *
+ * eth0, eth1 have Internal port capability:
+ * queues = 2
+ * stride = 1
+ *
+ * event queue pipelines:
+ * eth0 -> q0 -> Tx
+ * eth1 -> q1 -> Tx
+ *
+ * q0, q1 are configured as ATQ, so all the different stages can
+ * be enqueued on the same queue.
+ *
+ * eth0, eth1 use the Tx adapter service core:
+ * queues = 4
+ * stride = 1
+ *
+ * event queue pipelines:
+ * eth0 -> q0 -> q2 -> Tx
+ * eth1 -> q1 -> q3 -> Tx
+ *
+ * q0, q1 are configured as stated above.
+ * q2, q3 are configured as SINGLE_LINK.
+ */
+ ret = pipeline_event_rx_adapter_setup(opt, 1, p_conf);
+ if (ret)
+ return ret;
+ ret = pipeline_event_tx_adapter_setup(opt, p_conf);
+ if (ret)
+ return ret;
+
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ uint32_t service_id;
+ rte_event_dev_service_id_get(opt->dev_id, &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+ }
+
+ /* Connect the tx_evqueue_id to the Tx adapter port */
+ if (!t->internal_port) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_tx_adapter_event_port_get(prod,
+ &tx_evport_id);
+ if (ret) {
+ evt_err("Unable to get Tx adapter[%d]", prod);
+ return ret;
+ }
+
+ if (rte_event_port_link(opt->dev_id, tx_evport_id,
+ &tx_evqueue_id[prod],
+ NULL, 1) != 1) {
+ evt_err("Unable to link Tx adptr[%d] evprt[%d]",
+ prod, tx_evport_id);
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_eth_dev_start(prod);
+ if (ret) {
+ evt_err("Ethernet dev [%d] failed to start."
+ " Using synthetic producer", prod);
+ return ret;
+ }
+ }
+
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_rx_adapter_start(prod);
+ if (ret) {
+ evt_err("Rx adapter[%d] start failed", prod);
+ return ret;
+ }
+
+ ret = rte_event_eth_tx_adapter_start(prod);
+ if (ret) {
+ evt_err("Tx adapter[%d] start failed", prod);
+ return ret;
+ }
+ }
+
+ memcpy(t->tx_evqueue_id, tx_evqueue_id, sizeof(uint8_t) *
+ RTE_MAX_ETHPORTS);
+
+ return 0;
+}
+
+static void
+pipeline_atq_opt_dump(struct evt_options *opt)
+{
+ pipeline_opt_dump(opt, pipeline_atq_nb_event_queues(opt));
+}
+
+static int
+pipeline_atq_opt_check(struct evt_options *opt)
+{
+ return pipeline_opt_check(opt, pipeline_atq_nb_event_queues(opt));
+}
+
+static bool
+pipeline_atq_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < pipeline_atq_nb_event_queues(opt) ||
+ dev_info.max_event_ports <
+ evt_nr_active_lcores(opt->wlcores)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ pipeline_atq_nb_event_queues(opt),
+ dev_info.max_event_queues,
+ evt_nr_active_lcores(opt->wlcores),
+ dev_info.max_event_ports);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct evt_test_ops pipeline_atq = {
+ .cap_check = pipeline_atq_capability_check,
+ .opt_check = pipeline_atq_opt_check,
+ .opt_dump = pipeline_atq_opt_dump,
+ .test_setup = pipeline_test_setup,
+ .mempool_setup = pipeline_mempool_setup,
+ .ethdev_setup = pipeline_ethdev_setup,
+ .eventdev_setup = pipeline_atq_eventdev_setup,
+ .launch_lcores = pipeline_atq_launch_lcores,
+ .eventdev_destroy = pipeline_eventdev_destroy,
+ .mempool_destroy = pipeline_mempool_destroy,
+ .ethdev_destroy = pipeline_ethdev_destroy,
+ .test_result = pipeline_test_result,
+ .test_destroy = pipeline_test_destroy,
+};
+
+EVT_TEST_REGISTER(pipeline_atq);
diff --git a/src/spdk/dpdk/app/test-eventdev/test_pipeline_common.c b/src/spdk/dpdk/app/test-eventdev/test_pipeline_common.c
new file mode 100644
index 000000000..17088b1b4
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_pipeline_common.c
@@ -0,0 +1,534 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Cavium, Inc.
+ */
+
+#include "test_pipeline_common.h"
+
+int
+pipeline_test_result(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ int i;
+ uint64_t total = 0;
+ struct test_pipeline *t = evt_test_priv(test);
+
+ evt_info("Packet distribution across worker cores :");
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].processed_pkts;
+ for (i = 0; i < t->nb_workers; i++)
+ evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:"
+ CLGRN" %3.2f"CLNRM, i,
+ t->worker[i].processed_pkts,
+ (((double)t->worker[i].processed_pkts)/total)
+ * 100);
+ return t->result;
+}
+
+void
+pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
+{
+ evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump_worker_lcores(opt);
+ evt_dump_nb_stages(opt);
+ evt_dump("nb_evdev_ports", "%d", pipeline_nb_event_ports(opt));
+ evt_dump("nb_evdev_queues", "%d", nb_queues);
+ evt_dump_queue_priority(opt);
+ evt_dump_sched_type_list(opt);
+ evt_dump_producer_type(opt);
+}
+
+static inline uint64_t
+processed_pkts(struct test_pipeline *t)
+{
+ uint8_t i;
+ uint64_t total = 0;
+
+ rte_smp_rmb();
+ for (i = 0; i < t->nb_workers; i++)
+ total += t->worker[i].processed_pkts;
+
+ return total;
+}
+
+int
+pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *))
+{
+ int ret, lcore_id;
+ struct test_pipeline *t = evt_test_priv(test);
+
+ int port_idx = 0;
+ /* launch workers */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (!(opt->wlcores[lcore_id]))
+ continue;
+
+ ret = rte_eal_remote_launch(worker,
+ &t->worker[port_idx], lcore_id);
+ if (ret) {
+ evt_err("failed to launch worker %d", lcore_id);
+ return ret;
+ }
+ port_idx++;
+ }
+
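+ /* The master lcore samples the processed packet count roughly once per
+ * second and prints the current and running-average rate in Mpps.
+ */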
+ uint64_t perf_cycles = rte_get_timer_cycles();
+ const uint64_t perf_sample = rte_get_timer_hz();
+
+ static float total_mpps;
+ static uint64_t samples;
+
+ uint64_t prev_pkts = 0;
+
+ while (t->done == false) {
+ const uint64_t new_cycles = rte_get_timer_cycles();
+
+ if ((new_cycles - perf_cycles) > perf_sample) {
+ const uint64_t curr_pkts = processed_pkts(t);
+
+ float mpps = (float)(curr_pkts - prev_pkts)/1000000;
+
+ prev_pkts = curr_pkts;
+ perf_cycles = new_cycles;
+ total_mpps += mpps;
+ ++samples;
+ printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
+ mpps, total_mpps/samples);
+ fflush(stdout);
+ }
+ }
+ printf("\n");
+ return 0;
+}
+
+int
+pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
+{
+ unsigned int lcores;
+ /*
+ * N worker lcores + 1 master lcore
+ */
+ lcores = 2;
+
+ if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
+ evt_err("Invalid producer type '%s' valid producer '%s'",
+ evt_prod_id_to_name(opt->prod_type),
+ evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
+ return -1;
+ }
+
+ if (!rte_eth_dev_count_avail()) {
+ evt_err("test needs minimum 1 ethernet dev");
+ return -1;
+ }
+
+ if (rte_lcore_count() < lcores) {
+ evt_err("test need minimum %d lcores", lcores);
+ return -1;
+ }
+
+ /* Validate worker lcores */
+ if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
+ evt_err("worker lcores overlaps with master lcore");
+ return -1;
+ }
+ if (evt_has_disabled_lcore(opt->wlcores)) {
+ evt_err("one or more workers lcores are not enabled");
+ return -1;
+ }
+ if (!evt_has_active_lcore(opt->wlcores)) {
+ evt_err("minimum one worker is required");
+ return -1;
+ }
+
+ if (nb_queues > EVT_MAX_QUEUES) {
+ evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
+ return -1;
+ }
+ if (pipeline_nb_event_ports(opt) > EVT_MAX_PORTS) {
+ evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
+ return -1;
+ }
+
+ if (evt_has_invalid_stage(opt))
+ return -1;
+
+ if (evt_has_invalid_sched_type(opt))
+ return -1;
+
+ return 0;
+}
+
+#define NB_RX_DESC 128
+#define NB_TX_DESC 512
+int
+pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i;
+ int ret;
+ uint8_t nb_queues = 1;
+ struct test_pipeline *t = evt_test_priv(test);
+ struct rte_eth_rxconf rx_conf;
+ struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_hf = ETH_RSS_IP,
+ },
+ },
+ };
+
+ if (!rte_eth_dev_count_avail()) {
+ evt_err("No ethernet ports found.");
+ return -ENODEV;
+ }
+
+ if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
+ evt_err("max_pkt_sz can not be less than %d",
+ RTE_ETHER_MIN_LEN);
+ return -EINVAL;
+ }
+
+ port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
+ if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+
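+ /* Assume Tx adapter internal port capability on every ethdev; clear the
+ * flag below if any port lacks it, so that the forwarding data path via
+ * the SINGLE_LINK Tx queues is used instead of direct Tx from workers.
+ */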
+ t->internal_port = 1;
+ RTE_ETH_FOREACH_DEV(i) {
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_conf local_port_conf = port_conf;
+ uint32_t caps = 0;
+
+ ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
+ if (ret != 0) {
+ evt_err("failed to get event tx adapter[%d] caps", i);
+ return ret;
+ }
+
+ if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+ t->internal_port = 0;
+
+ ret = rte_eth_dev_info_get(i, &dev_info);
+ if (ret != 0) {
+ evt_err("Error during getting device (port %u) info: %s\n",
+ i, strerror(-ret));
+ return ret;
+ }
+
+ rx_conf = dev_info.default_rxconf;
+ rx_conf.offloads = port_conf.rxmode.offloads;
+
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ evt_info("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"",
+ i,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
+ if (rte_eth_dev_configure(i, nb_queues, nb_queues,
+ &local_port_conf)
+ < 0) {
+ evt_err("Failed to configure eth port [%d]", i);
+ return -EINVAL;
+ }
+
+ if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
+ rte_socket_id(), &rx_conf, t->pool) < 0) {
+ evt_err("Failed to setup eth port [%d] rx_queue: %d.",
+ i, 0);
+ return -EINVAL;
+ }
+ if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
+ rte_socket_id(), NULL) < 0) {
+ evt_err("Failed to setup eth port [%d] tx_queue: %d.",
+ i, 0);
+ return -EINVAL;
+ }
+
+ ret = rte_eth_promiscuous_enable(i);
+ if (ret != 0) {
+ evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
+ i, rte_strerror(-ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int
+pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t *queue_arr, uint8_t nb_queues,
+ const struct rte_event_port_conf p_conf)
+{
+ int ret;
+ uint8_t port;
+ struct test_pipeline *t = evt_test_priv(test);
+
+
+ /* setup one port per worker, linking to all queues */
+ for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
+ struct worker_data *w = &t->worker[port];
+
+ w->dev_id = opt->dev_id;
+ w->port_id = port;
+ w->t = t;
+ w->processed_pkts = 0;
+
+ ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
+ if (ret) {
+ evt_err("failed to setup port %d", port);
+ return ret;
+ }
+
+ if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
+ nb_queues) != nb_queues)
+ goto link_fail;
+ }
+
+ return 0;
+
+link_fail:
+ evt_err("failed to link queues to port %d", port);
+ return -EINVAL;
+}
+
+int
+pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
+ struct rte_event_port_conf prod_conf)
+{
+ int ret = 0;
+ uint16_t prod;
+ struct rte_event_eth_rx_adapter_queue_conf queue_conf;
+
+ memset(&queue_conf, 0,
+ sizeof(struct rte_event_eth_rx_adapter_queue_conf));
+ queue_conf.ev.sched_type = opt->sched_type_list[0];
+ RTE_ETH_FOREACH_DEV(prod) {
+ uint32_t cap;
+
+ ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
+ prod, &cap);
+ if (ret) {
+ evt_err("failed to get event rx adapter[%d]"
+ " capabilities",
+ opt->dev_id);
+ return ret;
+ }
+ queue_conf.ev.queue_id = prod * stride;
+ ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
+ &prod_conf);
+ if (ret) {
+ evt_err("failed to create rx adapter[%d]", prod);
+ return ret;
+ }
+ ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
+ &queue_conf);
+ if (ret) {
+ evt_err("failed to add rx queues to adapter[%d]", prod);
+ return ret;
+ }
+
+ if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+ uint32_t service_id = -1U;
+
+ rte_event_eth_rx_adapter_service_id_get(prod,
+ &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("Failed to setup service core"
+ " for Rx adapter");
+ return ret;
+ }
+ }
+
+ evt_info("Port[%d] using Rx adapter[%d] configured", prod,
+ prod);
+ }
+
+ return ret;
+}
+
+int
+pipeline_event_tx_adapter_setup(struct evt_options *opt,
+ struct rte_event_port_conf port_conf)
+{
+ int ret = 0;
+ uint16_t consm;
+
+ RTE_ETH_FOREACH_DEV(consm) {
+ uint32_t cap;
+
+ ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
+ consm, &cap);
+ if (ret) {
+ evt_err("failed to get event tx adapter[%d] caps",
+ consm);
+ return ret;
+ }
+
+ ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
+ &port_conf);
+ if (ret) {
+ evt_err("failed to create tx adapter[%d]", consm);
+ return ret;
+ }
+
+ ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
+ if (ret) {
+ evt_err("failed to add tx queues to adapter[%d]",
+ consm);
+ return ret;
+ }
+
+ if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
+ uint32_t service_id = -1U;
+
+ ret = rte_event_eth_tx_adapter_service_id_get(consm,
+ &service_id);
+ if (ret != -ESRCH && ret != 0) {
+ evt_err("Failed to get Tx adptr service ID");
+ return ret;
+ }
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("Failed to setup service core"
+ " for Tx adapter");
+ return ret;
+ }
+ }
+
+ evt_info("Port[%d] using Tx adapter[%d] Configured", consm,
+ consm);
+ }
+
+ return ret;
+}
+
+void
+pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ uint16_t i;
+ RTE_SET_USED(test);
+ RTE_SET_USED(opt);
+
+ RTE_ETH_FOREACH_DEV(i) {
+ rte_event_eth_rx_adapter_stop(i);
+ rte_event_eth_tx_adapter_stop(i);
+ rte_eth_dev_stop(i);
+ }
+}
+
+void
+pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(test);
+
+ rte_event_dev_stop(opt->dev_id);
+ rte_event_dev_close(opt->dev_id);
+}
+
+int
+pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
+{
+ struct test_pipeline *t = evt_test_priv(test);
+ int i, ret;
+
+ if (!opt->mbuf_sz)
+ opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;
+
+ if (!opt->max_pkt_sz)
+ opt->max_pkt_sz = RTE_ETHER_MAX_LEN;
+
+ RTE_ETH_FOREACH_DEV(i) {
+ struct rte_eth_dev_info dev_info;
+ uint16_t data_size = 0;
+
+ memset(&dev_info, 0, sizeof(dev_info));
+ ret = rte_eth_dev_info_get(i, &dev_info);
+ if (ret != 0) {
+ evt_err("Error during getting device (port %u) info: %s\n",
+ i, strerror(-ret));
+ return ret;
+ }
+
+ if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
+ dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
+ data_size = opt->max_pkt_sz /
+ dev_info.rx_desc_lim.nb_mtu_seg_max;
+ data_size += RTE_PKTMBUF_HEADROOM;
+
+ if (data_size > opt->mbuf_sz)
+ opt->mbuf_sz = data_size;
+ }
+ }
+
+ t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
+ opt->pool_sz, /* number of elements */
+ 512, /* cache size */
+ 0, /* private data size */
+ opt->mbuf_sz, /* data room size */
+ opt->socket_id); /* socket id */
+
+ if (t->pool == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+ struct test_pipeline *t = evt_test_priv(test);
+
+ rte_mempool_free(t->pool);
+}
+
+int
+pipeline_test_setup(struct evt_test *test, struct evt_options *opt)
+{
+ void *test_pipeline;
+
+ test_pipeline = rte_zmalloc_socket(test->name,
+ sizeof(struct test_pipeline), RTE_CACHE_LINE_SIZE,
+ opt->socket_id);
+ if (test_pipeline == NULL) {
+ evt_err("failed to allocate test_pipeline memory");
+ goto nomem;
+ }
+ test->test_priv = test_pipeline;
+
+ struct test_pipeline *t = evt_test_priv(test);
+
+ t->nb_workers = evt_nr_active_lcores(opt->wlcores);
+ t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->wlcores);
+ t->done = false;
+ t->nb_flows = opt->nb_flows;
+ t->result = EVT_TEST_FAILED;
+ t->opt = opt;
+ opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR;
+ memcpy(t->sched_type_list, opt->sched_type_list,
+ sizeof(opt->sched_type_list));
+ return 0;
+nomem:
+ return -ENOMEM;
+}
+
+void
+pipeline_test_destroy(struct evt_test *test, struct evt_options *opt)
+{
+ RTE_SET_USED(opt);
+
+ rte_free(test->test_priv);
+}
diff --git a/src/spdk/dpdk/app/test-eventdev/test_pipeline_common.h b/src/spdk/dpdk/app/test-eventdev/test_pipeline_common.h
new file mode 100644
index 000000000..6e73c6ab2
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_pipeline_common.h
@@ -0,0 +1,173 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Cavium, Inc.
+ */
+
+#ifndef _TEST_PIPELINE_COMMON_
+#define _TEST_PIPELINE_COMMON_
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_prefetch.h>
+#include <rte_spinlock.h>
+#include <rte_service.h>
+#include <rte_service_component.h>
+
+#include "evt_common.h"
+#include "evt_options.h"
+#include "evt_test.h"
+
+struct test_pipeline;
+
+struct worker_data {
+ uint64_t processed_pkts;
+ uint8_t dev_id;
+ uint8_t port_id;
+ struct test_pipeline *t;
+} __rte_cache_aligned;
+
+struct test_pipeline {
+ /* Don't change the offset of "done". The signal handler uses this memory
+ * to terminate all lcores' work.
+ */
+ int done;
+ uint8_t nb_workers;
+ uint8_t internal_port;
+ uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
+ enum evt_test_result result;
+ uint32_t nb_flows;
+ uint64_t outstand_pkts;
+ struct rte_mempool *pool;
+ struct worker_data worker[EVT_MAX_PORTS];
+ struct evt_options *opt;
+ uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
+} __rte_cache_aligned;
+
+#define BURST_SIZE 16
+
+#define PIPELINE_WORKER_SINGLE_STAGE_INIT \
+ struct worker_data *w = arg; \
+ struct test_pipeline *t = w->t; \
+ const uint8_t dev = w->dev_id; \
+ const uint8_t port = w->port_id; \
+ struct rte_event ev __rte_cache_aligned
+
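+/*
+ * The burst variants size the event array as BURST_SIZE + 1 so that workers
+ * can prefetch ev[i + 1].mbuf in the dequeue loop without reading past the
+ * end of the array.
+ */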
+#define PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT \
+ int i; \
+ struct worker_data *w = arg; \
+ struct test_pipeline *t = w->t; \
+ const uint8_t dev = w->dev_id; \
+ const uint8_t port = w->port_id; \
+ struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned
+
+#define PIPELINE_WORKER_MULTI_STAGE_INIT \
+ struct worker_data *w = arg; \
+ struct test_pipeline *t = w->t; \
+ uint8_t cq_id; \
+ const uint8_t dev = w->dev_id; \
+ const uint8_t port = w->port_id; \
+ const uint8_t last_queue = t->opt->nb_stages - 1; \
+ uint8_t *const sched_type_list = &t->sched_type_list[0]; \
+ const uint8_t nb_stages = t->opt->nb_stages + 1; \
+ struct rte_event ev __rte_cache_aligned
+
+#define PIPELINE_WORKER_MULTI_STAGE_BURST_INIT \
+ int i; \
+ struct worker_data *w = arg; \
+ struct test_pipeline *t = w->t; \
+ uint8_t cq_id; \
+ const uint8_t dev = w->dev_id; \
+ const uint8_t port = w->port_id; \
+ const uint8_t last_queue = t->opt->nb_stages - 1; \
+ uint8_t *const sched_type_list = &t->sched_type_list[0]; \
+ const uint8_t nb_stages = t->opt->nb_stages + 1; \
+ struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned
+
+static __rte_always_inline void
+pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
+{
+ ev->event_type = RTE_EVENT_TYPE_CPU;
+ ev->op = RTE_EVENT_OP_FORWARD;
+ ev->sched_type = sched;
+}
+
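+/*
+ * The Tx/enqueue helpers below retry until the whole burst is accepted, as
+ * the event device or Tx adapter may temporarily backpressure enqueues.
+ */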
+static __rte_always_inline void
+pipeline_event_tx(const uint8_t dev, const uint8_t port,
+ struct rte_event * const ev)
+{
+ rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
+ while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
+ rte_pause();
+}
+
+static __rte_always_inline void
+pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
+ struct rte_event *ev, const uint16_t nb_rx)
+{
+ uint16_t enq;
+
+ enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx, 0);
+ while (enq < nb_rx) {
+ enq += rte_event_eth_tx_adapter_enqueue(dev, port,
+ ev + enq, nb_rx - enq, 0);
+ }
+}
+
+static __rte_always_inline void
+pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
+ struct rte_event *ev)
+{
+ while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
+ rte_pause();
+}
+
+static __rte_always_inline void
+pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
+ struct rte_event *ev, const uint16_t nb_rx)
+{
+ uint16_t enq;
+
+ enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
+ while (enq < nb_rx) {
+ enq += rte_event_enqueue_burst(dev, port,
+ ev + enq, nb_rx - enq);
+ }
+}
+
+static inline int
+pipeline_nb_event_ports(struct evt_options *opt)
+{
+ return evt_nr_active_lcores(opt->wlcores);
+}
+
+int pipeline_test_result(struct evt_test *test, struct evt_options *opt);
+int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues);
+int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
+int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt);
+int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
+ struct rte_event_port_conf prod_conf);
+int pipeline_event_tx_adapter_setup(struct evt_options *opt,
+ struct rte_event_port_conf prod_conf);
+int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt);
+int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
+ uint8_t *queue_arr, uint8_t nb_queues,
+ const struct rte_event_port_conf p_conf);
+int pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
+ int (*worker)(void *));
+void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
+void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
+void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
+void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
+void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);
+
+#endif /* _TEST_PIPELINE_COMMON_ */
diff --git a/src/spdk/dpdk/app/test-eventdev/test_pipeline_queue.c b/src/spdk/dpdk/app/test-eventdev/test_pipeline_queue.c
new file mode 100644
index 000000000..7bebac34f
--- /dev/null
+++ b/src/spdk/dpdk/app/test-eventdev/test_pipeline_queue.c
@@ -0,0 +1,533 @@
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 Cavium, Inc.
+ */
+
+#include "test_pipeline_common.h"
+
+/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
+
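+/*
+ * Illustrative invocation (example arguments only):
+ * dpdk-test-eventdev -l 0-3 -- --test=pipeline_queue --wlcores=1-3 \
+ * --stlist=ao --prod_type_ethdev
+ */
+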
+static __rte_always_inline int
+pipeline_queue_nb_event_queues(struct evt_options *opt)
+{
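+ /* nb_stages worker queues plus one Tx queue per ethdev */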
+ uint16_t eth_count = rte_eth_dev_count_avail();
+
+ return (eth_count * opt->nb_stages) + eth_count;
+}
+
+static __rte_noinline int
+pipeline_queue_worker_single_stage_tx(void *arg)
+{
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ pipeline_event_tx(dev, port, &ev);
+ w->processed_pkts++;
+ } else {
+ ev.queue_id++;
+ pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ pipeline_event_enqueue(dev, port, &ev);
+ }
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_queue_worker_single_stage_fwd(void *arg)
+{
+ PIPELINE_WORKER_SINGLE_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ ev.queue_id = tx_queue[ev.mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
+ pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ pipeline_event_enqueue(dev, port, &ev);
+ w->processed_pkts++;
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_queue_worker_single_stage_burst_tx(void *arg)
+{
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ pipeline_event_tx(dev, port, &ev[i]);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ } else {
+ ev[i].queue_id++;
+ pipeline_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ }
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_queue_worker_single_stage_burst_fwd(void *arg)
+{
+ PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
+ pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ w->processed_pkts += nb_rx;
+ }
+
+ return 0;
+}
+
+
+static __rte_noinline int
+pipeline_queue_worker_multi_stage_tx(void *arg)
+{
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ cq_id = ev.queue_id % nb_stages;
+
+ if (ev.queue_id == tx_queue[ev.mbuf->port]) {
+ pipeline_event_tx(dev, port, &ev);
+ w->processed_pkts++;
+ continue;
+ }
+
+ ev.queue_id++;
+ pipeline_fwd_event(&ev, cq_id != last_queue ?
+ sched_type_list[cq_id] :
+ RTE_SCHED_TYPE_ATOMIC);
+ pipeline_event_enqueue(dev, port, &ev);
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_queue_worker_multi_stage_fwd(void *arg)
+{
+ PIPELINE_WORKER_MULTI_STAGE_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+ if (!event) {
+ rte_pause();
+ continue;
+ }
+
+ cq_id = ev.queue_id % nb_stages;
+
+ if (cq_id == last_queue) {
+ ev.queue_id = tx_queue[ev.mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev.mbuf, 0);
+ pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+ w->processed_pkts++;
+ } else {
+ ev.queue_id++;
+ pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+ }
+
+ pipeline_event_enqueue(dev, port, &ev);
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_queue_worker_multi_stage_burst_tx(void *arg)
+{
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ cq_id = ev[i].queue_id % nb_stages;
+
+ if (ev[i].queue_id == tx_queue[ev[i].mbuf->port]) {
+ pipeline_event_tx(dev, port, &ev[i]);
+ ev[i].op = RTE_EVENT_OP_RELEASE;
+ w->processed_pkts++;
+ continue;
+ }
+
+ ev[i].queue_id++;
+ pipeline_fwd_event(&ev[i], cq_id != last_queue ?
+ sched_type_list[cq_id] :
+ RTE_SCHED_TYPE_ATOMIC);
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ }
+
+ return 0;
+}
+
+static __rte_noinline int
+pipeline_queue_worker_multi_stage_burst_fwd(void *arg)
+{
+ PIPELINE_WORKER_MULTI_STAGE_BURST_INIT;
+ const uint8_t *tx_queue = t->tx_evqueue_id;
+
+ while (t->done == false) {
+ uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+ BURST_SIZE, 0);
+
+ if (!nb_rx) {
+ rte_pause();
+ continue;
+ }
+
+ for (i = 0; i < nb_rx; i++) {
+ rte_prefetch0(ev[i + 1].mbuf);
+ cq_id = ev[i].queue_id % nb_stages;
+
+ if (cq_id == last_queue) {
+ ev[i].queue_id = tx_queue[ev[i].mbuf->port];
+ rte_event_eth_tx_adapter_txq_set(ev[i].mbuf, 0);
+ pipeline_fwd_event(&ev[i],
+ RTE_SCHED_TYPE_ATOMIC);
+ w->processed_pkts++;
+ } else {
+ ev[i].queue_id++;
+ pipeline_fwd_event(&ev[i],
+ sched_type_list[cq_id]);
+ }
+ }
+
+ pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+ }
+
+ return 0;
+}
+
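+/*
+ * Worker selection mirrors the ATQ test: burst vs. non-burst dequeue, and
+ * direct Tx (internal port) vs. forwarding to the Tx adapter queue.
+ */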
+static int
+worker_wrapper(void *arg)
+{
+ struct worker_data *w = arg;
+ struct evt_options *opt = w->t->opt;
+ const bool burst = evt_has_burst_mode(w->dev_id);
+ const bool internal_port = w->t->internal_port;
+ const uint8_t nb_stages = opt->nb_stages;
+ RTE_SET_USED(opt);
+
+ if (nb_stages == 1) {
+ if (!burst && internal_port)
+ return pipeline_queue_worker_single_stage_tx(arg);
+ else if (!burst && !internal_port)
+ return pipeline_queue_worker_single_stage_fwd(arg);
+ else if (burst && internal_port)
+ return pipeline_queue_worker_single_stage_burst_tx(arg);
+ else if (burst && !internal_port)
+ return pipeline_queue_worker_single_stage_burst_fwd(
+ arg);
+ } else {
+ if (!burst && internal_port)
+ return pipeline_queue_worker_multi_stage_tx(arg);
+ else if (!burst && !internal_port)
+ return pipeline_queue_worker_multi_stage_fwd(arg);
+ else if (burst && internal_port)
+ return pipeline_queue_worker_multi_stage_burst_tx(arg);
+ else if (burst && !internal_port)
+ return pipeline_queue_worker_multi_stage_burst_fwd(arg);
+ }
+
+ rte_panic("invalid worker\n");
+}
+
+static int
+pipeline_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
+{
+ return pipeline_launch_lcores(test, opt, worker_wrapper);
+}
+
+static int
+pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
+{
+ int ret;
+ int nb_ports;
+ int nb_queues;
+ int nb_stages = opt->nb_stages;
+ uint8_t queue;
+ uint8_t tx_evport_id = 0;
+ uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];
+ uint8_t queue_arr[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t nb_worker_queues = 0;
+ uint16_t prod = 0;
+ struct rte_event_dev_info info;
+ struct test_pipeline *t = evt_test_priv(test);
+
+ nb_ports = evt_nr_active_lcores(opt->wlcores);
+ nb_queues = rte_eth_dev_count_avail() * (nb_stages);
+
+ /* One queue for Tx adapter per port */
+ nb_queues += rte_eth_dev_count_avail();
+
+ memset(tx_evqueue_id, 0, sizeof(uint8_t) * RTE_MAX_ETHPORTS);
+ memset(queue_arr, 0, sizeof(uint8_t) * RTE_EVENT_MAX_QUEUES_PER_DEV);
+
+ rte_event_dev_info_get(opt->dev_id, &info);
+ ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
+ if (ret) {
+ evt_err("failed to configure eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+ struct rte_event_queue_conf q_conf = {
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+ .nb_atomic_flows = opt->nb_flows,
+ .nb_atomic_order_sequences = opt->nb_flows,
+ };
+ /* queue configurations */
+ for (queue = 0; queue < nb_queues; queue++) {
+ uint8_t slot;
+
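+ /* Queues are laid out per ethdev with a stride of nb_stages + 1; the
+ * last slot of each stride is the ATOMIC Tx queue (see the pipeline
+ * diagram further below).
+ */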
+ q_conf.event_queue_cfg = 0;
+ slot = queue % (nb_stages + 1);
+ if (slot == nb_stages) {
+ q_conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
+ if (!t->internal_port) {
+ q_conf.event_queue_cfg =
+ RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+ }
+ tx_evqueue_id[prod++] = queue;
+ } else {
+ q_conf.schedule_type = opt->sched_type_list[slot];
+ queue_arr[nb_worker_queues] = queue;
+ nb_worker_queues++;
+ }
+
+ ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
+ if (ret) {
+ evt_err("failed to setup queue=%d", queue);
+ return ret;
+ }
+ }
+
+ if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = info.max_event_port_dequeue_depth;
+
+ /* port configuration */
+ const struct rte_event_port_conf p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = info.max_event_port_dequeue_depth,
+ .new_event_threshold = info.max_num_events,
+ };
+
+ if (!t->internal_port) {
+ ret = pipeline_event_port_setup(test, opt, queue_arr,
+ nb_worker_queues, p_conf);
+ if (ret)
+ return ret;
+ } else
+ ret = pipeline_event_port_setup(test, opt, NULL, nb_queues,
+ p_conf);
+
+ if (ret)
+ return ret;
+ /*
+ * The pipelines are set up in the following manner:
+ *
+ * eth_dev_count = 2, nb_stages = 2.
+ *
+ * queues = 6
+ * stride = 3
+ *
+ * event queue pipelines:
+ * eth0 -> q0 -> q1 -> (q2 -> Tx)
+ * eth1 -> q3 -> q4 -> (q5 -> Tx)
+ *
+ * q2, q5 are configured as ATOMIC | SINGLE_LINK.
+ */
+ ret = pipeline_event_rx_adapter_setup(opt, nb_stages + 1, p_conf);
+ if (ret)
+ return ret;
+
+ ret = pipeline_event_tx_adapter_setup(opt, p_conf);
+ if (ret)
+ return ret;
+
+ if (!evt_has_distributed_sched(opt->dev_id)) {
+ uint32_t service_id;
+ rte_event_dev_service_id_get(opt->dev_id, &service_id);
+ ret = evt_service_setup(service_id);
+ if (ret) {
+ evt_err("No service lcore found to run event dev.");
+ return ret;
+ }
+ }
+
+ /* Connect the tx_evqueue_id to the Tx adapter port */
+ if (!t->internal_port) {
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_tx_adapter_event_port_get(prod,
+ &tx_evport_id);
+ if (ret) {
+ evt_err("Unable to get Tx adptr[%d] evprt[%d]",
+ prod, tx_evport_id);
+ return ret;
+ }
+
+ if (rte_event_port_link(opt->dev_id, tx_evport_id,
+ &tx_evqueue_id[prod],
+ NULL, 1) != 1) {
+ evt_err("Unable to link Tx adptr[%d] evprt[%d]",
+ prod, tx_evport_id);
+ return -EINVAL;
+ }
+ }
+ }
+
+ ret = rte_event_dev_start(opt->dev_id);
+ if (ret) {
+ evt_err("failed to start eventdev %d", opt->dev_id);
+ return ret;
+ }
+
+
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_eth_dev_start(prod);
+ if (ret) {
+ evt_err("Ethernet dev [%d] failed to start."
+ " Using synthetic producer", prod);
+ return ret;
+ }
+
+ }
+
+ RTE_ETH_FOREACH_DEV(prod) {
+ ret = rte_event_eth_rx_adapter_start(prod);
+ if (ret) {
+ evt_err("Rx adapter[%d] start failed", prod);
+ return ret;
+ }
+
+ ret = rte_event_eth_tx_adapter_start(prod);
+ if (ret) {
+ evt_err("Tx adapter[%d] start failed", prod);
+ return ret;
+ }
+ }
+
+ memcpy(t->tx_evqueue_id, tx_evqueue_id, sizeof(uint8_t) *
+ RTE_MAX_ETHPORTS);
+
+ return 0;
+}
+
+static void
+pipeline_queue_opt_dump(struct evt_options *opt)
+{
+ pipeline_opt_dump(opt, pipeline_queue_nb_event_queues(opt));
+}
+
+static int
+pipeline_queue_opt_check(struct evt_options *opt)
+{
+ return pipeline_opt_check(opt, pipeline_queue_nb_event_queues(opt));
+}
+
+static bool
+pipeline_queue_capability_check(struct evt_options *opt)
+{
+ struct rte_event_dev_info dev_info;
+
+ rte_event_dev_info_get(opt->dev_id, &dev_info);
+ if (dev_info.max_event_queues < pipeline_queue_nb_event_queues(opt) ||
+ dev_info.max_event_ports <
+ evt_nr_active_lcores(opt->wlcores)) {
+ evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
+ pipeline_queue_nb_event_queues(opt),
+ dev_info.max_event_queues,
+ evt_nr_active_lcores(opt->wlcores),
+ dev_info.max_event_ports);
+ return false;
+ }
+
+ return true;
+}
+
+static const struct evt_test_ops pipeline_queue = {
+ .cap_check = pipeline_queue_capability_check,
+ .opt_check = pipeline_queue_opt_check,
+ .opt_dump = pipeline_queue_opt_dump,
+ .test_setup = pipeline_test_setup,
+ .mempool_setup = pipeline_mempool_setup,
+ .ethdev_setup = pipeline_ethdev_setup,
+ .eventdev_setup = pipeline_queue_eventdev_setup,
+ .launch_lcores = pipeline_queue_launch_lcores,
+ .eventdev_destroy = pipeline_eventdev_destroy,
+ .mempool_destroy = pipeline_mempool_destroy,
+ .ethdev_destroy = pipeline_ethdev_destroy,
+ .test_result = pipeline_test_result,
+ .test_destroy = pipeline_test_destroy,
+};
+
+EVT_TEST_REGISTER(pipeline_queue);