path: root/src/spdk/dpdk/examples/l2fwd
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:45:59 +0000
commit    19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree      42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/spdk/dpdk/examples/l2fwd
parent    Initial commit. (diff)
Adding upstream version 16.2.11+ds. (tag: upstream/16.2.11+ds, branch: upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-cat/Makefile                       |   75
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-cat/cat.c                          | 1032
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-cat/cat.h                          |   43
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-cat/l2fwd-cat.c                    |  205
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-cat/meson.build                    |   16
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-crypto/Makefile                    |   63
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-crypto/main.c                      | 2810
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-crypto/meson.build                 |   13
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/Makefile                     |   63
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/l2fwd_common.c               |  116
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/l2fwd_common.h               |  133
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/l2fwd_event.c                |  394
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/l2fwd_event.h                |   73
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/l2fwd_event_generic.c        |  320
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/l2fwd_event_internal_port.c  |  296
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/l2fwd_poll.c                 |  182
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/l2fwd_poll.h                 |   25
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/main.c                       |  720
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-event/meson.build                  |   19
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-jobstats/Makefile                  |   58
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-jobstats/main.c                    | 1029
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-jobstats/meson.build               |   13
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-keepalive/Makefile                 |   60
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-keepalive/ka-agent/Makefile        |   22
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-keepalive/ka-agent/main.c          |  120
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-keepalive/main.c                   |  825
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-keepalive/meson.build              |   14
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-keepalive/shm.c                    |  112
-rw-r--r--  src/spdk/dpdk/examples/l2fwd-keepalive/shm.h                    |   69
-rw-r--r--  src/spdk/dpdk/examples/l2fwd/Makefile                           |   60
-rw-r--r--  src/spdk/dpdk/examples/l2fwd/main.c                             |  776
-rw-r--r--  src/spdk/dpdk/examples/l2fwd/meson.build                        |   13
32 files changed, 9769 insertions(+), 0 deletions(-)
diff --git a/src/spdk/dpdk/examples/l2fwd-cat/Makefile b/src/spdk/dpdk/examples/l2fwd-cat/Makefile
new file mode 100644
index 000000000..ca1202be1
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-cat/Makefile
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+# binary name
+APP = l2fwd-cat
+
+# all sources are stored in SRCS-y
+SRCS-y := l2fwd-cat.c cat.c
+
+# Build using pkg-config variables if possible
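+# Typical standalone usage (assuming libdpdk.pc and libpqos are already
+# installed): plain "make" builds build/l2fwd-cat against the shared
+# DPDK libraries; "make static" links DPDK statically instead.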
+ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
+
+all: shared
+.PHONY: shared static
+shared: build/$(APP)-shared
+ ln -sf $(APP)-shared build/$(APP)
+static: build/$(APP)-static
+ ln -sf $(APP)-static build/$(APP)
+
+PKGCONF ?= pkg-config
+
+PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null)
+CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
+LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
+LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
+
+LDFLAGS += -lpqos
+
+build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
+
+build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
+
+build:
+ @mkdir -p $@
+
+.PHONY: clean
+clean:
+ rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
+ test -d build && rmdir -p build || true
+
+else # Build using legacy build system
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+ifeq ($(PQOS_INSTALL_PATH),)
+$(error "Please define PQOS_INSTALL_PATH environment variable")
+endif
+
+# Default target; detect a build directory by looking for a path with a .config
+RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+EXTRA_CFLAGS += -O3 -g -Wfatal-errors
+
+CFLAGS += -I$(PQOS_INSTALL_PATH)/../include
+
+LDLIBS += -L$(PQOS_INSTALL_PATH)
+LDLIBS += -lpqos
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+endif
diff --git a/src/spdk/dpdk/examples/l2fwd-cat/cat.c b/src/spdk/dpdk/examples/l2fwd-cat/cat.c
new file mode 100644
index 000000000..502c6b327
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-cat/cat.c
@@ -0,0 +1,1032 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <getopt.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+
+#include <pqos.h>
+
+#include "cat.h"
+
+#define BITS_PER_HEX 4
+#define PQOS_MAX_SOCKETS 8
+#define PQOS_MAX_SOCKET_CORES 64
+#define PQOS_MAX_CORES (PQOS_MAX_SOCKET_CORES * PQOS_MAX_SOCKETS)
+
+static const struct pqos_cap *m_cap;
+static const struct pqos_cpuinfo *m_cpu;
+static const struct pqos_capability *m_cap_l3ca;
+#if PQOS_VERSION <= 103
+static unsigned m_sockets[PQOS_MAX_SOCKETS];
+#else
+static unsigned int *m_sockets;
+#endif
+static unsigned m_sock_count;
+static struct cat_config m_config[PQOS_MAX_CORES];
+static unsigned m_config_count;
+
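+/*
+ * Count the set bits with Kernighan's trick: each "bitmask &= bitmask - 1"
+ * clears the lowest set bit, so the loop iterates once per set bit.
+ * e.g. 0xB (binary 1011) -> 0xA -> 0x8 -> 0x0 gives a count of 3.
+ */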
+static unsigned
+bits_count(uint64_t bitmask)
+{
+ unsigned count = 0;
+
+ for (; bitmask != 0; count++)
+ bitmask &= bitmask - 1;
+
+ return count;
+}
+
+/*
+ * Parse an element; an element can be a single number/range or a '( )' group:
+ * 1) A single number element is just a digit, e.g. 9
+ * 2) A single range element is two digits joined by '-', e.g. 2-6
+ * 3) A group element combines multiple 1) or 2) inside '( )', e.g. (0,2-4,6)
+ * Within a group element, '-' is the range separator and
+ * ',' separates single numbers or ranges.
+ */
+static int
+parse_set(const char *input, rte_cpuset_t *cpusetp)
+{
+ unsigned idx;
+ const char *str = input;
+ char *end = NULL;
+ unsigned min, max;
+ const unsigned num = PQOS_MAX_CORES;
+
+ CPU_ZERO(cpusetp);
+
+ while (isblank(*str))
+ str++;
+
+	/* only a digit or a left bracket qualifies as a start point */
+ if ((!isdigit(*str) && *str != '(') || *str == '\0')
+ return -1;
+
+	/* process a single number or a single range of numbers */
+ if (*str != '(') {
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+
+ if (errno || end == NULL || idx >= num)
+ return -1;
+
+ while (isblank(*end))
+ end++;
+
+ min = idx;
+ max = idx;
+ if (*end == '-') {
+ /* process single <number>-<number> */
+ end++;
+ while (isblank(*end))
+ end++;
+ if (!isdigit(*end))
+ return -1;
+
+ errno = 0;
+ idx = strtoul(end, &end, 10);
+ if (errno || end == NULL || idx >= num)
+ return -1;
+ max = idx;
+ while (isblank(*end))
+ end++;
+ if (*end != ',' && *end != '\0')
+ return -1;
+ }
+
+ if (*end != ',' && *end != '\0' && *end != '@')
+ return -1;
+
+ for (idx = RTE_MIN(min, max); idx <= RTE_MAX(min, max);
+ idx++)
+ CPU_SET(idx, cpusetp);
+
+ return end - input;
+ }
+
+	/* process a set within brackets */
+ str++;
+ while (isblank(*str))
+ str++;
+ if (*str == '\0')
+ return -1;
+
+ min = PQOS_MAX_CORES;
+ do {
+
+		/* advance to the first digit */
+ while (isblank(*str))
+ str++;
+ if (!isdigit(*str))
+ return -1;
+
+ /* get the digit value */
+ errno = 0;
+ idx = strtoul(str, &end, 10);
+ if (errno || end == NULL || idx >= num)
+ return -1;
+
+		/* advance to a separator: '-', ',' or ')' */
+ while (isblank(*end))
+ end++;
+ if (*end == '-') {
+ if (min == PQOS_MAX_CORES)
+ min = idx;
+			else /* reject consecutive '-' */
+ return -1;
+ } else if ((*end == ',') || (*end == ')')) {
+ max = idx;
+ if (min == PQOS_MAX_CORES)
+ min = idx;
+ for (idx = RTE_MIN(min, max); idx <= RTE_MAX(min, max);
+ idx++)
+ CPU_SET(idx, cpusetp);
+
+ min = PQOS_MAX_CORES;
+ } else
+ return -1;
+
+ str = end + 1;
+ } while (*end != '\0' && *end != ')');
+
+ return str - input;
+}
+
+/* Test if bitmask is contiguous */
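+/* e.g. 0x00F0 is contiguous (returns 1); 0x0F0F and 0 are not (return 0) */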
+static int
+is_contiguous(uint64_t bitmask)
+{
+ unsigned i = 0;
+ unsigned j = 0;
+ const unsigned max_idx = (sizeof(bitmask) * CHAR_BIT);
+
+ if (bitmask == 0)
+ return 0;
+
+ for (i = 0; i < max_idx; i++) {
+ if (((1ULL << i) & bitmask) != 0)
+ j++;
+ else if (j > 0)
+ break;
+ }
+
+ if (bits_count(bitmask) != j) {
+ printf("PQOS: mask 0x%llx is not contiguous.\n",
+ (unsigned long long)bitmask);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * The format pattern: --l3ca='<cbm@cpus>[,<(ccbm,dcbm)@cpus>...]'
+ * cbm can be a single mask or, on a CDP enabled system, a group of two
+ * masks ("code cbm" and "data cbm").
+ * '(' and ')' are necessary if it's a group.
+ * cpus can be a single digit/range or a group.
+ * '(' and ')' are necessary if it's a group.
+ *
+ * e.g. '0x00F00@(1,3), 0x0FF00@(4-6), 0xF0000@7'
+ * - CPUs 1 and 3 share their 4 ways with CPUs 4, 5 and 6;
+ * - CPUs 4, 5 and 6 share half (4 out of 8 ways) of their L3 with 1 and 3;
+ * - CPUs 4, 5 and 6 have exclusive access to the other 4 of their 8 ways;
+ * - CPU 7 has exclusive access to all of its 4 ways;
+ *
+ * e.g. '(0x00C00,0x00300)@(1,3)' for a CDP enabled system
+ * - CPUs 1 and 3 have access to 2 ways for code and 2 ways for data;
+ *   the code and data ways do not overlap.
+ */
+static int
+parse_l3ca(const char *l3ca)
+{
+ unsigned idx = 0;
+ const char *cbm_start = NULL;
+ char *cbm_end = NULL;
+ const char *end = NULL;
+ int offset;
+ rte_cpuset_t cpuset;
+ uint64_t mask = 0;
+ uint64_t cmask = 0;
+
+ if (l3ca == NULL)
+ goto err;
+
+ /* Get cbm */
+ do {
+ CPU_ZERO(&cpuset);
+ mask = 0;
+ cmask = 0;
+
+ while (isblank(*l3ca))
+ l3ca++;
+
+ if (*l3ca == '\0')
+ goto err;
+
+ /* record mask_set start point */
+ cbm_start = l3ca;
+
+		/* skip over a complete bracket pair */
+ if (*cbm_start == '(') {
+ l3ca += strcspn(l3ca, ")");
+ if (*l3ca++ == '\0')
+ goto err;
+ }
+
+ /* scan the separator '@', ','(next) or '\0'(finish) */
+ l3ca += strcspn(l3ca, "@,");
+
+ if (*l3ca != '@')
+ goto err;
+
+		/* explicitly assign the cpu set */
+ offset = parse_set(l3ca + 1, &cpuset);
+ if (offset < 0 || CPU_COUNT(&cpuset) == 0)
+ goto err;
+
+ end = l3ca + 1 + offset;
+
+ if (*end != ',' && *end != '\0')
+ goto err;
+
+ /* parse mask_set from start point */
+ if (*cbm_start == '(') {
+ cbm_start++;
+
+ while (isblank(*cbm_start))
+ cbm_start++;
+
+ if (!isxdigit(*cbm_start))
+ goto err;
+
+ errno = 0;
+ cmask = strtoul(cbm_start, &cbm_end, 16);
+ if (errno != 0 || cbm_end == NULL || cmask == 0)
+ goto err;
+
+ while (isblank(*cbm_end))
+ cbm_end++;
+
+ if (*cbm_end != ',')
+ goto err;
+
+ cbm_end++;
+
+ while (isblank(*cbm_end))
+ cbm_end++;
+
+ if (!isxdigit(*cbm_end))
+ goto err;
+
+ errno = 0;
+ mask = strtoul(cbm_end, &cbm_end, 16);
+ if (errno != 0 || cbm_end == NULL || mask == 0)
+ goto err;
+ } else {
+ while (isblank(*cbm_start))
+ cbm_start++;
+
+ if (!isxdigit(*cbm_start))
+ goto err;
+
+ errno = 0;
+ mask = strtoul(cbm_start, &cbm_end, 16);
+ if (errno != 0 || cbm_end == NULL || mask == 0)
+ goto err;
+
+ }
+
+ if (mask == 0 || is_contiguous(mask) == 0)
+ goto err;
+
+ if (cmask != 0 && is_contiguous(cmask) == 0)
+ goto err;
+
+ rte_memcpy(&m_config[idx].cpumask,
+ &cpuset, sizeof(rte_cpuset_t));
+
+ if (cmask != 0) {
+ m_config[idx].cdp = 1;
+ m_config[idx].code_mask = cmask;
+ m_config[idx].data_mask = mask;
+ } else
+ m_config[idx].mask = mask;
+
+ m_config_count++;
+
+ l3ca = end + 1;
+ idx++;
+ } while (*end != '\0' && idx < PQOS_MAX_CORES);
+
+ return 0;
+
+err:
+ return -EINVAL;
+}
+
+static int
+check_cpus_overlapping(void)
+{
+ unsigned i = 0;
+ unsigned j = 0;
+ rte_cpuset_t mask;
+
+ CPU_ZERO(&mask);
+
+ for (i = 0; i < m_config_count; i++) {
+ for (j = i + 1; j < m_config_count; j++) {
+ RTE_CPU_AND(&mask,
+ &m_config[i].cpumask,
+ &m_config[j].cpumask);
+
+ if (CPU_COUNT(&mask) != 0) {
+ printf("PQOS: Requested CPUs sets are "
+ "overlapping.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+check_cpus(void)
+{
+ unsigned i = 0;
+ unsigned cpu_id = 0;
+ unsigned cos_id = 0;
+ int ret = 0;
+
+ for (i = 0; i < m_config_count; i++) {
+ for (cpu_id = 0; cpu_id < PQOS_MAX_CORES; cpu_id++) {
+ if (CPU_ISSET(cpu_id, &m_config[i].cpumask) != 0) {
+
+ ret = pqos_cpu_check_core(m_cpu, cpu_id);
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: %u is not a valid "
+ "logical core id.\n", cpu_id);
+ ret = -ENODEV;
+ goto exit;
+ }
+
+#if PQOS_VERSION <= 103
+ ret = pqos_l3ca_assoc_get(cpu_id, &cos_id);
+#else
+ ret = pqos_alloc_assoc_get(cpu_id, &cos_id);
+#endif
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Failed to read COS "
+ "associated to cpu %u.\n",
+ cpu_id);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+				/*
+				 * Check if the COS assigned to the lcore is
+				 * different from the default one (#0)
+				 */
+ if (cos_id != 0) {
+ printf("PQOS: cpu %u has already "
+ "associated COS#%u. "
+ "Please reset L3CA.\n",
+ cpu_id, cos_id);
+ ret = -EBUSY;
+ goto exit;
+ }
+ }
+ }
+ }
+
+exit:
+ return ret;
+}
+
+static int
+check_cdp(void)
+{
+ unsigned i = 0;
+
+ for (i = 0; i < m_config_count; i++) {
+ if (m_config[i].cdp == 1 && m_cap_l3ca->u.l3ca->cdp_on == 0) {
+ if (m_cap_l3ca->u.l3ca->cdp == 0) {
+ printf("PQOS: CDP requested but not "
+ "supported.\n");
+ } else {
+ printf("PQOS: CDP requested but not enabled. "
+ "Please enable CDP.\n");
+ }
+ return -ENOTSUP;
+ }
+ }
+
+ return 0;
+}
+
+static int
+check_cbm_len_and_contention(void)
+{
+ unsigned i = 0;
+ uint64_t mask = 0;
+ const uint64_t not_cbm = (UINT64_MAX << (m_cap_l3ca->u.l3ca->num_ways));
+ const uint64_t cbm_contention_mask = m_cap_l3ca->u.l3ca->way_contention;
+ int ret = 0;
+
+ for (i = 0; i < m_config_count; i++) {
+ if (m_config[i].cdp == 1)
+ mask = m_config[i].code_mask | m_config[i].data_mask;
+ else
+ mask = m_config[i].mask;
+
+ if ((mask & not_cbm) != 0) {
+ printf("PQOS: One or more of requested CBM masks not "
+ "supported by system (too long).\n");
+ ret = -ENOTSUP;
+ break;
+ }
+
+ /* Just a warning */
+ if ((mask & cbm_contention_mask) != 0) {
+ printf("PQOS: One or more of requested CBM masks "
+ "overlap CBM contention mask.\n");
+ break;
+ }
+
+ }
+
+ return ret;
+}
+
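+/*
+ * Pick a free COS for every configuration group: the first pass marks
+ * each COS currently associated with any core as used (per socket),
+ * the second pass assigns the first unused COS per group per socket
+ * and records it in cos_id_map[group][socket].
+ */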
+static int
+check_and_select_classes(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
+{
+ unsigned i = 0;
+ unsigned j = 0;
+ unsigned phy_pkg_id = 0;
+ unsigned cos_id = 0;
+ unsigned cpu_id = 0;
+ unsigned phy_pkg_lcores[PQOS_MAX_SOCKETS][m_config_count];
+ const unsigned cos_num = m_cap_l3ca->u.l3ca->num_classes;
+ unsigned used_cos_table[PQOS_MAX_SOCKETS][cos_num];
+ int ret = 0;
+
+ memset(phy_pkg_lcores, 0, sizeof(phy_pkg_lcores));
+ memset(used_cos_table, 0, sizeof(used_cos_table));
+
+ /* detect currently used COS */
+ for (j = 0; j < m_cpu->num_cores; j++) {
+ cpu_id = m_cpu->cores[j].lcore;
+
+#if PQOS_VERSION <= 103
+ ret = pqos_l3ca_assoc_get(cpu_id, &cos_id);
+#else
+ ret = pqos_alloc_assoc_get(cpu_id, &cos_id);
+#endif
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Failed to read COS associated to "
+ "cpu %u on phy_pkg %u.\n", cpu_id, phy_pkg_id);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = pqos_cpu_get_socketid(m_cpu, cpu_id, &phy_pkg_id);
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Failed to get socket for cpu %u\n",
+ cpu_id);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ /* Mark COS as used */
+ if (used_cos_table[phy_pkg_id][cos_id] == 0)
+ used_cos_table[phy_pkg_id][cos_id]++;
+ }
+
+ /* look for avail. COS to fulfill requested config */
+ for (i = 0; i < m_config_count; i++) {
+ for (j = 0; j < m_cpu->num_cores; j++) {
+ cpu_id = m_cpu->cores[j].lcore;
+ if (CPU_ISSET(cpu_id, &m_config[i].cpumask) == 0)
+ continue;
+
+ ret = pqos_cpu_get_socketid(m_cpu, cpu_id, &phy_pkg_id);
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Failed to get socket for "
+ "cpu %u\n", cpu_id);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ /*
+ * Check if we already have COS selected
+ * to be used for that group on that socket
+ */
+ if (phy_pkg_lcores[phy_pkg_id][i] != 0)
+ continue;
+
+ phy_pkg_lcores[phy_pkg_id][i]++;
+
+ /* Search for avail. COS to be used on that socket */
+ for (cos_id = 0; cos_id < cos_num; cos_id++) {
+ if (used_cos_table[phy_pkg_id][cos_id] == 0) {
+ used_cos_table[phy_pkg_id][cos_id]++;
+ cos_id_map[i][phy_pkg_id] = cos_id;
+ break;
+ }
+ }
+
+ /* If there is no COS available ...*/
+ if (cos_id == cos_num) {
+ ret = -E2BIG;
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ if (ret != 0)
+ printf("PQOS: Not enough available COS to configure "
+ "requested configuration.\n");
+
+ return ret;
+}
+
+static int
+configure_cat(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
+{
+ unsigned phy_pkg_id = 0;
+ unsigned cpu_id = 0;
+ unsigned cos_id = 0;
+ unsigned i = 0;
+ unsigned j = 0;
+ struct pqos_l3ca l3ca = {0};
+ int ret = 0;
+
+ for (i = 0; i < m_config_count; i++) {
+ memset(&l3ca, 0, sizeof(l3ca));
+
+ l3ca.cdp = m_config[i].cdp;
+ if (m_config[i].cdp == 1) {
+#if PQOS_VERSION <= 103
+ l3ca.code_mask = m_config[i].code_mask;
+ l3ca.data_mask = m_config[i].data_mask;
+#else
+ l3ca.u.s.code_mask = m_config[i].code_mask;
+ l3ca.u.s.data_mask = m_config[i].data_mask;
+#endif
+ } else
+#if PQOS_VERSION <= 103
+ l3ca.ways_mask = m_config[i].mask;
+#else
+ l3ca.u.ways_mask = m_config[i].mask;
+#endif
+
+ for (j = 0; j < m_sock_count; j++) {
+ phy_pkg_id = m_sockets[j];
+ if (cos_id_map[i][phy_pkg_id] == 0)
+ continue;
+
+ l3ca.class_id = cos_id_map[i][phy_pkg_id];
+
+ ret = pqos_l3ca_set(phy_pkg_id, 1, &l3ca);
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Failed to set COS %u on "
+ "phy_pkg %u.\n", l3ca.class_id,
+ phy_pkg_id);
+ ret = -EFAULT;
+ goto exit;
+ }
+ }
+ }
+
+ for (i = 0; i < m_config_count; i++) {
+ for (j = 0; j < m_cpu->num_cores; j++) {
+ cpu_id = m_cpu->cores[j].lcore;
+ if (CPU_ISSET(cpu_id, &m_config[i].cpumask) == 0)
+ continue;
+
+ ret = pqos_cpu_get_socketid(m_cpu, cpu_id, &phy_pkg_id);
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Failed to get socket for "
+ "cpu %u\n", cpu_id);
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ cos_id = cos_id_map[i][phy_pkg_id];
+
+#if PQOS_VERSION <= 103
+ ret = pqos_l3ca_assoc_set(cpu_id, cos_id);
+#else
+ ret = pqos_alloc_assoc_set(cpu_id, cos_id);
+#endif
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Failed to associate COS %u to "
+ "cpu %u\n", cos_id, cpu_id);
+ ret = -EFAULT;
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ return ret;
+}
+
+
+/*
+ * Parse the application (non-EAL) command line arguments;
+ * returns the number of argv entries consumed.
+ */
+static int
+parse_args(int argc, char **argv)
+{
+ int opt = 0;
+ int retval = 0;
+ int oldopterr = 0;
+ char **argvopt = argv;
+ char *prgname = argv[0];
+
+ static struct option lgopts[] = {
+ { "l3ca", required_argument, 0, 0 },
+ { NULL, 0, 0, 0 }
+ };
+
+ /* Disable printing messages within getopt() */
+ oldopterr = opterr;
+ opterr = 0;
+
+ opt = getopt_long(argc, argvopt, "", lgopts, NULL);
+ if (opt == 0) {
+ retval = parse_l3ca(optarg);
+ if (retval != 0) {
+ printf("PQOS: Invalid L3CA parameters!\n");
+ goto exit;
+ }
+
+ argv[optind - 1] = prgname;
+ retval = optind - 1;
+ } else
+ retval = 0;
+
+exit:
+ /* reset getopt lib */
+ optind = 1;
+
+ /* Restore opterr value */
+ opterr = oldopterr;
+
+ return retval;
+}
+
+static void
+print_cmd_line_config(void)
+{
+ char cpustr[PQOS_MAX_CORES * 3] = {0};
+ unsigned i = 0;
+ unsigned j = 0;
+
+ for (i = 0; i < m_config_count; i++) {
+ unsigned len = 0;
+ memset(cpustr, 0, sizeof(cpustr));
+
+ /* Generate CPU list */
+ for (j = 0; j < PQOS_MAX_CORES; j++) {
+ if (CPU_ISSET(j, &m_config[i].cpumask) != 1)
+ continue;
+
+ len += snprintf(cpustr + len, sizeof(cpustr) - len - 1,
+ "%u,", j);
+
+ if (len >= sizeof(cpustr) - 1)
+ break;
+ }
+
+ if (m_config[i].cdp == 1) {
+ printf("PQOS: CPUs: %s cMASK: 0x%llx, dMASK: "
+ "0x%llx\n", cpustr,
+ (unsigned long long)m_config[i].code_mask,
+ (unsigned long long)m_config[i].data_mask);
+ } else {
+ printf("PQOS: CPUs: %s MASK: 0x%llx\n", cpustr,
+ (unsigned long long)m_config[i].mask);
+ }
+ }
+}
+
+/**
+ * @brief Prints CAT configuration
+ */
+static void
+print_cat_config(void)
+{
+ int ret = PQOS_RETVAL_OK;
+ unsigned i = 0;
+
+ for (i = 0; i < m_sock_count; i++) {
+ struct pqos_l3ca tab[PQOS_MAX_L3CA_COS] = {{0} };
+ unsigned num = 0;
+ unsigned n = 0;
+
+ ret = pqos_l3ca_get(m_sockets[i], PQOS_MAX_L3CA_COS, &num, tab);
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Error retrieving COS!\n");
+ return;
+ }
+
+ printf("PQOS: COS definitions for Socket %u:\n", m_sockets[i]);
+ for (n = 0; n < num; n++) {
+ if (tab[n].cdp == 1) {
+ printf("PQOS: COS: %u, cMASK: 0x%llx, "
+ "dMASK: 0x%llx\n", tab[n].class_id,
+#if PQOS_VERSION <= 103
+ (unsigned long long)tab[n].code_mask,
+ (unsigned long long)tab[n].data_mask);
+#else
+ (unsigned long long)tab[n].u.s.code_mask,
+ (unsigned long long)tab[n].u.s.data_mask);
+#endif
+ } else {
+ printf("PQOS: COS: %u, MASK: 0x%llx\n",
+ tab[n].class_id,
+#if PQOS_VERSION <= 103
+ (unsigned long long)tab[n].ways_mask);
+#else
+ (unsigned long long)tab[n].u.ways_mask);
+#endif
+ }
+ }
+ }
+
+ for (i = 0; i < m_sock_count; i++) {
+#if PQOS_VERSION <= 103
+ unsigned lcores[PQOS_MAX_SOCKET_CORES] = {0};
+#else
+ unsigned int *lcores = NULL;
+#endif
+ unsigned lcount = 0;
+ unsigned n = 0;
+
+#if PQOS_VERSION <= 103
+ ret = pqos_cpu_get_cores(m_cpu, m_sockets[i],
+ PQOS_MAX_SOCKET_CORES, &lcount, &lcores[0]);
+ if (ret != PQOS_RETVAL_OK) {
+#else
+ lcores = pqos_cpu_get_cores(m_cpu, m_sockets[i],
+ &lcount);
+ if (lcores == NULL || lcount == 0) {
+#endif
+ printf("PQOS: Error retrieving core information!\n");
+ return;
+ }
+
+ printf("PQOS: CPU information for socket %u:\n", m_sockets[i]);
+ for (n = 0; n < lcount; n++) {
+ unsigned class_id = 0;
+
+#if PQOS_VERSION <= 103
+ ret = pqos_l3ca_assoc_get(lcores[n], &class_id);
+#else
+ ret = pqos_alloc_assoc_get(lcores[n], &class_id);
+#endif
+ if (ret == PQOS_RETVAL_OK)
+ printf("PQOS: CPU: %u, COS: %u\n", lcores[n],
+ class_id);
+ else
+ printf("PQOS: CPU: %u, ERROR\n", lcores[n]);
+ }
+
+#if PQOS_VERSION > 103
+ free(lcores);
+#endif
+ }
+
+}
+
+static int
+cat_validate(void)
+{
+ int ret = 0;
+
+ ret = check_cpus();
+ if (ret != 0)
+ return ret;
+
+ ret = check_cdp();
+ if (ret != 0)
+ return ret;
+
+ ret = check_cbm_len_and_contention();
+ if (ret != 0)
+ return ret;
+
+ ret = check_cpus_overlapping();
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static int
+cat_set(void)
+{
+ int ret = 0;
+ unsigned cos_id_map[m_config_count][PQOS_MAX_SOCKETS];
+
+ memset(cos_id_map, 0, sizeof(cos_id_map));
+
+ ret = check_and_select_classes(cos_id_map);
+ if (ret != 0)
+ return ret;
+
+ ret = configure_cat(cos_id_map);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+cat_fini(void)
+{
+ int ret = 0;
+
+ printf("PQOS: Shutting down PQoS library...\n");
+
+ /* deallocate all the resources */
+ ret = pqos_fini();
+ if (ret != PQOS_RETVAL_OK && ret != PQOS_RETVAL_INIT)
+ printf("PQOS: Error shutting down PQoS library!\n");
+
+ m_cap = NULL;
+ m_cpu = NULL;
+ m_cap_l3ca = NULL;
+#if PQOS_VERSION <= 103
+ memset(m_sockets, 0, sizeof(m_sockets));
+#else
+ if (m_sockets != NULL)
+ free(m_sockets);
+#endif
+ m_sock_count = 0;
+ memset(m_config, 0, sizeof(m_config));
+ m_config_count = 0;
+}
+
+void
+cat_exit(void)
+{
+ unsigned i = 0;
+ unsigned j = 0;
+ unsigned cpu_id = 0;
+ int ret = 0;
+
+ /* if lib is not initialized, do nothing */
+ if (m_cap == NULL && m_cpu == NULL)
+ return;
+
+ printf("PQOS: Reverting CAT configuration...\n");
+
+ for (i = 0; i < m_config_count; i++) {
+ for (j = 0; j < m_cpu->num_cores; j++) {
+ cpu_id = m_cpu->cores[j].lcore;
+ if (CPU_ISSET(cpu_id, &m_config[i].cpumask) == 0)
+ continue;
+
+#if PQOS_VERSION <= 103
+ ret = pqos_l3ca_assoc_set(cpu_id, 0);
+#else
+ ret = pqos_alloc_assoc_set(cpu_id, 0);
+#endif
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Failed to associate COS 0 to "
+ "cpu %u\n", cpu_id);
+ }
+ }
+ }
+
+ cat_fini();
+}
+
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\nPQOS: Signal %d received, preparing to exit...\n",
+ signum);
+
+ cat_exit();
+
+ /* exit with the expected status */
+ signal(signum, SIG_DFL);
+ kill(getpid(), signum);
+ }
+}
+
+int
+cat_init(int argc, char **argv)
+{
+ int ret = 0;
+ int args_num = 0;
+ struct pqos_config cfg = {0};
+
+ if (m_cap != NULL || m_cpu != NULL) {
+ printf("PQOS: CAT module already initialized!\n");
+ return -EEXIST;
+ }
+
+ /* Parse cmd line args */
+ ret = parse_args(argc, argv);
+
+ if (ret <= 0)
+ goto err;
+
+ args_num = ret;
+
+ /* Print cmd line configuration */
+ print_cmd_line_config();
+
+ /* PQoS Initialization - Check and initialize CAT capability */
+ cfg.fd_log = STDOUT_FILENO;
+ cfg.verbose = 0;
+#if PQOS_VERSION <= 103
+ cfg.cdp_cfg = PQOS_REQUIRE_CDP_ANY;
+#endif
+ ret = pqos_init(&cfg);
+ if (ret != PQOS_RETVAL_OK) {
+ printf("PQOS: Error initializing PQoS library!\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* Get capability and CPU info pointer */
+ ret = pqos_cap_get(&m_cap, &m_cpu);
+ if (ret != PQOS_RETVAL_OK || m_cap == NULL || m_cpu == NULL) {
+ printf("PQOS: Error retrieving PQoS capabilities!\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* Get L3CA capabilities */
+ ret = pqos_cap_get_type(m_cap, PQOS_CAP_TYPE_L3CA, &m_cap_l3ca);
+ if (ret != PQOS_RETVAL_OK || m_cap_l3ca == NULL) {
+ printf("PQOS: Error retrieving PQOS_CAP_TYPE_L3CA "
+ "capabilities!\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* Get CPU socket information */
+#if PQOS_VERSION <= 103
+ ret = pqos_cpu_get_sockets(m_cpu, PQOS_MAX_SOCKETS, &m_sock_count,
+ m_sockets);
+ if (ret != PQOS_RETVAL_OK) {
+#else
+ m_sockets = pqos_cpu_get_sockets(m_cpu, &m_sock_count);
+ if (m_sockets == NULL) {
+#endif
+ printf("PQOS: Error retrieving CPU socket information!\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* Validate cmd line configuration */
+ ret = cat_validate();
+ if (ret != 0) {
+ printf("PQOS: Requested CAT configuration is not valid!\n");
+ goto err;
+ }
+
+ /* configure system */
+ ret = cat_set();
+ if (ret != 0) {
+ printf("PQOS: Failed to configure CAT!\n");
+ goto err;
+ }
+
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ ret = atexit(cat_exit);
+ if (ret != 0) {
+ printf("PQOS: Cannot set exit function\n");
+ goto err;
+ }
+
+ /* Print CAT configuration */
+ print_cat_config();
+
+ return args_num;
+
+err:
+ /* deallocate all the resources */
+ cat_fini();
+ return ret;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-cat/cat.h b/src/spdk/dpdk/examples/l2fwd-cat/cat.h
new file mode 100644
index 000000000..1eb0543cd
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-cat/cat.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#ifndef _CAT_H
+#define _CAT_H
+
+/**
+ * @file
+ * PQoS CAT
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* L3 cache allocation class of service data structure */
+struct cat_config {
+ rte_cpuset_t cpumask; /* CPUs bitmask */
+ int cdp; /* data & code masks used if true */
+ union {
+ uint64_t mask; /* capacity bitmask (CBM) */
+ struct {
+ uint64_t data_mask; /* data capacity bitmask (CBM) */
+ uint64_t code_mask; /* code capacity bitmask (CBM) */
+ };
+ };
+};
+
+int cat_init(int argc, char **argv);
+
+void cat_exit(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CAT_H */
diff --git a/src/spdk/dpdk/examples/l2fwd-cat/l2fwd-cat.c b/src/spdk/dpdk/examples/l2fwd-cat/l2fwd-cat.c
new file mode 100644
index 000000000..45a497c08
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-cat/l2fwd-cat.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+
+#include "cat.h"
+
+#define RX_RING_SIZE 128
+#define TX_RING_SIZE 512
+
+#define NUM_MBUFS 8191
+#define MBUF_CACHE_SIZE 250
+#define BURST_SIZE 32
+
+static const struct rte_eth_conf port_conf_default = {
+ .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
+};
+
+/* l2fwd-cat.c: CAT enabled, basic DPDK skeleton forwarding example. */
+
+/*
+ * Initializes a given port using global settings and with the RX buffers
+ * coming from the mbuf_pool passed as a parameter.
+ */
+static inline int
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
+{
+ struct rte_eth_conf port_conf = port_conf_default;
+ const uint16_t rx_rings = 1, tx_rings = 1;
+ int retval;
+ uint16_t q;
+ uint16_t nb_rxd = RX_RING_SIZE;
+ uint16_t nb_txd = TX_RING_SIZE;
+
+ if (!rte_eth_dev_is_valid_port(port))
+ return -1;
+
+ /* Configure the Ethernet device. */
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
+ if (retval != 0)
+ return retval;
+
+ /* Allocate and set up 1 RX queue per Ethernet port. */
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
+ rte_eth_dev_socket_id(port), NULL, mbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Allocate and set up 1 TX queue per Ethernet port. */
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port, q, nb_txd,
+ rte_eth_dev_socket_id(port), NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Start the Ethernet port. */
+ retval = rte_eth_dev_start(port);
+ if (retval < 0)
+ return retval;
+
+ /* Display the port MAC address. */
+ struct rte_ether_addr addr;
+ retval = rte_eth_macaddr_get(port, &addr);
+ if (retval < 0)
+ return retval;
+
+ printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+ " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+ port,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+
+ /* Enable RX in promiscuous mode for the Ethernet device. */
+ retval = rte_eth_promiscuous_enable(port);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
+/*
+ * The lcore main. This is the main thread that does the work, reading from
+ * an input port and writing to an output port.
+ */
+static __rte_noreturn void
+lcore_main(void)
+{
+ uint16_t port;
+
+ /*
+ * Check that the port is on the same NUMA node as the polling thread
+ * for best performance.
+ */
+ RTE_ETH_FOREACH_DEV(port)
+ if (rte_eth_dev_socket_id(port) > 0 &&
+ rte_eth_dev_socket_id(port) !=
+ (int)rte_socket_id())
+ printf("WARNING, port %u is on remote NUMA node to "
+ "polling thread.\n\tPerformance will "
+ "not be optimal.\n", port);
+
+ printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
+ rte_lcore_id());
+
+	/* Run until the application quits or is killed. */
+ for (;;) {
+ /*
+ * Receive packets on a port and forward them on the paired
+ * port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
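+		 * The paired port is computed below as "port ^ 1",
+		 * which flips the lowest bit of the port number.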
+ */
+ RTE_ETH_FOREACH_DEV(port) {
+
+ /* Get burst of RX packets, from first port of pair. */
+ struct rte_mbuf *bufs[BURST_SIZE];
+ const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
+ bufs, BURST_SIZE);
+
+ if (unlikely(nb_rx == 0))
+ continue;
+
+ /* Send burst of TX packets, to second port of pair. */
+ const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
+ bufs, nb_rx);
+
+ /* Free any unsent packets. */
+ if (unlikely(nb_tx < nb_rx)) {
+ uint16_t buf;
+ for (buf = nb_tx; buf < nb_rx; buf++)
+ rte_pktmbuf_free(bufs[buf]);
+ }
+ }
+ }
+}
+
+/*
+ * The main function, which does initialization and calls the per-lcore
+ * functions.
+ */
+int
+main(int argc, char *argv[])
+{
+ struct rte_mempool *mbuf_pool;
+ unsigned nb_ports;
+ uint16_t portid;
+
+ /* Initialize the Environment Abstraction Layer (EAL). */
+ int ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+
+ argc -= ret;
+ argv += ret;
+
+ /*
+ * Initialize the PQoS library and configure CAT.
+ * Please see l2fwd-cat documentation for more info.
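+	 *
+	 * Illustrative invocation (EAL args first, app args after "--"):
+	 *   ./build/l2fwd-cat -l 1 -n 4 -- --l3ca='0x3@(0-3)'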
+ */
+ ret = cat_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "PQOS: L3CA init failed!\n");
+
+ argc -= ret;
+ argv += ret;
+
+ /* Check that there is an even number of ports to send/receive on. */
+ nb_ports = rte_eth_dev_count_avail();
+ if (nb_ports < 2 || (nb_ports & 1))
+ rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
+
+	/* Create a new mempool in memory to hold the mbufs. */
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
+ MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+
+ /* Initialize all ports. */
+ RTE_ETH_FOREACH_DEV(portid)
+ if (port_init(portid, mbuf_pool) != 0)
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
+ portid);
+
+ if (rte_lcore_count() > 1)
+ printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
+
+ /* Call lcore_main on the master core only. */
+ lcore_main();
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-cat/meson.build b/src/spdk/dpdk/examples/l2fwd-cat/meson.build
new file mode 100644
index 000000000..2bed18e74
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-cat/meson.build
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+# Meson file for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+pqos = cc.find_library('pqos', required: false)
+build = pqos.found()
+ext_deps += pqos
+allow_experimental_apis = true
+cflags += '-I/usr/local/include' # assume pqos lib installed in /usr/local
+sources = files(
+ 'cat.c', 'l2fwd-cat.c'
+)
diff --git a/src/spdk/dpdk/examples/l2fwd-crypto/Makefile b/src/spdk/dpdk/examples/l2fwd-crypto/Makefile
new file mode 100644
index 000000000..2f1405a72
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-crypto/Makefile
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+# binary name
+APP = l2fwd-crypto
+
+# all sources are stored in SRCS-y
+SRCS-y := main.c
+
+# Build using pkg-config variables if possible
+ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
+
+all: shared
+.PHONY: shared static
+shared: build/$(APP)-shared
+ ln -sf $(APP)-shared build/$(APP)
+static: build/$(APP)-static
+ ln -sf $(APP)-static build/$(APP)
+
+PKGCONF ?= pkg-config
+
+PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null)
+CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
+LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
+LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
+
+build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
+
+build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
+
+build:
+ @mkdir -p $@
+
+.PHONY: clean
+clean:
+ rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
+ test -d build && rmdir -p build || true
+
+else # Build using legacy build system
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target; detect a build directory by looking for a path with a .config
+RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER),y)
+LDLIBS += -lrte_pmd_crypto_scheduler
+endif
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+endif
diff --git a/src/spdk/dpdk/examples/l2fwd-crypto/main.c b/src/spdk/dpdk/examples/l2fwd-crypto/main.c
new file mode 100644
index 000000000..fcb55c370
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-crypto/main.c
@@ -0,0 +1,2810 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2016 Intel Corporation
+ */
+
+#include <time.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_interrupts.h>
+#include <rte_ip.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_per_lcore.h>
+#include <rte_prefetch.h>
+#include <rte_random.h>
+#include <rte_hexdump.h>
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+#include <rte_cryptodev_scheduler.h>
+#endif
+
+enum cdev_type {
+ CDEV_TYPE_ANY,
+ CDEV_TYPE_HW,
+ CDEV_TYPE_SW
+};
+
+#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
+
+#define NB_MBUF 8192
+
+#define MAX_STR_LEN 32
+#define MAX_KEY_SIZE 128
+#define MAX_IV_SIZE 16
+#define MAX_AAD_SIZE 65535
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+#define SESSION_POOL_CACHE_SIZE 0
+
+#define MAXIMUM_IV_LENGTH 16
+#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
+ sizeof(struct rte_crypto_sym_op))
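+/* The per-op IV lives in the crypto op's private data area, right after
+ * struct rte_crypto_sym_op; IV_OFFSET is its byte offset from the start
+ * of the op. */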
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
+
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
+
+/* mask of enabled ports */
+static uint64_t l2fwd_enabled_port_mask;
+static uint64_t l2fwd_enabled_crypto_mask;
+
+/* list of enabled ports */
+static uint16_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
+
+
+struct pkt_buffer {
+ unsigned len;
+ struct rte_mbuf *buffer[MAX_PKT_BURST];
+};
+
+struct op_buffer {
+ unsigned len;
+ struct rte_crypto_op *buffer[MAX_PKT_BURST];
+};
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+
+enum l2fwd_crypto_xform_chain {
+ L2FWD_CRYPTO_CIPHER_HASH,
+ L2FWD_CRYPTO_HASH_CIPHER,
+ L2FWD_CRYPTO_CIPHER_ONLY,
+ L2FWD_CRYPTO_HASH_ONLY,
+ L2FWD_CRYPTO_AEAD
+};
+
+struct l2fwd_key {
+ uint8_t *data;
+ uint32_t length;
+ rte_iova_t phys_addr;
+};
+
+struct l2fwd_iv {
+ uint8_t *data;
+ uint16_t length;
+};
+
+/** l2fwd crypto application command line options */
+struct l2fwd_crypto_options {
+ unsigned portmask;
+ unsigned nb_ports_per_lcore;
+ unsigned refresh_period;
+ unsigned single_lcore:1;
+
+ enum cdev_type type;
+ unsigned sessionless:1;
+
+ enum l2fwd_crypto_xform_chain xform_chain;
+
+ struct rte_crypto_sym_xform cipher_xform;
+ unsigned ckey_param;
+ int ckey_random_size;
+ uint8_t cipher_key[MAX_KEY_SIZE];
+
+ struct l2fwd_iv cipher_iv;
+ unsigned int cipher_iv_param;
+ int cipher_iv_random_size;
+
+ struct rte_crypto_sym_xform auth_xform;
+ uint8_t akey_param;
+ int akey_random_size;
+ uint8_t auth_key[MAX_KEY_SIZE];
+
+ struct l2fwd_iv auth_iv;
+ unsigned int auth_iv_param;
+ int auth_iv_random_size;
+
+ struct rte_crypto_sym_xform aead_xform;
+ unsigned int aead_key_param;
+ int aead_key_random_size;
+ uint8_t aead_key[MAX_KEY_SIZE];
+
+ struct l2fwd_iv aead_iv;
+ unsigned int aead_iv_param;
+ int aead_iv_random_size;
+
+ struct l2fwd_key aad;
+ unsigned aad_param;
+ int aad_random_size;
+
+ int digest_size;
+
+ uint16_t block_size;
+ char string_type[MAX_STR_LEN];
+
+ uint64_t cryptodev_mask;
+
+ unsigned int mac_updating;
+};
+
+/** l2fwd crypto lcore params */
+struct l2fwd_crypto_params {
+ uint8_t dev_id;
+ uint8_t qp_id;
+
+ unsigned digest_length;
+ unsigned block_size;
+
+ struct l2fwd_iv cipher_iv;
+ struct l2fwd_iv auth_iv;
+ struct l2fwd_iv aead_iv;
+ struct l2fwd_key aad;
+ struct rte_cryptodev_sym_session *session;
+
+ uint8_t do_cipher;
+ uint8_t do_hash;
+ uint8_t do_aead;
+ uint8_t hash_verify;
+
+ enum rte_crypto_cipher_algorithm cipher_algo;
+ enum rte_crypto_auth_algorithm auth_algo;
+ enum rte_crypto_aead_algorithm aead_algo;
+};
+
+/** lcore configuration */
+struct lcore_queue_conf {
+ unsigned nb_rx_ports;
+ uint16_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+
+ unsigned nb_crypto_devs;
+ unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
+
+ struct op_buffer op_buf[RTE_CRYPTO_MAX_DEVS];
+ struct pkt_buffer pkt_buf[RTE_MAX_ETHPORTS];
+} __rte_cache_aligned;
+
+struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+static struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ .split_hdr_size = 0,
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+};
+
+struct rte_mempool *l2fwd_pktmbuf_pool;
+struct rte_mempool *l2fwd_crypto_op_pool;
+static struct {
+ struct rte_mempool *sess_mp;
+ struct rte_mempool *priv_mp;
+} session_pool_socket[RTE_MAX_NUMA_NODES];
+
+/* Per-port statistics struct */
+struct l2fwd_port_statistics {
+ uint64_t tx;
+ uint64_t rx;
+
+ uint64_t crypto_enqueued;
+ uint64_t crypto_dequeued;
+
+ uint64_t dropped;
+} __rte_cache_aligned;
+
+struct l2fwd_crypto_statistics {
+ uint64_t enqueued;
+ uint64_t dequeued;
+
+ uint64_t errors;
+} __rte_cache_aligned;
+
+struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
+struct l2fwd_crypto_statistics crypto_statistics[RTE_CRYPTO_MAX_DEVS];
+
+/* A tsc-based timer responsible for triggering statistics printout */
+#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 GHz */
+#define MAX_TIMER_PERIOD 86400UL /* 1 day max */
+
+/* default period is 10 seconds */
+static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000;
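+/* = 10 * 2000000 * 1000 = 2e10 TSC cycles, i.e. roughly 10 s at 2 GHz */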
+
+/* Print out statistics on packets dropped */
+static void
+print_stats(void)
+{
+ uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+ uint64_t total_packets_enqueued, total_packets_dequeued,
+ total_packets_errors;
+ uint16_t portid;
+ uint64_t cdevid;
+
+ total_packets_dropped = 0;
+ total_packets_tx = 0;
+ total_packets_rx = 0;
+ total_packets_enqueued = 0;
+ total_packets_dequeued = 0;
+ total_packets_errors = 0;
+
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("\nPort statistics ====================================");
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ /* skip disabled ports */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ printf("\nStatistics for port %u ------------------------------"
+ "\nPackets sent: %32"PRIu64
+ "\nPackets received: %28"PRIu64
+ "\nPackets dropped: %29"PRIu64,
+ portid,
+ port_statistics[portid].tx,
+ port_statistics[portid].rx,
+ port_statistics[portid].dropped);
+
+ total_packets_dropped += port_statistics[portid].dropped;
+ total_packets_tx += port_statistics[portid].tx;
+ total_packets_rx += port_statistics[portid].rx;
+ }
+ printf("\nCrypto statistics ==================================");
+
+ for (cdevid = 0; cdevid < RTE_CRYPTO_MAX_DEVS; cdevid++) {
+ /* skip disabled ports */
+ if ((l2fwd_enabled_crypto_mask & (((uint64_t)1) << cdevid)) == 0)
+ continue;
+ printf("\nStatistics for cryptodev %"PRIu64
+ " -------------------------"
+ "\nPackets enqueued: %28"PRIu64
+ "\nPackets dequeued: %28"PRIu64
+ "\nPackets errors: %30"PRIu64,
+ cdevid,
+ crypto_statistics[cdevid].enqueued,
+ crypto_statistics[cdevid].dequeued,
+ crypto_statistics[cdevid].errors);
+
+ total_packets_enqueued += crypto_statistics[cdevid].enqueued;
+ total_packets_dequeued += crypto_statistics[cdevid].dequeued;
+ total_packets_errors += crypto_statistics[cdevid].errors;
+ }
+ printf("\nAggregate statistics ==============================="
+ "\nTotal packets received: %22"PRIu64
+ "\nTotal packets enqueued: %22"PRIu64
+ "\nTotal packets dequeued: %22"PRIu64
+ "\nTotal packets sent: %26"PRIu64
+ "\nTotal packets dropped: %23"PRIu64
+ "\nTotal packets crypto errors: %17"PRIu64,
+ total_packets_rx,
+ total_packets_enqueued,
+ total_packets_dequeued,
+ total_packets_tx,
+ total_packets_dropped,
+ total_packets_errors);
+ printf("\n====================================================\n");
+}
+
+static int
+l2fwd_crypto_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ struct l2fwd_crypto_params *cparams)
+{
+ struct rte_crypto_op **op_buffer;
+ unsigned ret;
+
+ op_buffer = (struct rte_crypto_op **)
+ qconf->op_buf[cparams->dev_id].buffer;
+
+ ret = rte_cryptodev_enqueue_burst(cparams->dev_id,
+ cparams->qp_id, op_buffer, (uint16_t) n);
+
+ crypto_statistics[cparams->dev_id].enqueued += ret;
+ if (unlikely(ret < n)) {
+ crypto_statistics[cparams->dev_id].errors += (n - ret);
+ do {
+ rte_pktmbuf_free(op_buffer[ret]->sym->m_src);
+ rte_crypto_op_free(op_buffer[ret]);
+ } while (++ret < n);
+ }
+
+ return 0;
+}
+
+static int
+l2fwd_crypto_enqueue(struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
+{
+ unsigned lcore_id, len;
+ struct lcore_queue_conf *qconf;
+
+ lcore_id = rte_lcore_id();
+
+ qconf = &lcore_queue_conf[lcore_id];
+ len = qconf->op_buf[cparams->dev_id].len;
+ qconf->op_buf[cparams->dev_id].buffer[len] = op;
+ len++;
+
+ /* enough ops to be sent */
+ if (len == MAX_PKT_BURST) {
+ l2fwd_crypto_send_burst(qconf, MAX_PKT_BURST, cparams);
+ len = 0;
+ }
+
+ qconf->op_buf[cparams->dev_id].len = len;
+ return 0;
+}
+
+static int
+l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
+ struct rte_crypto_op *op,
+ struct l2fwd_crypto_params *cparams)
+{
+ struct rte_ether_hdr *eth_hdr;
+ struct rte_ipv4_hdr *ip_hdr;
+
+ uint32_t ipdata_offset, data_len;
+ uint32_t pad_len = 0;
+ char *padding;
+
+ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+ if (eth_hdr->ether_type != rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4))
+ return -1;
+
+ ipdata_offset = sizeof(struct rte_ether_hdr);
+
+ ip_hdr = (struct rte_ipv4_hdr *)(rte_pktmbuf_mtod(m, char *) +
+ ipdata_offset);
+
+ ipdata_offset += (ip_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK)
+ * RTE_IPV4_IHL_MULTIPLIER;
+
+
+	/* Zero-pad the data to be ciphered so it is block aligned */
+ data_len = rte_pktmbuf_data_len(m) - ipdata_offset;
+
+ if ((cparams->do_hash || cparams->do_aead) && cparams->hash_verify)
+ data_len -= cparams->digest_length;
+
+ if (cparams->do_cipher) {
+		/*
+		 * The following algorithms are block ciphers
+		 * and might need padding
+		 */
+ switch (cparams->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ if (data_len % cparams->block_size)
+ pad_len = cparams->block_size -
+ (data_len % cparams->block_size);
+ break;
+ default:
+ pad_len = 0;
+ }
+
+ if (pad_len) {
+ padding = rte_pktmbuf_append(m, pad_len);
+ if (unlikely(!padding))
+ return -1;
+
+ data_len += pad_len;
+ memset(padding, 0, pad_len);
+ }
+ }
+
+ /* Set crypto operation data parameters */
+ rte_crypto_op_attach_sym_session(op, cparams->session);
+
+ if (cparams->do_hash) {
+ if (cparams->auth_iv.length) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ IV_OFFSET +
+ cparams->cipher_iv.length);
+ /*
+ * Copy IV at the end of the crypto operation,
+ * after the cipher IV, if added
+ */
+ rte_memcpy(iv_ptr, cparams->auth_iv.data,
+ cparams->auth_iv.length);
+ }
+ if (!cparams->hash_verify) {
+ /* Append space for digest to end of packet */
+ op->sym->auth.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ cparams->digest_length);
+ } else {
+ op->sym->auth.digest.data = rte_pktmbuf_mtod(m,
+ uint8_t *) + ipdata_offset + data_len;
+ }
+
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
+ rte_pktmbuf_pkt_len(m) - cparams->digest_length);
+
+ /* For wireless algorithms, offset/length must be in bits */
+ if (cparams->auth_algo == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
+ cparams->auth_algo == RTE_CRYPTO_AUTH_KASUMI_F9 ||
+ cparams->auth_algo == RTE_CRYPTO_AUTH_ZUC_EIA3) {
+ op->sym->auth.data.offset = ipdata_offset << 3;
+ op->sym->auth.data.length = data_len << 3;
+ } else {
+ op->sym->auth.data.offset = ipdata_offset;
+ op->sym->auth.data.length = data_len;
+ }
+ }
+
+ if (cparams->do_cipher) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ IV_OFFSET);
+ /* Copy IV at the end of the crypto operation */
+ rte_memcpy(iv_ptr, cparams->cipher_iv.data,
+ cparams->cipher_iv.length);
+
+ /* For wireless algorithms, offset/length must be in bits */
+ if (cparams->cipher_algo == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
+ cparams->cipher_algo == RTE_CRYPTO_CIPHER_KASUMI_F8 ||
+ cparams->cipher_algo == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
+ op->sym->cipher.data.offset = ipdata_offset << 3;
+ op->sym->cipher.data.length = data_len << 3;
+ } else {
+ op->sym->cipher.data.offset = ipdata_offset;
+ op->sym->cipher.data.length = data_len;
+ }
+ }
+
+ if (cparams->do_aead) {
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ IV_OFFSET);
+		/*
+		 * Copy the IV at the end of the crypto operation.
+		 * If doing AES-CCM, the nonce is copied one byte
+		 * after the start of the IV field.
+		 */
+ if (cparams->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+ rte_memcpy(iv_ptr + 1, cparams->aead_iv.data,
+ cparams->aead_iv.length);
+ else
+ rte_memcpy(iv_ptr, cparams->aead_iv.data,
+ cparams->aead_iv.length);
+
+ op->sym->aead.data.offset = ipdata_offset;
+ op->sym->aead.data.length = data_len;
+
+ if (!cparams->hash_verify) {
+ /* Append space for digest to end of packet */
+ op->sym->aead.digest.data = (uint8_t *)rte_pktmbuf_append(m,
+ cparams->digest_length);
+ } else {
+ op->sym->aead.digest.data = rte_pktmbuf_mtod(m,
+ uint8_t *) + ipdata_offset + data_len;
+ }
+
+ op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
+ rte_pktmbuf_pkt_len(m) - cparams->digest_length);
+
+ if (cparams->aad.length) {
+ op->sym->aead.aad.data = cparams->aad.data;
+ op->sym->aead.aad.phys_addr = cparams->aad.phys_addr;
+ }
+ }
+
+ op->sym->m_src = m;
+
+ return l2fwd_crypto_enqueue(op, cparams);
+}
+
+
+/* Send the burst of packets on an output interface */
+static int
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
+ uint16_t port)
+{
+ struct rte_mbuf **pkt_buffer;
+ unsigned ret;
+
+ pkt_buffer = (struct rte_mbuf **)qconf->pkt_buf[port].buffer;
+
+ ret = rte_eth_tx_burst(port, 0, pkt_buffer, (uint16_t)n);
+ port_statistics[port].tx += ret;
+ if (unlikely(ret < n)) {
+ port_statistics[port].dropped += (n - ret);
+ do {
+ rte_pktmbuf_free(pkt_buffer[ret]);
+ } while (++ret < n);
+ }
+
+ return 0;
+}
+
+/* Buffer a packet for TX; the buffer is flushed once MAX_PKT_BURST packets
+ * accumulate, or by the periodic drain in the main loop */
+static int
+l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
+{
+ unsigned lcore_id, len;
+ struct lcore_queue_conf *qconf;
+
+ lcore_id = rte_lcore_id();
+
+ qconf = &lcore_queue_conf[lcore_id];
+ len = qconf->pkt_buf[port].len;
+ qconf->pkt_buf[port].buffer[len] = m;
+ len++;
+
+ /* enough pkts to be sent */
+ if (unlikely(len == MAX_PKT_BURST)) {
+ l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
+ len = 0;
+ }
+
+ qconf->pkt_buf[port].len = len;
+ return 0;
+}
+
+static void
+l2fwd_mac_updating(struct rte_mbuf *m, uint16_t dest_portid)
+{
+ struct rte_ether_hdr *eth;
+ void *tmp;
+
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+ /* 02:00:00:00:00:xx */
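+	/* On a little-endian CPU this stores 0x02 in byte 0 and the
+	 * destination port id in byte 5 of the destination MAC. */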
+ tmp = &eth->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
+
+ /* src addr */
+ rte_ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], &eth->s_addr);
+}
+
+static void
+l2fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
+ struct l2fwd_crypto_options *options)
+{
+ uint16_t dst_port;
+
+ dst_port = l2fwd_dst_ports[portid];
+
+ if (options->mac_updating)
+ l2fwd_mac_updating(m, dst_port);
+
+ l2fwd_send_packet(m, dst_port);
+}
+
+/** Generate random key */
+static void
+generate_random_key(uint8_t *key, unsigned length)
+{
+ int fd;
+ int ret;
+
+ fd = open("/dev/urandom", O_RDONLY);
+ if (fd < 0)
+ rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
+
+ ret = read(fd, key, length);
+ close(fd);
+
+ if (ret != (signed)length)
+ rte_exit(EXIT_FAILURE, "Failed to generate random key\n");
+}
+
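+/*
+ * Build the transform chain for the selected mode (AEAD, cipher->auth,
+ * auth->cipher, or a single cipher/auth transform), then create and
+ * initialize a symmetric session from the socket-local session pools.
+ */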
+static struct rte_cryptodev_sym_session *
+initialize_crypto_session(struct l2fwd_crypto_options *options, uint8_t cdev_id)
+{
+ struct rte_crypto_sym_xform *first_xform;
+ struct rte_cryptodev_sym_session *session;
+ int retval = rte_cryptodev_socket_id(cdev_id);
+
+ if (retval < 0)
+ return NULL;
+
+ uint8_t socket_id = (uint8_t) retval;
+
+ if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
+ first_xform = &options->aead_xform;
+ } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH) {
+ first_xform = &options->cipher_xform;
+ first_xform->next = &options->auth_xform;
+ } else if (options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER) {
+ first_xform = &options->auth_xform;
+ first_xform->next = &options->cipher_xform;
+ } else if (options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
+ first_xform = &options->cipher_xform;
+ } else {
+ first_xform = &options->auth_xform;
+ }
+
+ session = rte_cryptodev_sym_session_create(
+ session_pool_socket[socket_id].sess_mp);
+ if (session == NULL)
+ return NULL;
+
+ if (rte_cryptodev_sym_session_init(cdev_id, session,
+ first_xform,
+ session_pool_socket[socket_id].priv_mp) < 0)
+ return NULL;
+
+ return session;
+}
+
+static void
+l2fwd_crypto_options_print(struct l2fwd_crypto_options *options);
+
+/* main processing loop */
+static void
+l2fwd_main_loop(struct l2fwd_crypto_options *options)
+{
+ struct rte_mbuf *m, *pkts_burst[MAX_PKT_BURST];
+ struct rte_crypto_op *ops_burst[MAX_PKT_BURST];
+
+ unsigned lcore_id = rte_lcore_id();
+ uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
+ unsigned int i, j, nb_rx, len;
+ uint16_t portid;
+ struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
+ US_PER_S * BURST_TX_DRAIN_US;
+ struct l2fwd_crypto_params *cparams;
+ struct l2fwd_crypto_params port_cparams[qconf->nb_crypto_devs];
+ struct rte_cryptodev_sym_session *session;
+
+ if (qconf->nb_rx_ports == 0) {
+ RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->nb_rx_ports; i++) {
+
+ portid = qconf->rx_port_list[i];
+ RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
+ portid);
+ }
+
+ for (i = 0; i < qconf->nb_crypto_devs; i++) {
+ port_cparams[i].do_cipher = 0;
+ port_cparams[i].do_hash = 0;
+ port_cparams[i].do_aead = 0;
+
+ switch (options->xform_chain) {
+ case L2FWD_CRYPTO_AEAD:
+ port_cparams[i].do_aead = 1;
+ break;
+ case L2FWD_CRYPTO_CIPHER_HASH:
+ case L2FWD_CRYPTO_HASH_CIPHER:
+ port_cparams[i].do_cipher = 1;
+ port_cparams[i].do_hash = 1;
+ break;
+ case L2FWD_CRYPTO_HASH_ONLY:
+ port_cparams[i].do_hash = 1;
+ break;
+ case L2FWD_CRYPTO_CIPHER_ONLY:
+ port_cparams[i].do_cipher = 1;
+ break;
+ }
+
+ port_cparams[i].dev_id = qconf->cryptodev_list[i];
+ port_cparams[i].qp_id = 0;
+
+ port_cparams[i].block_size = options->block_size;
+
+ if (port_cparams[i].do_hash) {
+ port_cparams[i].auth_iv.data = options->auth_iv.data;
+ port_cparams[i].auth_iv.length = options->auth_iv.length;
+ if (!options->auth_iv_param)
+ generate_random_key(port_cparams[i].auth_iv.data,
+ port_cparams[i].auth_iv.length);
+ if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ port_cparams[i].hash_verify = 1;
+ else
+ port_cparams[i].hash_verify = 0;
+
+ port_cparams[i].auth_algo = options->auth_xform.auth.algo;
+ port_cparams[i].digest_length =
+ options->auth_xform.auth.digest_length;
+ /* Set IV parameters */
+ if (options->auth_iv.length) {
+ options->auth_xform.auth.iv.offset =
+ IV_OFFSET + options->cipher_iv.length;
+ options->auth_xform.auth.iv.length =
+ options->auth_iv.length;
+ }
+ }
+
+ if (port_cparams[i].do_aead) {
+ port_cparams[i].aead_iv.data = options->aead_iv.data;
+ port_cparams[i].aead_iv.length = options->aead_iv.length;
+ if (!options->aead_iv_param)
+ generate_random_key(port_cparams[i].aead_iv.data,
+ port_cparams[i].aead_iv.length);
+ port_cparams[i].aead_algo = options->aead_xform.aead.algo;
+ port_cparams[i].digest_length =
+ options->aead_xform.aead.digest_length;
+ if (options->aead_xform.aead.aad_length) {
+ port_cparams[i].aad.data = options->aad.data;
+ port_cparams[i].aad.phys_addr = options->aad.phys_addr;
+ port_cparams[i].aad.length = options->aad.length;
+ if (!options->aad_param)
+ generate_random_key(port_cparams[i].aad.data,
+ port_cparams[i].aad.length);
+ /*
+			 * If doing AES-CCM, the first 18 bytes have to be
+			 * reserved, and the actual AAD starts at byte 18
+ */
+ if (port_cparams[i].aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+ memmove(port_cparams[i].aad.data + 18,
+ port_cparams[i].aad.data,
+ port_cparams[i].aad.length);
+
+ } else
+ port_cparams[i].aad.length = 0;
+
+ if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_DECRYPT)
+ port_cparams[i].hash_verify = 1;
+ else
+ port_cparams[i].hash_verify = 0;
+
+ /* Set IV parameters */
+ options->aead_xform.aead.iv.offset = IV_OFFSET;
+ options->aead_xform.aead.iv.length = options->aead_iv.length;
+ }
+
+ if (port_cparams[i].do_cipher) {
+ port_cparams[i].cipher_iv.data = options->cipher_iv.data;
+ port_cparams[i].cipher_iv.length = options->cipher_iv.length;
+ if (!options->cipher_iv_param)
+ generate_random_key(port_cparams[i].cipher_iv.data,
+ port_cparams[i].cipher_iv.length);
+
+ port_cparams[i].cipher_algo = options->cipher_xform.cipher.algo;
+ /* Set IV parameters */
+ options->cipher_xform.cipher.iv.offset = IV_OFFSET;
+ options->cipher_xform.cipher.iv.length =
+ options->cipher_iv.length;
+ }
+
+ session = initialize_crypto_session(options,
+ port_cparams[i].dev_id);
+ if (session == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to initialize crypto session\n");
+
+ port_cparams[i].session = session;
+
+ RTE_LOG(INFO, L2FWD, " -- lcoreid=%u cryptoid=%u\n", lcore_id,
+ port_cparams[i].dev_id);
+ }
+
+ l2fwd_crypto_options_print(options);
+
+ /*
+	 * Initialize the previous TSC timestamp before the loop
+	 * to avoid printing the port statistics immediately,
+	 * so the user can see the crypto information first.
+ */
+ prev_tsc = rte_rdtsc();
+ while (1) {
+
+ cur_tsc = rte_rdtsc();
+
+ /*
+ * Crypto device/TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ /* Enqueue all crypto ops remaining in buffers */
+ for (i = 0; i < qconf->nb_crypto_devs; i++) {
+ cparams = &port_cparams[i];
+ len = qconf->op_buf[cparams->dev_id].len;
+ l2fwd_crypto_send_burst(qconf, len, cparams);
+ qconf->op_buf[cparams->dev_id].len = 0;
+ }
+ /* Transmit all packets remaining in buffers */
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ if (qconf->pkt_buf[portid].len == 0)
+ continue;
+ l2fwd_send_burst(&lcore_queue_conf[lcore_id],
+ qconf->pkt_buf[portid].len,
+ portid);
+ qconf->pkt_buf[portid].len = 0;
+ }
+
+ /* if timer is enabled */
+ if (timer_period > 0) {
+
+ /* advance the timer */
+ timer_tsc += diff_tsc;
+
+ /* if timer has reached its timeout */
+ if (unlikely(timer_tsc >=
+ (uint64_t)timer_period)) {
+
+ /* do this only on master core */
+ if (lcore_id == rte_get_master_lcore()
+ && options->refresh_period) {
+ print_stats();
+ timer_tsc = 0;
+ }
+ }
+ }
+
+ prev_tsc = cur_tsc;
+ }
+
+ /*
+ * Read packet from RX queues
+ */
+ for (i = 0; i < qconf->nb_rx_ports; i++) {
+ portid = qconf->rx_port_list[i];
+
+ cparams = &port_cparams[i];
+
+ nb_rx = rte_eth_rx_burst(portid, 0,
+ pkts_burst, MAX_PKT_BURST);
+
+ port_statistics[portid].rx += nb_rx;
+
+ if (nb_rx) {
+ /*
+				 * If we can't allocate crypto ops for the whole
+				 * burst, drop its packets and fall through to the
+				 * dequeue stage to free in-flight offload structs
+ */
+ if (rte_crypto_op_bulk_alloc(
+ l2fwd_crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ ops_burst, nb_rx) !=
+ nb_rx) {
+ for (j = 0; j < nb_rx; j++)
+ rte_pktmbuf_free(pkts_burst[j]);
+
+ nb_rx = 0;
+ }
+
+			/* Enqueue packets on the Crypto device */
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
+
+ l2fwd_simple_crypto_enqueue(m,
+ ops_burst[j], cparams);
+ }
+ }
+
+ /* Dequeue packets from Crypto device */
+ do {
+ nb_rx = rte_cryptodev_dequeue_burst(
+ cparams->dev_id, cparams->qp_id,
+ ops_burst, MAX_PKT_BURST);
+
+ crypto_statistics[cparams->dev_id].dequeued +=
+ nb_rx;
+
+ /* Forward crypto'd packets */
+ for (j = 0; j < nb_rx; j++) {
+ m = ops_burst[j]->sym->m_src;
+
+ rte_crypto_op_free(ops_burst[j]);
+ l2fwd_simple_forward(m, portid,
+ options);
+ }
+ } while (nb_rx == MAX_PKT_BURST);
+ }
+ }
+}
+
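+/* Per-lcore entry point: runs the crypto forwarding main loop */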
+static int
+l2fwd_launch_one_lcore(void *arg)
+{
+ l2fwd_main_loop((struct l2fwd_crypto_options *)arg);
+ return 0;
+}
+
+/* Display command line arguments usage */
+static void
+l2fwd_crypto_usage(const char *prgname)
+{
+ printf("%s [EAL options] --\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+		" -q NQ: number of queues (=ports) per lcore (default is 1)\n"
+		" -s: manage all ports from a single lcore\n"
+		" -T PERIOD: statistics will be refreshed every PERIOD seconds"
+ " (0 to disable, 10 default, 86400 maximum)\n"
+
+ " --cdev_type HW / SW / ANY\n"
+ " --chain HASH_CIPHER / CIPHER_HASH / CIPHER_ONLY /"
+ " HASH_ONLY / AEAD\n"
+
+ " --cipher_algo ALGO\n"
+ " --cipher_op ENCRYPT / DECRYPT\n"
+ " --cipher_key KEY (bytes separated with \":\")\n"
+ " --cipher_key_random_size SIZE: size of cipher key when generated randomly\n"
+ " --cipher_iv IV (bytes separated with \":\")\n"
+ " --cipher_iv_random_size SIZE: size of cipher IV when generated randomly\n"
+
+ " --auth_algo ALGO\n"
+ " --auth_op GENERATE / VERIFY\n"
+ " --auth_key KEY (bytes separated with \":\")\n"
+ " --auth_key_random_size SIZE: size of auth key when generated randomly\n"
+ " --auth_iv IV (bytes separated with \":\")\n"
+ " --auth_iv_random_size SIZE: size of auth IV when generated randomly\n"
+
+ " --aead_algo ALGO\n"
+ " --aead_op ENCRYPT / DECRYPT\n"
+ " --aead_key KEY (bytes separated with \":\")\n"
+ " --aead_key_random_size SIZE: size of AEAD key when generated randomly\n"
+ " --aead_iv IV (bytes separated with \":\")\n"
+ " --aead_iv_random_size SIZE: size of AEAD IV when generated randomly\n"
+ " --aad AAD (bytes separated with \":\")\n"
+ " --aad_random_size SIZE: size of AAD when generated randomly\n"
+
+ " --digest_size SIZE: size of digest to be generated/verified\n"
+
+ " --sessionless\n"
+ " --cryptodev_mask MASK: hexadecimal bitmask of crypto devices to configure\n"
+
+		" --[no-]mac-updating: Enable or disable MAC address updating (enabled by default)\n"
+ " When enabled:\n"
+ " - The source MAC address is replaced by the TX port MAC address\n"
+ " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n",
+ prgname);
+}
+
+/** Parse crypto device type command line argument */
+static int
+parse_cryptodev_type(enum cdev_type *type, char *optarg)
+{
+ if (strcmp("HW", optarg) == 0) {
+ *type = CDEV_TYPE_HW;
+ return 0;
+ } else if (strcmp("SW", optarg) == 0) {
+ *type = CDEV_TYPE_SW;
+ return 0;
+ } else if (strcmp("ANY", optarg) == 0) {
+ *type = CDEV_TYPE_ANY;
+ return 0;
+ }
+
+ return -1;
+}
+
+/** Parse crypto chain xform command line argument */
+static int
+parse_crypto_opt_chain(struct l2fwd_crypto_options *options, char *optarg)
+{
+ if (strcmp("CIPHER_HASH", optarg) == 0) {
+ options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
+ return 0;
+ } else if (strcmp("HASH_CIPHER", optarg) == 0) {
+ options->xform_chain = L2FWD_CRYPTO_HASH_CIPHER;
+ return 0;
+ } else if (strcmp("CIPHER_ONLY", optarg) == 0) {
+ options->xform_chain = L2FWD_CRYPTO_CIPHER_ONLY;
+ return 0;
+ } else if (strcmp("HASH_ONLY", optarg) == 0) {
+ options->xform_chain = L2FWD_CRYPTO_HASH_ONLY;
+ return 0;
+ } else if (strcmp("AEAD", optarg) == 0) {
+ options->xform_chain = L2FWD_CRYPTO_AEAD;
+ return 0;
+ }
+
+ return -1;
+}
+
+/** Parse crypto cipher algo option command line argument */
+static int
+parse_cipher_algo(enum rte_crypto_cipher_algorithm *algo, char *optarg)
+{
+ if (rte_cryptodev_get_cipher_algo_enum(algo, optarg) < 0) {
+ RTE_LOG(ERR, USER1, "Cipher algorithm specified "
+ "not supported!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/** Parse crypto cipher operation command line argument */
+static int
+parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
+{
+ if (strcmp("ENCRYPT", optarg) == 0) {
+ *op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ return 0;
+ } else if (strcmp("DECRYPT", optarg) == 0) {
+ *op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ return 0;
+ }
+
+ printf("Cipher operation not supported!\n");
+ return -1;
+}
+
+/** Parse bytes from command line argument */
+static int
+parse_bytes(uint8_t *data, char *input_arg, uint16_t max_size)
+{
+ unsigned byte_count;
+ char *token;
+
+ errno = 0;
+ for (byte_count = 0, token = strtok(input_arg, ":");
+ (byte_count < max_size) && (token != NULL);
+ token = strtok(NULL, ":")) {
+
+ int number = (int)strtol(token, NULL, 16);
+
+ if (errno == EINVAL || errno == ERANGE || number > 0xFF)
+ return -1;
+
+ data[byte_count++] = (uint8_t)number;
+ }
+
+ return byte_count;
+}
+
+/** Parse size parameter */
+static int
+parse_size(int *size, const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+	/* parse decimal string */
+ n = strtoul(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ n = 0;
+
+ if (n == 0) {
+ printf("invalid size\n");
+ return -1;
+ }
+
+ *size = n;
+ return 0;
+}
+
+/** Parse crypto authentication algorithm command line argument */
+static int
+parse_auth_algo(enum rte_crypto_auth_algorithm *algo, char *optarg)
+{
+ if (rte_cryptodev_get_auth_algo_enum(algo, optarg) < 0) {
+ RTE_LOG(ERR, USER1, "Authentication algorithm specified "
+ "not supported!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
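+/** Parse crypto authentication operation command line argument */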
+static int
+parse_auth_op(enum rte_crypto_auth_operation *op, char *optarg)
+{
+ if (strcmp("VERIFY", optarg) == 0) {
+ *op = RTE_CRYPTO_AUTH_OP_VERIFY;
+ return 0;
+ } else if (strcmp("GENERATE", optarg) == 0) {
+ *op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ return 0;
+ }
+
+ printf("Authentication operation specified not supported!\n");
+ return -1;
+}
+
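+/** Parse crypto AEAD algorithm command line argument */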
+static int
+parse_aead_algo(enum rte_crypto_aead_algorithm *algo, char *optarg)
+{
+ if (rte_cryptodev_get_aead_algo_enum(algo, optarg) < 0) {
+ RTE_LOG(ERR, USER1, "AEAD algorithm specified "
+ "not supported!\n");
+ return -1;
+ }
+
+ return 0;
+}
+
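+/** Parse crypto AEAD operation command line argument */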
+static int
+parse_aead_op(enum rte_crypto_aead_operation *op, char *optarg)
+{
+ if (strcmp("ENCRYPT", optarg) == 0) {
+ *op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ return 0;
+ } else if (strcmp("DECRYPT", optarg) == 0) {
+ *op = RTE_CRYPTO_AEAD_OP_DECRYPT;
+ return 0;
+ }
+
+ printf("AEAD operation specified not supported!\n");
+ return -1;
+}
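+
+/** Parse crypto device mask command line argument */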
+static int
+parse_cryptodev_mask(struct l2fwd_crypto_options *options,
+ const char *q_arg)
+{
+ char *end = NULL;
+ uint64_t pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(q_arg, &end, 16);
+	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ pm = 0;
+
+ options->cryptodev_mask = pm;
+ if (options->cryptodev_mask == 0) {
+ printf("invalid cryptodev_mask specified\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/** Parse long options */
+static int
+l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
+ struct option *lgopts, int option_index)
+{
+ int retval;
+
+ if (strcmp(lgopts[option_index].name, "cdev_type") == 0) {
+ retval = parse_cryptodev_type(&options->type, optarg);
+ if (retval == 0)
+ strlcpy(options->string_type, optarg, MAX_STR_LEN);
+ return retval;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "chain") == 0)
+ return parse_crypto_opt_chain(options, optarg);
+
+ /* Cipher options */
+ else if (strcmp(lgopts[option_index].name, "cipher_algo") == 0)
+ return parse_cipher_algo(&options->cipher_xform.cipher.algo,
+ optarg);
+
+ else if (strcmp(lgopts[option_index].name, "cipher_op") == 0)
+ return parse_cipher_op(&options->cipher_xform.cipher.op,
+ optarg);
+
+ else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
+ options->ckey_param = 1;
+ options->cipher_xform.cipher.key.length =
+ parse_bytes(options->cipher_key, optarg, MAX_KEY_SIZE);
+ if (options->cipher_xform.cipher.key.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "cipher_key_random_size") == 0)
+ return parse_size(&options->ckey_random_size, optarg);
+
+ else if (strcmp(lgopts[option_index].name, "cipher_iv") == 0) {
+ options->cipher_iv_param = 1;
+ options->cipher_iv.length =
+ parse_bytes(options->cipher_iv.data, optarg, MAX_IV_SIZE);
+ if (options->cipher_iv.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "cipher_iv_random_size") == 0)
+ return parse_size(&options->cipher_iv_random_size, optarg);
+
+ /* Authentication options */
+ else if (strcmp(lgopts[option_index].name, "auth_algo") == 0) {
+ return parse_auth_algo(&options->auth_xform.auth.algo,
+ optarg);
+ }
+
+ else if (strcmp(lgopts[option_index].name, "auth_op") == 0)
+ return parse_auth_op(&options->auth_xform.auth.op,
+ optarg);
+
+ else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
+ options->akey_param = 1;
+ options->auth_xform.auth.key.length =
+ parse_bytes(options->auth_key, optarg, MAX_KEY_SIZE);
+ if (options->auth_xform.auth.key.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "auth_key_random_size") == 0) {
+ return parse_size(&options->akey_random_size, optarg);
+ }
+
+ else if (strcmp(lgopts[option_index].name, "auth_iv") == 0) {
+ options->auth_iv_param = 1;
+ options->auth_iv.length =
+ parse_bytes(options->auth_iv.data, optarg, MAX_IV_SIZE);
+ if (options->auth_iv.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "auth_iv_random_size") == 0)
+ return parse_size(&options->auth_iv_random_size, optarg);
+
+ /* AEAD options */
+ else if (strcmp(lgopts[option_index].name, "aead_algo") == 0) {
+ return parse_aead_algo(&options->aead_xform.aead.algo,
+ optarg);
+ }
+
+ else if (strcmp(lgopts[option_index].name, "aead_op") == 0)
+ return parse_aead_op(&options->aead_xform.aead.op,
+ optarg);
+
+ else if (strcmp(lgopts[option_index].name, "aead_key") == 0) {
+ options->aead_key_param = 1;
+ options->aead_xform.aead.key.length =
+ parse_bytes(options->aead_key, optarg, MAX_KEY_SIZE);
+ if (options->aead_xform.aead.key.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "aead_key_random_size") == 0)
+ return parse_size(&options->aead_key_random_size, optarg);
+
+ else if (strcmp(lgopts[option_index].name, "aead_iv") == 0) {
+ options->aead_iv_param = 1;
+ options->aead_iv.length =
+ parse_bytes(options->aead_iv.data, optarg, MAX_IV_SIZE);
+ if (options->aead_iv.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "aead_iv_random_size") == 0)
+ return parse_size(&options->aead_iv_random_size, optarg);
+
+ else if (strcmp(lgopts[option_index].name, "aad") == 0) {
+ options->aad_param = 1;
+ options->aad.length =
+ parse_bytes(options->aad.data, optarg, MAX_AAD_SIZE);
+ if (options->aad.length > 0)
+ return 0;
+ else
+ return -1;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "aad_random_size") == 0) {
+ return parse_size(&options->aad_random_size, optarg);
+ }
+
+ else if (strcmp(lgopts[option_index].name, "digest_size") == 0) {
+ return parse_size(&options->digest_size, optarg);
+ }
+
+ else if (strcmp(lgopts[option_index].name, "sessionless") == 0) {
+ options->sessionless = 1;
+ return 0;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "cryptodev_mask") == 0)
+ return parse_cryptodev_mask(options, optarg);
+
+ else if (strcmp(lgopts[option_index].name, "mac-updating") == 0) {
+ options->mac_updating = 1;
+ return 0;
+ }
+
+ else if (strcmp(lgopts[option_index].name, "no-mac-updating") == 0) {
+ options->mac_updating = 0;
+ return 0;
+ }
+
+ return -1;
+}
+
+/** Parse port mask */
+static int
+l2fwd_crypto_parse_portmask(struct l2fwd_crypto_options *options,
+ const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(q_arg, &end, 16);
+	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ pm = 0;
+
+ options->portmask = pm;
+ if (options->portmask == 0) {
+ printf("invalid portmask specified\n");
+ return -1;
+ }
+
+	return 0;
+}
+
+/** Parse number of queues */
+static int
+l2fwd_crypto_parse_nqueue(struct l2fwd_crypto_options *options,
+ const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+	/* parse decimal string */
+ n = strtoul(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ n = 0;
+ else if (n >= MAX_RX_QUEUE_PER_LCORE)
+ n = 0;
+
+ options->nb_ports_per_lcore = n;
+ if (options->nb_ports_per_lcore == 0) {
+ printf("invalid number of ports selected\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/** Parse timer period */
+static int
+l2fwd_crypto_parse_timer_period(struct l2fwd_crypto_options *options,
+ const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+ /* parse number string */
+ n = (unsigned)strtol(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ n = 0;
+
+ if (n >= MAX_TIMER_PERIOD) {
+		printf("Warning: refresh period specified (%lu) is greater than "
+			"max value %lu, using max value\n",
+ n, MAX_TIMER_PERIOD);
+ n = MAX_TIMER_PERIOD;
+ }
+
+ options->refresh_period = n * 1000 * TIMER_MILLISECOND;
+
+ return 0;
+}
+
+/** Generate default options for application */
+static void
+l2fwd_crypto_default_options(struct l2fwd_crypto_options *options)
+{
+ options->portmask = 0xffffffff;
+ options->nb_ports_per_lcore = 1;
+ options->refresh_period = 10000;
+ options->single_lcore = 0;
+ options->sessionless = 0;
+
+ options->xform_chain = L2FWD_CRYPTO_CIPHER_HASH;
+
+ /* Cipher Data */
+ options->cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ options->cipher_xform.next = NULL;
+ options->ckey_param = 0;
+ options->ckey_random_size = -1;
+ options->cipher_xform.cipher.key.length = 0;
+ options->cipher_iv_param = 0;
+ options->cipher_iv_random_size = -1;
+ options->cipher_iv.length = 0;
+
+ options->cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ options->cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+
+ /* Authentication Data */
+ options->auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ options->auth_xform.next = NULL;
+ options->akey_param = 0;
+ options->akey_random_size = -1;
+ options->auth_xform.auth.key.length = 0;
+ options->auth_iv_param = 0;
+ options->auth_iv_random_size = -1;
+ options->auth_iv.length = 0;
+
+ options->auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ options->auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* AEAD Data */
+ options->aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ options->aead_xform.next = NULL;
+ options->aead_key_param = 0;
+ options->aead_key_random_size = -1;
+ options->aead_xform.aead.key.length = 0;
+ options->aead_iv_param = 0;
+ options->aead_iv_random_size = -1;
+ options->aead_iv.length = 0;
+
+ options->aead_xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ options->aead_xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+
+ options->aad_param = 0;
+ options->aad_random_size = -1;
+ options->aad.length = 0;
+
+ options->digest_size = -1;
+
+ options->type = CDEV_TYPE_ANY;
+ options->cryptodev_mask = UINT64_MAX;
+
+ options->mac_updating = 1;
+}
+
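+/* Display the cipher algorithm, key and IV in use */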
+static void
+display_cipher_info(struct l2fwd_crypto_options *options)
+{
+	printf("\n---- Cipher information ----\n");
+ printf("Algorithm: %s\n",
+ rte_crypto_cipher_algorithm_strings[options->cipher_xform.cipher.algo]);
+ rte_hexdump(stdout, "Cipher key:",
+ options->cipher_xform.cipher.key.data,
+ options->cipher_xform.cipher.key.length);
+ rte_hexdump(stdout, "IV:", options->cipher_iv.data, options->cipher_iv.length);
+}
+
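+/* Display the authentication algorithm, key and IV in use */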
+static void
+display_auth_info(struct l2fwd_crypto_options *options)
+{
+	printf("\n---- Authentication information ----\n");
+ printf("Algorithm: %s\n",
+ rte_crypto_auth_algorithm_strings[options->auth_xform.auth.algo]);
+ rte_hexdump(stdout, "Auth key:",
+ options->auth_xform.auth.key.data,
+ options->auth_xform.auth.key.length);
+ rte_hexdump(stdout, "IV:", options->auth_iv.data, options->auth_iv.length);
+}
+
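+/* Display the AEAD algorithm, key, IV and AAD in use */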
+static void
+display_aead_info(struct l2fwd_crypto_options *options)
+{
+	printf("\n---- AEAD information ----\n");
+ printf("Algorithm: %s\n",
+ rte_crypto_aead_algorithm_strings[options->aead_xform.aead.algo]);
+ rte_hexdump(stdout, "AEAD key:",
+ options->aead_xform.aead.key.data,
+ options->aead_xform.aead.key.length);
+ rte_hexdump(stdout, "IV:", options->aead_iv.data, options->aead_iv.length);
+ rte_hexdump(stdout, "AAD:", options->aad.data, options->aad.length);
+}
+
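+/* Print the application options and the selected crypto chain */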
+static void
+l2fwd_crypto_options_print(struct l2fwd_crypto_options *options)
+{
+ char string_cipher_op[MAX_STR_LEN];
+ char string_auth_op[MAX_STR_LEN];
+ char string_aead_op[MAX_STR_LEN];
+
+ if (options->cipher_xform.cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ strcpy(string_cipher_op, "Encrypt");
+ else
+ strcpy(string_cipher_op, "Decrypt");
+
+ if (options->auth_xform.auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ strcpy(string_auth_op, "Auth generate");
+ else
+ strcpy(string_auth_op, "Auth verify");
+
+ if (options->aead_xform.aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ strcpy(string_aead_op, "Authenticated encryption");
+ else
+ strcpy(string_aead_op, "Authenticated decryption");
+
+	printf("Options:-\n");
+ printf("portmask: %x\n", options->portmask);
+ printf("ports per lcore: %u\n", options->nb_ports_per_lcore);
+	printf("refresh period: %u\n", options->refresh_period);
+ printf("single lcore mode: %s\n",
+ options->single_lcore ? "enabled" : "disabled");
+ printf("stats_printing: %s\n",
+ options->refresh_period == 0 ? "disabled" : "enabled");
+
+ printf("sessionless crypto: %s\n",
+ options->sessionless ? "enabled" : "disabled");
+
+ if (options->ckey_param && (options->ckey_random_size != -1))
+ printf("Cipher key already parsed, ignoring size of random key\n");
+
+ if (options->akey_param && (options->akey_random_size != -1))
+ printf("Auth key already parsed, ignoring size of random key\n");
+
+ if (options->cipher_iv_param && (options->cipher_iv_random_size != -1))
+ printf("Cipher IV already parsed, ignoring size of random IV\n");
+
+ if (options->auth_iv_param && (options->auth_iv_random_size != -1))
+ printf("Auth IV already parsed, ignoring size of random IV\n");
+
+ if (options->aad_param && (options->aad_random_size != -1))
+ printf("AAD already parsed, ignoring size of random AAD\n");
+
+ printf("\nCrypto chain: ");
+ switch (options->xform_chain) {
+ case L2FWD_CRYPTO_AEAD:
+ printf("Input --> %s --> Output\n", string_aead_op);
+ display_aead_info(options);
+ break;
+ case L2FWD_CRYPTO_CIPHER_HASH:
+ printf("Input --> %s --> %s --> Output\n",
+ string_cipher_op, string_auth_op);
+ display_cipher_info(options);
+ display_auth_info(options);
+ break;
+ case L2FWD_CRYPTO_HASH_CIPHER:
+ printf("Input --> %s --> %s --> Output\n",
+ string_auth_op, string_cipher_op);
+ display_cipher_info(options);
+ display_auth_info(options);
+ break;
+ case L2FWD_CRYPTO_HASH_ONLY:
+ printf("Input --> %s --> Output\n", string_auth_op);
+ display_auth_info(options);
+ break;
+ case L2FWD_CRYPTO_CIPHER_ONLY:
+ printf("Input --> %s --> Output\n", string_cipher_op);
+ display_cipher_info(options);
+ break;
+ }
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
+ int argc, char **argv)
+{
+ int opt, retval, option_index;
+ char **argvopt = argv, *prgname = argv[0];
+
+ static struct option lgopts[] = {
+ { "cdev_type", required_argument, 0, 0 },
+ { "chain", required_argument, 0, 0 },
+
+ { "cipher_algo", required_argument, 0, 0 },
+ { "cipher_op", required_argument, 0, 0 },
+ { "cipher_key", required_argument, 0, 0 },
+ { "cipher_key_random_size", required_argument, 0, 0 },
+ { "cipher_iv", required_argument, 0, 0 },
+ { "cipher_iv_random_size", required_argument, 0, 0 },
+
+ { "auth_algo", required_argument, 0, 0 },
+ { "auth_op", required_argument, 0, 0 },
+ { "auth_key", required_argument, 0, 0 },
+ { "auth_key_random_size", required_argument, 0, 0 },
+ { "auth_iv", required_argument, 0, 0 },
+ { "auth_iv_random_size", required_argument, 0, 0 },
+
+ { "aead_algo", required_argument, 0, 0 },
+ { "aead_op", required_argument, 0, 0 },
+ { "aead_key", required_argument, 0, 0 },
+ { "aead_key_random_size", required_argument, 0, 0 },
+ { "aead_iv", required_argument, 0, 0 },
+ { "aead_iv_random_size", required_argument, 0, 0 },
+
+ { "aad", required_argument, 0, 0 },
+ { "aad_random_size", required_argument, 0, 0 },
+
+ { "digest_size", required_argument, 0, 0 },
+
+ { "sessionless", no_argument, 0, 0 },
+ { "cryptodev_mask", required_argument, 0, 0},
+
+ { "mac-updating", no_argument, 0, 0},
+ { "no-mac-updating", no_argument, 0, 0},
+
+ { NULL, 0, 0, 0 }
+ };
+
+ l2fwd_crypto_default_options(options);
+
+ while ((opt = getopt_long(argc, argvopt, "p:q:sT:", lgopts,
+ &option_index)) != EOF) {
+ switch (opt) {
+ /* long options */
+ case 0:
+ retval = l2fwd_crypto_parse_args_long_options(options,
+ lgopts, option_index);
+ if (retval < 0) {
+ l2fwd_crypto_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* portmask */
+ case 'p':
+ retval = l2fwd_crypto_parse_portmask(options, optarg);
+ if (retval < 0) {
+ l2fwd_crypto_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* nqueue */
+ case 'q':
+ retval = l2fwd_crypto_parse_nqueue(options, optarg);
+ if (retval < 0) {
+ l2fwd_crypto_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* single */
+ case 's':
+ options->single_lcore = 1;
+
+ break;
+
+ /* timer period */
+ case 'T':
+ retval = l2fwd_crypto_parse_timer_period(options,
+ optarg);
+ if (retval < 0) {
+ l2fwd_crypto_usage(prgname);
+ return -1;
+ }
+ break;
+
+ default:
+ l2fwd_crypto_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ retval = optind-1;
+ optind = 1; /* reset getopt lib */
+
+ return retval;
+}
+
+/* Check the link status of all ports in up to 9s, and print them finally */
+static void
+check_all_ports_link_status(uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+ int ret;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ all_ports_up = 1;
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ ret = rte_eth_link_get_nowait(portid, &link);
+ if (ret < 0) {
+ all_ports_up = 0;
+ if (print_flag == 1)
+ printf("Port %u link get failed: %s\n",
+ portid, rte_strerror(-ret));
+ continue;
+ }
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex"));
+ else
+ printf("Port %d Link Down\n", portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+/* Check if device has to be HW/SW or any */
+static int
+check_type(const struct l2fwd_crypto_options *options,
+ const struct rte_cryptodev_info *dev_info)
+{
+ if (options->type == CDEV_TYPE_HW &&
+ (dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
+ return 0;
+ if (options->type == CDEV_TYPE_SW &&
+ !(dev_info->feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED))
+ return 0;
+ if (options->type == CDEV_TYPE_ANY)
+ return 0;
+
+ return -1;
+}
+
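+/* Find a device capability matching the requested cipher algorithm */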
+static const struct rte_cryptodev_capabilities *
+check_device_support_cipher_algo(const struct l2fwd_crypto_options *options,
+ const struct rte_cryptodev_info *dev_info,
+ uint8_t cdev_id)
+{
+ unsigned int i = 0;
+ const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
+ enum rte_crypto_cipher_algorithm cap_cipher_algo;
+ enum rte_crypto_cipher_algorithm opt_cipher_algo =
+ options->cipher_xform.cipher.algo;
+
+ while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ cap_cipher_algo = cap->sym.cipher.algo;
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (cap_cipher_algo == opt_cipher_algo) {
+ if (check_type(options, dev_info) == 0)
+ break;
+ }
+ }
+ cap = &dev_info->capabilities[++i];
+ }
+
+ if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ printf("Algorithm %s not supported by cryptodev %u"
+ " or device not of preferred type (%s)\n",
+ rte_crypto_cipher_algorithm_strings[opt_cipher_algo],
+ cdev_id,
+ options->string_type);
+ return NULL;
+ }
+
+ return cap;
+}
+
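+/* Find a device capability matching the requested auth algorithm */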
+static const struct rte_cryptodev_capabilities *
+check_device_support_auth_algo(const struct l2fwd_crypto_options *options,
+ const struct rte_cryptodev_info *dev_info,
+ uint8_t cdev_id)
+{
+ unsigned int i = 0;
+ const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
+ enum rte_crypto_auth_algorithm cap_auth_algo;
+ enum rte_crypto_auth_algorithm opt_auth_algo =
+ options->auth_xform.auth.algo;
+
+ while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ cap_auth_algo = cap->sym.auth.algo;
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (cap_auth_algo == opt_auth_algo) {
+ if (check_type(options, dev_info) == 0)
+ break;
+ }
+ }
+ cap = &dev_info->capabilities[++i];
+ }
+
+ if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ printf("Algorithm %s not supported by cryptodev %u"
+ " or device not of preferred type (%s)\n",
+ rte_crypto_auth_algorithm_strings[opt_auth_algo],
+ cdev_id,
+ options->string_type);
+ return NULL;
+ }
+
+ return cap;
+}
+
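+/* Find a device capability matching the requested AEAD algorithm */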
+static const struct rte_cryptodev_capabilities *
+check_device_support_aead_algo(const struct l2fwd_crypto_options *options,
+ const struct rte_cryptodev_info *dev_info,
+ uint8_t cdev_id)
+{
+ unsigned int i = 0;
+ const struct rte_cryptodev_capabilities *cap = &dev_info->capabilities[0];
+ enum rte_crypto_aead_algorithm cap_aead_algo;
+ enum rte_crypto_aead_algorithm opt_aead_algo =
+ options->aead_xform.aead.algo;
+
+ while (cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ cap_aead_algo = cap->sym.aead.algo;
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (cap_aead_algo == opt_aead_algo) {
+ if (check_type(options, dev_info) == 0)
+ break;
+ }
+ }
+ cap = &dev_info->capabilities[++i];
+ }
+
+ if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ printf("Algorithm %s not supported by cryptodev %u"
+ " or device not of preferred type (%s)\n",
+ rte_crypto_aead_algorithm_strings[opt_aead_algo],
+ cdev_id,
+ options->string_type);
+ return NULL;
+ }
+
+ return cap;
+}
+
+/* Check if the device is enabled by cryptodev_mask */
+static int
+check_cryptodev_mask(struct l2fwd_crypto_options *options,
+ uint8_t cdev_id)
+{
+ if (options->cryptodev_mask & (1 << cdev_id))
+ return 0;
+
+ return -1;
+}
+
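+/* Check if a length is within a min/max range, stepping by increment */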
+static inline int
+check_supported_size(uint16_t length, uint16_t min, uint16_t max,
+ uint16_t increment)
+{
+ uint16_t supp_size;
+
+ /* Single value */
+ if (increment == 0) {
+ if (length == min)
+ return 0;
+ else
+ return -1;
+ }
+
+ /* Range of values */
+ for (supp_size = min; supp_size <= max; supp_size += increment) {
+ if (length == supp_size)
+ return 0;
+ }
+
+ return -1;
+}
+
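+/* Check provided or randomly generated IV length against device limits */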
+static int
+check_iv_param(const struct rte_crypto_param_range *iv_range_size,
+ unsigned int iv_param, int iv_random_size,
+ uint16_t iv_length)
+{
+ /*
+ * Check if length of provided IV is supported
+ * by the algorithm chosen.
+ */
+ if (iv_param) {
+ if (check_supported_size(iv_length,
+ iv_range_size->min,
+ iv_range_size->max,
+ iv_range_size->increment)
+ != 0)
+ return -1;
+ /*
+ * Check if length of IV to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (iv_random_size != -1) {
+ if (check_supported_size(iv_random_size,
+ iv_range_size->min,
+ iv_range_size->max,
+ iv_range_size->increment)
+ != 0)
+ return -1;
+ }
+
+ return 0;
+}
+
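+/* Check that a crypto device supports the requested algorithms and sizes */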
+static int
+check_capabilities(struct l2fwd_crypto_options *options, uint8_t cdev_id)
+{
+ struct rte_cryptodev_info dev_info;
+ const struct rte_cryptodev_capabilities *cap;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+
+ /* Set AEAD parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
+ /* Check if device supports AEAD algo */
+ cap = check_device_support_aead_algo(options, &dev_info,
+ cdev_id);
+ if (cap == NULL)
+ return -1;
+
+ if (check_iv_param(&cap->sym.aead.iv_size,
+ options->aead_iv_param,
+ options->aead_iv_random_size,
+ options->aead_iv.length) != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support IV length\n",
+ cdev_id);
+ return -1;
+ }
+
+ /*
+ * Check if length of provided AEAD key is supported
+ * by the algorithm chosen.
+ */
+ if (options->aead_key_param) {
+ if (check_supported_size(
+ options->aead_xform.aead.key.length,
+ cap->sym.aead.key_size.min,
+ cap->sym.aead.key_size.max,
+ cap->sym.aead.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "AEAD key length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of the aead key to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (options->aead_key_random_size != -1) {
+ if (check_supported_size(options->aead_key_random_size,
+ cap->sym.aead.key_size.min,
+ cap->sym.aead.key_size.max,
+ cap->sym.aead.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "AEAD key length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+
+
+ * Check if length of provided AAD is supported
+ * by the algorithm chosen.
+ */
+ if (options->aad_param) {
+ if (check_supported_size(options->aad.length,
+ cap->sym.aead.aad_size.min,
+ cap->sym.aead.aad_size.max,
+ cap->sym.aead.aad_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "AAD length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of AAD to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (options->aad_random_size != -1) {
+ if (check_supported_size(options->aad_random_size,
+ cap->sym.aead.aad_size.min,
+ cap->sym.aead.aad_size.max,
+ cap->sym.aead.aad_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "AAD length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+
+ /* Check if digest size is supported by the algorithm. */
+ if (options->digest_size != -1) {
+ if (check_supported_size(options->digest_size,
+ cap->sym.aead.digest_size.min,
+ cap->sym.aead.digest_size.max,
+ cap->sym.aead.digest_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "digest length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+ }
+
+ /* Set cipher parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
+ options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
+ /* Check if device supports cipher algo */
+ cap = check_device_support_cipher_algo(options, &dev_info,
+ cdev_id);
+ if (cap == NULL)
+ return -1;
+
+ if (check_iv_param(&cap->sym.cipher.iv_size,
+ options->cipher_iv_param,
+ options->cipher_iv_random_size,
+ options->cipher_iv.length) != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support IV length\n",
+ cdev_id);
+ return -1;
+ }
+
+ /*
+ * Check if length of provided cipher key is supported
+ * by the algorithm chosen.
+ */
+ if (options->ckey_param) {
+ if (check_supported_size(
+ options->cipher_xform.cipher.key.length,
+ cap->sym.cipher.key_size.min,
+ cap->sym.cipher.key_size.max,
+ cap->sym.cipher.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support cipher "
+ "key length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of the cipher key to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (options->ckey_random_size != -1) {
+ if (check_supported_size(options->ckey_random_size,
+ cap->sym.cipher.key_size.min,
+ cap->sym.cipher.key_size.max,
+ cap->sym.cipher.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support cipher "
+ "key length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+ }
+
+ /* Set auth parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
+ /* Check if device supports auth algo */
+ cap = check_device_support_auth_algo(options, &dev_info,
+ cdev_id);
+ if (cap == NULL)
+ return -1;
+
+ if (check_iv_param(&cap->sym.auth.iv_size,
+ options->auth_iv_param,
+ options->auth_iv_random_size,
+ options->auth_iv.length) != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support IV length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of provided auth key is supported
+ * by the algorithm chosen.
+ */
+ if (options->akey_param) {
+ if (check_supported_size(
+ options->auth_xform.auth.key.length,
+ cap->sym.auth.key_size.min,
+ cap->sym.auth.key_size.max,
+ cap->sym.auth.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support auth "
+ "key length\n",
+ cdev_id);
+ return -1;
+ }
+ /*
+ * Check if length of the auth key to be randomly generated
+ * is supported by the algorithm chosen.
+ */
+ } else if (options->akey_random_size != -1) {
+ if (check_supported_size(options->akey_random_size,
+ cap->sym.auth.key_size.min,
+ cap->sym.auth.key_size.max,
+ cap->sym.auth.key_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support auth "
+ "key length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+
+ /* Check if digest size is supported by the algorithm. */
+ if (options->digest_size != -1) {
+ if (check_supported_size(options->digest_size,
+ cap->sym.auth.digest_size.min,
+ cap->sym.auth.digest_size.max,
+ cap->sym.auth.digest_size.increment)
+ != 0) {
+ RTE_LOG(DEBUG, USER1,
+ "Device %u does not support "
+ "digest length\n",
+ cdev_id);
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
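+/* Select capable crypto devices, create session pools and start the devices */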
+static int
+initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
+ uint8_t *enabled_cdevs)
+{
+ uint8_t cdev_id, cdev_count, enabled_cdev_count = 0;
+ const struct rte_cryptodev_capabilities *cap;
+ unsigned int sess_sz, max_sess_sz = 0;
+ uint32_t sessions_needed = 0;
+ int retval;
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ printf("No crypto devices available\n");
+ return -1;
+ }
+
+ for (cdev_id = 0; cdev_id < cdev_count && enabled_cdev_count < nb_ports;
+ cdev_id++) {
+ if (check_cryptodev_mask(options, cdev_id) < 0)
+ continue;
+
+ if (check_capabilities(options, cdev_id) < 0)
+ continue;
+
+ sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+
+ l2fwd_enabled_crypto_mask |= (((uint64_t)1) << cdev_id);
+
+ enabled_cdevs[cdev_id] = 1;
+ enabled_cdev_count++;
+ }
+
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ struct rte_cryptodev_qp_conf qp_conf;
+ struct rte_cryptodev_info dev_info;
+
+ if (enabled_cdevs[cdev_id] == 0)
+ continue;
+
+ retval = rte_cryptodev_socket_id(cdev_id);
+
+ if (retval < 0) {
+ printf("Invalid crypto device id used\n");
+ return -1;
+ }
+
+ uint8_t socket_id = (uint8_t) retval;
+
+ struct rte_cryptodev_config conf = {
+ .nb_queue_pairs = 1,
+ .socket_id = socket_id,
+ .ff_disable = RTE_CRYPTODEV_FF_SECURITY,
+ };
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+
+ /*
+		 * Two session objects are required for each session
+ * (one for the header, one for the private data)
+ */
+ if (!strcmp(dev_info.driver_name, "crypto_scheduler")) {
+#ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ uint32_t nb_slaves =
+ rte_cryptodev_scheduler_slaves_get(cdev_id,
+ NULL);
+
+ sessions_needed = enabled_cdev_count * nb_slaves;
+#endif
+ } else
+ sessions_needed = enabled_cdev_count;
+
+ if (session_pool_socket[socket_id].priv_mp == NULL) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "priv_sess_mp_%u", socket_id);
+
+ session_pool_socket[socket_id].priv_mp =
+ rte_mempool_create(mp_name,
+ sessions_needed,
+ max_sess_sz,
+ 0, 0, NULL, NULL, NULL,
+ NULL, socket_id,
+ 0);
+
+ if (session_pool_socket[socket_id].priv_mp == NULL) {
+ printf("Cannot create pool on socket %d\n",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ printf("Allocated pool \"%s\" on socket %d\n",
+ mp_name, socket_id);
+ }
+
+ if (session_pool_socket[socket_id].sess_mp == NULL) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+
+ session_pool_socket[socket_id].sess_mp =
+ rte_cryptodev_sym_session_pool_create(
+ mp_name,
+ sessions_needed,
+ 0, 0, 0, socket_id);
+
+ if (session_pool_socket[socket_id].sess_mp == NULL) {
+ printf("Cannot create pool on socket %d\n",
+ socket_id);
+ return -ENOMEM;
+ }
+
+ printf("Allocated pool \"%s\" on socket %d\n",
+ mp_name, socket_id);
+ }
+
+ /* Set AEAD parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_AEAD) {
+ cap = check_device_support_aead_algo(options, &dev_info,
+ cdev_id);
+
+ options->block_size = cap->sym.aead.block_size;
+
+ /* Set IV if not provided from command line */
+ if (options->aead_iv_param == 0) {
+ if (options->aead_iv_random_size != -1)
+ options->aead_iv.length =
+ options->aead_iv_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->aead_iv.length =
+ cap->sym.aead.iv_size.min;
+ }
+
+ /* Set key if not provided from command line */
+ if (options->aead_key_param == 0) {
+ if (options->aead_key_random_size != -1)
+ options->aead_xform.aead.key.length =
+ options->aead_key_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->aead_xform.aead.key.length =
+ cap->sym.aead.key_size.min;
+
+ generate_random_key(options->aead_key,
+ options->aead_xform.aead.key.length);
+ }
+
+ /* Set AAD if not provided from command line */
+ if (options->aad_param == 0) {
+ if (options->aad_random_size != -1)
+ options->aad.length =
+ options->aad_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->aad.length =
+					cap->sym.aead.aad_size.min;
+ }
+
+ options->aead_xform.aead.aad_length =
+ options->aad.length;
+
+			/* Set digest size, using the minimum if not provided */
+ if (options->digest_size != -1)
+ options->aead_xform.aead.digest_length =
+ options->digest_size;
+ /* No size provided, use minimum size. */
+ else
+ options->aead_xform.aead.digest_length =
+ cap->sym.aead.digest_size.min;
+ }
+
+ /* Set cipher parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
+ options->xform_chain == L2FWD_CRYPTO_CIPHER_ONLY) {
+ cap = check_device_support_cipher_algo(options, &dev_info,
+ cdev_id);
+ options->block_size = cap->sym.cipher.block_size;
+
+ /* Set IV if not provided from command line */
+ if (options->cipher_iv_param == 0) {
+ if (options->cipher_iv_random_size != -1)
+ options->cipher_iv.length =
+ options->cipher_iv_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->cipher_iv.length =
+ cap->sym.cipher.iv_size.min;
+ }
+
+ /* Set key if not provided from command line */
+ if (options->ckey_param == 0) {
+ if (options->ckey_random_size != -1)
+ options->cipher_xform.cipher.key.length =
+ options->ckey_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->cipher_xform.cipher.key.length =
+ cap->sym.cipher.key_size.min;
+
+ generate_random_key(options->cipher_key,
+ options->cipher_xform.cipher.key.length);
+ }
+ }
+
+ /* Set auth parameters */
+ if (options->xform_chain == L2FWD_CRYPTO_CIPHER_HASH ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_CIPHER ||
+ options->xform_chain == L2FWD_CRYPTO_HASH_ONLY) {
+ cap = check_device_support_auth_algo(options, &dev_info,
+ cdev_id);
+
+ /* Set IV if not provided from command line */
+ if (options->auth_iv_param == 0) {
+ if (options->auth_iv_random_size != -1)
+ options->auth_iv.length =
+ options->auth_iv_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->auth_iv.length =
+ cap->sym.auth.iv_size.min;
+ }
+
+ /* Set key if not provided from command line */
+ if (options->akey_param == 0) {
+ if (options->akey_random_size != -1)
+ options->auth_xform.auth.key.length =
+ options->akey_random_size;
+ /* No size provided, use minimum size. */
+ else
+ options->auth_xform.auth.key.length =
+ cap->sym.auth.key_size.min;
+
+ generate_random_key(options->auth_key,
+ options->auth_xform.auth.key.length);
+ }
+
+			/* Set digest size, using the minimum if not provided */
+ if (options->digest_size != -1)
+ options->auth_xform.auth.digest_length =
+ options->digest_size;
+ /* No size provided, use minimum size. */
+ else
+ options->auth_xform.auth.digest_length =
+ cap->sym.auth.digest_size.min;
+ }
+
+ retval = rte_cryptodev_configure(cdev_id, &conf);
+ if (retval < 0) {
+ printf("Failed to configure cryptodev %u", cdev_id);
+ return -1;
+ }
+
+ qp_conf.nb_descriptors = 2048;
+ qp_conf.mp_session = session_pool_socket[socket_id].sess_mp;
+ qp_conf.mp_session_private =
+ session_pool_socket[socket_id].priv_mp;
+
+ retval = rte_cryptodev_queue_pair_setup(cdev_id, 0, &qp_conf,
+ socket_id);
+ if (retval < 0) {
+ printf("Failed to setup queue pair %u on cryptodev %u",
+ 0, cdev_id);
+ return -1;
+ }
+
+ retval = rte_cryptodev_start(cdev_id);
+ if (retval < 0) {
+ printf("Failed to start device %u: error %d\n",
+ cdev_id, retval);
+ return -1;
+ }
+ }
+
+ return enabled_cdev_count;
+}
+
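+/* Configure and start the enabled Ethernet ports, pairing them for forwarding */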
+static int
+initialize_ports(struct l2fwd_crypto_options *options)
+{
+ uint16_t last_portid = 0, portid;
+ unsigned enabled_portcount = 0;
+ unsigned nb_ports = rte_eth_dev_count_avail();
+
+ if (nb_ports == 0) {
+ printf("No Ethernet ports - bye\n");
+ return -1;
+ }
+
+ /* Reset l2fwd_dst_ports */
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
+ l2fwd_dst_ports[portid] = 0;
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ int retval;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_rxconf rxq_conf;
+ struct rte_eth_txconf txq_conf;
+ struct rte_eth_conf local_port_conf = port_conf;
+
+ /* Skip ports that are not enabled */
+ if ((options->portmask & (1 << portid)) == 0)
+ continue;
+
+ /* init port */
+ printf("Initializing port %u... ", portid);
+ fflush(stdout);
+
+ retval = rte_eth_dev_info_get(portid, &dev_info);
+ if (retval != 0) {
+ printf("Error during getting device (port %u) info: %s\n",
+ portid, strerror(-retval));
+ return retval;
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
+ if (retval < 0) {
+ printf("Cannot configure device: err=%d, port=%u\n",
+ retval, portid);
+ return -1;
+ }
+
+ retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (retval < 0) {
+ printf("Cannot adjust number of descriptors: err=%d, port=%u\n",
+ retval, portid);
+ return -1;
+ }
+
+ /* init one RX queue */
+ fflush(stdout);
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
+ rte_eth_dev_socket_id(portid),
+ &rxq_conf, l2fwd_pktmbuf_pool);
+ if (retval < 0) {
+ printf("rte_eth_rx_queue_setup:err=%d, port=%u\n",
+ retval, portid);
+ return -1;
+ }
+
+ /* init one TX queue on each port */
+ fflush(stdout);
+ txq_conf = dev_info.default_txconf;
+ txq_conf.offloads = local_port_conf.txmode.offloads;
+ retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
+ rte_eth_dev_socket_id(portid),
+ &txq_conf);
+ if (retval < 0) {
+ printf("rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ retval, portid);
+
+ return -1;
+ }
+
+ /* Start device */
+ retval = rte_eth_dev_start(portid);
+ if (retval < 0) {
+ printf("rte_eth_dev_start:err=%d, port=%u\n",
+ retval, portid);
+ return -1;
+ }
+
+ retval = rte_eth_promiscuous_enable(portid);
+ if (retval != 0) {
+ printf("rte_eth_promiscuous_enable:err=%s, port=%u\n",
+ rte_strerror(-retval), portid);
+ return -1;
+ }
+
+ retval = rte_eth_macaddr_get(portid,
+ &l2fwd_ports_eth_addr[portid]);
+ if (retval < 0) {
+ printf("rte_eth_macaddr_get :err=%d, port=%u\n",
+ retval, portid);
+ return -1;
+ }
+
+ printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+ portid,
+ l2fwd_ports_eth_addr[portid].addr_bytes[0],
+ l2fwd_ports_eth_addr[portid].addr_bytes[1],
+ l2fwd_ports_eth_addr[portid].addr_bytes[2],
+ l2fwd_ports_eth_addr[portid].addr_bytes[3],
+ l2fwd_ports_eth_addr[portid].addr_bytes[4],
+ l2fwd_ports_eth_addr[portid].addr_bytes[5]);
+
+ /* initialize port stats */
+ memset(&port_statistics, 0, sizeof(port_statistics));
+
+ /* Setup port forwarding table */
+ if (enabled_portcount % 2) {
+ l2fwd_dst_ports[portid] = last_portid;
+ l2fwd_dst_ports[last_portid] = portid;
+ } else {
+ last_portid = portid;
+ }
+
+ l2fwd_enabled_port_mask |= (1 << portid);
+ enabled_portcount++;
+ }
+
+ if (enabled_portcount == 1) {
+ l2fwd_dst_ports[last_portid] = last_portid;
+ } else if (enabled_portcount % 2) {
+		printf("odd number of ports in portmask - bye\n");
+ return -1;
+ }
+
+ check_all_ports_link_status(l2fwd_enabled_port_mask);
+
+ return enabled_portcount;
+}
+
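+/* Attach key buffers to the xforms and allocate memory for IVs and AAD */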
+static void
+reserve_key_memory(struct l2fwd_crypto_options *options)
+{
+ options->cipher_xform.cipher.key.data = options->cipher_key;
+
+ options->auth_xform.auth.key.data = options->auth_key;
+
+ options->aead_xform.aead.key.data = options->aead_key;
+
+ options->cipher_iv.data = rte_malloc("cipher iv", MAX_KEY_SIZE, 0);
+ if (options->cipher_iv.data == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to allocate memory for cipher IV");
+
+ options->auth_iv.data = rte_malloc("auth iv", MAX_KEY_SIZE, 0);
+ if (options->auth_iv.data == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to allocate memory for auth IV");
+
+ options->aead_iv.data = rte_malloc("aead_iv", MAX_KEY_SIZE, 0);
+ if (options->aead_iv.data == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to allocate memory for AEAD iv");
+
+ options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
+ if (options->aad.data == NULL)
+ rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
+ options->aad.phys_addr = rte_malloc_virt2iova(options->aad.data);
+}
+
+int
+main(int argc, char **argv)
+{
+ struct lcore_queue_conf *qconf = NULL;
+ struct l2fwd_crypto_options options;
+
+ uint8_t nb_cryptodevs, cdev_id;
+ uint16_t portid;
+ unsigned lcore_id, rx_lcore_id = 0;
+ int ret, enabled_cdevcount, enabled_portcount;
+ uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = {0};
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ /* reserve memory for Cipher/Auth key and IV */
+ reserve_key_memory(&options);
+
+ /* parse application arguments (after the EAL ones) */
+ ret = l2fwd_crypto_parse_args(&options, argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid L2FWD-CRYPTO arguments\n");
+
+ printf("MAC updating %s\n",
+ options.mac_updating ? "enabled" : "disabled");
+
+ /* create the mbuf pool */
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 512,
+ sizeof(struct rte_crypto_op),
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ if (l2fwd_pktmbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+
+ /* create crypto op pool */
+ l2fwd_crypto_op_pool = rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MBUF, 128, MAXIMUM_IV_LENGTH,
+ rte_socket_id());
+ if (l2fwd_crypto_op_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");
+
+ /* Enable Ethernet ports */
+ enabled_portcount = initialize_ports(&options);
+ if (enabled_portcount < 1)
+		rte_exit(EXIT_FAILURE, "Failed to initialize Ethernet ports\n");
+
+ /* Initialize the port/queue configuration of each logical core */
+ RTE_ETH_FOREACH_DEV(portid) {
+
+ /* skip ports that are not enabled */
+ if ((options.portmask & (1 << portid)) == 0)
+ continue;
+
+ if (options.single_lcore && qconf == NULL) {
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE,
+ "Not enough cores\n");
+ }
+ } else if (!options.single_lcore) {
+ /* get the lcore_id for this port */
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+ lcore_queue_conf[rx_lcore_id].nb_rx_ports ==
+ options.nb_ports_per_lcore) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE,
+ "Not enough cores\n");
+ }
+ }
+
+		/* Use the queue configuration of the lcore assigned above */
+ if (qconf != &lcore_queue_conf[rx_lcore_id])
+ qconf = &lcore_queue_conf[rx_lcore_id];
+
+ qconf->rx_port_list[qconf->nb_rx_ports] = portid;
+ qconf->nb_rx_ports++;
+
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
+ }
+
+ /* Enable Crypto devices */
+ enabled_cdevcount = initialize_cryptodevs(&options, enabled_portcount,
+ enabled_cdevs);
+ if (enabled_cdevcount < 0)
+ rte_exit(EXIT_FAILURE, "Failed to initialize crypto devices\n");
+
+ if (enabled_cdevcount < enabled_portcount)
+ rte_exit(EXIT_FAILURE, "Number of capable crypto devices (%d) "
+ "has to be more or equal to number of ports (%d)\n",
+ enabled_cdevcount, enabled_portcount);
+
+ nb_cryptodevs = rte_cryptodev_count();
+
+ /* Initialize the port/cryptodev configuration of each logical core */
+ for (rx_lcore_id = 0, qconf = NULL, cdev_id = 0;
+ cdev_id < nb_cryptodevs && enabled_cdevcount;
+ cdev_id++) {
+		/* Skip crypto devices that are not enabled */
+ if (!enabled_cdevs[cdev_id])
+ continue;
+
+ if (options.single_lcore && qconf == NULL) {
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE,
+ "Not enough cores\n");
+ }
+ } else if (!options.single_lcore) {
+ /* get the lcore_id for this port */
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+ lcore_queue_conf[rx_lcore_id].nb_crypto_devs ==
+ options.nb_ports_per_lcore) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE,
+ "Not enough cores\n");
+ }
+ }
+
+		/* Use the queue configuration of the lcore assigned above */
+ if (qconf != &lcore_queue_conf[rx_lcore_id])
+ qconf = &lcore_queue_conf[rx_lcore_id];
+
+ qconf->cryptodev_list[qconf->nb_crypto_devs] = cdev_id;
+ qconf->nb_crypto_devs++;
+
+ enabled_cdevcount--;
+
+ printf("Lcore %u: cryptodev %u\n", rx_lcore_id,
+ (unsigned)cdev_id);
+ }
+
+ /* launch per-lcore init on every lcore */
+ rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, (void *)&options,
+ CALL_MASTER);
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (rte_eal_wait_lcore(lcore_id) < 0)
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-crypto/meson.build b/src/spdk/dpdk/examples/l2fwd-crypto/meson.build
new file mode 100644
index 000000000..6c852ad19
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-crypto/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+deps += 'cryptodev'
+allow_experimental_apis = true
+sources = files(
+ 'main.c'
+)
diff --git a/src/spdk/dpdk/examples/l2fwd-event/Makefile b/src/spdk/dpdk/examples/l2fwd-event/Makefile
new file mode 100644
index 000000000..807f7f1b8
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/Makefile
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+# binary name
+APP = l2fwd-event
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+SRCS-y += l2fwd_poll.c
+SRCS-y += l2fwd_event.c
+SRCS-y += l2fwd_common.c
+SRCS-y += l2fwd_event_generic.c
+SRCS-y += l2fwd_event_internal_port.c
+
+# Build using pkg-config variables if possible
+ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
+
+all: shared
+.PHONY: shared static
+shared: build/$(APP)-shared
+ ln -sf $(APP)-shared build/$(APP)
+static: build/$(APP)-static
+ ln -sf $(APP)-static build/$(APP)
+
+PKGCONF ?= pkg-config
+
+PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null)
+CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
+LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
+LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
+
+build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
+
+build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
+
+build:
+ @mkdir -p $@
+
+.PHONY: clean
+clean:
+ rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
+ test -d build && rmdir -p build || true
+
+else # Build using legacy build system
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target; detect a build directory by looking for a path with a .config
+RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+endif
diff --git a/src/spdk/dpdk/examples/l2fwd-event/l2fwd_common.c b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_common.c
new file mode 100644
index 000000000..ab341e55b
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_common.c
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "l2fwd_common.h"
+
+int
+l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
+{
+ uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+ uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+ struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ .split_hdr_size = 0,
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+ };
+ uint16_t nb_ports_available = 0;
+ uint16_t port_id;
+ int ret;
+
+ if (rsrc->event_mode) {
+ port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+ port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP;
+ }
+
+ /* Initialise each port */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ struct rte_eth_conf local_port_conf = port_conf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_rxconf rxq_conf;
+ struct rte_eth_txconf txq_conf;
+
+ /* skip ports that are not enabled */
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0) {
+ printf("Skipping disabled port %u\n", port_id);
+ continue;
+ }
+ nb_ports_available++;
+
+ /* init port */
+ printf("Initializing port %u... ", port_id);
+ fflush(stdout);
+
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ rte_panic("Error during getting device (port %u) info: %s\n",
+ port_id, strerror(-ret));
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+ dev_info.flow_type_rss_offloads;
+ if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+ port_conf.rx_adv_conf.rss_conf.rss_hf) {
+ printf("Port %u modified RSS hash function based on hardware support,"
+ "requested:%#"PRIx64" configured:%#"PRIx64"",
+ port_id,
+ port_conf.rx_adv_conf.rss_conf.rss_hf,
+ local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+ }
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ ret = rte_eth_dev_configure(port_id, 1, 1, &local_port_conf);
+ if (ret < 0)
+ rte_panic("Cannot configure device: err=%d, port=%u\n",
+ ret, port_id);
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_panic("Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, port_id);
+
+ rte_eth_macaddr_get(port_id, &rsrc->eth_addr[port_id]);
+
+ /* init one RX queue */
+ fflush(stdout);
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
+ rte_eth_dev_socket_id(port_id),
+ &rxq_conf,
+ rsrc->pktmbuf_pool);
+ if (ret < 0)
+ rte_panic("rte_eth_rx_queue_setup:err=%d, port=%u\n",
+ ret, port_id);
+
+ /* init one TX queue on each port */
+ fflush(stdout);
+ txq_conf = dev_info.default_txconf;
+ txq_conf.offloads = local_port_conf.txmode.offloads;
+ ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
+ rte_eth_dev_socket_id(port_id),
+ &txq_conf);
+ if (ret < 0)
+ rte_panic("rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, port_id);
+
+ rte_eth_promiscuous_enable(port_id);
+
+ printf("Port %u,MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+ port_id,
+ rsrc->eth_addr[port_id].addr_bytes[0],
+ rsrc->eth_addr[port_id].addr_bytes[1],
+ rsrc->eth_addr[port_id].addr_bytes[2],
+ rsrc->eth_addr[port_id].addr_bytes[3],
+ rsrc->eth_addr[port_id].addr_bytes[4],
+ rsrc->eth_addr[port_id].addr_bytes[5]);
+ }
+
+ return nb_ports_available;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-event/l2fwd_common.h b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_common.h
new file mode 100644
index 000000000..939221d45
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_common.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L2FWD_COMMON_H__
+#define __L2FWD_COMMON_H__
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_spinlock.h>
+
+#define MAX_PKT_BURST 32
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
+
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+#define MEMPOOL_CACHE_SIZE 256
+
+#define DEFAULT_TIMER_PERIOD 10 /* default period is 10 seconds */
+#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+
+/* Per-port statistics struct */
+struct l2fwd_port_statistics {
+ uint64_t dropped;
+ uint64_t tx;
+ uint64_t rx;
+} __rte_cache_aligned;
+
+struct l2fwd_resources {
+ volatile uint8_t force_quit;
+ uint8_t event_mode;
+ uint8_t sched_type;
+ uint8_t mac_updating;
+ uint8_t rx_queue_per_lcore;
+ bool port_pairs;
+ uint16_t nb_rxd;
+ uint16_t nb_txd;
+ uint32_t enabled_port_mask;
+ uint64_t timer_period;
+ struct rte_mempool *pktmbuf_pool;
+ uint32_t dst_ports[RTE_MAX_ETHPORTS];
+ struct rte_ether_addr eth_addr[RTE_MAX_ETHPORTS];
+ struct l2fwd_port_statistics port_stats[RTE_MAX_ETHPORTS];
+ void *evt_rsrc;
+ void *poll_rsrc;
+} __rte_cache_aligned;
+
+static __rte_always_inline void
+l2fwd_mac_updating(struct rte_mbuf *m, uint32_t dest_port_id,
+ struct rte_ether_addr *addr)
+{
+ struct rte_ether_hdr *eth;
+ void *tmp;
+
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+ /* 02:00:00:00:00:xx */
+ tmp = &eth->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_port_id << 40);
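+ /* Note: this single 8-byte store assumes a little-endian CPU (byte 0
+  * becomes 0x02, byte 5 the destination port id); the two bytes that
+  * spill into s_addr are overwritten by the copy just below.
+  */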
+
+ /* src addr */
+ rte_ether_addr_copy(addr, &eth->s_addr);
+}
+
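+/* The resources live in a named memzone so every lcore (and a secondary
+ * process) can look up the same singleton; the first caller reserves the
+ * zone and fills in the defaults.
+ */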
+static __rte_always_inline struct l2fwd_resources *
+l2fwd_get_rsrc(void)
+{
+ static const char name[RTE_MEMZONE_NAMESIZE] = "rsrc";
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(name);
+ if (mz != NULL)
+ return mz->addr;
+
+ mz = rte_memzone_reserve(name, sizeof(struct l2fwd_resources), 0, 0);
+ if (mz != NULL) {
+ struct l2fwd_resources *rsrc = mz->addr;
+
+ memset(rsrc, 0, sizeof(struct l2fwd_resources));
+ rsrc->mac_updating = true;
+ rsrc->event_mode = true;
+ rsrc->rx_queue_per_lcore = 1;
+ rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ rsrc->timer_period = 10 * rte_get_timer_hz();
+
+ return mz->addr;
+ }
+
+ rte_panic("Unable to allocate memory for l2fwd resources\n");
+
+ return NULL;
+}
+
+int l2fwd_event_init_ports(struct l2fwd_resources *rsrc);
+
+#endif /* __L2FWD_COMMON_H__ */
diff --git a/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event.c b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event.c
new file mode 100644
index 000000000..38d590c14
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event.c
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <getopt.h>
+
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+
+#include "l2fwd_event.h"
+
+#define L2FWD_EVENT_SINGLE 0x1
+#define L2FWD_EVENT_BURST 0x2
+#define L2FWD_EVENT_TX_DIRECT 0x4
+#define L2FWD_EVENT_TX_ENQ 0x8
+#define L2FWD_EVENT_UPDT_MAC 0x10
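+
+/* These flags are compile-time constants in each main-loop variant below,
+ * so the always-inline forwarding helper collapses to branch-free code for
+ * the selected Tx mode, burst mode and MAC-update setting.
+ */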
+
+static inline int
+l2fwd_event_service_enable(uint32_t service_id)
+{
+ uint8_t min_service_count = UINT8_MAX;
+ uint32_t slcore_array[RTE_MAX_LCORE];
+ unsigned int slcore = 0;
+ uint8_t service_count;
+ int32_t slcore_count;
+
+ if (!rte_service_lcore_count())
+ return -ENOENT;
+
+ slcore_count = rte_service_lcore_list(slcore_array, RTE_MAX_LCORE);
+ if (slcore_count < 0)
+ return -ENOENT;
+ /* Get the core which has least number of services running. */
+ while (slcore_count--) {
+ /* Reset default mapping */
+ if (rte_service_map_lcore_set(service_id,
+ slcore_array[slcore_count], 0) != 0)
+ return -ENOENT;
+ service_count = rte_service_lcore_count_services(
+ slcore_array[slcore_count]);
+ if (service_count < min_service_count) {
+ slcore = slcore_array[slcore_count];
+ min_service_count = service_count;
+ }
+ }
+ if (rte_service_map_lcore_set(service_id, slcore, 1) != 0)
+ return -ENOENT;
+ rte_service_lcore_start(slcore);
+
+ return 0;
+}
+
+void
+l2fwd_event_service_setup(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_dev_info evdev_info;
+ uint32_t service_id, caps;
+ int ret, i;
+
+ rte_event_dev_info_get(evt_rsrc->event_d_id, &evdev_info);
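+ /* Eventdevs without DISTRIBUTED_SCHED run their scheduler as a
+  * service that must be mapped to a service lcore; the same applies
+  * to the Rx/Tx adapter services handled below.
+  */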
+ if (!(evdev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
+ ret = rte_event_dev_service_id_get(evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error in starting eventdev service\n");
+ l2fwd_event_service_enable(service_id);
+ }
+
+ for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
+ ret = rte_event_eth_rx_adapter_caps_get(evt_rsrc->event_d_id,
+ evt_rsrc->rx_adptr.rx_adptr[i], &caps);
+ if (ret < 0)
+ rte_panic("Failed to get Rx adapter[%d] caps\n",
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ ret = rte_event_eth_rx_adapter_service_id_get(
+ evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error in starting Rx adapter[%d] service\n",
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ l2fwd_event_service_enable(service_id);
+ }
+
+ for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
+ ret = rte_event_eth_tx_adapter_caps_get(evt_rsrc->event_d_id,
+ evt_rsrc->tx_adptr.tx_adptr[i], &caps);
+ if (ret < 0)
+ rte_panic("Failed to get Rx adapter[%d] caps\n",
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+ ret = rte_event_eth_tx_adapter_service_id_get(
+ evt_rsrc->event_d_id,
+ &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error in starting Rx adapter[%d] service\n",
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+ l2fwd_event_service_enable(service_id);
+ }
+}
+
+static void
+l2fwd_event_capability_setup(struct l2fwd_event_resources *evt_rsrc)
+{
+ uint32_t caps = 0;
+ uint16_t i;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(i) {
+ ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
+ if (ret)
+ rte_panic("Invalid capability for Tx adptr port %d\n",
+ i);
+
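+ /* A single port whose Tx adapter lacks an internal event port
+  * forces the generic (service-core, single-link Tx queue) path
+  * for all ports.
+  */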
+ evt_rsrc->tx_mode_q |= !(caps &
+ RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
+ }
+
+ if (evt_rsrc->tx_mode_q)
+ l2fwd_event_set_generic_ops(&evt_rsrc->ops);
+ else
+ l2fwd_event_set_internal_port_ops(&evt_rsrc->ops);
+}
+
+static __rte_noinline int
+l2fwd_get_free_event_port(struct l2fwd_event_resources *evt_rsrc)
+{
+ static int index;
+ int port_id;
+
+ rte_spinlock_lock(&evt_rsrc->evp.lock);
+ if (index >= evt_rsrc->evp.nb_ports) {
+ /* release the lock on the error path too */
+ rte_spinlock_unlock(&evt_rsrc->evp.lock);
+ printf("No free event port is available\n");
+ return -1;
+ }
+
+ port_id = evt_rsrc->evp.event_p_id[index];
+ index++;
+ rte_spinlock_unlock(&evt_rsrc->evp.lock);
+
+ return port_id;
+}
+
+static __rte_always_inline void
+l2fwd_event_fwd(struct l2fwd_resources *rsrc, struct rte_event *ev,
+ const uint8_t tx_q_id, const uint64_t timer_period,
+ const uint32_t flags)
+{
+ struct rte_mbuf *mbuf = ev->mbuf;
+ uint16_t dst_port;
+
+ rte_prefetch0(rte_pktmbuf_mtod(mbuf, void *));
+ dst_port = rsrc->dst_ports[mbuf->port];
+
+ if (timer_period > 0)
+ __atomic_fetch_add(&rsrc->port_stats[mbuf->port].rx,
+ 1, __ATOMIC_RELAXED);
+ mbuf->port = dst_port;
+
+ if (flags & L2FWD_EVENT_UPDT_MAC)
+ l2fwd_mac_updating(mbuf, dst_port, &rsrc->eth_addr[dst_port]);
+
+ if (flags & L2FWD_EVENT_TX_ENQ) {
+ ev->queue_id = tx_q_id;
+ ev->op = RTE_EVENT_OP_FORWARD;
+ }
+
+ if (flags & L2FWD_EVENT_TX_DIRECT)
+ rte_event_eth_tx_adapter_txq_set(mbuf, 0);
+
+ if (timer_period > 0)
+ __atomic_fetch_add(&rsrc->port_stats[mbuf->port].tx,
+ 1, __ATOMIC_RELAXED);
+}
+
+static __rte_always_inline void
+l2fwd_event_loop_single(struct l2fwd_resources *rsrc,
+ const uint32_t flags)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ const int port_id = l2fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1];
+ const uint64_t timer_period = rsrc->timer_period;
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event ev;
+
+ if (port_id < 0)
+ return;
+
+ printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
+ rte_lcore_id());
+
+ while (!rsrc->force_quit) {
+ /* Read packet from eventdev */
+ if (!rte_event_dequeue_burst(event_d_id, port_id, &ev, 1, 0))
+ continue;
+
+ l2fwd_event_fwd(rsrc, &ev, tx_q_id, timer_period, flags);
+
+ if (flags & L2FWD_EVENT_TX_ENQ) {
+ while (rte_event_enqueue_burst(event_d_id, port_id,
+ &ev, 1) &&
+ !rsrc->force_quit)
+ ;
+ }
+
+ if (flags & L2FWD_EVENT_TX_DIRECT) {
+ while (!rte_event_eth_tx_adapter_enqueue(event_d_id,
+ port_id,
+ &ev, 1, 0) &&
+ !rsrc->force_quit)
+ ;
+ }
+ }
+}
+
+static __rte_always_inline void
+l2fwd_event_loop_burst(struct l2fwd_resources *rsrc,
+ const uint32_t flags)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ const int port_id = l2fwd_get_free_event_port(evt_rsrc);
+ const uint8_t tx_q_id = evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1];
+ const uint64_t timer_period = rsrc->timer_period;
+ const uint8_t event_d_id = evt_rsrc->event_d_id;
+ const uint8_t deq_len = evt_rsrc->deq_depth;
+ struct rte_event ev[MAX_PKT_BURST];
+ uint16_t nb_rx, nb_tx;
+ uint8_t i;
+
+ if (port_id < 0)
+ return;
+
+ printf("%s(): entering eventdev main loop on lcore %u\n", __func__,
+ rte_lcore_id());
+
+ while (!rsrc->force_quit) {
+ /* Read packet from eventdev */
+ nb_rx = rte_event_dequeue_burst(event_d_id, port_id, ev,
+ deq_len, 0);
+ if (nb_rx == 0)
+ continue;
+
+ for (i = 0; i < nb_rx; i++) {
+ l2fwd_event_fwd(rsrc, &ev[i], tx_q_id, timer_period,
+ flags);
+ }
+
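+ /* Re-enqueue whatever the device did not accept until everything
+  * is sent or shutdown is requested, so no event is dropped.
+  */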
+ if (flags & L2FWD_EVENT_TX_ENQ) {
+ nb_tx = rte_event_enqueue_burst(event_d_id, port_id,
+ ev, nb_rx);
+ while (nb_tx < nb_rx && !rsrc->force_quit)
+ nb_tx += rte_event_enqueue_burst(event_d_id,
+ port_id, ev + nb_tx,
+ nb_rx - nb_tx);
+ }
+
+ if (flags & L2FWD_EVENT_TX_DIRECT) {
+ nb_tx = rte_event_eth_tx_adapter_enqueue(event_d_id,
+ port_id, ev,
+ nb_rx, 0);
+ while (nb_tx < nb_rx && !rsrc->force_quit)
+ nb_tx += rte_event_eth_tx_adapter_enqueue(
+ event_d_id, port_id,
+ ev + nb_tx, nb_rx - nb_tx, 0);
+ }
+ }
+}
+
+static __rte_always_inline void
+l2fwd_event_loop(struct l2fwd_resources *rsrc,
+ const uint32_t flags)
+{
+ if (flags & L2FWD_EVENT_SINGLE)
+ l2fwd_event_loop_single(rsrc, flags);
+ if (flags & L2FWD_EVENT_BURST)
+ l2fwd_event_loop_burst(rsrc, flags);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc,
+ L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_brst(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_brst(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_mac(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
+ L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_SINGLE);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_d_brst_mac(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
+ L2FWD_EVENT_TX_DIRECT | L2FWD_EVENT_BURST);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_mac(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
+ L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_SINGLE);
+}
+
+static void __rte_noinline
+l2fwd_event_main_loop_tx_q_brst_mac(struct l2fwd_resources *rsrc)
+{
+ l2fwd_event_loop(rsrc, L2FWD_EVENT_UPDT_MAC |
+ L2FWD_EVENT_TX_ENQ | L2FWD_EVENT_BURST);
+}
+
+void
+l2fwd_event_resource_setup(struct l2fwd_resources *rsrc)
+{
+ /* [MAC_UPDT][TX_MODE][BURST] */
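+ /* Indexed as [rsrc->mac_updating][evt_rsrc->tx_mode_q]
+  * [evt_rsrc->has_burst]: tx_mode_q = 0 enqueues directly via the Tx
+  * adapter, 1 forwards to the single-link Tx queue; has_burst picks
+  * the burst variant of the loop.
+  */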
+ const event_loop_cb event_loop[2][2][2] = {
+ [0][0][0] = l2fwd_event_main_loop_tx_d,
+ [0][0][1] = l2fwd_event_main_loop_tx_d_brst,
+ [0][1][0] = l2fwd_event_main_loop_tx_q,
+ [0][1][1] = l2fwd_event_main_loop_tx_q_brst,
+ [1][0][0] = l2fwd_event_main_loop_tx_d_mac,
+ [1][0][1] = l2fwd_event_main_loop_tx_d_brst_mac,
+ [1][1][0] = l2fwd_event_main_loop_tx_q_mac,
+ [1][1][1] = l2fwd_event_main_loop_tx_q_brst_mac,
+ };
+ struct l2fwd_event_resources *evt_rsrc;
+ uint32_t event_queue_cfg;
+ int ret;
+
+ if (!rte_event_dev_count())
+ rte_panic("No Eventdev found\n");
+
+ evt_rsrc = rte_zmalloc("l2fwd_event",
+ sizeof(struct l2fwd_event_resources), 0);
+ if (evt_rsrc == NULL)
+ rte_panic("Failed to allocate memory\n");
+
+ rsrc->evt_rsrc = evt_rsrc;
+
+ /* Setup eventdev capability callbacks */
+ l2fwd_event_capability_setup(evt_rsrc);
+
+ /* Event device configuration */
+ event_queue_cfg = evt_rsrc->ops.event_device_setup(rsrc);
+
+ /* Event queue configuration */
+ evt_rsrc->ops.event_queue_setup(rsrc, event_queue_cfg);
+
+ /* Event port configuration */
+ evt_rsrc->ops.event_port_setup(rsrc);
+
+ /* Rx/Tx adapters configuration */
+ evt_rsrc->ops.adapter_setup(rsrc);
+
+ /* Start event device */
+ ret = rte_event_dev_start(evt_rsrc->event_d_id);
+ if (ret < 0)
+ rte_panic("Error in starting eventdev\n");
+
+ evt_rsrc->ops.l2fwd_event_loop = event_loop
+ [rsrc->mac_updating]
+ [evt_rsrc->tx_mode_q]
+ [evt_rsrc->has_burst];
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event.h b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event.h
new file mode 100644
index 000000000..78f22e5f9
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L2FWD_EVENT_H__
+#define __L2FWD_EVENT_H__
+
+#include <rte_common.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_mbuf.h>
+#include <rte_spinlock.h>
+
+#include "l2fwd_common.h"
+
+typedef uint32_t (*event_device_setup_cb)(struct l2fwd_resources *rsrc);
+typedef void (*event_port_setup_cb)(struct l2fwd_resources *rsrc);
+typedef void (*event_queue_setup_cb)(struct l2fwd_resources *rsrc,
+ uint32_t event_queue_cfg);
+typedef void (*adapter_setup_cb)(struct l2fwd_resources *rsrc);
+typedef void (*event_loop_cb)(struct l2fwd_resources *rsrc);
+
+struct event_queues {
+ uint8_t *event_q_id;
+ uint8_t nb_queues;
+};
+
+struct event_ports {
+ uint8_t *event_p_id;
+ uint8_t nb_ports;
+ rte_spinlock_t lock;
+};
+
+struct event_rx_adptr {
+ uint32_t service_id;
+ uint8_t nb_rx_adptr;
+ uint8_t *rx_adptr;
+};
+
+struct event_tx_adptr {
+ uint32_t service_id;
+ uint8_t nb_tx_adptr;
+ uint8_t *tx_adptr;
+};
+
+struct event_setup_ops {
+ event_device_setup_cb event_device_setup;
+ event_queue_setup_cb event_queue_setup;
+ event_port_setup_cb event_port_setup;
+ adapter_setup_cb adapter_setup;
+ event_loop_cb l2fwd_event_loop;
+};
+
+struct l2fwd_event_resources {
+ uint8_t tx_mode_q;
+ uint8_t deq_depth;
+ uint8_t has_burst;
+ uint8_t event_d_id;
+ uint8_t disable_implicit_release;
+ struct event_ports evp;
+ struct event_queues evq;
+ struct event_setup_ops ops;
+ struct event_rx_adptr rx_adptr;
+ struct event_tx_adptr tx_adptr;
+ struct rte_event_port_conf def_p_conf;
+};
+
+void l2fwd_event_resource_setup(struct l2fwd_resources *rsrc);
+void l2fwd_event_set_generic_ops(struct event_setup_ops *ops);
+void l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops);
+void l2fwd_event_service_setup(struct l2fwd_resources *rsrc);
+
+#endif /* __L2FWD_EVENT_H__ */
diff --git a/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event_generic.c b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event_generic.c
new file mode 100644
index 000000000..2dc95e5f7
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event_generic.c
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <getopt.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+
+#include "l2fwd_common.h"
+#include "l2fwd_event.h"
+
+static uint32_t
+l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_dev_config event_d_conf = {
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128
+ };
+ struct rte_event_dev_info dev_info;
+ const uint8_t event_d_id = 0; /* Use the first event device */
+ uint32_t event_queue_cfg = 0;
+ uint16_t ethdev_count = 0;
+ uint16_t num_workers = 0;
+ uint16_t port_id;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ethdev_count++;
+ }
+
+ /* Event device configuration */
+ rte_event_dev_info_get(event_d_id, &dev_info);
+
+ /* Leave implicit release enabled when the device supports toggling it */
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
+ evt_rsrc->disable_implicit_release = 0;
+
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+ event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+ /* One queue per ethdev port plus one single-link queue for the Tx adapter. */
+ event_d_conf.nb_event_queues = ethdev_count + 1;
+ if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+ event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+ if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+ event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+ if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+ event_d_conf.nb_event_queue_flows =
+ dev_info.max_event_queue_flows;
+
+ if (dev_info.max_event_port_dequeue_depth <
+ event_d_conf.nb_event_port_dequeue_depth)
+ event_d_conf.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+
+ if (dev_info.max_event_port_enqueue_depth <
+ event_d_conf.nb_event_port_enqueue_depth)
+ event_d_conf.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ /* Ignore Master core and service cores. */
+ num_workers = rte_lcore_count() - 1 - rte_service_lcore_count();
+ if (dev_info.max_event_ports < num_workers)
+ num_workers = dev_info.max_event_ports;
+
+ event_d_conf.nb_event_ports = num_workers;
+ evt_rsrc->evp.nb_ports = num_workers;
+ evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
+
+ evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_BURST_MODE);
+
+ ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event device\n");
+
+ evt_rsrc->event_d_id = event_d_id;
+ return event_queue_cfg;
+}
+
+static void
+l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_port_conf event_p_conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 32,
+ .new_event_threshold = 4096
+ };
+ struct rte_event_port_conf def_p_conf;
+ uint8_t event_p_id;
+ int32_t ret;
+
+ evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evp.nb_ports);
+ if (!evt_rsrc->evp.event_p_id)
+ rte_panic("No space is available\n");
+
+ memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
+ ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+ if (ret < 0)
+ rte_panic("Error to get default configuration of event port\n");
+
+ if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
+ event_p_conf.new_event_threshold =
+ def_p_conf.new_event_threshold;
+
+ if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
+ event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;
+
+ if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
+ event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
+
+ event_p_conf.disable_implicit_release =
+ evt_rsrc->disable_implicit_release;
+ evt_rsrc->deq_depth = def_p_conf.dequeue_depth;
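+ /* Note: the burst loop dequeues up to deq_depth events into an array
+  * of MAX_PKT_BURST entries, so the device default depth is assumed
+  * not to exceed MAX_PKT_BURST.
+  */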
+
+ for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
+ event_p_id++) {
+ ret = rte_event_port_setup(event_d_id, event_p_id,
+ &event_p_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event port %d\n",
+ event_p_id);
+
+ ret = rte_event_port_link(event_d_id, event_p_id,
+ evt_rsrc->evq.event_q_id,
+ NULL,
+ evt_rsrc->evq.nb_queues - 1);
+ if (ret != (evt_rsrc->evq.nb_queues - 1))
+ rte_panic("Error in linking event port %d to queues\n",
+ event_p_id);
+ evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+ }
+ /* init spinlock */
+ rte_spinlock_init(&evt_rsrc->evp.lock);
+
+ evt_rsrc->def_p_conf = event_p_conf;
+}
+
+static void
+l2fwd_event_queue_setup_generic(struct l2fwd_resources *rsrc,
+ uint32_t event_queue_cfg)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_queue_conf event_q_conf = {
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ .event_queue_cfg = event_queue_cfg,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+ };
+ struct rte_event_queue_conf def_q_conf;
+ uint8_t event_q_id;
+ int32_t ret;
+
+ event_q_conf.schedule_type = rsrc->sched_type;
+ evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evq.nb_queues);
+ if (!evt_rsrc->evq.event_q_id)
+ rte_panic("Memory allocation failure\n");
+
+ ret = rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
+ if (ret < 0)
+ rte_panic("Error to get default config of event queue\n");
+
+ if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+ event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
+
+ for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1);
+ event_q_id++) {
+ ret = rte_event_queue_setup(event_d_id, event_q_id,
+ &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue\n");
+ evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+ }
+
+ event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
+ event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
+ ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue for Tx adapter\n");
+ evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+}
+
+static void
+l2fwd_rx_tx_adapter_setup_generic(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint8_t rx_adptr_id = 0;
+ uint8_t tx_adptr_id = 0;
+ uint8_t tx_port_id = 0;
+ uint16_t port_id;
+ uint32_t service_id;
+ int32_t ret, i = 0;
+
+ memset(&eth_q_conf, 0, sizeof(eth_q_conf));
+ eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
+ /* Rx adapter setup */
+ evt_rsrc->rx_adptr.nb_rx_adptr = 1;
+ evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->rx_adptr.nb_rx_adptr);
+ if (!evt_rsrc->rx_adptr.rx_adptr) {
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memery for Rx adapter\n");
+ }
+
+ ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create rx adapter\n");
+
+ /* Configure user requested sched type */
+ eth_q_conf.ev.sched_type = rsrc->sched_type;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
+ ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
+ -1, &eth_q_conf);
+ if (ret)
+ rte_panic("Failed to add queues to Rx adapter\n");
+ if (i < evt_rsrc->evq.nb_queues)
+ i++;
+ }
+
+ ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id, &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Error getting the service ID for rx adptr\n");
+
+ rte_service_runstate_set(service_id, 1);
+ rte_service_set_runstate_mapped_check(service_id, 0);
+ evt_rsrc->rx_adptr.service_id = service_id;
+
+ ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
+ if (ret)
+ rte_panic("Rx adapter[%d] start Failed\n", rx_adptr_id);
+
+ evt_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;
+
+ /* Tx adapter setup */
+ evt_rsrc->tx_adptr.nb_tx_adptr = 1;
+ evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->tx_adptr.nb_tx_adptr);
+ if (!evt_rsrc->tx_adptr.tx_adptr) {
+ free(evt_rsrc->rx_adptr.rx_adptr);
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memery for Rx adapter\n");
+ }
+
+ ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create tx adapter\n");
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, port_id,
+ -1);
+ if (ret)
+ rte_panic("Failed to add queues to Tx adapter\n");
+ }
+
+ ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id, &service_id);
+ if (ret != -ESRCH && ret != 0)
+ rte_panic("Failed to get Tx adapter service ID\n");
+
+ rte_service_runstate_set(service_id, 1);
+ rte_service_set_runstate_mapped_check(service_id, 0);
+ evt_rsrc->tx_adptr.service_id = service_id;
+
+ ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
+ if (ret)
+ rte_panic("Failed to get Tx adapter port id: %d\n", ret);
+
+ ret = rte_event_port_link(event_d_id, tx_port_id,
+ &evt_rsrc->evq.event_q_id[
+ evt_rsrc->evq.nb_queues - 1],
+ NULL, 1);
+ if (ret != 1)
+ rte_panic("Unable to link Tx adapter port to Tx queue:err=%d\n",
+ ret);
+
+ ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
+ if (ret)
+ rte_panic("Tx adapter[%d] start Failed\n", tx_adptr_id);
+
+ evt_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
+}
+
+void
+l2fwd_event_set_generic_ops(struct event_setup_ops *ops)
+{
+ ops->event_device_setup = l2fwd_event_device_setup_generic;
+ ops->event_queue_setup = l2fwd_event_queue_setup_generic;
+ ops->event_port_setup = l2fwd_event_port_setup_generic;
+ ops->adapter_setup = l2fwd_rx_tx_adapter_setup_generic;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event_internal_port.c b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event_internal_port.c
new file mode 100644
index 000000000..63d57b46c
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_event_internal_port.c
@@ -0,0 +1,296 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <stdbool.h>
+#include <getopt.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_event_eth_tx_adapter.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+
+#include "l2fwd_common.h"
+#include "l2fwd_event.h"
+
+static uint32_t
+l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_dev_config event_d_conf = {
+ .nb_events_limit = 4096,
+ .nb_event_queue_flows = 1024,
+ .nb_event_port_dequeue_depth = 128,
+ .nb_event_port_enqueue_depth = 128
+ };
+ struct rte_event_dev_info dev_info;
+ const uint8_t event_d_id = 0; /* Use the first event device */
+ uint32_t event_queue_cfg = 0;
+ uint16_t ethdev_count = 0;
+ uint16_t num_workers = 0;
+ uint16_t port_id;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ethdev_count++;
+ }
+
+ /* Event device configuration */
+ rte_event_dev_info_get(event_d_id, &dev_info);
+
+ /* Leave implicit release enabled when the device supports toggling it */
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
+ evt_rsrc->disable_implicit_release = 0;
+
+ if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
+ event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+
+ event_d_conf.nb_event_queues = ethdev_count;
+ if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
+ event_d_conf.nb_event_queues = dev_info.max_event_queues;
+
+ if (dev_info.max_num_events < event_d_conf.nb_events_limit)
+ event_d_conf.nb_events_limit = dev_info.max_num_events;
+
+ if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
+ event_d_conf.nb_event_queue_flows =
+ dev_info.max_event_queue_flows;
+
+ if (dev_info.max_event_port_dequeue_depth <
+ event_d_conf.nb_event_port_dequeue_depth)
+ event_d_conf.nb_event_port_dequeue_depth =
+ dev_info.max_event_port_dequeue_depth;
+
+ if (dev_info.max_event_port_enqueue_depth <
+ event_d_conf.nb_event_port_enqueue_depth)
+ event_d_conf.nb_event_port_enqueue_depth =
+ dev_info.max_event_port_enqueue_depth;
+
+ /* Ignore Master core. */
+ num_workers = rte_lcore_count() - 1;
+ if (dev_info.max_event_ports < num_workers)
+ num_workers = dev_info.max_event_ports;
+
+ event_d_conf.nb_event_ports = num_workers;
+ evt_rsrc->evp.nb_ports = num_workers;
+ evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
+ evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
+ RTE_EVENT_DEV_CAP_BURST_MODE);
+
+ ret = rte_event_dev_configure(event_d_id, &event_d_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event device\n");
+
+ evt_rsrc->event_d_id = event_d_id;
+ return event_queue_cfg;
+}
+
+static void
+l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_port_conf event_p_conf = {
+ .dequeue_depth = 32,
+ .enqueue_depth = 32,
+ .new_event_threshold = 4096
+ };
+ struct rte_event_port_conf def_p_conf;
+ uint8_t event_p_id;
+ int32_t ret;
+
+ evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evp.nb_ports);
+ if (!evt_rsrc->evp.event_p_id)
+ rte_panic("Failed to allocate memory for Event Ports\n");
+
+ ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
+ if (ret < 0)
+ rte_panic("Error to get default configuration of event port\n");
+
+ if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
+ event_p_conf.new_event_threshold =
+ def_p_conf.new_event_threshold;
+
+ if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
+ event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;
+
+ if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
+ event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;
+
+ event_p_conf.disable_implicit_release =
+ evt_rsrc->disable_implicit_release;
+
+ for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
+ event_p_id++) {
+ ret = rte_event_port_setup(event_d_id, event_p_id,
+ &event_p_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event port %d\n",
+ event_p_id);
+
+ ret = rte_event_port_link(event_d_id, event_p_id, NULL,
+ NULL, 0);
+ if (ret < 0)
+ rte_panic("Error in linking event port %d to queue\n",
+ event_p_id);
+ evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
+
+ /* init spinlock */
+ rte_spinlock_init(&evt_rsrc->evp.lock);
+ }
+
+ evt_rsrc->def_p_conf = event_p_conf;
+}
+
+static void
+l2fwd_event_queue_setup_internal_port(struct l2fwd_resources *rsrc,
+ uint32_t event_queue_cfg)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ struct rte_event_queue_conf event_q_conf = {
+ .nb_atomic_flows = 1024,
+ .nb_atomic_order_sequences = 1024,
+ .event_queue_cfg = event_queue_cfg,
+ .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
+ };
+ struct rte_event_queue_conf def_q_conf;
+ uint8_t event_q_id = 0;
+ int32_t ret;
+
+ ret = rte_event_queue_default_conf_get(event_d_id, event_q_id,
+ &def_q_conf);
+ if (ret < 0)
+ rte_panic("Error to get default config of event queue\n");
+
+ if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
+ event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;
+
+ if (def_q_conf.nb_atomic_order_sequences <
+ event_q_conf.nb_atomic_order_sequences)
+ event_q_conf.nb_atomic_order_sequences =
+ def_q_conf.nb_atomic_order_sequences;
+
+ event_q_conf.event_queue_cfg = event_queue_cfg;
+ event_q_conf.schedule_type = rsrc->sched_type;
+ evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->evq.nb_queues);
+ if (!evt_rsrc->evq.event_q_id)
+ rte_panic("Memory allocation failure\n");
+
+ for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
+ event_q_id++) {
+ ret = rte_event_queue_setup(event_d_id, event_q_id,
+ &event_q_conf);
+ if (ret < 0)
+ rte_panic("Error in configuring event queue\n");
+ evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
+ }
+}
+
+static void
+l2fwd_rx_tx_adapter_setup_internal_port(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
+ uint8_t event_d_id = evt_rsrc->event_d_id;
+ uint16_t adapter_id = 0;
+ uint16_t nb_adapter = 0;
+ uint16_t port_id;
+ uint8_t q_id = 0;
+ int ret;
+
+ memset(&eth_q_conf, 0, sizeof(eth_q_conf));
+ eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ nb_adapter++;
+ }
+
+ evt_rsrc->rx_adptr.nb_rx_adptr = nb_adapter;
+ evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->rx_adptr.nb_rx_adptr);
+ if (!evt_rsrc->rx_adptr.rx_adptr) {
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memery for Rx adapter\n");
+ }
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create rx adapter[%d]\n",
+ adapter_id);
+
+ /* Configure user requested sched type */
+ eth_q_conf.ev.sched_type = rsrc->sched_type;
+ eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
+ ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
+ -1, &eth_q_conf);
+ if (ret)
+ rte_panic("Failed to add queues to Rx adapter\n");
+
+ ret = rte_event_eth_rx_adapter_start(adapter_id);
+ if (ret)
+ rte_panic("Rx adapter[%d] start Failed\n", adapter_id);
+
+ evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
+ adapter_id++;
+ if (q_id < evt_rsrc->evq.nb_queues)
+ q_id++;
+ }
+
+ evt_rsrc->tx_adptr.nb_tx_adptr = nb_adapter;
+ evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
+ evt_rsrc->tx_adptr.nb_tx_adptr);
+ if (!evt_rsrc->tx_adptr.tx_adptr) {
+ free(evt_rsrc->rx_adptr.rx_adptr);
+ free(evt_rsrc->evp.event_p_id);
+ free(evt_rsrc->evq.event_q_id);
+ rte_panic("Failed to allocate memery for Rx adapter\n");
+ }
+
+ adapter_id = 0;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
+ &evt_rsrc->def_p_conf);
+ if (ret)
+ rte_panic("Failed to create tx adapter[%d]\n",
+ adapter_id);
+
+ ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
+ -1);
+ if (ret)
+ rte_panic("Failed to add queues to Tx adapter\n");
+
+ ret = rte_event_eth_tx_adapter_start(adapter_id);
+ if (ret)
+ rte_panic("Tx adapter[%d] start Failed\n", adapter_id);
+
+ evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
+ adapter_id++;
+ }
+}
+
+void
+l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
+{
+ ops->event_device_setup = l2fwd_event_device_setup_internal_port;
+ ops->event_queue_setup = l2fwd_event_queue_setup_internal_port;
+ ops->event_port_setup = l2fwd_event_port_setup_internal_port;
+ ops->adapter_setup = l2fwd_rx_tx_adapter_setup_internal_port;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-event/l2fwd_poll.c b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_poll.c
new file mode 100644
index 000000000..2033c65e5
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_poll.c
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "l2fwd_poll.h"
+
+static inline void
+l2fwd_poll_simple_forward(struct l2fwd_resources *rsrc, struct rte_mbuf *m,
+ uint32_t portid)
+{
+ struct rte_eth_dev_tx_buffer *buffer;
+ uint32_t dst_port;
+ int sent;
+
+ dst_port = rsrc->dst_ports[portid];
+
+ if (rsrc->mac_updating)
+ l2fwd_mac_updating(m, dst_port, &rsrc->eth_addr[dst_port]);
+
+ buffer = ((struct l2fwd_poll_resources *)rsrc->poll_rsrc)->tx_buffer[
+ dst_port];
+ sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
+ if (sent)
+ rsrc->port_stats[dst_port].tx += sent;
+}
+
+/* main poll mode processing loop */
+static void
+l2fwd_poll_main_loop(struct l2fwd_resources *rsrc)
+{
+ uint64_t prev_tsc, diff_tsc, cur_tsc, drain_tsc;
+ struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_eth_dev_tx_buffer *buf;
+ struct lcore_queue_conf *qconf;
+ uint32_t i, j, port_id, nb_rx;
+ struct rte_mbuf *m;
+ uint32_t lcore_id;
+ int32_t sent;
+
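+ /* Convert the drain interval to TSC cycles: ceil(hz / US_PER_S)
+  * cycles per microsecond, scaled by BURST_TX_DRAIN_US.
+  */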
+ drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
+ BURST_TX_DRAIN_US;
+ prev_tsc = 0;
+
+ lcore_id = rte_lcore_id();
+ qconf = &poll_rsrc->lcore_queue_conf[lcore_id];
+
+ if (qconf->n_rx_port == 0) {
+ printf("lcore %u has nothing to do\n", lcore_id);
+ return;
+ }
+
+ printf("entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ port_id = qconf->rx_port_list[i];
+ printf(" -- lcoreid=%u port_id=%u\n", lcore_id, port_id);
+
+ }
+
+ while (!rsrc->force_quit) {
+
+ cur_tsc = rte_rdtsc();
+
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ for (i = 0; i < qconf->n_rx_port; i++) {
+ port_id =
+ rsrc->dst_ports[qconf->rx_port_list[i]];
+ buf = poll_rsrc->tx_buffer[port_id];
+ sent = rte_eth_tx_buffer_flush(port_id, 0, buf);
+ if (sent)
+ rsrc->port_stats[port_id].tx += sent;
+ }
+
+ prev_tsc = cur_tsc;
+ }
+
+ /*
+ * Read packet from RX queues
+ */
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ port_id = qconf->rx_port_list[i];
+ nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst,
+ MAX_PKT_BURST);
+
+ rsrc->port_stats[port_id].rx += nb_rx;
+
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
+ rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+ l2fwd_poll_simple_forward(rsrc, m, port_id);
+ }
+ }
+ }
+}
+
+static void
+l2fwd_poll_lcore_config(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
+ struct lcore_queue_conf *qconf = NULL;
+ uint32_t rx_lcore_id = 0;
+ uint16_t port_id;
+
+ /* Initialize the port/queue configuration of each logical core */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ /* skip ports that are not enabled */
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ /* get the lcore_id for this port */
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+ rx_lcore_id == rte_get_master_lcore() ||
+ poll_rsrc->lcore_queue_conf[rx_lcore_id].n_rx_port ==
+ rsrc->rx_queue_per_lcore) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_panic("Not enough cores\n");
+ }
+
+ if (qconf != &poll_rsrc->lcore_queue_conf[rx_lcore_id]) {
+ /* Assigned a new logical core in the loop above. */
+ qconf = &poll_rsrc->lcore_queue_conf[rx_lcore_id];
+ }
+
+ qconf->rx_port_list[qconf->n_rx_port] = port_id;
+ qconf->n_rx_port++;
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, port_id);
+ }
+}
+
+static void
+l2fwd_poll_init_tx_buffers(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
+ uint16_t port_id;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ /* Initialize TX buffers */
+ poll_rsrc->tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
+ RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+ rte_eth_dev_socket_id(port_id));
+ if (poll_rsrc->tx_buffer[port_id] == NULL)
+ rte_panic("Cannot allocate buffer for tx on port %u\n",
+ port_id);
+
+ rte_eth_tx_buffer_init(poll_rsrc->tx_buffer[port_id],
+ MAX_PKT_BURST);
+
+ ret = rte_eth_tx_buffer_set_err_callback(
+ poll_rsrc->tx_buffer[port_id],
+ rte_eth_tx_buffer_count_callback,
+ &rsrc->port_stats[port_id].dropped);
+ if (ret < 0)
+ rte_panic("Cannot set error callback for tx buffer on port %u\n",
+ port_id);
+ }
+}
+
+void
+l2fwd_poll_resource_setup(struct l2fwd_resources *rsrc)
+{
+ struct l2fwd_poll_resources *poll_rsrc;
+
+ poll_rsrc = rte_zmalloc("l2fwd_poll_rsrc",
+ sizeof(struct l2fwd_poll_resources), 0);
+ if (poll_rsrc == NULL)
+ rte_panic("Failed to allocate resources for l2fwd poll mode\n");
+
+ rsrc->poll_rsrc = poll_rsrc;
+ l2fwd_poll_lcore_config(rsrc);
+ l2fwd_poll_init_tx_buffers(rsrc);
+
+ poll_rsrc->poll_main_loop = l2fwd_poll_main_loop;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-event/l2fwd_poll.h b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_poll.h
new file mode 100644
index 000000000..d59b0c844
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/l2fwd_poll.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __L2FWD_POLL_H__
+#define __L2FWD_POLL_H__
+
+#include "l2fwd_common.h"
+
+typedef void (*poll_main_loop_cb)(struct l2fwd_resources *rsrc);
+
+struct lcore_queue_conf {
+ uint32_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+ uint32_t n_rx_port;
+} __rte_cache_aligned;
+
+struct l2fwd_poll_resources {
+ poll_main_loop_cb poll_main_loop;
+ struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+};
+
+void l2fwd_poll_resource_setup(struct l2fwd_resources *rsrc);
+
+#endif
diff --git a/src/spdk/dpdk/examples/l2fwd-event/main.c b/src/spdk/dpdk/examples/l2fwd-event/main.c
new file mode 100644
index 000000000..9593ef11e
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/main.c
@@ -0,0 +1,720 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_string_fns.h>
+
+#include "l2fwd_event.h"
+#include "l2fwd_poll.h"
+
+/* display usage */
+static void
+l2fwd_event_usage(const char *prgname)
+{
+ printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+ " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
+ " -T PERIOD: statistics will be refreshed each PERIOD seconds "
+ " (0 to disable, 10 default, 86400 maximum)\n"
+ " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
+ " When enabled:\n"
+ " - The source MAC address is replaced by the TX port MAC address\n"
+ " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
+ " --mode: Packet transfer mode for I/O, poll or eventdev\n"
+ " Default mode = eventdev\n"
+ " --eventq-sched: Event queue schedule type, ordered, atomic or parallel.\n"
+ " Default: atomic\n"
+ " Valid only if --mode=eventdev\n"
+ " --config: Configure forwarding port pair mapping\n"
+ " Default: alternate port pairs\n\n",
+ prgname);
+}
+
+static int
+l2fwd_event_parse_portmask(const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(portmask, &end, 16);
+ if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (pm == 0)
+ return -1;
+
+ return pm;
+}
+
+static unsigned int
+l2fwd_event_parse_nqueue(const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+ /* parse decimal string */
+ n = strtoul(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return 0;
+ if (n == 0)
+ return 0;
+ if (n >= MAX_RX_QUEUE_PER_LCORE)
+ return 0;
+
+ return n;
+}
+
+static int
+l2fwd_event_parse_timer_period(const char *q_arg)
+{
+ char *end = NULL;
+ int n;
+
+ /* parse number string */
+ n = strtol(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+ if (n >= MAX_TIMER_PERIOD)
+ return -1;
+
+ return n;
+}
+
+static void
+l2fwd_event_parse_mode(const char *optarg,
+ struct l2fwd_resources *rsrc)
+{
+ if (!strncmp(optarg, "poll", 4))
+ rsrc->event_mode = false;
+ else if (!strncmp(optarg, "eventdev", 8))
+ rsrc->event_mode = true;
+}
+
+static void
+l2fwd_event_parse_eventq_sched(const char *optarg,
+ struct l2fwd_resources *rsrc)
+{
+ if (!strncmp(optarg, "ordered", 7))
+ rsrc->sched_type = RTE_SCHED_TYPE_ORDERED;
+ else if (!strncmp(optarg, "atomic", 6))
+ rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
+ else if (!strncmp(optarg, "parallel", 8))
+ rsrc->sched_type = RTE_SCHED_TYPE_PARALLEL;
+}
+
+static int
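+/* Parse port pairs given as "(port1,port2)" tuples, e.g.
+ * --config="(0,1),(2,3)"; each port may appear in at most one pair.
+ */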
+l2fwd_parse_port_pair_config(const char *q_arg, struct l2fwd_resources *rsrc)
+{
+ enum fieldnames {
+ FLD_PORT1 = 0,
+ FLD_PORT2,
+ _NUM_FLD
+ };
+ const char *p, *p0 = q_arg;
+ uint16_t int_fld[_NUM_FLD];
+ char *str_fld[_NUM_FLD];
+ uint16_t port_pair = 0;
+ unsigned int size;
+ char s[256];
+ char *end;
+ int i;
+
+ while ((p = strchr(p0, '(')) != NULL) {
+ ++p;
+ p0 = strchr(p, ')');
+ if (p0 == NULL)
+ return -1;
+
+ size = p0 - p;
+ if (size >= sizeof(s))
+ return -1;
+
+ memcpy(s, p, size);
+ s[size] = '\0'; /* NUL-terminate before splitting */
+ if (rte_strsplit(s, sizeof(s), str_fld,
+ _NUM_FLD, ',') != _NUM_FLD)
+ return -1;
+
+ for (i = 0; i < _NUM_FLD; i++) {
+ errno = 0;
+ int_fld[i] = strtoul(str_fld[i], &end, 0);
+ if (errno != 0 || end == str_fld[i] ||
+ int_fld[i] >= RTE_MAX_ETHPORTS)
+ return -1;
+ }
+
+ if (port_pair >= RTE_MAX_ETHPORTS / 2) {
+ printf("exceeded max number of port pair params: Current %d Max = %d\n",
+ port_pair, RTE_MAX_ETHPORTS / 2);
+ return -1;
+ }
+
+ if ((rsrc->dst_ports[int_fld[FLD_PORT1]] != UINT32_MAX) ||
+ (rsrc->dst_ports[int_fld[FLD_PORT2]] != UINT32_MAX)) {
+ printf("Duplicate port pair (%d,%d) config\n",
+ int_fld[FLD_PORT1], int_fld[FLD_PORT2]);
+ return -1;
+ }
+
+ rsrc->dst_ports[int_fld[FLD_PORT1]] = int_fld[FLD_PORT2];
+ rsrc->dst_ports[int_fld[FLD_PORT2]] = int_fld[FLD_PORT1];
+
+ port_pair++;
+ }
+
+ rsrc->port_pairs = true;
+
+ return 0;
+}
+
+static const char short_options[] =
+ "p:" /* portmask */
+ "q:" /* number of queues */
+ "T:" /* timer period */
+ ;
+
+#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
+#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
+#define CMD_LINE_OPT_MODE "mode"
+#define CMD_LINE_OPT_EVENTQ_SCHED "eventq-sched"
+#define CMD_LINE_OPT_PORT_PAIR_CONF "config"
+
+enum {
+ /* long options mapped to a short option */
+
+ /* first long only option value must be >= 256, so that we won't
+ * conflict with short options
+ */
+ CMD_LINE_OPT_MIN_NUM = 256,
+ CMD_LINE_OPT_MODE_NUM,
+ CMD_LINE_OPT_EVENTQ_SCHED_NUM,
+ CMD_LINE_OPT_PORT_PAIR_CONF_NUM,
+};
+
+/* Parse the argument given in the command line of the application */
+static int
+l2fwd_event_parse_args(int argc, char **argv, struct l2fwd_resources *rsrc)
+{
+ int mac_updating = 1;
+ struct option lgopts[] = {
+ { CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
+ { CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
+ { CMD_LINE_OPT_MODE, required_argument, NULL,
+ CMD_LINE_OPT_MODE_NUM},
+ { CMD_LINE_OPT_EVENTQ_SCHED, required_argument, NULL,
+ CMD_LINE_OPT_EVENTQ_SCHED_NUM},
+ { CMD_LINE_OPT_PORT_PAIR_CONF, required_argument, NULL,
+ CMD_LINE_OPT_PORT_PAIR_CONF_NUM},
+ {NULL, 0, 0, 0}
+ };
+ int opt, ret, timer_secs;
+ char *prgname = argv[0];
+ uint16_t port_id;
+ int option_index;
+ char **argvopt;
+
+ /* reset l2fwd_dst_ports */
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
+ rsrc->dst_ports[port_id] = UINT32_MAX;
+
+ argvopt = argv;
+ while ((opt = getopt_long(argc, argvopt, short_options,
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* portmask */
+ case 'p':
+ rsrc->enabled_port_mask =
+ l2fwd_event_parse_portmask(optarg);
+ if (rsrc->enabled_port_mask == 0) {
+ printf("invalid portmask\n");
+ l2fwd_event_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* nqueue */
+ case 'q':
+ rsrc->rx_queue_per_lcore =
+ l2fwd_event_parse_nqueue(optarg);
+ if (rsrc->rx_queue_per_lcore == 0) {
+ printf("invalid queue number\n");
+ l2fwd_event_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* timer period */
+ case 'T':
+ timer_secs = l2fwd_event_parse_timer_period(optarg);
+ if (timer_secs < 0) {
+ printf("invalid timer period\n");
+ l2fwd_event_usage(prgname);
+ return -1;
+ }
+ rsrc->timer_period = timer_secs;
+ /* convert to number of cycles */
+ rsrc->timer_period *= rte_get_timer_hz();
+ break;
+
+ case CMD_LINE_OPT_MODE_NUM:
+ l2fwd_event_parse_mode(optarg, rsrc);
+ break;
+
+ case CMD_LINE_OPT_EVENTQ_SCHED_NUM:
+ l2fwd_event_parse_eventq_sched(optarg, rsrc);
+ break;
+
+ case CMD_LINE_OPT_PORT_PAIR_CONF_NUM:
+ ret = l2fwd_parse_port_pair_config(optarg, rsrc);
+ if (ret) {
+ printf("Invalid port pair config\n");
+ l2fwd_event_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* long options */
+ case 0:
+ break;
+
+ default:
+ l2fwd_event_usage(prgname);
+ return -1;
+ }
+ }
+
+ rsrc->mac_updating = mac_updating;
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 1; /* reset getopt lib */
+ return ret;
+}
+
+/*
+ * Check port pair config with enabled port mask,
+ * and for valid port pair combinations.
+ */
+static int
+check_port_pair_config(struct l2fwd_resources *rsrc)
+{
+ uint32_t port_pair_mask = 0;
+ uint32_t portid;
+ uint16_t index;
+
+ for (index = 0; index < rte_eth_dev_count_avail(); index++) {
+ if ((rsrc->enabled_port_mask & (1 << index)) == 0 ||
+ (port_pair_mask & (1 << index)))
+ continue;
+
+ portid = rsrc->dst_ports[index];
+ if (portid == UINT32_MAX) {
+ printf("port %u is enabled in but no valid port pair\n",
+ index);
+ return -1;
+ }
+
+ if (!rte_eth_dev_is_valid_port(index)) {
+ printf("port %u is not valid\n", index);
+ return -1;
+ }
+
+ if (!rte_eth_dev_is_valid_port(portid)) {
+ printf("port %u is not valid\n", portid);
+ return -1;
+ }
+
+ if (port_pair_mask & (1 << portid) &&
+ rsrc->dst_ports[portid] != index) {
+ printf("port %u is used in other port pairs\n", portid);
+ return -1;
+ }
+
+ port_pair_mask |= (1 << portid);
+ port_pair_mask |= (1 << index);
+ }
+
+ return 0;
+}
+
+static int
+l2fwd_launch_one_lcore(void *args)
+{
+ struct l2fwd_resources *rsrc = args;
+ struct l2fwd_poll_resources *poll_rsrc = rsrc->poll_rsrc;
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+
+ if (rsrc->event_mode)
+ evt_rsrc->ops.l2fwd_event_loop(rsrc);
+ else
+ poll_rsrc->poll_main_loop(rsrc);
+
+ return 0;
+}
+
+/* Check the link status of all ports in up to 9s, and print them finally */
+static void
+check_all_ports_link_status(struct l2fwd_resources *rsrc,
+ uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint16_t port_id;
+ uint8_t count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+ int ret;
+
+ printf("\nChecking link status...");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ if (rsrc->force_quit)
+ return;
+ all_ports_up = 1;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if (rsrc->force_quit)
+ return;
+ if ((port_mask & (1 << port_id)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ ret = rte_eth_link_get_nowait(port_id, &link);
+ if (ret < 0) {
+ all_ports_up = 0;
+ if (print_flag == 1)
+ printf("Port %u link get failed: %s\n",
+ port_id, rte_strerror(-ret));
+ continue;
+ }
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ port_id, link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex"));
+ else
+ printf("Port %d Link Down\n", port_id);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+/* Print out statistics on packets dropped */
+static void
+print_stats(struct l2fwd_resources *rsrc)
+{
+ uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+ uint32_t port_id;
+
+ total_packets_dropped = 0;
+ total_packets_tx = 0;
+ total_packets_rx = 0;
+
+ const char clr[] = {27, '[', '2', 'J', '\0' };
+ const char topLeft[] = {27, '[', '1', ';', '1', 'H', '\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("\nPort statistics ====================================");
+
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ /* skip disabled ports */
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+ printf("\nStatistics for port %u ------------------------------"
+ "\nPackets sent: %29"PRIu64
+ "\nPackets received: %25"PRIu64
+ "\nPackets dropped: %26"PRIu64,
+ port_id,
+ rsrc->port_stats[port_id].tx,
+ rsrc->port_stats[port_id].rx,
+ rsrc->port_stats[port_id].dropped);
+
+ total_packets_dropped +=
+ rsrc->port_stats[port_id].dropped;
+ total_packets_tx += rsrc->port_stats[port_id].tx;
+ total_packets_rx += rsrc->port_stats[port_id].rx;
+ }
+
+ if (rsrc->event_mode) {
+ struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
+ struct rte_event_eth_rx_adapter_stats rx_adptr_stats;
+ struct rte_event_eth_tx_adapter_stats tx_adptr_stats;
+ int ret, i;
+
+ for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++) {
+ ret = rte_event_eth_rx_adapter_stats_get(
+ evt_rsrc->rx_adptr.rx_adptr[i],
+ &rx_adptr_stats);
+ if (ret < 0)
+ continue;
+ printf("\nRx adapter[%d] statistics===================="
+ "\nReceive queue poll count: %17"PRIu64
+ "\nReceived packet count: %20"PRIu64
+ "\nEventdev enqueue count: %19"PRIu64
+ "\nEventdev enqueue retry count: %13"PRIu64
+ "\nReceived packet dropped count: %12"PRIu64
+ "\nRx enqueue start timestamp: %15"PRIu64
+ "\nRx enqueue block cycles: %18"PRIu64
+ "\nRx enqueue unblock timestamp: %13"PRIu64,
+ evt_rsrc->rx_adptr.rx_adptr[i],
+ rx_adptr_stats.rx_poll_count,
+ rx_adptr_stats.rx_packets,
+ rx_adptr_stats.rx_enq_count,
+ rx_adptr_stats.rx_enq_retry,
+ rx_adptr_stats.rx_dropped,
+ rx_adptr_stats.rx_enq_start_ts,
+ rx_adptr_stats.rx_enq_block_cycles,
+ rx_adptr_stats.rx_enq_end_ts);
+ }
+ for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++) {
+ ret = rte_event_eth_tx_adapter_stats_get(
+ evt_rsrc->tx_adptr.tx_adptr[i],
+ &tx_adptr_stats);
+ if (ret < 0)
+ continue;
+ printf("\nTx adapter[%d] statistics===================="
+ "\nNumber of transmit retries: %15"PRIu64
+ "\nNumber of packets transmitted: %12"PRIu64
+ "\nNumber of packets dropped: %16"PRIu64,
+ evt_rsrc->tx_adptr.tx_adptr[i],
+ tx_adptr_stats.tx_retry,
+ tx_adptr_stats.tx_packets,
+ tx_adptr_stats.tx_dropped);
+ }
+ }
+ printf("\nAggregate lcore statistics ========================="
+ "\nTotal packets sent: %23"PRIu64
+ "\nTotal packets received: %19"PRIu64
+ "\nTotal packets dropped: %20"PRIu64,
+ total_packets_tx,
+ total_packets_rx,
+ total_packets_dropped);
+ printf("\n====================================================\n");
+}
+
+static void
+l2fwd_event_print_stats(struct l2fwd_resources *rsrc)
+{
+ uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
+ const uint64_t timer_period = rsrc->timer_period;
+
+ while (!rsrc->force_quit) {
+ /* if timer is enabled */
+ if (timer_period > 0) {
+ cur_tsc = rte_rdtsc();
+ diff_tsc = cur_tsc - prev_tsc;
+
+ /* advance the timer */
+ timer_tsc += diff_tsc;
+
+ /* if timer has reached its timeout */
+ if (unlikely(timer_tsc >= timer_period)) {
+ print_stats(rsrc);
+ /* reset the timer */
+ timer_tsc = 0;
+ }
+ prev_tsc = cur_tsc;
+ }
+ }
+}
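+
+/*
+ * The loop above is the stock TSC polling timer: accumulate timestamp
+ * deltas and fire once the accumulator crosses the period. A standalone
+ * sketch using clock_gettime() in place of rte_rdtsc() (assumed POSIX,
+ * not part of this example):
+ */
+#include <stdint.h>
+#include <stdio.h>
+#include <time.h>
+
+static uint64_t now_ns(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+int main(void)
+{
+	const uint64_t period = 1000000000ull;	/* 1 second */
+	uint64_t prev = now_ns(), acc = 0;
+
+	for (int fired = 0; fired < 3; ) {
+		uint64_t cur = now_ns();
+
+		acc += cur - prev;	/* advance the timer */
+		prev = cur;
+		if (acc >= period) {	/* timeout reached */
+			printf("tick %d\n", ++fired);
+			acc = 0;	/* reset the timer */
+		}
+	}
+	return 0;
+}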
+
+static void
+signal_handler(int signum)
+{
+ struct l2fwd_resources *rsrc = l2fwd_get_rsrc();
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ signum);
+ rsrc->force_quit = true;
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct l2fwd_resources *rsrc;
+ uint16_t nb_ports_available = 0;
+ uint32_t nb_ports_in_mask = 0;
+ uint16_t port_id, last_port;
+ uint32_t nb_mbufs;
+ uint16_t nb_ports;
+ int i, ret;
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_panic("Invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ rsrc = l2fwd_get_rsrc();
+
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ /* parse application arguments (after the EAL ones) */
+ ret = l2fwd_event_parse_args(argc, argv, rsrc);
+ if (ret < 0)
+ rte_panic("Invalid L2FWD arguments\n");
+
+ printf("MAC updating %s\n", rsrc->mac_updating ? "enabled" :
+ "disabled");
+
+ nb_ports = rte_eth_dev_count_avail();
+ if (nb_ports == 0)
+ rte_panic("No Ethernet ports - bye\n");
+
+	/* check that the port mask only covers available ports */
+ if (rsrc->enabled_port_mask & ~((1 << nb_ports) - 1))
+ rte_panic("Invalid portmask; possible (0x%x)\n",
+ (1 << nb_ports) - 1);
+
+ if (!rsrc->port_pairs) {
+ last_port = 0;
+ /*
+ * Each logical core is assigned a dedicated TX queue on each
+ * port.
+ */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ /* skip ports that are not enabled */
+ if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
+ continue;
+
+ if (nb_ports_in_mask % 2) {
+ rsrc->dst_ports[port_id] = last_port;
+ rsrc->dst_ports[last_port] = port_id;
+ } else {
+ last_port = port_id;
+ }
+
+ nb_ports_in_mask++;
+ }
+ if (nb_ports_in_mask % 2) {
+ printf("Notice: odd number of ports in portmask.\n");
+ rsrc->dst_ports[last_port] = last_port;
+ }
+ } else {
+ if (check_port_pair_config(rsrc) < 0)
+ rte_panic("Invalid port pair config\n");
+ }
+
+ nb_mbufs = RTE_MAX(nb_ports * (RTE_TEST_RX_DESC_DEFAULT +
+ RTE_TEST_TX_DESC_DEFAULT +
+ MAX_PKT_BURST + rte_lcore_count() *
+ MEMPOOL_CACHE_SIZE), 8192U);
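+	/*
+	 * Worked example for the sizing above (illustrative; MAX_PKT_BURST
+	 * and MEMPOOL_CACHE_SIZE come from l2fwd_common.h, assumed 32 and
+	 * 256 here): with 2 ports and 4 lcores,
+	 * 2 * (1024 + 1024 + 32 + 4 * 256) = 6208, so the 8192U floor wins.
+	 * The pool must cover every descriptor ring plus in-flight bursts
+	 * and the per-lcore mempool caches.
+	 */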
+
+ /* create the mbuf pool */
+ rsrc->pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool",
+ nb_mbufs, MEMPOOL_CACHE_SIZE, 0,
+ RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ if (rsrc->pktmbuf_pool == NULL)
+ rte_panic("Cannot init mbuf pool\n");
+
+ nb_ports_available = l2fwd_event_init_ports(rsrc);
+ if (!nb_ports_available)
+ rte_panic("All available ports are disabled. Please set portmask.\n");
+
+ /* Configure eventdev parameters if required */
+ if (rsrc->event_mode)
+ l2fwd_event_resource_setup(rsrc);
+ else
+ l2fwd_poll_resource_setup(rsrc);
+
+ /* initialize port stats */
+	memset(&rsrc->port_stats, 0, sizeof(rsrc->port_stats));
+
+ /* All settings are done. Now enable eth devices */
+ RTE_ETH_FOREACH_DEV(port_id) {
+ /* skip ports that are not enabled */
+ if ((rsrc->enabled_port_mask &
+ (1 << port_id)) == 0)
+ continue;
+
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0)
+ rte_panic("rte_eth_dev_start:err=%d, port=%u\n", ret,
+ port_id);
+ }
+
+ if (rsrc->event_mode)
+ l2fwd_event_service_setup(rsrc);
+
+ check_all_ports_link_status(rsrc, rsrc->enabled_port_mask);
+
+ /* launch per-lcore init on every lcore */
+ rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, rsrc,
+ SKIP_MASTER);
+ l2fwd_event_print_stats(rsrc);
+ if (rsrc->event_mode) {
+ struct l2fwd_event_resources *evt_rsrc =
+ rsrc->evt_rsrc;
+ for (i = 0; i < evt_rsrc->rx_adptr.nb_rx_adptr; i++)
+ rte_event_eth_rx_adapter_stop(
+ evt_rsrc->rx_adptr.rx_adptr[i]);
+ for (i = 0; i < evt_rsrc->tx_adptr.nb_tx_adptr; i++)
+ rte_event_eth_tx_adapter_stop(
+ evt_rsrc->tx_adptr.tx_adptr[i]);
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask &
+ (1 << port_id)) == 0)
+ continue;
+ rte_eth_dev_stop(port_id);
+ }
+
+ rte_eal_mp_wait_lcore();
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask &
+ (1 << port_id)) == 0)
+ continue;
+ rte_eth_dev_close(port_id);
+ }
+
+ rte_event_dev_stop(evt_rsrc->event_d_id);
+ rte_event_dev_close(evt_rsrc->event_d_id);
+
+ } else {
+ rte_eal_mp_wait_lcore();
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ if ((rsrc->enabled_port_mask &
+ (1 << port_id)) == 0)
+ continue;
+ printf("Closing port %d...", port_id);
+ rte_eth_dev_stop(port_id);
+ rte_eth_dev_close(port_id);
+ printf(" Done\n");
+ }
+ }
+ printf("Bye...\n");
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-event/meson.build b/src/spdk/dpdk/examples/l2fwd-event/meson.build
new file mode 100644
index 000000000..4a546eaf8
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-event/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+allow_experimental_apis = true
+deps += 'eventdev'
+sources = files(
+ 'main.c',
+ 'l2fwd_poll.c',
+ 'l2fwd_common.c',
+ 'l2fwd_event.c',
+ 'l2fwd_event_internal_port.c',
+ 'l2fwd_event_generic.c'
+)
diff --git a/src/spdk/dpdk/examples/l2fwd-jobstats/Makefile b/src/spdk/dpdk/examples/l2fwd-jobstats/Makefile
new file mode 100644
index 000000000..6cd9dcd9c
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-jobstats/Makefile
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2015 Intel Corporation
+
+# binary name
+APP = l2fwd-jobstats
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+# Build using pkg-config variables if possible
+ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
+
+all: shared
+.PHONY: shared static
+shared: build/$(APP)-shared
+ ln -sf $(APP)-shared build/$(APP)
+static: build/$(APP)-static
+ ln -sf $(APP)-static build/$(APP)
+
+PKGCONF ?= pkg-config
+
+PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null)
+CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
+LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
+LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
+
+build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
+
+build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
+
+build:
+ @mkdir -p $@
+
+.PHONY: clean
+clean:
+ rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
+ test -d build && rmdir -p build || true
+
+else # Build using legacy build system
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, detect a build directory, by looking for a path with a .config
+RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+endif
diff --git a/src/spdk/dpdk/examples/l2fwd-jobstats/main.c b/src/spdk/dpdk/examples/l2fwd-jobstats/main.c
new file mode 100644
index 000000000..396fd89db
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-jobstats/main.c
@@ -0,0 +1,1029 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <locale.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_spinlock.h>
+
+#include <rte_errno.h>
+#include <rte_jobstats.h>
+#include <rte_timer.h>
+#include <rte_alarm.h>
+#include <rte_pause.h>
+
+#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
+
+#define NB_MBUF 8192
+
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
+
+/* mask of enabled ports */
+static uint32_t l2fwd_enabled_port_mask;
+
+/* list of enabled ports */
+static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
+
+#define UPDATE_STEP_UP 1
+#define UPDATE_STEP_DOWN 32
+
+static unsigned int l2fwd_rx_queue_per_lcore = 1;
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+struct lcore_queue_conf {
+ unsigned n_rx_port;
+ unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+ uint64_t next_flush_time[RTE_MAX_ETHPORTS];
+
+ struct rte_timer rx_timers[MAX_RX_QUEUE_PER_LCORE];
+ struct rte_jobstats port_fwd_jobs[MAX_RX_QUEUE_PER_LCORE];
+
+ struct rte_timer flush_timer;
+ struct rte_jobstats flush_job;
+ struct rte_jobstats idle_job;
+ struct rte_jobstats_context jobs_context;
+
+ rte_atomic16_t stats_read_pending;
+ rte_spinlock_t lock;
+} __rte_cache_aligned;
+struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+
+static struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .split_hdr_size = 0,
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+};
+
+struct rte_mempool *l2fwd_pktmbuf_pool = NULL;
+
+/* Per-port statistics struct */
+struct l2fwd_port_statistics {
+ uint64_t tx;
+ uint64_t rx;
+ uint64_t dropped;
+} __rte_cache_aligned;
+struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
+
+/* 1 day max */
+#define MAX_TIMER_PERIOD 86400
+/* default period is 10 seconds */
+static int64_t timer_period = 10;
+/* default timer frequency */
+static double hz;
+/* BURST_TX_DRAIN_US converted to cycles */
+uint64_t drain_tsc;
+/* Convert cycles to ns */
+static inline double
+cycles_to_ns(uint64_t cycles)
+{
+ double t = cycles;
+
+ t *= (double)NS_PER_S;
+ t /= hz;
+ return t;
+}
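+
+/*
+ * Worked example (illustrative): with hz = 2.5e9 (a 2.5 GHz timer),
+ * cycles_to_ns(1000) = 1000 * 1e9 / 2.5e9 = 400 ns. Going through a
+ * double keeps the conversion from overflowing on large cycle counts.
+ */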
+
+static void
+show_lcore_stats(unsigned lcore_id)
+{
+ struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
+ struct rte_jobstats_context *ctx = &qconf->jobs_context;
+ struct rte_jobstats *job;
+ uint8_t i;
+
+ /* LCore statistics. */
+ uint64_t stats_period, loop_count;
+ uint64_t exec, exec_min, exec_max;
+ uint64_t management, management_min, management_max;
+ uint64_t busy, busy_min, busy_max;
+
+ /* Jobs statistics. */
+ const uint16_t port_cnt = qconf->n_rx_port;
+ uint64_t jobs_exec_cnt[port_cnt], jobs_period[port_cnt];
+ uint64_t jobs_exec[port_cnt], jobs_exec_min[port_cnt],
+ jobs_exec_max[port_cnt];
+
+ uint64_t flush_exec_cnt, flush_period;
+ uint64_t flush_exec, flush_exec_min, flush_exec_max;
+
+ uint64_t idle_exec_cnt;
+ uint64_t idle_exec, idle_exec_min, idle_exec_max;
+ uint64_t collection_time = rte_get_timer_cycles();
+
+ /* Ask forwarding thread to give us stats. */
+ rte_atomic16_set(&qconf->stats_read_pending, 1);
+ rte_spinlock_lock(&qconf->lock);
+ rte_atomic16_set(&qconf->stats_read_pending, 0);
+
+ /* Collect context statistics. */
+ stats_period = ctx->state_time - ctx->start_time;
+ loop_count = ctx->loop_cnt;
+
+ exec = ctx->exec_time;
+ exec_min = ctx->min_exec_time;
+ exec_max = ctx->max_exec_time;
+
+ management = ctx->management_time;
+ management_min = ctx->min_management_time;
+ management_max = ctx->max_management_time;
+
+ rte_jobstats_context_reset(ctx);
+
+ for (i = 0; i < port_cnt; i++) {
+ job = &qconf->port_fwd_jobs[i];
+
+ jobs_exec_cnt[i] = job->exec_cnt;
+ jobs_period[i] = job->period;
+
+ jobs_exec[i] = job->exec_time;
+ jobs_exec_min[i] = job->min_exec_time;
+ jobs_exec_max[i] = job->max_exec_time;
+
+ rte_jobstats_reset(job);
+ }
+
+ flush_exec_cnt = qconf->flush_job.exec_cnt;
+ flush_period = qconf->flush_job.period;
+ flush_exec = qconf->flush_job.exec_time;
+ flush_exec_min = qconf->flush_job.min_exec_time;
+ flush_exec_max = qconf->flush_job.max_exec_time;
+ rte_jobstats_reset(&qconf->flush_job);
+
+ idle_exec_cnt = qconf->idle_job.exec_cnt;
+ idle_exec = qconf->idle_job.exec_time;
+ idle_exec_min = qconf->idle_job.min_exec_time;
+ idle_exec_max = qconf->idle_job.max_exec_time;
+ rte_jobstats_reset(&qconf->idle_job);
+
+ rte_spinlock_unlock(&qconf->lock);
+
+ exec -= idle_exec;
+ busy = exec + management;
+ busy_min = exec_min + management_min;
+ busy_max = exec_max + management_max;
+
+ collection_time = rte_get_timer_cycles() - collection_time;
+
+#define STAT_FMT "\n%-18s %'14.0f %6.1f%% %'10.0f %'10.0f %'10.0f"
+
+ printf("\n----------------"
+ "\nLCore %3u: statistics (time in ns, collected in %'9.0f)"
+ "\n%-18s %14s %7s %10s %10s %10s "
+ "\n%-18s %'14.0f"
+ "\n%-18s %'14" PRIu64
+ STAT_FMT /* Exec */
+ STAT_FMT /* Management */
+ STAT_FMT /* Busy */
+ STAT_FMT, /* Idle */
+ lcore_id, cycles_to_ns(collection_time),
+ "Stat type", "total", "%total", "avg", "min", "max",
+ "Stats duration:", cycles_to_ns(stats_period),
+ "Loop count:", loop_count,
+ "Exec time",
+ cycles_to_ns(exec), exec * 100.0 / stats_period,
+ cycles_to_ns(loop_count ? exec / loop_count : 0),
+ cycles_to_ns(exec_min),
+ cycles_to_ns(exec_max),
+ "Management time",
+ cycles_to_ns(management), management * 100.0 / stats_period,
+ cycles_to_ns(loop_count ? management / loop_count : 0),
+ cycles_to_ns(management_min),
+ cycles_to_ns(management_max),
+ "Exec + management",
+ cycles_to_ns(busy), busy * 100.0 / stats_period,
+ cycles_to_ns(loop_count ? busy / loop_count : 0),
+ cycles_to_ns(busy_min),
+ cycles_to_ns(busy_max),
+ "Idle (job)",
+ cycles_to_ns(idle_exec), idle_exec * 100.0 / stats_period,
+ cycles_to_ns(idle_exec_cnt ? idle_exec / idle_exec_cnt : 0),
+ cycles_to_ns(idle_exec_min),
+ cycles_to_ns(idle_exec_max));
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+ job = &qconf->port_fwd_jobs[i];
+ printf("\n\nJob %" PRIu32 ": %-20s "
+ "\n%-18s %'14" PRIu64
+ "\n%-18s %'14.0f"
+ STAT_FMT,
+ i, job->name,
+ "Exec count:", jobs_exec_cnt[i],
+ "Exec period: ", cycles_to_ns(jobs_period[i]),
+ "Exec time",
+ cycles_to_ns(jobs_exec[i]), jobs_exec[i] * 100.0 / stats_period,
+ cycles_to_ns(jobs_exec_cnt[i] ? jobs_exec[i] / jobs_exec_cnt[i]
+ : 0),
+ cycles_to_ns(jobs_exec_min[i]),
+ cycles_to_ns(jobs_exec_max[i]));
+ }
+
+ if (qconf->n_rx_port > 0) {
+ job = &qconf->flush_job;
+ printf("\n\nJob %" PRIu32 ": %-20s "
+ "\n%-18s %'14" PRIu64
+ "\n%-18s %'14.0f"
+ STAT_FMT,
+ i, job->name,
+ "Exec count:", flush_exec_cnt,
+ "Exec period: ", cycles_to_ns(flush_period),
+ "Exec time",
+ cycles_to_ns(flush_exec), flush_exec * 100.0 / stats_period,
+ cycles_to_ns(flush_exec_cnt ? flush_exec / flush_exec_cnt : 0),
+ cycles_to_ns(flush_exec_min),
+ cycles_to_ns(flush_exec_max));
+ }
+}
+
+/* Print out statistics on packets dropped */
+static void
+show_stats_cb(__rte_unused void *param)
+{
+ uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+ unsigned portid, lcore_id;
+
+ total_packets_dropped = 0;
+ total_packets_tx = 0;
+ total_packets_rx = 0;
+
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s"
+ "\nPort statistics ===================================",
+ clr, topLeft);
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ /* skip disabled ports */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ printf("\nStatistics for port %u ------------------------------"
+ "\nPackets sent: %24"PRIu64
+ "\nPackets received: %20"PRIu64
+ "\nPackets dropped: %21"PRIu64,
+ portid,
+ port_statistics[portid].tx,
+ port_statistics[portid].rx,
+ port_statistics[portid].dropped);
+
+ total_packets_dropped += port_statistics[portid].dropped;
+ total_packets_tx += port_statistics[portid].tx;
+ total_packets_rx += port_statistics[portid].rx;
+ }
+
+ printf("\nAggregate statistics ==============================="
+ "\nTotal packets sent: %18"PRIu64
+ "\nTotal packets received: %14"PRIu64
+ "\nTotal packets dropped: %15"PRIu64
+ "\n====================================================",
+ total_packets_tx,
+ total_packets_rx,
+ total_packets_dropped);
+
+ RTE_LCORE_FOREACH(lcore_id) {
+ if (lcore_queue_conf[lcore_id].n_rx_port > 0)
+ show_lcore_stats(lcore_id);
+ }
+
+ printf("\n====================================================\n");
+ rte_eal_alarm_set(timer_period * US_PER_S, show_stats_cb, NULL);
+}
+
+static void
+l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
+{
+ struct rte_ether_hdr *eth;
+ void *tmp;
+ int sent;
+ unsigned dst_port;
+ struct rte_eth_dev_tx_buffer *buffer;
+
+ dst_port = l2fwd_dst_ports[portid];
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+ /* 02:00:00:00:00:xx */
+ tmp = &eth->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
+
+ /* src addr */
+ rte_ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
+
+ buffer = tx_buffer[dst_port];
+ sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
+ if (sent)
+ port_statistics[dst_port].tx += sent;
+}
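+
+/*
+ * Byte-level sketch of the destination MAC rewrite above (standalone, not
+ * part of this example; assumes a little-endian host as the example does):
+ * storing 0x000000000002 + (port << 40) as a uint64_t lays the bytes
+ * 02:00:00:00:00:<port> out in memory.
+ */
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+int main(void)
+{
+	uint64_t dst_port = 3;
+	uint64_t v = 0x000000000002ULL + (dst_port << 40);
+	uint8_t mac[8];
+
+	memcpy(mac, &v, 8);	/* the real code also writes 8 bytes; the
+				 * last 2 land in s_addr and are overwritten
+				 * right after by the source-address copy */
+	printf("%02X:%02X:%02X:%02X:%02X:%02X\n",
+		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+	return 0;	/* prints 02:00:00:00:00:03 */
+}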
+
+static void
+l2fwd_job_update_cb(struct rte_jobstats *job, int64_t result)
+{
+ int64_t err = job->target - result;
+	int64_t hysteresis = job->target / 8;
+
+	if (err < -hysteresis) {
+		if (job->min_period + UPDATE_STEP_DOWN < job->period)
+			job->period -= UPDATE_STEP_DOWN;
+	} else if (err > hysteresis) {
+ if (job->period + UPDATE_STEP_UP < job->max_period)
+ job->period += UPDATE_STEP_UP;
+ }
+}
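+
+/*
+ * Standalone sketch of the hysteresis controller above (not part of this
+ * example): the period only moves when the result leaves a +/- 12.5%
+ * dead band around the target, shrinking fast (UPDATE_STEP_DOWN = 32)
+ * and growing slowly (UPDATE_STEP_UP = 1).
+ */
+#include <inttypes.h>
+#include <stdio.h>
+
+struct job { int64_t target, period, min_period, max_period; };
+
+static void update(struct job *j, int64_t result)
+{
+	int64_t err = j->target - result;
+	int64_t band = j->target / 8;
+
+	if (err < -band) {		/* too much work: poll sooner */
+		if (j->min_period + 32 < j->period)
+			j->period -= 32;
+	} else if (err > band) {	/* too little work: back off */
+		if (j->period + 1 < j->max_period)
+			j->period += 1;
+	}
+}
+
+int main(void)
+{
+	struct job j = { .target = 32, .period = 500,
+			 .min_period = 100, .max_period = 1000 };
+
+	update(&j, 32);	/* inside the band: period stays 500 */
+	update(&j, 64);	/* overloaded: period drops to 468 */
+	update(&j, 0);	/* idle: period creeps back up to 469 */
+	printf("period=%" PRId64 "\n", j.period);
+	return 0;
+}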
+
+static void
+l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *m;
+
+ const uint16_t port_idx = (uintptr_t) arg;
+ const unsigned lcore_id = rte_lcore_id();
+ struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
+ struct rte_jobstats *job = &qconf->port_fwd_jobs[port_idx];
+ const uint16_t portid = qconf->rx_port_list[port_idx];
+
+ uint8_t j;
+ uint16_t total_nb_rx;
+
+ rte_jobstats_start(&qconf->jobs_context, job);
+
+	/* Call rx burst 2 times. This allows the rte_jobstats logic to see
+	 * whether this function must be called more frequently. */
+
+ total_nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
+ MAX_PKT_BURST);
+
+ for (j = 0; j < total_nb_rx; j++) {
+ m = pkts_burst[j];
+ rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+ l2fwd_simple_forward(m, portid);
+ }
+
+ if (total_nb_rx == MAX_PKT_BURST) {
+ const uint16_t nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
+ MAX_PKT_BURST);
+
+ total_nb_rx += nb_rx;
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
+ rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+ l2fwd_simple_forward(m, portid);
+ }
+ }
+
+ port_statistics[portid].rx += total_nb_rx;
+
+ /* Adjust period time in which we are running here. */
+ if (rte_jobstats_finish(job, total_nb_rx) != 0) {
+ rte_timer_reset(&qconf->rx_timers[port_idx], job->period, PERIODICAL,
+ lcore_id, l2fwd_fwd_job, arg);
+ }
+}
+
+static void
+l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)
+{
+ uint64_t now;
+ unsigned lcore_id;
+ struct lcore_queue_conf *qconf;
+ uint16_t portid;
+ unsigned i;
+ uint32_t sent;
+ struct rte_eth_dev_tx_buffer *buffer;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_queue_conf[lcore_id];
+
+ rte_jobstats_start(&qconf->jobs_context, &qconf->flush_job);
+
+ now = rte_get_timer_cycles();
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_queue_conf[lcore_id];
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+ portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
+
+ if (qconf->next_flush_time[portid] <= now)
+ continue;
+
+ buffer = tx_buffer[portid];
+ sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
+ if (sent)
+ port_statistics[portid].tx += sent;
+
+ qconf->next_flush_time[portid] = rte_get_timer_cycles() + drain_tsc;
+ }
+
+	/* Pass target to indicate that this job is happy with the time
+	 * interval at which it was called. */
+ rte_jobstats_finish(&qconf->flush_job, qconf->flush_job.target);
+}
+
+/* main processing loop */
+static void
+l2fwd_main_loop(void)
+{
+ unsigned lcore_id;
+ unsigned i, portid;
+ struct lcore_queue_conf *qconf;
+ uint8_t stats_read_pending = 0;
+ uint8_t need_manage;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_queue_conf[lcore_id];
+
+ if (qconf->n_rx_port == 0) {
+ RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ portid = qconf->rx_port_list[i];
+ RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
+ portid);
+ }
+
+ rte_jobstats_init(&qconf->idle_job, "idle", 0, 0, 0, 0);
+
+ for (;;) {
+ rte_spinlock_lock(&qconf->lock);
+
+ do {
+ rte_jobstats_context_start(&qconf->jobs_context);
+
+ /* Do the Idle job:
+ * - Read stats_read_pending flag
+			 * - check if some real job needs to be executed
+ */
+ rte_jobstats_start(&qconf->jobs_context, &qconf->idle_job);
+
+ uint64_t repeats = 0;
+
+ do {
+ uint8_t i;
+ uint64_t now = rte_get_timer_cycles();
+
+ repeats++;
+ need_manage = qconf->flush_timer.expire < now;
+				/* Check if we were asked to provide stats. */
+ stats_read_pending =
+ rte_atomic16_read(&qconf->stats_read_pending);
+ need_manage |= stats_read_pending;
+
+ for (i = 0; i < qconf->n_rx_port && !need_manage; i++)
+ need_manage = qconf->rx_timers[i].expire < now;
+
+ } while (!need_manage);
+
+ if (likely(repeats != 1))
+ rte_jobstats_finish(&qconf->idle_job, qconf->idle_job.target);
+ else
+ rte_jobstats_abort(&qconf->idle_job);
+
+ rte_timer_manage();
+ rte_jobstats_context_finish(&qconf->jobs_context);
+ } while (likely(stats_read_pending == 0));
+
+ rte_spinlock_unlock(&qconf->lock);
+ rte_pause();
+ }
+}
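+
+/*
+ * Miniature of the stats handshake above (standalone pthread sketch, not
+ * part of this example; build with -pthread): the worker holds the lock
+ * while running jobs and only drops it once a reader has raised the
+ * pending flag, so the reader gets a consistent snapshot while the
+ * worker's fast path never blocks.
+ */
+#include <pthread.h>
+#include <sched.h>
+#include <stdatomic.h>
+#include <stdio.h>
+
+static atomic_int pending, quit;
+static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+static unsigned long counter;	/* stands in for the jobstats context */
+
+static void *worker(void *arg)
+{
+	(void)arg;
+	pthread_mutex_lock(&lock);
+	while (!atomic_load(&quit)) {
+		counter++;				/* "run jobs" */
+		if (atomic_load(&pending)) {
+			pthread_mutex_unlock(&lock);	/* let reader in */
+			sched_yield();
+			pthread_mutex_lock(&lock);
+		}
+	}
+	pthread_mutex_unlock(&lock);
+	return NULL;
+}
+
+int main(void)
+{
+	pthread_t t;
+
+	pthread_create(&t, NULL, worker, NULL);
+	for (int i = 0; i < 3; i++) {
+		atomic_store(&pending, 1);
+		pthread_mutex_lock(&lock);	/* waits for worker to yield */
+		atomic_store(&pending, 0);
+		printf("snapshot: %lu\n", counter);
+		pthread_mutex_unlock(&lock);
+	}
+	atomic_store(&quit, 1);
+	pthread_join(t, NULL);
+	return 0;
+}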
+
+static int
+l2fwd_launch_one_lcore(__rte_unused void *dummy)
+{
+ l2fwd_main_loop();
+ return 0;
+}
+
+/* display usage */
+static void
+l2fwd_usage(const char *prgname)
+{
+ printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+ " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
+ " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n"
+ " -l set system default locale instead of default (\"C\" locale) for thousands separator in stats.",
+ prgname);
+}
+
+static int
+l2fwd_parse_portmask(const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(portmask, &end, 16);
+ if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (pm == 0)
+ return -1;
+
+ return pm;
+}
+
+static unsigned int
+l2fwd_parse_nqueue(const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+	/* parse decimal string */
+ n = strtoul(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return 0;
+ if (n == 0)
+ return 0;
+ if (n >= MAX_RX_QUEUE_PER_LCORE)
+ return 0;
+
+ return n;
+}
+
+static int
+l2fwd_parse_timer_period(const char *q_arg)
+{
+ char *end = NULL;
+ int n;
+
+ /* parse number string */
+ n = strtol(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+ if (n >= MAX_TIMER_PERIOD)
+ return -1;
+
+ return n;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+l2fwd_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ static struct option lgopts[] = {
+ {NULL, 0, 0, 0}
+ };
+
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, "p:q:T:l",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* portmask */
+ case 'p':
+ l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
+ if (l2fwd_enabled_port_mask == 0) {
+ printf("invalid portmask\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* nqueue */
+ case 'q':
+ l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
+ if (l2fwd_rx_queue_per_lcore == 0) {
+ printf("invalid queue number\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* timer period */
+ case 'T':
+ timer_period = l2fwd_parse_timer_period(optarg);
+ if (timer_period < 0) {
+ printf("invalid timer period\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* For thousands separator in printf. */
+ case 'l':
+ setlocale(LC_ALL, "");
+ break;
+
+ /* long options */
+ case 0:
+ l2fwd_usage(prgname);
+ return -1;
+
+ default:
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 1; /* reset getopt lib */
+ return ret;
+}
+
+/* Check the link status of all ports for up to 9s, and print the final status */
+static void
+check_all_ports_link_status(uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+ int ret;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ all_ports_up = 1;
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ ret = rte_eth_link_get_nowait(portid, &link);
+ if (ret < 0) {
+ all_ports_up = 0;
+ if (print_flag == 1)
+ printf("Port %u link get failed: %s\n",
+ portid, rte_strerror(-ret));
+ continue;
+ }
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex"));
+ else
+ printf("Port %d Link Down\n", portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct lcore_queue_conf *qconf;
+ unsigned lcore_id, rx_lcore_id;
+ unsigned nb_ports_in_mask = 0;
+ int ret;
+ char name[RTE_JOBSTATS_NAMESIZE];
+ uint16_t nb_ports;
+ uint16_t nb_ports_available = 0;
+ uint16_t portid, last_port;
+ uint8_t i;
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = l2fwd_parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+
+ rte_timer_subsystem_init();
+
+ /* fetch default timer frequency. */
+ hz = rte_get_timer_hz();
+
+ /* create the mbuf pool */
+ l2fwd_pktmbuf_pool =
+ rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF, 32,
+ 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+ if (l2fwd_pktmbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
+ nb_ports = rte_eth_dev_count_avail();
+ if (nb_ports == 0)
+ rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+ /* reset l2fwd_dst_ports */
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
+ l2fwd_dst_ports[portid] = 0;
+ last_port = 0;
+
+ /*
+ * Each logical core is assigned a dedicated TX queue on each port.
+ */
+ RTE_ETH_FOREACH_DEV(portid) {
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ if (nb_ports_in_mask % 2) {
+ l2fwd_dst_ports[portid] = last_port;
+ l2fwd_dst_ports[last_port] = portid;
+ } else
+ last_port = portid;
+
+ nb_ports_in_mask++;
+ }
+ if (nb_ports_in_mask % 2) {
+ printf("Notice: odd number of ports in portmask.\n");
+ l2fwd_dst_ports[last_port] = last_port;
+ }
+
+ rx_lcore_id = 0;
+ qconf = NULL;
+
+ /* Initialize the port/queue configuration of each logical core */
+ RTE_ETH_FOREACH_DEV(portid) {
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ /* get the lcore_id for this port */
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+ lcore_queue_conf[rx_lcore_id].n_rx_port ==
+ l2fwd_rx_queue_per_lcore) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE, "Not enough cores\n");
+ }
+
+ if (qconf != &lcore_queue_conf[rx_lcore_id])
+ /* Assigned a new logical core in the loop above. */
+ qconf = &lcore_queue_conf[rx_lcore_id];
+
+ qconf->rx_port_list[qconf->n_rx_port] = portid;
+ qconf->n_rx_port++;
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
+ }
+
+ /* Initialise each port */
+ RTE_ETH_FOREACH_DEV(portid) {
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_rxconf rxq_conf;
+ struct rte_eth_txconf txq_conf;
+ struct rte_eth_conf local_port_conf = port_conf;
+
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
+ printf("Skipping disabled port %u\n", portid);
+ continue;
+ }
+ nb_ports_available++;
+
+ /* init port */
+ printf("Initializing port %u... ", portid);
+ fflush(stdout);
+
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ portid, strerror(-ret));
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
+ ret, portid);
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, portid);
+
+ ret = rte_eth_macaddr_get(portid,
+ &l2fwd_ports_eth_addr[portid]);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot get MAC address: err=%d, port=%u\n",
+ ret, portid);
+
+ /* init one RX queue */
+ fflush(stdout);
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
+ rte_eth_dev_socket_id(portid),
+ &rxq_conf,
+ l2fwd_pktmbuf_pool);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
+ ret, portid);
+
+ /* init one TX queue on each port */
+ txq_conf = dev_info.default_txconf;
+ txq_conf.offloads = local_port_conf.txmode.offloads;
+ fflush(stdout);
+ ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
+ rte_eth_dev_socket_id(portid),
+ &txq_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, portid);
+
+ /* Initialize TX buffers */
+ tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+ RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+ rte_eth_dev_socket_id(portid));
+ if (tx_buffer[portid] == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
+ portid);
+
+ rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
+
+ ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
+ rte_eth_tx_buffer_count_callback,
+ &port_statistics[portid].dropped);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
+
+ /* Start device */
+ ret = rte_eth_dev_start(portid);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
+ ret, portid);
+
+ printf("done:\n");
+
+ ret = rte_eth_promiscuous_enable(portid);
+ if (ret != 0) {
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_promiscuous_enable:err=%s, port=%u\n",
+ rte_strerror(-ret), portid);
+		}
+
+ printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+ portid,
+ l2fwd_ports_eth_addr[portid].addr_bytes[0],
+ l2fwd_ports_eth_addr[portid].addr_bytes[1],
+ l2fwd_ports_eth_addr[portid].addr_bytes[2],
+ l2fwd_ports_eth_addr[portid].addr_bytes[3],
+ l2fwd_ports_eth_addr[portid].addr_bytes[4],
+ l2fwd_ports_eth_addr[portid].addr_bytes[5]);
+
+ /* initialize port stats */
+ memset(&port_statistics, 0, sizeof(port_statistics));
+ }
+
+ if (!nb_ports_available) {
+ rte_exit(EXIT_FAILURE,
+ "All available ports are disabled. Please set portmask.\n");
+ }
+
+ check_all_ports_link_status(l2fwd_enabled_port_mask);
+
+ drain_tsc = (hz + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
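+	/*
+	 * Worked example for the line above (illustrative): with hz = 2.5e9,
+	 * the rounded-up division gives 2500 cycles per microsecond, so
+	 * drain_tsc = 2500 * 100 = 250000 cycles, i.e. the ~100 us TX drain
+	 * interval.
+	 */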
+
+ RTE_LCORE_FOREACH(lcore_id) {
+ qconf = &lcore_queue_conf[lcore_id];
+
+ rte_spinlock_init(&qconf->lock);
+
+ if (rte_jobstats_context_init(&qconf->jobs_context) != 0)
+ rte_panic("Jobs stats context for core %u init failed\n", lcore_id);
+
+		if (qconf->n_rx_port == 0) {
+			RTE_LOG(INFO, L2FWD,
+				"lcore %u: no RX ports, skipping job setup\n",
+				lcore_id);
+			continue;
+		}
+ /* Add flush job.
+ * Set fixed period by setting min = max = initial period. Set target to
+ * zero as it is irrelevant for this job. */
+ rte_jobstats_init(&qconf->flush_job, "flush", drain_tsc, drain_tsc,
+ drain_tsc, 0);
+
+ rte_timer_init(&qconf->flush_timer);
+ ret = rte_timer_reset(&qconf->flush_timer, drain_tsc, PERIODICAL,
+ lcore_id, &l2fwd_flush_job, NULL);
+
+ if (ret < 0) {
+ rte_exit(1, "Failed to reset flush job timer for lcore %u: %s",
+ lcore_id, rte_strerror(-ret));
+ }
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+ struct rte_jobstats *job = &qconf->port_fwd_jobs[i];
+
+ portid = qconf->rx_port_list[i];
+ printf("Setting forward job for port %u\n", portid);
+
+ snprintf(name, RTE_DIM(name), "port %u fwd", portid);
+			/* Setup forward job.
+			 * Set min, max and initial period. Set target to MAX_PKT_BURST as
+			 * this is the desired optimal RX/TX burst size. */
+ rte_jobstats_init(job, name, 0, drain_tsc, 0, MAX_PKT_BURST);
+ rte_jobstats_set_update_period_function(job, l2fwd_job_update_cb);
+
+ rte_timer_init(&qconf->rx_timers[i]);
+ ret = rte_timer_reset(&qconf->rx_timers[i], 0, PERIODICAL, lcore_id,
+ &l2fwd_fwd_job, (void *)(uintptr_t)i);
+
+ if (ret < 0) {
+ rte_exit(1, "Failed to reset lcore %u port %u job timer: %s",
+ lcore_id, qconf->rx_port_list[i], rte_strerror(-ret));
+ }
+ }
+ }
+
+ if (timer_period)
+		rte_eal_alarm_set(timer_period * US_PER_S, show_stats_cb, NULL);
+ else
+ RTE_LOG(INFO, L2FWD, "Stats display disabled\n");
+
+ /* launch per-lcore init on every lcore */
+ rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (rte_eal_wait_lcore(lcore_id) < 0)
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-jobstats/meson.build b/src/spdk/dpdk/examples/l2fwd-jobstats/meson.build
new file mode 100644
index 000000000..72273736b
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-jobstats/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+allow_experimental_apis = true
+deps += ['jobstats', 'timer']
+sources = files(
+ 'main.c'
+)
diff --git a/src/spdk/dpdk/examples/l2fwd-keepalive/Makefile b/src/spdk/dpdk/examples/l2fwd-keepalive/Makefile
new file mode 100644
index 000000000..0db5e6015
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-keepalive/Makefile
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
+
+# binary name
+APP = l2fwd-keepalive
+
+# all source are stored in SRCS-y
+SRCS-y := main.c shm.c
+
+# Build using pkg-config variables if possible
+ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
+
+all: shared
+.PHONY: shared static
+shared: build/$(APP)-shared
+ ln -sf $(APP)-shared build/$(APP)
+static: build/$(APP)-static
+ ln -sf $(APP)-static build/$(APP)
+
+LDFLAGS += -pthread -lrt
+
+PKGCONF ?= pkg-config
+
+PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null)
+CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
+LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
+LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
+
+build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
+
+build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
+
+build:
+ @mkdir -p $@
+
+.PHONY: clean
+clean:
+ rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
+ test -d build && rmdir -p build || true
+
+else # Build using legacy build system
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, detect a build directory, by looking for a path with a .config
+RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+LDFLAGS += -lrt
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+endif
diff --git a/src/spdk/dpdk/examples/l2fwd-keepalive/ka-agent/Makefile b/src/spdk/dpdk/examples/l2fwd-keepalive/ka-agent/Makefile
new file mode 100644
index 000000000..8d5061b17
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-keepalive/ka-agent/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2016 Intel Corporation
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, detect a build directory, by looking for a path with a .config
+RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = ka-agent
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)/../
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+LDFLAGS += -lrt
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/src/spdk/dpdk/examples/l2fwd-keepalive/ka-agent/main.c b/src/spdk/dpdk/examples/l2fwd-keepalive/ka-agent/main.c
new file mode 100644
index 000000000..b8a755dbd
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-keepalive/ka-agent/main.c
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <time.h>
+
+#include <rte_keepalive.h>
+
+#include <shm.h>
+
+#define MAX_TIMEOUTS 4
+#define SEM_TIMEOUT_SECS 2
+
+static struct rte_keepalive_shm *ka_shm_create(void)
+{
+ int fd = shm_open(RTE_KEEPALIVE_SHM_NAME, O_RDWR, 0666);
+ size_t size = sizeof(struct rte_keepalive_shm);
+ struct rte_keepalive_shm *shm;
+
+ if (fd < 0)
+ printf("Failed to open %s as SHM:%s\n",
+ RTE_KEEPALIVE_SHM_NAME,
+ strerror(errno));
+ else {
+ shm = (struct rte_keepalive_shm *) mmap(
+ 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ if (shm == MAP_FAILED)
+ printf("Failed to mmap SHM:%s\n", strerror(errno));
+ else
+ return shm;
+ }
+
+	/* shm is MAP_FAILED (i.e. (void *)-1) here, so return NULL instead */
+	return NULL;
+}
+
+int main(void)
+{
+ struct rte_keepalive_shm *shm = ka_shm_create();
+ struct timespec timeout = { .tv_nsec = 0 };
+ int idx_core;
+ int cnt_cores;
+ uint64_t last_seen_alive_time = 0;
+ uint64_t most_recent_alive_time;
+ int cnt_timeouts = 0;
+ int sem_errno;
+
+ if (shm == NULL) {
+ printf("Unable to access shared core state\n");
+ return 1;
+ }
+ while (1) {
+ most_recent_alive_time = 0;
+ for (idx_core = 0; idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++)
+ if (shm->core_last_seen_times[idx_core] >
+ most_recent_alive_time)
+ most_recent_alive_time =
+ shm->core_last_seen_times[idx_core];
+
+ timeout.tv_sec = time(NULL) + SEM_TIMEOUT_SECS;
+ if (sem_timedwait(&shm->core_died, &timeout) == -1) {
+			/* A timeout with no core death signal and no change
+			 * in any last-seen time means the keepalive monitor
+			 * itself has stopped updating.
+			 */
+			sem_errno = errno;
+			if (sem_errno == ETIMEDOUT) {
+				if (last_seen_alive_time ==
+						most_recent_alive_time &&
+						cnt_timeouts++ >
+						MAX_TIMEOUTS) {
+					printf("No updates. Exiting..\n");
+					break;
+				}
+			} else
+				printf("sem_timedwait() error (%s)\n",
+					strerror(sem_errno));
+			/* Update only after the comparison above so that a
+			 * stalled set of last-seen times is detected. */
+			last_seen_alive_time = most_recent_alive_time;
+			continue;
+ }
+ cnt_timeouts = 0;
+
+ cnt_cores = 0;
+ for (idx_core = 0; idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++)
+ if (shm->core_state[idx_core] == RTE_KA_STATE_DEAD)
+ cnt_cores++;
+ if (cnt_cores == 0) {
+			/* Can happen if a core was restarted since the
+			 * semaphore was posted, e.g. because the agent
+			 * was offline.
+			 */
+ printf("Warning: Empty dead core report\n");
+ continue;
+ }
+
+ printf("%i dead cores: ", cnt_cores);
+ for (idx_core = 0;
+ idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++)
+ if (shm->core_state[idx_core] == RTE_KA_STATE_DEAD)
+ printf("%d, ", idx_core);
+ printf("\b\b\n");
+ }
+ if (munmap(shm, sizeof(struct rte_keepalive_shm)) != 0)
+ printf("Warning: munmap() failed\n");
+ return 0;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-keepalive/main.c b/src/spdk/dpdk/examples/l2fwd-keepalive/main.c
new file mode 100644
index 000000000..b7585d55e
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-keepalive/main.c
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_timer.h>
+#include <rte_keepalive.h>
+
+#include "shm.h"
+
+#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
+
+#define NB_MBUF_PER_PORT 3000
+
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
+
+/* mask of enabled ports */
+static uint32_t l2fwd_enabled_port_mask;
+
+/* list of enabled ports */
+static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
+
+static unsigned int l2fwd_rx_queue_per_lcore = 1;
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+struct lcore_queue_conf {
+ unsigned n_rx_port;
+ unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+} __rte_cache_aligned;
+struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+
+static struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .split_hdr_size = 0,
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+};
+
+struct rte_mempool *l2fwd_pktmbuf_pool = NULL;
+
+/* Per-port statistics struct */
+struct l2fwd_port_statistics {
+ uint64_t tx;
+ uint64_t rx;
+ uint64_t dropped;
+} __rte_cache_aligned;
+struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
+
+/* A tsc-based timer responsible for triggering statistics printout */
+#define TIMER_MILLISECOND 1
+#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* 10 seconds */
+static int64_t check_period = 5; /* default check cycle is 5ms */
+
+/* Keepalive structure */
+struct rte_keepalive *rte_global_keepalive_info;
+
+/* Termination signalling */
+static int terminate_signal_received;
+
+/* Termination signal handler */
+static void handle_sigterm(__rte_unused int value)
+{
+ terminate_signal_received = 1;
+}
+
+/* Print out statistics on packets dropped */
+static void
+print_stats(__rte_unused struct rte_timer *ptr_timer,
+ __rte_unused void *ptr_data)
+{
+ uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+ uint16_t portid;
+
+ total_packets_dropped = 0;
+ total_packets_tx = 0;
+ total_packets_rx = 0;
+
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("\nPort statistics ====================================");
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ /* skip disabled ports */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ printf("\nStatistics for port %u ------------------------------"
+ "\nPackets sent: %24"PRIu64
+ "\nPackets received: %20"PRIu64
+ "\nPackets dropped: %21"PRIu64,
+ portid,
+ port_statistics[portid].tx,
+ port_statistics[portid].rx,
+ port_statistics[portid].dropped);
+
+ total_packets_dropped += port_statistics[portid].dropped;
+ total_packets_tx += port_statistics[portid].tx;
+ total_packets_rx += port_statistics[portid].rx;
+ }
+ printf("\nAggregate statistics ==============================="
+ "\nTotal packets sent: %18"PRIu64
+ "\nTotal packets received: %14"PRIu64
+ "\nTotal packets dropped: %15"PRIu64,
+ total_packets_tx,
+ total_packets_rx,
+ total_packets_dropped);
+ printf("\n====================================================\n");
+}
+
+static void
+l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
+{
+ struct rte_ether_hdr *eth;
+ void *tmp;
+ int sent;
+ unsigned dst_port;
+ struct rte_eth_dev_tx_buffer *buffer;
+
+ dst_port = l2fwd_dst_ports[portid];
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+ /* 02:00:00:00:00:xx */
+ tmp = &eth->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dst_port << 40);
+
+ /* src addr */
+ rte_ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
+
+ buffer = tx_buffer[dst_port];
+ sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
+ if (sent)
+ port_statistics[dst_port].tx += sent;
+}
+
+/* main processing loop */
+static void
+l2fwd_main_loop(void)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *m;
+ int sent;
+ unsigned lcore_id;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
+ unsigned i, j, portid, nb_rx;
+ struct lcore_queue_conf *qconf;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
+ / US_PER_S * BURST_TX_DRAIN_US;
+ struct rte_eth_dev_tx_buffer *buffer;
+
+ prev_tsc = 0;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_queue_conf[lcore_id];
+
+ if (qconf->n_rx_port == 0) {
+ RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ portid = qconf->rx_port_list[i];
+ RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
+ portid);
+ }
+
+ uint64_t tsc_initial = rte_rdtsc();
+ uint64_t tsc_lifetime = (rand()&0x07) * rte_get_tsc_hz();
+
+ while (!terminate_signal_received) {
+ /* Keepalive heartbeat */
+ rte_keepalive_mark_alive(rte_global_keepalive_info);
+
+ cur_tsc = rte_rdtsc();
+
+ /*
+ * Die randomly within 7 secs for demo purposes if
+ * keepalive enabled
+ */
+ if (check_period > 0 && cur_tsc - tsc_initial > tsc_lifetime)
+ break;
+
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
+ buffer = tx_buffer[portid];
+
+ sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
+ if (sent)
+ port_statistics[portid].tx += sent;
+
+ }
+
+ prev_tsc = cur_tsc;
+ }
+
+ /*
+ * Read packet from RX queues
+ */
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ portid = qconf->rx_port_list[i];
+ nb_rx = rte_eth_rx_burst(portid, 0,
+ pkts_burst, MAX_PKT_BURST);
+
+ port_statistics[portid].rx += nb_rx;
+
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
+ rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+ l2fwd_simple_forward(m, portid);
+ }
+ }
+ }
+}
+
+static int
+l2fwd_launch_one_lcore(__rte_unused void *dummy)
+{
+ l2fwd_main_loop();
+ return 0;
+}
+
+/* display usage */
+static void
+l2fwd_usage(const char *prgname)
+{
+ printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+ " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
+ " -K PERIOD: Keepalive check period (5 default; 86400 max)\n"
+ " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
+ prgname);
+}
+
+static int
+l2fwd_parse_portmask(const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(portmask, &end, 16);
+ if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (pm == 0)
+ return -1;
+
+ return pm;
+}
+
+static unsigned int
+l2fwd_parse_nqueue(const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+	/* parse decimal string */
+ n = strtoul(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return 0;
+ if (n == 0)
+ return 0;
+ if (n >= MAX_RX_QUEUE_PER_LCORE)
+ return 0;
+
+ return n;
+}
+
+static int
+l2fwd_parse_timer_period(const char *q_arg)
+{
+ char *end = NULL;
+ int n;
+
+ /* parse number string */
+ n = strtol(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+ if (n >= MAX_TIMER_PERIOD)
+ return -1;
+
+ return n;
+}
+
+static int
+l2fwd_parse_check_period(const char *q_arg)
+{
+ char *end = NULL;
+ int n;
+
+ /* parse number string */
+ n = strtol(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+ if (n >= MAX_TIMER_PERIOD)
+ return -1;
+
+ return n;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+l2fwd_parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ static struct option lgopts[] = {
+ {NULL, 0, 0, 0}
+ };
+
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, "p:q:T:K:",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* portmask */
+ case 'p':
+ l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
+ if (l2fwd_enabled_port_mask == 0) {
+ printf("invalid portmask\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* nqueue */
+ case 'q':
+ l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
+ if (l2fwd_rx_queue_per_lcore == 0) {
+ printf("invalid queue number\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* timer period */
+ case 'T':
+ timer_period = l2fwd_parse_timer_period(optarg)
+ * (int64_t)(1000 * TIMER_MILLISECOND);
+ if (timer_period < 0) {
+ printf("invalid timer period\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* Check period */
+ case 'K':
+ check_period = l2fwd_parse_check_period(optarg);
+ if (check_period < 0) {
+ printf("invalid check period\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* long options */
+ case 0:
+ l2fwd_usage(prgname);
+ return -1;
+
+ default:
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 1; /* reset getopt lib */
+ return ret;
+}
+
+/* Check the link status of all ports for up to 9s, and print the final status */
+static void
+check_all_ports_link_status(uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+ int ret;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ all_ports_up = 1;
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ ret = rte_eth_link_get_nowait(portid, &link);
+ if (ret < 0) {
+ all_ports_up = 0;
+ if (print_flag == 1)
+ printf("Port %u link get failed: %s\n",
+ portid, rte_strerror(-ret));
+ continue;
+ }
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex"));
+ else
+ printf("Port %d Link Down\n", portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+static void
+dead_core(__rte_unused void *ptr_data, const int id_core)
+{
+ if (terminate_signal_received)
+ return;
+ printf("Dead core %i - restarting..\n", id_core);
+ if (rte_eal_get_lcore_state(id_core) == FINISHED) {
+ rte_eal_wait_lcore(id_core);
+ rte_eal_remote_launch(l2fwd_launch_one_lcore, NULL, id_core);
+ } else {
+ printf("..false positive!\n");
+ }
+}
+
+static void
+relay_core_state(void *ptr_data, const int id_core,
+ const enum rte_keepalive_state core_state, uint64_t last_alive)
+{
+ rte_keepalive_relayed_state((struct rte_keepalive_shm *)ptr_data,
+ id_core, core_state, last_alive);
+}
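+
+/*
+ * How the two callbacks above fit together (summary of the keepalive
+ * flow in this file): rte_keepalive_dispatch_pings(), driven by the
+ * heartbeat timer set up in main(), marks cores that stop responding.
+ * dead_core() is the local reaction: if the lcore really exited
+ * (state FINISHED) it is relaunched, otherwise the report was a late
+ * heartbeat and is ignored.  relay_core_state() is the external
+ * reaction: it mirrors every state change into the POSIX shared
+ * memory block (see shm.c) so that a separate monitoring process,
+ * such as the ka-agent shipped with this example, can watch core
+ * health from outside the application.
+ */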
+
+int
+main(int argc, char **argv)
+{
+ struct lcore_queue_conf *qconf;
+ int ret;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available = 0;
+ uint16_t portid, last_port;
+ unsigned lcore_id, rx_lcore_id;
+ unsigned nb_ports_in_mask = 0;
+ unsigned int total_nb_mbufs;
+ struct sigaction signal_handler;
+ struct rte_keepalive_shm *ka_shm;
+
+ memset(&signal_handler, 0, sizeof(signal_handler));
+ terminate_signal_received = 0;
+ signal_handler.sa_handler = &handle_sigterm;
+ if (sigaction(SIGINT, &signal_handler, NULL) == -1 ||
+ sigaction(SIGTERM, &signal_handler, NULL) == -1)
+ rte_exit(EXIT_FAILURE, "SIGNAL\n");
+
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ l2fwd_enabled_port_mask = 0;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = l2fwd_parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+
+ nb_ports = rte_eth_dev_count_avail();
+ if (nb_ports == 0)
+ rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+ /* create the mbuf pool */
+ total_nb_mbufs = NB_MBUF_PER_PORT * nb_ports;
+
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool",
+ total_nb_mbufs, 32, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+ if (l2fwd_pktmbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
+ /* reset l2fwd_dst_ports */
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
+ l2fwd_dst_ports[portid] = 0;
+ last_port = 0;
+
+ /*
+ * Each logical core is assigned a dedicated TX queue on each port.
+ */
+ RTE_ETH_FOREACH_DEV(portid) {
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ if (nb_ports_in_mask % 2) {
+ l2fwd_dst_ports[portid] = last_port;
+ l2fwd_dst_ports[last_port] = portid;
+ } else
+ last_port = portid;
+
+ nb_ports_in_mask++;
+ }
+ if (nb_ports_in_mask % 2) {
+ printf("Notice: odd number of ports in portmask.\n");
+ l2fwd_dst_ports[last_port] = last_port;
+ }
+
+ rx_lcore_id = 1;
+ qconf = NULL;
+
+ /* Initialize the port/queue configuration of each logical core */
+ RTE_ETH_FOREACH_DEV(portid) {
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ /* get the lcore_id for this port */
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+ lcore_queue_conf[rx_lcore_id].n_rx_port ==
+ l2fwd_rx_queue_per_lcore) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE, "Not enough cores\n");
+ }
+
+ if (qconf != &lcore_queue_conf[rx_lcore_id])
+ /* Assigned a new logical core in the loop above. */
+ qconf = &lcore_queue_conf[rx_lcore_id];
+
+ qconf->rx_port_list[qconf->n_rx_port] = portid;
+ qconf->n_rx_port++;
+ printf("Lcore %u: RX port %u\n",
+ rx_lcore_id, portid);
+ }
+
+ /* Initialise each port */
+ RTE_ETH_FOREACH_DEV(portid) {
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_rxconf rxq_conf;
+ struct rte_eth_txconf txq_conf;
+ struct rte_eth_conf local_port_conf = port_conf;
+
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
+ printf("Skipping disabled port %u\n", portid);
+ continue;
+ }
+ nb_ports_available++;
+
+ /* init port */
+ printf("Initializing port %u... ", portid);
+ fflush(stdout);
+
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ portid, strerror(-ret));
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot configure device: err=%d, port=%u\n",
+ ret, portid);
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, portid);
+
+ ret = rte_eth_macaddr_get(portid,
+ &l2fwd_ports_eth_addr[portid]);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot mac address: err=%d, port=%u\n",
+ ret, portid);
+
+ /* init one RX queue */
+ fflush(stdout);
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
+ rte_eth_dev_socket_id(portid),
+ &rxq_conf,
+ l2fwd_pktmbuf_pool);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_rx_queue_setup:err=%d, port=%u\n",
+ ret, portid);
+
+ /* init one TX queue on each port */
+ fflush(stdout);
+ txq_conf = dev_info.default_txconf;
+ txq_conf.offloads = local_port_conf.txmode.offloads;
+ ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
+ rte_eth_dev_socket_id(portid),
+ &txq_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, portid);
+
+ /* Initialize TX buffers */
+ tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+ RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+ rte_eth_dev_socket_id(portid));
+ if (tx_buffer[portid] == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
+ portid);
+
+ rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
+
+ ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
+ rte_eth_tx_buffer_count_callback,
+ &port_statistics[portid].dropped);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
+
+ /* Start device */
+ ret = rte_eth_dev_start(portid);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_start:err=%d, port=%u\n",
+ ret, portid);
+
+ ret = rte_eth_promiscuous_enable(portid);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_promiscuous_enable:err=%s, port=%u\n",
+ rte_strerror(-ret), portid);
+
+ printf("Port %u, MAC address: "
+ "%02X:%02X:%02X:%02X:%02X:%02X\n\n",
+ portid,
+ l2fwd_ports_eth_addr[portid].addr_bytes[0],
+ l2fwd_ports_eth_addr[portid].addr_bytes[1],
+ l2fwd_ports_eth_addr[portid].addr_bytes[2],
+ l2fwd_ports_eth_addr[portid].addr_bytes[3],
+ l2fwd_ports_eth_addr[portid].addr_bytes[4],
+ l2fwd_ports_eth_addr[portid].addr_bytes[5]);
+
+ /* initialize port stats */
+ memset(&port_statistics, 0, sizeof(port_statistics));
+ }
+
+ if (!nb_ports_available) {
+ rte_exit(EXIT_FAILURE,
+ "All available ports are disabled. Please set portmask.\n");
+ }
+
+ check_all_ports_link_status(l2fwd_enabled_port_mask);
+
+ struct rte_timer hb_timer, stats_timer;
+
+ rte_timer_subsystem_init();
+ rte_timer_init(&stats_timer);
+
+ ka_shm = NULL;
+ if (check_period > 0) {
+ ka_shm = rte_keepalive_shm_create();
+ if (ka_shm == NULL)
+ rte_exit(EXIT_FAILURE,
+ "rte_keepalive_shm_create() failed");
+ rte_global_keepalive_info =
+ rte_keepalive_create(&dead_core, ka_shm);
+ if (rte_global_keepalive_info == NULL)
+ rte_exit(EXIT_FAILURE, "init_keep_alive() failed");
+ rte_keepalive_register_relay_callback(rte_global_keepalive_info,
+ relay_core_state, ka_shm);
+ rte_timer_init(&hb_timer);
+ if (rte_timer_reset(&hb_timer,
+ (check_period * rte_get_timer_hz()) / 1000,
+ PERIODICAL,
+ rte_lcore_id(),
+ (void(*)(struct rte_timer*, void*))
+ &rte_keepalive_dispatch_pings,
+ rte_global_keepalive_info
+ ) != 0 )
+ rte_exit(EXIT_FAILURE, "Keepalive setup failure.\n");
+ }
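+
+ /*
+ * Note: the heartbeat timer armed above only fires because the
+ * main loop at the end of this function calls rte_timer_manage()
+ * on this (master) lcore; each tick, rte_keepalive_dispatch_pings()
+ * pings the registered worker cores and escalates any that have
+ * stopped responding until dead_core() is invoked.
+ */
+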
+ if (timer_period > 0) {
+ if (rte_timer_reset(&stats_timer,
+ (timer_period * rte_get_timer_hz()) / 1000,
+ PERIODICAL,
+ rte_lcore_id(),
+ &print_stats, NULL
+ ) != 0 )
+ rte_exit(EXIT_FAILURE, "Stats setup failure.\n");
+ }
+ /* launch per-lcore init on every slave lcore */
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
+
+ if (qconf->n_rx_port == 0)
+ RTE_LOG(INFO, L2FWD,
+ "lcore %u has nothing to do\n",
+ lcore_id
+ );
+ else {
+ rte_eal_remote_launch(
+ l2fwd_launch_one_lcore,
+ NULL,
+ lcore_id
+ );
+ /* only register when keepalive is enabled */
+ if (check_period > 0)
+ rte_keepalive_register_core(
+ rte_global_keepalive_info, lcore_id);
+ }
+ }
+ while (!terminate_signal_received) {
+ rte_timer_manage();
+ rte_delay_ms(5);
+ }
+
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (rte_eal_wait_lcore(lcore_id) < 0)
+ return -1;
+ }
+
+ if (ka_shm != NULL)
+ rte_keepalive_shm_cleanup(ka_shm);
+ return 0;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-keepalive/meson.build b/src/spdk/dpdk/examples/l2fwd-keepalive/meson.build
new file mode 100644
index 000000000..d678a8ddd
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-keepalive/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+allow_experimental_apis = true
+ext_deps += cc.find_library('rt')
+deps += 'timer'
+sources = files(
+ 'main.c', 'shm.c'
+)
diff --git a/src/spdk/dpdk/examples/l2fwd-keepalive/shm.c b/src/spdk/dpdk/examples/l2fwd-keepalive/shm.c
new file mode 100644
index 000000000..7c7a9ea8e
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-keepalive/shm.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#include <time.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_keepalive.h>
+
+#include "shm.h"
+
+struct rte_keepalive_shm *rte_keepalive_shm_create(void)
+{
+ int fd;
+ int idx_core;
+ struct rte_keepalive_shm *ka_shm;
+
+ /* If any existing object is not unlinked, it makes it all too easy
+ * for clients to end up with stale shared memory blocks when
+ * restarted. Unlinking makes sure subsequent shm_open by clients
+ * will get the new block mapped below.
+ */
+ if (shm_unlink(RTE_KEEPALIVE_SHM_NAME) == -1 && errno != ENOENT)
+ printf("Warning: Error unlinking stale %s (%s)\n",
+ RTE_KEEPALIVE_SHM_NAME, strerror(errno));
+
+ fd = shm_open(RTE_KEEPALIVE_SHM_NAME,
+ O_CREAT | O_TRUNC | O_RDWR, 0666);
+ if (fd < 0)
+ RTE_LOG(INFO, EAL,
+ "Failed to open %s as SHM (%s)\n",
+ RTE_KEEPALIVE_SHM_NAME,
+ strerror(errno));
+ else if (ftruncate(fd, sizeof(struct rte_keepalive_shm)) != 0)
+ RTE_LOG(INFO, EAL,
+ "Failed to resize SHM (%s)\n", strerror(errno));
+ else {
+ ka_shm = (struct rte_keepalive_shm *) mmap(
+ 0, sizeof(struct rte_keepalive_shm),
+ PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ close(fd);
+ if (ka_shm == MAP_FAILED)
+ RTE_LOG(INFO, EAL,
+ "Failed to mmap SHM (%s)\n", strerror(errno));
+ else {
+ memset(ka_shm, 0, sizeof(struct rte_keepalive_shm));
+
+ /* Initialize the semaphores for IPC/SHM use */
+ if (sem_init(&ka_shm->core_died, 1, 0) != 0) {
+ RTE_LOG(INFO, EAL,
+ "Failed to setup SHM semaphore (%s)\n",
+ strerror(errno));
+ munmap(ka_shm,
+ sizeof(struct rte_keepalive_shm));
+ return NULL;
+ }
+
+ /* Set all cores to 'not present' */
+ for (idx_core = 0;
+ idx_core < RTE_KEEPALIVE_MAXCORES;
+ idx_core++) {
+ ka_shm->core_state[idx_core] =
+ RTE_KA_STATE_UNUSED;
+ ka_shm->core_last_seen_times[idx_core] = 0;
+ }
+
+ return ka_shm;
+ }
+ }
+ return NULL;
+}
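+
+/*
+ * Two details above are easy to miss: the fd is closed right after
+ * mmap() because the mapping keeps the shared memory object alive on
+ * its own, and sem_init() is called with pshared=1 so the semaphore
+ * works across processes, which is what lets an external agent block
+ * in sem_wait()/sem_timedwait() on core_died.
+ */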
+
+void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
+ const int id_core, const enum rte_keepalive_state core_state,
+ __rte_unused uint64_t last_alive)
+{
+ int count;
+
+ shm->core_state[id_core] = core_state;
+ shm->core_last_seen_times[id_core] = last_alive;
+
+ if (core_state == RTE_KEEPALIVE_SHM_DEAD) {
+ /* Since core has died, also signal ka_agent.
+ *
+ * Limit number of times semaphore can be incremented, in case
+ * ka_agent is not active.
+ */
+ if (sem_getvalue(&shm->core_died, &count) == -1) {
+ RTE_LOG(INFO, EAL, "Semaphore check failed(%s)\n",
+ strerror(errno));
+ return;
+ }
+ if (count > 1)
+ return;
+
+ if (sem_post(&shm->core_died) != 0)
+ RTE_LOG(INFO, EAL,
+ "Failed to increment semaphore (%s)\n",
+ strerror(errno));
+ }
+}
+
+void rte_keepalive_shm_cleanup(struct rte_keepalive_shm *ka_shm)
+{
+ if (shm_unlink(RTE_KEEPALIVE_SHM_NAME) == -1 && errno != ENOENT)
+ printf("Warning: Error unlinking %s (%s)\n",
+ RTE_KEEPALIVE_SHM_NAME, strerror(errno));
+
+ if (ka_shm && munmap(ka_shm, sizeof(struct rte_keepalive_shm)) != 0)
+ printf("Warning: munmap() failed\n");
+}
diff --git a/src/spdk/dpdk/examples/l2fwd-keepalive/shm.h b/src/spdk/dpdk/examples/l2fwd-keepalive/shm.h
new file mode 100644
index 000000000..7a9d59729
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd-keepalive/shm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Intel Corporation
+ */
+
+#define RTE_KEEPALIVE_SHM_NAME "/dpdk_keepalive_shm_name"
+
+#define RTE_KEEPALIVE_SHM_ALIVE 1
+#define RTE_KEEPALIVE_SHM_DEAD 2
+
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <semaphore.h>
+#include <rte_keepalive.h>
+
+/**
+ * Keepalive SHM structure.
+ *
+ * The shared memory allocated by the primary is this size, and contains the
+ * information as contained within this struct. A secondary may open the SHM,
+ * and read the contents.
+ */
+struct rte_keepalive_shm {
+ /** IPC semaphore. Posted when a core dies */
+ sem_t core_died;
+
+ /**
+ * Relayed status of each core.
+ */
+ enum rte_keepalive_state core_state[RTE_KEEPALIVE_MAXCORES];
+
+ /**
+ * Last-seen-alive timestamps for the cores
+ */
+ uint64_t core_last_seen_times[RTE_KEEPALIVE_MAXCORES];
+};
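+
+/*
+ * Illustrative sketch (not part of this example's sources): a
+ * secondary process could attach to this block and wait for failures
+ * roughly as follows, assuming the layout above:
+ *
+ *   int fd = shm_open(RTE_KEEPALIVE_SHM_NAME, O_RDWR, 0666);
+ *   struct rte_keepalive_shm *ka = mmap(NULL, sizeof(*ka),
+ *           PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *   close(fd);
+ *   sem_wait(&ka->core_died);    blocks until a core dies,
+ *   then scan ka->core_state[] for dead entries
+ *
+ * The mapping must be writable because sem_wait() updates the
+ * semaphore in place.
+ */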
+
+/**
+ * Create shared host memory keepalive object.
+ * @return
+ * Pointer to SHM keepalive structure, or NULL on failure.
+ */
+struct rte_keepalive_shm *rte_keepalive_shm_create(void);
+
+/**
+ * Relays state for given core
+ * @param *shm
+ * Pointer to SHM keepalive structure.
+ * @param id_core
+ * Id of core
+ * @param core_state
+ * State of core
+ * @param last_alive
+ * Last seen timestamp for core
+ */
+void rte_keepalive_relayed_state(struct rte_keepalive_shm *shm,
+ const int id_core, const enum rte_keepalive_state core_state,
+ uint64_t last_alive);
+
+/** Shutdown cleanup of shared host memory keepalive object.
+ * @param *shm
+ * Pointer to SHM keepalive structure. May be NULL.
+ *
+ * If *shm is NULL, this function will only attempt to remove the
+ * shared host memory handle and not unmap the underlying memory.
+ */
+void rte_keepalive_shm_cleanup(struct rte_keepalive_shm *ka_shm);
diff --git a/src/spdk/dpdk/examples/l2fwd/Makefile b/src/spdk/dpdk/examples/l2fwd/Makefile
new file mode 100644
index 000000000..8b7b26cb9
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd/Makefile
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2014 Intel Corporation
+
+# binary name
+APP = l2fwd
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+# Build using pkg-config variables if possible
+ifeq ($(shell pkg-config --exists libdpdk && echo 0),0)
+
+all: shared
+.PHONY: shared static
+shared: build/$(APP)-shared
+ ln -sf $(APP)-shared build/$(APP)
+static: build/$(APP)-static
+ ln -sf $(APP)-static build/$(APP)
+
+PKGCONF ?= pkg-config
+
+PC_FILE := $(shell $(PKGCONF) --path libdpdk 2>/dev/null)
+CFLAGS += -O3 $(shell $(PKGCONF) --cflags libdpdk)
+# Add flag to allow experimental API as l2fwd uses rte_ethdev_set_ptype API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+LDFLAGS_SHARED = $(shell $(PKGCONF) --libs libdpdk)
+LDFLAGS_STATIC = -Wl,-Bstatic $(shell $(PKGCONF) --static --libs libdpdk)
+
+build/$(APP)-shared: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_SHARED)
+
+build/$(APP)-static: $(SRCS-y) Makefile $(PC_FILE) | build
+ $(CC) $(CFLAGS) $(SRCS-y) -o $@ $(LDFLAGS) $(LDFLAGS_STATIC)
+
+build:
+ @mkdir -p $@
+
+.PHONY: clean
+clean:
+ rm -f build/$(APP) build/$(APP)-static build/$(APP)-shared
+ test -d build && rmdir -p build || true
+
+else # Build using legacy build system
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, detect a build directory, by looking for a path with a .config
+RTE_TARGET ?= $(notdir $(abspath $(dir $(firstword $(wildcard $(RTE_SDK)/*/.config)))))
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# Add flag to allow experimental API as l2fwd uses rte_ethdev_set_ptype API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+endif
diff --git a/src/spdk/dpdk/examples/l2fwd/main.c b/src/spdk/dpdk/examples/l2fwd/main.c
new file mode 100644
index 000000000..f8d14b843
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd/main.c
@@ -0,0 +1,776 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+static volatile bool force_quit;
+
+/* MAC updating enabled by default */
+static int mac_updating = 1;
+
+#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
+
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
+#define MEMPOOL_CACHE_SIZE 256
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 1024
+#define RTE_TEST_TX_DESC_DEFAULT 1024
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct rte_ether_addr l2fwd_ports_eth_addr[RTE_MAX_ETHPORTS];
+
+/* mask of enabled ports */
+static uint32_t l2fwd_enabled_port_mask = 0;
+
+/* list of enabled ports */
+static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
+
+static unsigned int l2fwd_rx_queue_per_lcore = 1;
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+struct lcore_queue_conf {
+ unsigned n_rx_port;
+ unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+} __rte_cache_aligned;
+struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+
+static struct rte_eth_conf port_conf = {
+ .rxmode = {
+ .split_hdr_size = 0,
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+};
+
+struct rte_mempool * l2fwd_pktmbuf_pool = NULL;
+
+/* Per-port statistics struct */
+struct l2fwd_port_statistics {
+ uint64_t tx;
+ uint64_t rx;
+ uint64_t dropped;
+} __rte_cache_aligned;
+struct l2fwd_port_statistics port_statistics[RTE_MAX_ETHPORTS];
+
+#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+/* A tsc-based timer responsible for triggering statistics printout */
+static uint64_t timer_period = 10; /* default period is 10 seconds */
+
+/* Print out statistics on packets dropped */
+static void
+print_stats(void)
+{
+ uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+ unsigned portid;
+
+ total_packets_dropped = 0;
+ total_packets_tx = 0;
+ total_packets_rx = 0;
+
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
+
+ /* Clear screen and move to top left */
+ printf("%s%s", clr, topLeft);
+
+ printf("\nPort statistics ====================================");
+
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+ /* skip disabled ports */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ printf("\nStatistics for port %u ------------------------------"
+ "\nPackets sent: %24"PRIu64
+ "\nPackets received: %20"PRIu64
+ "\nPackets dropped: %21"PRIu64,
+ portid,
+ port_statistics[portid].tx,
+ port_statistics[portid].rx,
+ port_statistics[portid].dropped);
+
+ total_packets_dropped += port_statistics[portid].dropped;
+ total_packets_tx += port_statistics[portid].tx;
+ total_packets_rx += port_statistics[portid].rx;
+ }
+ printf("\nAggregate statistics ==============================="
+ "\nTotal packets sent: %18"PRIu64
+ "\nTotal packets received: %14"PRIu64
+ "\nTotal packets dropped: %15"PRIu64,
+ total_packets_tx,
+ total_packets_rx,
+ total_packets_dropped);
+ printf("\n====================================================\n");
+}
+
+static void
+l2fwd_mac_updating(struct rte_mbuf *m, unsigned dest_portid)
+{
+ struct rte_ether_hdr *eth;
+ void *tmp;
+
+ eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+
+ /* 02:00:00:00:00:xx */
+ tmp = &eth->d_addr.addr_bytes[0];
+ *((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
+
+ /* src addr */
+ rte_ether_addr_copy(&l2fwd_ports_eth_addr[dest_portid], &eth->s_addr);
+}
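+
+/*
+ * The single 64-bit store above writes the 6-byte destination MAC
+ * 02:00:00:00:00:<port> in one operation: on little-endian targets
+ * the low bytes of the constant land in the leading address bytes,
+ * and the (port << 40) term sets the last byte.  The store spills two
+ * bytes into the adjacent source-address field, which is harmless
+ * because that field is overwritten by rte_ether_addr_copy() right
+ * afterwards.
+ */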
+
+static void
+l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
+{
+ unsigned dst_port;
+ int sent;
+ struct rte_eth_dev_tx_buffer *buffer;
+
+ dst_port = l2fwd_dst_ports[portid];
+
+ if (mac_updating)
+ l2fwd_mac_updating(m, dst_port);
+
+ buffer = tx_buffer[dst_port];
+ sent = rte_eth_tx_buffer(dst_port, 0, buffer, m);
+ if (sent)
+ port_statistics[dst_port].tx += sent;
+}
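+
+/*
+ * rte_eth_tx_buffer() usually just queues the mbuf and returns 0;
+ * only when the buffer reaches its configured size (MAX_PKT_BURST
+ * here) does it transmit the whole batch and return the number of
+ * packets actually sent, which is why the tx counter above is bumped
+ * by 'sent' rather than by one.  Packets that cannot be sent are
+ * accounted by the error callback registered in main().
+ */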
+
+/* main processing loop */
+static void
+l2fwd_main_loop(void)
+{
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *m;
+ int sent;
+ unsigned lcore_id;
+ uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
+ unsigned i, j, portid, nb_rx;
+ struct lcore_queue_conf *qconf;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S *
+ BURST_TX_DRAIN_US;
+ struct rte_eth_dev_tx_buffer *buffer;
+
+ prev_tsc = 0;
+ timer_tsc = 0;
+
+ lcore_id = rte_lcore_id();
+ qconf = &lcore_queue_conf[lcore_id];
+
+ if (qconf->n_rx_port == 0) {
+ RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
+ return;
+ }
+
+ RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ portid = qconf->rx_port_list[i];
+ RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
+ portid);
+
+ }
+
+ while (!force_quit) {
+
+ cur_tsc = rte_rdtsc();
+
+ /*
+ * TX burst queue drain
+ */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ portid = l2fwd_dst_ports[qconf->rx_port_list[i]];
+ buffer = tx_buffer[portid];
+
+ sent = rte_eth_tx_buffer_flush(portid, 0, buffer);
+ if (sent)
+ port_statistics[portid].tx += sent;
+
+ }
+
+ /* if timer is enabled */
+ if (timer_period > 0) {
+
+ /* advance the timer */
+ timer_tsc += diff_tsc;
+
+ /* if timer has reached its timeout */
+ if (unlikely(timer_tsc >= timer_period)) {
+
+ /* do this only on master core */
+ if (lcore_id == rte_get_master_lcore()) {
+ print_stats();
+ /* reset the timer */
+ timer_tsc = 0;
+ }
+ }
+ }
+
+ prev_tsc = cur_tsc;
+ }
+
+ /*
+ * Read packet from RX queues
+ */
+ for (i = 0; i < qconf->n_rx_port; i++) {
+
+ portid = qconf->rx_port_list[i];
+ nb_rx = rte_eth_rx_burst(portid, 0,
+ pkts_burst, MAX_PKT_BURST);
+
+ port_statistics[portid].rx += nb_rx;
+
+ for (j = 0; j < nb_rx; j++) {
+ m = pkts_burst[j];
+ rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+ l2fwd_simple_forward(m, portid);
+ }
+ }
+ }
+}
+
+static int
+l2fwd_launch_one_lcore(__rte_unused void *dummy)
+{
+ l2fwd_main_loop();
+ return 0;
+}
+
+/* display usage */
+static void
+l2fwd_usage(const char *prgname)
+{
+ printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+ " -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+ " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
+ " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n"
+ " --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
+ " When enabled:\n"
+ " - The source MAC address is replaced by the TX port MAC address\n"
+ " - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n",
+ prgname);
+}
+
+static int
+l2fwd_parse_portmask(const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(portmask, &end, 16);
+ if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (pm == 0)
+ return -1;
+
+ return pm;
+}
+
+static unsigned int
+l2fwd_parse_nqueue(const char *q_arg)
+{
+ char *end = NULL;
+ unsigned long n;
+
+ /* parse decimal string */
+ n = strtoul(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return 0;
+ if (n == 0)
+ return 0;
+ if (n >= MAX_RX_QUEUE_PER_LCORE)
+ return 0;
+
+ return n;
+}
+
+static int
+l2fwd_parse_timer_period(const char *q_arg)
+{
+ char *end = NULL;
+ int n;
+
+ /* parse number string */
+ n = strtol(q_arg, &end, 10);
+ if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+ if (n >= MAX_TIMER_PERIOD)
+ return -1;
+
+ return n;
+}
+
+static const char short_options[] =
+ "p:" /* portmask */
+ "q:" /* number of queues */
+ "T:" /* timer period */
+ ;
+
+#define CMD_LINE_OPT_MAC_UPDATING "mac-updating"
+#define CMD_LINE_OPT_NO_MAC_UPDATING "no-mac-updating"
+
+enum {
+ /* long options mapped to a short option */
+
+ /* the first long-only option value must be >= 256 so that it
+ * cannot conflict with short options */
+ CMD_LINE_OPT_MIN_NUM = 256,
+};
+
+static const struct option lgopts[] = {
+ { CMD_LINE_OPT_MAC_UPDATING, no_argument, &mac_updating, 1},
+ { CMD_LINE_OPT_NO_MAC_UPDATING, no_argument, &mac_updating, 0},
+ {NULL, 0, 0, 0}
+};
+
+/* Parse the argument given in the command line of the application */
+static int
+l2fwd_parse_args(int argc, char **argv)
+{
+ int opt, ret, timer_secs;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, short_options,
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* portmask */
+ case 'p':
+ l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
+ if (l2fwd_enabled_port_mask == 0) {
+ printf("invalid portmask\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* nqueue */
+ case 'q':
+ l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
+ if (l2fwd_rx_queue_per_lcore == 0) {
+ printf("invalid queue number\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ break;
+
+ /* timer period */
+ case 'T':
+ timer_secs = l2fwd_parse_timer_period(optarg);
+ if (timer_secs < 0) {
+ printf("invalid timer period\n");
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ timer_period = timer_secs;
+ break;
+
+ /* long options */
+ case 0:
+ break;
+
+ default:
+ l2fwd_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 1; /* reset getopt lib */
+ return ret;
+}
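+
+/*
+ * For reference, a sketch of a typical invocation (values are
+ * illustrative, not taken from this source):
+ *
+ *   ./build/l2fwd -l 0-1 -n 4 -- -p 0x3 -q 1 -T 5 --no-mac-updating
+ *
+ * This forwards between ports 0 and 1, refreshes the statistics every
+ * 5 seconds and leaves the Ethernet headers untouched.
+ */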
+
+/* Check the link status of all ports, waiting up to 9 seconds, then print the final status */
+static void
+check_all_ports_link_status(uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+ int ret;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ if (force_quit)
+ return;
+ all_ports_up = 1;
+ RTE_ETH_FOREACH_DEV(portid) {
+ if (force_quit)
+ return;
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ ret = rte_eth_link_get_nowait(portid, &link);
+ if (ret < 0) {
+ all_ports_up = 0;
+ if (print_flag == 1)
+ printf("Port %u link get failed: %s\n",
+ portid, rte_strerror(-ret));
+ continue;
+ }
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex"));
+ else
+ printf("Port %d Link Down\n", portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+
+static void
+signal_handler(int signum)
+{
+ if (signum == SIGINT || signum == SIGTERM) {
+ printf("\n\nSignal %d received, preparing to exit...\n",
+ signum);
+ force_quit = true;
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ struct lcore_queue_conf *qconf;
+ int ret;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available = 0;
+ uint16_t portid, last_port;
+ unsigned lcore_id, rx_lcore_id;
+ unsigned nb_ports_in_mask = 0;
+ unsigned int nb_lcores = 0;
+ unsigned int nb_mbufs;
+
+ /* init EAL */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+ argc -= ret;
+ argv += ret;
+
+ force_quit = false;
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+
+ /* parse application arguments (after the EAL ones) */
+ ret = l2fwd_parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+
+ printf("MAC updating %s\n", mac_updating ? "enabled" : "disabled");
+
+ /* convert to number of cycles */
+ timer_period *= rte_get_timer_hz();
+
+ nb_ports = rte_eth_dev_count_avail();
+ if (nb_ports == 0)
+ rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+ /* check port mask to possible port mask */
+ if (l2fwd_enabled_port_mask & ~((1 << nb_ports) - 1))
+ rte_exit(EXIT_FAILURE, "Invalid portmask; possible (0x%x)\n",
+ (1 << nb_ports) - 1);
+
+ /* reset l2fwd_dst_ports */
+ for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++)
+ l2fwd_dst_ports[portid] = 0;
+ last_port = 0;
+
+ /*
+ * Each logical core is assigned a dedicated TX queue on each port.
+ */
+ RTE_ETH_FOREACH_DEV(portid) {
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ if (nb_ports_in_mask % 2) {
+ l2fwd_dst_ports[portid] = last_port;
+ l2fwd_dst_ports[last_port] = portid;
+ }
+ else
+ last_port = portid;
+
+ nb_ports_in_mask++;
+ }
+ if (nb_ports_in_mask % 2) {
+ printf("Notice: odd number of ports in portmask.\n");
+ l2fwd_dst_ports[last_port] = last_port;
+ }
+
+ rx_lcore_id = 0;
+ qconf = NULL;
+
+ /* Initialize the port/queue configuration of each logical core */
+ RTE_ETH_FOREACH_DEV(portid) {
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ /* get the lcore_id for this port */
+ while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+ lcore_queue_conf[rx_lcore_id].n_rx_port ==
+ l2fwd_rx_queue_per_lcore) {
+ rx_lcore_id++;
+ if (rx_lcore_id >= RTE_MAX_LCORE)
+ rte_exit(EXIT_FAILURE, "Not enough cores\n");
+ }
+
+ if (qconf != &lcore_queue_conf[rx_lcore_id]) {
+ /* Assigned a new logical core in the loop above. */
+ qconf = &lcore_queue_conf[rx_lcore_id];
+ nb_lcores++;
+ }
+
+ qconf->rx_port_list[qconf->n_rx_port] = portid;
+ qconf->n_rx_port++;
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
+ }
+
+ nb_mbufs = RTE_MAX(nb_ports * (nb_rxd + nb_txd + MAX_PKT_BURST +
+ nb_lcores * MEMPOOL_CACHE_SIZE), 8192U);
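+
+ /*
+ * Pool sizing: per enabled port, enough mbufs to fill the RX and
+ * TX rings plus one RX burst in flight, with an allowance for each
+ * lcore's mempool cache; RTE_MAX enforces a floor of 8192 so very
+ * small configurations still get a usable pool.
+ */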
+
+ /* create the mbuf pool */
+ l2fwd_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
+ MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+ rte_socket_id());
+ if (l2fwd_pktmbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
+ /* Initialise each port */
+ RTE_ETH_FOREACH_DEV(portid) {
+ struct rte_eth_rxconf rxq_conf;
+ struct rte_eth_txconf txq_conf;
+ struct rte_eth_conf local_port_conf = port_conf;
+ struct rte_eth_dev_info dev_info;
+
+ /* skip ports that are not enabled */
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
+ printf("Skipping disabled port %u\n", portid);
+ continue;
+ }
+ nb_ports_available++;
+
+ /* init port */
+ printf("Initializing port %u... ", portid);
+ fflush(stdout);
+
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ portid, strerror(-ret));
+
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
+ ret, portid);
+
+ ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
+ &nb_txd);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot adjust number of descriptors: err=%d, port=%u\n",
+ ret, portid);
+
+ ret = rte_eth_macaddr_get(portid,
+ &l2fwd_ports_eth_addr[portid]);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot get MAC address: err=%d, port=%u\n",
+ ret, portid);
+
+ /* init one RX queue */
+ fflush(stdout);
+ rxq_conf = dev_info.default_rxconf;
+ rxq_conf.offloads = local_port_conf.rxmode.offloads;
+ ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
+ rte_eth_dev_socket_id(portid),
+ &rxq_conf,
+ l2fwd_pktmbuf_pool);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
+ ret, portid);
+
+ /* init one TX queue on each port */
+ fflush(stdout);
+ txq_conf = dev_info.default_txconf;
+ txq_conf.offloads = local_port_conf.txmode.offloads;
+ ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
+ rte_eth_dev_socket_id(portid),
+ &txq_conf);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, portid);
+
+ /* Initialize TX buffers */
+ tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
+ RTE_ETH_TX_BUFFER_SIZE(MAX_PKT_BURST), 0,
+ rte_eth_dev_socket_id(portid));
+ if (tx_buffer[portid] == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
+ portid);
+
+ rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
+
+ ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[portid],
+ rte_eth_tx_buffer_count_callback,
+ &port_statistics[portid].dropped);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
+
+ ret = rte_eth_dev_set_ptypes(portid, RTE_PTYPE_UNKNOWN, NULL,
+ 0);
+ if (ret < 0)
+ printf("Port %u, Failed to disable Ptype parsing\n",
+ portid);
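+
+ /*
+ * The call above (RTE_PTYPE_UNKNOWN with an empty mask list) tells
+ * the PMD it may skip packet-type classification, since l2fwd never
+ * reads mbuf->packet_type; this is the experimental API that the
+ * ALLOW_EXPERIMENTAL_API build flag in the Makefile exists for.
+ */
+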
+ /* Start device */
+ ret = rte_eth_dev_start(portid);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
+ ret, portid);
+
+ printf("done: \n");
+
+ ret = rte_eth_promiscuous_enable(portid);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_promiscuous_enable:err=%s, port=%u\n",
+ rte_strerror(-ret), portid);
+
+ printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+ portid,
+ l2fwd_ports_eth_addr[portid].addr_bytes[0],
+ l2fwd_ports_eth_addr[portid].addr_bytes[1],
+ l2fwd_ports_eth_addr[portid].addr_bytes[2],
+ l2fwd_ports_eth_addr[portid].addr_bytes[3],
+ l2fwd_ports_eth_addr[portid].addr_bytes[4],
+ l2fwd_ports_eth_addr[portid].addr_bytes[5]);
+
+ /* initialize port stats */
+ memset(&port_statistics, 0, sizeof(port_statistics));
+ }
+
+ if (!nb_ports_available) {
+ rte_exit(EXIT_FAILURE,
+ "All available ports are disabled. Please set portmask.\n");
+ }
+
+ check_all_ports_link_status(l2fwd_enabled_port_mask);
+
+ ret = 0;
+ /* launch per-lcore init on every lcore */
+ rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
+ RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+ if (rte_eal_wait_lcore(lcore_id) < 0) {
+ ret = -1;
+ break;
+ }
+ }
+
+ RTE_ETH_FOREACH_DEV(portid) {
+ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+ continue;
+ printf("Closing port %d...", portid);
+ rte_eth_dev_stop(portid);
+ rte_eth_dev_close(portid);
+ printf(" Done\n");
+ }
+ printf("Bye...\n");
+
+ return ret;
+}
diff --git a/src/spdk/dpdk/examples/l2fwd/meson.build b/src/spdk/dpdk/examples/l2fwd/meson.build
new file mode 100644
index 000000000..50d88caa0
--- /dev/null
+++ b/src/spdk/dpdk/examples/l2fwd/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+# meson file, for building this example as part of a main DPDK build.
+#
+# To build this example as a standalone application with an already-installed
+# DPDK instance, use 'make'
+
+# Enable experimental API flag as l2fwd uses rte_ethdev_set_ptype API
+allow_experimental_apis = true
+sources = files(
+ 'main.c'
+)