author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609 (patch)
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/seastar/dpdk/examples/ip_pipeline
parent     Initial commit. (diff)
Adding upstream version 14.2.21. (upstream/14.2.21, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/seastar/dpdk/examples/ip_pipeline')
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/Makefile | 78
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/app.h | 1434
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/action.cfg | 68
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/action.sh | 119
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/action.txt | 8
-rwxr-xr-x  src/seastar/dpdk/examples/ip_pipeline/config/diagram-generator.py | 346
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/edge_router_downstream.cfg | 97
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/edge_router_downstream.sh | 13
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/edge_router_upstream.cfg | 124
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/edge_router_upstream.sh | 33
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/firewall.cfg | 68
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/firewall.sh | 13
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/firewall.txt | 9
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/flow.cfg | 72
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/flow.sh | 25
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/flow.txt | 17
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/ip_pipeline.cfg | 9
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/ip_pipeline.sh | 5
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/kni.cfg | 67
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/l2fwd.cfg | 58
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/l3fwd.cfg | 68
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/l3fwd.sh | 33
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/l3fwd_arp.cfg | 70
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/l3fwd_arp.sh | 43
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/network_layers.cfg | 227
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/network_layers.sh | 79
-rwxr-xr-x  src/seastar/dpdk/examples/ip_pipeline/config/pipeline-to-core-mapping.py | 935
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/tap.cfg | 64
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config/tm_profile.cfg | 105
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config_check.c | 517
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config_parse.c | 3450
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/config_parse_tm.c | 448
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/cpu_core_map.c | 500
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/cpu_core_map.h | 69
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/init.c | 1925
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/main.c | 64
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/parser.c | 745
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/parser.h | 84
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline.h | 102
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/hash_func.h | 351
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_actions_common.h | 231
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_be.c | 205
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_be.h | 163
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_fe.c | 1484
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_fe.h | 260
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall.c | 1450
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall.h | 89
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall_be.c | 885
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall_be.h | 176
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions.c | 1315
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions.h | 89
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c | 989
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h | 168
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification.c | 1905
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification.h | 135
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c | 789
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h | 142
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master.c | 49
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master.h | 41
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master_be.c | 170
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master_be.h | 41
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough.c | 74
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough.h | 41
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c | 958
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h | 73
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing.c | 1642
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing.h | 100
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing_be.c | 1992
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing_be.h | 312
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/pipeline_be.h | 351
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/thread.c | 322
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/thread.h | 98
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/thread_fe.c | 457
-rw-r--r--  src/seastar/dpdk/examples/ip_pipeline/thread_fe.h | 101
74 files changed, 29839 insertions, 0 deletions
diff --git a/src/seastar/dpdk/examples/ip_pipeline/Makefile b/src/seastar/dpdk/examples/ip_pipeline/Makefile
new file mode 100644
index 00000000..dc7e0ddd
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/Makefile
@@ -0,0 +1,78 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = ip_pipeline
+
+VPATH += $(SRCDIR)/pipeline
+
+INC += $(wildcard *.h) $(wildcard pipeline/*.h)
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += parser.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_parse_tm.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += config_check.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += init.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += thread.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += thread_fe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += cpu_core_map.c
+
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_common_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_common_fe.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_master_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_master.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_passthrough_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_passthrough.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_firewall_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_firewall.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_flow_classification_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_flow_classification.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_flow_actions_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_flow_actions.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_routing_be.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += pipeline_routing.c
+
+CFLAGS += -I$(SRCDIR) -I$(SRCDIR)/pipeline
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -Wno-error=unused-function -Wno-error=unused-variable
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/src/seastar/dpdk/examples/ip_pipeline/app.h b/src/seastar/dpdk/examples/ip_pipeline/app.h
new file mode 100644
index 00000000..e41290e7
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/app.h
@@ -0,0 +1,1434 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_APP_H__
+#define __INCLUDE_APP_H__
+
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_mempool.h>
+#include <rte_ring.h>
+#include <rte_sched.h>
+#include <cmdline_parse.h>
+
+#include <rte_ethdev.h>
+#ifdef RTE_LIBRTE_KNI
+#include <rte_kni.h>
+#endif
+
+#include "cpu_core_map.h"
+#include "pipeline.h"
+
+#define APP_PARAM_NAME_SIZE PIPELINE_NAME_SIZE
+#define APP_LINK_PCI_BDF_SIZE 16
+
+#ifndef APP_LINK_MAX_HWQ_IN
+#define APP_LINK_MAX_HWQ_IN 128
+#endif
+
+#ifndef APP_LINK_MAX_HWQ_OUT
+#define APP_LINK_MAX_HWQ_OUT 128
+#endif
+
+struct app_mempool_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t buffer_size;
+ uint32_t pool_size;
+ uint32_t cache_size;
+ uint32_t cpu_socket_id;
+};
+
+struct app_link_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t pmd_id; /* Generated based on port mask */
+ uint32_t arp_q; /* 0 = Disabled (packets go to default queue 0) */
+ uint32_t tcp_syn_q; /* 0 = Disabled (pkts go to default queue) */
+ uint32_t ip_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t tcp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t udp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t sctp_local_q; /* 0 = Disabled (pkts go to default queue 0) */
+ uint32_t rss_qs[APP_LINK_MAX_HWQ_IN];
+ uint32_t n_rss_qs;
+ uint64_t rss_proto_ipv4;
+ uint64_t rss_proto_ipv6;
+ uint64_t rss_proto_l2;
+ uint32_t promisc;
+ uint32_t state; /* DOWN = 0, UP = 1 */
+ uint32_t ip; /* 0 = Invalid */
+ uint32_t depth; /* Valid only when IP is valid */
+ uint64_t mac_addr; /* Read from HW */
+ char pci_bdf[APP_LINK_PCI_BDF_SIZE];
+
+ struct rte_eth_conf conf;
+};
+
+struct app_pktq_hwq_in_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t mempool_id; /* Position in the app->mempool_params */
+ uint32_t size;
+ uint32_t burst;
+
+ struct rte_eth_rxconf conf;
+};
+
+struct app_pktq_hwq_out_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t size;
+ uint32_t burst;
+ uint32_t dropless;
+ uint64_t n_retries;
+ struct rte_eth_txconf conf;
+};
+
+struct app_pktq_swq_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t size;
+ uint32_t burst_read;
+ uint32_t burst_write;
+ uint32_t dropless;
+ uint64_t n_retries;
+ uint32_t cpu_socket_id;
+ uint32_t ipv4_frag;
+ uint32_t ipv6_frag;
+ uint32_t ipv4_ras;
+ uint32_t ipv6_ras;
+ uint32_t mtu;
+ uint32_t metadata_size;
+ uint32_t mempool_direct_id;
+ uint32_t mempool_indirect_id;
+};
+
+struct app_pktq_kni_params {
+ char *name;
+ uint32_t parsed;
+
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t hyper_th_id;
+ uint32_t force_bind;
+
+ uint32_t mempool_id; /* Position in the app->mempool_params */
+ uint32_t burst_read;
+ uint32_t burst_write;
+ uint32_t dropless;
+ uint64_t n_retries;
+};
+
+#ifndef APP_FILE_NAME_SIZE
+#define APP_FILE_NAME_SIZE 256
+#endif
+
+#ifndef APP_MAX_SCHED_SUBPORTS
+#define APP_MAX_SCHED_SUBPORTS 8
+#endif
+
+#ifndef APP_MAX_SCHED_PIPES
+#define APP_MAX_SCHED_PIPES 4096
+#endif
+
+struct app_pktq_tm_params {
+ char *name;
+ uint32_t parsed;
+ const char *file_name;
+ struct rte_sched_port_params sched_port_params;
+ struct rte_sched_subport_params
+ sched_subport_params[APP_MAX_SCHED_SUBPORTS];
+ struct rte_sched_pipe_params
+ sched_pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT];
+ int sched_pipe_to_profile[APP_MAX_SCHED_SUBPORTS * APP_MAX_SCHED_PIPES];
+ uint32_t burst_read;
+ uint32_t burst_write;
+};
+
+struct app_pktq_tap_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t burst_read;
+ uint32_t burst_write;
+ uint32_t dropless;
+ uint64_t n_retries;
+ uint32_t mempool_id; /* Position in the app->mempool_params */
+};
+
+struct app_pktq_source_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t mempool_id; /* Position in the app->mempool_params array */
+ uint32_t burst;
+ const char *file_name; /* Full path of PCAP file to be copied to mbufs */
+ uint32_t n_bytes_per_pkt;
+};
+
+struct app_pktq_sink_params {
+ char *name;
+ uint8_t parsed;
+ const char *file_name; /* Full path of PCAP file where packets are dumped */
+ uint32_t n_pkts_to_dump;
+};
+
+struct app_msgq_params {
+ char *name;
+ uint32_t parsed;
+ uint32_t size;
+ uint32_t cpu_socket_id;
+};
+
+enum app_pktq_in_type {
+ APP_PKTQ_IN_HWQ,
+ APP_PKTQ_IN_SWQ,
+ APP_PKTQ_IN_TM,
+ APP_PKTQ_IN_TAP,
+ APP_PKTQ_IN_KNI,
+ APP_PKTQ_IN_SOURCE,
+};
+
+struct app_pktq_in_params {
+ enum app_pktq_in_type type;
+ uint32_t id; /* Position in the appropriate app array */
+};
+
+enum app_pktq_out_type {
+ APP_PKTQ_OUT_HWQ,
+ APP_PKTQ_OUT_SWQ,
+ APP_PKTQ_OUT_TM,
+ APP_PKTQ_OUT_TAP,
+ APP_PKTQ_OUT_KNI,
+ APP_PKTQ_OUT_SINK,
+};
+
+struct app_pktq_out_params {
+ enum app_pktq_out_type type;
+ uint32_t id; /* Position in the appropriate app array */
+};
+
+#define APP_PIPELINE_TYPE_SIZE PIPELINE_TYPE_SIZE
+
+#define APP_MAX_PIPELINE_PKTQ_IN PIPELINE_MAX_PORT_IN
+#define APP_MAX_PIPELINE_PKTQ_OUT PIPELINE_MAX_PORT_OUT
+#define APP_MAX_PIPELINE_MSGQ_IN PIPELINE_MAX_MSGQ_IN
+#define APP_MAX_PIPELINE_MSGQ_OUT PIPELINE_MAX_MSGQ_OUT
+
+#define APP_MAX_PIPELINE_ARGS PIPELINE_MAX_ARGS
+
+struct app_pipeline_params {
+ char *name;
+ uint8_t parsed;
+
+ char type[APP_PIPELINE_TYPE_SIZE];
+
+ uint32_t socket_id;
+ uint32_t core_id;
+ uint32_t hyper_th_id;
+
+ struct app_pktq_in_params pktq_in[APP_MAX_PIPELINE_PKTQ_IN];
+ struct app_pktq_out_params pktq_out[APP_MAX_PIPELINE_PKTQ_OUT];
+ uint32_t msgq_in[APP_MAX_PIPELINE_MSGQ_IN];
+ uint32_t msgq_out[APP_MAX_PIPELINE_MSGQ_OUT];
+
+ uint32_t n_pktq_in;
+ uint32_t n_pktq_out;
+ uint32_t n_msgq_in;
+ uint32_t n_msgq_out;
+
+ uint32_t timer_period;
+
+ char *args_name[APP_MAX_PIPELINE_ARGS];
+ char *args_value[APP_MAX_PIPELINE_ARGS];
+ uint32_t n_args;
+};
+
+struct app_params;
+
+typedef void (*app_link_op)(struct app_params *app,
+ uint32_t link_id,
+ uint32_t up,
+ void *arg);
+
+#ifndef APP_MAX_PIPELINES
+#define APP_MAX_PIPELINES 64
+#endif
+
+struct app_link_data {
+ app_link_op f_link[APP_MAX_PIPELINES];
+ void *arg[APP_MAX_PIPELINES];
+};
+
+struct app_pipeline_data {
+ void *be;
+ void *fe;
+ struct pipeline_type *ptype;
+ uint64_t timer_period;
+ uint32_t enabled;
+};
+
+struct app_thread_pipeline_data {
+ uint32_t pipeline_id;
+ void *be;
+ pipeline_be_op_run f_run;
+ pipeline_be_op_timer f_timer;
+ uint64_t timer_period;
+ uint64_t deadline;
+};
+
+#ifndef APP_MAX_THREAD_PIPELINES
+#define APP_MAX_THREAD_PIPELINES 64
+#endif
+
+#ifndef APP_THREAD_TIMER_PERIOD
+#define APP_THREAD_TIMER_PERIOD 1
+#endif
+
+struct app_thread_data {
+ struct app_thread_pipeline_data regular[APP_MAX_THREAD_PIPELINES];
+ struct app_thread_pipeline_data custom[APP_MAX_THREAD_PIPELINES];
+
+ uint32_t n_regular;
+ uint32_t n_custom;
+
+ uint64_t timer_period;
+ uint64_t thread_req_deadline;
+
+ uint64_t deadline;
+
+ struct rte_ring *msgq_in;
+ struct rte_ring *msgq_out;
+
+ uint64_t headroom_time;
+ uint64_t headroom_cycles;
+ double headroom_ratio;
+} __rte_cache_aligned;
+
+#ifndef APP_MAX_LINKS
+#define APP_MAX_LINKS 16
+#endif
+
+struct app_eal_params {
+ /* Map lcore set to physical cpu set */
+ char *coremap;
+
+ /* Core ID that is used as master */
+ uint32_t master_lcore_present;
+ uint32_t master_lcore;
+
+ /* Number of memory channels */
+ uint32_t channels_present;
+ uint32_t channels;
+
+ /* Memory to allocate (see also --socket-mem) */
+ uint32_t memory_present;
+ uint32_t memory;
+
+ /* Force number of memory ranks (don't detect) */
+ uint32_t ranks_present;
+ uint32_t ranks;
+
+ /* Add a PCI device in black list. */
+ char *pci_blacklist[APP_MAX_LINKS];
+
+ /* Add a PCI device in white list. */
+ char *pci_whitelist[APP_MAX_LINKS];
+
+ /* Add a virtual device. */
+ char *vdev[APP_MAX_LINKS];
+
+ /* Use VMware TSC map instead of native RDTSC */
+ uint32_t vmware_tsc_map_present;
+ int vmware_tsc_map;
+
+ /* Type of this process (primary|secondary|auto) */
+ char *proc_type;
+
+ /* Set syslog facility */
+ char *syslog;
+
+ /* Set default log level */
+ uint32_t log_level_present;
+ uint32_t log_level;
+
+ /* Display version information on startup */
+ uint32_t version_present;
+ int version;
+
+ /* This help */
+ uint32_t help_present;
+ int help;
+
+ /* Use malloc instead of hugetlbfs */
+ uint32_t no_huge_present;
+ int no_huge;
+
+ /* Disable PCI */
+ uint32_t no_pci_present;
+ int no_pci;
+
+ /* Disable HPET */
+ uint32_t no_hpet_present;
+ int no_hpet;
+
+ /* No shared config (mmap'd files) */
+ uint32_t no_shconf_present;
+ int no_shconf;
+
+ /* Add driver */
+ char *add_driver;
+
+ /* Memory to allocate on sockets (comma separated values)*/
+ char *socket_mem;
+
+ /* Directory where hugetlbfs is mounted */
+ char *huge_dir;
+
+ /* Prefix for hugepage filenames */
+ char *file_prefix;
+
+ /* Base virtual address */
+ char *base_virtaddr;
+
+ /* Create /dev/uioX (usually done by hotplug) */
+ uint32_t create_uio_dev_present;
+ int create_uio_dev;
+
+ /* Interrupt mode for VFIO (legacy|msi|msix) */
+ char *vfio_intr;
+
+ /* Support running on Xen dom0 without hugetlbfs */
+ uint32_t xen_dom0_present;
+ int xen_dom0;
+
+ uint32_t parsed;
+};
+
+#ifndef APP_APPNAME_SIZE
+#define APP_APPNAME_SIZE 256
+#endif
+
+#ifndef APP_MAX_MEMPOOLS
+#define APP_MAX_MEMPOOLS 8
+#endif
+
+#define APP_MAX_HWQ_IN (APP_MAX_LINKS * APP_LINK_MAX_HWQ_IN)
+
+#define APP_MAX_HWQ_OUT (APP_MAX_LINKS * APP_LINK_MAX_HWQ_OUT)
+
+#ifndef APP_MAX_PKTQ_SWQ
+#define APP_MAX_PKTQ_SWQ 256
+#endif
+
+#define APP_MAX_PKTQ_TM APP_MAX_LINKS
+
+#ifndef APP_MAX_PKTQ_TAP
+#define APP_MAX_PKTQ_TAP APP_MAX_LINKS
+#endif
+
+#define APP_MAX_PKTQ_KNI APP_MAX_LINKS
+
+#ifndef APP_MAX_PKTQ_SOURCE
+#define APP_MAX_PKTQ_SOURCE 64
+#endif
+
+#ifndef APP_MAX_PKTQ_SINK
+#define APP_MAX_PKTQ_SINK 64
+#endif
+
+#ifndef APP_MAX_MSGQ
+#define APP_MAX_MSGQ 256
+#endif
+
+#ifndef APP_EAL_ARGC
+#define APP_EAL_ARGC 64
+#endif
+
+#ifndef APP_MAX_PIPELINE_TYPES
+#define APP_MAX_PIPELINE_TYPES 64
+#endif
+
+#ifndef APP_MAX_THREADS
+#define APP_MAX_THREADS RTE_MAX_LCORE
+#endif
+
+#ifndef APP_MAX_CMDS
+#define APP_MAX_CMDS 64
+#endif
+
+#ifndef APP_THREAD_HEADROOM_STATS_COLLECT
+#define APP_THREAD_HEADROOM_STATS_COLLECT 1
+#endif
+
+#define APP_CORE_MASK_SIZE \
+ (RTE_MAX_LCORE / 64 + ((RTE_MAX_LCORE % 64) ? 1 : 0))
+
+struct app_params {
+ /* Config */
+ char app_name[APP_APPNAME_SIZE];
+ const char *config_file;
+ const char *script_file;
+ const char *parser_file;
+ const char *output_file;
+ const char *preproc;
+ const char *preproc_args;
+ uint64_t port_mask;
+ uint32_t log_level;
+
+ struct app_eal_params eal_params;
+ struct app_mempool_params mempool_params[APP_MAX_MEMPOOLS];
+ struct app_link_params link_params[APP_MAX_LINKS];
+ struct app_pktq_hwq_in_params hwq_in_params[APP_MAX_HWQ_IN];
+ struct app_pktq_hwq_out_params hwq_out_params[APP_MAX_HWQ_OUT];
+ struct app_pktq_swq_params swq_params[APP_MAX_PKTQ_SWQ];
+ struct app_pktq_tm_params tm_params[APP_MAX_PKTQ_TM];
+ struct app_pktq_tap_params tap_params[APP_MAX_PKTQ_TAP];
+ struct app_pktq_kni_params kni_params[APP_MAX_PKTQ_KNI];
+ struct app_pktq_source_params source_params[APP_MAX_PKTQ_SOURCE];
+ struct app_pktq_sink_params sink_params[APP_MAX_PKTQ_SINK];
+ struct app_msgq_params msgq_params[APP_MAX_MSGQ];
+ struct app_pipeline_params pipeline_params[APP_MAX_PIPELINES];
+
+ uint32_t n_mempools;
+ uint32_t n_links;
+ uint32_t n_pktq_hwq_in;
+ uint32_t n_pktq_hwq_out;
+ uint32_t n_pktq_swq;
+ uint32_t n_pktq_tm;
+ uint32_t n_pktq_tap;
+ uint32_t n_pktq_kni;
+ uint32_t n_pktq_source;
+ uint32_t n_pktq_sink;
+ uint32_t n_msgq;
+ uint32_t n_pipelines;
+
+ /* Init */
+ char *eal_argv[1 + APP_EAL_ARGC];
+ struct cpu_core_map *core_map;
+ uint64_t core_mask[APP_CORE_MASK_SIZE];
+ struct rte_mempool *mempool[APP_MAX_MEMPOOLS];
+ struct app_link_data link_data[APP_MAX_LINKS];
+ struct rte_ring *swq[APP_MAX_PKTQ_SWQ];
+ struct rte_sched_port *tm[APP_MAX_PKTQ_TM];
+ int tap[APP_MAX_PKTQ_TAP];
+#ifdef RTE_LIBRTE_KNI
+ struct rte_kni *kni[APP_MAX_PKTQ_KNI];
+#endif /* RTE_LIBRTE_KNI */
+ struct rte_ring *msgq[APP_MAX_MSGQ];
+ struct pipeline_type pipeline_type[APP_MAX_PIPELINE_TYPES];
+ struct app_pipeline_data pipeline_data[APP_MAX_PIPELINES];
+ struct app_thread_data thread_data[APP_MAX_THREADS];
+ cmdline_parse_ctx_t cmds[APP_MAX_CMDS + 1];
+
+ int eal_argc;
+ uint32_t n_pipeline_types;
+ uint32_t n_cmds;
+};
+
+#define APP_PARAM_VALID(obj) ((obj)->name != NULL)
+
+#define APP_PARAM_COUNT(obj_array, n_objs) \
+{ \
+ size_t i; \
+ \
+ n_objs = 0; \
+ for (i = 0; i < RTE_DIM(obj_array); i++) \
+ if (APP_PARAM_VALID(&((obj_array)[i]))) \
+ n_objs++; \
+}
+
+#define APP_PARAM_FIND(obj_array, key) \
+({ \
+ ssize_t obj_idx; \
+ const ssize_t obj_count = RTE_DIM(obj_array); \
+ \
+ for (obj_idx = 0; obj_idx < obj_count; obj_idx++) { \
+ if (!APP_PARAM_VALID(&((obj_array)[obj_idx]))) \
+ continue; \
+ \
+ if (strcmp(key, (obj_array)[obj_idx].name) == 0) \
+ break; \
+ } \
+ obj_idx < obj_count ? obj_idx : -ENOENT; \
+})
+
+#define APP_PARAM_FIND_BY_ID(obj_array, prefix, id, obj) \
+do { \
+ char name[APP_PARAM_NAME_SIZE]; \
+ ssize_t pos; \
+ \
+ sprintf(name, prefix "%" PRIu32, id); \
+ pos = APP_PARAM_FIND(obj_array, name); \
+ obj = (pos < 0) ? NULL : &((obj_array)[pos]); \
+} while (0)
+
+#define APP_PARAM_GET_ID(obj, prefix, id) \
+do \
+ sscanf(obj->name, prefix "%" SCNu32, &id); \
+while (0) \
+
+#define APP_CHECK(exp, fmt, ...) \
+do { \
+ if (!(exp)) { \
+ fprintf(stderr, fmt "\n", ## __VA_ARGS__); \
+ abort(); \
+ } \
+} while (0)
+
+enum app_log_level {
+ APP_LOG_LEVEL_HIGH = 1,
+ APP_LOG_LEVEL_LOW,
+ APP_LOG_LEVELS
+};
+
+#define APP_LOG(app, level, fmt, ...) \
+do { \
+ if (app->log_level >= APP_LOG_LEVEL_ ## level) \
+ fprintf(stdout, "[APP] " fmt "\n", ## __VA_ARGS__); \
+} while (0)
+
+static inline uint32_t
+app_link_get_n_rxq(struct app_params *app, struct app_link_params *link)
+{
+ uint32_t n_rxq = 0, link_id, i;
+ uint32_t n_pktq_hwq_in = RTE_MIN(app->n_pktq_hwq_in,
+ RTE_DIM(app->hwq_in_params));
+
+ APP_PARAM_GET_ID(link, "LINK", link_id);
+
+ for (i = 0; i < n_pktq_hwq_in; i++) {
+ struct app_pktq_hwq_in_params *p = &app->hwq_in_params[i];
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ sscanf(p->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id, &rxq_queue_id);
+ if (rxq_link_id == link_id)
+ n_rxq++;
+ }
+
+ return n_rxq;
+}
+
+static inline uint32_t
+app_link_get_n_txq(struct app_params *app, struct app_link_params *link)
+{
+ uint32_t n_txq = 0, link_id, i;
+ uint32_t n_pktq_hwq_out = RTE_MIN(app->n_pktq_hwq_out,
+ RTE_DIM(app->hwq_out_params));
+
+ APP_PARAM_GET_ID(link, "LINK", link_id);
+
+ for (i = 0; i < n_pktq_hwq_out; i++) {
+ struct app_pktq_hwq_out_params *p = &app->hwq_out_params[i];
+ uint32_t txq_link_id, txq_queue_id;
+
+ sscanf(p->name, "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id, &txq_queue_id);
+ if (txq_link_id == link_id)
+ n_txq++;
+ }
+
+ return n_txq;
+}
+
+static inline uint32_t
+app_rxq_get_readers(struct app_params *app, struct app_pktq_hwq_in_params *rxq)
+{
+ uint32_t pos = rxq - app->hwq_in_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_HWQ) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_swq_get_readers(struct app_params *app, struct app_pktq_swq_params *swq)
+{
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_SWQ) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline struct app_pipeline_params *
+app_swq_get_reader(struct app_params *app,
+ struct app_pktq_swq_params *swq,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_SWQ) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
+static inline uint32_t
+app_tm_get_readers(struct app_params *app, struct app_pktq_tm_params *tm)
+{
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_TM) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline struct app_pipeline_params *
+app_tm_get_reader(struct app_params *app,
+ struct app_pktq_tm_params *tm,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_TM) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
+static inline uint32_t
+app_tap_get_readers(struct app_params *app, struct app_pktq_tap_params *tap)
+{
+ uint32_t pos = tap - app->tap_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_TAP) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline struct app_pipeline_params *
+app_tap_get_reader(struct app_params *app,
+ struct app_pktq_tap_params *tap,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = tap - app->tap_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_TAP) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
+static inline uint32_t
+app_kni_get_readers(struct app_params *app, struct app_pktq_kni_params *kni)
+{
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_KNI) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline struct app_pipeline_params *
+app_kni_get_reader(struct app_params *app,
+ struct app_pktq_kni_params *kni,
+ uint32_t *pktq_in_id)
+{
+ struct app_pipeline_params *reader = NULL;
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_KNI) &&
+ (pktq->id == pos)) {
+ n_readers++;
+ reader = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_readers != 1)
+ return NULL;
+
+ *pktq_in_id = id;
+ return reader;
+}
+
+static inline uint32_t
+app_source_get_readers(struct app_params *app,
+struct app_pktq_source_params *source)
+{
+ uint32_t pos = source - app->source_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_in = RTE_MIN(p->n_pktq_in, RTE_DIM(p->pktq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_in; j++) {
+ struct app_pktq_in_params *pktq = &p->pktq_in[j];
+
+ if ((pktq->type == APP_PKTQ_IN_SOURCE) &&
+ (pktq->id == pos))
+ n_readers++;
+ }
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_msgq_get_readers(struct app_params *app, struct app_msgq_params *msgq)
+{
+ uint32_t pos = msgq - app->msgq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_readers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_msgq_in = RTE_MIN(p->n_msgq_in, RTE_DIM(p->msgq_in));
+ uint32_t j;
+
+ for (j = 0; j < n_msgq_in; j++)
+ if (p->msgq_in[j] == pos)
+ n_readers++;
+ }
+
+ return n_readers;
+}
+
+static inline uint32_t
+app_txq_get_writers(struct app_params *app, struct app_pktq_hwq_out_params *txq)
+{
+ uint32_t pos = txq - app->hwq_out_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_HWQ) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_swq_get_writers(struct app_params *app, struct app_pktq_swq_params *swq)
+{
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_SWQ) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline struct app_pipeline_params *
+app_swq_get_writer(struct app_params *app,
+ struct app_pktq_swq_params *swq,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = swq - app->swq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_SWQ) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
+static inline uint32_t
+app_tm_get_writers(struct app_params *app, struct app_pktq_tm_params *tm)
+{
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_TM) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline struct app_pipeline_params *
+app_tm_get_writer(struct app_params *app,
+ struct app_pktq_tm_params *tm,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = tm - app->tm_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_TM) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
+static inline uint32_t
+app_tap_get_writers(struct app_params *app, struct app_pktq_tap_params *tap)
+{
+ uint32_t pos = tap - app->tap_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_TAP) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline struct app_pipeline_params *
+app_tap_get_writer(struct app_params *app,
+ struct app_pktq_tap_params *tap,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = tap - app->tap_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_TAP) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
+static inline uint32_t
+app_kni_get_writers(struct app_params *app, struct app_pktq_kni_params *kni)
+{
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_KNI) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline struct app_pipeline_params *
+app_kni_get_writer(struct app_params *app,
+ struct app_pktq_kni_params *kni,
+ uint32_t *pktq_out_id)
+{
+ struct app_pipeline_params *writer = NULL;
+ uint32_t pos = kni - app->kni_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, id = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_KNI) &&
+ (pktq->id == pos)) {
+ n_writers++;
+ writer = p;
+ id = j;
+ }
+ }
+ }
+
+ if (n_writers != 1)
+ return NULL;
+
+ *pktq_out_id = id;
+ return writer;
+}
+
+static inline uint32_t
+app_sink_get_writers(struct app_params *app, struct app_pktq_sink_params *sink)
+{
+ uint32_t pos = sink - app->sink_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_pktq_out = RTE_MIN(p->n_pktq_out,
+ RTE_DIM(p->pktq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_pktq_out; j++) {
+ struct app_pktq_out_params *pktq = &p->pktq_out[j];
+
+ if ((pktq->type == APP_PKTQ_OUT_SINK) &&
+ (pktq->id == pos))
+ n_writers++;
+ }
+ }
+
+ return n_writers;
+}
+
+static inline uint32_t
+app_msgq_get_writers(struct app_params *app, struct app_msgq_params *msgq)
+{
+ uint32_t pos = msgq - app->msgq_params;
+ uint32_t n_pipelines = RTE_MIN(app->n_pipelines,
+ RTE_DIM(app->pipeline_params));
+ uint32_t n_writers = 0, i;
+
+ for (i = 0; i < n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ uint32_t n_msgq_out = RTE_MIN(p->n_msgq_out,
+ RTE_DIM(p->msgq_out));
+ uint32_t j;
+
+ for (j = 0; j < n_msgq_out; j++)
+ if (p->msgq_out[j] == pos)
+ n_writers++;
+ }
+
+ return n_writers;
+}
+
+static inline struct app_link_params *
+app_get_link_for_rxq(struct app_params *app, struct app_pktq_hwq_in_params *p)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ ssize_t link_param_idx;
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ sscanf(p->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id, &rxq_queue_id);
+ sprintf(link_name, "LINK%" PRIu32, rxq_link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+static inline struct app_link_params *
+app_get_link_for_txq(struct app_params *app, struct app_pktq_hwq_out_params *p)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ ssize_t link_param_idx;
+ uint32_t txq_link_id, txq_queue_id;
+
+ sscanf(p->name, "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id, &txq_queue_id);
+ sprintf(link_name, "LINK%" PRIu32, txq_link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+static inline struct app_link_params *
+app_get_link_for_tm(struct app_params *app, struct app_pktq_tm_params *p_tm)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ uint32_t link_id;
+ ssize_t link_param_idx;
+
+ sscanf(p_tm->name, "TM%" PRIu32, &link_id);
+ sprintf(link_name, "LINK%" PRIu32, link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p_tm->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+static inline struct app_link_params *
+app_get_link_for_kni(struct app_params *app, struct app_pktq_kni_params *p_kni)
+{
+ char link_name[APP_PARAM_NAME_SIZE];
+ uint32_t link_id;
+ ssize_t link_param_idx;
+
+ sscanf(p_kni->name, "KNI%" PRIu32, &link_id);
+ sprintf(link_name, "LINK%" PRIu32, link_id);
+ link_param_idx = APP_PARAM_FIND(app->link_params, link_name);
+ APP_CHECK((link_param_idx >= 0),
+ "Cannot find %s for %s", link_name, p_kni->name);
+
+ return &app->link_params[link_param_idx];
+}
+
+static inline uint32_t
+app_core_is_enabled(struct app_params *app, uint32_t lcore_id)
+{
+ return(app->core_mask[lcore_id / 64] &
+ (1LLU << (lcore_id % 64)));
+}
+
+static inline void
+app_core_enable_in_core_mask(struct app_params *app, int lcore_id)
+{
+ app->core_mask[lcore_id / 64] |= 1LLU << (lcore_id % 64);
+
+}
+
+static inline void
+app_core_build_core_mask_string(struct app_params *app, char *mask_buffer)
+{
+ int i;
+
+ mask_buffer[0] = '\0';
+ for (i = (int)RTE_DIM(app->core_mask); i > 0; i--) {
+ /* For Hex representation of bits in uint64_t */
+ char buffer[(64 / 8) * 2 + 1];
+ memset(buffer, 0, sizeof(buffer));
+ snprintf(buffer, sizeof(buffer), "%016" PRIx64,
+ app->core_mask[i-1]);
+ strcat(mask_buffer, buffer);
+ }
+}
+
+void app_pipeline_params_get(struct app_params *app,
+ struct app_pipeline_params *p_in,
+ struct pipeline_params *p_out);
+
+int app_config_init(struct app_params *app);
+
+int app_config_args(struct app_params *app,
+ int argc, char **argv);
+
+int app_config_preproc(struct app_params *app);
+
+int app_config_parse(struct app_params *app,
+ const char *file_name);
+
+int app_config_parse_tm(struct app_params *app);
+
+void app_config_save(struct app_params *app,
+ const char *file_name);
+
+int app_config_check(struct app_params *app);
+
+int app_init(struct app_params *app);
+
+int app_post_init(struct app_params *app);
+
+int app_thread(void *arg);
+
+int app_pipeline_type_register(struct app_params *app,
+ struct pipeline_type *ptype);
+
+struct pipeline_type *app_pipeline_type_find(struct app_params *app,
+ char *name);
+
+void app_link_up_internal(struct app_params *app,
+ struct app_link_params *cp);
+
+void app_link_down_internal(struct app_params *app,
+ struct app_link_params *cp);
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/action.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/action.cfg
new file mode 100644
index 00000000..994ae94a
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/action.cfg
@@ -0,0 +1,68 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+;             ________________
+; RXQ0.0 --->|                |---> TXQ0.0
+;            |                |
+; RXQ1.0 --->|                |---> TXQ1.0
+;            |      Flow      |
+; RXQ2.0 --->|    Actions     |---> TXQ2.0
+;            |                |
+; RXQ3.0 --->|                |---> TXQ3.0
+;            |________________|
+;
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; #  Field Name        Offset (Bytes)   Size (Bytes)
+; 0  Mbuf              0                128
+; 1  Headroom          128              128
+; 2  Ethernet header   256              14
+; 3  IPv4 header       270              20
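+;
+; Offset arithmetic behind the PIPELINE1 parameters below (derived from the
+; layout above): ip_hdr_offset = 256 + 14 = 270, i.e. the IPv4 header starts
+; right after the 14-byte Ethernet header; flow_id_offset = 270 + 16 = 286,
+; i.e. the IPv4 destination address, which sits 16 bytes into the IPv4
+; header; color_offset = 128 points into the mbuf headroom area.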
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FLOW_ACTIONS
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+n_flows = 65536
+n_meters_per_flow = 4
+flow_id_offset = 286; ipdaddr
+ip_hdr_offset = 270
+color_offset = 128
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/action.sh b/src/seastar/dpdk/examples/ip_pipeline/config/action.sh
new file mode 100644
index 00000000..2986ae60
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/action.sh
@@ -0,0 +1,119 @@
+#
+# run ./config/action.sh
+#
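+# Each "p 1 ..." command below targets PIPELINE1, the FLOW_ACTIONS pipeline
+# defined in ./config/action.cfg: flows 0-3 each get four trTCM meters with
+# per-color policer actions and are bound to output ports 0-3, after which
+# the DSCP -> (traffic class, color) translation table is programmed.
+#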
+
+p 1 action flow 0 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 0 g G y Y r R
+p 1 action flow 0 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 1 g G y Y r R
+p 1 action flow 0 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 2 g G y Y r R
+p 1 action flow 0 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 0 policer 3 g G y Y r R
+p 1 action flow 0 port 0
+
+p 1 action flow 1 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 0 g G y Y r R
+p 1 action flow 1 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 1 g G y Y r R
+p 1 action flow 1 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 2 g G y Y r R
+p 1 action flow 1 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 1 policer 3 g G y Y r R
+p 1 action flow 1 port 1
+
+p 1 action flow 2 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 0 g G y Y r R
+p 1 action flow 2 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 1 g G y Y r R
+p 1 action flow 2 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 2 g G y Y r R
+p 1 action flow 2 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 2 policer 3 g G y Y r R
+p 1 action flow 2 port 2
+
+p 1 action flow 3 meter 0 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 0 g G y Y r R
+p 1 action flow 3 meter 1 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 1 g G y Y r R
+p 1 action flow 3 meter 2 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 2 g G y Y r R
+p 1 action flow 3 meter 3 trtcm 1250000000 1250000000 1000000 1000000
+p 1 action flow 3 policer 3 g G y Y r R
+p 1 action flow 3 port 3
+
+#p 1 action flow bulk ./config/action.txt
+
+#p 1 action flow ls
+
+p 1 action flow 0 stats
+p 1 action flow 1 stats
+p 1 action flow 2 stats
+p 1 action flow 3 stats
+
+p 1 action dscp 0 class 0 color G
+p 1 action dscp 1 class 1 color G
+p 1 action dscp 2 class 2 color G
+p 1 action dscp 3 class 3 color G
+p 1 action dscp 4 class 0 color G
+p 1 action dscp 5 class 1 color G
+p 1 action dscp 6 class 2 color G
+p 1 action dscp 7 class 3 color G
+p 1 action dscp 8 class 0 color G
+p 1 action dscp 9 class 1 color G
+p 1 action dscp 10 class 2 color G
+p 1 action dscp 11 class 3 color G
+p 1 action dscp 12 class 0 color G
+p 1 action dscp 13 class 1 color G
+p 1 action dscp 14 class 2 color G
+p 1 action dscp 15 class 3 color G
+p 1 action dscp 16 class 0 color G
+p 1 action dscp 17 class 1 color G
+p 1 action dscp 18 class 2 color G
+p 1 action dscp 19 class 3 color G
+p 1 action dscp 20 class 0 color G
+p 1 action dscp 21 class 1 color G
+p 1 action dscp 22 class 2 color G
+p 1 action dscp 23 class 3 color G
+p 1 action dscp 24 class 0 color G
+p 1 action dscp 25 class 1 color G
+p 1 action dscp 26 class 2 color G
+p 1 action dscp 27 class 3 color G
+p 1 action dscp 28 class 0 color G
+p 1 action dscp 29 class 1 color G
+p 1 action dscp 30 class 2 color G
+p 1 action dscp 31 class 3 color G
+p 1 action dscp 32 class 0 color G
+p 1 action dscp 33 class 1 color G
+p 1 action dscp 34 class 2 color G
+p 1 action dscp 35 class 3 color G
+p 1 action dscp 36 class 0 color G
+p 1 action dscp 37 class 1 color G
+p 1 action dscp 38 class 2 color G
+p 1 action dscp 39 class 3 color G
+p 1 action dscp 40 class 0 color G
+p 1 action dscp 41 class 1 color G
+p 1 action dscp 42 class 2 color G
+p 1 action dscp 43 class 3 color G
+p 1 action dscp 44 class 0 color G
+p 1 action dscp 45 class 1 color G
+p 1 action dscp 46 class 2 color G
+p 1 action dscp 47 class 3 color G
+p 1 action dscp 48 class 0 color G
+p 1 action dscp 49 class 1 color G
+p 1 action dscp 50 class 2 color G
+p 1 action dscp 51 class 3 color G
+p 1 action dscp 52 class 0 color G
+p 1 action dscp 53 class 1 color G
+p 1 action dscp 54 class 2 color G
+p 1 action dscp 55 class 3 color G
+p 1 action dscp 56 class 0 color G
+p 1 action dscp 57 class 1 color G
+p 1 action dscp 58 class 2 color G
+p 1 action dscp 59 class 3 color G
+p 1 action dscp 60 class 0 color G
+p 1 action dscp 61 class 1 color G
+p 1 action dscp 62 class 2 color G
+p 1 action dscp 63 class 3 color G
+
+p 1 action dscp ls
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/action.txt b/src/seastar/dpdk/examples/ip_pipeline/config/action.txt
new file mode 100644
index 00000000..f14207b9
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/action.txt
@@ -0,0 +1,8 @@
+#
+# p <pipelineid> action flow bulk ./config/action.txt
+#
+
+flow 0 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 0
+flow 1 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 1
+flow 2 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 2
+flow 3 meter 0 trtcm 1250000000 1250000000 1000000 1000000 policer 0 g G y Y r R meter 1 trtcm 1250000000 1250000000 1000000 1000000 policer 1 g G y Y r R meter 2 trtcm 1250000000 1250000000 1000000 1000000 policer 2 g G y Y r R meter 3 trtcm 1250000000 1250000000 1000000 1000000 policer 3 g G y Y r R port 3
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/diagram-generator.py b/src/seastar/dpdk/examples/ip_pipeline/config/diagram-generator.py
new file mode 100755
index 00000000..17488330
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/diagram-generator.py
@@ -0,0 +1,346 @@
+#!/usr/bin/env python
+
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# This script creates a visual representation for a configuration file used by
+# the DPDK ip_pipeline application.
+#
+# The input configuration file is translated to an output file in DOT syntax,
+# which is then used to create the image file using graphviz
+# (www.graphviz.org).
+#
+
+from __future__ import print_function
+import argparse
+import re
+import os
+
+#
+# Command to generate the image file
+#
+DOT_COMMAND = 'dot -Gsize=20,30 -Tpng %s > %s'
+
+#
+# Layout of generated DOT file
+#
+DOT_INTRO = \
+ '#\n# Command to generate image file:\n# \t%s\n#\n\n'
+DOT_GRAPH_BEGIN = \
+ 'digraph g {\n graph [ splines = true rankdir = "LR" ]\n'
+DOT_NODE_LINK_RX = \
+ ' "%s RX" [ shape = box style = filled fillcolor = yellowgreen ]\n'
+DOT_NODE_LINK_TX = \
+ ' "%s TX" [ shape = box style = filled fillcolor = yellowgreen ]\n'
+DOT_NODE_KNI_RX = \
+ ' "%s RX" [ shape = box style = filled fillcolor = orange ]\n'
+DOT_NODE_KNI_TX = \
+ ' "%s TX" [ shape = box style = filled fillcolor = orange ]\n'
+DOT_NODE_TAP_RX = \
+ ' "%s RX" [ shape = box style = filled fillcolor = gold ]\n'
+DOT_NODE_TAP_TX = \
+ ' "%s TX" [ shape = box style = filled fillcolor = gold ]\n'
+DOT_NODE_SOURCE = \
+ ' "%s" [ shape = box style = filled fillcolor = darkgreen ]\n'
+DOT_NODE_SINK = \
+ ' "%s" [ shape = box style = filled fillcolor = peachpuff ]\n'
+DOT_NODE_PIPELINE = \
+ ' "%s" [ shape = box style = filled fillcolor = royalblue ]\n'
+DOT_EDGE_PKTQ = \
+ ' "%s" -> "%s" [ label = "%s" color = gray ]\n'
+DOT_GRAPH_END = \
+ '}\n'
+
+# Relationships between the graph nodes and the graph edges:
+#
+# Edge ID | Edge Label | Writer Node | Reader Node | Dependencies
+# --------+------------+-------------+---------------+--------------
+# RXQx.y | RXQx.y | LINKx | PIPELINEz | LINKx
+# TXQx.y | TXQx.y | PIPELINEz | LINKx | LINKx
+# SWQx | SWQx | PIPELINEy | PIPELINEz | -
+# TMx | TMx | PIPELINEy | PIPELINEz | LINKx
+# KNIx RX | KNIx | KNIx RX | PIPELINEy | KNIx, LINKx
+# KNIx TX | KNIx | PIPELINEy | KNIx TX | KNIx, LINKx
+# TAPx RX | TAPx | TAPx RX | PIPELINEy | TAPx
+# TAPx TX | TAPx | PIPELINEy | TAPx TX | TAPx
+# SOURCEx | SOURCEx | SOURCEx | PIPELINEy | SOURCEx
+# SINKx | SINKx | PIPELINEy | SINKx | SINKx
+
+
+#
+# Parse the input configuration file to detect the graph nodes and edges
+#
+def process_config_file(cfgfile):
+ edges = {}
+ links = set()
+ knis = set()
+ taps = set()
+ sources = set()
+ sinks = set()
+ pipelines = set()
+ pipeline = ''
+
+ dotfile = cfgfile + '.txt'
+ imgfile = cfgfile + '.png'
+
+ #
+ # Read configuration file
+ #
+ lines = open(cfgfile, 'r')
+ for line in lines:
+ # Remove any leading and trailing white space characters
+ line = line.strip()
+
+ # Remove any comment at end of line
+ line, sep, tail = line.partition(';')
+
+ # Look for next "PIPELINE" section
+ match = re.search(r'\[(PIPELINE\d+)\]', line)
+ if match:
+ pipeline = match.group(1)
+ continue
+
+ # Look for next "pktq_in" section entry
+ match = re.search(r'pktq_in\s*=\s*(.+)', line)
+ if match:
+ pipelines.add(pipeline)
+            for q in re.findall(r'\S+', match.group(1)):
+ match_rxq = re.search(r'^RXQ(\d+)\.\d+$', q)
+ match_swq = re.search(r'^SWQ\d+$', q)
+ match_tm = re.search(r'^TM(\d+)$', q)
+ match_kni = re.search(r'^KNI(\d+)$', q)
+ match_tap = re.search(r'^TAP\d+$', q)
+ match_source = re.search(r'^SOURCE\d+$', q)
+
+ # Set ID for the current packet queue (graph edge)
+ q_id = ''
+ if match_rxq or match_swq or match_tm or match_source:
+ q_id = q
+ elif match_kni or match_tap:
+ q_id = q + ' RX'
+ else:
+ print('Error: Unrecognized pktq_in element "%s"' % q)
+ return
+
+ # Add current packet queue to the set of graph edges
+ if q_id not in edges:
+ edges[q_id] = {}
+ if 'label' not in edges[q_id]:
+ edges[q_id]['label'] = q
+ if 'readers' not in edges[q_id]:
+ edges[q_id]['readers'] = []
+ if 'writers' not in edges[q_id]:
+ edges[q_id]['writers'] = []
+
+ # Add reader for the new edge
+ edges[q_id]['readers'].append(pipeline)
+
+ # Check for RXQ
+ if match_rxq:
+ link = 'LINK' + str(match_rxq.group(1))
+ edges[q_id]['writers'].append(link + ' RX')
+ links.add(link)
+ continue
+
+ # Check for SWQ
+ if match_swq:
+ continue
+
+ # Check for TM
+ if match_tm:
+ link = 'LINK' + str(match_tm.group(1))
+ links.add(link)
+ continue
+
+ # Check for KNI
+ if match_kni:
+ link = 'LINK' + str(match_kni.group(1))
+ edges[q_id]['writers'].append(q_id)
+ knis.add(q)
+ links.add(link)
+ continue
+
+ # Check for TAP
+ if match_tap:
+ edges[q_id]['writers'].append(q_id)
+ taps.add(q)
+ continue
+
+ # Check for SOURCE
+ if match_source:
+ edges[q_id]['writers'].append(q)
+ sources.add(q)
+ continue
+
+ continue
+
+ # Look for next "pktq_out" section entry
+ match = re.search(r'pktq_out\s*=\s*(.+)', line)
+ if match:
+            for q in re.findall(r'\S+', match.group(1)):
+ match_txq = re.search(r'^TXQ(\d+)\.\d+$', q)
+ match_swq = re.search(r'^SWQ\d+$', q)
+ match_tm = re.search(r'^TM(\d+)$', q)
+ match_kni = re.search(r'^KNI(\d+)$', q)
+ match_tap = re.search(r'^TAP(\d+)$', q)
+ match_sink = re.search(r'^SINK(\d+)$', q)
+
+ # Set ID for the current packet queue (graph edge)
+ q_id = ''
+ if match_txq or match_swq or match_tm or match_sink:
+ q_id = q
+ elif match_kni or match_tap:
+ q_id = q + ' TX'
+ else:
+ print('Error: Unrecognized pktq_out element "%s"' % q)
+ return
+
+ # Add current packet queue to the set of graph edges
+ if q_id not in edges:
+ edges[q_id] = {}
+ if 'label' not in edges[q_id]:
+ edges[q_id]['label'] = q
+ if 'readers' not in edges[q_id]:
+ edges[q_id]['readers'] = []
+ if 'writers' not in edges[q_id]:
+ edges[q_id]['writers'] = []
+
+ # Add writer for the new edge
+ edges[q_id]['writers'].append(pipeline)
+
+ # Check for TXQ
+ if match_txq:
+ link = 'LINK' + str(match_txq.group(1))
+ edges[q_id]['readers'].append(link + ' TX')
+ links.add(link)
+ continue
+
+ # Check for SWQ
+ if match_swq:
+ continue
+
+ # Check for TM
+ if match_tm:
+ link = 'LINK' + str(match_tm.group(1))
+ links.add(link)
+ continue
+
+ # Check for KNI
+ if match_kni:
+ link = 'LINK' + str(match_kni.group(1))
+ edges[q_id]['readers'].append(q_id)
+ knis.add(q)
+ links.add(link)
+ continue
+
+ # Check for TAP
+ if match_tap:
+ edges[q_id]['readers'].append(q_id)
+ taps.add(q)
+ continue
+
+ # Check for SINK
+ if match_sink:
+ edges[q_id]['readers'].append(q)
+ sinks.add(q)
+ continue
+
+ continue
+
+ #
+ # Write DOT file
+ #
+ print('Creating DOT file "%s" ...' % dotfile)
+ dot_cmd = DOT_COMMAND % (dotfile, imgfile)
+ file = open(dotfile, 'w')
+ file.write(DOT_INTRO % dot_cmd)
+ file.write(DOT_GRAPH_BEGIN)
+
+ # Write the graph nodes to the DOT file
+ for l in sorted(links):
+ file.write(DOT_NODE_LINK_RX % l)
+ file.write(DOT_NODE_LINK_TX % l)
+ for k in sorted(knis):
+ file.write(DOT_NODE_KNI_RX % k)
+ file.write(DOT_NODE_KNI_TX % k)
+ for t in sorted(taps):
+ file.write(DOT_NODE_TAP_RX % t)
+ file.write(DOT_NODE_TAP_TX % t)
+ for s in sorted(sources):
+ file.write(DOT_NODE_SOURCE % s)
+ for s in sorted(sinks):
+ file.write(DOT_NODE_SINK % s)
+ for p in sorted(pipelines):
+ file.write(DOT_NODE_PIPELINE % p)
+
+ # Write the graph edges to the DOT file
+ for q in sorted(edges.keys()):
+ rw = edges[q]
+ if 'writers' not in rw:
+ print('Error: "%s" has no writer' % q)
+ return
+ if 'readers' not in rw:
+ print('Error: "%s" has no reader' % q)
+ return
+ for w in rw['writers']:
+ for r in rw['readers']:
+ file.write(DOT_EDGE_PKTQ % (w, r, rw['label']))
+
+ file.write(DOT_GRAPH_END)
+ file.close()
+
+ #
+ # Execute the DOT command to create the image file
+ #
+ print('Creating image file "%s" ...' % imgfile)
+ if os.system('which dot > /dev/null'):
+        print('Error: Unable to locate "dot" executable. '
+ 'Please install the "graphviz" package (www.graphviz.org).')
+ return
+
+ os.system(dot_cmd)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Create diagram for IP '
+ 'pipeline configuration '
+ 'file.')
+
+ parser.add_argument(
+ '-f',
+ '--file',
+ help='input configuration file (e.g. "ip_pipeline.cfg")',
+ required=True)
+
+ args = parser.parse_args()
+
+ process_config_file(args.file)
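As a rough usage sketch (the file name below is only illustrative; any ip_pipeline .cfg file in this directory works), the generator is driven through its -f option: it writes "<cfgfile>.txt" with the DOT description and then shells out to graphviz's "dot" to render "<cfgfile>.png".

    # Hypothetical invocation of the script above from Python.
    import subprocess

    subprocess.call(['./config/diagram-generator.py',
                     '-f', './config/l3fwd.cfg'])
    # Expected outputs: ./config/l3fwd.cfg.txt (DOT) and ./config/l3fwd.cfg.png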
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_downstream.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_downstream.cfg
new file mode 100644
index 00000000..c6b4e1f2
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_downstream.cfg
@@ -0,0 +1,97 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+; An edge router typically sits between two networks such as the provider
+; core network and the provider access network. A typical packet processing
+; pipeline for the downstream traffic (i.e. traffic from the core to the
+; access network) contains the following functional blocks: Packet RX &
+; Routing, Traffic Management and Packet TX. The input packets are assumed
+; to be IPv4, while the output packets are Q-in-Q IPv4.
+;
+; A simple implementation for this functional pipeline is presented below.
+;
+; Packet Rx & Traffic Management Packet Tx
+; Routing (Pass-Through) (Pass-Through)
+; _____________________ SWQ0 ______________________ SWQ4 _____________________
+; RXQ0.0 --->| |----->| |----->| |---> TXQ0.0
+; | | SWQ1 | | SWQ5 | |
+; RXQ1.0 --->| |----->| |----->| |---> TXQ1.0
+; | (P1) | SWQ2 | (P2) | SWQ6 | (P3) |
+; RXQ2.0 --->| |----->| |----->| |---> TXQ2.0
+; | | SWQ3 | | SWQ7 | |
+; RXQ3.0 --->| |----->| |----->| |---> TXQ3.0
+; |_____________________| |______________________| |_____________________|
+; | | ^ | ^ | ^ | ^
+; | |__| |__| |__| |__|
+; +--> SINK0 TM0 TM1 TM2 TM3
+; (Default)
+;
+; Input packet: Ethernet/IPv4
+; Output packet: Ethernet/QinQ/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = SWQ0 SWQ1 SWQ2 SWQ3 SINK0
+encap = ethernet_qinq
+qinq_sched = test
+ip_hdr_offset = 270
+
+[PIPELINE2]
+type = PASS-THROUGH
+core = 2
+pktq_in = SWQ0 SWQ1 SWQ2 SWQ3 TM0 TM1 TM2 TM3
+pktq_out = TM0 TM1 TM2 TM3 SWQ4 SWQ5 SWQ6 SWQ7
+
+[PIPELINE3]
+type = PASS-THROUGH
+core = 3
+pktq_in = SWQ4 SWQ5 SWQ6 SWQ7
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
+
+[MEMPOOL0]
+pool_size = 2M
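The pass-through pipeline P2 above lists the traffic-manager ports on both its input and its output side. Assuming the pass-through pipeline's usual 1:1 mapping of input port i to output port i (which is what the diagram above depicts), the wiring works out as sketched below:

    # Illustrative sketch of the P2 port wiring under a 1:1 in->out mapping.
    pktq_in  = ['SWQ0', 'SWQ1', 'SWQ2', 'SWQ3', 'TM0', 'TM1', 'TM2', 'TM3']
    pktq_out = ['TM0',  'TM1',  'TM2',  'TM3',  'SWQ4', 'SWQ5', 'SWQ6', 'SWQ7']

    for i, (src, dst) in enumerate(zip(pktq_in, pktq_out)):
        print('P2 port %d: %s -> %s' % (i, src, dst))
    # Packets therefore enter the traffic manager via SWQ0..3 -> TM0..3 and,
    # after scheduling, leave it via TM0..3 -> SWQ4..7 towards pipeline P3.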
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_downstream.sh b/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_downstream.sh
new file mode 100644
index 00000000..67c3a0d1
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_downstream.sh
@@ -0,0 +1,13 @@
+#
+# run ./config/edge_router_downstream.sh
+#
+
+################################################################################
+# Routing: Ether QinQ, ARP off
+################################################################################
+p 1 route add default 4 #SINK0
+p 1 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 qinq 256 257
+p 1 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 qinq 258 259
+p 1 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 qinq 260 261
+p 1 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 qinq 262 263
+#p 1 route ls
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_upstream.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_upstream.cfg
new file mode 100644
index 00000000..dea42b95
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_upstream.cfg
@@ -0,0 +1,124 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+; An edge router typically sits between two networks such as the provider
+; core network and the provider access network. A typical packet processing
+; pipeline for the upstream traffic (i.e. traffic from the access to the
+; core network) contains the following functional blocks: Packet RX &
+; Firewall, Flow Classification, Metering, Routing and Packet TX. The input
+; packets are assumed to be Q-in-Q IPv4, while the output packets are MPLS
+; IPv4 (with a variable number of labels per route).
+;
+; A simple implementation for this functional pipeline is presented below.
+;
+; Packet RX & Pass-Through Flow Classification Flow Actions Routing
+: Firewall
+; __________ SWQ0 __________ SWQ4 __________ SWQ8 __________ SWQ12 __________
+; RXQ0.0 --->| |------>| |------>| |------>| |------>| |------> TXQ0.0
+; | | SWQ1 | | SWQ5 | | SWQ9 | | SWQ13 | |
+; RXQ1.0 --->| |------>| |------>| |------>| |------>| |------> TXQ1.0
+; | (P1) | SWQ2 | (P2) | SWQ6 | (P3) | SWQ10 | (P4) | SWQ14 | (P5) |
+; RXQ2.0 --->| |------>| |------>| |------>| |------>| |------> TXQ2.0
+; | | SWQ3 | | SWQ7 | | SWQ11 | | SWQ15 | |
+; RXQ3.0 --->| |------>| |------>| |------>| |------>| |------> TXQ3.0
+; |__________| |__________| |__________| |__________| |__________|
+; | | |
+; +--> SINK0 (Default) +--> SINK1 (Default) +--> SINK2 (Default)
+;
+; Input packet: Ethernet/QinQ/IPv4
+; Output packet: Ethernet/MPLS/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 QinQ header 270 8
+; 4 IPv4 header 278 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FIREWALL
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = SWQ0 SWQ1 SWQ2 SWQ3 SINK0
+n_rules = 4096
+pkt_type = qinq_ipv4
+
+[PIPELINE2]
+type = PASS-THROUGH
+core = 2
+pktq_in = SWQ0 SWQ1 SWQ2 SWQ3
+pktq_out = SWQ4 SWQ5 SWQ6 SWQ7
+dma_size = 8
+dma_dst_offset = 128
+dma_src_offset = 268; 1st Ethertype offset
+dma_src_mask = 00000FFF00000FFF; qinq
+dma_hash_offset = 136; dma_dst_offset + dma_size
+
+[PIPELINE3]
+type = FLOW_CLASSIFICATION
+core = 2
+pktq_in = SWQ4 SWQ5 SWQ6 SWQ7
+pktq_out = SWQ8 SWQ9 SWQ10 SWQ11 SINK1
+n_flows = 65536
+key_size = 8; dma_size
+key_offset = 128; dma_dst_offset
+hash_offset = 136; dma_hash_offset
+flowid_offset = 192
+
+[PIPELINE4]
+type = FLOW_ACTIONS
+core = 3
+pktq_in = SWQ8 SWQ9 SWQ10 SWQ11
+pktq_out = SWQ12 SWQ13 SWQ14 SWQ15
+n_flows = 65536
+n_meters_per_flow = 1
+flow_id_offset = 192; flowid_offset
+ip_hdr_offset = 278
+color_offset = 196; flowid_offset + sizeof(flow_id)
+
+[PIPELINE5]
+type = ROUTING
+core = 4
+pktq_in = SWQ12 SWQ13 SWQ14 SWQ15
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK2
+encap = ethernet_mpls
+mpls_color_mark = yes
+ip_hdr_offset = 278
+color_offset = 196; flowid_offset + sizeof(flow_id)
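The offsets used in the pipeline sections above all follow from the packet buffer layout listed at the top of this file; a small illustrative sketch of that arithmetic (the variable names are ours):

    # Header offsets within the packet buffer (see the layout table above).
    MBUF = 128
    HEADROOM = 128
    ETH_HDR = 14
    QINQ_HDR = 8

    eth_offset = MBUF + HEADROOM            # 256
    qinq_offset = eth_offset + ETH_HDR      # 270
    ip_hdr_offset = qinq_offset + QINQ_HDR  # 278, used by PIPELINE4 and PIPELINE5

    # PIPELINE2 copies 8 bytes starting at the first (outer) Ethertype, i.e.
    # 12 bytes into the Ethernet header (6B dst MAC + 6B src MAC), keeping
    # only the 12-bit VLAN IDs via the 00000FFF00000FFF mask.
    dma_src_offset = eth_offset + 12        # 268
    dma_dst_offset = 128                    # key_offset of PIPELINE3
    dma_hash_offset = dma_dst_offset + 8    # 136, hash_offset of PIPELINE3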
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_upstream.sh b/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_upstream.sh
new file mode 100644
index 00000000..5d574c1a
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/edge_router_upstream.sh
@@ -0,0 +1,33 @@
+#
+# run ./config/edge_router_upstream.sh
+#
+
+################################################################################
+# Firewall
+################################################################################
+p 1 firewall add default 4 #SINK0
+p 1 firewall add bulk ./config/edge_router_upstream_firewall.txt
+#p 1 firewall ls
+
+################################################################################
+# Flow Classification
+################################################################################
+p 3 flow add default 4 #SINK1
+p 3 flow add qinq bulk ./config/edge_router_upstream_flow.txt
+#p 3 flow ls
+
+################################################################################
+# Flow Actions - Metering and Policing
+################################################################################
+p 4 action flow bulk ./config/edge_router_upstream_action.txt
+#p 4 action flow ls
+
+################################################################################
+# Routing: Ether MPLS, ARP off
+################################################################################
+p 5 route add default 4 #SINK2
+p 5 route add 0.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 mpls 0:1
+p 5 route add 0.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 mpls 10:11
+p 5 route add 0.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 mpls 20:21
+p 5 route add 0.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 mpls 30:31
+#p 5 route ls
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/firewall.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/firewall.cfg
new file mode 100644
index 00000000..2f5dd9f6
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/firewall.cfg
@@ -0,0 +1,68 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; _______________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Firewall |
+; RXQ2.0 --->| |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |_______________|
+; |
+; +-----------> SINK0 (default rule)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FIREWALL
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+n_rules = 4096
+pkt_type = ipv4
+;pkt_type = vlan_ipv4
+;pkt_type = qinq_ipv4
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/firewall.sh b/src/seastar/dpdk/examples/ip_pipeline/config/firewall.sh
new file mode 100644
index 00000000..c83857ee
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/firewall.sh
@@ -0,0 +1,13 @@
+#
+# run ./config/firewall.sh
+#
+
+p 1 firewall add default 4 #SINK0
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.64.0.0 10 0 65535 0 65535 6 0xF port 1
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.128.0.0 10 0 65535 0 65535 6 0xF port 2
+p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.192.0.0 10 0 65535 0 65535 6 0xF port 3
+
+#p 1 firewall add bulk ./config/firewall.txt
+
+p 1 firewall ls
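A field-by-field reading of one of the rules above (the dictionary keys are our own labels for the positional CLI arguments, not ip_pipeline keywords):

    # p 1 firewall add priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0
    rule = {
        'priority': 1,
        'src_ip': '0.0.0.0', 'src_depth': 0,     # any source address
        'dst_ip': '100.0.0.0', 'dst_depth': 10,  # destination prefix 100.0.0.0/10
        'src_port_range': (0, 65535),            # any source port
        'dst_port_range': (0, 65535),            # any destination port
        'proto': 6, 'proto_mask': 0xF,           # TCP, with an exact protocol match
        'out_port': 0,                           # matching packets go to output port 0
    }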
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/firewall.txt b/src/seastar/dpdk/examples/ip_pipeline/config/firewall.txt
new file mode 100644
index 00000000..54cfffda
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/firewall.txt
@@ -0,0 +1,9 @@
+#
+# p <pipelineid> firewall add bulk ./config/firewall.txt
+# p <pipelineid> firewall del bulk ./config/firewall.txt
+#
+
+priority 1 ipv4 0.0.0.0 0 100.0.0.0 10 0 65535 0 65535 6 0xF port 0
+priority 1 ipv4 0.0.0.0 0 100.64.0.0 10 0 65535 0 65535 6 0xF port 1
+priority 1 ipv4 0.0.0.0 0 100.128.0.0 10 0 65535 0 65535 6 0xF port 2
+priority 1 ipv4 0.0.0.0 0 100.192.0.0 10 0 65535 0 65535 6 0xF port 3
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/flow.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/flow.cfg
new file mode 100644
index 00000000..cec990ab
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/flow.cfg
@@ -0,0 +1,72 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; ________________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Flow |
+; RXQ2.0 --->| Classification |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |________________|
+; |
+; +-----------> SINK0 (flow lookup miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 QinQ/IPv4/IPv6 header 270 8/20/40
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = FLOW_CLASSIFICATION
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+n_flows = 65536
+;key_size = 8 ; QinQ key size
+;key_offset = 268 ; QinQ key offset
+;key_mask = 00000FFF00000FFF ; QinQ key mask
+key_size = 16 ; IPv4 5-tuple key size
+key_offset = 278 ; IPv4 5-tuple key offset
+key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF ; IPv4 5-tuple key mask
+flowid_offset = 128
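A hedged reading of the IPv4 5-tuple key parameters above (byte positions follow from the packet buffer layout; the field names are our own labels):

    # The IPv4 header starts at byte 270, so key_offset = 278 (= 270 + 8)
    # places the 16-byte lookup key over these header bytes:
    #
    #   278  TTL               masked out (00)
    #   279  Protocol          kept       (FF)
    #   280  Header checksum   masked out (0000)
    #   282  Source IP         kept       (FFFFFFFF)
    #   286  Destination IP    kept       (FFFFFFFF)
    #   290  Source port       kept       (FFFF)
    #   292  Destination port  kept       (FFFF)
    #
    # i.e. key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF keeps exactly the
    # classic 5-tuple (protocol, src/dst address, src/dst port) and ignores
    # the TTL and checksum bytes that happen to fall inside the key window.
    key_offset = 270 + 8                            # 278
    key_mask = 0x00FF0000FFFFFFFFFFFFFFFFFFFFFFFF   # 16-byte mask as a hex constant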
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/flow.sh b/src/seastar/dpdk/examples/ip_pipeline/config/flow.sh
new file mode 100644
index 00000000..489c7079
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/flow.sh
@@ -0,0 +1,25 @@
+#
+# run ./config/flow.sh
+#
+
+################################################################################
+# Flow classification (QinQ)
+################################################################################
+#p 1 flow add default 4 #SINK0
+#p 1 flow add qinq 100 200 port 0 id 0
+#p 1 flow add qinq 101 201 port 1 id 1
+#p 1 flow add qinq 102 202 port 2 id 2
+#p 1 flow add qinq 103 203 port 3 id 3
+
+#p 1 flow add qinq bulk ./config/flow.txt
+
+################################################################################
+# Flow classification (IPv4 5-tuple)
+################################################################################
+p 1 flow add default 4 #SINK0
+p 1 flow add ipv4 100.0.0.10 200.0.0.10 100 200 6 port 0 id 0
+p 1 flow add ipv4 100.0.0.11 200.0.0.11 101 201 6 port 1 id 1
+p 1 flow add ipv4 100.0.0.12 200.0.0.12 102 202 6 port 2 id 2
+p 1 flow add ipv4 100.0.0.13 200.0.0.13 103 203 6 port 3 id 3
+
+#p 1 flow add ipv4 bulk ./config/flow.txt
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/flow.txt b/src/seastar/dpdk/examples/ip_pipeline/config/flow.txt
new file mode 100644
index 00000000..c1a141dd
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/flow.txt
@@ -0,0 +1,17 @@
+#
+# p <pipelineid> flow add qinq bulk ./config/flow.txt
+#
+
+#qinq 100 200 port 0 id 0
+#qinq 101 201 port 1 id 1
+#qinq 102 202 port 2 id 2
+#qinq 103 203 port 3 id 3
+
+#
+# p <pipelineid> flow add ipv4 bulk ./config/flow.txt
+#
+
+ipv4 100.0.0.10 200.0.0.10 100 200 6 port 0 id 0
+ipv4 100.0.0.11 200.0.0.11 101 201 6 port 1 id 1
+ipv4 100.0.0.12 200.0.0.12 102 202 6 port 2 id 2
+ipv4 100.0.0.13 200.0.0.13 103 203 6 port 3 id 3
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/ip_pipeline.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/ip_pipeline.cfg
new file mode 100644
index 00000000..095ed25e
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/ip_pipeline.cfg
@@ -0,0 +1,9 @@
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/ip_pipeline.sh b/src/seastar/dpdk/examples/ip_pipeline/config/ip_pipeline.sh
new file mode 100644
index 00000000..4fca2597
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/ip_pipeline.sh
@@ -0,0 +1,5 @@
+#
+# run ./config/ip_pipeline.sh
+#
+
+p 1 ping
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/kni.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/kni.cfg
new file mode 100644
index 00000000..cea208b4
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/kni.cfg
@@ -0,0 +1,67 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;
+; ______________ ______________________
+; | | KNI0 | |
+; RXQ0.0 --->| |------->|--+ |
+; | | KNI1 | | br0 |
+; TXQ1.0 <---| |<-------|<-+ |
+; | Pass-through | | Linux Kernel |
+; | (P1) | | Network Stack |
+; | | KNI1 | |
+; RXQ1.0 --->| |------->|--+ |
+; | | KNI0 | | br0 |
+; TXQ0.0 <---| |<-------|<-+ |
+; |______________| |______________________|
+;
+; Insert Linux kernel KNI module:
+; [Linux]$ insmod rte_kni.ko
+;
+; Configure Linux kernel bridge between KNI0 and KNI1 interfaces:
+; [Linux]$ ifconfig KNI0 up
+; [Linux]$ ifconfig KNI1 up
+; [Linux]$ brctl addbr "br0"
+; [Linux]$ brctl addif br0 KNI0
+; [Linux]$ brctl addif br0 KNI1
+; [Linux]$ ifconfig br0 up
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = RXQ0.0 KNI1 RXQ1.0 KNI0
+pktq_out = KNI0 TXQ1.0 KNI1 TXQ0.0
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/l2fwd.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/l2fwd.cfg
new file mode 100644
index 00000000..a1df9e6a
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/l2fwd.cfg
@@ -0,0 +1,58 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+;
+; The pass-through pipeline below connects the input ports to the output ports
+; as follows: RXQ0.0 -> TXQ1.0, RXQ1.0 -> TXQ0.0, RXQ2.0 -> TXQ3.0 and
+; RXQ3.0 -> TXQ2.0.
+; ________________
+; RXQ0.0 --->|................|---> TXQ1.0
+; | |
+; RXQ1.0 --->|................|---> TXQ0.0
+; | Pass-through |
+; RXQ2.0 --->|................|---> TXQ3.0
+; | |
+; RXQ3.0 --->|................|---> TXQ2.0
+; |________________|
+;
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ1.0 TXQ0.0 TXQ3.0 TXQ2.0
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd.cfg
new file mode 100644
index 00000000..02c8f36f
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd.cfg
@@ -0,0 +1,68 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; _______________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Routing |
+; RXQ2.0 --->| |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |_______________|
+; |
+; +-----------> SINK0 (route miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+encap = ethernet
+;encap = ethernet_qinq
+;encap = ethernet_mpls
+ip_hdr_offset = 270
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd.sh b/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd.sh
new file mode 100644
index 00000000..47406aa4
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd.sh
@@ -0,0 +1,33 @@
+#
+# run ./config/l3fwd.sh
+#
+
+################################################################################
+# Routing: encap = ethernet, arp = off
+################################################################################
+p 1 route add default 4 #SINK0
+p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0
+p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1
+p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2
+p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3
+p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_qinq, arp = off
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 qinq 1000 2000
+#p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 qinq 1001 2001
+#p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 qinq 1002 2002
+#p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 qinq 1003 2003
+#p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_mpls, arp = off
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0 mpls 1000:2000
+#p 1 route add 100.64.0.0 10 port 1 ether a1:b1:c1:d1:e1:f1 mpls 1001:2001
+#p 1 route add 100.128.0.0 10 port 2 ether a2:b2:c2:d2:e2:f2 mpls 1002:2002
+#p 1 route add 100.192.0.0 10 port 3 ether a3:b3:c3:d3:e3:f3 mpls 1003:2003
+#p 1 route ls
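A hedged reading of the positional arguments in the route commands above (the labels are ours; with arp = off the next hop is given directly as a MAC address):

    # p 1 route add 100.0.0.0 10 port 0 ether a0:b0:c0:d0:e0:f0
    route = {
        'dst_prefix': '100.0.0.0', 'depth': 10,  # match 100.0.0.0/10
        'out_port': 0,                           # routing pipeline output port (TXQ0.0)
        'next_hop_mac': 'a0:b0:c0:d0:e0:f0',     # written into the output Ethernet header
    }
    # With encap = ethernet_qinq the command ends in "qinq <svlan> <cvlan>",
    # with encap = ethernet_mpls in "mpls <label0>:<label1>" (see the
    # commented-out sections above).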
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd_arp.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd_arp.cfg
new file mode 100644
index 00000000..2c63c8fd
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd_arp.cfg
@@ -0,0 +1,70 @@
+; BSD LICENSE
+;
+; Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; _______________
+; RXQ0.0 --->| |---> TXQ0.0
+; | |
+; RXQ1.0 --->| |---> TXQ1.0
+; | Routing |
+; RXQ2.0 --->| |---> TXQ2.0
+; | |
+; RXQ3.0 --->| |---> TXQ3.0
+; |_______________|
+; |
+; +-----------> SINK0 (route miss)
+;
+; Input packet: Ethernet/IPv4
+;
+; Packet buffer layout:
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK0
+encap = ethernet
+;encap = ethernet_qinq
+;encap = ethernet_mpls
+n_arp_entries = 1024
+ip_hdr_offset = 270
+arp_key_offset = 128
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd_arp.sh b/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd_arp.sh
new file mode 100644
index 00000000..20bea582
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/l3fwd_arp.sh
@@ -0,0 +1,43 @@
+#
+# run ./config/l3fwd_arp.sh
+#
+
+################################################################################
+# ARP
+################################################################################
+p 1 arp add default 4 #SINK0
+p 1 arp add 0 10.0.0.1 a0:b0:c0:d0:e0:f0
+p 1 arp add 1 11.0.0.1 a1:b1:c1:d1:e1:f1
+p 1 arp add 2 12.0.0.1 a2:b2:c2:d2:e2:f2
+p 1 arp add 3 13.0.0.1 a3:b3:c3:d3:e3:f3
+p 1 arp ls
+
+################################################################################
+# Routing: encap = ethernet, arp = on
+################################################################################
+p 1 route add default 4 #SINK0
+p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1
+p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1
+p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1
+p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1
+p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_qinq, arp = on
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1 qinq 1000 2000
+#p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1 qinq 1001 2001
+#p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1 qinq 1002 2002
+#p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1 qinq 1003 2003
+#p 1 route ls
+
+################################################################################
+# Routing: encap = ethernet_mpls, arp = on
+################################################################################
+#p 1 route add default 4 #SINK0
+#p 1 route add 100.0.0.0 10 port 0 ether 10.0.0.1 mpls 1000:2000
+#p 1 route add 100.64.0.0 10 port 1 ether 11.0.0.1 mpls 1001:2001
+#p 1 route add 100.128.0.0 10 port 2 ether 12.0.0.1 mpls 1002:2002
+#p 1 route add 100.192.0.0 10 port 3 ether 13.0.0.1 mpls 1003:2003
+#p 1 route ls
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/network_layers.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/network_layers.cfg
new file mode 100644
index 00000000..397b5d77
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/network_layers.cfg
@@ -0,0 +1,227 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; The diagram below shows how additional protocol components can be plugged into
+; the IP layer implemented by the ip_pipeline application. Pick your favorite
+; open-source components for dynamic ARP, ICMP, UDP or TCP termination, etc.,
+; and connect them through SWQs to the IP infrastructure.
+;
+; The input packets with local destination are sent to the UDP/TCP applications
+; while the input packets with remote destination are routed back to the
+; network. Additional features can easily be added to this setup:
+; * IP Reassembly: add SWQs with IP reassembly enabled (typically required for
+; the input traffic with local destination);
+; * IP Fragmentation: add SWQs with IP fragmentation enabled (typically
+; required to enforce the MTU for the routed output traffic);
+; * Traffic Metering: add Flow Action pipeline instances (e.g. for metering the
+; TCP connections or ICMP input traffic);
+; * Traffic Management: add TMs for the required output LINKs;
+; * Protocol encapsulations (QinQ, MPLS) for the output packets: part of the
+; routing pipeline configuration.
+;
+; _________ _________
+; | | | |
+; | UDP | | TCP |
+; | App | | App |
+; |_________| |_________|
+; ^ | ^ |
+; __|___V__ __|___V__
+; | | SWQ0 (UDP TX) | | SWQ1 (TCP TX)
+; | UDP |-------+ | TCP |------------+
+; | | | | | |
+; |_________| | |_________| |
+; ^ | ^ |
+; | SWQ2 | | SWQ3 |
+; | (UDP RX) | | (TCP RX) |
+; ____|____ | ____|____ |
+; | | | | | |
+; RXQ<0..3>.1 ------>|Firewall +--->| | +------>| Flow +--->| |
+; (UDP local dest) | (P2) | SINK0 | | | (P3) | SINK1 |
+; |_________| (Deny)| | |_________| (RST) |
+; RXQ<0..3>.2 -------------------------|-----+ |
+; (TCP local dest) | |
+; | +------------------------------+
+; | |
+; _V_____V_
+; | |
+; | Routing | TXQ<0..3>.0
+; RXQ<0..3>.0 ---------------------->| & ARP +----------------------------->
+; (IP remote dest) | (P1) |
+; |_________|
+; | ^ |
+; SWQ4 +-------------+ | | SWQ5 (ARP miss)
+; (Route miss) | | +------------+
+; | +-------------+ |
+; ___V__|__ SWQ6 ____V____
+; | | (ICMP TX) | | TXQ<0..3>.1
+; RXQ<0..3>.3 ------>| ICMP | +------>| Dyn ARP +------------->
+; (IP local dest) | | | | |
+; |_________| | |_________|
+; RXQ<0..3>.4 -------------------------------+
+; (ARP)
+;
+; This configuration file implements the diagram presented below, where the
+; dynamic ARP, ICMP, UDP and TCP components have been stubbed out and replaced
+; with loop-back and packet drop devices.
+;
+; _________ _________
+; | | SWQ0 (UDP TX) | | SWQ1 (TCP TX)
+;            |Loopback |-------+       |Loopback |------------+
+; | (P4) | | | (P5) | |
+; |_________| | |_________| |
+; ^ | ^ |
+; | SWQ2 | | SWQ3 |
+; | (UDP RX) | | (TCP RX) |
+; ____|____ | ____|____ |
+; | | | | | |
+; RXQ<0..3>.1 ------>|Firewall +--->| | +------>| Flow +--->| |
+; (UDP local dest) | (P2) | SINK0 | | | (P3) | SINK1 |
+; |_________| (Deny)| | |_________| (RST) |
+; RXQ<0..3>.2 -------------------------|-----+ |
+; (TCP local dest) | |
+; | +------------------------------+
+; | |
+; _V_____V_
+; | |
+; | Routing | TXQ<0..3>.0
+; RXQ<0..3>.0 ---------------------->| & ARP +----------------------------->
+; (IP remote dest) | (P1) |
+; |_________|
+; | |
+; SINK2 |<---+ +--->| SINK3
+; (Route miss) (ARP miss)
+;
+; _________ _________
+; | | | |
+; RXQ<0..3>.3 ------>| Drop +--->| SINK<4..7> +------>| Drop +--->| SINK<8..11>
+; (IP local dest) | (P6) | (IP local dest) | | (P7) | (ARP)
+; |_________| | |_________|
+; RXQ<0..3>.4 ------------------------------------+
+; (ARP)
+;
+;
+; Input packet: Ethernet/IPv4 or Ethernet/ARP
+; Output packet: Ethernet/IPv4 or Ethernet/ARP
+;
+; Packet buffer layout (for input IPv4 packets):
+; # Field Name Offset (Bytes) Size (Bytes)
+; 0 Mbuf 0 128
+; 1 Headroom 128 128
+; 2 Ethernet header 256 14
+; 3 IPv4 header 270 20
+; 4 ICMP/UDP/TCP header 290 8/8/20
+
+[EAL]
+log_level = 0
+
+[LINK0]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[LINK1]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[LINK2]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[LINK3]
+udp_local_q = 1
+tcp_local_q = 2
+ip_local_q = 3
+arp_q = 4
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = ROUTING
+core = 1
+pktq_in = RXQ0.0 RXQ1.0 RXQ2.0 RXQ3.0 SWQ0 SWQ1
+pktq_out = TXQ0.0 TXQ1.0 TXQ2.0 TXQ3.0 SINK2 SINK3
+port_local_dest = 4 ; SINK2 (Drop)
+n_arp_entries = 1000
+ip_hdr_offset = 270
+arp_key_offset = 128
+
+[PIPELINE2]
+type = FIREWALL
+core = 1
+pktq_in = RXQ0.1 RXQ1.1 RXQ2.1 RXQ3.1
+pktq_out = SWQ2 SINK0
+n_rules = 4096
+
+[PIPELINE3]
+type = FLOW_CLASSIFICATION
+core = 1
+pktq_in = RXQ0.2 RXQ1.2 RXQ2.2 RXQ3.2
+pktq_out = SWQ3 SINK1
+n_flows = 65536
+key_size = 16 ; IPv4 5-tuple key size
+key_offset = 278 ; IPv4 5-tuple key offset
+key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF ; IPv4 5-tuple key mask
+flowid_offset = 128 ; Flow ID effectively acts as TCP socket ID
+
+[PIPELINE4]
+type = PASS-THROUGH ; Loop-back (UDP place-holder)
+core = 1
+pktq_in = SWQ2
+pktq_out = SWQ0
+swap = 282 286 ; IPSRC <-> IPDST
+swap = 290 292 ; PORTSRC <-> PORTDST
+
+[PIPELINE5]
+type = PASS-THROUGH ; Loop-back (TCP place-holder)
+core = 1
+pktq_in = SWQ3
+pktq_out = SWQ1
+swap = 282 286 ; IPSRC <-> IPDST
+swap = 290 292 ; PORTSRC <-> PORTDST
+
+[PIPELINE6]
+type = PASS-THROUGH ; Drop (ICMP place-holder)
+core = 1
+pktq_in = RXQ0.3 RXQ1.3 RXQ2.3 RXQ3.3
+pktq_out = SINK4 SINK5 SINK6 SINK7
+
+[PIPELINE7]
+type = PASS-THROUGH ; Drop (Dynamic ARP place-holder)
+core = 1
+pktq_in = RXQ0.4 RXQ1.4 RXQ2.4 RXQ3.4
+pktq_out = SINK8 SINK9 SINK10 SINK11
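The swap offsets used by the two loop-back pipelines above follow directly from the packet buffer layout; a small illustrative sketch (variable names are ours):

    # IPv4 header at byte 270 (see the layout comment at the top of the file).
    ip_hdr = 270
    ip_src = ip_hdr + 12    # 282  \ "swap = 282 286" exchanges the source and
    ip_dst = ip_hdr + 16    # 286  /  destination IP addresses
    l4_sport = ip_hdr + 20  # 290  \ "swap = 290 292" exchanges the UDP/TCP
    l4_dport = ip_hdr + 22  # 292  /  source and destination ports
    # Each stubbed "application" therefore just bounces packets back to their
    # sender, standing in for real UDP/TCP termination.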
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/network_layers.sh b/src/seastar/dpdk/examples/ip_pipeline/config/network_layers.sh
new file mode 100644
index 00000000..449b0069
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/network_layers.sh
@@ -0,0 +1,79 @@
+#
+# run ./config/network_layers.sh
+#
+
+################################################################################
+# Link configuration
+################################################################################
+# Routes added implicitly when links are brought UP:
+# IP Prefix = 10.0.0.1/16 => (Port 0, Local)
+# IP Prefix = 10.0.0.1/32 => (Port 4, Local)
+# IP Prefix = 10.1.0.1/16 => (Port 1, Local)
+# IP Prefix = 10.1.0.1/32 => (Port 4, Local)
+# IP Prefix = 10.2.0.1/16 => (Port 2, Local)
+# IP Prefix = 10.2.0.1/32 => (Port 4, Local)
+# IP Prefix = 10.3.0.1/16 => (Port 3, Local)
+# IP Prefix = 10.3.0.1/32 => (Port 4, Local)
+link 0 down
+link 1 down
+link 2 down
+link 3 down
+link 0 config 10.0.0.1 16
+link 1 config 10.1.0.1 16
+link 2 config 10.2.0.1 16
+link 3 config 10.3.0.1 16
+link 0 up
+link 1 up
+link 2 up
+link 3 up
+#link ls
+
+################################################################################
+# Static ARP
+################################################################################
+p 1 arp add default 5 #SINK3
+p 1 arp add 0 10.0.0.2 a0:b0:c0:d0:e0:f0
+p 1 arp add 1 10.1.0.2 a1:b1:c1:d1:e1:f1
+p 1 arp add 2 10.2.0.2 a2:b2:c2:d2:e2:f2
+p 1 arp add 3 10.3.0.2 a3:b3:c3:d3:e3:f3
+#p 1 arp ls
+
+################################################################################
+# Routes
+################################################################################
+p 1 route add default 4 #SINK2
+p 1 route add 100.0.0.0 16 port 0 ether 10.0.0.2
+p 1 route add 100.1.0.0 16 port 1 ether 10.1.0.2
+p 1 route add 100.2.0.0 16 port 2 ether 10.2.0.2
+p 1 route add 100.3.0.0 16 port 3 ether 10.3.0.2
+#p 1 route ls
+
+################################################################################
+# Local destination UDP traffic
+################################################################################
+# Prio = Lowest: [SA = ANY, DA = ANY, SP = ANY, DP = ANY, PROTO = ANY] => Drop
+# Prio = 1 (High): [SA = ANY, DA = 10.0.0.1, SP = ANY, DP = 1000, PROTO = UDP] => Allow
+# Prio = 1 (High): [SA = ANY, DA = 10.1.0.1, SP = ANY, DP = 1001, PROTO = UDP] => Allow
+# Prio = 1 (High): [SA = ANY, DA = 10.2.0.1, SP = ANY, DP = 1002, PROTO = UDP] => Allow
+# Prio = 1 (High): [SA = ANY, DA = 10.3.0.1, SP = ANY, DP = 1003, PROTO = UDP] => Allow
+p 2 firewall add default 1 #SINK0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.0.0.1 32 0 65535 1000 1000 17 0xF port 0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.1.0.1 32 0 65535 1001 1001 17 0xF port 0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.2.0.1 32 0 65535 1002 1002 17 0xF port 0
+p 2 firewall add priority 1 ipv4 0.0.0.0 0 10.3.0.1 32 0 65535 1003 1003 17 0xF port 0
+#p 2 firewall ls
+
+################################################################################
+# Local destination TCP traffic
+################################################################################
+# Unknown connection => Drop
+# TCP [SA = 100.0.0.10, DA = 10.0.0.1, SP = 1000, DP = 80] => socket ID = 0
+# TCP [SA = 100.1.0.10, DA = 10.1.0.1, SP = 1001, DP = 80] => socket ID = 1
+# TCP [SA = 100.2.0.10, DA = 10.2.0.1, SP = 1002, DP = 80] => socket ID = 2
+# TCP [SA = 100.3.0.10, DA = 10.3.0.1, SP = 1003, DP = 80] => socket ID = 3
+p 3 flow add default 1 #SINK1
+p 3 flow add ipv4 100.0.0.10 10.0.0.1 1000 80 6 port 0 id 0
+p 3 flow add ipv4 100.1.0.10 10.1.0.1 1001 80 6 port 0 id 1
+p 3 flow add ipv4 100.2.0.10 10.2.0.1 1002 80 6 port 0 id 2
+p 3 flow add ipv4 100.3.0.10 10.3.0.1 1003 80 6 port 0 id 3
+#p 3 flow ls
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/pipeline-to-core-mapping.py b/src/seastar/dpdk/examples/ip_pipeline/config/pipeline-to-core-mapping.py
new file mode 100755
index 00000000..7a4eaa20
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/pipeline-to-core-mapping.py
@@ -0,0 +1,935 @@
+#!/usr/bin/env python
+
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# This script maps the set of pipelines identified (MASTER pipelines are
+# ignored) from the input configuration file to the set of cores
+# provided as an input argument and creates configuration files for each of
+# the mapping combinations.
+#
+
+from __future__ import print_function
+from collections import namedtuple
+import argparse
+import array
+import errno
+import itertools
+import os
+import re
+import sys
+
+# default values
+enable_stage0_traceout = 1
+enable_stage1_traceout = 1
+enable_stage2_traceout = 1
+
+enable_stage1_fileout = 1
+enable_stage2_fileout = 1
+
+Constants = namedtuple('Constants', ['MAX_CORES', 'MAX_PIPELINES'])
+constants = Constants(16, 64)
+
+# pattern for physical core
+pattern_phycore = r'^(s|S)\d(c|C)[1-9][0-9]*$'
+reg_phycore = re.compile(pattern_phycore)
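+# e.g. "s0c1" and "S1C12" match, "s0c0" does not (COREID starts at 1)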
+
+
+def popcount(mask):
+ return bin(mask).count("1")
+
+
+def len2mask(length):
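+    # returns a mask with the `length` least significant bits set,
+    # e.g. len2mask(3) == 0b111 == 7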
+ if (length == 0):
+ return 0
+
+ if (length > 64):
+ sys.exit('error: len2mask - length %i > 64. exiting' % length)
+
+ return int('1' * length, 2)
+
+
+def bitstring_write(n, n_bits):
+ tmpstr = ""
+ if (n_bits > 64):
+ return
+
+ i = n_bits - 1
+ while (i >= 0):
+ cond = (n & (1 << i))
+ if (cond):
+ print('1', end='')
+ tmpstr += '1'
+ else:
+ print('0', end='')
+ tmpstr += '0'
+ i -= 1
+ return tmpstr
+
+
+class Cores0:
+
+ def __init__(self):
+ self.n_pipelines = 0
+
+
+class Cores1:
+
+ def __init__(self):
+ self.pipelines = 0
+ self.n_pipelines = 0
+
+
+class Cores2:
+
+ def __init__(self):
+ self.pipelines = 0
+ self.n_pipelines = 0
+ self.counter = 0
+ self.counter_max = 0
+ self.bitpos = array.array(
+ "L", itertools.repeat(0, constants.MAX_PIPELINES))
+
+
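+# Roughly, the three Context classes below implement a three-stage
+# enumeration: stage 0 picks how many pipelines each core gets (a
+# non-increasing partition of n_pipelines over n_cores), stage 1 picks
+# which concrete pipelines land on each core (non-overlapping bitmasks),
+# and stage 2 splits each core's pipelines between its two hyper-threads
+# (HT0/HT1).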
+class Context0:
+
+ def __init__(self):
+ self.cores = [Cores0() for i in range(0, constants.MAX_CORES)]
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.n_pipelines0 = 0
+ self.pos = 0
+ self.file_comment = ""
+ self.ctx1 = None
+ self.ctx2 = None
+
+ def stage0_print(self):
+ print('printing Context0 obj')
+ print('c0.cores(n_pipelines) = [ ', end='')
+ for cores_count in range(0, constants.MAX_CORES):
+ print(self.cores[cores_count].n_pipelines, end=' ')
+ print(']')
+ print('c0.n_cores = %d' % self.n_cores)
+ print('c0.n_pipelines = %d' % self.n_pipelines)
+ print('c0.n_pipelines0 = %d' % self.n_pipelines0)
+ print('c0.pos = %d' % self.pos)
+ print('c0.file_comment = %s' % self.file_comment)
+ if (self.ctx1 is not None):
+ print('c0.ctx1 = ', end='')
+ print(repr(self.ctx1))
+ else:
+ print('c0.ctx1 = None')
+
+ if (self.ctx2 is not None):
+ print('c0.ctx2 = ', end='')
+ print(repr(self.ctx2))
+ else:
+ print('c0.ctx2 = None')
+
+ def stage0_init(self, num_cores, num_pipelines, ctx1, ctx2):
+ self.n_cores = num_cores
+ self.n_pipelines = num_pipelines
+ self.ctx1 = ctx1
+ self.ctx2 = ctx2
+
+ def stage0_process(self):
+ # stage0 init
+ self.cores[0].n_pipelines = self.n_pipelines
+ self.n_pipelines0 = 0
+ self.pos = 1
+
+ while True:
+ # go forward
+ while True:
+ if ((self.pos < self.n_cores) and (self.n_pipelines0 > 0)):
+ self.cores[self.pos].n_pipelines = min(
+ self.cores[self.pos - 1].n_pipelines,
+ self.n_pipelines0)
+ self.n_pipelines0 -= self.cores[self.pos].n_pipelines
+ self.pos += 1
+ else:
+ break
+
+ # check solution
+ if (self.n_pipelines0 == 0):
+ self.stage0_log()
+ self.ctx1.stage1_init(self, self.ctx2) # self is object c0
+ self.ctx1.stage1_process()
+
+ # go backward
+ while True:
+ if (self.pos == 0):
+ return
+
+ self.pos -= 1
+ if ((self.cores[self.pos].n_pipelines > 1) and
+ (self.pos != (self.n_cores - 1))):
+ break
+
+ self.n_pipelines0 += self.cores[self.pos].n_pipelines
+ self.cores[self.pos].n_pipelines = 0
+
+ # rearm
+ self.cores[self.pos].n_pipelines -= 1
+ self.n_pipelines0 += 1
+ self.pos += 1
+
+ def stage0_log(self):
+ tmp_file_comment = ""
+ if(enable_stage0_traceout != 1):
+ return
+
+ print('STAGE0: ', end='')
+ tmp_file_comment += 'STAGE0: '
+ for cores_count in range(0, self.n_cores):
+ print('C%d = %d\t'
+ % (cores_count,
+ self.cores[cores_count].n_pipelines), end='')
+ tmp_file_comment += "C{} = {}\t".format(
+ cores_count, self.cores[cores_count].n_pipelines)
+ # end for
+ print('')
+ self.ctx1.stage0_file_comment = tmp_file_comment
+ self.ctx2.stage0_file_comment = tmp_file_comment
+
+
+class Context1:
+ _fileTrace = None
+
+ def __init__(self):
+ self.cores = [Cores1() for i in range(constants.MAX_CORES)]
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ self.stage0_file_comment = ""
+ self.stage1_file_comment = ""
+
+ self.ctx2 = None
+ self.arr_pipelines2cores = []
+
+ def stage1_reset(self):
+ for i in range(constants.MAX_CORES):
+ self.cores[i].pipelines = 0
+ self.cores[i].n_pipelines = 0
+
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ self.ctx2 = None
+ # clear list
+ del self.arr_pipelines2cores[:]
+
+ def stage1_print(self):
+ print('printing Context1 obj')
+ print('ctx1.cores(pipelines,n_pipelines) = [ ', end='')
+ for cores_count in range(0, constants.MAX_CORES):
+ print('(%d,%d)' % (self.cores[cores_count].pipelines,
+ self.cores[cores_count].n_pipelines), end=' ')
+ print(']')
+ print('ctx1.n_cores = %d' % self.n_cores)
+ print('ctx1.n_pipelines = %d' % self.n_pipelines)
+ print('ctx1.pos = %d' % self.pos)
+ print('ctx1.stage0_file_comment = %s' % self.stage0_file_comment)
+ print('ctx1.stage1_file_comment = %s' % self.stage1_file_comment)
+ if (self.ctx2 is not None):
+ print('ctx1.ctx2 = ', end='')
+ print(self.ctx2)
+ else:
+ print('ctx1.ctx2 = None')
+
+ def stage1_init(self, c0, ctx2):
+ self.stage1_reset()
+ self.n_cores = 0
+ while (c0.cores[self.n_cores].n_pipelines > 0):
+ self.n_cores += 1
+
+ self.n_pipelines = c0.n_pipelines
+ self.ctx2 = ctx2
+
+ self.arr_pipelines2cores = [0] * self.n_pipelines
+
+ i = 0
+ while (i < self.n_cores):
+ self.cores[i].n_pipelines = c0.cores[i].n_pipelines
+ i += 1
+
+ def stage1_process(self):
+ pipelines_max = len2mask(self.n_pipelines)
+ while True:
+ pos = 0
+ overlap = 0
+
+ if (self.cores[self.pos].pipelines == pipelines_max):
+ if (self.pos == 0):
+ return
+
+ self.cores[self.pos].pipelines = 0
+ self.pos -= 1
+ continue
+
+ self.cores[self.pos].pipelines += 1
+ if (popcount(self.cores[self.pos].pipelines) !=
+ self.cores[self.pos].n_pipelines):
+ continue
+
+ overlap = 0
+ pos = 0
+ while (pos < self.pos):
+ if ((self.cores[self.pos].pipelines) &
+ (self.cores[pos].pipelines)):
+ overlap = 1
+ break
+ pos += 1
+
+ if (overlap):
+ continue
+
+ if ((self.pos > 0) and
+ ((self.cores[self.pos].n_pipelines) ==
+ (self.cores[self.pos - 1].n_pipelines)) and
+ ((self.cores[self.pos].pipelines) <
+ (self.cores[self.pos - 1].pipelines))):
+ continue
+
+ if (self.pos == self.n_cores - 1):
+ self.stage1_log()
+ self.ctx2.stage2_init(self)
+ self.ctx2.stage2_process()
+
+ if (self.pos == 0):
+ return
+
+ self.cores[self.pos].pipelines = 0
+ self.pos -= 1
+ continue
+
+ self.pos += 1
+
+ def stage1_log(self):
+ tmp_file_comment = ""
+ if(enable_stage1_traceout == 1):
+ print('STAGE1: ', end='')
+ tmp_file_comment += 'STAGE1: '
+ i = 0
+ while (i < self.n_cores):
+ print('C%d = [' % i, end='')
+ tmp_file_comment += "C{} = [".format(i)
+
+ j = self.n_pipelines - 1
+ while (j >= 0):
+ cond = ((self.cores[i].pipelines) & (1 << j))
+ if (cond):
+ print('1', end='')
+ tmp_file_comment += '1'
+ else:
+ print('0', end='')
+ tmp_file_comment += '0'
+ j -= 1
+
+ print(']\t', end='')
+ tmp_file_comment += ']\t'
+ i += 1
+
+ print('\n', end='')
+ self.stage1_file_comment = tmp_file_comment
+ self.ctx2.stage1_file_comment = tmp_file_comment
+
+        # check if file tracing is enabled
+ if(enable_stage1_fileout != 1):
+ return
+
+ # spit out the combination to file
+ self.stage1_process_file()
+
+ def stage1_updateCoresInBuf(self, nPipeline, sCore):
+ rePipeline = self._fileTrace.arr_pipelines[nPipeline]
+        rePipeline = rePipeline.replace("[", r"\[").replace("]", r"\]")
+        reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
+ sSubs = 'core = ' + sCore + '\n'
+
+ reg_pipeline = re.compile(rePipeline)
+ search_match = reg_pipeline.search(self._fileTrace.in_buf)
+
+ if(search_match):
+ pos = search_match.start()
+ substr1 = self._fileTrace.in_buf[:pos]
+ substr2 = self._fileTrace.in_buf[pos:]
+ substr2 = re.sub(reCore, sSubs, substr2, 1)
+ self._fileTrace.in_buf = substr1 + substr2
+
+ def stage1_process_file(self):
+ outFileName = os.path.join(self._fileTrace.out_path,
+ self._fileTrace.prefix_outfile)
+ outFileName += "_{}CoReS".format(self.n_cores)
+
+ i = 0 # represents core number
+ while (i < self.n_cores):
+ j = self.n_pipelines - 1
+ pipeline_idx = 0
+ while(j >= 0):
+ cond = ((self.cores[i].pipelines) & (1 << j))
+ if (cond):
+ # update the pipelines array to match the core
+ # only in case of cond match
+ self.arr_pipelines2cores[
+ pipeline_idx] = fileTrace.in_physical_cores[i]
+
+ j -= 1
+ pipeline_idx += 1
+
+ i += 1
+
+ # update the in_buf as per the arr_pipelines2cores
+ for pipeline_idx in range(len(self.arr_pipelines2cores)):
+ outFileName += "_{}".format(self.arr_pipelines2cores[pipeline_idx])
+ self.stage1_updateCoresInBuf(
+ pipeline_idx, self.arr_pipelines2cores[pipeline_idx])
+
+ # by now the in_buf is all set to be written to file
+ outFileName += self._fileTrace.suffix_outfile
+ outputFile = open(outFileName, "w")
+
+ # write out the comments
+ strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
+ outputFile.write(
+ "; =============== Pipeline-to-Core Mapping ================\n"
+ "; Generated from file {}\n"
+ "; Input pipelines = {}\n"
+ "; Input cores = {}\n"
+ "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {}\n"
+ .format(
+ self._fileTrace.in_file_namepath,
+ fileTrace.arr_pipelines,
+ fileTrace.in_physical_cores,
+ self._fileTrace.n_pipelines,
+ self._fileTrace.n_cores,
+ strTruncated,
+ self._fileTrace.hyper_thread))
+
+ outputFile.write(
+ "; {stg0cmt}\n"
+ "; {stg1cmt}\n"
+ "; ========================================================\n"
+ "; \n"
+ .format(
+ stg0cmt=self.stage0_file_comment,
+ stg1cmt=self.stage1_file_comment))
+
+ # write buffer contents
+ outputFile.write(self._fileTrace.in_buf)
+ outputFile.flush()
+ outputFile.close()
+
+
+class Context2:
+ _fileTrace = None
+
+ def __init__(self):
+ self.cores = [Cores2() for i in range(constants.MAX_CORES)]
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ self.stage0_file_comment = ""
+ self.stage1_file_comment = ""
+ self.stage2_file_comment = ""
+
+        # each entry maps a pipeline to a core, stored as a string;
+        # pipelines range from 1 to n but are kept in a zero-based array
+ self.arr2_pipelines2cores = []
+
+ def stage2_print(self):
+ print('printing Context2 obj')
+ print('ctx2.cores(pipelines, n_pipelines, counter, counter_max) =')
+ for cores_count in range(0, constants.MAX_CORES):
+ print('core[%d] = (%d,%d,%d,%d)' % (
+ cores_count,
+ self.cores[cores_count].pipelines,
+ self.cores[cores_count].n_pipelines,
+ self.cores[cores_count].counter,
+ self.cores[cores_count].counter_max))
+
+ print('ctx2.n_cores = %d' % self.n_cores, end='')
+ print('ctx2.n_pipelines = %d' % self.n_pipelines, end='')
+ print('ctx2.pos = %d' % self.pos)
+        print('ctx2.stage0_file_comment = %s' %
+              self.stage0_file_comment)
+        print('ctx2.stage1_file_comment = %s' %
+              self.stage1_file_comment)
+        print('ctx2.stage2_file_comment = %s' %
+              self.stage2_file_comment)
+
+ def stage2_reset(self):
+        for i in range(0, constants.MAX_CORES):
+            self.cores[i].pipelines = 0
+            self.cores[i].n_pipelines = 0
+            self.cores[i].counter = 0
+            self.cores[i].counter_max = 0
+
+            for idx in range(0, constants.MAX_PIPELINES):
+                self.cores[i].bitpos[idx] = 0
+
+ self.n_cores = 0
+ self.n_pipelines = 0
+ self.pos = 0
+ # clear list
+ del self.arr2_pipelines2cores[:]
+
+ def bitpos_load(self, coreidx):
+ i = j = 0
+ while (i < self.n_pipelines):
+ if ((self.cores[coreidx].pipelines) &
+ (1 << i)):
+ self.cores[coreidx].bitpos[j] = i
+ j += 1
+ i += 1
+ self.cores[coreidx].n_pipelines = j
+
+ def bitpos_apply(self, in_buf, pos, n_pos):
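+        # scatter the n_pos least significant bits of in_buf to the bit
+        # positions listed in pos[]; used to expand a per-core counter
+        # back into a pipeline bitmask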
+ out = 0
+ for i in range(0, n_pos):
+ out |= (in_buf & (1 << i)) << (pos[i] - i)
+
+ return out
+
+ def stage2_init(self, ctx1):
+ self.stage2_reset()
+ self.n_cores = ctx1.n_cores
+ self.n_pipelines = ctx1.n_pipelines
+
+ self.arr2_pipelines2cores = [''] * self.n_pipelines
+
+ core_idx = 0
+ while (core_idx < self.n_cores):
+ self.cores[core_idx].pipelines = ctx1.cores[core_idx].pipelines
+
+ self.bitpos_load(core_idx)
+ core_idx += 1
+
+ def stage2_log(self):
+ tmp_file_comment = ""
+ if(enable_stage2_traceout == 1):
+ print('STAGE2: ', end='')
+ tmp_file_comment += 'STAGE2: '
+
+ for i in range(0, self.n_cores):
+ mask = len2mask(self.cores[i].n_pipelines)
+ pipelines_ht0 = self.bitpos_apply(
+ (~self.cores[i].counter) & mask,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ pipelines_ht1 = self.bitpos_apply(
+ self.cores[i].counter,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ print('C%dHT0 = [' % i, end='')
+ tmp_file_comment += "C{}HT0 = [".format(i)
+ tmp_file_comment += bitstring_write(
+ pipelines_ht0, self.n_pipelines)
+
+ print(']\tC%dHT1 = [' % i, end='')
+ tmp_file_comment += "]\tC{}HT1 = [".format(i)
+ tmp_file_comment += bitstring_write(
+ pipelines_ht1, self.n_pipelines)
+ print(']\t', end='')
+ tmp_file_comment += ']\t'
+
+ print('')
+ self.stage2_file_comment = tmp_file_comment
+
+        # check if file tracing is enabled
+ if(enable_stage2_fileout != 1):
+ return
+ # spit out the combination to file
+ self.stage2_process_file()
+
+ def stage2_updateCoresInBuf(self, nPipeline, sCore):
+ rePipeline = self._fileTrace.arr_pipelines[nPipeline]
+        rePipeline = rePipeline.replace("[", r"\[").replace("]", r"\]")
+        reCore = r'core\s*=\s*((\d*)|(((s|S)\d)?(c|C)[1-9][0-9]*)).*\n'
+ sSubs = 'core = ' + sCore + '\n'
+
+ reg_pipeline = re.compile(rePipeline)
+ search_match = reg_pipeline.search(self._fileTrace.in_buf)
+
+ if(search_match):
+ pos = search_match.start()
+ substr1 = self._fileTrace.in_buf[:pos]
+ substr2 = self._fileTrace.in_buf[pos:]
+ substr2 = re.sub(reCore, sSubs, substr2, 1)
+ self._fileTrace.in_buf = substr1 + substr2
+
+ def pipelines2cores(self, n, n_bits, nCore, bHT):
+ if (n_bits > 64):
+ return
+
+ i = n_bits - 1
+ pipeline_idx = 0
+ while (i >= 0):
+ cond = (n & (1 << i))
+ if (cond):
+ # update the pipelines array to match the core
+ # only in case of cond match
+ # PIPELINE0 and core 0 are reserved
+ if(bHT):
+ tmpCore = fileTrace.in_physical_cores[nCore] + 'h'
+ self.arr2_pipelines2cores[pipeline_idx] = tmpCore
+ else:
+ self.arr2_pipelines2cores[pipeline_idx] = \
+ fileTrace.in_physical_cores[nCore]
+
+ i -= 1
+ pipeline_idx += 1
+
+ def stage2_process_file(self):
+ outFileName = os.path.join(self._fileTrace.out_path,
+ self._fileTrace.prefix_outfile)
+ outFileName += "_{}CoReS".format(self.n_cores)
+
+ for i in range(0, self.n_cores):
+ mask = len2mask(self.cores[i].n_pipelines)
+ pipelines_ht0 = self.bitpos_apply((~self.cores[i].counter) & mask,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ pipelines_ht1 = self.bitpos_apply(self.cores[i].counter,
+ self.cores[i].bitpos,
+ self.cores[i].n_pipelines)
+
+ # update pipelines to core mapping
+ self.pipelines2cores(pipelines_ht0, self.n_pipelines, i, False)
+ self.pipelines2cores(pipelines_ht1, self.n_pipelines, i, True)
+
+ # update the in_buf as per the arr_pipelines2cores
+ for pipeline_idx in range(len(self.arr2_pipelines2cores)):
+ outFileName += "_{}".format(
+ self.arr2_pipelines2cores[pipeline_idx])
+ self.stage2_updateCoresInBuf(
+ pipeline_idx, self.arr2_pipelines2cores[pipeline_idx])
+
+ # by now the in_buf is all set to be written to file
+ outFileName += self._fileTrace.suffix_outfile
+ outputFile = open(outFileName, "w")
+
+ # write the file comments
+ strTruncated = ("", "(Truncated)")[self._fileTrace.ncores_truncated]
+ outputFile.write(
+ "; =============== Pipeline-to-Core Mapping ================\n"
+ "; Generated from file {}\n"
+ "; Input pipelines = {}\n"
+ "; Input cores = {}\n"
+ "; N_PIPELINES = {} N_CORES = {} {} hyper_thread = {} \n"
+ .format(
+ self._fileTrace.in_file_namepath,
+ fileTrace.arr_pipelines,
+ fileTrace.in_physical_cores,
+ self._fileTrace.n_pipelines,
+ self._fileTrace.n_cores,
+ strTruncated,
+ self._fileTrace.hyper_thread))
+
+ outputFile.write(
+ "; {stg0cmt}\n"
+ "; {stg1cmt}\n"
+ "; {stg2cmt}\n"
+ "; ========================================================\n"
+ "; \n"
+ .format(
+ stg0cmt=self.stage0_file_comment,
+ stg1cmt=self.stage1_file_comment,
+ stg2cmt=self.stage2_file_comment))
+
+ # write the buffer contents
+ outputFile.write(self._fileTrace.in_buf)
+ outputFile.flush()
+ outputFile.close()
+
+ def stage2_process(self):
+ i = 0
+ while(i < self.n_cores):
+ self.cores[i].counter_max = len2mask(
+ self.cores[i].n_pipelines - 1)
+ i += 1
+
+ self.pos = self.n_cores - 1
+ while True:
+ if (self.pos == self.n_cores - 1):
+ self.stage2_log()
+
+ if (self.cores[self.pos].counter ==
+ self.cores[self.pos].counter_max):
+ if (self.pos == 0):
+ return
+
+ self.cores[self.pos].counter = 0
+ self.pos -= 1
+ continue
+
+ self.cores[self.pos].counter += 1
+ if(self.pos < self.n_cores - 1):
+ self.pos += 1
+
+
+class FileTrace:
+
+ def __init__(self, filenamepath):
+ self.in_file_namepath = os.path.abspath(filenamepath)
+ self.in_filename = os.path.basename(self.in_file_namepath)
+ self.in_path = os.path.dirname(self.in_file_namepath)
+
+ filenamesplit = self.in_filename.split('.')
+ self.prefix_outfile = filenamesplit[0]
+ self.suffix_outfile = ".cfg"
+
+        # output folder: a new folder named after the input file,
+        # created next to this script
+ self.out_path = os.path.join(
+ os.path.abspath(os.path.dirname(__file__)),
+ self.prefix_outfile)
+
+ try:
+ os.makedirs(self.out_path)
+ except OSError as excep:
+ if excep.errno == errno.EEXIST and os.path.isdir(self.out_path):
+ pass
+ else:
+ raise
+
+ self.in_buf = None
+        self.arr_pipelines = []  # pipeline section names found in the input file
+
+ self.max_cores = 15
+ self.max_pipelines = 15
+
+ self.in_physical_cores = None
+ self.hyper_thread = None
+
+ # save the num of pipelines determined from input file
+ self.n_pipelines = 0
+ # save the num of cores input (or the truncated value)
+ self.n_cores = 0
+ self.ncores_truncated = False
+
+ def print_TraceFile(self):
+ print("self.in_file_namepath = ", self.in_file_namepath)
+ print("self.in_filename = ", self.in_filename)
+ print("self.in_path = ", self.in_path)
+ print("self.out_path = ", self.out_path)
+ print("self.prefix_outfile = ", self.prefix_outfile)
+ print("self.suffix_outfile = ", self.suffix_outfile)
+ print("self.in_buf = ", self.in_buf)
+ print("self.arr_pipelines =", self.arr_pipelines)
+ print("self.in_physical_cores", self.in_physical_cores)
+ print("self.hyper_thread", self.hyper_thread)
+
+
+def process(n_cores, n_pipelines, fileTrace):
+ '''process and map pipelines, cores.'''
+ if (n_cores == 0):
+ sys.exit('N_CORES is 0, exiting')
+
+ if (n_pipelines == 0):
+ sys.exit('N_PIPELINES is 0, exiting')
+
+ if (n_cores > n_pipelines):
+ print('\nToo many cores, truncating N_CORES to N_PIPELINES')
+ n_cores = n_pipelines
+ fileTrace.ncores_truncated = True
+
+ fileTrace.n_pipelines = n_pipelines
+ fileTrace.n_cores = n_cores
+
+ strTruncated = ("", "(Truncated)")[fileTrace.ncores_truncated]
+ print("N_PIPELINES = {}, N_CORES = {} {}"
+ .format(n_pipelines, n_cores, strTruncated))
+ print("---------------------------------------------------------------")
+
+ ctx0_inst = Context0()
+ ctx1_inst = Context1()
+ ctx2_inst = Context2()
+
+ # initialize the class variables
+ ctx1_inst._fileTrace = fileTrace
+ ctx2_inst._fileTrace = fileTrace
+
+ ctx0_inst.stage0_init(n_cores, n_pipelines, ctx1_inst, ctx2_inst)
+ ctx0_inst.stage0_process()
+
+
+def validate_core(core):
+ match = reg_phycore.match(core)
+ if(match):
+ return True
+ else:
+ return False
+
+
+def validate_phycores(phy_cores):
+ '''validate physical cores, check if unique.'''
+ # eat up whitespaces
+ phy_cores = phy_cores.strip().split(',')
+
+ # check if the core list is unique
+ if(len(phy_cores) != len(set(phy_cores))):
+ print('list of physical cores has duplicates')
+ return None
+
+ for core in phy_cores:
+ if not validate_core(core):
+ print('invalid physical core specified.')
+ return None
+ return phy_cores
+
+
+def scanconfigfile(fileTrace):
+ '''scan input file for pipelines, validate then process.'''
+ # open file
+ filetoscan = open(fileTrace.in_file_namepath, 'r')
+ fileTrace.in_buf = filetoscan.read()
+
+ # reset iterator on open file
+ filetoscan.seek(0)
+
+ # scan input file for pipelines
+ # master pipelines to be ignored
+ pattern_pipeline = r'\[PIPELINE\d*\]'
+ pattern_mastertype = r'type\s*=\s*MASTER'
+
+ pending_pipeline = False
+ for line in filetoscan:
+ match_pipeline = re.search(pattern_pipeline, line)
+        match_type = re.search(r'type\s*=', line)
+ match_mastertype = re.search(pattern_mastertype, line)
+
+ if(match_pipeline):
+ sPipeline = line[match_pipeline.start():match_pipeline.end()]
+ pending_pipeline = True
+ elif(match_type):
+ # found a type definition...
+ if(match_mastertype is None):
+ # and this is not a master pipeline...
+ if(pending_pipeline):
+ # add it to the list of pipelines to be mapped
+ fileTrace.arr_pipelines.append(sPipeline)
+ pending_pipeline = False
+ else:
+ # and this is a master pipeline...
+ # ignore the current and move on to next
+ sPipeline = ""
+ pending_pipeline = False
+ filetoscan.close()
+
+ # validate if pipelines are unique
+ if(len(fileTrace.arr_pipelines) != len(set(fileTrace.arr_pipelines))):
+ sys.exit('Error: duplicate pipelines in input file')
+
+ num_pipelines = len(fileTrace.arr_pipelines)
+ num_cores = len(fileTrace.in_physical_cores)
+
+ print("-------------------Pipeline-to-core mapping--------------------")
+ print("Input pipelines = {}\nInput cores = {}"
+ .format(fileTrace.arr_pipelines, fileTrace.in_physical_cores))
+
+    # input configuration file validations go here
+ if (num_cores > fileTrace.max_cores):
+ sys.exit('Error: number of cores specified > max_cores (%d)' %
+ fileTrace.max_cores)
+
+    if (num_pipelines > fileTrace.max_pipelines):
+        sys.exit('Error: number of pipelines in input '
+                 'cfg file > max_pipelines (%d)' % fileTrace.max_pipelines)
+
+ # call process to generate pipeline-to-core mapping, trace and log
+ process(num_cores, num_pipelines, fileTrace)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description='mappipelines')
+
+ reqNamedGrp = parser.add_argument_group('required named args')
+ reqNamedGrp.add_argument(
+ '-i',
+ '--input-file',
+ type=argparse.FileType('r'),
+ help='Input config file',
+ required=True)
+
+ reqNamedGrp.add_argument(
+ '-pc',
+ '--physical-cores',
+ type=validate_phycores,
+ help='''Enter available CPU cores in
+ format:\"<core>,<core>,...\"
+ where each core format: \"s<SOCKETID>c<COREID>\"
+ where SOCKETID={0..9}, COREID={1-99}''',
+ required=True)
+
+ # add optional arguments
+ parser.add_argument(
+ '-ht',
+ '--hyper-thread',
+ help='enable/disable hyper threading. default is ON',
+ default='ON',
+ choices=['ON', 'OFF'])
+
+ parser.add_argument(
+ '-nO',
+ '--no-output-file',
+ help='''disable output config file generation.
+ Output file generation is enabled by default''',
+ action="store_true")
+
+ args = parser.parse_args()
+
+ if(args.physical_cores is None):
+ parser.error("invalid physical_cores specified")
+
+ # create object of FileTrace and initialise
+ fileTrace = FileTrace(args.input_file.name)
+ fileTrace.in_physical_cores = args.physical_cores
+ fileTrace.hyper_thread = args.hyper_thread
+
+ if(fileTrace.hyper_thread == 'OFF'):
+ print("!!!!disabling stage2 HT!!!!")
+ enable_stage2_traceout = 0
+ enable_stage2_fileout = 0
+ elif(fileTrace.hyper_thread == 'ON'):
+ print("!!!!HT enabled. disabling stage1 file generation.!!!!")
+ enable_stage1_fileout = 0
+
+ if(args.no_output_file is True):
+ print("!!!!disabling stage1 and stage2 fileout!!!!")
+ enable_stage1_fileout = 0
+ enable_stage2_fileout = 0
+
+ scanconfigfile(fileTrace)
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/tap.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/tap.cfg
new file mode 100644
index 00000000..10d35ebb
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/tap.cfg
@@ -0,0 +1,64 @@
+; BSD LICENSE
+;
+; Copyright(c) 2016 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+;             ______________          ______________________
+;            |              |  TAP0  |                      |
+; RXQ0.0 --->|              |------->|--+                   |
+;            |              |  TAP1  |  | br0               |
+; TXQ1.0 <---|              |<-------|<-+                   |
+;            | Pass-through |        |     Linux Kernel     |
+;            |     (P1)     |        |    Network Stack     |
+;            |              |  TAP1  |                      |
+; RXQ1.0 --->|              |------->|--+                   |
+;            |              |  TAP0  |  | br0               |
+; TXQ0.0 <---|              |<-------|<-+                   |
+;            |______________|        |______________________|
+;
+; Configure Linux kernel bridge between TAP0 and TAP1 interfaces:
+; [Linux]$ ifconfig TAP0 up
+; [Linux]$ ifconfig TAP1 up
+; [Linux]$ brctl addbr "br0"
+; [Linux]$ brctl addif br0 TAP0
+; [Linux]$ brctl addif br0 TAP1
+; [Linux]$ ifconfig br0 up
+
+[EAL]
+log_level = 0
+
+[PIPELINE0]
+type = MASTER
+core = 0
+
+[PIPELINE1]
+type = PASS-THROUGH
+core = 1
+pktq_in = RXQ0.0 TAP1 RXQ1.0 TAP0
+pktq_out = TAP0 TXQ1.0 TAP1 TXQ0.0
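+
+; Note: for the pass-through pipeline, input i is presumably forwarded to
+; output i, i.e. RXQ0.0 -> TAP0, TAP1 -> TXQ1.0, RXQ1.0 -> TAP1 and
+; TAP0 -> TXQ0.0, matching the diagram above.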
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config/tm_profile.cfg b/src/seastar/dpdk/examples/ip_pipeline/config/tm_profile.cfg
new file mode 100644
index 00000000..2dfb215e
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config/tm_profile.cfg
@@ -0,0 +1,105 @@
+; BSD LICENSE
+;
+; Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+;
+; * Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in
+; the documentation and/or other materials provided with the
+; distribution.
+; * Neither the name of Intel Corporation nor the names of its
+; contributors may be used to endorse or promote products derived
+; from this software without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+; This file enables the following hierarchical scheduler configuration for each
+; 10GbE output port:
+; * Single subport (subport 0):
+; - Subport rate set to 100% of port rate
+; - Each of the 4 traffic classes has rate set to 100% of port rate
+; * 4K pipes per subport 0 (pipes 0 .. 4095) with identical configuration:
+; - Pipe rate set to 1/4K of port rate
+; - Each of the 4 traffic classes has rate set to 100% of pipe rate
+; - Within each traffic class, the byte-level WRR weights for the 4 queues
+; are set to 1:1:1:1
+;
+; For more details, please refer to chapter "Quality of Service (QoS) Framework"
+; of Data Plane Development Kit (DPDK) Programmer's Guide.
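+;
+; Worked example for the rates used below, assuming a 10GbE port:
+;   port rate = 10 Gbps = 1250000000 bytes/second (subport tb rate and tc rates)
+;   pipe rate = 1250000000 / 4096 pipes = ~305175 bytes/second (pipe tb rate and tc rates)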
+
+; Port configuration
+[port]
+frame overhead = 24 ; frame overhead = Preamble (7) + SFD (1) + FCS (4) + IFG (12)
+mtu = 1522 ; mtu = Q-in-Q MTU (FCS not included)
+number of subports per port = 1
+number of pipes per subport = 4096
+queue sizes = 64 64 64 64
+
+; Subport configuration
+[subport 0]
+tb rate = 1250000000 ; Bytes per second
+tb size = 1000000 ; Bytes
+
+tc 0 rate = 1250000000 ; Bytes per second
+tc 1 rate = 1250000000 ; Bytes per second
+tc 2 rate = 1250000000 ; Bytes per second
+tc 3 rate = 1250000000 ; Bytes per second
+tc period = 10 ; Milliseconds
+
+pipe 0-4095 = 0 ; These pipes are configured with pipe profile 0
+
+; Pipe configuration
+[pipe profile 0]
+tb rate = 305175 ; Bytes per second
+tb size = 1000000 ; Bytes
+
+tc 0 rate = 305175 ; Bytes per second
+tc 1 rate = 305175 ; Bytes per second
+tc 2 rate = 305175 ; Bytes per second
+tc 3 rate = 305175 ; Bytes per second
+tc period = 40 ; Milliseconds
+
+tc 3 oversubscription weight = 1
+
+tc 0 wrr weights = 1 1 1 1
+tc 1 wrr weights = 1 1 1 1
+tc 2 wrr weights = 1 1 1 1
+tc 3 wrr weights = 1 1 1 1
+
+; RED params per traffic class and color (Green / Yellow / Red)
+[red]
+tc 0 wred min = 48 40 32
+tc 0 wred max = 64 64 64
+tc 0 wred inv prob = 10 10 10
+tc 0 wred weight = 9 9 9
+
+tc 1 wred min = 48 40 32
+tc 1 wred max = 64 64 64
+tc 1 wred inv prob = 10 10 10
+tc 1 wred weight = 9 9 9
+
+tc 2 wred min = 48 40 32
+tc 2 wred max = 64 64 64
+tc 2 wred inv prob = 10 10 10
+tc 2 wred weight = 9 9 9
+
+tc 3 wred min = 48 40 32
+tc 3 wred max = 64 64 64
+tc 3 wred inv prob = 10 10 10
+tc 3 wred weight = 9 9 9
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config_check.c b/src/seastar/dpdk/examples/ip_pipeline/config_check.c
new file mode 100644
index 00000000..dd9d4d8b
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config_check.c
@@ -0,0 +1,517 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+
+#include <rte_ip.h>
+
+#include "app.h"
+
+static void
+check_mempools(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_mempools; i++) {
+ struct app_mempool_params *p = &app->mempool_params[i];
+
+ APP_CHECK((p->pool_size > 0),
+ "Mempool %s size is 0\n", p->name);
+
+ APP_CHECK((p->cache_size > 0),
+ "Mempool %s cache size is 0\n", p->name);
+
+ APP_CHECK(rte_is_power_of_2(p->cache_size),
+ "Mempool %s cache size not a power of 2\n", p->name);
+ }
+}
+
+static inline uint32_t
+link_rxq_used(struct app_link_params *link, uint32_t q_id)
+{
+ uint32_t i;
+
+ if ((link->arp_q == q_id) ||
+ (link->tcp_syn_q == q_id) ||
+ (link->ip_local_q == q_id) ||
+ (link->tcp_local_q == q_id) ||
+ (link->udp_local_q == q_id) ||
+ (link->sctp_local_q == q_id))
+ return 1;
+
+ for (i = 0; i < link->n_rss_qs; i++)
+ if (link->rss_qs[i] == q_id)
+ return 1;
+
+ return 0;
+}
+
+static void
+check_links(struct app_params *app)
+{
+ uint32_t i;
+
+ /* Check that number of links matches the port mask */
+ if (app->port_mask) {
+ uint32_t n_links_port_mask =
+ __builtin_popcountll(app->port_mask);
+
+ APP_CHECK((app->n_links == n_links_port_mask),
+			"Number of links does not match the PORT_MASK\n");
+ }
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *link = &app->link_params[i];
+ uint32_t rxq_max, n_rxq, n_txq, link_id, i;
+
+ APP_PARAM_GET_ID(link, "LINK", link_id);
+
+ /* Check that link RXQs are contiguous */
+ rxq_max = 0;
+ if (link->arp_q > rxq_max)
+ rxq_max = link->arp_q;
+ if (link->tcp_syn_q > rxq_max)
+ rxq_max = link->tcp_syn_q;
+ if (link->ip_local_q > rxq_max)
+ rxq_max = link->ip_local_q;
+ if (link->tcp_local_q > rxq_max)
+ rxq_max = link->tcp_local_q;
+ if (link->udp_local_q > rxq_max)
+ rxq_max = link->udp_local_q;
+ if (link->sctp_local_q > rxq_max)
+ rxq_max = link->sctp_local_q;
+ for (i = 0; i < link->n_rss_qs; i++)
+ if (link->rss_qs[i] > rxq_max)
+ rxq_max = link->rss_qs[i];
+
+ for (i = 1; i <= rxq_max; i++)
+ APP_CHECK((link_rxq_used(link, i)),
+ "%s RXQs are not contiguous (A)\n", link->name);
+
+ n_rxq = app_link_get_n_rxq(app, link);
+
+ APP_CHECK((n_rxq), "%s does not have any RXQ\n", link->name);
+
+ APP_CHECK((n_rxq == rxq_max + 1),
+ "%s RXQs are not contiguous (B)\n", link->name);
+
+ for (i = 0; i < n_rxq; i++) {
+ char name[APP_PARAM_NAME_SIZE];
+ int pos;
+
+ sprintf(name, "RXQ%" PRIu32 ".%" PRIu32,
+ link_id, i);
+ pos = APP_PARAM_FIND(app->hwq_in_params, name);
+ APP_CHECK((pos >= 0),
+ "%s RXQs are not contiguous (C)\n", link->name);
+ }
+
+ /* Check that link TXQs are contiguous */
+ n_txq = app_link_get_n_txq(app, link);
+
+ APP_CHECK((n_txq), "%s does not have any TXQ\n", link->name);
+
+ for (i = 0; i < n_txq; i++) {
+ char name[APP_PARAM_NAME_SIZE];
+ int pos;
+
+ sprintf(name, "TXQ%" PRIu32 ".%" PRIu32,
+ link_id, i);
+ pos = APP_PARAM_FIND(app->hwq_out_params, name);
+ APP_CHECK((pos >= 0),
+ "%s TXQs are not contiguous\n", link->name);
+ }
+ }
+}
+
+static void
+check_rxqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_hwq_in; i++) {
+ struct app_pktq_hwq_in_params *p = &app->hwq_in_params[i];
+ uint32_t n_readers = app_rxq_get_readers(app, p);
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ APP_CHECK((p->burst > 0),
+ "%s burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst <= p->size),
+ "%s burst size is bigger than its size\n", p->name);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+ }
+}
+
+static void
+check_txqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_hwq_out; i++) {
+ struct app_pktq_hwq_out_params *p = &app->hwq_out_params[i];
+ uint32_t n_writers = app_txq_get_writers(app, p);
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ APP_CHECK((p->burst > 0),
+ "%s burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst <= p->size),
+ "%s burst size is bigger than its size\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
+check_swqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_swq; i++) {
+ struct app_pktq_swq_params *p = &app->swq_params[i];
+ uint32_t n_readers = app_swq_get_readers(app, p);
+ uint32_t n_writers = app_swq_get_writers(app, p);
+ uint32_t n_flags;
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ APP_CHECK((p->burst_read > 0),
+ "%s read burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst_read <= p->size),
+ "%s read burst size is bigger than its size\n",
+ p->name);
+
+ APP_CHECK((p->burst_write > 0),
+ "%s write burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst_write <= p->size),
+ "%s write burst size is bigger than its size\n",
+ p->name);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ if (n_readers > 1)
+ APP_LOG(app, LOW, "%s has more than one reader", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ if (n_writers > 1)
+ APP_LOG(app, LOW, "%s has more than one writer", p->name);
+
+ n_flags = p->ipv4_frag + p->ipv6_frag + p->ipv4_ras + p->ipv6_ras;
+
+ APP_CHECK((n_flags < 2),
+ "%s has more than one fragmentation or reassembly mode enabled\n",
+ p->name);
+
+ APP_CHECK((!((n_readers > 1) && (n_flags == 1))),
+ "%s has more than one reader when fragmentation or reassembly"
+ " mode enabled\n",
+ p->name);
+
+ APP_CHECK((!((n_writers > 1) && (n_flags == 1))),
+ "%s has more than one writer when fragmentation or reassembly"
+ " mode enabled\n",
+ p->name);
+
+ n_flags = p->ipv4_ras + p->ipv6_ras;
+
+ APP_CHECK((!((p->dropless == 1) && (n_flags == 1))),
+ "%s has dropless when reassembly mode enabled\n", p->name);
+
+ n_flags = p->ipv4_frag + p->ipv6_frag;
+
+ if (n_flags == 1) {
+ uint16_t ip_hdr_size = (p->ipv4_frag) ? sizeof(struct ipv4_hdr) :
+ sizeof(struct ipv6_hdr);
+
+ APP_CHECK((p->mtu > ip_hdr_size),
+ "%s mtu size is smaller than ip header\n", p->name);
+
+ APP_CHECK((!((p->mtu - ip_hdr_size) % 8)),
+ "%s mtu size is incorrect\n", p->name);
+ }
+ }
+}
+
+static void
+check_tms(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_tm; i++) {
+ struct app_pktq_tm_params *p = &app->tm_params[i];
+ uint32_t n_readers = app_tm_get_readers(app, p);
+ uint32_t n_writers = app_tm_get_writers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
+check_taps(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_tap; i++) {
+ struct app_pktq_tap_params *p = &app->tap_params[i];
+ uint32_t n_readers = app_tap_get_readers(app, p);
+ uint32_t n_writers = app_tap_get_writers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+
+ APP_CHECK((p->burst_read > 0),
+ "%s read burst size is 0\n", p->name);
+
+ APP_CHECK((p->burst_write > 0),
+ "%s write burst size is 0\n", p->name);
+ }
+}
+
+static void
+check_knis(struct app_params *app) {
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_kni; i++) {
+ struct app_pktq_kni_params *p = &app->kni_params[i];
+ uint32_t n_readers = app_kni_get_readers(app, p);
+ uint32_t n_writers = app_kni_get_writers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
+check_sources(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_source; i++) {
+ struct app_pktq_source_params *p = &app->source_params[i];
+ uint32_t n_readers = app_source_get_readers(app, p);
+
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+ }
+}
+
+static void
+check_sinks(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_sink; i++) {
+ struct app_pktq_sink_params *p = &app->sink_params[i];
+ uint32_t n_writers = app_sink_get_writers(app, p);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+}
+
+static void
+check_msgqs(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_msgq; i++) {
+ struct app_msgq_params *p = &app->msgq_params[i];
+ uint32_t n_readers = app_msgq_get_readers(app, p);
+ uint32_t n_writers = app_msgq_get_writers(app, p);
+ uint32_t msgq_req_pipeline, msgq_rsp_pipeline;
+ uint32_t msgq_req_core, msgq_rsp_core;
+
+ APP_CHECK((p->size > 0),
+ "%s size is 0\n", p->name);
+
+ APP_CHECK((rte_is_power_of_2(p->size)),
+ "%s size is not a power of 2\n", p->name);
+
+ msgq_req_pipeline = (strncmp(p->name, "MSGQ-REQ-PIPELINE",
+ strlen("MSGQ-REQ-PIPELINE")) == 0);
+
+ msgq_rsp_pipeline = (strncmp(p->name, "MSGQ-RSP-PIPELINE",
+ strlen("MSGQ-RSP-PIPELINE")) == 0);
+
+ msgq_req_core = (strncmp(p->name, "MSGQ-REQ-CORE",
+ strlen("MSGQ-REQ-CORE")) == 0);
+
+ msgq_rsp_core = (strncmp(p->name, "MSGQ-RSP-CORE",
+ strlen("MSGQ-RSP-CORE")) == 0);
+
+ if ((msgq_req_pipeline == 0) &&
+ (msgq_rsp_pipeline == 0) &&
+ (msgq_req_core == 0) &&
+ (msgq_rsp_core == 0)) {
+ APP_CHECK((n_readers != 0),
+ "%s has no reader\n", p->name);
+
+ APP_CHECK((n_readers == 1),
+ "%s has more than one reader\n", p->name);
+
+ APP_CHECK((n_writers != 0),
+ "%s has no writer\n", p->name);
+
+ APP_CHECK((n_writers == 1),
+ "%s has more than one writer\n", p->name);
+ }
+
+ if (msgq_req_pipeline) {
+ struct app_pipeline_params *pipeline;
+ uint32_t pipeline_id;
+
+ APP_PARAM_GET_ID(p, "MSGQ-REQ-PIPELINE", pipeline_id);
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params,
+ "PIPELINE",
+ pipeline_id,
+ pipeline);
+
+ APP_CHECK((pipeline != NULL),
+ "%s is not associated with a valid pipeline\n",
+ p->name);
+ }
+
+ if (msgq_rsp_pipeline) {
+ struct app_pipeline_params *pipeline;
+ uint32_t pipeline_id;
+
+ APP_PARAM_GET_ID(p, "MSGQ-RSP-PIPELINE", pipeline_id);
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params,
+ "PIPELINE",
+ pipeline_id,
+ pipeline);
+
+ APP_CHECK((pipeline != NULL),
+ "%s is not associated with a valid pipeline\n",
+ p->name);
+ }
+ }
+}
+
+static void
+check_pipelines(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+
+ APP_CHECK((p->n_msgq_in == p->n_msgq_out),
+ "%s number of input MSGQs does not match "
+ "the number of output MSGQs\n", p->name);
+ }
+}
+
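+/*
+ * Top-level configuration validation: runs every per-object checker defined
+ * above. Each check passes its error message, together with the name of the
+ * offending object, to APP_CHECK().
+ */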
+int
+app_config_check(struct app_params *app)
+{
+ check_mempools(app);
+ check_links(app);
+ check_rxqs(app);
+ check_txqs(app);
+ check_swqs(app);
+ check_tms(app);
+ check_taps(app);
+ check_knis(app);
+ check_sources(app);
+ check_sinks(app);
+ check_msgqs(app);
+ check_pipelines(app);
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config_parse.c b/src/seastar/dpdk/examples/ip_pipeline/config_parse.c
new file mode 100644
index 00000000..0b761346
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config_parse.c
@@ -0,0 +1,3450 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_errno.h>
+#include <rte_cfgfile.h>
+#include <rte_string_fns.h>
+
+#include "app.h"
+#include "parser.h"
+
+/**
+ * Default config values
+ **/
+
+static struct app_params app_params_default = {
+ .config_file = "./config/ip_pipeline.cfg",
+ .log_level = APP_LOG_LEVEL_HIGH,
+ .port_mask = 0,
+
+ .eal_params = {
+ .channels = 4,
+ },
+};
+
+static const struct app_mempool_params mempool_params_default = {
+ .parsed = 0,
+ .buffer_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
+ .pool_size = 32 * 1024,
+ .cache_size = 256,
+ .cpu_socket_id = 0,
+};
+
+static const struct app_link_params link_params_default = {
+ .parsed = 0,
+ .pmd_id = 0,
+ .arp_q = 0,
+ .tcp_syn_q = 0,
+ .ip_local_q = 0,
+ .tcp_local_q = 0,
+ .udp_local_q = 0,
+ .sctp_local_q = 0,
+ .rss_qs = {0},
+ .n_rss_qs = 0,
+ .rss_proto_ipv4 = ETH_RSS_IPV4,
+ .rss_proto_ipv6 = ETH_RSS_IPV6,
+ .rss_proto_l2 = 0,
+ .state = 0,
+ .ip = 0,
+ .depth = 0,
+ .mac_addr = 0,
+ .pci_bdf = {0},
+
+ .conf = {
+ .link_speeds = 0,
+ .rxmode = {
+ .mq_mode = ETH_MQ_RX_NONE,
+
+ .header_split = 0, /* Header split */
+ .hw_ip_checksum = 0, /* IP checksum offload */
+ .hw_vlan_filter = 0, /* VLAN filtering */
+ .hw_vlan_strip = 0, /* VLAN strip */
+ .hw_vlan_extend = 0, /* Extended VLAN */
+ .jumbo_frame = 0, /* Jumbo frame support */
+ .hw_strip_crc = 1, /* CRC strip by HW */
+ .enable_scatter = 0, /* Scattered packets RX handler */
+
+ .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .split_hdr_size = 0, /* Header split buffer size */
+ },
+ .rx_adv_conf = {
+ .rss_conf = {
+ .rss_key = NULL,
+ .rss_key_len = 40,
+ .rss_hf = 0,
+ },
+ },
+ .txmode = {
+ .mq_mode = ETH_MQ_TX_NONE,
+ },
+ .lpbk_mode = 0,
+ },
+
+ .promisc = 1,
+};
+
+static const struct app_pktq_hwq_in_params default_hwq_in_params = {
+ .parsed = 0,
+ .mempool_id = 0,
+ .size = 128,
+ .burst = 32,
+
+ .conf = {
+ .rx_thresh = {
+ .pthresh = 8,
+ .hthresh = 8,
+ .wthresh = 4,
+ },
+ .rx_free_thresh = 64,
+ .rx_drop_en = 0,
+ .rx_deferred_start = 0,
+ }
+};
+
+static const struct app_pktq_hwq_out_params default_hwq_out_params = {
+ .parsed = 0,
+ .size = 512,
+ .burst = 32,
+ .dropless = 0,
+ .n_retries = 0,
+
+ .conf = {
+ .tx_thresh = {
+ .pthresh = 36,
+ .hthresh = 0,
+ .wthresh = 0,
+ },
+ .tx_rs_thresh = 0,
+ .tx_free_thresh = 0,
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
+ ETH_TXQ_FLAGS_NOOFFLOADS,
+ .tx_deferred_start = 0,
+ }
+};
+
+static const struct app_pktq_swq_params default_swq_params = {
+ .parsed = 0,
+ .size = 256,
+ .burst_read = 32,
+ .burst_write = 32,
+ .dropless = 0,
+ .n_retries = 0,
+ .cpu_socket_id = 0,
+ .ipv4_frag = 0,
+ .ipv6_frag = 0,
+ .ipv4_ras = 0,
+ .ipv6_ras = 0,
+ .mtu = 0,
+ .metadata_size = 0,
+ .mempool_direct_id = 0,
+ .mempool_indirect_id = 0,
+};
+
+struct app_pktq_tm_params default_tm_params = {
+ .parsed = 0,
+ .file_name = "./config/tm_profile.cfg",
+ .burst_read = 24,
+ .burst_write = 32,
+};
+
+struct app_pktq_tap_params default_tap_params = {
+ .parsed = 0,
+ .burst_read = 32,
+ .burst_write = 32,
+ .dropless = 0,
+ .n_retries = 0,
+ .mempool_id = 0,
+};
+
+struct app_pktq_kni_params default_kni_params = {
+ .parsed = 0,
+ .socket_id = 0,
+ .core_id = 0,
+ .hyper_th_id = 0,
+ .force_bind = 0,
+
+ .mempool_id = 0,
+ .burst_read = 32,
+ .burst_write = 32,
+ .dropless = 0,
+ .n_retries = 0,
+};
+
+struct app_pktq_source_params default_source_params = {
+ .parsed = 0,
+ .mempool_id = 0,
+ .burst = 32,
+ .file_name = "./config/packets.pcap",
+ .n_bytes_per_pkt = 0,
+};
+
+struct app_pktq_sink_params default_sink_params = {
+ .parsed = 0,
+ .file_name = NULL,
+ .n_pkts_to_dump = 0,
+};
+
+struct app_msgq_params default_msgq_params = {
+ .parsed = 0,
+ .size = 64,
+ .cpu_socket_id = 0,
+};
+
+struct app_pipeline_params default_pipeline_params = {
+ .parsed = 0,
+ .socket_id = 0,
+ .core_id = 0,
+ .hyper_th_id = 0,
+ .n_pktq_in = 0,
+ .n_pktq_out = 0,
+ .n_msgq_in = 0,
+ .n_msgq_out = 0,
+ .timer_period = 1,
+ .n_args = 0,
+};
+
+static const char app_usage[] =
+ "Usage: %s [-f CONFIG_FILE] [-s SCRIPT_FILE] [-p PORT_MASK] "
+ "[-l LOG_LEVEL] [--preproc PREPROCESSOR] [--preproc-args ARGS]\n"
+ "\n"
+ "Arguments:\n"
+ "\t-f CONFIG_FILE: Default config file is %s\n"
+ "\t-p PORT_MASK: Mask of NIC port IDs in hex format (generated from "
+ "config file when not provided)\n"
+ "\t-s SCRIPT_FILE: No CLI script file is run when not specified\n"
+ "\t-l LOG_LEVEL: 0 = NONE, 1 = HIGH PRIO (default), 2 = LOW PRIO\n"
+ "\t--preproc PREPROCESSOR: Configuration file pre-processor\n"
+ "\t--preproc-args ARGS: Arguments to be passed to pre-processor\n"
+ "\n";
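+
+/*
+ * Hypothetical invocation matching the usage text above (binary name, paths
+ * and port mask are examples only):
+ *   ./ip_pipeline -f ./config/ip_pipeline.cfg -p 0x3 -s ./config/my_script.sh
+ */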
+
+static void
+app_print_usage(char *prgname)
+{
+ rte_exit(0, app_usage, prgname, app_params_default.config_file);
+}
+
+#define APP_PARAM_ADD(set, key) \
+({ \
+ ssize_t pos = APP_PARAM_FIND(set, key); \
+ ssize_t size = RTE_DIM(set); \
+ \
+ if (pos < 0) { \
+ for (pos = 0; pos < size; pos++) { \
+ if (!APP_PARAM_VALID(&((set)[pos]))) \
+ break; \
+ } \
+ \
+ APP_CHECK((pos < size), \
+ "Parse error: size of %s is limited to %u elements",\
+ #set, (uint32_t) size); \
+ \
+ (set)[pos].name = strdup(key); \
+ APP_CHECK(((set)[pos].name), \
+ "Parse error: no free memory"); \
+ } \
+ pos; \
+})
+
+#define APP_PARAM_ADD_LINK_FOR_RXQ(app, rxq_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id, queue_id; \
+ \
+ sscanf((rxq_name), "RXQ%" SCNu32 ".%" SCNu32, &link_id, &queue_id);\
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
+
+#define APP_PARAM_ADD_LINK_FOR_TXQ(app, txq_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id, queue_id; \
+ \
+ sscanf((txq_name), "TXQ%" SCNu32 ".%" SCNu32, &link_id, &queue_id);\
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
+
+#define APP_PARAM_ADD_LINK_FOR_TM(app, tm_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id; \
+ \
+ sscanf((tm_name), "TM%" SCNu32, &link_id); \
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
+
+#define APP_PARAM_ADD_LINK_FOR_KNI(app, kni_name) \
+({ \
+ char link_name[APP_PARAM_NAME_SIZE]; \
+ ssize_t link_param_pos; \
+ uint32_t link_id; \
+ \
+ sscanf((kni_name), "KNI%" SCNu32, &link_id); \
+ sprintf(link_name, "LINK%" PRIu32, link_id); \
+ link_param_pos = APP_PARAM_ADD((app)->link_params, link_name); \
+ link_param_pos; \
+})
+
+#define PARSE_CHECK_DUPLICATE_SECTION(obj) \
+do { \
+ APP_CHECK(((obj)->parsed == 0), \
+ "Parse error: duplicate \"%s\" section", (obj)->name); \
+ (obj)->parsed++; \
+} while (0)
+
+#define PARSE_CHECK_DUPLICATE_SECTION_EAL(obj) \
+do { \
+ APP_CHECK(((obj)->parsed == 0), \
+ "Parse error: duplicate \"%s\" section", "EAL"); \
+ (obj)->parsed++; \
+} while (0)
+
+#define PARSE_ERROR(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\": entry \"%s\"", section, entry)
+
+#define PARSE_ERROR_MESSAGE(exp, section, entry, message) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": %s", \
+ section, entry, message)
+
+#define PARSE_ERROR_NO_ELEMENTS(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
+ "no elements detected", \
+ section, entry)
+
+#define PARSE_ERROR_TOO_MANY_ELEMENTS(exp, section, entry, max) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
+ "maximum number of elements allowed is %u", \
+ section, entry, max)
+
+#define PARSE_ERROR_INVALID_ELEMENT(exp, section, entry, value) \
+APP_CHECK(exp, "Parse error in section \"%s\", entry \"%s\": " \
+ "Invalid element value \"%s\"", \
+ section, entry, value)
+
+#define PARSE_ERROR_MALLOC(exp) \
+APP_CHECK(exp, "Parse error: no free memory")
+
+#define PARSE_ERROR_SECTION(exp, section) \
+APP_CHECK(exp, "Parse error in section \"%s\"", section)
+
+#define PARSE_ERROR_SECTION_NO_ENTRIES(exp, section) \
+APP_CHECK(exp, "Parse error in section \"%s\": no entries", section)
+
+#define PARSE_WARNING_IGNORED(exp, section, entry) \
+do \
+if (!(exp)) \
+ fprintf(stderr, "Parse warning in section \"%s\": " \
+		"entry \"%s\" is ignored\n", section, entry); \
+while (0)
+
+#define PARSE_ERROR_INVALID(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\": unrecognized entry \"%s\"",\
+ section, entry)
+
+#define PARSE_ERROR_DUPLICATE(exp, section, entry) \
+APP_CHECK(exp, "Parse error in section \"%s\": duplicate entry \"%s\"", \
+ section, entry)
+
+static int
+validate_name(const char *name, const char *prefix, int num)
+{
+ size_t i, j;
+
+ for (i = 0; (name[i] != '\0') && (prefix[i] != '\0'); i++) {
+ if (name[i] != prefix[i])
+ return -1;
+ }
+
+ if (prefix[i] != '\0')
+ return -1;
+
+ if (!num) {
+ if (name[i] != '\0')
+ return -1;
+ else
+ return 0;
+ }
+
+ if (num == 2) {
+ j = skip_digits(&name[i]);
+ i += j;
+ if ((j == 0) || (name[i] != '.'))
+ return -1;
+ i++;
+ }
+
+ if (num == 1) {
+ j = skip_digits(&name[i]);
+ i += j;
+ if ((j == 0) || (name[i] != '\0'))
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+parse_eal(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_eal_params *p = &app->eal_params;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ PARSE_CHECK_DUPLICATE_SECTION_EAL(p);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *entry = &entries[i];
+
+ /* coremask */
+ if (strcmp(entry->name, "c") == 0) {
+ PARSE_WARNING_IGNORED(0, section_name, entry->name);
+ continue;
+ }
+
+ /* corelist */
+ if (strcmp(entry->name, "l") == 0) {
+ PARSE_WARNING_IGNORED(0, section_name, entry->name);
+ continue;
+ }
+
+ /* coremap */
+ if (strcmp(entry->name, "lcores") == 0) {
+ PARSE_ERROR_DUPLICATE((p->coremap == NULL),
+ section_name,
+ entry->name);
+ p->coremap = strdup(entry->value);
+ continue;
+ }
+
+ /* master_lcore */
+ if (strcmp(entry->name, "master_lcore") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->master_lcore_present == 0),
+ section_name,
+ entry->name);
+ p->master_lcore_present = 1;
+
+ status = parser_read_uint32(&p->master_lcore,
+ entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* channels */
+ if (strcmp(entry->name, "n") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->channels_present == 0),
+ section_name,
+ entry->name);
+ p->channels_present = 1;
+
+ status = parser_read_uint32(&p->channels, entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* memory */
+ if (strcmp(entry->name, "m") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->memory_present == 0),
+ section_name,
+ entry->name);
+ p->memory_present = 1;
+
+ status = parser_read_uint32(&p->memory, entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* ranks */
+ if (strcmp(entry->name, "r") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->ranks_present == 0),
+ section_name,
+ entry->name);
+ p->ranks_present = 1;
+
+ status = parser_read_uint32(&p->ranks, entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* pci_blacklist */
+ if ((strcmp(entry->name, "pci_blacklist") == 0) ||
+ (strcmp(entry->name, "b") == 0)) {
+ uint32_t i;
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_blacklist[i])
+ continue;
+
+ p->pci_blacklist[i] =
+ strdup(entry->value);
+ PARSE_ERROR_MALLOC(p->pci_blacklist[i]);
+
+ break;
+ }
+
+ PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
+ section_name, entry->name,
+ "too many elements");
+ continue;
+ }
+
+ /* pci_whitelist */
+ if ((strcmp(entry->name, "pci_whitelist") == 0) ||
+ (strcmp(entry->name, "w") == 0)) {
+ uint32_t i;
+
+ PARSE_ERROR_MESSAGE((app->port_mask != 0),
+ section_name, entry->name, "entry to be "
+ "generated by the application (port_mask "
+ "not provided)");
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_whitelist[i])
+ continue;
+
+ p->pci_whitelist[i] = strdup(entry->value);
+ PARSE_ERROR_MALLOC(p->pci_whitelist[i]);
+
+ break;
+ }
+
+ PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
+ section_name, entry->name,
+ "too many elements");
+ continue;
+ }
+
+ /* vdev */
+ if (strcmp(entry->name, "vdev") == 0) {
+ uint32_t i;
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->vdev[i])
+ continue;
+
+ p->vdev[i] = strdup(entry->value);
+ PARSE_ERROR_MALLOC(p->vdev[i]);
+
+ break;
+ }
+
+ PARSE_ERROR_MESSAGE((i < APP_MAX_LINKS),
+ section_name, entry->name,
+ "too many elements");
+ continue;
+ }
+
+ /* vmware_tsc_map */
+ if (strcmp(entry->name, "vmware_tsc_map") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->vmware_tsc_map_present == 0),
+ section_name,
+ entry->name);
+ p->vmware_tsc_map_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->vmware_tsc_map = val;
+ continue;
+ }
+
+ /* proc_type */
+ if (strcmp(entry->name, "proc_type") == 0) {
+ PARSE_ERROR_DUPLICATE((p->proc_type == NULL),
+ section_name,
+ entry->name);
+ p->proc_type = strdup(entry->value);
+ continue;
+ }
+
+ /* syslog */
+ if (strcmp(entry->name, "syslog") == 0) {
+ PARSE_ERROR_DUPLICATE((p->syslog == NULL),
+ section_name,
+ entry->name);
+ p->syslog = strdup(entry->value);
+ continue;
+ }
+
+ /* log_level */
+ if (strcmp(entry->name, "log_level") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((p->log_level_present == 0),
+ section_name,
+ entry->name);
+ p->log_level_present = 1;
+
+ status = parser_read_uint32(&p->log_level,
+ entry->value);
+ PARSE_ERROR((status == 0), section_name, entry->name);
+ continue;
+ }
+
+ /* version */
+ if (strcmp(entry->name, "v") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->version_present == 0),
+ section_name,
+ entry->name);
+ p->version_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->version = val;
+ continue;
+ }
+
+ /* help */
+ if ((strcmp(entry->name, "help") == 0) ||
+ (strcmp(entry->name, "h") == 0)) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->help_present == 0),
+ section_name,
+ entry->name);
+ p->help_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->help = val;
+ continue;
+ }
+
+ /* no_huge */
+ if (strcmp(entry->name, "no_huge") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_huge_present == 0),
+ section_name,
+ entry->name);
+ p->no_huge_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_huge = val;
+ continue;
+ }
+
+ /* no_pci */
+ if (strcmp(entry->name, "no_pci") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_pci_present == 0),
+ section_name,
+ entry->name);
+ p->no_pci_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_pci = val;
+ continue;
+ }
+
+ /* no_hpet */
+ if (strcmp(entry->name, "no_hpet") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_hpet_present == 0),
+ section_name,
+ entry->name);
+ p->no_hpet_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_hpet = val;
+ continue;
+ }
+
+ /* no_shconf */
+ if (strcmp(entry->name, "no_shconf") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->no_shconf_present == 0),
+ section_name,
+ entry->name);
+ p->no_shconf_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->no_shconf = val;
+ continue;
+ }
+
+ /* add_driver */
+ if (strcmp(entry->name, "d") == 0) {
+ PARSE_ERROR_DUPLICATE((p->add_driver == NULL),
+ section_name,
+ entry->name);
+ p->add_driver = strdup(entry->value);
+ continue;
+ }
+
+ /* socket_mem */
+ if (strcmp(entry->name, "socket_mem") == 0) {
+ PARSE_ERROR_DUPLICATE((p->socket_mem == NULL),
+ section_name,
+ entry->name);
+ p->socket_mem = strdup(entry->value);
+ continue;
+ }
+
+ /* huge_dir */
+ if (strcmp(entry->name, "huge_dir") == 0) {
+ PARSE_ERROR_DUPLICATE((p->huge_dir == NULL),
+ section_name,
+ entry->name);
+ p->huge_dir = strdup(entry->value);
+ continue;
+ }
+
+ /* file_prefix */
+ if (strcmp(entry->name, "file_prefix") == 0) {
+ PARSE_ERROR_DUPLICATE((p->file_prefix == NULL),
+ section_name,
+ entry->name);
+ p->file_prefix = strdup(entry->value);
+ continue;
+ }
+
+ /* base_virtaddr */
+ if (strcmp(entry->name, "base_virtaddr") == 0) {
+ PARSE_ERROR_DUPLICATE((p->base_virtaddr == NULL),
+ section_name,
+ entry->name);
+ p->base_virtaddr = strdup(entry->value);
+ continue;
+ }
+
+ /* create_uio_dev */
+ if (strcmp(entry->name, "create_uio_dev") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->create_uio_dev_present == 0),
+ section_name,
+ entry->name);
+ p->create_uio_dev_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->create_uio_dev = val;
+ continue;
+ }
+
+ /* vfio_intr */
+ if (strcmp(entry->name, "vfio_intr") == 0) {
+ PARSE_ERROR_DUPLICATE((p->vfio_intr == NULL),
+ section_name,
+ entry->name);
+ p->vfio_intr = strdup(entry->value);
+ continue;
+ }
+
+ /* xen_dom0 */
+ if (strcmp(entry->name, "xen_dom0") == 0) {
+ int val;
+
+ PARSE_ERROR_DUPLICATE((p->xen_dom0_present == 0),
+ section_name,
+ entry->name);
+ p->xen_dom0_present = 1;
+
+ val = parser_read_arg_bool(entry->value);
+ PARSE_ERROR((val >= 0), section_name, entry->name);
+ p->xen_dom0 = val;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, entry->name);
+ }
+
+ free(entries);
+}
+
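+/*
+ * Parse the whitespace-separated list of input packet queues of a pipeline.
+ * Each element must be a valid RXQ, SWQ, TM, TAP, KNI or SOURCE name; the
+ * referenced object (and, where needed, its underlying LINK) is created on
+ * first use.
+ */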
+static void
+parse_pipeline_pktq_in(struct app_params *app,
+ struct app_pipeline_params *p,
+ char *value)
+{
+ p->n_pktq_in = 0;
+
+ while (1) {
+ enum app_pktq_in_type type;
+ int id;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (name == NULL)
+ break;
+
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_pktq_in < RTE_DIM(p->pktq_in)),
+ p->name, "pktq_in", (uint32_t)RTE_DIM(p->pktq_in));
+
+ if (validate_name(name, "RXQ", 2) == 0) {
+ type = APP_PKTQ_IN_HWQ;
+ id = APP_PARAM_ADD(app->hwq_in_params, name);
+ APP_PARAM_ADD_LINK_FOR_RXQ(app, name);
+ } else if (validate_name(name, "SWQ", 1) == 0) {
+ type = APP_PKTQ_IN_SWQ;
+ id = APP_PARAM_ADD(app->swq_params, name);
+ } else if (validate_name(name, "TM", 1) == 0) {
+ type = APP_PKTQ_IN_TM;
+ id = APP_PARAM_ADD(app->tm_params, name);
+ APP_PARAM_ADD_LINK_FOR_TM(app, name);
+ } else if (validate_name(name, "TAP", 1) == 0) {
+ type = APP_PKTQ_IN_TAP;
+ id = APP_PARAM_ADD(app->tap_params, name);
+ } else if (validate_name(name, "KNI", 1) == 0) {
+ type = APP_PKTQ_IN_KNI;
+ id = APP_PARAM_ADD(app->kni_params, name);
+ APP_PARAM_ADD_LINK_FOR_KNI(app, name);
+ } else if (validate_name(name, "SOURCE", 1) == 0) {
+ type = APP_PKTQ_IN_SOURCE;
+ id = APP_PARAM_ADD(app->source_params, name);
+ } else
+ PARSE_ERROR_INVALID_ELEMENT(0,
+ p->name, "pktq_in", name);
+
+ p->pktq_in[p->n_pktq_in].type = type;
+ p->pktq_in[p->n_pktq_in].id = (uint32_t) id;
+ p->n_pktq_in++;
+ }
+
+ PARSE_ERROR_NO_ELEMENTS((p->n_pktq_in > 0), p->name, "pktq_in");
+}
+
+static void
+parse_pipeline_pktq_out(struct app_params *app,
+ struct app_pipeline_params *p,
+ char *value)
+{
+ p->n_pktq_out = 0;
+
+ while (1) {
+ enum app_pktq_out_type type;
+ int id;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (name == NULL)
+ break;
+
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_pktq_out < RTE_DIM(p->pktq_out)),
+ p->name, "pktq_out", (uint32_t)RTE_DIM(p->pktq_out));
+
+ if (validate_name(name, "TXQ", 2) == 0) {
+ type = APP_PKTQ_OUT_HWQ;
+ id = APP_PARAM_ADD(app->hwq_out_params, name);
+ APP_PARAM_ADD_LINK_FOR_TXQ(app, name);
+ } else if (validate_name(name, "SWQ", 1) == 0) {
+ type = APP_PKTQ_OUT_SWQ;
+ id = APP_PARAM_ADD(app->swq_params, name);
+ } else if (validate_name(name, "TM", 1) == 0) {
+ type = APP_PKTQ_OUT_TM;
+ id = APP_PARAM_ADD(app->tm_params, name);
+ APP_PARAM_ADD_LINK_FOR_TM(app, name);
+ } else if (validate_name(name, "TAP", 1) == 0) {
+ type = APP_PKTQ_OUT_TAP;
+ id = APP_PARAM_ADD(app->tap_params, name);
+ } else if (validate_name(name, "KNI", 1) == 0) {
+ type = APP_PKTQ_OUT_KNI;
+ id = APP_PARAM_ADD(app->kni_params, name);
+ APP_PARAM_ADD_LINK_FOR_KNI(app, name);
+ } else if (validate_name(name, "SINK", 1) == 0) {
+ type = APP_PKTQ_OUT_SINK;
+ id = APP_PARAM_ADD(app->sink_params, name);
+ } else
+ PARSE_ERROR_INVALID_ELEMENT(0,
+ p->name, "pktq_out", name);
+
+ p->pktq_out[p->n_pktq_out].type = type;
+ p->pktq_out[p->n_pktq_out].id = id;
+ p->n_pktq_out++;
+ }
+
+ PARSE_ERROR_NO_ELEMENTS((p->n_pktq_out > 0), p->name, "pktq_out");
+}
+
+static void
+parse_pipeline_msgq_in(struct app_params *app,
+ struct app_pipeline_params *p,
+ char *value)
+{
+ p->n_msgq_in = 0;
+
+ while (1) {
+ int idx;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (name == NULL)
+ break;
+
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_msgq_in < RTE_DIM(p->msgq_in)),
+ p->name, "msgq_in", (uint32_t)(RTE_DIM(p->msgq_in)));
+
+ PARSE_ERROR_INVALID_ELEMENT(
+ (validate_name(name, "MSGQ", 1) == 0),
+ p->name, "msgq_in", name);
+
+ idx = APP_PARAM_ADD(app->msgq_params, name);
+ p->msgq_in[p->n_msgq_in] = idx;
+ p->n_msgq_in++;
+ }
+
+ PARSE_ERROR_NO_ELEMENTS((p->n_msgq_in > 0), p->name, "msgq_in");
+}
+
+static void
+parse_pipeline_msgq_out(struct app_params *app,
+ struct app_pipeline_params *p,
+ char *value)
+{
+ p->n_msgq_out = 0;
+
+ while (1) {
+ int idx;
+ char *name = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (name == NULL)
+ break;
+
+ PARSE_ERROR_TOO_MANY_ELEMENTS(
+ (p->n_msgq_out < RTE_DIM(p->msgq_out)),
+ p->name, "msgq_out", (uint32_t)RTE_DIM(p->msgq_out));
+
+ PARSE_ERROR_INVALID_ELEMENT(
+ (validate_name(name, "MSGQ", 1) == 0),
+ p->name, "msgq_out", name);
+
+ idx = APP_PARAM_ADD(app->msgq_params, name);
+ p->msgq_out[p->n_msgq_out] = idx;
+ p->n_msgq_out++;
+ }
+
+ PARSE_ERROR_NO_ELEMENTS((p->n_msgq_out > 0), p->name, "msgq_out");
+}
+
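+/*
+ * Parse a [PIPELINE<n>] section. The type, core, pktq_in/out, msgq_in/out
+ * and timer_period entries are handled here; any other entry is stored as
+ * a pipeline type specific argument. Each pipeline also gets an implicit
+ * request/response message queue pair, plus a pair shared by all pipelines
+ * running on the same CPU core.
+ */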
+static void
+parse_pipeline(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ char name[CFG_NAME_LEN];
+ struct app_pipeline_params *param;
+ struct rte_cfgfile_entry *entries;
+ ssize_t param_idx;
+ int n_entries, i;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->pipeline_params, section_name);
+ param = &app->pipeline_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "type") == 0) {
+ int w_size = snprintf(param->type, RTE_DIM(param->type),
+ "%s", ent->value);
+
+ PARSE_ERROR(((w_size > 0) &&
+ (w_size < (int)RTE_DIM(param->type))),
+ section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "core") == 0) {
+ int status = parse_pipeline_core(
+ &param->socket_id, &param->core_id,
+ &param->hyper_th_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pktq_in") == 0) {
+ parse_pipeline_pktq_in(app, param, ent->value);
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "pktq_out") == 0) {
+ parse_pipeline_pktq_out(app, param, ent->value);
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "msgq_in") == 0) {
+ parse_pipeline_msgq_in(app, param, ent->value);
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "msgq_out") == 0) {
+ parse_pipeline_msgq_out(app, param, ent->value);
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "timer_period") == 0) {
+ int status = parser_read_uint32(
+ &param->timer_period,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* pipeline type specific items */
+ APP_CHECK((param->n_args < APP_MAX_PIPELINE_ARGS),
+ "Parse error in section \"%s\": too many "
+ "pipeline-specific parameters", section_name);
+
+ param->args_name[param->n_args] = strdup(ent->name);
+ param->args_value[param->n_args] = strdup(ent->value);
+
+ APP_CHECK((param->args_name[param->n_args] != NULL) &&
+ (param->args_value[param->n_args] != NULL),
+ "Parse error: no free memory");
+
+ param->n_args++;
+ }
+
+ snprintf(name, sizeof(name), "MSGQ-REQ-%s", section_name);
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+ param->msgq_in[param->n_msgq_in++] = param_idx;
+
+ snprintf(name, sizeof(name), "MSGQ-RSP-%s", section_name);
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+ param->msgq_out[param->n_msgq_out++] = param_idx;
+
+ snprintf(name, sizeof(name), "MSGQ-REQ-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ param->socket_id,
+ param->core_id,
+ (param->hyper_th_id) ? "h" : "");
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+
+ snprintf(name, sizeof(name), "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ param->socket_id,
+ param->core_id,
+ (param->hyper_th_id) ? "h" : "");
+ param_idx = APP_PARAM_ADD(app->msgq_params, name);
+ app->msgq_params[param_idx].cpu_socket_id = param->socket_id;
+
+ free(entries);
+}
+
+static void
+parse_mempool(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_mempool_params *param;
+ struct rte_cfgfile_entry *entries;
+ ssize_t param_idx;
+ int n_entries, i;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->mempool_params, section_name);
+ param = &app->mempool_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "buffer_size") == 0) {
+ int status = parser_read_uint32(
+ &param->buffer_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pool_size") == 0) {
+ int status = parser_read_uint32(
+ &param->pool_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cache_size") == 0) {
+ int status = parser_read_uint32(
+ &param->cache_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cpu") == 0) {
+ int status = parser_read_uint32(
+ &param->cpu_socket_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
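+/* Parse the "rss_qs" entry: a whitespace-separated list of RX queue ids. */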
+static int
+parse_link_rss_qs(struct app_link_params *p,
+ char *value)
+{
+ p->n_rss_qs = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (p->n_rss_qs == RTE_DIM(p->rss_qs))
+ return -ENOMEM;
+
+ if (parser_read_uint32(&p->rss_qs[p->n_rss_qs++], token))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+parse_link_rss_proto_ipv4(struct app_link_params *p,
+ char *value)
+{
+ uint64_t mask = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (strcmp(token, "IP") == 0) {
+ mask |= ETH_RSS_IPV4;
+ continue;
+ }
+ if (strcmp(token, "FRAG") == 0) {
+ mask |= ETH_RSS_FRAG_IPV4;
+ continue;
+ }
+ if (strcmp(token, "TCP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_TCP;
+ continue;
+ }
+ if (strcmp(token, "UDP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_UDP;
+ continue;
+ }
+ if (strcmp(token, "SCTP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_SCTP;
+ continue;
+ }
+ if (strcmp(token, "OTHER") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV4_OTHER;
+ continue;
+ }
+ return -EINVAL;
+ }
+
+ p->rss_proto_ipv4 = mask;
+ return 0;
+}
+
+static int
+parse_link_rss_proto_ipv6(struct app_link_params *p,
+ char *value)
+{
+ uint64_t mask = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (strcmp(token, "IP") == 0) {
+ mask |= ETH_RSS_IPV6;
+ continue;
+ }
+ if (strcmp(token, "FRAG") == 0) {
+ mask |= ETH_RSS_FRAG_IPV6;
+ continue;
+ }
+ if (strcmp(token, "TCP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_TCP;
+ continue;
+ }
+ if (strcmp(token, "UDP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_UDP;
+ continue;
+ }
+ if (strcmp(token, "SCTP") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_SCTP;
+ continue;
+ }
+ if (strcmp(token, "OTHER") == 0) {
+ mask |= ETH_RSS_NONFRAG_IPV6_OTHER;
+ continue;
+ }
+ if (strcmp(token, "IP_EX") == 0) {
+ mask |= ETH_RSS_IPV6_EX;
+ continue;
+ }
+ if (strcmp(token, "TCP_EX") == 0) {
+ mask |= ETH_RSS_IPV6_TCP_EX;
+ continue;
+ }
+ if (strcmp(token, "UDP_EX") == 0) {
+ mask |= ETH_RSS_IPV6_UDP_EX;
+ continue;
+ }
+ return -EINVAL;
+ }
+
+ p->rss_proto_ipv6 = mask;
+ return 0;
+}
+
+static int
+parse_link_rss_proto_l2(struct app_link_params *p,
+ char *value)
+{
+ uint64_t mask = 0;
+
+ while (1) {
+ char *token = strtok_r(value, PARSE_DELIMITER, &value);
+
+ if (token == NULL)
+ break;
+
+ if (strcmp(token, "L2") == 0) {
+ mask |= ETH_RSS_L2_PAYLOAD;
+ continue;
+ }
+ return -EINVAL;
+ }
+
+ p->rss_proto_l2 = mask;
+ return 0;
+}
+
+static void
+parse_link(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_link_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ int rss_qs_present = 0;
+ int rss_proto_ipv4_present = 0;
+ int rss_proto_ipv6_present = 0;
+ int rss_proto_l2_present = 0;
+ int pci_bdf_present = 0;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->link_params, section_name);
+ param = &app->link_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "promisc") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->promisc = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "arp_q") == 0) {
+ int status = parser_read_uint32(&param->arp_q,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "tcp_syn_q") == 0) {
+ int status = parser_read_uint32(
+ &param->tcp_syn_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name, ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "ip_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->ip_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "tcp_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->tcp_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "udp_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->udp_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "sctp_local_q") == 0) {
+ int status = parser_read_uint32(
+ &param->sctp_local_q, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_qs") == 0) {
+ int status = parse_link_rss_qs(param, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ rss_qs_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_proto_ipv4") == 0) {
+ int status =
+ parse_link_rss_proto_ipv4(param, ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ rss_proto_ipv4_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_proto_ipv6") == 0) {
+ int status =
+ parse_link_rss_proto_ipv6(param, ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ rss_proto_ipv6_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "rss_proto_l2") == 0) {
+ int status = parse_link_rss_proto_l2(param, ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ rss_proto_l2_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "pci_bdf") == 0) {
+ PARSE_ERROR_DUPLICATE((pci_bdf_present == 0),
+ section_name, ent->name);
+
+ snprintf(param->pci_bdf, APP_LINK_PCI_BDF_SIZE,
+ "%s", ent->value);
+ pci_bdf_present = 1;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ /* Check for mandatory fields */
+ if (app->port_mask)
+ PARSE_ERROR_MESSAGE((pci_bdf_present == 0),
+ section_name, "pci_bdf",
+ "entry not allowed (port_mask is provided)");
+ else
+ PARSE_ERROR_MESSAGE((pci_bdf_present),
+ section_name, "pci_bdf",
+ "this entry is mandatory (port_mask is not "
+ "provided)");
+
+ if (rss_proto_ipv4_present)
+ PARSE_ERROR_MESSAGE((rss_qs_present),
+ section_name, "rss_proto_ipv4",
+ "entry not allowed (rss_qs entry is not provided)");
+ if (rss_proto_ipv6_present)
+ PARSE_ERROR_MESSAGE((rss_qs_present),
+ section_name, "rss_proto_ipv6",
+ "entry not allowed (rss_qs entry is not provided)");
+ if (rss_proto_l2_present)
+ PARSE_ERROR_MESSAGE((rss_qs_present),
+ section_name, "rss_proto_l2",
+ "entry not allowed (rss_qs entry is not provided)");
+ if (rss_proto_ipv4_present ||
+ rss_proto_ipv6_present ||
+ rss_proto_l2_present) {
+ if (rss_proto_ipv4_present == 0)
+ param->rss_proto_ipv4 = 0;
+ if (rss_proto_ipv6_present == 0)
+ param->rss_proto_ipv6 = 0;
+ if (rss_proto_l2_present == 0)
+ param->rss_proto_l2 = 0;
+ }
+
+ free(entries);
+}
+
+static void
+parse_rxq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_hwq_in_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->hwq_in_params, section_name);
+ param = &app->hwq_in_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_RXQ(app, section_name);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
+ param->mempool_id = idx;
+ continue;
+ }
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst") == 0) {
+ int status = parser_read_uint32(&param->burst,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
+static void
+parse_txq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_hwq_out_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->hwq_out_params, section_name);
+ param = &app->hwq_out_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_TXQ(app, section_name);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst") == 0) {
+ int status = parser_read_uint32(&param->burst,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
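+/*
+ * Parse a [SWQ<n>] section. The mtu, metadata_size, mempool_direct and
+ * mempool_indirect entries are only allowed when IPv4/IPv6 fragmentation
+ * is enabled on this queue (checked after the entry loop).
+ */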
+static void
+parse_swq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_swq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ uint32_t mtu_present = 0;
+ uint32_t metadata_size_present = 0;
+ uint32_t mempool_direct_present = 0;
+ uint32_t mempool_indirect_present = 0;
+
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->swq_params, section_name);
+ param = &app->swq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_read, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_write, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cpu") == 0) {
+ int status = parser_read_uint32(
+ &param->cpu_socket_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name, ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv4_frag") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+
+ param->ipv4_frag = status;
+ if (param->mtu == 0)
+ param->mtu = 1500;
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv6_frag") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->ipv6_frag = status;
+ if (param->mtu == 0)
+ param->mtu = 1320;
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv4_ras") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->ipv4_ras = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "ipv6_ras") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->ipv6_ras = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mtu") == 0) {
+ int status = parser_read_uint32(&param->mtu,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ mtu_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "metadata_size") == 0) {
+ int status = parser_read_uint32(
+ &param->metadata_size, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ metadata_size_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool_direct") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
+ param->mempool_direct_id = idx;
+
+ mempool_direct_present = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool_indirect") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
+ param->mempool_indirect_id = idx;
+
+ mempool_indirect_present = 1;
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ APP_CHECK(((mtu_present == 0) ||
+ ((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
+ "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+ "is off, therefore entry \"mtu\" is not allowed",
+ section_name);
+
+ APP_CHECK(((metadata_size_present == 0) ||
+ ((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
+ "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+ "is off, therefore entry \"metadata_size\" is "
+ "not allowed", section_name);
+
+ APP_CHECK(((mempool_direct_present == 0) ||
+ ((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
+ "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+ "is off, therefore entry \"mempool_direct\" is "
+ "not allowed", section_name);
+
+ APP_CHECK(((mempool_indirect_present == 0) ||
+ ((param->ipv4_frag == 1) || (param->ipv6_frag == 1))),
+ "Parse error in section \"%s\": IPv4/IPv6 fragmentation "
+ "is off, therefore entry \"mempool_indirect\" is "
+ "not allowed", section_name);
+
+ free(entries);
+}
+
+static void
+parse_tm(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_tm_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->tm_params, section_name);
+ param = &app->tm_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_TM(app, section_name);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "cfg") == 0) {
+ param->file_name = strdup(ent->value);
+ PARSE_ERROR_MALLOC(param->file_name != NULL);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_read, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_write, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
+static void
+parse_tap(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_tap_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->tap_params, section_name);
+ param = &app->tap_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_read, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(
+ &param->burst_write, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
+ param->mempool_id = idx;
+
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
+static void
+parse_kni(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_kni_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->kni_params, section_name);
+ param = &app->kni_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ APP_PARAM_ADD_LINK_FOR_KNI(app, section_name);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "core") == 0) {
+ int status = parse_pipeline_core(
+ &param->socket_id,
+ &param->core_id,
+ &param->hyper_th_id,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ param->force_bind = 1;
+ continue;
+ }
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
+ param->mempool_id = idx;
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_read") == 0) {
+ int status = parser_read_uint32(&param->burst_read,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst_write") == 0) {
+ int status = parser_read_uint32(&param->burst_write,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "dropless") == 0) {
+ int status = parser_read_arg_bool(ent->value);
+
+ PARSE_ERROR((status != -EINVAL), section_name,
+ ent->name);
+ param->dropless = status;
+ continue;
+ }
+
+ if (strcmp(ent->name, "n_retries") == 0) {
+ int status = parser_read_uint64(&param->n_retries,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
+static void
+parse_source(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_source_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+ uint32_t pcap_file_present = 0;
+ uint32_t pcap_size_present = 0;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->source_params, section_name);
+ param = &app->source_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "mempool") == 0) {
+ int status = validate_name(ent->value,
+ "MEMPOOL", 1);
+ ssize_t idx;
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ idx = APP_PARAM_ADD(app->mempool_params, ent->value);
+ param->mempool_id = idx;
+ continue;
+ }
+
+ if (strcmp(ent->name, "burst") == 0) {
+ int status = parser_read_uint32(&param->burst,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_file_rd") == 0) {
+ PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
+ section_name, ent->name);
+
+ param->file_name = strdup(ent->value);
+
+ PARSE_ERROR_MALLOC(param->file_name != NULL);
+ pcap_file_present = 1;
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_bytes_rd_per_pkt") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((pcap_size_present == 0),
+ section_name, ent->name);
+
+ status = parser_read_uint32(
+ &param->n_bytes_per_pkt, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ pcap_size_present = 1;
+
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
+static void
+parse_sink(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_pktq_sink_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+ uint32_t pcap_file_present = 0;
+ uint32_t pcap_n_pkt_present = 0;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->sink_params, section_name);
+ param = &app->sink_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "pcap_file_wr") == 0) {
+ PARSE_ERROR_DUPLICATE((pcap_file_present == 0),
+ section_name, ent->name);
+
+ param->file_name = strdup(ent->value);
+
+ PARSE_ERROR_MALLOC((param->file_name != NULL));
+
+ continue;
+ }
+
+ if (strcmp(ent->name, "pcap_n_pkt_wr") == 0) {
+ int status;
+
+ PARSE_ERROR_DUPLICATE((pcap_n_pkt_present == 0),
+ section_name, ent->name);
+
+ status = parser_read_uint32(
+ &param->n_pkts_to_dump, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
+static void
+parse_msgq_req_pipeline(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_msgq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
+ param = &app->msgq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
+static void
+parse_msgq_rsp_pipeline(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_msgq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
+ param = &app->msgq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
+static void
+parse_msgq(struct app_params *app,
+ const char *section_name,
+ struct rte_cfgfile *cfg)
+{
+ struct app_msgq_params *param;
+ struct rte_cfgfile_entry *entries;
+ int n_entries, i;
+ ssize_t param_idx;
+
+ n_entries = rte_cfgfile_section_num_entries(cfg, section_name);
+ PARSE_ERROR_SECTION_NO_ENTRIES((n_entries > 0), section_name);
+
+ entries = malloc(n_entries * sizeof(struct rte_cfgfile_entry));
+ PARSE_ERROR_MALLOC(entries != NULL);
+
+ rte_cfgfile_section_entries(cfg, section_name, entries, n_entries);
+
+ param_idx = APP_PARAM_ADD(app->msgq_params, section_name);
+ param = &app->msgq_params[param_idx];
+ PARSE_CHECK_DUPLICATE_SECTION(param);
+
+ for (i = 0; i < n_entries; i++) {
+ struct rte_cfgfile_entry *ent = &entries[i];
+
+ if (strcmp(ent->name, "size") == 0) {
+ int status = parser_read_uint32(&param->size,
+ ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ if (strcmp(ent->name, "cpu") == 0) {
+ int status = parser_read_uint32(
+ &param->cpu_socket_id, ent->value);
+
+ PARSE_ERROR((status == 0), section_name,
+ ent->name);
+ continue;
+ }
+
+ /* unrecognized */
+ PARSE_ERROR_INVALID(0, section_name, ent->name);
+ }
+
+ free(entries);
+}
+
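+/*
+ * Config file section dispatch: each section name prefix maps to a parse
+ * handler; "numbers" is the numeric suffix format expected by
+ * validate_name() (e.g. RXQ/TXQ sections are named per link and per queue,
+ * as in "RXQ1.0").
+ */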
+typedef void (*config_section_load)(struct app_params *p,
+ const char *section_name,
+ struct rte_cfgfile *cfg);
+
+struct config_section {
+ const char prefix[CFG_NAME_LEN];
+ int numbers;
+ config_section_load load;
+};
+
+static const struct config_section cfg_file_scheme[] = {
+ {"EAL", 0, parse_eal},
+ {"PIPELINE", 1, parse_pipeline},
+ {"MEMPOOL", 1, parse_mempool},
+ {"LINK", 1, parse_link},
+ {"RXQ", 2, parse_rxq},
+ {"TXQ", 2, parse_txq},
+ {"SWQ", 1, parse_swq},
+ {"TM", 1, parse_tm},
+ {"TAP", 1, parse_tap},
+ {"KNI", 1, parse_kni},
+ {"SOURCE", 1, parse_source},
+ {"SINK", 1, parse_sink},
+ {"MSGQ-REQ-PIPELINE", 1, parse_msgq_req_pipeline},
+ {"MSGQ-RSP-PIPELINE", 1, parse_msgq_rsp_pipeline},
+ {"MSGQ", 1, parse_msgq},
+};
+
+static void
+create_implicit_mempools(struct app_params *app)
+{
+ APP_PARAM_ADD(app->mempool_params, "MEMPOOL0");
+}
+
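+/*
+ * When a port mask is provided, create one LINK<n> object per bit set in
+ * the mask; the bit position gives the PMD port id.
+ */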
+static void
+create_implicit_links_from_port_mask(struct app_params *app,
+ uint64_t port_mask)
+{
+ uint32_t pmd_id, link_id;
+
+ link_id = 0;
+ for (pmd_id = 0; pmd_id < RTE_MAX_ETHPORTS; pmd_id++) {
+ char name[APP_PARAM_NAME_SIZE];
+ ssize_t idx;
+
+ if ((port_mask & (1LLU << pmd_id)) == 0)
+ continue;
+
+ snprintf(name, sizeof(name), "LINK%" PRIu32, link_id);
+ idx = APP_PARAM_ADD(app->link_params, name);
+
+ app->link_params[idx].pmd_id = pmd_id;
+ link_id++;
+ }
+}
+
+static void
+assign_link_pmd_id_from_pci_bdf(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *link = &app->link_params[i];
+
+ APP_CHECK((strlen(link->pci_bdf)),
+ "Parse error: %s pci_bdf is not configured "
+ "(port_mask is not provided)",
+ link->name);
+
+ link->pmd_id = i;
+ }
+}
+
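+/*
+ * Top-level parser: create the implicit MEMPOOL0 and (optionally) the
+ * port-mask derived LINK objects, load the config file, dispatch each
+ * section to its handler, count the resulting objects, save the effective
+ * configuration to the output file and finally load the TM profiles.
+ */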
+int
+app_config_parse(struct app_params *app, const char *file_name)
+{
+ struct rte_cfgfile *cfg;
+ char **section_names;
+ int i, j, sect_count;
+
+ /* Implicit mempools */
+ create_implicit_mempools(app);
+
+ /* Port mask */
+ if (app->port_mask)
+ create_implicit_links_from_port_mask(app, app->port_mask);
+
+ /* Load application configuration file */
+ cfg = rte_cfgfile_load(file_name, 0);
+ APP_CHECK((cfg != NULL), "Parse error: Unable to load config "
+ "file %s", file_name);
+
+ sect_count = rte_cfgfile_num_sections(cfg, NULL, 0);
+ APP_CHECK((sect_count > 0), "Parse error: number of sections "
+ "in file \"%s\" is %d", file_name,
+ sect_count);
+
+ section_names = malloc(sect_count * sizeof(char *));
+ PARSE_ERROR_MALLOC(section_names != NULL);
+
+ for (i = 0; i < sect_count; i++)
+ section_names[i] = malloc(CFG_NAME_LEN);
+
+ rte_cfgfile_sections(cfg, section_names, sect_count);
+
+ for (i = 0; i < sect_count; i++) {
+ const struct config_section *sch_s;
+ int len, cfg_name_len;
+
+ cfg_name_len = strlen(section_names[i]);
+
+ /* Find section type */
+ for (j = 0; j < (int)RTE_DIM(cfg_file_scheme); j++) {
+ sch_s = &cfg_file_scheme[j];
+ len = strlen(sch_s->prefix);
+
+ if (cfg_name_len < len)
+ continue;
+
+ /* After the section name prefix we expect only '\0', a digit,
+ * or digit '.' digit, so protect against false matches:
+ * prefix "ABC" should match section name "ABC0.0",
+ * but not section name "ABCDEF".
+ */
+ if ((section_names[i][len] != '\0') &&
+ !isdigit(section_names[i][len]))
+ continue;
+
+ if (strncmp(sch_s->prefix, section_names[i], len) == 0)
+ break;
+ }
+
+ APP_CHECK(j < (int)RTE_DIM(cfg_file_scheme),
+ "Parse error: unknown section %s",
+ section_names[i]);
+
+ APP_CHECK(validate_name(section_names[i],
+ sch_s->prefix,
+ sch_s->numbers) == 0,
+ "Parse error: invalid section name \"%s\"",
+ section_names[i]);
+
+ sch_s->load(app, section_names[i], cfg);
+ }
+
+ for (i = 0; i < sect_count; i++)
+ free(section_names[i]);
+
+ free(section_names);
+
+ rte_cfgfile_close(cfg);
+
+ APP_PARAM_COUNT(app->mempool_params, app->n_mempools);
+ APP_PARAM_COUNT(app->link_params, app->n_links);
+ APP_PARAM_COUNT(app->hwq_in_params, app->n_pktq_hwq_in);
+ APP_PARAM_COUNT(app->hwq_out_params, app->n_pktq_hwq_out);
+ APP_PARAM_COUNT(app->swq_params, app->n_pktq_swq);
+ APP_PARAM_COUNT(app->tm_params, app->n_pktq_tm);
+ APP_PARAM_COUNT(app->tap_params, app->n_pktq_tap);
+ APP_PARAM_COUNT(app->kni_params, app->n_pktq_kni);
+ APP_PARAM_COUNT(app->source_params, app->n_pktq_source);
+ APP_PARAM_COUNT(app->sink_params, app->n_pktq_sink);
+ APP_PARAM_COUNT(app->msgq_params, app->n_msgq);
+ APP_PARAM_COUNT(app->pipeline_params, app->n_pipelines);
+
+ if (app->port_mask == 0)
+ assign_link_pmd_id_from_pci_bdf(app);
+
+ /* Save configuration to output file */
+ app_config_save(app, app->output_file);
+
+ /* Load TM configuration files */
+ app_config_parse_tm(app);
+
+ return 0;
+}
+
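+/*
+ * The save_*_params() helpers below dump the effective configuration back
+ * to the output file; derived values are written as ";"-prefixed comment
+ * lines.
+ */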
+static void
+save_eal_params(struct app_params *app, FILE *f)
+{
+ struct app_eal_params *p = &app->eal_params;
+ uint32_t i;
+
+ fprintf(f, "[EAL]\n");
+
+ if (p->coremap)
+ fprintf(f, "%s = %s\n", "lcores", p->coremap);
+
+ if (p->master_lcore_present)
+ fprintf(f, "%s = %" PRIu32 "\n",
+ "master_lcore", p->master_lcore);
+
+ if (p->channels_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "n", p->channels);
+
+ if (p->memory_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "m", p->memory);
+
+ if (p->ranks_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "r", p->ranks);
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_blacklist[i] == NULL)
+ break;
+
+ fprintf(f, "%s = %s\n", "pci_blacklist",
+ p->pci_blacklist[i]);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_whitelist[i] == NULL)
+ break;
+
+ fprintf(f, "%s = %s\n", "pci_whitelist",
+ p->pci_whitelist[i]);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->vdev[i] == NULL)
+ break;
+
+ fprintf(f, "%s = %s\n", "vdev",
+ p->vdev[i]);
+ }
+
+ if (p->vmware_tsc_map_present)
+ fprintf(f, "%s = %s\n", "vmware_tsc_map",
+ (p->vmware_tsc_map) ? "yes" : "no");
+
+ if (p->proc_type)
+ fprintf(f, "%s = %s\n", "proc_type", p->proc_type);
+
+ if (p->syslog)
+ fprintf(f, "%s = %s\n", "syslog", p->syslog);
+
+ if (p->log_level_present)
+ fprintf(f, "%s = %" PRIu32 "\n", "log_level", p->log_level);
+
+ if (p->version_present)
+ fprintf(f, "%s = %s\n", "v", (p->version) ? "yes" : "no");
+
+ if (p->help_present)
+ fprintf(f, "%s = %s\n", "help", (p->help) ? "yes" : "no");
+
+ if (p->no_huge_present)
+ fprintf(f, "%s = %s\n", "no_huge", (p->no_huge) ? "yes" : "no");
+
+ if (p->no_pci_present)
+ fprintf(f, "%s = %s\n", "no_pci", (p->no_pci) ? "yes" : "no");
+
+ if (p->no_hpet_present)
+ fprintf(f, "%s = %s\n", "no_hpet", (p->no_hpet) ? "yes" : "no");
+
+ if (p->no_shconf_present)
+ fprintf(f, "%s = %s\n", "no_shconf",
+ (p->no_shconf) ? "yes" : "no");
+
+ if (p->add_driver)
+ fprintf(f, "%s = %s\n", "d", p->add_driver);
+
+ if (p->socket_mem)
+ fprintf(f, "%s = %s\n", "socket_mem", p->socket_mem);
+
+ if (p->huge_dir)
+ fprintf(f, "%s = %s\n", "huge_dir", p->huge_dir);
+
+ if (p->file_prefix)
+ fprintf(f, "%s = %s\n", "file_prefix", p->file_prefix);
+
+ if (p->base_virtaddr)
+ fprintf(f, "%s = %s\n", "base_virtaddr", p->base_virtaddr);
+
+ if (p->create_uio_dev_present)
+ fprintf(f, "%s = %s\n", "create_uio_dev",
+ (p->create_uio_dev) ? "yes" : "no");
+
+ if (p->vfio_intr)
+ fprintf(f, "%s = %s\n", "vfio_intr", p->vfio_intr);
+
+ if (p->xen_dom0_present)
+ fprintf(f, "%s = %s\n", "xen_dom0",
+ (p->xen_dom0) ? "yes" : "no");
+
+ fputc('\n', f);
+}
+
+static void
+save_mempool_params(struct app_params *app, FILE *f)
+{
+ struct app_mempool_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->mempool_params);
+ for (i = 0; i < count; i++) {
+ p = &app->mempool_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "buffer_size", p->buffer_size);
+ fprintf(f, "%s = %" PRIu32 "\n", "pool_size", p->pool_size);
+ fprintf(f, "%s = %" PRIu32 "\n", "cache_size", p->cache_size);
+ fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_links_params(struct app_params *app, FILE *f)
+{
+ struct app_link_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->link_params);
+ for (i = 0; i < count; i++) {
+ p = &app->link_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "; %s = %" PRIu32 "\n", "pmd_id", p->pmd_id);
+ fprintf(f, "%s = %s\n", "promisc", p->promisc ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu32 "\n", "arp_q", p->arp_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "tcp_syn_q",
+ p->tcp_syn_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "ip_local_q", p->ip_local_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "tcp_local_q", p->tcp_local_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "udp_local_q", p->udp_local_q);
+ fprintf(f, "%s = %" PRIu32 "\n", "sctp_local_q",
+ p->sctp_local_q);
+
+ if (p->n_rss_qs) {
+ uint32_t j;
+
+ /* rss_qs */
+ fprintf(f, "rss_qs = ");
+ for (j = 0; j < p->n_rss_qs; j++)
+ fprintf(f, "%" PRIu32 " ", p->rss_qs[j]);
+ fputc('\n', f);
+
+ /* rss_proto_ipv4 */
+ if (p->rss_proto_ipv4) {
+ fprintf(f, "rss_proto_ipv4 = ");
+ if (p->rss_proto_ipv4 & ETH_RSS_IPV4)
+ fprintf(f, "IP ");
+ if (p->rss_proto_ipv4 & ETH_RSS_FRAG_IPV4)
+ fprintf(f, "FRAG ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_TCP)
+ fprintf(f, "TCP ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_UDP)
+ fprintf(f, "UDP ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_SCTP)
+ fprintf(f, "SCTP ");
+ if (p->rss_proto_ipv4 &
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+ fprintf(f, "OTHER ");
+ fprintf(f, "\n");
+ } else
+ fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
+
+ /* rss_proto_ipv6 */
+ if (p->rss_proto_ipv6) {
+ fprintf(f, "rss_proto_ipv6 = ");
+ if (p->rss_proto_ipv6 & ETH_RSS_IPV6)
+ fprintf(f, "IP ");
+ if (p->rss_proto_ipv6 & ETH_RSS_FRAG_IPV6)
+ fprintf(f, "FRAG ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_TCP)
+ fprintf(f, "TCP ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_UDP)
+ fprintf(f, "UDP ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_SCTP)
+ fprintf(f, "SCTP ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_NONFRAG_IPV6_OTHER)
+ fprintf(f, "OTHER ");
+ if (p->rss_proto_ipv6 & ETH_RSS_IPV6_EX)
+ fprintf(f, "IP_EX ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_IPV6_TCP_EX)
+ fprintf(f, "TCP_EX ");
+ if (p->rss_proto_ipv6 &
+ ETH_RSS_IPV6_UDP_EX)
+ fprintf(f, "UDP_EX ");
+ fprintf(f, "\n");
+ } else
+ fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
+
+ /* rss_proto_l2 */
+ if (p->rss_proto_l2) {
+ fprintf(f, "rss_proto_l2 = ");
+ if (p->rss_proto_l2 & ETH_RSS_L2_PAYLOAD)
+ fprintf(f, "L2 ");
+ fprintf(f, "\n");
+ } else
+ fprintf(f, "; rss_proto_l2 = <NONE>\n");
+ } else {
+ fprintf(f, "; rss_qs = <NONE>\n");
+ fprintf(f, "; rss_proto_ipv4 = <NONE>\n");
+ fprintf(f, "; rss_proto_ipv6 = <NONE>\n");
+ fprintf(f, "; rss_proto_l2 = <NONE>\n");
+ }
+
+ if (strlen(p->pci_bdf))
+ fprintf(f, "%s = %s\n", "pci_bdf", p->pci_bdf);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_rxq_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_hwq_in_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->hwq_in_params);
+ for (i = 0; i < count; i++) {
+ p = &app->hwq_in_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n",
+ "mempool",
+ app->mempool_params[p->mempool_id].name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_txq_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_hwq_out_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->hwq_out_params);
+ for (i = 0; i < count; i++) {
+ p = &app->hwq_out_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
+ fprintf(f, "%s = %s\n",
+ "dropless",
+ p->dropless ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_swq_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_swq_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->swq_params);
+ for (i = 0; i < count; i++) {
+ p = &app->swq_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+ fprintf(f, "%s = %s\n", "dropless", p->dropless ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
+ fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
+ fprintf(f, "%s = %s\n", "ipv4_frag", p->ipv4_frag ? "yes" : "no");
+ fprintf(f, "%s = %s\n", "ipv6_frag", p->ipv6_frag ? "yes" : "no");
+ fprintf(f, "%s = %s\n", "ipv4_ras", p->ipv4_ras ? "yes" : "no");
+ fprintf(f, "%s = %s\n", "ipv6_ras", p->ipv6_ras ? "yes" : "no");
+ if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1)) {
+ fprintf(f, "%s = %" PRIu32 "\n", "mtu", p->mtu);
+ fprintf(f, "%s = %" PRIu32 "\n", "metadata_size", p->metadata_size);
+ fprintf(f, "%s = %s\n",
+ "mempool_direct",
+ app->mempool_params[p->mempool_direct_id].name);
+ fprintf(f, "%s = %s\n",
+ "mempool_indirect",
+ app->mempool_params[p->mempool_indirect_id].name);
+ }
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_tm_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_tm_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->tm_params);
+ for (i = 0; i < count; i++) {
+ p = &app->tm_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n", "cfg", p->file_name);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_tap_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_tap_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->tap_params);
+ for (i = 0; i < count; i++) {
+ p = &app->tap_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+ fprintf(f, "%s = %s\n", "dropless", p->dropless ? "yes" : "no");
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
+ fprintf(f, "%s = %s\n", "mempool",
+ app->mempool_params[p->mempool_id].name);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_kni_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_kni_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->kni_params);
+ for (i = 0; i < count; i++) {
+ p = &app->kni_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ /* section name */
+ fprintf(f, "[%s]\n", p->name);
+
+ /* core */
+ if (p->force_bind) {
+ fprintf(f, "; force_bind = 1\n");
+ fprintf(f, "core = s%" PRIu32 "c%" PRIu32 "%s\n",
+ p->socket_id,
+ p->core_id,
+ (p->hyper_th_id) ? "h" : "");
+ } else
+ fprintf(f, "; force_bind = 0\n");
+
+ /* mempool */
+ fprintf(f, "%s = %s\n", "mempool",
+ app->mempool_params[p->mempool_id].name);
+
+ /* burst_read */
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_read", p->burst_read);
+
+ /* burst_write */
+ fprintf(f, "%s = %" PRIu32 "\n", "burst_write", p->burst_write);
+
+ /* dropless */
+ fprintf(f, "%s = %s\n",
+ "dropless",
+ p->dropless ? "yes" : "no");
+
+ /* n_retries */
+ fprintf(f, "%s = %" PRIu64 "\n", "n_retries", p->n_retries);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_source_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_source_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->source_params);
+ for (i = 0; i < count; i++) {
+ p = &app->source_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n",
+ "mempool",
+ app->mempool_params[p->mempool_id].name);
+ fprintf(f, "%s = %" PRIu32 "\n", "burst", p->burst);
+ fprintf(f, "%s = %s\n", "pcap_file_rd", p->file_name);
+ fprintf(f, "%s = %" PRIu32 "\n", "pcap_bytes_rd_per_pkt",
+ p->n_bytes_per_pkt);
+ fputc('\n', f);
+ }
+}
+
+static void
+save_sink_params(struct app_params *app, FILE *f)
+{
+ struct app_pktq_sink_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->sink_params);
+ for (i = 0; i < count; i++) {
+ p = &app->sink_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %s\n", "pcap_file_wr", p->file_name);
+ fprintf(f, "%s = %" PRIu32 "\n",
+ "pcap_n_pkt_wr", p->n_pkts_to_dump);
+ fputc('\n', f);
+ }
+}
+
+static void
+save_msgq_params(struct app_params *app, FILE *f)
+{
+ struct app_msgq_params *p;
+ size_t i, count;
+
+ count = RTE_DIM(app->msgq_params);
+ for (i = 0; i < count; i++) {
+ p = &app->msgq_params[i];
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ fprintf(f, "[%s]\n", p->name);
+ fprintf(f, "%s = %" PRIu32 "\n", "size", p->size);
+ fprintf(f, "%s = %" PRIu32 "\n", "cpu", p->cpu_socket_id);
+
+ fputc('\n', f);
+ }
+}
+
+static void
+save_pipeline_params(struct app_params *app, FILE *f)
+{
+ size_t i, count;
+
+ count = RTE_DIM(app->pipeline_params);
+ for (i = 0; i < count; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+
+ if (!APP_PARAM_VALID(p))
+ continue;
+
+ /* section name */
+ fprintf(f, "[%s]\n", p->name);
+
+ /* type */
+ fprintf(f, "type = %s\n", p->type);
+
+ /* core */
+ fprintf(f, "core = s%" PRIu32 "c%" PRIu32 "%s\n",
+ p->socket_id,
+ p->core_id,
+ (p->hyper_th_id) ? "h" : "");
+
+ /* pktq_in */
+ if (p->n_pktq_in) {
+ uint32_t j;
+
+ fprintf(f, "pktq_in =");
+ for (j = 0; j < p->n_pktq_in; j++) {
+ struct app_pktq_in_params *pp = &p->pktq_in[j];
+ char *name;
+
+ switch (pp->type) {
+ case APP_PKTQ_IN_HWQ:
+ name = app->hwq_in_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_SWQ:
+ name = app->swq_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_TM:
+ name = app->tm_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_TAP:
+ name = app->tap_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_KNI:
+ name = app->kni_params[pp->id].name;
+ break;
+ case APP_PKTQ_IN_SOURCE:
+ name = app->source_params[pp->id].name;
+ break;
+ default:
+ APP_CHECK(0, "System error "
+ "occurred while saving "
+ "parameter to file");
+ }
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+		/* pktq_out */
+ if (p->n_pktq_out) {
+ uint32_t j;
+
+ fprintf(f, "pktq_out =");
+ for (j = 0; j < p->n_pktq_out; j++) {
+ struct app_pktq_out_params *pp =
+ &p->pktq_out[j];
+ char *name;
+
+ switch (pp->type) {
+ case APP_PKTQ_OUT_HWQ:
+ name = app->hwq_out_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_SWQ:
+ name = app->swq_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_TM:
+ name = app->tm_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_TAP:
+ name = app->tap_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_KNI:
+ name = app->kni_params[pp->id].name;
+ break;
+ case APP_PKTQ_OUT_SINK:
+ name = app->sink_params[pp->id].name;
+ break;
+ default:
+ APP_CHECK(0, "System error "
+ "occurred while saving "
+ "parameter to file");
+ }
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* msgq_in */
+ if (p->n_msgq_in) {
+ uint32_t j;
+
+ fprintf(f, "msgq_in =");
+ for (j = 0; j < p->n_msgq_in; j++) {
+ uint32_t id = p->msgq_in[j];
+ char *name = app->msgq_params[id].name;
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* msgq_out */
+ if (p->n_msgq_out) {
+ uint32_t j;
+
+ fprintf(f, "msgq_out =");
+ for (j = 0; j < p->n_msgq_out; j++) {
+ uint32_t id = p->msgq_out[j];
+ char *name = app->msgq_params[id].name;
+
+ fprintf(f, " %s", name);
+ }
+ fprintf(f, "\n");
+ }
+
+ /* timer_period */
+ fprintf(f, "timer_period = %" PRIu32 "\n", p->timer_period);
+
+ /* args */
+ if (p->n_args) {
+ uint32_t j;
+
+ for (j = 0; j < p->n_args; j++)
+ fprintf(f, "%s = %s\n", p->args_name[j],
+ p->args_value[j]);
+ }
+
+ fprintf(f, "\n");
+ }
+}
+
+void
+app_config_save(struct app_params *app, const char *file_name)
+{
+ FILE *file;
+ char *name, *dir_name;
+ int status;
+
+ name = strdup(file_name);
+ dir_name = dirname(name);
+ status = access(dir_name, W_OK);
+ APP_CHECK((status == 0),
+ "Error: need write access privilege to directory "
+ "\"%s\" to save configuration\n", dir_name);
+
+ file = fopen(file_name, "w");
+ APP_CHECK((file != NULL),
+ "Error: failed to save configuration to file \"%s\"",
+ file_name);
+
+ save_eal_params(app, file);
+ save_pipeline_params(app, file);
+ save_mempool_params(app, file);
+ save_links_params(app, file);
+ save_rxq_params(app, file);
+ save_txq_params(app, file);
+ save_swq_params(app, file);
+ save_tm_params(app, file);
+ save_tap_params(app, file);
+ save_kni_params(app, file);
+ save_source_params(app, file);
+ save_sink_params(app, file);
+ save_msgq_params(app, file);
+
+ fclose(file);
+ free(name);
+}
+
+int
+app_config_init(struct app_params *app)
+{
+ size_t i;
+
+ memcpy(app, &app_params_default, sizeof(struct app_params));
+
+ for (i = 0; i < RTE_DIM(app->mempool_params); i++)
+ memcpy(&app->mempool_params[i],
+ &mempool_params_default,
+ sizeof(struct app_mempool_params));
+
+ for (i = 0; i < RTE_DIM(app->link_params); i++)
+ memcpy(&app->link_params[i],
+ &link_params_default,
+ sizeof(struct app_link_params));
+
+ for (i = 0; i < RTE_DIM(app->hwq_in_params); i++)
+ memcpy(&app->hwq_in_params[i],
+ &default_hwq_in_params,
+ sizeof(default_hwq_in_params));
+
+ for (i = 0; i < RTE_DIM(app->hwq_out_params); i++)
+ memcpy(&app->hwq_out_params[i],
+ &default_hwq_out_params,
+ sizeof(default_hwq_out_params));
+
+ for (i = 0; i < RTE_DIM(app->swq_params); i++)
+ memcpy(&app->swq_params[i],
+ &default_swq_params,
+ sizeof(default_swq_params));
+
+ for (i = 0; i < RTE_DIM(app->tm_params); i++)
+ memcpy(&app->tm_params[i],
+ &default_tm_params,
+ sizeof(default_tm_params));
+
+ for (i = 0; i < RTE_DIM(app->tap_params); i++)
+ memcpy(&app->tap_params[i],
+ &default_tap_params,
+ sizeof(default_tap_params));
+
+ for (i = 0; i < RTE_DIM(app->kni_params); i++)
+ memcpy(&app->kni_params[i],
+ &default_kni_params,
+ sizeof(default_kni_params));
+
+ for (i = 0; i < RTE_DIM(app->source_params); i++)
+ memcpy(&app->source_params[i],
+ &default_source_params,
+ sizeof(default_source_params));
+
+ for (i = 0; i < RTE_DIM(app->sink_params); i++)
+ memcpy(&app->sink_params[i],
+ &default_sink_params,
+ sizeof(default_sink_params));
+
+ for (i = 0; i < RTE_DIM(app->msgq_params); i++)
+ memcpy(&app->msgq_params[i],
+ &default_msgq_params,
+ sizeof(default_msgq_params));
+
+ for (i = 0; i < RTE_DIM(app->pipeline_params); i++)
+ memcpy(&app->pipeline_params[i],
+ &default_pipeline_params,
+ sizeof(default_pipeline_params));
+
+ return 0;
+}
+
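+/* Return a newly allocated string "<filename><suffix>"; the caller owns it. */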
+static char *
+filenamedup(const char *filename, const char *suffix)
+{
+ char *s = malloc(strlen(filename) + strlen(suffix) + 1);
+
+ if (!s)
+ return NULL;
+
+ sprintf(s, "%s%s", filename, suffix);
+ return s;
+}
+
+int
+app_config_args(struct app_params *app, int argc, char **argv)
+{
+ const char *optname;
+ int opt, option_index;
+ int f_present, s_present, p_present, l_present;
+ int preproc_present, preproc_params_present;
+ int scaned = 0;
+
+ static struct option lgopts[] = {
+ { "preproc", 1, 0, 0 },
+ { "preproc-args", 1, 0, 0 },
+ { NULL, 0, 0, 0 }
+ };
+
+ /* Copy application name */
+ strncpy(app->app_name, argv[0], APP_APPNAME_SIZE - 1);
+
+ f_present = 0;
+ s_present = 0;
+ p_present = 0;
+ l_present = 0;
+ preproc_present = 0;
+ preproc_params_present = 0;
+
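+	/*
+	 * Recognized options: -f <config file>, -s <script file>,
+	 * -p <hex port mask>, -l <log level>, --preproc <command> and
+	 * --preproc-args <arguments>. Each may be given at most once.
+	 */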
+ while ((opt = getopt_long(argc, argv, "f:s:p:l:", lgopts,
+ &option_index)) != EOF)
+ switch (opt) {
+ case 'f':
+ if (f_present)
+ rte_panic("Error: Config file is provided "
+ "more than once\n");
+ f_present = 1;
+
+ if (!strlen(optarg))
+ rte_panic("Error: Config file name is null\n");
+
+ app->config_file = strdup(optarg);
+ if (app->config_file == NULL)
+ rte_panic("Error: Memory allocation failure\n");
+
+ break;
+
+ case 's':
+ if (s_present)
+ rte_panic("Error: Script file is provided "
+ "more than once\n");
+ s_present = 1;
+
+ if (!strlen(optarg))
+ rte_panic("Error: Script file name is null\n");
+
+ app->script_file = strdup(optarg);
+ if (app->script_file == NULL)
+ rte_panic("Error: Memory allocation failure\n");
+
+ break;
+
+ case 'p':
+ if (p_present)
+ rte_panic("Error: PORT_MASK is provided "
+ "more than once\n");
+ p_present = 1;
+
+ if ((sscanf(optarg, "%" SCNx64 "%n", &app->port_mask,
+ &scaned) != 1) ||
+ ((size_t) scaned != strlen(optarg)))
+ rte_panic("Error: PORT_MASK is not "
+ "a hexadecimal integer\n");
+
+ if (app->port_mask == 0)
+ rte_panic("Error: PORT_MASK is null\n");
+
+ break;
+
+ case 'l':
+ if (l_present)
+ rte_panic("Error: LOG_LEVEL is provided "
+ "more than once\n");
+ l_present = 1;
+
+ if ((sscanf(optarg, "%" SCNu32 "%n", &app->log_level,
+ &scaned) != 1) ||
+ ((size_t) scaned != strlen(optarg)) ||
+ (app->log_level >= APP_LOG_LEVELS))
+ rte_panic("Error: LOG_LEVEL invalid value\n");
+
+ break;
+
+ case 0:
+ optname = lgopts[option_index].name;
+
+ if (strcmp(optname, "preproc") == 0) {
+ if (preproc_present)
+ rte_panic("Error: Preprocessor argument "
+ "is provided more than once\n");
+ preproc_present = 1;
+
+ app->preproc = strdup(optarg);
+ break;
+ }
+
+ if (strcmp(optname, "preproc-args") == 0) {
+ if (preproc_params_present)
+ rte_panic("Error: Preprocessor args "
+ "are provided more than once\n");
+ preproc_params_present = 1;
+
+ app->preproc_args = strdup(optarg);
+ break;
+ }
+
+ app_print_usage(argv[0]);
+ break;
+
+ default:
+ app_print_usage(argv[0]);
+ }
+
+ optind = 1; /* reset getopt lib */
+
+ /* Check dependencies between args */
+ if (preproc_params_present && (preproc_present == 0))
+ rte_panic("Error: Preprocessor args specified while "
+ "preprocessor is not defined\n");
+
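+	/*
+	 * When a preprocessor is configured, its output is written to
+	 * "<config file>.preproc" and that file is the one parsed later;
+	 * otherwise the config file is parsed directly. The post-parse dump
+	 * goes to "<config file>.out".
+	 */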
+ app->parser_file = preproc_present ?
+ filenamedup(app->config_file, ".preproc") :
+ strdup(app->config_file);
+ app->output_file = filenamedup(app->config_file, ".out");
+
+ return 0;
+}
+
+int
+app_config_preproc(struct app_params *app)
+{
+ char buffer[256];
+ int status;
+
+ if (app->preproc == NULL)
+ return 0;
+
+ status = access(app->config_file, F_OK | R_OK);
+ APP_CHECK((status == 0), "Error: Unable to open file %s",
+ app->config_file);
+
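+	/* Run: "<preproc> <preproc args> <config file> > <parser file>" */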
+ snprintf(buffer, sizeof(buffer), "%s %s %s > %s",
+ app->preproc,
+ app->preproc_args ? app->preproc_args : "",
+ app->config_file,
+ app->parser_file);
+
+ status = system(buffer);
+ APP_CHECK((WIFEXITED(status) && (WEXITSTATUS(status) == 0)),
+ "Error occurred while pre-processing file \"%s\"\n",
+ app->config_file);
+
+ return status;
+}
diff --git a/src/seastar/dpdk/examples/ip_pipeline/config_parse_tm.c b/src/seastar/dpdk/examples/ip_pipeline/config_parse_tm.c
new file mode 100644
index 00000000..e75eed71
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/config_parse_tm.c
@@ -0,0 +1,448 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+
+#include <rte_errno.h>
+#include <rte_cfgfile.h>
+#include <rte_string_fns.h>
+
+#include "app.h"
+
+static int
+tm_cfgfile_load_sched_port(
+ struct rte_cfgfile *file,
+ struct rte_sched_port_params *port_params)
+{
+ const char *entry;
+ int j;
+
+ entry = rte_cfgfile_get_entry(file, "port", "frame overhead");
+ if (entry)
+ port_params->frame_overhead = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, "port", "mtu");
+ if (entry)
+ port_params->mtu = (uint32_t)atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ "port",
+ "number of subports per port");
+ if (entry)
+ port_params->n_subports_per_port = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ "port",
+ "number of pipes per subport");
+ if (entry)
+ port_params->n_pipes_per_subport = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, "port", "queue sizes");
+ if (entry) {
+ char *next;
+
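+		/* Parse the whitespace-separated per-traffic-class queue sizes */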
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+ port_params->qsize[j] = (uint16_t)
+ strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+#ifdef RTE_SCHED_RED
+ for (j = 0; j < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; j++) {
+ char str[32];
+
+ /* Parse WRED min thresholds */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred min", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].min_th
+ = (uint16_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED max thresholds */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred max", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].max_th
+ = (uint16_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED inverse mark probabilities */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred inv prob", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].maxp_inv
+ = (uint8_t)strtol(entry, &next, 10);
+
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+
+ /* Parse WRED EWMA filter weights */
+ snprintf(str, sizeof(str), "tc %" PRId32 " wred weight", j);
+ entry = rte_cfgfile_get_entry(file, "red", str);
+ if (entry) {
+ char *next;
+ int k;
+
+ /* for each packet colour (green, yellow, red) */
+ for (k = 0; k < e_RTE_METER_COLORS; k++) {
+ port_params->red_params[j][k].wq_log2
+ = (uint8_t)strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ }
+#endif /* RTE_SCHED_RED */
+
+ return 0;
+}
+
+static int
+tm_cfgfile_load_sched_pipe(
+ struct rte_cfgfile *file,
+ struct rte_sched_port_params *port_params,
+ struct rte_sched_pipe_params *pipe_params)
+{
+ int i, j;
+ char *next;
+ const char *entry;
+ int profiles;
+
+ profiles = rte_cfgfile_num_sections(file,
+ "pipe profile", sizeof("pipe profile") - 1);
+ port_params->n_pipe_profiles = profiles;
+
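+	/* Load each "pipe profile <j>" section, j = 0 .. n_pipe_profiles - 1 */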
+ for (j = 0; j < profiles; j++) {
+ char pipe_name[32];
+
+ snprintf(pipe_name, sizeof(pipe_name),
+ "pipe profile %" PRId32, j);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tb rate");
+ if (entry)
+ pipe_params[j].tb_rate = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tb size");
+ if (entry)
+ pipe_params[j].tb_size = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc period");
+ if (entry)
+ pipe_params[j].tc_period = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 0 rate");
+ if (entry)
+ pipe_params[j].tc_rate[0] = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 1 rate");
+ if (entry)
+ pipe_params[j].tc_rate[1] = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 2 rate");
+ if (entry)
+ pipe_params[j].tc_rate[2] = (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 3 rate");
+ if (entry)
+ pipe_params[j].tc_rate[3] = (uint32_t) atoi(entry);
+
+#ifdef RTE_SCHED_SUBPORT_TC_OV
+ entry = rte_cfgfile_get_entry(file, pipe_name,
+ "tc 3 oversubscription weight");
+ if (entry)
+ pipe_params[j].tc_ov_weight = (uint8_t)atoi(entry);
+#endif
+
+ entry = rte_cfgfile_get_entry(file,
+ pipe_name,
+ "tc 0 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*0 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 1 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*1 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 2 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*2 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+
+ entry = rte_cfgfile_get_entry(file, pipe_name, "tc 3 wrr weights");
+ if (entry)
+ for (i = 0; i < RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS; i++) {
+ pipe_params[j].wrr_weights[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE*3 + i] =
+ (uint8_t) strtol(entry, &next, 10);
+ if (next == NULL)
+ break;
+ entry = next;
+ }
+ }
+ return 0;
+}
+
+static int
+tm_cfgfile_load_sched_subport(
+ struct rte_cfgfile *file,
+ struct rte_sched_subport_params *subport_params,
+ int *pipe_to_profile)
+{
+ const char *entry;
+ int i, j, k;
+
+ for (i = 0; i < APP_MAX_SCHED_SUBPORTS; i++) {
+ char sec_name[CFG_NAME_LEN];
+
+ snprintf(sec_name, sizeof(sec_name),
+ "subport %" PRId32, i);
+
+ if (rte_cfgfile_has_section(file, sec_name)) {
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tb rate");
+ if (entry)
+ subport_params[i].tb_rate =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tb size");
+ if (entry)
+ subport_params[i].tb_size =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc period");
+ if (entry)
+ subport_params[i].tc_period =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 0 rate");
+ if (entry)
+ subport_params[i].tc_rate[0] =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 1 rate");
+ if (entry)
+ subport_params[i].tc_rate[1] =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 2 rate");
+ if (entry)
+ subport_params[i].tc_rate[2] =
+ (uint32_t) atoi(entry);
+
+ entry = rte_cfgfile_get_entry(file,
+ sec_name,
+ "tc 3 rate");
+ if (entry)
+ subport_params[i].tc_rate[3] =
+ (uint32_t) atoi(entry);
+
+ int n_entries = rte_cfgfile_section_num_entries(file,
+ sec_name);
+ struct rte_cfgfile_entry entries[n_entries];
+
+ rte_cfgfile_section_entries(file,
+ sec_name,
+ entries,
+ n_entries);
+
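+			/*
+			 * Entries named "pipe <begin>-<end>" (or "pipe <n>")
+			 * map a range of pipes in this subport to the pipe
+			 * profile given as the entry value.
+			 */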
+ for (j = 0; j < n_entries; j++)
+ if (strncmp("pipe",
+ entries[j].name,
+ sizeof("pipe") - 1) == 0) {
+ int profile;
+ char *tokens[2] = {NULL, NULL};
+ int n_tokens;
+ int begin, end;
+ char name[CFG_NAME_LEN + 1];
+
+ profile = atoi(entries[j].value);
+ strncpy(name,
+ entries[j].name,
+ sizeof(name));
+ n_tokens = rte_strsplit(
+ &name[sizeof("pipe")],
+ strnlen(name, CFG_NAME_LEN),
+ tokens, 2, '-');
+
+ begin = atoi(tokens[0]);
+ if (n_tokens == 2)
+ end = atoi(tokens[1]);
+ else
+ end = begin;
+
+ if ((end >= APP_MAX_SCHED_PIPES) ||
+ (begin > end))
+ return -1;
+
+ for (k = begin; k <= end; k++) {
+ char profile_name[CFG_NAME_LEN];
+
+ snprintf(profile_name,
+ sizeof(profile_name),
+ "pipe profile %" PRId32,
+ profile);
+ if (rte_cfgfile_has_section(file, profile_name))
+ pipe_to_profile[i * APP_MAX_SCHED_PIPES + k] = profile;
+ else
+ rte_exit(EXIT_FAILURE,
+ "Wrong pipe profile %s\n",
+ entries[j].value);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+tm_cfgfile_load(struct app_pktq_tm_params *tm)
+{
+ struct rte_cfgfile *file;
+ uint32_t i;
+
+ memset(tm->sched_subport_params, 0, sizeof(tm->sched_subport_params));
+ memset(tm->sched_pipe_profiles, 0, sizeof(tm->sched_pipe_profiles));
+ memset(&tm->sched_port_params, 0, sizeof(tm->sched_port_params));
+ for (i = 0; i < APP_MAX_SCHED_SUBPORTS * APP_MAX_SCHED_PIPES; i++)
+ tm->sched_pipe_to_profile[i] = -1;
+
+ tm->sched_port_params.pipe_profiles = &tm->sched_pipe_profiles[0];
+
+ if (tm->file_name[0] == '\0')
+ return -1;
+
+ file = rte_cfgfile_load(tm->file_name, 0);
+ if (file == NULL)
+ return -1;
+
+ tm_cfgfile_load_sched_port(file,
+ &tm->sched_port_params);
+ tm_cfgfile_load_sched_subport(file,
+ tm->sched_subport_params,
+ tm->sched_pipe_to_profile);
+ tm_cfgfile_load_sched_pipe(file,
+ &tm->sched_port_params,
+ tm->sched_pipe_profiles);
+
+ rte_cfgfile_close(file);
+ return 0;
+}
+
+int
+app_config_parse_tm(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < RTE_DIM(app->tm_params); i++) {
+ struct app_pktq_tm_params *p = &app->tm_params[i];
+ int status;
+
+ if (!APP_PARAM_VALID(p))
+ break;
+
+ status = tm_cfgfile_load(p);
+ APP_CHECK(status == 0,
+ "Parse error for %s configuration file \"%s\"\n",
+ p->name,
+ p->file_name);
+ }
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/examples/ip_pipeline/cpu_core_map.c b/src/seastar/dpdk/examples/ip_pipeline/cpu_core_map.c
new file mode 100644
index 00000000..dd8f6785
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/cpu_core_map.c
@@ -0,0 +1,500 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_lcore.h>
+
+#include "cpu_core_map.h"
+
+struct cpu_core_map {
+ uint32_t n_max_sockets;
+ uint32_t n_max_cores_per_socket;
+ uint32_t n_max_ht_per_core;
+ uint32_t n_sockets;
+ uint32_t n_cores_per_socket;
+ uint32_t n_ht_per_core;
+ int map[0];
+};
+
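+/*
+ * Flattened index of (socket_id, core_id, ht_id) into map[]:
+ * socket is the slowest-varying dimension, hyper-thread the fastest.
+ */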
+static inline uint32_t
+cpu_core_map_pos(struct cpu_core_map *map,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t ht_id)
+{
+ return (socket_id * map->n_max_cores_per_socket + core_id) *
+ map->n_max_ht_per_core + ht_id;
+}
+
+static int
+cpu_core_map_compute_eal(struct cpu_core_map *map);
+
+static int
+cpu_core_map_compute_linux(struct cpu_core_map *map);
+
+static int
+cpu_core_map_compute_and_check(struct cpu_core_map *map);
+
+struct cpu_core_map *
+cpu_core_map_init(uint32_t n_max_sockets,
+ uint32_t n_max_cores_per_socket,
+ uint32_t n_max_ht_per_core,
+ uint32_t eal_initialized)
+{
+ uint32_t map_size, map_mem_size, i;
+ struct cpu_core_map *map;
+ int status;
+
+ /* Check input arguments */
+ if ((n_max_sockets == 0) ||
+ (n_max_cores_per_socket == 0) ||
+ (n_max_ht_per_core == 0))
+ return NULL;
+
+ /* Memory allocation */
+ map_size = n_max_sockets * n_max_cores_per_socket * n_max_ht_per_core;
+ map_mem_size = sizeof(struct cpu_core_map) + map_size * sizeof(int);
+ map = (struct cpu_core_map *) malloc(map_mem_size);
+ if (map == NULL)
+ return NULL;
+
+ /* Initialization */
+ map->n_max_sockets = n_max_sockets;
+ map->n_max_cores_per_socket = n_max_cores_per_socket;
+ map->n_max_ht_per_core = n_max_ht_per_core;
+ map->n_sockets = 0;
+ map->n_cores_per_socket = 0;
+ map->n_ht_per_core = 0;
+
+ for (i = 0; i < map_size; i++)
+ map->map[i] = -1;
+
+ status = (eal_initialized) ?
+ cpu_core_map_compute_eal(map) :
+ cpu_core_map_compute_linux(map);
+
+ if (status) {
+ free(map);
+ return NULL;
+ }
+
+ status = cpu_core_map_compute_and_check(map);
+ if (status) {
+ free(map);
+ return NULL;
+ }
+
+ return map;
+}
+
+int
+cpu_core_map_compute_eal(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+
+ /* Compute map */
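+	/*
+	 * For each socket, renumber the physical core ids detected by EAL
+	 * into a contiguous range (core_id_contig) and record the lcore id
+	 * of every hyper-thread found on that core.
+	 */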
+ for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
+ uint32_t n_detected, core_id_contig;
+ int lcore_id;
+
+ n_detected = 0;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ struct lcore_config *p = &lcore_config[lcore_id];
+
+ if ((p->detected) && (p->socket_id == socket_id))
+ n_detected++;
+ }
+
+ core_id_contig = 0;
+
+ for (core_id = 0; n_detected ; core_id++) {
+ ht_id = 0;
+
+ for (lcore_id = 0;
+ lcore_id < RTE_MAX_LCORE;
+ lcore_id++) {
+ struct lcore_config *p =
+ &lcore_config[lcore_id];
+
+ if ((p->detected) &&
+ (p->socket_id == socket_id) &&
+ (p->core_id == core_id)) {
+ uint32_t pos = cpu_core_map_pos(map,
+ socket_id,
+ core_id_contig,
+ ht_id);
+
+ map->map[pos] = lcore_id;
+ ht_id++;
+ n_detected--;
+ }
+ }
+
+ if (ht_id) {
+ core_id_contig++;
+ if (core_id_contig ==
+ map->n_max_cores_per_socket)
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int
+cpu_core_map_compute_and_check(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+
+ /* Compute n_ht_per_core, n_cores_per_socket, n_sockets */
+ for (ht_id = 0; ht_id < map->n_max_ht_per_core; ht_id++) {
+ if (map->map[ht_id] == -1)
+ break;
+
+ map->n_ht_per_core++;
+ }
+
+ if (map->n_ht_per_core == 0)
+ return -1;
+
+ for (core_id = 0; core_id < map->n_max_cores_per_socket; core_id++) {
+ uint32_t pos = core_id * map->n_max_ht_per_core;
+
+ if (map->map[pos] == -1)
+ break;
+
+ map->n_cores_per_socket++;
+ }
+
+ if (map->n_cores_per_socket == 0)
+ return -1;
+
+ for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
+ uint32_t pos = socket_id * map->n_max_cores_per_socket *
+ map->n_max_ht_per_core;
+
+ if (map->map[pos] == -1)
+ break;
+
+ map->n_sockets++;
+ }
+
+ if (map->n_sockets == 0)
+ return -1;
+
+ /* Check that each socket has exactly the same number of cores
+ and that each core has exactly the same number of hyper-threads */
+ for (socket_id = 0; socket_id < map->n_sockets; socket_id++) {
+ for (core_id = 0; core_id < map->n_cores_per_socket; core_id++)
+ for (ht_id = 0;
+ ht_id < map->n_max_ht_per_core;
+ ht_id++) {
+ uint32_t pos = (socket_id *
+ map->n_max_cores_per_socket + core_id) *
+ map->n_max_ht_per_core + ht_id;
+
+ if (((ht_id < map->n_ht_per_core) &&
+ (map->map[pos] == -1)) ||
+ ((ht_id >= map->n_ht_per_core) &&
+ (map->map[pos] != -1)))
+ return -1;
+ }
+
+ for ( ; core_id < map->n_max_cores_per_socket; core_id++)
+ for (ht_id = 0;
+ ht_id < map->n_max_ht_per_core;
+ ht_id++) {
+ uint32_t pos = cpu_core_map_pos(map,
+ socket_id,
+ core_id,
+ ht_id);
+
+ if (map->map[pos] != -1)
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+#define FILE_LINUX_CPU_N_LCORES \
+ "/sys/devices/system/cpu/present"
+
+static int
+cpu_core_map_get_n_lcores_linux(void)
+{
+ char buffer[64], *string;
+ FILE *fd;
+
+ fd = fopen(FILE_LINUX_CPU_N_LCORES, "r");
+ if (fd == NULL)
+ return -1;
+
+ if (fgets(buffer, sizeof(buffer), fd) == NULL) {
+ fclose(fd);
+ return -1;
+ }
+
+ fclose(fd);
+
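+	/* Expected format is "<first>-<last>"; the lcore count is <last> + 1 */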
+ string = index(buffer, '-');
+ if (string == NULL)
+ return -1;
+
+ return atoi(++string) + 1;
+}
+
+#define FILE_LINUX_CPU_CORE_ID \
+ "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/core_id"
+
+static int
+cpu_core_map_get_core_id_linux(int lcore_id)
+{
+ char buffer[64];
+ FILE *fd;
+ int core_id;
+
+ snprintf(buffer, sizeof(buffer), FILE_LINUX_CPU_CORE_ID, lcore_id);
+ fd = fopen(buffer, "r");
+ if (fd == NULL)
+ return -1;
+
+ if (fgets(buffer, sizeof(buffer), fd) == NULL) {
+ fclose(fd);
+ return -1;
+ }
+
+ fclose(fd);
+
+ core_id = atoi(buffer);
+ return core_id;
+}
+
+#define FILE_LINUX_CPU_SOCKET_ID \
+ "/sys/devices/system/cpu/cpu%" PRIu32 "/topology/physical_package_id"
+
+static int
+cpu_core_map_get_socket_id_linux(int lcore_id)
+{
+ char buffer[64];
+ FILE *fd;
+ int socket_id;
+
+ snprintf(buffer, sizeof(buffer), FILE_LINUX_CPU_SOCKET_ID, lcore_id);
+ fd = fopen(buffer, "r");
+ if (fd == NULL)
+ return -1;
+
+ if (fgets(buffer, sizeof(buffer), fd) == NULL) {
+ fclose(fd);
+ return -1;
+ }
+
+ fclose(fd);
+
+ socket_id = atoi(buffer);
+ return socket_id;
+}
+
+int
+cpu_core_map_compute_linux(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+ int n_lcores;
+
+ n_lcores = cpu_core_map_get_n_lcores_linux();
+ if (n_lcores <= 0)
+ return -1;
+
+ /* Compute map */
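+	/*
+	 * Same contiguous renumbering as cpu_core_map_compute_eal(), except
+	 * socket and core ids are read from the sysfs topology files rather
+	 * than the EAL lcore_config table.
+	 */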
+ for (socket_id = 0; socket_id < map->n_max_sockets; socket_id++) {
+ uint32_t n_detected, core_id_contig;
+ int lcore_id;
+
+ n_detected = 0;
+ for (lcore_id = 0; lcore_id < n_lcores; lcore_id++) {
+ int lcore_socket_id =
+ cpu_core_map_get_socket_id_linux(lcore_id);
+
+#if !defined(RTE_ARCH_PPC_64)
+ if (lcore_socket_id < 0)
+ return -1;
+#endif
+
+ if (((uint32_t) lcore_socket_id) == socket_id)
+ n_detected++;
+ }
+
+ core_id_contig = 0;
+
+ for (core_id = 0; n_detected ; core_id++) {
+ ht_id = 0;
+
+ for (lcore_id = 0; lcore_id < n_lcores; lcore_id++) {
+ int lcore_socket_id =
+ cpu_core_map_get_socket_id_linux(
+ lcore_id);
+
+#if !defined(RTE_ARCH_PPC_64)
+ if (lcore_socket_id < 0)
+ return -1;
+
+ int lcore_core_id =
+ cpu_core_map_get_core_id_linux(
+ lcore_id);
+
+ if (lcore_core_id < 0)
+ return -1;
+#endif
+
+#if !defined(RTE_ARCH_PPC_64)
+ if (((uint32_t) lcore_socket_id == socket_id) &&
+ ((uint32_t) lcore_core_id == core_id)) {
+#else
+ if (((uint32_t) lcore_socket_id == socket_id)) {
+#endif
+ uint32_t pos = cpu_core_map_pos(map,
+ socket_id,
+ core_id_contig,
+ ht_id);
+
+ map->map[pos] = lcore_id;
+ ht_id++;
+ n_detected--;
+ }
+ }
+
+ if (ht_id) {
+ core_id_contig++;
+ if (core_id_contig ==
+ map->n_max_cores_per_socket)
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void
+cpu_core_map_print(struct cpu_core_map *map)
+{
+ uint32_t socket_id, core_id, ht_id;
+
+ if (map == NULL)
+ return;
+
+ for (socket_id = 0; socket_id < map->n_sockets; socket_id++) {
+ printf("Socket %" PRIu32 ":\n", socket_id);
+
+ for (core_id = 0;
+ core_id < map->n_cores_per_socket;
+ core_id++) {
+ printf("[%" PRIu32 "] = [", core_id);
+
+ for (ht_id = 0; ht_id < map->n_ht_per_core; ht_id++) {
+ int lcore_id = cpu_core_map_get_lcore_id(map,
+ socket_id,
+ core_id,
+ ht_id);
+
+ uint32_t core_id_noncontig =
+ cpu_core_map_get_core_id_linux(
+ lcore_id);
+
+ printf(" %" PRId32 " (%" PRIu32 ") ",
+ lcore_id,
+ core_id_noncontig);
+ }
+
+ printf("]\n");
+ }
+ }
+}
+
+uint32_t
+cpu_core_map_get_n_sockets(struct cpu_core_map *map)
+{
+ if (map == NULL)
+ return 0;
+
+ return map->n_sockets;
+}
+
+uint32_t
+cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map)
+{
+ if (map == NULL)
+ return 0;
+
+ return map->n_cores_per_socket;
+}
+
+uint32_t
+cpu_core_map_get_n_ht_per_core(struct cpu_core_map *map)
+{
+ if (map == NULL)
+ return 0;
+
+ return map->n_ht_per_core;
+}
+
+int
+cpu_core_map_get_lcore_id(struct cpu_core_map *map,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t ht_id)
+{
+ uint32_t pos;
+
+ if ((map == NULL) ||
+ (socket_id >= map->n_sockets) ||
+ (core_id >= map->n_cores_per_socket) ||
+ (ht_id >= map->n_ht_per_core))
+ return -1;
+
+ pos = cpu_core_map_pos(map, socket_id, core_id, ht_id);
+
+ return map->map[pos];
+}
+
+void
+cpu_core_map_free(struct cpu_core_map *map)
+{
+ free(map);
+}
diff --git a/src/seastar/dpdk/examples/ip_pipeline/cpu_core_map.h b/src/seastar/dpdk/examples/ip_pipeline/cpu_core_map.h
new file mode 100644
index 00000000..5c2ec729
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/cpu_core_map.h
@@ -0,0 +1,69 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_CPU_CORE_MAP_H__
+#define __INCLUDE_CPU_CORE_MAP_H__
+
+#include <stdio.h>
+
+#include <rte_lcore.h>
+
+struct cpu_core_map;
+
+struct cpu_core_map *
+cpu_core_map_init(uint32_t n_max_sockets,
+ uint32_t n_max_cores_per_socket,
+ uint32_t n_max_ht_per_core,
+ uint32_t eal_initialized);
+
+uint32_t
+cpu_core_map_get_n_sockets(struct cpu_core_map *map);
+
+uint32_t
+cpu_core_map_get_n_cores_per_socket(struct cpu_core_map *map);
+
+uint32_t
+cpu_core_map_get_n_ht_per_core(struct cpu_core_map *map);
+
+int
+cpu_core_map_get_lcore_id(struct cpu_core_map *map,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t ht_id);
+
+void cpu_core_map_print(struct cpu_core_map *map);
+
+void
+cpu_core_map_free(struct cpu_core_map *map);
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/init.c b/src/seastar/dpdk/examples/ip_pipeline/init.c
new file mode 100644
index 00000000..be148fca
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/init.c
@@ -0,0 +1,1925 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+#include <netinet/in.h>
+#ifdef RTE_EXEC_ENV_LINUXAPP
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#endif
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_eal.h>
+#include <rte_malloc.h>
+
+#include "app.h"
+#include "pipeline.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_master.h"
+#include "pipeline_passthrough.h"
+#include "pipeline_firewall.h"
+#include "pipeline_flow_classification.h"
+#include "pipeline_flow_actions.h"
+#include "pipeline_routing.h"
+#include "thread_fe.h"
+
+#define APP_NAME_SIZE 32
+
+#define APP_RETA_SIZE_MAX (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)
+
+static void
+app_init_core_map(struct app_params *app)
+{
+ APP_LOG(app, HIGH, "Initializing CPU core map ...");
+ app->core_map = cpu_core_map_init(RTE_MAX_NUMA_NODES, RTE_MAX_LCORE,
+ 4, 0);
+
+ if (app->core_map == NULL)
+ rte_panic("Cannot create CPU core map\n");
+
+ if (app->log_level >= APP_LOG_LEVEL_LOW)
+ cpu_core_map_print(app->core_map);
+}
+
+/* Core Mask String in Hex Representation */
+#define APP_CORE_MASK_STRING_SIZE ((64 * APP_CORE_MASK_SIZE) / 8 * 2 + 1)
+
+static void
+app_init_core_mask(struct app_params *app)
+{
+ uint32_t i;
+ char core_mask_str[APP_CORE_MASK_STRING_SIZE];
+
+ for (i = 0; i < app->n_pipelines; i++) {
+ struct app_pipeline_params *p = &app->pipeline_params[i];
+ int lcore_id;
+
+ lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+ p->socket_id,
+ p->core_id,
+ p->hyper_th_id);
+
+ if (lcore_id < 0)
+ rte_panic("Cannot create CPU core mask\n");
+
+ app_core_enable_in_core_mask(app, lcore_id);
+ }
+
+ app_core_build_core_mask_string(app, core_mask_str);
+ APP_LOG(app, HIGH, "CPU core mask = 0x%s", core_mask_str);
+}
+
+static void
+app_init_eal(struct app_params *app)
+{
+ char buffer[256];
+ char core_mask_str[APP_CORE_MASK_STRING_SIZE];
+ struct app_eal_params *p = &app->eal_params;
+ uint32_t n_args = 0;
+ uint32_t i;
+ int status;
+
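+	/* Build the EAL argument vector from the parsed EAL parameters */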
+ app->eal_argv[n_args++] = strdup(app->app_name);
+
+ app_core_build_core_mask_string(app, core_mask_str);
+ snprintf(buffer, sizeof(buffer), "-c%s", core_mask_str);
+ app->eal_argv[n_args++] = strdup(buffer);
+
+ if (p->coremap) {
+ snprintf(buffer, sizeof(buffer), "--lcores=%s", p->coremap);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->master_lcore_present) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--master-lcore=%" PRIu32,
+ p->master_lcore);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ snprintf(buffer, sizeof(buffer), "-n%" PRIu32, p->channels);
+ app->eal_argv[n_args++] = strdup(buffer);
+
+ if (p->memory_present) {
+ snprintf(buffer, sizeof(buffer), "-m%" PRIu32, p->memory);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->ranks_present) {
+ snprintf(buffer, sizeof(buffer), "-r%" PRIu32, p->ranks);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_blacklist[i] == NULL)
+ break;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--pci-blacklist=%s",
+ p->pci_blacklist[i]);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (app->port_mask != 0)
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->pci_whitelist[i] == NULL)
+ break;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--pci-whitelist=%s",
+ p->pci_whitelist[i]);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+ else
+ for (i = 0; i < app->n_links; i++) {
+ char *pci_bdf = app->link_params[i].pci_bdf;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--pci-whitelist=%s",
+ pci_bdf);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ for (i = 0; i < APP_MAX_LINKS; i++) {
+ if (p->vdev[i] == NULL)
+ break;
+
+ snprintf(buffer,
+ sizeof(buffer),
+ "--vdev=%s",
+ p->vdev[i]);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->vmware_tsc_map_present) && p->vmware_tsc_map) {
+ snprintf(buffer, sizeof(buffer), "--vmware-tsc-map");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->proc_type) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--proc-type=%s",
+ p->proc_type);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->syslog) {
+ snprintf(buffer, sizeof(buffer), "--syslog=%s", p->syslog);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->log_level_present) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--log-level=%" PRIu32,
+ p->log_level);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->version_present) && p->version) {
+ snprintf(buffer, sizeof(buffer), "-v");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->help_present) && p->help) {
+ snprintf(buffer, sizeof(buffer), "--help");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_huge_present) && p->no_huge) {
+ snprintf(buffer, sizeof(buffer), "--no-huge");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_pci_present) && p->no_pci) {
+ snprintf(buffer, sizeof(buffer), "--no-pci");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_hpet_present) && p->no_hpet) {
+ snprintf(buffer, sizeof(buffer), "--no-hpet");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->no_shconf_present) && p->no_shconf) {
+ snprintf(buffer, sizeof(buffer), "--no-shconf");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->add_driver) {
+ snprintf(buffer, sizeof(buffer), "-d%s", p->add_driver);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->socket_mem) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--socket-mem=%s",
+ p->socket_mem);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->huge_dir) {
+ snprintf(buffer, sizeof(buffer), "--huge-dir=%s", p->huge_dir);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->file_prefix) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--file-prefix=%s",
+ p->file_prefix);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->base_virtaddr) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--base-virtaddr=%s",
+ p->base_virtaddr);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->create_uio_dev_present) && p->create_uio_dev) {
+ snprintf(buffer, sizeof(buffer), "--create-uio-dev");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if (p->vfio_intr) {
+ snprintf(buffer,
+ sizeof(buffer),
+ "--vfio-intr=%s",
+ p->vfio_intr);
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ if ((p->xen_dom0_present) && (p->xen_dom0)) {
+ snprintf(buffer, sizeof(buffer), "--xen-dom0");
+ app->eal_argv[n_args++] = strdup(buffer);
+ }
+
+ snprintf(buffer, sizeof(buffer), "--");
+ app->eal_argv[n_args++] = strdup(buffer);
+
+ app->eal_argc = n_args;
+
+ APP_LOG(app, HIGH, "Initializing EAL ...");
+ if (app->log_level >= APP_LOG_LEVEL_LOW) {
+ int i;
+
+ fprintf(stdout, "[APP] EAL arguments: \"");
+ for (i = 1; i < app->eal_argc; i++)
+ fprintf(stdout, "%s ", app->eal_argv[i]);
+ fprintf(stdout, "\"\n");
+ }
+
+ status = rte_eal_init(app->eal_argc, app->eal_argv);
+ if (status < 0)
+ rte_panic("EAL init error\n");
+}
+
+static void
+app_init_mempool(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_mempools; i++) {
+ struct app_mempool_params *p = &app->mempool_params[i];
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p->name);
+ app->mempool[i] = rte_pktmbuf_pool_create(
+ p->name,
+ p->pool_size,
+ p->cache_size,
+ 0, /* priv_size */
+ p->buffer_size -
+ sizeof(struct rte_mbuf), /* mbuf data size */
+ p->cpu_socket_id);
+
+ if (app->mempool[i] == NULL)
+ rte_panic("%s init error\n", p->name);
+ }
+}
+
+static inline int
+app_link_filter_arp_add(struct app_link_params *link)
+{
+ struct rte_eth_ethertype_filter filter = {
+ .ether_type = ETHER_TYPE_ARP,
+ .flags = 0,
+ .queue = link->arp_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(link->pmd_id,
+ RTE_ETH_FILTER_ETHERTYPE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_tcp_syn_add(struct app_link_params *link)
+{
+ struct rte_eth_syn_filter filter = {
+ .hig_pri = 1,
+ .queue = link->tcp_syn_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(link->pmd_id,
+ RTE_ETH_FILTER_SYN,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_ip_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = 0,
+ .proto_mask = 0, /* Disable */
+ .tcp_flags = 0,
+ .priority = 1, /* Lowest */
+ .queue = l1->ip_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_ip_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = 0,
+ .proto_mask = 0, /* Disable */
+ .tcp_flags = 0,
+ .priority = 1, /* Lowest */
+ .queue = l1->ip_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static inline int
+app_link_filter_tcp_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_TCP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->tcp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_tcp_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_TCP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->tcp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static inline int
+app_link_filter_udp_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_UDP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->udp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_udp_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_UDP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->udp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static inline int
+app_link_filter_sctp_add(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_SCTP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->sctp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_ADD,
+ &filter);
+}
+
+static inline int
+app_link_filter_sctp_del(struct app_link_params *l1, struct app_link_params *l2)
+{
+ struct rte_eth_ntuple_filter filter = {
+ .flags = RTE_5TUPLE_FLAGS,
+ .dst_ip = rte_bswap32(l2->ip),
+ .dst_ip_mask = UINT32_MAX, /* Enable */
+ .src_ip = 0,
+ .src_ip_mask = 0, /* Disable */
+ .dst_port = 0,
+ .dst_port_mask = 0, /* Disable */
+ .src_port = 0,
+ .src_port_mask = 0, /* Disable */
+ .proto = IPPROTO_SCTP,
+ .proto_mask = UINT8_MAX, /* Enable */
+ .tcp_flags = 0,
+ .priority = 2, /* Higher priority than IP */
+ .queue = l1->sctp_local_q,
+ };
+
+ return rte_eth_dev_filter_ctrl(l1->pmd_id,
+ RTE_ETH_FILTER_NTUPLE,
+ RTE_ETH_FILTER_DELETE,
+ &filter);
+}
+
+static void
+app_link_set_arp_filter(struct app_params *app, struct app_link_params *cp)
+{
+ if (cp->arp_q != 0) {
+ int status = app_link_filter_arp_add(cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding ARP filter (queue = %" PRIu32 ")",
+ cp->name, cp->pmd_id, cp->arp_q);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding ARP filter "
+ "(queue = %" PRIu32 ") (%" PRId32 ")\n",
+ cp->name, cp->pmd_id, cp->arp_q, status);
+ }
+}
+
+static void
+app_link_set_tcp_syn_filter(struct app_params *app, struct app_link_params *cp)
+{
+ if (cp->tcp_syn_q != 0) {
+ int status = app_link_filter_tcp_syn_add(cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding TCP SYN filter (queue = %" PRIu32 ")",
+ cp->name, cp->pmd_id, cp->tcp_syn_q);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding TCP SYN filter "
+ "(queue = %" PRIu32 ") (%" PRId32 ")\n",
+ cp->name, cp->pmd_id, cp->tcp_syn_q,
+ status);
+ }
+}
+
+void
+app_link_up_internal(struct app_params *app, struct app_link_params *cp)
+{
+ uint32_t i;
+ int status;
+
+ /* For each link, add filters for IP of current link */
+ if (cp->ip != 0) {
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p = &app->link_params[i];
+
+ /* IP */
+ if (p->ip_local_q != 0) {
+ int status = app_link_filter_ip_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding IP filter (queue= %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->ip_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding IP "
+ "filter (queue= %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->ip_local_q, cp->ip, status);
+ }
+
+ /* TCP */
+ if (p->tcp_local_q != 0) {
+ int status = app_link_filter_tcp_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding TCP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->tcp_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding TCP "
+ "filter (queue = %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->tcp_local_q, cp->ip, status);
+ }
+
+ /* UDP */
+ if (p->udp_local_q != 0) {
+ int status = app_link_filter_udp_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32 "): "
+ "Adding UDP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->udp_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding UDP "
+ "filter (queue = %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->udp_local_q, cp->ip, status);
+ }
+
+ /* SCTP */
+ if (p->sctp_local_q != 0) {
+ int status = app_link_filter_sctp_add(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Adding SCTP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%08" PRIx32 ")",
+ p->name, p->pmd_id, p->sctp_local_q,
+ cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32 "): "
+ "Error adding SCTP "
+ "filter (queue = %" PRIu32 ", "
+ "IP = 0x%08" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id,
+ p->sctp_local_q, cp->ip,
+ status);
+ }
+ }
+ }
+
+ /* PMD link up */
+ status = rte_eth_dev_set_link_up(cp->pmd_id);
+ /* Do not panic if PMD does not provide link up functionality */
+ if (status < 0 && status != -ENOTSUP)
+ rte_panic("%s (%" PRIu32 "): PMD set link up error %"
+ PRId32 "\n", cp->name, cp->pmd_id, status);
+
+ /* Mark link as UP */
+ cp->state = 1;
+}
+
+void
+app_link_down_internal(struct app_params *app, struct app_link_params *cp)
+{
+ uint32_t i;
+ int status;
+
+ /* PMD link down */
+ status = rte_eth_dev_set_link_down(cp->pmd_id);
+ /* Do not panic if PMD does not provide link down functionality */
+ if (status < 0 && status != -ENOTSUP)
+ rte_panic("%s (%" PRIu32 "): PMD set link down error %"
+ PRId32 "\n", cp->name, cp->pmd_id, status);
+
+ /* Mark link as DOWN */
+ cp->state = 0;
+
+ /* Return if current link IP is not valid */
+ if (cp->ip == 0)
+ return;
+
+ /* For each link, remove filters for IP of current link */
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p = &app->link_params[i];
+
+ /* IP */
+ if (p->ip_local_q != 0) {
+ int status = app_link_filter_ip_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting IP filter "
+ "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->ip_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting IP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->ip_local_q,
+ cp->ip, status);
+ }
+
+ /* TCP */
+ if (p->tcp_local_q != 0) {
+ int status = app_link_filter_tcp_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting TCP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->tcp_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting TCP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->tcp_local_q,
+ cp->ip, status);
+ }
+
+ /* UDP */
+ if (p->udp_local_q != 0) {
+ int status = app_link_filter_udp_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting UDP filter "
+ "(queue = %" PRIu32 ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->udp_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting UDP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->udp_local_q,
+ cp->ip, status);
+ }
+
+ /* SCTP */
+ if (p->sctp_local_q != 0) {
+ int status = app_link_filter_sctp_del(p, cp);
+
+ APP_LOG(app, LOW, "%s (%" PRIu32
+ "): Deleting SCTP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32 ")",
+ p->name, p->pmd_id, p->sctp_local_q, cp->ip);
+
+ if (status)
+ rte_panic("%s (%" PRIu32
+ "): Error deleting SCTP filter "
+ "(queue = %" PRIu32
+ ", IP = 0x%" PRIx32
+ ") (%" PRId32 ")\n",
+ p->name, p->pmd_id, p->sctp_local_q,
+ cp->ip, status);
+ }
+ }
+}
+
+static void
+app_check_link(struct app_params *app)
+{
+ uint32_t all_links_up, i;
+
+ all_links_up = 1;
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p = &app->link_params[i];
+ struct rte_eth_link link_params;
+
+ memset(&link_params, 0, sizeof(link_params));
+ rte_eth_link_get(p->pmd_id, &link_params);
+
+ APP_LOG(app, HIGH, "%s (%" PRIu32 ") (%" PRIu32 " Gbps) %s",
+ p->name,
+ p->pmd_id,
+ link_params.link_speed / 1000,
+ link_params.link_status ? "UP" : "DOWN");
+
+ if (link_params.link_status == ETH_LINK_DOWN)
+ all_links_up = 0;
+ }
+
+ if (all_links_up == 0)
+ rte_panic("Some links are DOWN\n");
+}
+
+static uint32_t
+is_any_swq_frag_or_ras(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_swq; i++) {
+ struct app_pktq_swq_params *p = &app->swq_params[i];
+
+ if ((p->ipv4_frag == 1) || (p->ipv6_frag == 1) ||
+ (p->ipv4_ras == 1) || (p->ipv6_ras == 1))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void
+app_init_link_frag_ras(struct app_params *app)
+{
+ uint32_t i;
+
+ if (is_any_swq_frag_or_ras(app)) {
+ for (i = 0; i < app->n_pktq_hwq_out; i++) {
+ struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i];
+
+ p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS;
+ }
+ }
+}
+
+static inline int
+app_get_cpu_socket_id(uint32_t pmd_id)
+{
+ int status = rte_eth_dev_socket_id(pmd_id);
+
+ return (status != SOCKET_ID_ANY) ? status : 0;
+}
+
+static inline int
+app_link_rss_enabled(struct app_link_params *cp)
+{
+ return (cp->n_rss_qs) ? 1 : 0;
+}
+
+static void
+app_link_rss_setup(struct app_link_params *cp)
+{
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_rss_reta_entry64 reta_conf[APP_RETA_SIZE_MAX];
+ uint32_t i;
+ int status;
+
+ /* Get RETA size */
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(cp->pmd_id, &dev_info);
+
+ if (dev_info.reta_size == 0)
+ rte_panic("%s (%u): RSS setup error (null RETA size)\n",
+ cp->name, cp->pmd_id);
+
+ if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512)
+ rte_panic("%s (%u): RSS setup error (RETA size too big)\n",
+ cp->name, cp->pmd_id);
+
+ /* Setup RETA contents */
+ memset(reta_conf, 0, sizeof(reta_conf));
+
+ for (i = 0; i < dev_info.reta_size; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+ for (i = 0; i < dev_info.reta_size; i++) {
+ uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
+ uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
+ uint32_t rss_qs_pos = i % cp->n_rss_qs;
+
+ reta_conf[reta_id].reta[reta_pos] =
+ (uint16_t) cp->rss_qs[rss_qs_pos];
+ }
+
+ /* RETA update */
+ status = rte_eth_dev_rss_reta_update(cp->pmd_id,
+ reta_conf,
+ dev_info.reta_size);
+ if (status != 0)
+ rte_panic("%s (%u): RSS setup error (RETA update failed)\n",
+ cp->name, cp->pmd_id);
+}
+
+static void
+app_init_link_set_config(struct app_link_params *p)
+{
+ if (p->n_rss_qs) {
+ p->conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ p->conf.rx_adv_conf.rss_conf.rss_hf = p->rss_proto_ipv4 |
+ p->rss_proto_ipv6 |
+ p->rss_proto_l2;
+ }
+}
+
+static void
+app_init_link(struct app_params *app)
+{
+ uint32_t i;
+
+ app_init_link_frag_ras(app);
+
+ for (i = 0; i < app->n_links; i++) {
+ struct app_link_params *p_link = &app->link_params[i];
+ uint32_t link_id, n_hwq_in, n_hwq_out, j;
+ int status;
+
+		sscanf(p_link->name, "LINK%" SCNu32, &link_id);
+ n_hwq_in = app_link_get_n_rxq(app, p_link);
+ n_hwq_out = app_link_get_n_txq(app, p_link);
+ app_init_link_set_config(p_link);
+
+ APP_LOG(app, HIGH, "Initializing %s (%" PRIu32") "
+ "(%" PRIu32 " RXQ, %" PRIu32 " TXQ) ...",
+ p_link->name,
+ p_link->pmd_id,
+ n_hwq_in,
+ n_hwq_out);
+
+ /* LINK */
+ status = rte_eth_dev_configure(
+ p_link->pmd_id,
+ n_hwq_in,
+ n_hwq_out,
+ &p_link->conf);
+ if (status < 0)
+ rte_panic("%s (%" PRId32 "): "
+ "init error (%" PRId32 ")\n",
+ p_link->name, p_link->pmd_id, status);
+
+ rte_eth_macaddr_get(p_link->pmd_id,
+ (struct ether_addr *) &p_link->mac_addr);
+
+ if (p_link->promisc)
+ rte_eth_promiscuous_enable(p_link->pmd_id);
+
+ /* RXQ */
+ for (j = 0; j < app->n_pktq_hwq_in; j++) {
+ struct app_pktq_hwq_in_params *p_rxq =
+ &app->hwq_in_params[j];
+ uint32_t rxq_link_id, rxq_queue_id;
+
+			sscanf(p_rxq->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id, &rxq_queue_id);
+ if (rxq_link_id != link_id)
+ continue;
+
+ status = rte_eth_rx_queue_setup(
+ p_link->pmd_id,
+ rxq_queue_id,
+ p_rxq->size,
+ app_get_cpu_socket_id(p_link->pmd_id),
+ &p_rxq->conf,
+ app->mempool[p_rxq->mempool_id]);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s init error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_rxq->name,
+ status);
+ }
+
+ /* TXQ */
+ for (j = 0; j < app->n_pktq_hwq_out; j++) {
+ struct app_pktq_hwq_out_params *p_txq =
+ &app->hwq_out_params[j];
+ uint32_t txq_link_id, txq_queue_id;
+
+			sscanf(p_txq->name, "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id, &txq_queue_id);
+ if (txq_link_id != link_id)
+ continue;
+
+ status = rte_eth_tx_queue_setup(
+ p_link->pmd_id,
+ txq_queue_id,
+ p_txq->size,
+ app_get_cpu_socket_id(p_link->pmd_id),
+ &p_txq->conf);
+ if (status < 0)
+ rte_panic("%s (%" PRIu32 "): "
+ "%s init error (%" PRId32 ")\n",
+ p_link->name,
+ p_link->pmd_id,
+ p_txq->name,
+ status);
+ }
+
+ /* LINK START */
+ status = rte_eth_dev_start(p_link->pmd_id);
+ if (status < 0)
+ rte_panic("Cannot start %s (error %" PRId32 ")\n",
+ p_link->name, status);
+
+ /* LINK FILTERS */
+ app_link_set_arp_filter(app, p_link);
+ app_link_set_tcp_syn_filter(app, p_link);
+ if (app_link_rss_enabled(p_link))
+ app_link_rss_setup(p_link);
+
+ /* LINK UP */
+ app_link_up_internal(app, p_link);
+ }
+
+ app_check_link(app);
+}
+
+static void
+app_init_swq(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_swq; i++) {
+ struct app_pktq_swq_params *p = &app->swq_params[i];
+ unsigned flags = 0;
+
+ if (app_swq_get_readers(app, p) == 1)
+ flags |= RING_F_SC_DEQ;
+ if (app_swq_get_writers(app, p) == 1)
+ flags |= RING_F_SP_ENQ;
+
+ APP_LOG(app, HIGH, "Initializing %s...", p->name);
+ app->swq[i] = rte_ring_create(
+ p->name,
+ p->size,
+ p->cpu_socket_id,
+ flags);
+
+ if (app->swq[i] == NULL)
+ rte_panic("%s init error\n", p->name);
+ }
+}
+
+static void
+app_init_tm(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_tm; i++) {
+ struct app_pktq_tm_params *p_tm = &app->tm_params[i];
+ struct app_link_params *p_link;
+ struct rte_eth_link link_eth_params;
+ struct rte_sched_port *sched;
+ uint32_t n_subports, subport_id;
+ int status;
+
+ p_link = app_get_link_for_tm(app, p_tm);
+ /* LINK */
+ rte_eth_link_get(p_link->pmd_id, &link_eth_params);
+
+ /* TM */
+ p_tm->sched_port_params.name = p_tm->name;
+ p_tm->sched_port_params.socket =
+ app_get_cpu_socket_id(p_link->pmd_id);
+ p_tm->sched_port_params.rate =
+ (uint64_t) link_eth_params.link_speed * 1000 * 1000 / 8;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p_tm->name);
+ sched = rte_sched_port_config(&p_tm->sched_port_params);
+ if (sched == NULL)
+ rte_panic("%s init error\n", p_tm->name);
+ app->tm[i] = sched;
+
+ /* Subport */
+ n_subports = p_tm->sched_port_params.n_subports_per_port;
+ for (subport_id = 0; subport_id < n_subports; subport_id++) {
+ uint32_t n_pipes_per_subport, pipe_id;
+
+ status = rte_sched_subport_config(sched,
+ subport_id,
+ &p_tm->sched_subport_params[subport_id]);
+ if (status)
+ rte_panic("%s subport %" PRIu32
+ " init error (%" PRId32 ")\n",
+ p_tm->name, subport_id, status);
+
+ /* Pipe */
+ n_pipes_per_subport =
+ p_tm->sched_port_params.n_pipes_per_subport;
+ for (pipe_id = 0;
+ pipe_id < n_pipes_per_subport;
+ pipe_id++) {
+ int profile_id = p_tm->sched_pipe_to_profile[
+ subport_id * APP_MAX_SCHED_PIPES +
+ pipe_id];
+
+ if (profile_id == -1)
+ continue;
+
+ status = rte_sched_pipe_config(sched,
+ subport_id,
+ pipe_id,
+ profile_id);
+ if (status)
+ rte_panic("%s subport %" PRIu32
+ " pipe %" PRIu32
+ " (profile %" PRId32 ") "
+						"init error (%" PRId32 ")\n",
+ p_tm->name, subport_id, pipe_id,
+ profile_id, status);
+ }
+ }
+ }
+}
+
+#ifndef RTE_EXEC_ENV_LINUXAPP
+static void
+app_init_tap(struct app_params *app)
+{
+ if (app->n_pktq_tap == 0)
+ return;
+
+ rte_panic("TAP device not supported.\n");
+}
+#else
+static void
+app_init_tap(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pktq_tap; i++) {
+ struct app_pktq_tap_params *p_tap = &app->tap_params[i];
+ struct ifreq ifr;
+ int fd, status;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p_tap->name);
+
+ fd = open("/dev/net/tun", O_RDWR | O_NONBLOCK);
+ if (fd < 0)
+ rte_panic("Cannot open file /dev/net/tun\n");
+
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", p_tap->name);
+
+ status = ioctl(fd, TUNSETIFF, (void *) &ifr);
+ if (status < 0)
+ rte_panic("TAP setup error\n");
+
+ app->tap[i] = fd;
+ }
+}
+#endif
+
+#ifdef RTE_LIBRTE_KNI
+static int
+kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
+ int ret = 0;
+
+ if (port_id >= rte_eth_dev_count())
+ return -EINVAL;
+
+ ret = (if_up) ?
+ rte_eth_dev_set_link_up(port_id) :
+ rte_eth_dev_set_link_down(port_id);
+
+ return ret;
+}
+
+static int
+kni_change_mtu(uint8_t port_id, unsigned new_mtu) {
+ int ret;
+
+ if (port_id >= rte_eth_dev_count())
+ return -EINVAL;
+
+ if (new_mtu > ETHER_MAX_LEN)
+ return -EINVAL;
+
+ /* Set new MTU */
+ ret = rte_eth_dev_set_mtu(port_id, new_mtu);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+#endif /* RTE_LIBRTE_KNI */
+
+#ifndef RTE_LIBRTE_KNI
+static void
+app_init_kni(struct app_params *app) {
+ if (app->n_pktq_kni == 0)
+ return;
+
+	rte_panic("Cannot init KNI without librte_kni support.\n");
+}
+#else
+static void
+app_init_kni(struct app_params *app) {
+ uint32_t i;
+
+ if (app->n_pktq_kni == 0)
+ return;
+
+ rte_kni_init(app->n_pktq_kni);
+
+ for (i = 0; i < app->n_pktq_kni; i++) {
+ struct app_pktq_kni_params *p_kni = &app->kni_params[i];
+ struct app_link_params *p_link;
+ struct rte_eth_dev_info dev_info;
+ struct app_mempool_params *mempool_params;
+ struct rte_mempool *mempool;
+ struct rte_kni_conf conf;
+ struct rte_kni_ops ops;
+
+ /* LINK */
+ p_link = app_get_link_for_kni(app, p_kni);
+ memset(&dev_info, 0, sizeof(dev_info));
+ rte_eth_dev_info_get(p_link->pmd_id, &dev_info);
+
+ /* MEMPOOL */
+ mempool_params = &app->mempool_params[p_kni->mempool_id];
+ mempool = app->mempool[p_kni->mempool_id];
+
+ /* KNI */
+ memset(&conf, 0, sizeof(conf));
+ snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", p_kni->name);
+ conf.force_bind = p_kni->force_bind;
+ if (conf.force_bind) {
+ int lcore_id;
+
+ lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+ p_kni->socket_id,
+ p_kni->core_id,
+ p_kni->hyper_th_id);
+
+ if (lcore_id < 0)
+ rte_panic("%s invalid CPU core\n", p_kni->name);
+
+ conf.core_id = (uint32_t) lcore_id;
+ }
+ conf.group_id = p_link->pmd_id;
+ conf.mbuf_size = mempool_params->buffer_size;
+ conf.addr = dev_info.pci_dev->addr;
+ conf.id = dev_info.pci_dev->id;
+
+ memset(&ops, 0, sizeof(ops));
+ ops.port_id = (uint8_t) p_link->pmd_id;
+ ops.change_mtu = kni_change_mtu;
+ ops.config_network_if = kni_config_network_interface;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p_kni->name);
+ app->kni[i] = rte_kni_alloc(mempool, &conf, &ops);
+ if (!app->kni[i])
+ rte_panic("%s init error\n", p_kni->name);
+ }
+}
+#endif /* RTE_LIBRTE_KNI */
+
+static void
+app_init_msgq(struct app_params *app)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_msgq; i++) {
+ struct app_msgq_params *p = &app->msgq_params[i];
+
+ APP_LOG(app, HIGH, "Initializing %s ...", p->name);
+ app->msgq[i] = rte_ring_create(
+ p->name,
+ p->size,
+ p->cpu_socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+
+ if (app->msgq[i] == NULL)
+ rte_panic("%s init error\n", p->name);
+ }
+}
+
+void app_pipeline_params_get(struct app_params *app,
+ struct app_pipeline_params *p_in,
+ struct pipeline_params *p_out)
+{
+ uint32_t i;
+
+ snprintf(p_out->name, PIPELINE_NAME_SIZE, "%s", p_in->name);
+
+ snprintf(p_out->type, PIPELINE_TYPE_SIZE, "%s", p_in->type);
+
+ p_out->socket_id = (int) p_in->socket_id;
+
+ p_out->log_level = app->log_level;
+
+ /* pktq_in */
+ p_out->n_ports_in = p_in->n_pktq_in;
+ for (i = 0; i < p_in->n_pktq_in; i++) {
+ struct app_pktq_in_params *in = &p_in->pktq_in[i];
+ struct pipeline_port_in_params *out = &p_out->port_in[i];
+
+ switch (in->type) {
+ case APP_PKTQ_IN_HWQ:
+ {
+ struct app_pktq_hwq_in_params *p_hwq_in =
+ &app->hwq_in_params[in->id];
+ struct app_link_params *p_link =
+ app_get_link_for_rxq(app, p_hwq_in);
+ uint32_t rxq_link_id, rxq_queue_id;
+
+ sscanf(p_hwq_in->name, "RXQ%" SCNu32 ".%" SCNu32,
+ &rxq_link_id,
+ &rxq_queue_id);
+
+ out->type = PIPELINE_PORT_IN_ETHDEV_READER;
+ out->params.ethdev.port_id = p_link->pmd_id;
+ out->params.ethdev.queue_id = rxq_queue_id;
+ out->burst_size = p_hwq_in->burst;
+ break;
+ }
+ case APP_PKTQ_IN_SWQ:
+ {
+ struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
+
+ if ((swq_params->ipv4_frag == 0) && (swq_params->ipv6_frag == 0)) {
+ if (app_swq_get_readers(app, swq_params) == 1) {
+ out->type = PIPELINE_PORT_IN_RING_READER;
+ out->params.ring.ring = app->swq[in->id];
+ out->burst_size = app->swq_params[in->id].burst_read;
+ } else {
+ out->type = PIPELINE_PORT_IN_RING_MULTI_READER;
+ out->params.ring_multi.ring = app->swq[in->id];
+ out->burst_size = swq_params->burst_read;
+ }
+ } else {
+ if (swq_params->ipv4_frag == 1) {
+ struct rte_port_ring_reader_ipv4_frag_params *params =
+ &out->params.ring_ipv4_frag;
+
+ out->type = PIPELINE_PORT_IN_RING_READER_IPV4_FRAG;
+ params->ring = app->swq[in->id];
+ params->mtu = swq_params->mtu;
+ params->metadata_size = swq_params->metadata_size;
+ params->pool_direct =
+ app->mempool[swq_params->mempool_direct_id];
+ params->pool_indirect =
+ app->mempool[swq_params->mempool_indirect_id];
+ out->burst_size = swq_params->burst_read;
+ } else {
+ struct rte_port_ring_reader_ipv6_frag_params *params =
+ &out->params.ring_ipv6_frag;
+
+ out->type = PIPELINE_PORT_IN_RING_READER_IPV6_FRAG;
+ params->ring = app->swq[in->id];
+ params->mtu = swq_params->mtu;
+ params->metadata_size = swq_params->metadata_size;
+ params->pool_direct =
+ app->mempool[swq_params->mempool_direct_id];
+ params->pool_indirect =
+ app->mempool[swq_params->mempool_indirect_id];
+ out->burst_size = swq_params->burst_read;
+ }
+ }
+ break;
+ }
+ case APP_PKTQ_IN_TM:
+ {
+ out->type = PIPELINE_PORT_IN_SCHED_READER;
+ out->params.sched.sched = app->tm[in->id];
+ out->burst_size = app->tm_params[in->id].burst_read;
+ break;
+ }
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ case APP_PKTQ_IN_TAP:
+ {
+ struct app_pktq_tap_params *tap_params =
+ &app->tap_params[in->id];
+ struct app_mempool_params *mempool_params =
+ &app->mempool_params[tap_params->mempool_id];
+ struct rte_mempool *mempool =
+ app->mempool[tap_params->mempool_id];
+
+ out->type = PIPELINE_PORT_IN_FD_READER;
+ out->params.fd.fd = app->tap[in->id];
+ out->params.fd.mtu = mempool_params->buffer_size;
+ out->params.fd.mempool = mempool;
+ out->burst_size = app->tap_params[in->id].burst_read;
+ break;
+ }
+#endif
+#ifdef RTE_LIBRTE_KNI
+ case APP_PKTQ_IN_KNI:
+ {
+ out->type = PIPELINE_PORT_IN_KNI_READER;
+ out->params.kni.kni = app->kni[in->id];
+ out->burst_size = app->kni_params[in->id].burst_read;
+ break;
+ }
+#endif /* RTE_LIBRTE_KNI */
+ case APP_PKTQ_IN_SOURCE:
+ {
+ uint32_t mempool_id =
+ app->source_params[in->id].mempool_id;
+
+ out->type = PIPELINE_PORT_IN_SOURCE;
+ out->params.source.mempool = app->mempool[mempool_id];
+ out->burst_size = app->source_params[in->id].burst;
+ out->params.source.file_name =
+ app->source_params[in->id].file_name;
+ out->params.source.n_bytes_per_pkt =
+ app->source_params[in->id].n_bytes_per_pkt;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ /* pktq_out */
+ p_out->n_ports_out = p_in->n_pktq_out;
+ for (i = 0; i < p_in->n_pktq_out; i++) {
+ struct app_pktq_out_params *in = &p_in->pktq_out[i];
+ struct pipeline_port_out_params *out = &p_out->port_out[i];
+
+ switch (in->type) {
+ case APP_PKTQ_OUT_HWQ:
+ {
+ struct app_pktq_hwq_out_params *p_hwq_out =
+ &app->hwq_out_params[in->id];
+ struct app_link_params *p_link =
+ app_get_link_for_txq(app, p_hwq_out);
+ uint32_t txq_link_id, txq_queue_id;
+
+ sscanf(p_hwq_out->name,
+ "TXQ%" SCNu32 ".%" SCNu32,
+ &txq_link_id,
+ &txq_queue_id);
+
+ if (p_hwq_out->dropless == 0) {
+ struct rte_port_ethdev_writer_params *params =
+ &out->params.ethdev;
+
+ out->type = PIPELINE_PORT_OUT_ETHDEV_WRITER;
+ params->port_id = p_link->pmd_id;
+ params->queue_id = txq_queue_id;
+ params->tx_burst_sz =
+ app->hwq_out_params[in->id].burst;
+ } else {
+ struct rte_port_ethdev_writer_nodrop_params
+ *params = &out->params.ethdev_nodrop;
+
+ out->type =
+ PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP;
+ params->port_id = p_link->pmd_id;
+ params->queue_id = txq_queue_id;
+ params->tx_burst_sz = p_hwq_out->burst;
+ params->n_retries = p_hwq_out->n_retries;
+ }
+ break;
+ }
+ case APP_PKTQ_OUT_SWQ:
+ {
+ struct app_pktq_swq_params *swq_params = &app->swq_params[in->id];
+
+ if ((swq_params->ipv4_ras == 0) && (swq_params->ipv6_ras == 0)) {
+ if (app_swq_get_writers(app, swq_params) == 1) {
+ if (app->swq_params[in->id].dropless == 0) {
+ struct rte_port_ring_writer_params *params =
+ &out->params.ring;
+
+ out->type = PIPELINE_PORT_OUT_RING_WRITER;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz =
+ app->swq_params[in->id].burst_write;
+ } else {
+ struct rte_port_ring_writer_nodrop_params
+ *params = &out->params.ring_nodrop;
+
+ out->type =
+ PIPELINE_PORT_OUT_RING_WRITER_NODROP;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz =
+ app->swq_params[in->id].burst_write;
+ params->n_retries =
+ app->swq_params[in->id].n_retries;
+ }
+ } else {
+ if (swq_params->dropless == 0) {
+ struct rte_port_ring_multi_writer_params *params =
+ &out->params.ring_multi;
+
+ out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ } else {
+ struct rte_port_ring_multi_writer_nodrop_params
+ *params = &out->params.ring_multi_nodrop;
+
+ out->type = PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ params->n_retries = swq_params->n_retries;
+ }
+ }
+ } else {
+ if (swq_params->ipv4_ras == 1) {
+ struct rte_port_ring_writer_ipv4_ras_params *params =
+ &out->params.ring_ipv4_ras;
+
+ out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ } else {
+ struct rte_port_ring_writer_ipv6_ras_params *params =
+ &out->params.ring_ipv6_ras;
+
+ out->type = PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS;
+ params->ring = app->swq[in->id];
+ params->tx_burst_sz = swq_params->burst_write;
+ }
+ }
+ break;
+ }
+ case APP_PKTQ_OUT_TM:
+ {
+ struct rte_port_sched_writer_params *params =
+ &out->params.sched;
+
+ out->type = PIPELINE_PORT_OUT_SCHED_WRITER;
+ params->sched = app->tm[in->id];
+ params->tx_burst_sz =
+ app->tm_params[in->id].burst_write;
+ break;
+ }
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ case APP_PKTQ_OUT_TAP:
+ {
+ struct rte_port_fd_writer_params *params =
+ &out->params.fd;
+
+ out->type = PIPELINE_PORT_OUT_FD_WRITER;
+ params->fd = app->tap[in->id];
+ params->tx_burst_sz =
+ app->tap_params[in->id].burst_write;
+ break;
+ }
+#endif
+#ifdef RTE_LIBRTE_KNI
+ case APP_PKTQ_OUT_KNI:
+ {
+ struct app_pktq_kni_params *p_kni =
+ &app->kni_params[in->id];
+
+ if (p_kni->dropless == 0) {
+ struct rte_port_kni_writer_params *params =
+ &out->params.kni;
+
+ out->type = PIPELINE_PORT_OUT_KNI_WRITER;
+ params->kni = app->kni[in->id];
+ params->tx_burst_sz =
+ app->kni_params[in->id].burst_write;
+ } else {
+ struct rte_port_kni_writer_nodrop_params
+ *params = &out->params.kni_nodrop;
+
+ out->type = PIPELINE_PORT_OUT_KNI_WRITER_NODROP;
+ params->kni = app->kni[in->id];
+ params->tx_burst_sz =
+ app->kni_params[in->id].burst_write;
+ params->n_retries =
+ app->kni_params[in->id].n_retries;
+ }
+ break;
+ }
+#endif /* RTE_LIBRTE_KNI */
+ case APP_PKTQ_OUT_SINK:
+ {
+ out->type = PIPELINE_PORT_OUT_SINK;
+ out->params.sink.file_name =
+ app->sink_params[in->id].file_name;
+ out->params.sink.max_n_pkts =
+				app->sink_params[in->id].n_pkts_to_dump;
+
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ /* msgq */
+ p_out->n_msgq = p_in->n_msgq_in;
+
+ for (i = 0; i < p_in->n_msgq_in; i++)
+ p_out->msgq_in[i] = app->msgq[p_in->msgq_in[i]];
+
+ for (i = 0; i < p_in->n_msgq_out; i++)
+ p_out->msgq_out[i] = app->msgq[p_in->msgq_out[i]];
+
+ /* args */
+ p_out->n_args = p_in->n_args;
+ for (i = 0; i < p_in->n_args; i++) {
+ p_out->args_name[i] = p_in->args_name[i];
+ p_out->args_value[i] = p_in->args_value[i];
+ }
+}
+
+static void
+app_init_pipelines(struct app_params *app)
+{
+ uint32_t p_id;
+
+ for (p_id = 0; p_id < app->n_pipelines; p_id++) {
+ struct app_pipeline_params *params =
+ &app->pipeline_params[p_id];
+ struct app_pipeline_data *data = &app->pipeline_data[p_id];
+ struct pipeline_type *ptype;
+ struct pipeline_params pp;
+
+ APP_LOG(app, HIGH, "Initializing %s ...", params->name);
+
+ ptype = app_pipeline_type_find(app, params->type);
+ if (ptype == NULL)
+ rte_panic("Init error: Unknown pipeline type \"%s\"\n",
+ params->type);
+
+ app_pipeline_params_get(app, params, &pp);
+
+ /* Back-end */
+ data->be = NULL;
+ if (ptype->be_ops->f_init) {
+ data->be = ptype->be_ops->f_init(&pp, (void *) app);
+
+ if (data->be == NULL)
+ rte_panic("Pipeline instance \"%s\" back-end "
+ "init error\n", params->name);
+ }
+
+ /* Front-end */
+ data->fe = NULL;
+ if (ptype->fe_ops->f_init) {
+ data->fe = ptype->fe_ops->f_init(&pp, (void *) app);
+
+ if (data->fe == NULL)
+ rte_panic("Pipeline instance \"%s\" front-end "
+ "init error\n", params->name);
+ }
+
+ data->ptype = ptype;
+
+ data->timer_period = (rte_get_tsc_hz() *
+ params->timer_period) / 100;
+ }
+}
+
+static void
+app_post_init_pipelines(struct app_params *app)
+{
+ uint32_t p_id;
+
+ for (p_id = 0; p_id < app->n_pipelines; p_id++) {
+ struct app_pipeline_params *params =
+ &app->pipeline_params[p_id];
+ struct app_pipeline_data *data = &app->pipeline_data[p_id];
+ int status;
+
+ if (data->ptype->fe_ops->f_post_init == NULL)
+ continue;
+
+ status = data->ptype->fe_ops->f_post_init(data->fe);
+ if (status)
+ rte_panic("Pipeline instance \"%s\" front-end "
+ "post-init error\n", params->name);
+ }
+}
+
+static void
+app_init_threads(struct app_params *app)
+{
+ uint64_t time = rte_get_tsc_cycles();
+ uint32_t p_id;
+
+ for (p_id = 0; p_id < app->n_pipelines; p_id++) {
+ struct app_pipeline_params *params =
+ &app->pipeline_params[p_id];
+ struct app_pipeline_data *data = &app->pipeline_data[p_id];
+ struct pipeline_type *ptype;
+ struct app_thread_data *t;
+ struct app_thread_pipeline_data *p;
+ int lcore_id;
+
+ lcore_id = cpu_core_map_get_lcore_id(app->core_map,
+ params->socket_id,
+ params->core_id,
+ params->hyper_th_id);
+
+ if (lcore_id < 0)
+ rte_panic("Invalid core s%" PRIu32 "c%" PRIu32 "%s\n",
+ params->socket_id,
+ params->core_id,
+ (params->hyper_th_id) ? "h" : "");
+
+ t = &app->thread_data[lcore_id];
+
+ t->timer_period = (rte_get_tsc_hz() * APP_THREAD_TIMER_PERIOD) / 1000;
+ t->thread_req_deadline = time + t->timer_period;
+
+ t->headroom_cycles = 0;
+ t->headroom_time = rte_get_tsc_cycles();
+ t->headroom_ratio = 0.0;
+
+ t->msgq_in = app_thread_msgq_in_get(app,
+ params->socket_id,
+ params->core_id,
+ params->hyper_th_id);
+ if (t->msgq_in == NULL)
+ rte_panic("Init error: Cannot find MSGQ_IN for thread %" PRId32,
+ lcore_id);
+
+ t->msgq_out = app_thread_msgq_out_get(app,
+ params->socket_id,
+ params->core_id,
+ params->hyper_th_id);
+ if (t->msgq_out == NULL)
+ rte_panic("Init error: Cannot find MSGQ_OUT for thread %" PRId32,
+ lcore_id);
+
+ ptype = app_pipeline_type_find(app, params->type);
+ if (ptype == NULL)
+ rte_panic("Init error: Unknown pipeline "
+ "type \"%s\"\n", params->type);
+
+ p = (ptype->be_ops->f_run == NULL) ?
+ &t->regular[t->n_regular] :
+ &t->custom[t->n_custom];
+
+ p->pipeline_id = p_id;
+ p->be = data->be;
+ p->f_run = ptype->be_ops->f_run;
+ p->f_timer = ptype->be_ops->f_timer;
+ p->timer_period = data->timer_period;
+ p->deadline = time + data->timer_period;
+
+ data->enabled = 1;
+
+ if (ptype->be_ops->f_run == NULL)
+ t->n_regular++;
+ else
+ t->n_custom++;
+ }
+}
+
+int app_init(struct app_params *app)
+{
+ app_init_core_map(app);
+ app_init_core_mask(app);
+
+ app_init_eal(app);
+ app_init_mempool(app);
+ app_init_link(app);
+ app_init_swq(app);
+ app_init_tm(app);
+ app_init_tap(app);
+ app_init_kni(app);
+ app_init_msgq(app);
+
+ app_pipeline_common_cmd_push(app);
+ app_pipeline_thread_cmd_push(app);
+ app_pipeline_type_register(app, &pipeline_master);
+ app_pipeline_type_register(app, &pipeline_passthrough);
+ app_pipeline_type_register(app, &pipeline_flow_classification);
+ app_pipeline_type_register(app, &pipeline_flow_actions);
+ app_pipeline_type_register(app, &pipeline_firewall);
+ app_pipeline_type_register(app, &pipeline_routing);
+
+ app_init_pipelines(app);
+ app_init_threads(app);
+
+ return 0;
+}
+
+int app_post_init(struct app_params *app)
+{
+ app_post_init_pipelines(app);
+
+ return 0;
+}
+
+static int
+app_pipeline_type_cmd_push(struct app_params *app,
+ struct pipeline_type *ptype)
+{
+ cmdline_parse_ctx_t *cmds;
+ uint32_t n_cmds, i;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (ptype == NULL))
+ return -EINVAL;
+
+ n_cmds = pipeline_type_cmds_count(ptype);
+ if (n_cmds == 0)
+ return 0;
+
+ cmds = ptype->fe_ops->cmds;
+
+ /* Check for available slots in the application commands array */
+ if (n_cmds > APP_MAX_CMDS - app->n_cmds)
+ return -ENOMEM;
+
+ /* Push pipeline commands into the application */
+ memcpy(&app->cmds[app->n_cmds],
+ cmds,
+ n_cmds * sizeof(cmdline_parse_ctx_t));
+
+ for (i = 0; i < n_cmds; i++)
+ app->cmds[app->n_cmds + i]->data = app;
+
+ app->n_cmds += n_cmds;
+ app->cmds[app->n_cmds] = NULL;
+
+ return 0;
+}
+
+int
+app_pipeline_type_register(struct app_params *app, struct pipeline_type *ptype)
+{
+ uint32_t n_cmds, i;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (ptype == NULL) ||
+ (ptype->name == NULL) ||
+ (strlen(ptype->name) == 0) ||
+ (ptype->be_ops->f_init == NULL) ||
+ (ptype->be_ops->f_timer == NULL))
+ return -EINVAL;
+
+ /* Check for duplicate entry */
+ for (i = 0; i < app->n_pipeline_types; i++)
+ if (strcmp(app->pipeline_type[i].name, ptype->name) == 0)
+ return -EEXIST;
+
+ /* Check for resource availability */
+ n_cmds = pipeline_type_cmds_count(ptype);
+ if ((app->n_pipeline_types == APP_MAX_PIPELINE_TYPES) ||
+ (n_cmds > APP_MAX_CMDS - app->n_cmds))
+ return -ENOMEM;
+
+ /* Copy pipeline type */
+ memcpy(&app->pipeline_type[app->n_pipeline_types++],
+ ptype,
+ sizeof(struct pipeline_type));
+
+ /* Copy CLI commands */
+ if (n_cmds)
+ app_pipeline_type_cmd_push(app, ptype);
+
+ return 0;
+}
+
+struct pipeline_type *
+app_pipeline_type_find(struct app_params *app, char *name)
+{
+ uint32_t i;
+
+ for (i = 0; i < app->n_pipeline_types; i++)
+ if (strcmp(app->pipeline_type[i].name, name) == 0)
+ return &app->pipeline_type[i];
+
+ return NULL;
+}
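A side note on the RSS setup above: app_link_rss_setup() fills the NIC redirection table (RETA) round-robin over the configured RSS queues. A minimal standalone sketch of that fill pattern follows; the queue list and RETA size are made-up values, not taken from any configuration file in this example.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical link config: 4 RSS queues, 128-entry RETA */
	static const uint16_t rss_qs[] = { 0, 1, 2, 3 };
	const uint32_t n_rss_qs = 4, reta_size = 128;
	uint32_t i;

	/* Same round-robin spread as app_link_rss_setup() */
	for (i = 0; i < reta_size; i++)
		printf("reta[%u] = queue %u\n",
			(unsigned)i, (unsigned)rss_qs[i % n_rss_qs]);

	return 0;
}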
diff --git a/src/seastar/dpdk/examples/ip_pipeline/main.c b/src/seastar/dpdk/examples/ip_pipeline/main.c
new file mode 100644
index 00000000..4944dcfb
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/main.c
@@ -0,0 +1,64 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "app.h"
+
+static struct app_params app;
+
+int
+main(int argc, char **argv)
+{
+ rte_openlog_stream(stderr);
+
+ /* Config */
+ app_config_init(&app);
+
+ app_config_args(&app, argc, argv);
+
+ app_config_preproc(&app);
+
+ app_config_parse(&app, app.parser_file);
+
+ app_config_check(&app);
+
+ /* Init */
+ app_init(&app);
+
+ /* Run-time */
+ rte_eal_mp_remote_launch(
+ app_thread,
+ (void *) &app,
+ CALL_MASTER);
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/examples/ip_pipeline/parser.c b/src/seastar/dpdk/examples/ip_pipeline/parser.c
new file mode 100644
index 00000000..689e2065
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/parser.c
@@ -0,0 +1,745 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * For my_ether_aton() function:
+ *
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the University of California, Berkeley nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * For inet_pton4() and inet_pton6() functions:
+ *
+ * Copyright (c) 1996 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
+ * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
+ * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <getopt.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <string.h>
+#include <libgen.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_errno.h>
+#include <rte_cfgfile.h>
+#include <rte_string_fns.h>
+
+#include "app.h"
+#include "parser.h"
+
+static uint32_t
+get_hex_val(char c)
+{
+ switch (c) {
+ case '0': case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+ return c - '0';
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ return c - 'A' + 10;
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ return c - 'a' + 10;
+ default:
+ return 0;
+ }
+}
+
+int
+parser_read_arg_bool(const char *p)
+{
+ p = skip_white_spaces(p);
+ int result = -EINVAL;
+
+ if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) ||
+ ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) {
+ p += 3;
+ result = 1;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'n')) ||
+ ((p[0] == 'O') && (p[1] == 'N'))) {
+ p += 2;
+ result = 1;
+ }
+
+ if (((p[0] == 'n') && (p[1] == 'o')) ||
+ ((p[0] == 'N') && (p[1] == 'O'))) {
+ p += 2;
+ result = 0;
+ }
+
+ if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) ||
+ ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) {
+ p += 3;
+ result = 0;
+ }
+
+ p = skip_white_spaces(p);
+
+ if (p[0] != '\0')
+ return -EINVAL;
+
+ return result;
+}
+
+int
+parser_read_uint64(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+ if (!isdigit(*p))
+ return -EINVAL;
+
+ val = strtoul(p, &next, 10);
+ if (p == next)
+ return -EINVAL;
+
+ p = next;
+ switch (*p) {
+ case 'T':
+ val *= 1024ULL;
+ /* fall through */
+ case 'G':
+ val *= 1024ULL;
+ /* fall through */
+ case 'M':
+ val *= 1024ULL;
+ /* fall through */
+ case 'k':
+ case 'K':
+ val *= 1024ULL;
+ p++;
+ break;
+ }
+
+ p = skip_white_spaces(p);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint64_hex(uint64_t *value, const char *p)
+{
+ char *next;
+ uint64_t val;
+
+ p = skip_white_spaces(p);
+
+ val = strtoul(p, &next, 16);
+ if (p == next)
+ return -EINVAL;
+
+ p = skip_white_spaces(next);
+ if (*p != '\0')
+ return -EINVAL;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint32_hex(uint32_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT32_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint16_hex(uint16_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT16_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parser_read_uint8_hex(uint8_t *value, const char *p)
+{
+ uint64_t val = 0;
+ int ret = parser_read_uint64_hex(&val, p);
+
+ if (ret < 0)
+ return ret;
+
+ if (val > UINT8_MAX)
+ return -ERANGE;
+
+ *value = val;
+ return 0;
+}
+
+int
+parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens)
+{
+ uint32_t i;
+
+ if ((string == NULL) ||
+ (tokens == NULL) ||
+ (*n_tokens < 1))
+ return -EINVAL;
+
+ for (i = 0; i < *n_tokens; i++) {
+ tokens[i] = strtok_r(string, PARSE_DELIMITER, &string);
+ if (tokens[i] == NULL)
+ break;
+ }
+
+ if ((i == *n_tokens) &&
+ (NULL != strtok_r(string, PARSE_DELIMITER, &string)))
+ return -E2BIG;
+
+ *n_tokens = i;
+ return 0;
+}
+
+int
+parse_hex_string(char *src, uint8_t *dst, uint32_t *size)
+{
+ char *c;
+ uint32_t len, i;
+
+ /* Check input parameters */
+ if ((src == NULL) ||
+ (dst == NULL) ||
+ (size == NULL) ||
+ (*size == 0))
+ return -1;
+
+ len = strlen(src);
+ if (((len & 3) != 0) ||
+ (len > (*size) * 2))
+ return -1;
+ *size = len / 2;
+
+ for (c = src; *c != 0; c++) {
+ if ((((*c) >= '0') && ((*c) <= '9')) ||
+ (((*c) >= 'A') && ((*c) <= 'F')) ||
+ (((*c) >= 'a') && ((*c) <= 'f')))
+ continue;
+
+ return -1;
+ }
+
+ /* Convert chars to bytes */
+ for (i = 0; i < *size; i++)
+ dst[i] = get_hex_val(src[2 * i]) * 16 +
+ get_hex_val(src[2 * i + 1]);
+
+ return 0;
+}
+
+int
+parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels)
+{
+ uint32_t n_max_labels = *n_labels, count = 0;
+
+ /* Check for void list of labels */
+ if (strcmp(string, "<void>") == 0) {
+ *n_labels = 0;
+ return 0;
+ }
+
+ /* At least one label should be present */
+ for ( ; (*string != '\0'); ) {
+ char *next;
+ int value;
+
+ if (count >= n_max_labels)
+ return -1;
+
+ if (count > 0) {
+ if (string[0] != ':')
+ return -1;
+
+ string++;
+ }
+
+ value = strtol(string, &next, 10);
+ if (next == string)
+ return -1;
+ string = next;
+
+ labels[count++] = (uint32_t) value;
+ }
+
+ *n_labels = count;
+ return 0;
+}
+
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+
+/* int
+ * inet_pton4(src, dst)
+ * like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ * 1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ * does not touch `dst' unless it's returning 1.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, unsigned char *dst)
+{
+ static const char digits[] = "0123456789";
+ int saw_digit, octets, ch;
+ unsigned char tmp[INADDRSZ], *tp;
+
+ saw_digit = 0;
+ octets = 0;
+ *(tp = tmp) = 0;
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr(digits, ch);
+ if (pch != NULL) {
+ unsigned int new = *tp * 10 + (pch - digits);
+
+ if (new > 255)
+ return 0;
+ if (!saw_digit) {
+ if (++octets > 4)
+ return 0;
+ saw_digit = 1;
+ }
+ *tp = (unsigned char)new;
+ } else if (ch == '.' && saw_digit) {
+ if (octets == 4)
+ return 0;
+ *++tp = 0;
+ saw_digit = 0;
+ } else
+ return 0;
+ }
+ if (octets < 4)
+ return 0;
+
+ memcpy(dst, tmp, INADDRSZ);
+ return 1;
+}
+
+/* int
+ * inet_pton6(src, dst)
+ * convert presentation level address to network order binary form.
+ * return:
+ * 1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ * (1) does not touch `dst' unless it's returning 1.
+ * (2) :: in a full address is silently ignored.
+ * credit:
+ * inspired by Mark Andrews.
+ * author:
+ * Paul Vixie, 1996.
+ */
+static int
+inet_pton6(const char *src, unsigned char *dst)
+{
+ static const char xdigits_l[] = "0123456789abcdef",
+ xdigits_u[] = "0123456789ABCDEF";
+ unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0;
+ const char *xdigits = 0, *curtok = 0;
+ int ch = 0, saw_xdigit = 0, count_xdigit = 0;
+ unsigned int val = 0;
+ unsigned dbloct_count = 0;
+
+ memset((tp = tmp), '\0', IN6ADDRSZ);
+ endp = tp + IN6ADDRSZ;
+ colonp = NULL;
+ /* Leading :: requires some special handling. */
+ if (*src == ':')
+ if (*++src != ':')
+ return 0;
+ curtok = src;
+ saw_xdigit = count_xdigit = 0;
+ val = 0;
+
+ while ((ch = *src++) != '\0') {
+ const char *pch;
+
+ pch = strchr((xdigits = xdigits_l), ch);
+ if (pch == NULL)
+ pch = strchr((xdigits = xdigits_u), ch);
+ if (pch != NULL) {
+ if (count_xdigit >= 4)
+ return 0;
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+ saw_xdigit = 1;
+ count_xdigit++;
+ continue;
+ }
+ if (ch == ':') {
+ curtok = src;
+ if (!saw_xdigit) {
+ if (colonp)
+ return 0;
+ colonp = tp;
+ continue;
+ } else if (*src == '\0') {
+ return 0;
+ }
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char) ((val >> 8) & 0xff);
+ *tp++ = (unsigned char) (val & 0xff);
+ saw_xdigit = 0;
+ count_xdigit = 0;
+ val = 0;
+ dbloct_count++;
+ continue;
+ }
+ if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+ inet_pton4(curtok, tp) > 0) {
+ tp += INADDRSZ;
+ saw_xdigit = 0;
+ dbloct_count += 2;
+ break; /* '\0' was seen by inet_pton4(). */
+ }
+ return 0;
+ }
+ if (saw_xdigit) {
+ if (tp + sizeof(int16_t) > endp)
+ return 0;
+ *tp++ = (unsigned char) ((val >> 8) & 0xff);
+ *tp++ = (unsigned char) (val & 0xff);
+ dbloct_count++;
+ }
+ if (colonp != NULL) {
+ /* if we already have 8 double octets, having a colon means error */
+ if (dbloct_count == 8)
+ return 0;
+
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ for (i = 1; i <= n; i++) {
+ endp[-i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp)
+ return 0;
+ memcpy(dst, tmp, IN6ADDRSZ);
+ return 1;
+}
+
+static struct ether_addr *
+my_ether_aton(const char *a)
+{
+ int i;
+ char *end;
+ unsigned long o[ETHER_ADDR_LEN];
+ static struct ether_addr ether_addr;
+
+ i = 0;
+ do {
+ errno = 0;
+ o[i] = strtoul(a, &end, 16);
+ if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
+ return NULL;
+ a = end + 1;
+ } while (++i != sizeof(o) / sizeof(o[0]) && end[0] != 0);
+
+ /* Junk at the end of line */
+ if (end[0] != 0)
+ return NULL;
+
+ /* Support the format XX:XX:XX:XX:XX:XX */
+ if (i == ETHER_ADDR_LEN) {
+ while (i-- != 0) {
+ if (o[i] > UINT8_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i] = (uint8_t)o[i];
+ }
+ /* Support the format XXXX:XXXX:XXXX */
+ } else if (i == ETHER_ADDR_LEN / 2) {
+ while (i-- != 0) {
+ if (o[i] > UINT16_MAX)
+ return NULL;
+ ether_addr.addr_bytes[i * 2] = (uint8_t)(o[i] >> 8);
+ ether_addr.addr_bytes[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
+ }
+ /* unknown format */
+ } else
+ return NULL;
+
+ return (struct ether_addr *)&ether_addr;
+}
+
+int
+parse_ipv4_addr(const char *token, struct in_addr *ipv4)
+{
+ if (strlen(token) >= INET_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton4(token, (unsigned char *)ipv4) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+parse_ipv6_addr(const char *token, struct in6_addr *ipv6)
+{
+ if (strlen(token) >= INET6_ADDRSTRLEN)
+ return -EINVAL;
+
+ if (inet_pton6(token, (unsigned char *)ipv6) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+parse_mac_addr(const char *token, struct ether_addr *addr)
+{
+ struct ether_addr *tmp;
+
+ tmp = my_ether_aton(token);
+ if (tmp == NULL)
+ return -1;
+
+ memcpy(addr, tmp, sizeof(struct ether_addr));
+ return 0;
+}
+
+int
+parse_pipeline_core(uint32_t *socket,
+ uint32_t *core,
+ uint32_t *ht,
+ const char *entry)
+{
+ size_t num_len;
+ char num[8];
+
+ uint32_t s = 0, c = 0, h = 0, val;
+ uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0;
+ const char *next = skip_white_spaces(entry);
+ char type;
+
+ /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */
+ while (*next != '\0') {
+		/* If everything has been parsed, nothing should be left */
+ if (s_parsed && c_parsed && h_parsed)
+ return -EINVAL;
+
+ type = *next;
+ switch (type) {
+ case 's':
+ case 'S':
+ if (s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+ s_parsed = 1;
+ next++;
+ break;
+ case 'c':
+ case 'C':
+ if (c_parsed || h_parsed)
+ return -EINVAL;
+ c_parsed = 1;
+ next++;
+ break;
+ case 'h':
+ case 'H':
+ if (h_parsed)
+ return -EINVAL;
+ h_parsed = 1;
+ next++;
+ break;
+ default:
+			/* If it starts with a digit, it must be the core id only. */
+ if (!isdigit(*next) || s_parsed || c_parsed || h_parsed)
+ return -EINVAL;
+
+ type = 'C';
+ }
+
+ for (num_len = 0; *next != '\0'; next++, num_len++) {
+			/* Leave room for the trailing '\0' */
+			if (num_len == RTE_DIM(num) - 1)
+ return -EINVAL;
+
+ if (!isdigit(*next))
+ break;
+
+ num[num_len] = *next;
+ }
+
+ if (num_len == 0 && type != 'h' && type != 'H')
+ return -EINVAL;
+
+ if (num_len != 0 && (type == 'h' || type == 'H'))
+ return -EINVAL;
+
+ num[num_len] = '\0';
+ val = strtol(num, NULL, 10);
+
+ h = 0;
+ switch (type) {
+ case 's':
+ case 'S':
+ s = val;
+ break;
+ case 'c':
+ case 'C':
+ c = val;
+ break;
+ case 'h':
+ case 'H':
+ h = 1;
+ break;
+ }
+ }
+
+ *socket = s;
+ *core = c;
+ *ht = h;
+ return 0;
+}
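For reference, a small usage sketch of the core-placement syntax handled by parse_pipeline_core() above ("<core>", "sXcY", "sXcYh"); the standalone main() and sample strings are illustrative only, and the declaration normally comes from pipeline.h.

#include <stdio.h>
#include <stdint.h>

/* Declared in pipeline.h, defined in parser.c above */
int parse_pipeline_core(uint32_t *socket, uint32_t *core, uint32_t *ht,
	const char *entry);

int main(void)
{
	/* "3" -> s0 c3, "s1c2" -> s1 c2, "s1c2h" -> s1 c2 + hyper-thread */
	static const char *examples[] = { "3", "s1c2", "s1c2h" };
	uint32_t s, c, h, i;

	for (i = 0; i < 3; i++)
		if (parse_pipeline_core(&s, &c, &h, examples[i]) == 0)
			printf("%s -> socket %u, core %u, ht %u\n",
				examples[i], (unsigned)s, (unsigned)c,
				(unsigned)h);

	return 0;
}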
diff --git a/src/seastar/dpdk/examples/ip_pipeline/parser.h b/src/seastar/dpdk/examples/ip_pipeline/parser.h
new file mode 100644
index 00000000..9bd36af3
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/parser.h
@@ -0,0 +1,84 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PARSER_H__
+#define __INCLUDE_PARSER_H__
+
+#include <stdint.h>
+
+#include <rte_ip.h>
+#include <rte_ether.h>
+
+#define PARSE_DELIMITER " \f\n\r\t\v"
+
+#define skip_white_spaces(pos) \
+({ \
+ __typeof__(pos) _p = (pos); \
+ for ( ; isspace(*_p); _p++) \
+ ; \
+ _p; \
+})
+
+static inline size_t
+skip_digits(const char *src)
+{
+ size_t i;
+
+ for (i = 0; isdigit(src[i]); i++)
+ ;
+
+ return i;
+}
+
+int parser_read_arg_bool(const char *p);
+
+int parser_read_uint64(uint64_t *value, const char *p);
+int parser_read_uint32(uint32_t *value, const char *p);
+int parser_read_uint16(uint16_t *value, const char *p);
+int parser_read_uint8(uint8_t *value, const char *p);
+
+int parser_read_uint64_hex(uint64_t *value, const char *p);
+int parser_read_uint32_hex(uint32_t *value, const char *p);
+int parser_read_uint16_hex(uint16_t *value, const char *p);
+int parser_read_uint8_hex(uint8_t *value, const char *p);
+
+int parse_hex_string(char *src, uint8_t *dst, uint32_t *size);
+
+int parse_ipv4_addr(const char *token, struct in_addr *ipv4);
+int parse_ipv6_addr(const char *token, struct in6_addr *ipv6);
+int parse_mac_addr(const char *token, struct ether_addr *addr);
+int parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels);
+
+int parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens);
+
+#endif
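One behavior worth noting from the declarations above: parser_read_uint64() (defined in parser.c) accepts an optional K/M/G/T suffix, each step multiplying by 1024. A hedged usage sketch follows; the sample strings and standalone main() are illustrative, and a DPDK include path is assumed so that parser.h resolves.

#include <ctype.h>
#include <stdio.h>
#include <stdint.h>
#include "parser.h"

int main(void)
{
	static const char *inputs[] = { "512", "16K", "2M", "1G" };
	uint64_t v;
	int i;

	for (i = 0; i < 4; i++)
		if (parser_read_uint64(&v, inputs[i]) == 0)
			/* e.g. "16K" -> 16384, "2M" -> 2097152 */
			printf("%-4s = %llu\n", inputs[i],
				(unsigned long long)v);

	return 0;
}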
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline.h
new file mode 100644
index 00000000..14a551db
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline.h
@@ -0,0 +1,102 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_H__
+#define __INCLUDE_PIPELINE_H__
+
+#include <cmdline_parse.h>
+
+#include "pipeline_be.h"
+
+/*
+ * Pipeline type front-end operations
+ */
+
+typedef void* (*pipeline_fe_op_init)(struct pipeline_params *params,
+ void *arg);
+
+typedef int (*pipeline_fe_op_post_init)(void *pipeline);
+
+typedef int (*pipeline_fe_op_free)(void *pipeline);
+
+typedef int (*pipeline_fe_op_track)(struct pipeline_params *params,
+ uint32_t port_in,
+ uint32_t *port_out);
+
+struct pipeline_fe_ops {
+ pipeline_fe_op_init f_init;
+ pipeline_fe_op_post_init f_post_init;
+ pipeline_fe_op_free f_free;
+ pipeline_fe_op_track f_track;
+ cmdline_parse_ctx_t *cmds;
+};
+
+/*
+ * Pipeline type
+ */
+
+struct pipeline_type {
+ const char *name;
+
+ /* pipeline back-end */
+ struct pipeline_be_ops *be_ops;
+
+ /* pipeline front-end */
+ struct pipeline_fe_ops *fe_ops;
+};
+
+static inline uint32_t
+pipeline_type_cmds_count(struct pipeline_type *ptype)
+{
+ cmdline_parse_ctx_t *cmds;
+ uint32_t n_cmds;
+
+ if (ptype->fe_ops == NULL)
+ return 0;
+
+ cmds = ptype->fe_ops->cmds;
+ if (cmds == NULL)
+ return 0;
+
+ for (n_cmds = 0; cmds[n_cmds]; n_cmds++);
+
+ return n_cmds;
+}
+
+int
+parse_pipeline_core(uint32_t *socket,
+ uint32_t *core,
+ uint32_t *ht,
+ const char *entry);
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/hash_func.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/hash_func.h
new file mode 100644
index 00000000..9db7173f
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/hash_func.h
@@ -0,0 +1,351 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __INCLUDE_HASH_FUNC_H__
+#define __INCLUDE_HASH_FUNC_H__
+
+static inline uint64_t
+hash_xor_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0;
+
+ xor0 = seed ^ k[0];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+
+ xor0 ^= k[2];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+
+ xor0 ^= xor1;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+
+ xor0 ^= xor1;
+
+ xor0 ^= k[4];
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+ xor2 = k[4] ^ k[5];
+
+ xor0 ^= xor1;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1, xor2;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+ xor2 = k[4] ^ k[5];
+
+ xor0 ^= xor1;
+ xor2 ^= k[6];
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+static inline uint64_t
+hash_xor_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t xor0, xor1, xor2, xor3;
+
+ xor0 = (k[0] ^ seed) ^ k[1];
+ xor1 = k[2] ^ k[3];
+ xor2 = k[4] ^ k[5];
+ xor3 = k[6] ^ k[7];
+
+ xor0 ^= xor1;
+ xor2 ^= xor3;
+
+ xor0 ^= xor2;
+
+ return (xor0 >> 32) ^ xor0;
+}
+
+#if defined(RTE_ARCH_X86_64) && defined(RTE_MACHINE_CPUFLAG_SSE4_2)
+
+#include <x86intrin.h>
+
+static inline uint64_t
+hash_crc_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t crc0;
+
+ crc0 = _mm_crc32_u64(seed, k[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, crc0, crc1;
+
+ k0 = k[0];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, crc0, crc1;
+
+ k0 = k[0];
+ k2 = k[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc0 = _mm_crc32_u64(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0];
+ k2 = k[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = k2 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, crc0, crc1, crc2, crc3;
+
+ k0 = k[0];
+ k2 = k[2];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc0 = _mm_crc32_u64(crc0, crc1);
+ crc1 = _mm_crc32_u64(crc2, crc3);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
+
+ k0 = k[0];
+ k2 = k[2];
+ k5 = k[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, k5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0];
+ k2 = k[2];
+ k5 = k[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6]);
+ crc5 = k5 >> 32;
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0];
+ k2 = k[2];
+ k5 = k[5];
+
+ crc0 = _mm_crc32_u64(k0, seed);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+
+ crc2 = _mm_crc32_u64(k2, k[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+
+ crc4 = _mm_crc32_u64(k5, k[6]);
+ crc5 = _mm_crc32_u64(k5 >> 32, k[7]);
+
+ crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
+ crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#else
+
+#define hash_default_key8 hash_xor_key8
+#define hash_default_key16 hash_xor_key16
+#define hash_default_key24 hash_xor_key24
+#define hash_default_key32 hash_xor_key32
+#define hash_default_key40 hash_xor_key40
+#define hash_default_key48 hash_xor_key48
+#define hash_default_key56 hash_xor_key56
+#define hash_default_key64 hash_xor_key64
+
+#endif
+
+#endif
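
The fallback selection above means callers always go through the hash_default_key<N> names and automatically get the CRC32-based variants on x86_64 with SSE4.2, or the XOR fallback elsewhere. A minimal sketch of hashing one 16-byte key (illustrative only, not part of this patch; it assumes the DPDK headers and the example's compile flags are available):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    #include <rte_common.h>      /* __rte_unused, arch/machine flags */
    #include "hash_func.h"

    int main(void)
    {
            /* Two 64-bit words form the 16-byte lookup key. */
            uint64_t key[2] = { 0x0102030405060708ULL, 0x1112131415161718ULL };
            uint64_t seed = 0;
            uint64_t sig = hash_default_key16(key, 16, seed);

            printf("signature = 0x%016" PRIx64 "\n", sig);
            return 0;
    }
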
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_actions_common.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_actions_common.h
new file mode 100644
index 00000000..ab08612d
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_actions_common.h
@@ -0,0 +1,231 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
+#define __INCLUDE_PIPELINE_ACTIONS_COMMON_H__
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_mbuf.h>
+#include <rte_pipeline.h>
+
+#define PIPELINE_PORT_IN_AH(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], arg); \
+ \
+ return 0; \
+}
+
+#define PIPELINE_PORT_IN_AH_HIJACK_ALL(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint32_t n_pkts, \
+ void *arg) \
+{ \
+ uint64_t pkt_mask = RTE_LEN2MASK(n_pkts, uint64_t); \
+ uint32_t i; \
+ \
+ rte_pipeline_ah_packet_hijack(p, pkt_mask); \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], arg); \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_HIT(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_in_mask, \
+ struct rte_pipeline_table_entry **entries, \
+ void *arg) \
+{ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], &entries[i], arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], entries[i], arg); \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ f_pkt_work(pkts[pos], entries[pos], arg); \
+ } \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_MISS(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ __rte_unused struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_in_mask, \
+ struct rte_pipeline_table_entry *entry, \
+ void *arg) \
+{ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) \
+ f_pkt4_work(&pkts[i], entry, arg); \
+ \
+ for ( ; i < n_pkts; i++) \
+ f_pkt_work(pkts[i], entry, arg); \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ f_pkt_work(pkts[pos], entry, arg); \
+ } \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_HIT_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_mask, \
+ struct rte_pipeline_table_entry **entries, \
+ void *arg) \
+{ \
+ uint64_t pkts_in_mask = pkts_mask; \
+ uint64_t pkts_out_mask = pkts_mask; \
+ uint64_t time = rte_rdtsc(); \
+ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
+ uint64_t mask = f_pkt4_work(&pkts[i], \
+ &entries[i], arg, time); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ \
+ for ( ; i < n_pkts; i++) { \
+ uint64_t mask = f_pkt_work(pkts[i], \
+ entries[i], arg, time); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ uint64_t mask = f_pkt_work(pkts[pos], \
+ entries[pos], arg, time); \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ pkts_out_mask ^= mask << pos; \
+ } \
+ \
+ rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask); \
+ \
+ return 0; \
+}
+
+#define PIPELINE_TABLE_AH_MISS_DROP_TIME(f_ah, f_pkt_work, f_pkt4_work) \
+static int \
+f_ah( \
+ struct rte_pipeline *p, \
+ struct rte_mbuf **pkts, \
+ uint64_t pkts_mask, \
+ struct rte_pipeline_table_entry *entry, \
+ void *arg) \
+{ \
+ uint64_t pkts_in_mask = pkts_mask; \
+ uint64_t pkts_out_mask = pkts_mask; \
+ uint64_t time = rte_rdtsc(); \
+ \
+ if ((pkts_in_mask & (pkts_in_mask + 1)) == 0) { \
+ uint64_t n_pkts = __builtin_popcountll(pkts_in_mask); \
+ uint32_t i; \
+ \
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) { \
+ uint64_t mask = f_pkt4_work(&pkts[i], \
+ entry, arg, time); \
+ pkts_out_mask ^= mask << i; \
+ } \
+ \
+ for ( ; i < n_pkts; i++) { \
+ uint64_t mask = f_pkt_work(pkts[i], entry, arg, time);\
+ pkts_out_mask ^= mask << i; \
+ } \
+ } else \
+ for ( ; pkts_in_mask; ) { \
+ uint32_t pos = __builtin_ctzll(pkts_in_mask); \
+ uint64_t pkt_mask = 1LLU << pos; \
+ uint64_t mask = f_pkt_work(pkts[pos], \
+ entry, arg, time); \
+ \
+ pkts_in_mask &= ~pkt_mask; \
+ pkts_out_mask ^= mask << pos; \
+ } \
+ \
+ rte_pipeline_ah_packet_drop(p, pkts_out_mask ^ pkts_mask); \
+ \
+ return 0; \
+}
+
+#endif
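
Each macro above expands to a complete action handler with the signature rte_pipeline expects, built from a per-packet work function plus a 4-packet variant for the unrolled fast path. A minimal sketch of generating an input-port handler (illustrative only, not part of this patch; the my_* names are assumptions):

    #include "pipeline_actions_common.h"

    /* Per-packet work: e.g. rewrite a header field or bump a counter. */
    static inline void
    my_pkt_work(struct rte_mbuf *pkt, void *arg)
    {
            (void) pkt;
            (void) arg;
    }

    /* 4-packet work: here it simply reuses the scalar path. */
    static inline void
    my_pkt4_work(struct rte_mbuf **pkts, void *arg)
    {
            my_pkt_work(pkts[0], arg);
            my_pkt_work(pkts[1], arg);
            my_pkt_work(pkts[2], arg);
            my_pkt_work(pkts[3], arg);
    }

    /* Expands to a static function my_port_in_ah() that can be passed
     * as the input port action handler (f_action) when the back-end
     * builds its rte_pipeline. */
    PIPELINE_PORT_IN_AH(my_port_in_ah, my_pkt_work, my_pkt4_work)
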
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_be.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_be.c
new file mode 100644
index 00000000..347e72b5
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_be.c
@@ -0,0 +1,205 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "pipeline_common_be.h"
+
+void *
+pipeline_msg_req_ping_handler(__rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_msg_rsp *rsp = msg;
+
+ rsp->status = 0; /* OK */
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_stats_port_in_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_stats_msg_req *req = msg;
+ struct pipeline_stats_port_in_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->id >= p->n_ports_in) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_in_id[req->id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_in_stats_read(p->p,
+ port_id,
+ &rsp->stats,
+ 1);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_stats_port_out_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_stats_msg_req *req = msg;
+ struct pipeline_stats_port_out_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->id >= p->n_ports_out) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_out_id[req->id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_out_stats_read(p->p,
+ port_id,
+ &rsp->stats,
+ 1);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_stats_table_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_stats_msg_req *req = msg;
+ struct pipeline_stats_table_msg_rsp *rsp = msg;
+ uint32_t table_id;
+
+ /* Check request */
+ if (req->id >= p->n_tables) {
+ rsp->status = -1;
+ return rsp;
+ }
+ table_id = p->table_id[req->id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_table_stats_read(p->p,
+ table_id,
+ &rsp->stats,
+ 1);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_port_in_enable_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_port_in_msg_req *req = msg;
+ struct pipeline_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->port_id >= p->n_ports_in) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_in_id[req->port_id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_in_enable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_port_in_disable_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_port_in_msg_req *req = msg;
+ struct pipeline_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ /* Check request */
+ if (req->port_id >= p->n_ports_in) {
+ rsp->status = -1;
+ return rsp;
+ }
+ port_id = p->port_in_id[req->port_id];
+
+ /* Process request */
+ rsp->status = rte_pipeline_port_in_disable(p->p,
+ port_id);
+
+ return rsp;
+}
+
+void *
+pipeline_msg_req_invalid_handler(__rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_msg_rsp *rsp = msg;
+
+ rsp->status = -1; /* Error */
+
+ return rsp;
+}
+
+int
+pipeline_msg_req_handle(struct pipeline *p)
+{
+ uint32_t msgq_id;
+
+ for (msgq_id = 0; msgq_id < p->n_msgq; msgq_id++) {
+ for ( ; ; ) {
+ struct pipeline_msg_req *req;
+ pipeline_msg_req_handler f_handle;
+
+ req = pipeline_msg_recv(p, msgq_id);
+ if (req == NULL)
+ break;
+
+ f_handle = (req->type < PIPELINE_MSG_REQS) ?
+ p->handlers[req->type] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ pipeline_msg_send(p,
+ msgq_id,
+ f_handle(p, (void *) req));
+ }
+ }
+
+ return 0;
+}
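
A pipeline back-end opts into these common handlers by filling its handlers[] array and then calling pipeline_msg_req_handle() from its periodic work. A minimal sketch (illustrative only, not part of this patch; my_pipeline_be_init_handlers is an assumed helper name):

    #include <string.h>

    #include "pipeline_common_be.h"

    static void
    my_pipeline_be_init_handlers(struct pipeline *p)
    {
            memset(p->handlers, 0, sizeof(p->handlers));

            p->handlers[PIPELINE_MSG_REQ_PING] =
                    pipeline_msg_req_ping_handler;
            p->handlers[PIPELINE_MSG_REQ_STATS_PORT_IN] =
                    pipeline_msg_req_stats_port_in_handler;
            p->handlers[PIPELINE_MSG_REQ_STATS_PORT_OUT] =
                    pipeline_msg_req_stats_port_out_handler;
            p->handlers[PIPELINE_MSG_REQ_STATS_TABLE] =
                    pipeline_msg_req_stats_table_handler;
            p->handlers[PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
                    pipeline_msg_req_port_in_enable_handler;
            p->handlers[PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
                    pipeline_msg_req_port_in_disable_handler;

            /* PIPELINE_MSG_REQ_CUSTOM is left NULL here;
             * pipeline_msg_req_handle() then answers such requests with
             * pipeline_msg_req_invalid_handler (status -1). */
    }
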
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_be.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_be.h
new file mode 100644
index 00000000..07fdca09
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_be.h
@@ -0,0 +1,163 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_COMMON_BE_H__
+#define __INCLUDE_PIPELINE_COMMON_BE_H__
+
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_be.h"
+
+struct pipeline;
+
+enum pipeline_msg_req_type {
+ PIPELINE_MSG_REQ_PING = 0,
+ PIPELINE_MSG_REQ_STATS_PORT_IN,
+ PIPELINE_MSG_REQ_STATS_PORT_OUT,
+ PIPELINE_MSG_REQ_STATS_TABLE,
+ PIPELINE_MSG_REQ_PORT_IN_ENABLE,
+ PIPELINE_MSG_REQ_PORT_IN_DISABLE,
+ PIPELINE_MSG_REQ_CUSTOM,
+ PIPELINE_MSG_REQS
+};
+
+typedef void *(*pipeline_msg_req_handler)(struct pipeline *p, void *msg);
+
+struct pipeline {
+ struct rte_pipeline *p;
+ uint32_t port_in_id[PIPELINE_MAX_PORT_IN];
+ uint32_t port_out_id[PIPELINE_MAX_PORT_OUT];
+ uint32_t table_id[PIPELINE_MAX_TABLES];
+ struct rte_ring *msgq_in[PIPELINE_MAX_MSGQ_IN];
+ struct rte_ring *msgq_out[PIPELINE_MAX_MSGQ_OUT];
+
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_tables;
+ uint32_t n_msgq;
+
+ pipeline_msg_req_handler handlers[PIPELINE_MSG_REQS];
+ char name[PIPELINE_NAME_SIZE];
+ uint32_t log_level;
+};
+
+enum pipeline_log_level {
+ PIPELINE_LOG_LEVEL_HIGH = 1,
+ PIPELINE_LOG_LEVEL_LOW,
+ PIPELINE_LOG_LEVELS
+};
+
+#define PLOG(p, level, fmt, ...) \
+do { \
+ if (p->log_level >= PIPELINE_LOG_LEVEL_ ## level) \
+ fprintf(stdout, "[%s] " fmt "\n", p->name, ## __VA_ARGS__);\
+} while (0)
+
+static inline void *
+pipeline_msg_recv(struct pipeline *p,
+ uint32_t msgq_id)
+{
+ struct rte_ring *r = p->msgq_in[msgq_id];
+ void *msg;
+ int status = rte_ring_sc_dequeue(r, &msg);
+
+ if (status != 0)
+ return NULL;
+
+ return msg;
+}
+
+static inline void
+pipeline_msg_send(struct pipeline *p,
+ uint32_t msgq_id,
+ void *msg)
+{
+ struct rte_ring *r = p->msgq_out[msgq_id];
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(r, msg);
+ } while (status == -ENOBUFS);
+}
+
+struct pipeline_msg_req {
+ enum pipeline_msg_req_type type;
+};
+
+struct pipeline_stats_msg_req {
+ enum pipeline_msg_req_type type;
+ uint32_t id;
+};
+
+struct pipeline_port_in_msg_req {
+ enum pipeline_msg_req_type type;
+ uint32_t port_id;
+};
+
+struct pipeline_custom_msg_req {
+ enum pipeline_msg_req_type type;
+ uint32_t subtype;
+};
+
+struct pipeline_msg_rsp {
+ int status;
+};
+
+struct pipeline_stats_port_in_msg_rsp {
+ int status;
+ struct rte_pipeline_port_in_stats stats;
+};
+
+struct pipeline_stats_port_out_msg_rsp {
+ int status;
+ struct rte_pipeline_port_out_stats stats;
+};
+
+struct pipeline_stats_table_msg_rsp {
+ int status;
+ struct rte_pipeline_table_stats stats;
+};
+
+void *pipeline_msg_req_ping_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_stats_port_in_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_stats_port_out_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_stats_table_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_port_in_enable_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_port_in_disable_handler(struct pipeline *p, void *msg);
+void *pipeline_msg_req_invalid_handler(struct pipeline *p, void *msg);
+
+int pipeline_msg_req_handle(struct pipeline *p);
+
+#endif
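
Responses reuse the request buffer, so every message begins with the common fields and pipeline-specific requests extend them in place. A minimal sketch of a custom request/response pair (illustrative only, not part of this patch; the my_* names are assumptions):

    #include <stdint.h>

    #include "pipeline_common_be.h"

    enum my_msg_req_custom_type {
            MY_MSG_REQ_ENTRY_ADD = 0,
    };

    /* Must start with the same fields as struct pipeline_custom_msg_req,
     * so the back-end can dispatch on type (PIPELINE_MSG_REQ_CUSTOM)
     * and then on subtype. */
    struct my_entry_add_msg_req {
            enum pipeline_msg_req_type type;
            uint32_t subtype;
            uint32_t key;           /* request payload */
    };

    /* The response is written over the same buffer; status comes first,
     * exactly like struct pipeline_msg_rsp. */
    struct my_entry_add_msg_rsp {
            int status;
            void *entry_ptr;        /* response payload */
    };
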
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_fe.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_fe.c
new file mode 100644
index 00000000..75211878
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_fe.c
@@ -0,0 +1,1484 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline.h>
+
+#include "pipeline_common_fe.h"
+#include "parser.h"
+
+struct app_link_params *
+app_pipeline_track_pktq_out_to_link(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t pktq_out_id)
+{
+ struct app_pipeline_params *p;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return NULL;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return NULL;
+
+ for ( ; ; ) {
+ struct app_pktq_out_params *pktq_out =
+ &p->pktq_out[pktq_out_id];
+
+ switch (pktq_out->type) {
+ case APP_PKTQ_OUT_HWQ:
+ {
+ struct app_pktq_hwq_out_params *hwq_out;
+
+ hwq_out = &app->hwq_out_params[pktq_out->id];
+
+ return app_get_link_for_txq(app, hwq_out);
+ }
+
+ case APP_PKTQ_OUT_SWQ:
+ {
+ struct pipeline_params pp;
+ struct pipeline_type *ptype;
+ struct app_pktq_swq_params *swq;
+ uint32_t pktq_in_id;
+ int status;
+
+ swq = &app->swq_params[pktq_out->id];
+ p = app_swq_get_reader(app, swq, &pktq_in_id);
+ if (p == NULL)
+ return NULL;
+
+ ptype = app_pipeline_type_find(app, p->type);
+ if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+ return NULL;
+
+ app_pipeline_params_get(app, p, &pp);
+ status = ptype->fe_ops->f_track(&pp,
+ pktq_in_id,
+ &pktq_out_id);
+ if (status)
+ return NULL;
+
+ break;
+ }
+
+ case APP_PKTQ_OUT_TM:
+ {
+ struct pipeline_params pp;
+ struct pipeline_type *ptype;
+ struct app_pktq_tm_params *tm;
+ uint32_t pktq_in_id;
+ int status;
+
+ tm = &app->tm_params[pktq_out->id];
+ p = app_tm_get_reader(app, tm, &pktq_in_id);
+ if (p == NULL)
+ return NULL;
+
+ ptype = app_pipeline_type_find(app, p->type);
+ if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+ return NULL;
+
+ app_pipeline_params_get(app, p, &pp);
+ status = ptype->fe_ops->f_track(&pp,
+ pktq_in_id,
+ &pktq_out_id);
+ if (status)
+ return NULL;
+
+ break;
+ }
+
+ case APP_PKTQ_OUT_KNI:
+ {
+ struct pipeline_params pp;
+ struct pipeline_type *ptype;
+ struct app_pktq_kni_params *kni;
+ uint32_t pktq_in_id;
+ int status;
+
+ kni = &app->kni_params[pktq_out->id];
+ p = app_kni_get_reader(app, kni, &pktq_in_id);
+ if (p == NULL)
+ return NULL;
+
+ ptype = app_pipeline_type_find(app, p->type);
+ if ((ptype == NULL) || (ptype->fe_ops->f_track == NULL))
+ return NULL;
+
+ app_pipeline_params_get(app, p, &pp);
+ status = ptype->fe_ops->f_track(&pp,
+ pktq_in_id,
+ &pktq_out_id);
+ if (status)
+ return NULL;
+
+ break;
+ }
+
+ case APP_PKTQ_OUT_TAP:
+ case APP_PKTQ_OUT_SINK:
+ default:
+ return NULL;
+ }
+ }
+}
+
+int
+app_pipeline_track_default(struct pipeline_params *p,
+ uint32_t port_in,
+ uint32_t *port_out)
+{
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ if (p->n_ports_out == 1) {
+ *port_out = 0;
+ return 0;
+ }
+
+ return -1;
+}
+
+int
+app_pipeline_ping(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_PING;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_stats_port_in(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_stats_msg_req *req;
+ struct pipeline_stats_port_in_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_in))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_STATS_PORT_IN;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = (struct pipeline_stats_port_in_msg_rsp *)
+ app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->stats, sizeof(rsp->stats));
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_stats_port_out(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_stats_msg_req *req;
+ struct pipeline_stats_port_out_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (pipeline_id >= app->n_pipelines) ||
+ (stats == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_out))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_STATS_PORT_OUT;
+ req->id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->stats, sizeof(rsp->stats));
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_stats_table(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_stats_msg_req *req;
+ struct pipeline_stats_table_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (stats == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_STATS_TABLE;
+ req->id = table_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+ if (status == 0)
+ memcpy(stats, &rsp->stats, sizeof(rsp->stats));
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_port_in_enable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_port_in_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_in))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_PORT_IN_ENABLE;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_pipeline_port_in_disable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_port_in_msg_req *req;
+ struct pipeline_msg_rsp *rsp;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if ((p == NULL) ||
+ (port_id >= p->n_pktq_in))
+ return -1;
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ /* Fill in request */
+ req->type = PIPELINE_MSG_REQ_PORT_IN_DISABLE;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Check response */
+ status = rsp->status;
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+
+ return status;
+}
+
+int
+app_link_set_op(struct app_params *app,
+ uint32_t link_id,
+ uint32_t pipeline_id,
+ app_link_op op,
+ void *arg)
+{
+ struct app_pipeline_params *pp;
+ struct app_link_params *lp;
+ struct app_link_data *ld;
+ uint32_t ppos, lpos;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (op == NULL))
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, lp);
+ if (lp == NULL)
+ return -1;
+ lpos = lp - app->link_params;
+ ld = &app->link_data[lpos];
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, pp);
+ if (pp == NULL)
+ return -1;
+ ppos = pp - app->pipeline_params;
+
+ ld->f_link[ppos] = op;
+ ld->arg[ppos] = arg;
+
+ return 0;
+}
+
+int
+app_link_config(struct app_params *app,
+ uint32_t link_id,
+ uint32_t ip,
+ uint32_t depth)
+{
+ struct app_link_params *p;
+ uint32_t i, netmask, host, bcast;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ if (p->state) {
+ APP_LOG(app, HIGH, "%s is UP, please bring it DOWN first",
+ p->name);
+ return -1;
+ }
+
+	if ((depth == 0) || (depth > 32)) {
+		APP_LOG(app, HIGH, "Illegal value for depth parameter "
+			"(%" PRIu32 ")",
+			depth);
+		return -1;
+	}
+
+	netmask = (~0U) << (32 - depth);
+	host = ip & netmask;
+	bcast = host | (~netmask);
+
+	if ((ip == 0) ||
+		(ip == UINT32_MAX) ||
+		(ip == host) ||
+		(ip == bcast)) {
+		APP_LOG(app, HIGH, "Illegal IP address");
+		return -1;
+	}
+
+	for (i = 0; i < app->n_links; i++) {
+		struct app_link_params *link = &app->link_params[i];
+
+		if (strcmp(p->name, link->name) == 0)
+			continue;
+
+		if (link->ip == ip) {
+			APP_LOG(app, HIGH,
+				"%s is already assigned this IP address",
+				link->name);
+			return -1;
+		}
+	}
+
+ /* Save link parameters */
+ p->ip = ip;
+ p->depth = depth;
+
+ return 0;
+}
+
+int
+app_link_up(struct app_params *app,
+ uint32_t link_id)
+{
+ struct app_link_params *p;
+ struct app_link_data *d;
+ int i;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ d = &app->link_data[p - app->link_params];
+
+ /* Check link state */
+ if (p->state) {
+ APP_LOG(app, HIGH, "%s is already UP", p->name);
+ return 0;
+ }
+
+ /* Check that IP address is valid */
+ if (p->ip == 0) {
+ APP_LOG(app, HIGH, "%s IP address is not set", p->name);
+ return 0;
+ }
+
+ app_link_up_internal(app, p);
+
+ /* Callbacks */
+ for (i = 0; i < APP_MAX_PIPELINES; i++)
+ if (d->f_link[i])
+ d->f_link[i](app, link_id, 1, d->arg[i]);
+
+ return 0;
+}
+
+int
+app_link_down(struct app_params *app,
+ uint32_t link_id)
+{
+ struct app_link_params *p;
+ struct app_link_data *d;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ if (p == NULL) {
+ APP_LOG(app, HIGH, "LINK%" PRIu32 " is not a valid link",
+ link_id);
+ return -1;
+ }
+
+ d = &app->link_data[p - app->link_params];
+
+ /* Check link state */
+ if (p->state == 0) {
+ APP_LOG(app, HIGH, "%s is already DOWN", p->name);
+ return 0;
+ }
+
+ app_link_down_internal(app, p);
+
+ /* Callbacks */
+ for (i = 0; i < APP_MAX_PIPELINES; i++)
+ if (d->f_link[i])
+ d->f_link[i](app, link_id, 0, d->arg[i]);
+
+ return 0;
+}
+
+/*
+ * ping
+ */
+
+struct cmd_ping_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t ping_string;
+};
+
+static void
+cmd_ping_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_ping_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_ping(app, params->pipeline_id);
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+static cmdline_parse_token_string_t cmd_ping_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_ping_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_ping_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_ping_result, pipeline_id, UINT32);
+
+static cmdline_parse_token_string_t cmd_ping_ping_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_ping_result, ping_string, "ping");
+
+static cmdline_parse_inst_t cmd_ping = {
+ .f = cmd_ping_parsed,
+ .data = NULL,
+ .help_str = "Pipeline ping",
+ .tokens = {
+ (void *) &cmd_ping_p_string,
+ (void *) &cmd_ping_pipeline_id,
+ (void *) &cmd_ping_ping_string,
+ NULL,
+ },
+};
+
+/*
+ * stats port in
+ */
+
+struct cmd_stats_port_in_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t in_string;
+ uint32_t port_in_id;
+
+};
+
+static void
+cmd_stats_port_in_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_stats_port_in_result *params = parsed_result;
+ struct app_params *app = data;
+ struct rte_pipeline_port_in_stats stats;
+ int status;
+
+ status = app_pipeline_stats_port_in(app,
+ params->pipeline_id,
+ params->port_in_id,
+ &stats);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("Pipeline %" PRIu32 " - stats for input port %" PRIu32 ":\n"
+ "\tPkts in: %" PRIu64 "\n"
+ "\tPkts dropped by AH: %" PRIu64 "\n"
+ "\tPkts dropped by other: %" PRIu64 "\n",
+ params->pipeline_id,
+ params->port_in_id,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+static cmdline_parse_token_string_t cmd_stats_port_in_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_stats_port_in_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, pipeline_id,
+ UINT32);
+
+static cmdline_parse_token_string_t cmd_stats_port_in_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, stats_string,
+ "stats");
+
+static cmdline_parse_token_string_t cmd_stats_port_in_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, port_string,
+ "port");
+
+static cmdline_parse_token_string_t cmd_stats_port_in_in_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_in_result, in_string,
+ "in");
+
+static cmdline_parse_token_num_t cmd_stats_port_in_port_in_id =
+	TOKEN_NUM_INITIALIZER(struct cmd_stats_port_in_result, port_in_id,
+		UINT32);
+
+static cmdline_parse_inst_t cmd_stats_port_in = {
+ .f = cmd_stats_port_in_parsed,
+ .data = NULL,
+ .help_str = "Pipeline input port stats",
+ .tokens = {
+ (void *) &cmd_stats_port_in_p_string,
+ (void *) &cmd_stats_port_in_pipeline_id,
+ (void *) &cmd_stats_port_in_stats_string,
+ (void *) &cmd_stats_port_in_port_string,
+ (void *) &cmd_stats_port_in_in_string,
+ (void *) &cmd_stats_port_in_port_in_id,
+ NULL,
+ },
+};
+
+/*
+ * stats port out
+ */
+
+struct cmd_stats_port_out_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t out_string;
+ uint32_t port_out_id;
+};
+
+static void
+cmd_stats_port_out_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+
+ struct cmd_stats_port_out_result *params = parsed_result;
+ struct app_params *app = data;
+ struct rte_pipeline_port_out_stats stats;
+ int status;
+
+ status = app_pipeline_stats_port_out(app,
+ params->pipeline_id,
+ params->port_out_id,
+ &stats);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("Pipeline %" PRIu32 " - stats for output port %" PRIu32 ":\n"
+ "\tPkts in: %" PRIu64 "\n"
+ "\tPkts dropped by AH: %" PRIu64 "\n"
+ "\tPkts dropped by other: %" PRIu64 "\n",
+ params->pipeline_id,
+ params->port_out_id,
+ stats.stats.n_pkts_in,
+ stats.n_pkts_dropped_by_ah,
+ stats.stats.n_pkts_drop);
+}
+
+static cmdline_parse_token_string_t cmd_stats_port_out_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_stats_port_out_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, pipeline_id,
+ UINT32);
+
+static cmdline_parse_token_string_t cmd_stats_port_out_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, stats_string,
+ "stats");
+
+static cmdline_parse_token_string_t cmd_stats_port_out_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, port_string,
+ "port");
+
+static cmdline_parse_token_string_t cmd_stats_port_out_out_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_port_out_result, out_string,
+ "out");
+
+static cmdline_parse_token_num_t cmd_stats_port_out_port_out_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_port_out_result, port_out_id,
+ UINT32);
+
+static cmdline_parse_inst_t cmd_stats_port_out = {
+ .f = cmd_stats_port_out_parsed,
+ .data = NULL,
+ .help_str = "Pipeline output port stats",
+ .tokens = {
+ (void *) &cmd_stats_port_out_p_string,
+ (void *) &cmd_stats_port_out_pipeline_id,
+ (void *) &cmd_stats_port_out_stats_string,
+ (void *) &cmd_stats_port_out_port_string,
+ (void *) &cmd_stats_port_out_out_string,
+ (void *) &cmd_stats_port_out_port_out_id,
+ NULL,
+ },
+};
+
+/*
+ * stats table
+ */
+
+struct cmd_stats_table_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t stats_string;
+ cmdline_fixed_string_t table_string;
+ uint32_t table_id;
+};
+
+static void
+cmd_stats_table_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_stats_table_result *params = parsed_result;
+ struct app_params *app = data;
+ struct rte_pipeline_table_stats stats;
+ int status;
+
+ status = app_pipeline_stats_table(app,
+ params->pipeline_id,
+ params->table_id,
+ &stats);
+
+ if (status != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ /* Display stats */
+ printf("Pipeline %" PRIu32 " - stats for table %" PRIu32 ":\n"
+ "\tPkts in: %" PRIu64 "\n"
+ "\tPkts in with lookup miss: %" PRIu64 "\n"
+ "\tPkts in with lookup hit dropped by AH: %" PRIu64 "\n"
+ "\tPkts in with lookup hit dropped by others: %" PRIu64 "\n"
+ "\tPkts in with lookup miss dropped by AH: %" PRIu64 "\n"
+ "\tPkts in with lookup miss dropped by others: %" PRIu64 "\n",
+ params->pipeline_id,
+ params->table_id,
+ stats.stats.n_pkts_in,
+ stats.stats.n_pkts_lookup_miss,
+ stats.n_pkts_dropped_by_lkp_hit_ah,
+ stats.n_pkts_dropped_lkp_hit,
+ stats.n_pkts_dropped_by_lkp_miss_ah,
+ stats.n_pkts_dropped_lkp_miss);
+}
+
+static cmdline_parse_token_string_t cmd_stats_table_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_stats_table_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, pipeline_id,
+ UINT32);
+
+static cmdline_parse_token_string_t cmd_stats_table_stats_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, stats_string,
+ "stats");
+
+static cmdline_parse_token_string_t cmd_stats_table_table_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_stats_table_result, table_string,
+ "table");
+
+static cmdline_parse_token_num_t cmd_stats_table_table_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_stats_table_result, table_id, UINT32);
+
+static cmdline_parse_inst_t cmd_stats_table = {
+ .f = cmd_stats_table_parsed,
+ .data = NULL,
+ .help_str = "Pipeline table stats",
+ .tokens = {
+ (void *) &cmd_stats_table_p_string,
+ (void *) &cmd_stats_table_pipeline_id,
+ (void *) &cmd_stats_table_stats_string,
+ (void *) &cmd_stats_table_table_string,
+ (void *) &cmd_stats_table_table_id,
+ NULL,
+ },
+};
+
+/*
+ * port in enable
+ */
+
+struct cmd_port_in_enable_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t in_string;
+ uint32_t port_in_id;
+ cmdline_fixed_string_t enable_string;
+};
+
+static void
+cmd_port_in_enable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_port_in_enable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_port_in_enable(app,
+ params->pipeline_id,
+ params->port_in_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+static cmdline_parse_token_string_t cmd_port_in_enable_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_port_in_enable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, pipeline_id,
+ UINT32);
+
+static cmdline_parse_token_string_t cmd_port_in_enable_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, port_string,
+ "port");
+
+static cmdline_parse_token_string_t cmd_port_in_enable_in_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result, in_string,
+ "in");
+
+static cmdline_parse_token_num_t cmd_port_in_enable_port_in_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_enable_result, port_in_id,
+ UINT32);
+
+static cmdline_parse_token_string_t cmd_port_in_enable_enable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_enable_result,
+ enable_string, "enable");
+
+static cmdline_parse_inst_t cmd_port_in_enable = {
+ .f = cmd_port_in_enable_parsed,
+ .data = NULL,
+ .help_str = "Pipeline input port enable",
+ .tokens = {
+ (void *) &cmd_port_in_enable_p_string,
+ (void *) &cmd_port_in_enable_pipeline_id,
+ (void *) &cmd_port_in_enable_port_string,
+ (void *) &cmd_port_in_enable_in_string,
+ (void *) &cmd_port_in_enable_port_in_id,
+ (void *) &cmd_port_in_enable_enable_string,
+ NULL,
+ },
+};
+
+/*
+ * port in disable
+ */
+
+struct cmd_port_in_disable_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t port_string;
+ cmdline_fixed_string_t in_string;
+ uint32_t port_in_id;
+ cmdline_fixed_string_t disable_string;
+};
+
+static void
+cmd_port_in_disable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_port_in_disable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ status = app_pipeline_port_in_disable(app,
+ params->pipeline_id,
+ params->port_in_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+static cmdline_parse_token_string_t cmd_port_in_disable_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, p_string,
+ "p");
+
+static cmdline_parse_token_num_t cmd_port_in_disable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, pipeline_id,
+ UINT32);
+
+static cmdline_parse_token_string_t cmd_port_in_disable_port_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, port_string,
+ "port");
+
+static cmdline_parse_token_string_t cmd_port_in_disable_in_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result, in_string,
+ "in");
+
+static cmdline_parse_token_num_t cmd_port_in_disable_port_in_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_port_in_disable_result, port_in_id,
+ UINT32);
+
+static cmdline_parse_token_string_t cmd_port_in_disable_disable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_port_in_disable_result,
+ disable_string, "disable");
+
+static cmdline_parse_inst_t cmd_port_in_disable = {
+ .f = cmd_port_in_disable_parsed,
+ .data = NULL,
+ .help_str = "Pipeline input port disable",
+ .tokens = {
+ (void *) &cmd_port_in_disable_p_string,
+ (void *) &cmd_port_in_disable_pipeline_id,
+ (void *) &cmd_port_in_disable_port_string,
+ (void *) &cmd_port_in_disable_in_string,
+ (void *) &cmd_port_in_disable_port_in_id,
+ (void *) &cmd_port_in_disable_disable_string,
+ NULL,
+ },
+};
+
+/*
+ * link config
+ */
+
+static void
+print_link_info(struct app_link_params *p)
+{
+ struct rte_eth_stats stats;
+ struct ether_addr *mac_addr;
+ uint32_t netmask = (~0U) << (32 - p->depth);
+ uint32_t host = p->ip & netmask;
+ uint32_t bcast = host | (~netmask);
+
+ memset(&stats, 0, sizeof(stats));
+ rte_eth_stats_get(p->pmd_id, &stats);
+
+ mac_addr = (struct ether_addr *) &p->mac_addr;
+
+ if (strlen(p->pci_bdf))
+ printf("%s(%s): flags=<%s>\n",
+ p->name,
+ p->pci_bdf,
+ (p->state) ? "UP" : "DOWN");
+ else
+ printf("%s: flags=<%s>\n",
+ p->name,
+ (p->state) ? "UP" : "DOWN");
+
+ if (p->ip)
+ printf("\tinet %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32
+ " netmask %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 " "
+ "broadcast %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 "\n",
+ (p->ip >> 24) & 0xFF,
+ (p->ip >> 16) & 0xFF,
+ (p->ip >> 8) & 0xFF,
+ p->ip & 0xFF,
+ (netmask >> 24) & 0xFF,
+ (netmask >> 16) & 0xFF,
+ (netmask >> 8) & 0xFF,
+ netmask & 0xFF,
+ (bcast >> 24) & 0xFF,
+ (bcast >> 16) & 0xFF,
+ (bcast >> 8) & 0xFF,
+ bcast & 0xFF);
+
+ printf("\tether %02" PRIx32 ":%02" PRIx32 ":%02" PRIx32
+ ":%02" PRIx32 ":%02" PRIx32 ":%02" PRIx32 "\n",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5]);
+
+ printf("\tRX packets %" PRIu64
+ " bytes %" PRIu64
+ "\n",
+ stats.ipackets,
+ stats.ibytes);
+
+ printf("\tRX errors %" PRIu64
+ " missed %" PRIu64
+ " no-mbuf %" PRIu64
+ "\n",
+ stats.ierrors,
+ stats.imissed,
+ stats.rx_nombuf);
+
+ printf("\tTX packets %" PRIu64
+ " bytes %" PRIu64 "\n",
+ stats.opackets,
+ stats.obytes);
+
+ printf("\tTX errors %" PRIu64
+ "\n",
+ stats.oerrors);
+
+ printf("\n");
+}
+
+/*
+ * link
+ *
+ * link config:
+ * link <linkid> config <ipaddr> <depth>
+ *
+ * link up:
+ * link <linkid> up
+ *
+ * link down:
+ * link <linkid> down
+ *
+ * link ls:
+ * link ls
+ */
+
+struct cmd_link_result {
+ cmdline_fixed_string_t link_string;
+ cmdline_multi_string_t multi_string;
+};
+
+static void
+cmd_link_parsed(
+ void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_link_result *params = parsed_result;
+ struct app_params *app = data;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
+
+ uint32_t link_id;
+
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status != 0) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "link");
+ return;
+ }
+
+ /* link ls */
+ if ((n_tokens == 1) && (strcmp(tokens[0], "ls") == 0)) {
+ for (link_id = 0; link_id < app->n_links; link_id++) {
+ struct app_link_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, p);
+ print_link_info(p);
+ }
+ return;
+ } /* link ls */
+
+ if (n_tokens < 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link");
+ return;
+ }
+
+ if (parser_read_uint32(&link_id, tokens[0])) {
+ printf(CMD_MSG_INVALID_ARG, "linkid");
+ return;
+ }
+
+ /* link config */
+ if (strcmp(tokens[1], "config") == 0) {
+ struct in_addr ipaddr_ipv4;
+ uint32_t depth;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link config");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[2], &ipaddr_ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
+
+ if (parser_read_uint32(&depth, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "depth");
+ return;
+ }
+
+ status = app_link_config(app,
+ link_id,
+ rte_be_to_cpu_32(ipaddr_ipv4.s_addr),
+ depth);
+ if (status)
+ printf(CMD_MSG_FAIL, "link config");
+
+ return;
+ } /* link config */
+
+ /* link up */
+ if (strcmp(tokens[1], "up") == 0) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link up");
+ return;
+ }
+
+ status = app_link_up(app, link_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "link up");
+
+ return;
+ } /* link up */
+
+ /* link down */
+ if (strcmp(tokens[1], "down") == 0) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "link down");
+ return;
+ }
+
+ status = app_link_down(app, link_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "link down");
+
+ return;
+ } /* link down */
+
+ printf(CMD_MSG_MISMATCH_ARGS, "link");
+}
+
+static cmdline_parse_token_string_t cmd_link_link_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_result, link_string, "link");
+
+static cmdline_parse_token_string_t cmd_link_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_link_result, multi_string,
+ TOKEN_STRING_MULTI);
+
+static cmdline_parse_inst_t cmd_link = {
+ .f = cmd_link_parsed,
+ .data = NULL,
+ .help_str = "link config / up / down / ls",
+ .tokens = {
+ (void *) &cmd_link_link_string,
+ (void *) &cmd_link_multi_string,
+ NULL,
+ },
+};
+
+/*
+ * quit
+ */
+
+struct cmd_quit_result {
+ cmdline_fixed_string_t quit;
+};
+
+static void
+cmd_quit_parsed(
+ __rte_unused void *parsed_result,
+ struct cmdline *cl,
+ __rte_unused void *data)
+{
+ cmdline_quit(cl);
+}
+
+static cmdline_parse_token_string_t cmd_quit_quit =
+ TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+static cmdline_parse_inst_t cmd_quit = {
+ .f = cmd_quit_parsed,
+ .data = NULL,
+ .help_str = "Quit",
+ .tokens = {
+ (void *) &cmd_quit_quit,
+ NULL,
+ },
+};
+
+/*
+ * run
+ *
+ * run <file>
+ * run <file> [<count> [<interval>]]
+ * <count> default is 1
+ * <interval> is measured in milliseconds, default is 1 second
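+ *
+ * Illustrative example: "run script.cli 10 500" executes the CLI script
+ * file "script.cli" 10 times, pausing 500 ms between runs (the file name
+ * is arbitrary).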
+ */
+
+static void
+app_run_file(
+ cmdline_parse_ctx_t *ctx,
+ const char *file_name)
+{
+ struct cmdline *file_cl;
+ int fd;
+
+ fd = open(file_name, O_RDONLY);
+ if (fd < 0) {
+ printf("Cannot open file \"%s\"\n", file_name);
+ return;
+ }
+
+ file_cl = cmdline_new(ctx, "", fd, 1);
+ if (file_cl == NULL) {
+ printf("Cannot create CLI context for file \"%s\"\n", file_name);
+ close(fd);
+ return;
+ }
+
+ cmdline_interact(file_cl);
+ close(fd);
+}
+
+struct cmd_run_result {
+ cmdline_fixed_string_t run_string;
+ cmdline_multi_string_t multi_string;
+};
+
+static void
+cmd_run_parsed(
+ void *parsed_result,
+ struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ struct cmd_run_result *params = parsed_result;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
+
+ char *file_name;
+ uint32_t count, interval, i;
+
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "run");
+ return;
+ }
+
+ switch (n_tokens) {
+ case 0:
+ printf(CMD_MSG_NOT_ENOUGH_ARGS, "run");
+ return;
+
+ case 1:
+ file_name = tokens[0];
+ count = 1;
+ interval = 1000;
+ break;
+
+ case 2:
+ file_name = tokens[0];
+
+ if (parser_read_uint32(&count, tokens[1]) ||
+ (count == 0)) {
+ printf(CMD_MSG_INVALID_ARG, "count");
+ return;
+ }
+
+ interval = 1000;
+ break;
+
+ case 3:
+ file_name = tokens[0];
+
+ if (parser_read_uint32(&count, tokens[1]) ||
+ (count == 0)) {
+ printf(CMD_MSG_INVALID_ARG, "count");
+ return;
+ }
+
+ if (parser_read_uint32(&interval, tokens[2]) ||
+ (interval == 0)) {
+ printf(CMD_MSG_INVALID_ARG, "interval");
+ return;
+ }
+ break;
+
+ default:
+ printf(CMD_MSG_MISMATCH_ARGS, "run");
+ return;
+ }
+
+ for (i = 0; i < count; i++) {
+ app_run_file(cl->ctx, file_name);
+ if (interval)
+ usleep(interval * 1000);
+ }
+}
+
+static cmdline_parse_token_string_t cmd_run_run_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_result, run_string, "run");
+
+static cmdline_parse_token_string_t cmd_run_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_run_result, multi_string,
+ TOKEN_STRING_MULTI);
+
+static cmdline_parse_inst_t cmd_run = {
+ .f = cmd_run_parsed,
+ .data = NULL,
+ .help_str = "Run CLI script file",
+ .tokens = {
+ (void *) &cmd_run_run_string,
+ (void *) &cmd_run_multi_string,
+ NULL,
+ },
+};
+
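+/*
+ * CLI commands shared by all pipeline types; they are pushed into the
+ * application command array by app_pipeline_common_cmd_push() below.
+ */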
+static cmdline_parse_ctx_t pipeline_common_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_quit,
+ (cmdline_parse_inst_t *) &cmd_run,
+ (cmdline_parse_inst_t *) &cmd_link,
+ (cmdline_parse_inst_t *) &cmd_ping,
+ (cmdline_parse_inst_t *) &cmd_stats_port_in,
+ (cmdline_parse_inst_t *) &cmd_stats_port_out,
+ (cmdline_parse_inst_t *) &cmd_stats_table,
+ (cmdline_parse_inst_t *) &cmd_port_in_enable,
+ (cmdline_parse_inst_t *) &cmd_port_in_disable,
+ NULL,
+};
+
+int
+app_pipeline_common_cmd_push(struct app_params *app)
+{
+ uint32_t n_cmds, i;
+
+ /* Check for available slots in the application commands array */
+ n_cmds = RTE_DIM(pipeline_common_cmds) - 1;
+ if (n_cmds > APP_MAX_CMDS - app->n_cmds)
+ return -ENOMEM;
+
+ /* Push pipeline commands into the application */
+ memcpy(&app->cmds[app->n_cmds],
+ pipeline_common_cmds,
+ n_cmds * sizeof(cmdline_parse_ctx_t));
+
+ for (i = 0; i < n_cmds; i++)
+ app->cmds[app->n_cmds + i]->data = app;
+
+ app->n_cmds += n_cmds;
+ app->cmds[app->n_cmds] = NULL;
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_fe.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_fe.h
new file mode 100644
index 00000000..ce0bf13e
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_common_fe.h
@@ -0,0 +1,260 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_COMMON_FE_H__
+#define __INCLUDE_PIPELINE_COMMON_FE_H__
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <cmdline_parse.h>
+
+#include "pipeline_common_be.h"
+#include "pipeline.h"
+#include "app.h"
+
+#ifndef MSG_TIMEOUT_DEFAULT
+#define MSG_TIMEOUT_DEFAULT 1000
+#endif
+
+static inline struct app_pipeline_data *
+app_pipeline_data(struct app_params *app, uint32_t id)
+{
+ struct app_pipeline_params *params;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", id, params);
+ if (params == NULL)
+ return NULL;
+
+ return &app->pipeline_data[params - app->pipeline_params];
+}
+
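+/*
+ * Return the front-end data of a pipeline only if the pipeline exists, is
+ * enabled and matches the requested pipeline type; NULL otherwise.
+ */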
+static inline void *
+app_pipeline_data_fe(struct app_params *app, uint32_t id, struct pipeline_type *ptype)
+{
+ struct app_pipeline_data *pipeline_data;
+
+ pipeline_data = app_pipeline_data(app, id);
+ if (pipeline_data == NULL)
+ return NULL;
+
+ if (strcmp(pipeline_data->ptype->name, ptype->name) != 0)
+ return NULL;
+
+ if (pipeline_data->enabled == 0)
+ return NULL;
+
+ return pipeline_data->fe;
+}
+
+static inline struct rte_ring *
+app_pipeline_msgq_in_get(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_msgq_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->msgq_params,
+ "MSGQ-REQ-PIPELINE",
+ pipeline_id,
+ p);
+ if (p == NULL)
+ return NULL;
+
+ return app->msgq[p - app->msgq_params];
+}
+
+static inline struct rte_ring *
+app_pipeline_msgq_out_get(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_msgq_params *p;
+
+ APP_PARAM_FIND_BY_ID(app->msgq_params,
+ "MSGQ-RSP-PIPELINE",
+ pipeline_id,
+ p);
+ if (p == NULL)
+ return NULL;
+
+ return app->msgq[p - app->msgq_params];
+}
+
+static inline void *
+app_msg_alloc(__rte_unused struct app_params *app)
+{
+ return rte_malloc(NULL, 2048, RTE_CACHE_LINE_SIZE);
+}
+
+static inline void
+app_msg_free(__rte_unused struct app_params *app,
+ void *msg)
+{
+ rte_free(msg);
+}
+
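+/*
+ * Post a request message to the pipeline request ring, busy-waiting until
+ * the ring has room for it.
+ */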
+static inline void
+app_msg_send(struct app_params *app,
+ uint32_t pipeline_id,
+ void *msg)
+{
+ struct rte_ring *r = app_pipeline_msgq_in_get(app, pipeline_id);
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(r, msg);
+ } while (status == -ENOBUFS);
+}
+
+static inline void *
+app_msg_recv(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct rte_ring *r = app_pipeline_msgq_out_get(app, pipeline_id);
+ void *msg;
+ int status = rte_ring_sc_dequeue(r, &msg);
+
+ if (status != 0)
+ return NULL;
+
+ return msg;
+}
+
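+/*
+ * Send a request to the pipeline back-end and busy-wait for the response.
+ * The timeout is converted from milliseconds to TSC cycles, e.g.
+ * (illustrative) with a 2 GHz TSC and timeout_ms = 1000 the deadline is
+ * rte_rdtsc() + 2 * 10^9 cycles; timeout_ms = 0 means wait forever.
+ */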
+static inline void *
+app_msg_send_recv(struct app_params *app,
+ uint32_t pipeline_id,
+ void *msg,
+ uint32_t timeout_ms)
+{
+ struct rte_ring *r_req = app_pipeline_msgq_in_get(app, pipeline_id);
+ struct rte_ring *r_rsp = app_pipeline_msgq_out_get(app, pipeline_id);
+ uint64_t hz = rte_get_tsc_hz();
+ void *msg_recv;
+ uint64_t deadline;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(r_req, (void *) msg);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ deadline = (timeout_ms) ?
+ (rte_rdtsc() + ((hz * timeout_ms) / 1000)) :
+ UINT64_MAX;
+
+ do {
+ if (rte_rdtsc() > deadline)
+ return NULL;
+
+ status = rte_ring_sc_dequeue(r_rsp, &msg_recv);
+ } while (status != 0);
+
+ return msg_recv;
+}
+
+struct app_link_params *
+app_pipeline_track_pktq_out_to_link(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t pktq_out_id);
+
+int
+app_pipeline_track_default(struct pipeline_params *params,
+ uint32_t port_in,
+ uint32_t *port_out);
+
+int
+app_pipeline_ping(struct app_params *app,
+ uint32_t pipeline_id);
+
+int
+app_pipeline_stats_port_in(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats);
+
+int
+app_pipeline_stats_port_out(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats);
+
+int
+app_pipeline_stats_table(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t table_id,
+ struct rte_pipeline_table_stats *stats);
+
+int
+app_pipeline_port_in_enable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_port_in_disable(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_link_set_op(struct app_params *app,
+ uint32_t link_id,
+ uint32_t pipeline_id,
+ app_link_op op,
+ void *arg);
+
+int
+app_link_config(struct app_params *app,
+ uint32_t link_id,
+ uint32_t ip,
+ uint32_t depth);
+
+int
+app_link_up(struct app_params *app,
+ uint32_t link_id);
+
+int
+app_link_down(struct app_params *app,
+ uint32_t link_id);
+
+int
+app_pipeline_common_cmd_push(struct app_params *app);
+
+#define CMD_MSG_OUT_OF_MEMORY "Not enough memory\n"
+#define CMD_MSG_NOT_ENOUGH_ARGS "Not enough arguments for command \"%s\"\n"
+#define CMD_MSG_TOO_MANY_ARGS "Too many arguments for command \"%s\"\n"
+#define CMD_MSG_MISMATCH_ARGS "Incorrect set of arguments for command \"%s\"\n"
+#define CMD_MSG_INVALID_ARG "Invalid value for argument \"%s\"\n"
+#define CMD_MSG_ARG_NOT_FOUND "Syntax error: \"%s\" not found\n"
+#define CMD_MSG_FILE_ERR "Error in file \"%s\" at line %u\n"
+#define CMD_MSG_FAIL "Command \"%s\" failed\n"
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall.c
new file mode 100644
index 00000000..a82e552d
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall.c
@@ -0,0 +1,1450 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_firewall.h"
+#include "parser.h"
+
+struct app_pipeline_firewall_rule {
+ struct pipeline_firewall_key key;
+ int32_t priority;
+ uint32_t port_id;
+ void *entry_ptr;
+
+ TAILQ_ENTRY(app_pipeline_firewall_rule) node;
+};
+
+struct app_pipeline_firewall {
+ /* parameters */
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+
+ /* rules */
+ TAILQ_HEAD(, app_pipeline_firewall_rule) rules;
+ uint32_t n_rules;
+ uint32_t default_rule_present;
+ uint32_t default_rule_port_id;
+ void *default_rule_entry_ptr;
+};
+
+static void
+print_firewall_ipv4_rule(struct app_pipeline_firewall_rule *rule)
+{
+ printf("Prio = %" PRId32 " (SA = %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 "/%" PRIu32 ", "
+ "DA = %" PRIu32 ".%" PRIu32
+ ".%"PRIu32 ".%" PRIu32 "/%" PRIu32 ", "
+ "SP = %" PRIu32 "-%" PRIu32 ", "
+ "DP = %" PRIu32 "-%" PRIu32 ", "
+ "Proto = %" PRIu32 " / 0x%" PRIx32 ") => "
+ "Port = %" PRIu32 " (entry ptr = %p)\n",
+
+ rule->priority,
+
+ (rule->key.key.ipv4_5tuple.src_ip >> 24) & 0xFF,
+ (rule->key.key.ipv4_5tuple.src_ip >> 16) & 0xFF,
+ (rule->key.key.ipv4_5tuple.src_ip >> 8) & 0xFF,
+ rule->key.key.ipv4_5tuple.src_ip & 0xFF,
+ rule->key.key.ipv4_5tuple.src_ip_mask,
+
+ (rule->key.key.ipv4_5tuple.dst_ip >> 24) & 0xFF,
+ (rule->key.key.ipv4_5tuple.dst_ip >> 16) & 0xFF,
+ (rule->key.key.ipv4_5tuple.dst_ip >> 8) & 0xFF,
+ rule->key.key.ipv4_5tuple.dst_ip & 0xFF,
+ rule->key.key.ipv4_5tuple.dst_ip_mask,
+
+ rule->key.key.ipv4_5tuple.src_port_from,
+ rule->key.key.ipv4_5tuple.src_port_to,
+
+ rule->key.key.ipv4_5tuple.dst_port_from,
+ rule->key.key.ipv4_5tuple.dst_port_to,
+
+ rule->key.key.ipv4_5tuple.proto,
+ rule->key.key.ipv4_5tuple.proto_mask,
+
+ rule->port_id,
+ rule->entry_ptr);
+}
+
+static struct app_pipeline_firewall_rule *
+app_pipeline_firewall_rule_find(struct app_pipeline_firewall *p,
+ struct pipeline_firewall_key *key)
+{
+ struct app_pipeline_firewall_rule *r;
+
+ TAILQ_FOREACH(r, &p->rules, node)
+ if (memcmp(key,
+ &r->key,
+ sizeof(struct pipeline_firewall_key)) == 0)
+ return r;
+
+ return NULL;
+}
+
+static int
+app_pipeline_firewall_ls(
+ struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_firewall *p;
+ struct app_pipeline_firewall_rule *rule;
+ uint32_t n_rules;
+ int priority;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ n_rules = p->n_rules;
+ for (priority = 0; n_rules; priority++)
+ TAILQ_FOREACH(rule, &p->rules, node)
+ if (rule->priority == priority) {
+ print_firewall_ipv4_rule(rule);
+ n_rules--;
+ }
+
+ if (p->default_rule_present)
+ printf("Default rule: port %" PRIu32 " (entry ptr = %p)\n",
+ p->default_rule_port_id,
+ p->default_rule_entry_ptr);
+ else
+ printf("Default rule: DROP\n");
+
+ printf("\n");
+
+ return 0;
+}
+
+static void*
+app_pipeline_firewall_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct app_pipeline_firewall *p;
+ uint32_t size;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_firewall));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+
+ TAILQ_INIT(&p->rules);
+ p->n_rules = 0;
+ p->default_rule_present = 0;
+ p->default_rule_port_id = 0;
+ p->default_rule_entry_ptr = NULL;
+
+ return (void *) p;
+}
+
+static int
+app_pipeline_firewall_free(void *pipeline)
+{
+ struct app_pipeline_firewall *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ while (!TAILQ_EMPTY(&p->rules)) {
+ struct app_pipeline_firewall_rule *rule;
+
+ rule = TAILQ_FIRST(&p->rules);
+ TAILQ_REMOVE(&p->rules, rule, node);
+ rte_free(rule);
+ }
+
+ rte_free(p);
+ return 0;
+}
+
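+/*
+ * Validate an IPv4 5-tuple key and normalize it by masking the source and
+ * destination addresses with their prefix depths, e.g. (illustrative)
+ * 10.0.0.5 with depth 24 is stored as 10.0.0.0.
+ */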
+static int
+app_pipeline_firewall_key_check_and_normalize(struct pipeline_firewall_key *key)
+{
+ switch (key->type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ {
+ uint32_t src_ip_depth = key->key.ipv4_5tuple.src_ip_mask;
+ uint32_t dst_ip_depth = key->key.ipv4_5tuple.dst_ip_mask;
+ uint16_t src_port_from = key->key.ipv4_5tuple.src_port_from;
+ uint16_t src_port_to = key->key.ipv4_5tuple.src_port_to;
+ uint16_t dst_port_from = key->key.ipv4_5tuple.dst_port_from;
+ uint16_t dst_port_to = key->key.ipv4_5tuple.dst_port_to;
+
+ uint32_t src_ip_netmask = 0;
+ uint32_t dst_ip_netmask = 0;
+
+ if ((src_ip_depth > 32) ||
+ (dst_ip_depth > 32) ||
+ (src_port_from > src_port_to) ||
+ (dst_port_from > dst_port_to))
+ return -1;
+
+ if (src_ip_depth)
+ src_ip_netmask = (~0U) << (32 - src_ip_depth);
+
+ if (dst_ip_depth)
+ dst_ip_netmask = ((~0U) << (32 - dst_ip_depth));
+
+ key->key.ipv4_5tuple.src_ip &= src_ip_netmask;
+ key->key.ipv4_5tuple.dst_ip &= dst_ip_netmask;
+
+ return 0;
+ }
+
+ default:
+ return -1;
+ }
+}
+
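+/*
+ * Load firewall rules from a text file, one rule per line; lines whose
+ * first token starts with '#' are ignored. The expected line format
+ * (inferred from the parser below), with illustrative values:
+ *
+ * priority 1 ipv4 10.0.0.0 24 192.168.1.0 24 0 65535 0 65535 6 0xF port 0
+ */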
+int
+app_pipeline_firewall_load_file(char *filename,
+ struct pipeline_firewall_key *keys,
+ uint32_t *priorities,
+ uint32_t *port_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (priorities == NULL) ||
+ (port_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ uint32_t priority = 0;
+ struct in_addr sipaddr;
+ uint32_t sipdepth = 0;
+ struct in_addr dipaddr;
+ uint32_t dipdepth = 0;
+ uint16_t sport0 = 0;
+ uint16_t sport1 = 0;
+ uint16_t dport0 = 0;
+ uint16_t dport1 = 0;
+ uint8_t proto = 0;
+ uint8_t protomask = 0;
+ uint32_t port_id = 0;
+
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error1;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 15) ||
+ strcmp(tokens[0], "priority") ||
+ parser_read_uint32(&priority, tokens[1]) ||
+ strcmp(tokens[2], "ipv4") ||
+ parse_ipv4_addr(tokens[3], &sipaddr) ||
+ parser_read_uint32(&sipdepth, tokens[4]) ||
+ parse_ipv4_addr(tokens[5], &dipaddr) ||
+ parser_read_uint32(&dipdepth, tokens[6]) ||
+ parser_read_uint16(&sport0, tokens[7]) ||
+ parser_read_uint16(&sport1, tokens[8]) ||
+ parser_read_uint16(&dport0, tokens[9]) ||
+ parser_read_uint16(&dport1, tokens[10]) ||
+ parser_read_uint8(&proto, tokens[11]) ||
+ parser_read_uint8_hex(&protomask, tokens[12]) ||
+ strcmp(tokens[13], "port") ||
+ parser_read_uint32(&port_id, tokens[14]))
+ goto error1;
+
+ keys[i].type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ keys[i].key.ipv4_5tuple.src_ip =
+ rte_be_to_cpu_32(sipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.src_ip_mask = sipdepth;
+ keys[i].key.ipv4_5tuple.dst_ip =
+ rte_be_to_cpu_32(dipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.dst_ip_mask = dipdepth;
+ keys[i].key.ipv4_5tuple.src_port_from = sport0;
+ keys[i].key.ipv4_5tuple.src_port_to = sport1;
+ keys[i].key.ipv4_5tuple.dst_port_from = dport0;
+ keys[i].key.ipv4_5tuple.dst_port_to = dport1;
+ keys[i].key.ipv4_5tuple.proto = proto;
+ keys[i].key.ipv4_5tuple.proto_mask = protomask;
+
+ port_ids[i] = port_id;
+ priorities[i] = priority;
+
+ if (app_pipeline_firewall_key_check_and_normalize(&keys[i]))
+ goto error1;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error1:
+ *line = l;
+ fclose(f);
+ return -1;
+}
+
+int
+app_pipeline_firewall_add_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *key,
+ uint32_t priority,
+ uint32_t port_id)
+{
+ struct app_pipeline_firewall *p;
+ struct app_pipeline_firewall_rule *rule;
+ struct pipeline_firewall_add_msg_req *req;
+ struct pipeline_firewall_add_msg_rsp *rsp;
+ int new_rule;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (key->type != PIPELINE_FIREWALL_IPV4_5TUPLE))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ if (app_pipeline_firewall_key_check_and_normalize(key) != 0)
+ return -1;
+
+ /* Find existing rule or allocate new rule */
+ rule = app_pipeline_firewall_rule_find(p, key);
+ new_rule = (rule == NULL);
+ if (rule == NULL) {
+ rule = rte_malloc(NULL, sizeof(*rule), RTE_CACHE_LINE_SIZE);
+
+ if (rule == NULL)
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ if (new_rule)
+ rte_free(rule);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_ADD;
+ memcpy(&req->key, key, sizeof(*key));
+ req->priority = priority;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ if (new_rule)
+ rte_free(rule);
+ return -1;
+ }
+
+ /* Read response and write rule */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL) ||
+ ((new_rule == 0) && (rsp->key_found == 0)) ||
+ ((new_rule == 1) && (rsp->key_found == 1))) {
+ app_msg_free(app, rsp);
+ if (new_rule)
+ rte_free(rule);
+ return -1;
+ }
+
+ memcpy(&rule->key, key, sizeof(*key));
+ rule->priority = priority;
+ rule->port_id = port_id;
+ rule->entry_ptr = rsp->entry_ptr;
+
+ /* Commit rule */
+ if (new_rule) {
+ TAILQ_INSERT_TAIL(&p->rules, rule, node);
+ p->n_rules++;
+ }
+
+ print_firewall_ipv4_rule(rule);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_firewall_delete_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *key)
+{
+ struct app_pipeline_firewall *p;
+ struct app_pipeline_firewall_rule *rule;
+ struct pipeline_firewall_del_msg_req *req;
+ struct pipeline_firewall_del_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (key->type != PIPELINE_FIREWALL_IPV4_5TUPLE))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ if (app_pipeline_firewall_key_check_and_normalize(key) != 0)
+ return -1;
+
+ /* Find rule */
+ rule = app_pipeline_firewall_rule_find(p, key);
+ if (rule == NULL)
+ return 0;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_DEL;
+ memcpy(&req->key, key, sizeof(*key));
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status || !rsp->key_found) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Remove rule */
+ TAILQ_REMOVE(&p->rules, rule, node);
+ p->n_rules--;
+ rte_free(rule);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_firewall_add_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *keys,
+ uint32_t n_keys,
+ uint32_t *priorities,
+ uint32_t *port_ids)
+{
+ struct app_pipeline_firewall *p;
+ struct pipeline_firewall_add_bulk_msg_req *req;
+ struct pipeline_firewall_add_bulk_msg_rsp *rsp;
+
+ struct app_pipeline_firewall_rule **rules;
+ int *new_rules;
+
+ int *keys_found;
+ void **entries_ptr;
+
+ uint32_t i;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ rules = rte_malloc(NULL,
+ n_keys * sizeof(struct app_pipeline_firewall_rule *),
+ RTE_CACHE_LINE_SIZE);
+ if (rules == NULL)
+ return -1;
+
+ new_rules = rte_malloc(NULL,
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
+ if (new_rules == NULL) {
+ rte_free(rules);
+ return -1;
+ }
+
+ /* check data integrity and add to rule list */
+ for (i = 0; i < n_keys; i++) {
+ if (port_ids[i] >= p->n_ports_out) {
+ rte_free(rules);
+ rte_free(new_rules);
+ return -1;
+ }
+
+ if (app_pipeline_firewall_key_check_and_normalize(&keys[i]) != 0) {
+ rte_free(rules);
+ rte_free(new_rules);
+ return -1;
+ }
+
+ rules[i] = app_pipeline_firewall_rule_find(p, &keys[i]);
+ new_rules[i] = (rules[i] == NULL);
+ if (rules[i] == NULL) {
+ rules[i] = rte_malloc(NULL,
+ sizeof(*rules[i]),
+ RTE_CACHE_LINE_SIZE);
+
+ if (rules[i] == NULL) {
+ uint32_t j;
+
+ for (j = 0; j <= i; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ return -1;
+ }
+ }
+ }
+
+ keys_found = rte_malloc(NULL,
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
+ if (keys_found == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ return -1;
+ }
+
+ entries_ptr = rte_malloc(NULL,
+ n_keys * sizeof(struct rte_pipeline_table_entry *),
+ RTE_CACHE_LINE_SIZE);
+ if (entries_ptr == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ return -1;
+ }
+ for (i = 0; i < n_keys; i++) {
+ entries_ptr[i] = rte_malloc(NULL,
+ sizeof(struct rte_pipeline_table_entry),
+ RTE_CACHE_LINE_SIZE);
+
+ if (entries_ptr[i] == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ for (j = 0; j <= i; j++)
+ rte_free(entries_ptr[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ rte_free(entries_ptr);
+ return -1;
+ }
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ for (j = 0; j < n_keys; j++)
+ rte_free(entries_ptr[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ rte_free(entries_ptr);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_ADD_BULK;
+
+ req->keys = keys;
+ req->n_keys = n_keys;
+ req->port_ids = port_ids;
+ req->priorities = priorities;
+ req->keys_found = keys_found;
+ req->entries_ptr = entries_ptr;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < n_keys; j++)
+ if (new_rules[j])
+ rte_free(rules[j]);
+
+ for (j = 0; j < n_keys; j++)
+ rte_free(entries_ptr[j]);
+
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ rte_free(entries_ptr);
+ return -1;
+ }
+
+ if (rsp->status) {
+ for (i = 0; i < n_keys; i++)
+ if (new_rules[i])
+ rte_free(rules[i]);
+
+ for (i = 0; i < n_keys; i++)
+ rte_free(entries_ptr[i]);
+
+ status = -1;
+ goto cleanup;
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ if (entries_ptr[i] == NULL ||
+ ((new_rules[i] == 0) && (keys_found[i] == 0)) ||
+ ((new_rules[i] == 1) && (keys_found[i] == 1))) {
+ for (i = 0; i < n_keys; i++)
+ if (new_rules[i])
+ rte_free(rules[i]);
+
+ for (i = 0; i < n_keys; i++)
+ rte_free(entries_ptr[i]);
+
+ status = -1;
+ goto cleanup;
+ }
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ memcpy(&rules[i]->key, &keys[i], sizeof(keys[i]));
+ rules[i]->priority = priorities[i];
+ rules[i]->port_id = port_ids[i];
+ rules[i]->entry_ptr = entries_ptr[i];
+
+ /* Commit rule */
+ if (new_rules[i]) {
+ TAILQ_INSERT_TAIL(&p->rules, rules[i], node);
+ p->n_rules++;
+ }
+
+ print_firewall_ipv4_rule(rules[i]);
+ }
+
+cleanup:
+ app_msg_free(app, rsp);
+ rte_free(rules);
+ rte_free(new_rules);
+ rte_free(keys_found);
+ rte_free(entries_ptr);
+
+ return status;
+}
+
+int
+app_pipeline_firewall_delete_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *keys,
+ uint32_t n_keys)
+{
+ struct app_pipeline_firewall *p;
+ struct pipeline_firewall_del_bulk_msg_req *req;
+ struct pipeline_firewall_del_bulk_msg_rsp *rsp;
+
+ struct app_pipeline_firewall_rule **rules;
+ int *keys_found;
+
+ uint32_t i;
+ int status = 0;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ rules = rte_malloc(NULL,
+ n_keys * sizeof(struct app_pipeline_firewall_rule *),
+ RTE_CACHE_LINE_SIZE);
+ if (rules == NULL)
+ return -1;
+
+ for (i = 0; i < n_keys; i++) {
+ if (app_pipeline_firewall_key_check_and_normalize(&keys[i]) != 0) {
+ rte_free(rules);
+ return -1;
+ }
+
+ rules[i] = app_pipeline_firewall_rule_find(p, &keys[i]);
+ }
+
+ keys_found = rte_malloc(NULL,
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
+ if (keys_found == NULL) {
+ rte_free(rules);
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ rte_free(rules);
+ rte_free(keys_found);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_DEL_BULK;
+
+ req->keys = keys;
+ req->n_keys = n_keys;
+ req->keys_found = keys_found;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ rte_free(rules);
+ rte_free(keys_found);
+ return -1;
+ }
+
+ if (rsp->status) {
+ status = -1;
+ goto cleanup;
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ if (keys_found[i] == 0) {
+ status = -1;
+ goto cleanup;
+ }
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ TAILQ_REMOVE(&p->rules, rules[i], node);
+ p->n_rules--;
+ rte_free(rules[i]);
+ }
+
+cleanup:
+ app_msg_free(app, rsp);
+ rte_free(rules);
+ rte_free(keys_found);
+
+ return status;
+}
+
+int
+app_pipeline_firewall_add_default_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_firewall *p;
+ struct pipeline_firewall_add_default_msg_req *req;
+ struct pipeline_firewall_add_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_ADD_DEFAULT;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write rule */
+ if (rsp->status || (rsp->entry_ptr == NULL)) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ p->default_rule_port_id = port_id;
+ p->default_rule_entry_ptr = rsp->entry_ptr;
+
+ /* Commit rule */
+ p->default_rule_present = 1;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_firewall_delete_default_rule(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_firewall *p;
+ struct pipeline_firewall_del_default_msg_req *req;
+ struct pipeline_firewall_del_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_firewall);
+ if (p == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FIREWALL_MSG_REQ_DEL_DEFAULT;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write rule */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Commit rule */
+ p->default_rule_present = 0;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * firewall
+ *
+ * firewall add:
+ * p <pipelineid> firewall add priority <priority>
+ * ipv4 <sipaddr> <sipdepth> <dipaddr> <dipdepth>
+ * <sport0> <sport1> <dport0> <dport1> <proto> <protomask>
+ * port <portid>
+ * Note: <protomask> is a hex value
+ *
+ * p <pipelineid> firewall add bulk <file>
+ *
+ * firewall add default:
+ * p <pipelineid> firewall add default <port ID>
+ *
+ * firewall del:
+ * p <pipelineid> firewall del
+ * ipv4 <sipaddr> <sipdepth> <dipaddr> <dipdepth>
+ * <sport0> <sport1> <dport0> <dport1> <proto> <protomask>
+ *
+ * p <pipelineid> firewall del bulk <file>
+ *
+ * firewall del default:
+ * p <pipelineid> firewall del default
+ *
+ * firewall ls:
+ * p <pipelineid> firewall ls
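+ *
+ * Illustrative examples (pipeline and port IDs are arbitrary):
+ * p 1 firewall add priority 1 ipv4 10.0.0.0 24 192.168.1.0 24 0 65535 0 65535 6 0xF port 0
+ * p 1 firewall add default 0
+ * p 1 firewall ls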
+ */
+
+struct cmd_firewall_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t firewall_string;
+ cmdline_multi_string_t multi_string;
+};
+
+static void cmd_firewall_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_firewall_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+
+ char *tokens[17];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "firewall");
+ return;
+ }
+
+ /* firewall add */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "priority") == 0)) {
+ struct pipeline_firewall_key key;
+ uint32_t priority;
+ struct in_addr sipaddr;
+ uint32_t sipdepth;
+ struct in_addr dipaddr;
+ uint32_t dipdepth;
+ uint16_t sport0;
+ uint16_t sport1;
+ uint16_t dport0;
+ uint16_t dport1;
+ uint8_t proto;
+ uint8_t protomask;
+ uint32_t port_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 16) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall add");
+ return;
+ }
+
+ if (parser_read_uint32(&priority, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "priority");
+ return;
+ }
+
+ if (strcmp(tokens[3], "ipv4")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "ipv4");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[4], &sipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "sipaddr");
+ return;
+ }
+
+ if (parser_read_uint32(&sipdepth, tokens[5])) {
+ printf(CMD_MSG_INVALID_ARG, "sipdepth");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[6], &dipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "dipaddr");
+ return;
+ }
+
+ if (parser_read_uint32(&dipdepth, tokens[7])) {
+ printf(CMD_MSG_INVALID_ARG, "dipdepth");
+ return;
+ }
+
+ if (parser_read_uint16(&sport0, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "sport0");
+ return;
+ }
+
+ if (parser_read_uint16(&sport1, tokens[9])) {
+ printf(CMD_MSG_INVALID_ARG, "sport1");
+ return;
+ }
+
+ if (parser_read_uint16(&dport0, tokens[10])) {
+ printf(CMD_MSG_INVALID_ARG, "dport0");
+ return;
+ }
+
+ if (parser_read_uint16(&dport1, tokens[11])) {
+ printf(CMD_MSG_INVALID_ARG, "dport1");
+ return;
+ }
+
+ if (parser_read_uint8(&proto, tokens[12])) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
+
+ if (parser_read_uint8_hex(&protomask, tokens[13])) {
+ printf(CMD_MSG_INVALID_ARG, "protomask");
+ return;
+ }
+
+ if (strcmp(tokens[14], "port")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[15])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.src_ip = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.src_ip_mask = sipdepth;
+ key.key.ipv4_5tuple.dst_ip = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.dst_ip_mask = dipdepth;
+ key.key.ipv4_5tuple.src_port_from = sport0;
+ key.key.ipv4_5tuple.src_port_to = sport1;
+ key.key.ipv4_5tuple.dst_port_from = dport0;
+ key.key.ipv4_5tuple.dst_port_to = dport1;
+ key.key.ipv4_5tuple.proto = proto;
+ key.key.ipv4_5tuple.proto_mask = protomask;
+
+ status = app_pipeline_firewall_add_rule(app,
+ params->pipeline_id,
+ &key,
+ priority,
+ port_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall add");
+
+ return;
+ } /* firewall add */
+
+ /* firewall add bulk */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "bulk") == 0)) {
+ struct pipeline_firewall_key *keys;
+ uint32_t *priorities, *port_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall add bulk");
+ return;
+ }
+
+ filename = tokens[2];
+
+ n_keys = APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_firewall_key));
+ if (keys == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ return;
+ }
+ memset(keys, 0, n_keys * sizeof(struct pipeline_firewall_key));
+
+ priorities = malloc(n_keys * sizeof(uint32_t));
+ if (priorities == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(keys);
+ return;
+ }
+
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(priorities);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_firewall_load_file(filename,
+ keys,
+ priorities,
+ port_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(port_ids);
+ free(priorities);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_firewall_add_bulk(app,
+ params->pipeline_id,
+ keys,
+ n_keys,
+ priorities,
+ port_ids);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall add bulk");
+
+ free(keys);
+ free(priorities);
+ free(port_ids);
+ return;
+ } /* firewall add bulk */
+
+ /* firewall add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall add default");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ status = app_pipeline_firewall_add_default_rule(app,
+ params->pipeline_id,
+ port_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall add default");
+
+ return;
+ } /* firewall add default */
+
+ /* firewall del */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0)) {
+ struct pipeline_firewall_key key;
+ struct in_addr sipaddr;
+ uint32_t sipdepth;
+ struct in_addr dipaddr;
+ uint32_t dipdepth;
+ uint16_t sport0;
+ uint16_t sport1;
+ uint16_t dport0;
+ uint16_t dport1;
+ uint8_t proto;
+ uint8_t protomask;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 12) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall del");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[2], &sipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "sipaddr");
+ return;
+ }
+
+ if (parser_read_uint32(&sipdepth, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "sipdepth");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[4], &dipaddr)) {
+ printf(CMD_MSG_INVALID_ARG, "dipaddr");
+ return;
+ }
+
+ if (parser_read_uint32(&dipdepth, tokens[5])) {
+ printf(CMD_MSG_INVALID_ARG, "dipdepth");
+ return;
+ }
+
+ if (parser_read_uint16(&sport0, tokens[6])) {
+ printf(CMD_MSG_INVALID_ARG, "sport0");
+ return;
+ }
+
+ if (parser_read_uint16(&sport1, tokens[7])) {
+ printf(CMD_MSG_INVALID_ARG, "sport1");
+ return;
+ }
+
+ if (parser_read_uint16(&dport0, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "dport0");
+ return;
+ }
+
+ if (parser_read_uint16(&dport1, tokens[9])) {
+ printf(CMD_MSG_INVALID_ARG, "dport1");
+ return;
+ }
+
+ if (parser_read_uint8(&proto, tokens[10])) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
+
+ if (parser_read_uint8_hex(&protomask, tokens[11])) {
+ printf(CMD_MSG_INVALID_ARG, "protomask");
+ return;
+ }
+
+ key.type = PIPELINE_FIREWALL_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.src_ip = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.src_ip_mask = sipdepth;
+ key.key.ipv4_5tuple.dst_ip = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.dst_ip_mask = dipdepth;
+ key.key.ipv4_5tuple.src_port_from = sport0;
+ key.key.ipv4_5tuple.src_port_to = sport1;
+ key.key.ipv4_5tuple.dst_port_from = dport0;
+ key.key.ipv4_5tuple.dst_port_to = dport1;
+ key.key.ipv4_5tuple.proto = proto;
+ key.key.ipv4_5tuple.proto_mask = protomask;
+
+ status = app_pipeline_firewall_delete_rule(app,
+ params->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall del");
+
+ return;
+ } /* firewall del */
+
+ /* firewall del bulk */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "bulk") == 0)) {
+ struct pipeline_firewall_key *keys;
+ uint32_t *priorities, *port_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall del bulk");
+ return;
+ }
+
+ filename = tokens[2];
+
+ n_keys = APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_firewall_key));
+ if (keys == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ return;
+ }
+ memset(keys, 0, n_keys * sizeof(struct pipeline_firewall_key));
+
+ priorities = malloc(n_keys * sizeof(uint32_t));
+ if (priorities == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(keys);
+ return;
+ }
+
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(priorities);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_firewall_load_file(filename,
+ keys,
+ priorities,
+ port_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(port_ids);
+ free(priorities);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_firewall_delete_bulk(app,
+ params->pipeline_id,
+ keys,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall del bulk");
+
+ free(port_ids);
+ free(priorities);
+ free(keys);
+ return;
+ } /* firewall del bulk */
+
+ /* firewall del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall del default");
+ return;
+ }
+
+ status = app_pipeline_firewall_delete_default_rule(app,
+ params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall del default");
+
+ return;
+ } /* firewall del default */
+
+ /* firewall ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall ls");
+ return;
+ }
+
+ status = app_pipeline_firewall_ls(app, params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "firewall ls");
+
+ return;
+ } /* firewall ls */
+
+ printf(CMD_MSG_MISMATCH_ARGS, "firewall");
+}
+
+static cmdline_parse_token_string_t cmd_firewall_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_firewall_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_firewall_result, pipeline_id, UINT32);
+
+static cmdline_parse_token_string_t cmd_firewall_firewall_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, firewall_string,
+ "firewall");
+
+static cmdline_parse_token_string_t cmd_firewall_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_firewall_result, multi_string,
+ TOKEN_STRING_MULTI);
+
+static cmdline_parse_inst_t cmd_firewall = {
+ .f = cmd_firewall_parsed,
+ .data = NULL,
+ .help_str = "firewall add / add bulk / add default / del / del bulk"
+ " / del default / ls",
+ .tokens = {
+ (void *) &cmd_firewall_p_string,
+ (void *) &cmd_firewall_pipeline_id,
+ (void *) &cmd_firewall_firewall_string,
+ (void *) &cmd_firewall_multi_string,
+ NULL,
+ },
+};
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_firewall,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_firewall_fe_ops = {
+ .f_init = app_pipeline_firewall_init,
+ .f_post_init = NULL,
+ .f_free = app_pipeline_firewall_free,
+ .f_track = app_pipeline_track_default,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_firewall = {
+ .name = "FIREWALL",
+ .be_ops = &pipeline_firewall_be_ops,
+ .fe_ops = &pipeline_firewall_fe_ops,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall.h
new file mode 100644
index 00000000..aa79a2a0
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall.h
@@ -0,0 +1,89 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FIREWALL_H__
+#define __INCLUDE_PIPELINE_FIREWALL_H__
+
+#include "pipeline.h"
+#include "pipeline_firewall_be.h"
+
+int
+app_pipeline_firewall_add_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *key,
+ uint32_t priority,
+ uint32_t port_id);
+
+int
+app_pipeline_firewall_delete_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *key);
+
+int
+app_pipeline_firewall_add_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *keys,
+ uint32_t n_keys,
+ uint32_t *priorities,
+ uint32_t *port_ids);
+
+int
+app_pipeline_firewall_delete_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_firewall_key *keys,
+ uint32_t n_keys);
+
+int
+app_pipeline_firewall_add_default_rule(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_firewall_delete_default_rule(struct app_params *app,
+ uint32_t pipeline_id);
+
+#ifndef APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE
+#define APP_PIPELINE_FIREWALL_MAX_RULES_IN_FILE 65536
+#endif
+
+int
+app_pipeline_firewall_load_file(char *filename,
+ struct pipeline_firewall_key *keys,
+ uint32_t *priorities,
+ uint32_t *port_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
+extern struct pipeline_type pipeline_firewall;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall_be.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
new file mode 100644
index 00000000..2980492b
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall_be.c
@@ -0,0 +1,885 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_byteorder.h>
+#include <rte_table_acl.h>
+
+#include "pipeline_firewall_be.h"
+#include "parser.h"
+
+struct pipeline_firewall {
+ struct pipeline p;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_FIREWALL_MSG_REQS];
+
+ uint32_t n_rules;
+ uint32_t n_rule_fields;
+ struct rte_acl_field_def *field_format;
+ uint32_t field_format_size;
+} __rte_cache_aligned;
+
+static void *
+pipeline_firewall_msg_req_custom_handler(struct pipeline *p, void *msg);
+
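+/*
+ * Message handlers indexed by request type: common requests reuse the
+ * shared pipeline handlers, while PIPELINE_MSG_REQ_CUSTOM requests are
+ * dispatched by subtype through custom_handlers[] below.
+ */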
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_firewall_msg_req_custom_handler,
+};
+
+static void *
+pipeline_firewall_msg_req_add_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_del_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_del_bulk_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_add_default_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_firewall_msg_req_del_default_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_FIREWALL_MSG_REQ_ADD] =
+ pipeline_firewall_msg_req_add_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_DEL] =
+ pipeline_firewall_msg_req_del_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_ADD_BULK] =
+ pipeline_firewall_msg_req_add_bulk_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_DEL_BULK] =
+ pipeline_firewall_msg_req_del_bulk_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_ADD_DEFAULT] =
+ pipeline_firewall_msg_req_add_default_handler,
+ [PIPELINE_FIREWALL_MSG_REQ_DEL_DEFAULT] =
+ pipeline_firewall_msg_req_del_default_handler,
+};
+
+/*
+ * Firewall table
+ */
+struct firewall_table_entry {
+ struct rte_pipeline_table_entry head;
+};
+
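+/*
+ * ACL rule fields for plain IPv4 packets: protocol, source/destination
+ * address prefixes and source/destination port ranges. The two 16-bit port
+ * fields share input_index 3 because librte_acl groups input fields into
+ * 32-bit words.
+ */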
+static struct rte_acl_field_def field_format_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+#define SIZEOF_VLAN_HDR 4
+
+static struct rte_acl_field_def field_format_vlan_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_VLAN_HDR +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+#define SIZEOF_QINQ_HEADER 8
+
+static struct rte_acl_field_def field_format_qinq_ipv4[] = {
+ /* Protocol */
+ [0] = {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = 0,
+ .input_index = 0,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+
+ /* Source IP address (IPv4) */
+ [1] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 1,
+ .input_index = 1,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+
+ /* Destination IP address (IPv4) */
+ [2] = {
+ .type = RTE_ACL_FIELD_TYPE_MASK,
+ .size = sizeof(uint32_t),
+ .field_index = 2,
+ .input_index = 2,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+
+ /* Source Port */
+ [3] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 3,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+
+ /* Destination Port */
+ [4] = {
+ .type = RTE_ACL_FIELD_TYPE_RANGE,
+ .size = sizeof(uint16_t),
+ .field_index = 4,
+ .input_index = 3,
+ .offset = sizeof(struct ether_hdr) +
+ SIZEOF_QINQ_HEADER +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+static int
+pipeline_firewall_parse_args(struct pipeline_firewall *p,
+ struct pipeline_params *params)
+{
+ uint32_t n_rules_present = 0;
+ uint32_t pkt_type_present = 0;
+ uint32_t i;
+
+ /* defaults */
+ p->n_rules = 4 * 1024;
+ p->n_rule_fields = RTE_DIM(field_format_ipv4);
+ p->field_format = field_format_ipv4;
+ p->field_format_size = sizeof(field_format_ipv4);
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ if (strcmp(arg_name, "n_rules") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_rules_present == 0, params->name,
+ arg_name);
+ n_rules_present = 1;
+
+ status = parser_read_uint32(&p->n_rules,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+ continue;
+ }
+
+ if (strcmp(arg_name, "pkt_type") == 0) {
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ pkt_type_present == 0, params->name,
+ arg_name);
+ pkt_type_present = 1;
+
+ /* ipv4 */
+ if (strcmp(arg_value, "ipv4") == 0) {
+ p->n_rule_fields = RTE_DIM(field_format_ipv4);
+ p->field_format = field_format_ipv4;
+ p->field_format_size =
+ sizeof(field_format_ipv4);
+ continue;
+ }
+
+ /* vlan_ipv4 */
+ if (strcmp(arg_value, "vlan_ipv4") == 0) {
+ p->n_rule_fields =
+ RTE_DIM(field_format_vlan_ipv4);
+ p->field_format = field_format_vlan_ipv4;
+ p->field_format_size =
+ sizeof(field_format_vlan_ipv4);
+ continue;
+ }
+
+ /* qinq_ipv4 */
+ if (strcmp(arg_value, "qinq_ipv4") == 0) {
+ p->n_rule_fields =
+ RTE_DIM(field_format_qinq_ipv4);
+ p->field_format = field_format_qinq_ipv4;
+ p->field_format_size =
+ sizeof(field_format_qinq_ipv4);
+ continue;
+ }
+
+ /* other */
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+ }
+
+ /* other */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ return 0;
+}
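
For context, these are the only two pipeline-specific arguments a FIREWALL section needs to carry in the application's .cfg file; a minimal sketch of such a section is shown below. Only the n_rules and pkt_type keys and the accepted values ipv4, vlan_ipv4 and qinq_ipv4 come from the parser above; the section name and the type key are assumed from the pipeline type name, and the mandatory core and packet queue keys are left out.

	[PIPELINE1]
	type = FIREWALL
	n_rules = 4096
	pkt_type = vlan_ipv4
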
+
+static void *
+pipeline_firewall_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_firewall *p_fw;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_firewall));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ p_fw = (struct pipeline_firewall *) p;
+ if (p == NULL)
+ return NULL;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Firewall");
+
+ /* Parse arguments */
+	if (pipeline_firewall_parse_args(p_fw, params)) {
+		rte_free(p);
+		return NULL;
+	}
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = params->name,
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Tables */
+ p->n_tables = 1;
+ {
+ struct rte_table_acl_params table_acl_params = {
+ .name = params->name,
+ .n_rules = p_fw->n_rules,
+ .n_rule_fields = p_fw->n_rule_fields,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_acl_ops,
+ .arg_create = &table_acl_params,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size =
+ sizeof(struct firewall_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
+ memcpy(table_acl_params.field_format,
+ p_fw->field_format,
+ p_fw->field_format_size);
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_fw->custom_handlers,
+ custom_handlers,
+ sizeof(p_fw->custom_handlers));
+
+ return p;
+}
+
+static int
+pipeline_firewall_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_firewall_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+void *
+pipeline_firewall_msg_req_custom_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_firewall *p_fw = (struct pipeline_firewall *) p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_FIREWALL_MSG_REQS) ?
+ p_fw->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+void *
+pipeline_firewall_msg_req_add_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_add_msg_req *req = msg;
+ struct pipeline_firewall_add_msg_rsp *rsp = msg;
+
+ struct rte_table_acl_rule_add_params params;
+ struct firewall_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+ };
+
+ memset(&params, 0, sizeof(params));
+
+ switch (req->key.type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ params.priority = req->priority;
+ params.field_value[0].value.u8 =
+ req->key.key.ipv4_5tuple.proto;
+ params.field_value[0].mask_range.u8 =
+ req->key.key.ipv4_5tuple.proto_mask;
+ params.field_value[1].value.u32 =
+ req->key.key.ipv4_5tuple.src_ip;
+ params.field_value[1].mask_range.u32 =
+ req->key.key.ipv4_5tuple.src_ip_mask;
+ params.field_value[2].value.u32 =
+ req->key.key.ipv4_5tuple.dst_ip;
+ params.field_value[2].mask_range.u32 =
+ req->key.key.ipv4_5tuple.dst_ip_mask;
+ params.field_value[3].value.u16 =
+ req->key.key.ipv4_5tuple.src_port_from;
+ params.field_value[3].mask_range.u16 =
+ req->key.key.ipv4_5tuple.src_port_to;
+ params.field_value[4].value.u16 =
+ req->key.key.ipv4_5tuple.dst_port_from;
+ params.field_value[4].mask_range.u16 =
+ req->key.key.ipv4_5tuple.dst_port_to;
+ break;
+
+ default:
+ rsp->status = -1; /* Error */
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &params,
+ (struct rte_pipeline_table_entry *) &entry,
+ &rsp->key_found,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
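
To illustrate what this handler expects, below is a minimal sketch (not taken from the sources) of an ADD request for a single IPv4 5-tuple rule. The field names come from pipeline_firewall_be.h later in this patch; the addresses, ports, priority and output port are illustrative only, and the running application allocates the request from the pipeline message queue (app_msg_alloc) instead of building it on the stack.

	struct pipeline_firewall_add_msg_req req = {
		.type = PIPELINE_MSG_REQ_CUSTOM,
		.subtype = PIPELINE_FIREWALL_MSG_REQ_ADD,
		.key = {
			.type = PIPELINE_FIREWALL_IPV4_5TUPLE,
			.key.ipv4_5tuple = {
				.src_ip = 0x0a000000,   /* 10.0.0.0 */
				.src_ip_mask = 24,      /* /24 prefix */
				.dst_ip = 0xc0a80001,   /* 192.168.0.1 */
				.dst_ip_mask = 32,      /* exact match */
				.src_port_from = 0,
				.src_port_to = 65535,   /* any source port */
				.dst_port_from = 80,
				.dst_port_to = 80,      /* destination port 80 only */
				.proto = 6,             /* TCP */
				.proto_mask = 0xFF,     /* exact protocol match */
			},
		},
		.priority = 1,
		.port_id = 0,                   /* forward matches to output port 0 */
	};
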
+
+void *
+pipeline_firewall_msg_req_del_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_del_msg_req *req = msg;
+ struct pipeline_firewall_del_msg_rsp *rsp = msg;
+
+ struct rte_table_acl_rule_delete_params params;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (req->key.type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ params.field_value[0].value.u8 =
+ req->key.key.ipv4_5tuple.proto;
+ params.field_value[0].mask_range.u8 =
+ req->key.key.ipv4_5tuple.proto_mask;
+ params.field_value[1].value.u32 =
+ req->key.key.ipv4_5tuple.src_ip;
+ params.field_value[1].mask_range.u32 =
+ req->key.key.ipv4_5tuple.src_ip_mask;
+ params.field_value[2].value.u32 =
+ req->key.key.ipv4_5tuple.dst_ip;
+ params.field_value[2].mask_range.u32 =
+ req->key.key.ipv4_5tuple.dst_ip_mask;
+ params.field_value[3].value.u16 =
+ req->key.key.ipv4_5tuple.src_port_from;
+ params.field_value[3].mask_range.u16 =
+ req->key.key.ipv4_5tuple.src_port_to;
+ params.field_value[4].value.u16 =
+ req->key.key.ipv4_5tuple.dst_port_from;
+ params.field_value[4].mask_range.u16 =
+ req->key.key.ipv4_5tuple.dst_port_to;
+ break;
+
+ default:
+ rsp->status = -1; /* Error */
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ p->table_id[0],
+ &params,
+ &rsp->key_found,
+ NULL);
+
+ return rsp;
+}
+
+static void *
+pipeline_firewall_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_add_bulk_msg_req *req = msg;
+ struct pipeline_firewall_add_bulk_msg_rsp *rsp = msg;
+
+ struct rte_table_acl_rule_add_params *params[req->n_keys];
+ struct firewall_table_entry *entries[req->n_keys];
+
+ uint32_t i, n_keys;
+
+ n_keys = req->n_keys;
+
+ for (i = 0; i < n_keys; i++) {
+ entries[i] = rte_zmalloc(NULL,
+ sizeof(struct firewall_table_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (entries[i] == NULL) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ params[i] = rte_zmalloc(NULL,
+ sizeof(struct rte_table_acl_rule_add_params),
+ RTE_CACHE_LINE_SIZE);
+ if (params[i] == NULL) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ entries[i]->head.action = RTE_PIPELINE_ACTION_PORT;
+ entries[i]->head.port_id = p->port_out_id[req->port_ids[i]];
+
+ switch (req->keys[i].type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ params[i]->priority = req->priorities[i];
+ params[i]->field_value[0].value.u8 =
+ req->keys[i].key.ipv4_5tuple.proto;
+ params[i]->field_value[0].mask_range.u8 =
+ req->keys[i].key.ipv4_5tuple.proto_mask;
+ params[i]->field_value[1].value.u32 =
+ req->keys[i].key.ipv4_5tuple.src_ip;
+ params[i]->field_value[1].mask_range.u32 =
+ req->keys[i].key.ipv4_5tuple.src_ip_mask;
+ params[i]->field_value[2].value.u32 =
+ req->keys[i].key.ipv4_5tuple.dst_ip;
+ params[i]->field_value[2].mask_range.u32 =
+ req->keys[i].key.ipv4_5tuple.dst_ip_mask;
+ params[i]->field_value[3].value.u16 =
+ req->keys[i].key.ipv4_5tuple.src_port_from;
+ params[i]->field_value[3].mask_range.u16 =
+ req->keys[i].key.ipv4_5tuple.src_port_to;
+ params[i]->field_value[4].value.u16 =
+ req->keys[i].key.ipv4_5tuple.dst_port_from;
+ params[i]->field_value[4].mask_range.u16 =
+ req->keys[i].key.ipv4_5tuple.dst_port_to;
+ break;
+
+ default:
+ rsp->status = -1; /* Error */
+
+ for (i = 0; i < n_keys; i++) {
+ rte_free(entries[i]);
+ rte_free(params[i]);
+ }
+
+ return rsp;
+ }
+ }
+
+ rsp->status = rte_pipeline_table_entry_add_bulk(p->p, p->table_id[0],
+ (void *)params, (struct rte_pipeline_table_entry **)entries,
+ n_keys, req->keys_found,
+ (struct rte_pipeline_table_entry **)req->entries_ptr);
+
+ for (i = 0; i < n_keys; i++) {
+ rte_free(entries[i]);
+ rte_free(params[i]);
+ }
+
+ return rsp;
+}
+
+static void *
+pipeline_firewall_msg_req_del_bulk_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_del_bulk_msg_req *req = msg;
+ struct pipeline_firewall_del_bulk_msg_rsp *rsp = msg;
+
+ struct rte_table_acl_rule_delete_params *params[req->n_keys];
+
+ uint32_t i, n_keys;
+
+ n_keys = req->n_keys;
+
+ for (i = 0; i < n_keys; i++) {
+ params[i] = rte_zmalloc(NULL,
+ sizeof(struct rte_table_acl_rule_delete_params),
+ RTE_CACHE_LINE_SIZE);
+ if (params[i] == NULL) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ switch (req->keys[i].type) {
+ case PIPELINE_FIREWALL_IPV4_5TUPLE:
+ params[i]->field_value[0].value.u8 =
+ req->keys[i].key.ipv4_5tuple.proto;
+ params[i]->field_value[0].mask_range.u8 =
+ req->keys[i].key.ipv4_5tuple.proto_mask;
+ params[i]->field_value[1].value.u32 =
+ req->keys[i].key.ipv4_5tuple.src_ip;
+ params[i]->field_value[1].mask_range.u32 =
+ req->keys[i].key.ipv4_5tuple.src_ip_mask;
+ params[i]->field_value[2].value.u32 =
+ req->keys[i].key.ipv4_5tuple.dst_ip;
+ params[i]->field_value[2].mask_range.u32 =
+ req->keys[i].key.ipv4_5tuple.dst_ip_mask;
+ params[i]->field_value[3].value.u16 =
+ req->keys[i].key.ipv4_5tuple.src_port_from;
+ params[i]->field_value[3].mask_range.u16 =
+ req->keys[i].key.ipv4_5tuple.src_port_to;
+ params[i]->field_value[4].value.u16 =
+ req->keys[i].key.ipv4_5tuple.dst_port_from;
+ params[i]->field_value[4].mask_range.u16 =
+ req->keys[i].key.ipv4_5tuple.dst_port_to;
+ break;
+
+ default:
+ rsp->status = -1; /* Error */
+
+ for (i = 0; i < n_keys; i++)
+ rte_free(params[i]);
+
+ return rsp;
+ }
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete_bulk(p->p, p->table_id[0],
+ (void **)&params, n_keys, req->keys_found, NULL);
+
+ for (i = 0; i < n_keys; i++)
+ rte_free(params[i]);
+
+ return rsp;
+}
+
+void *
+pipeline_firewall_msg_req_add_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_add_default_msg_req *req = msg;
+ struct pipeline_firewall_add_default_msg_rsp *rsp = msg;
+
+ struct firewall_table_entry default_entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+ };
+
+ rsp->status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[0],
+ (struct rte_pipeline_table_entry *) &default_entry,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_firewall_msg_req_del_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_firewall_del_default_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ p->table_id[0],
+ NULL);
+
+ return rsp;
+}
+
+struct pipeline_be_ops pipeline_firewall_be_ops = {
+ .f_init = pipeline_firewall_init,
+ .f_free = pipeline_firewall_free,
+ .f_run = NULL,
+ .f_timer = pipeline_firewall_timer,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall_be.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall_be.h
new file mode 100644
index 00000000..f5b0522f
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_firewall_be.h
@@ -0,0 +1,176 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FIREWALL_BE_H__
+#define __INCLUDE_PIPELINE_FIREWALL_BE_H__
+
+#include "pipeline_common_be.h"
+
+enum pipeline_firewall_key_type {
+ PIPELINE_FIREWALL_IPV4_5TUPLE,
+};
+
+struct pipeline_firewall_key_ipv4_5tuple {
+ uint32_t src_ip;
+ uint32_t src_ip_mask;
+ uint32_t dst_ip;
+ uint32_t dst_ip_mask;
+ uint16_t src_port_from;
+ uint16_t src_port_to;
+ uint16_t dst_port_from;
+ uint16_t dst_port_to;
+ uint8_t proto;
+ uint8_t proto_mask;
+};
+
+struct pipeline_firewall_key {
+ enum pipeline_firewall_key_type type;
+ union {
+ struct pipeline_firewall_key_ipv4_5tuple ipv4_5tuple;
+ } key;
+};
+
+enum pipeline_firewall_msg_req_type {
+ PIPELINE_FIREWALL_MSG_REQ_ADD = 0,
+ PIPELINE_FIREWALL_MSG_REQ_DEL,
+ PIPELINE_FIREWALL_MSG_REQ_ADD_BULK,
+ PIPELINE_FIREWALL_MSG_REQ_DEL_BULK,
+ PIPELINE_FIREWALL_MSG_REQ_ADD_DEFAULT,
+ PIPELINE_FIREWALL_MSG_REQ_DEL_DEFAULT,
+ PIPELINE_FIREWALL_MSG_REQS
+};
+
+/*
+ * MSG ADD
+ */
+struct pipeline_firewall_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_firewall_key key;
+
+ /* data */
+ int32_t priority;
+ uint32_t port_id;
+};
+
+struct pipeline_firewall_add_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/*
+ * MSG DEL
+ */
+struct pipeline_firewall_del_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_firewall_key key;
+};
+
+struct pipeline_firewall_del_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/*
+ * MSG ADD BULK
+ */
+struct pipeline_firewall_add_bulk_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ struct pipeline_firewall_key *keys;
+ uint32_t n_keys;
+
+ uint32_t *priorities;
+ uint32_t *port_ids;
+ int *keys_found;
+ void **entries_ptr;
+};
+struct pipeline_firewall_add_bulk_msg_rsp {
+ int status;
+};
+
+/*
+ * MSG DEL BULK
+ */
+struct pipeline_firewall_del_bulk_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_firewall_key *keys;
+ uint32_t n_keys;
+ int *keys_found;
+};
+
+struct pipeline_firewall_del_bulk_msg_rsp {
+ int status;
+};
+
+/*
+ * MSG ADD DEFAULT
+ */
+struct pipeline_firewall_add_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+
+ /* data */
+ uint32_t port_id;
+};
+
+struct pipeline_firewall_add_default_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG DEL DEFAULT
+ */
+struct pipeline_firewall_del_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_firewall_msg_req_type subtype;
+};
+
+struct pipeline_firewall_del_default_msg_rsp {
+ int status;
+};
+
+extern struct pipeline_be_ops pipeline_firewall_be_ops;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
new file mode 100644
index 00000000..349db6b1
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions.c
@@ -0,0 +1,1315 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_flow_actions.h"
+#include "hash_func.h"
+#include "parser.h"
+
+/*
+ * Flow actions pipeline
+ */
+#ifndef N_FLOWS_BULK
+#define N_FLOWS_BULK 4096
+#endif
+
+struct app_pipeline_fa_flow {
+ struct pipeline_fa_flow_params params;
+ void *entry_ptr;
+};
+
+struct app_pipeline_fa_dscp {
+ uint32_t traffic_class;
+ enum rte_meter_color color;
+};
+
+struct app_pipeline_fa {
+ /* Parameters */
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ struct pipeline_fa_params params;
+
+ /* Flows */
+ struct app_pipeline_fa_dscp dscp[PIPELINE_FA_N_DSCP];
+ struct app_pipeline_fa_flow *flows;
+} __rte_cache_aligned;
+
+static void *
+app_pipeline_fa_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct app_pipeline_fa *p;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fa));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+ if (pipeline_fa_parse_args(&p->params, params)) {
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(
+ p->params.n_flows * sizeof(struct app_pipeline_fa_flow));
+ p->flows = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p->flows == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Initialization of flow table */
+ for (i = 0; i < p->params.n_flows; i++)
+ pipeline_fa_flow_params_set_default(&p->flows[i].params);
+
+ /* Initialization of DSCP table */
+ for (i = 0; i < RTE_DIM(p->dscp); i++) {
+ p->dscp[i].traffic_class = 0;
+ p->dscp[i].color = e_RTE_METER_GREEN;
+ }
+
+ return (void *) p;
+}
+
+static int
+app_pipeline_fa_free(void *pipeline)
+{
+ struct app_pipeline_fa *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_free(p->flows);
+ rte_free(p);
+
+ return 0;
+}
+
+static int
+flow_params_check(struct app_pipeline_fa *p,
+ __rte_unused uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params)
+{
+ uint32_t mask, i;
+
+ /* Meter */
+
+ /* Policer */
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ struct pipeline_fa_policer_params *p = &params->p[i];
+ uint32_t j;
+
+ if ((mask & policer_update_mask) == 0)
+ continue;
+
+ for (j = 0; j < e_RTE_METER_COLORS; j++) {
+ struct pipeline_fa_policer_action *action =
+ &p->action[j];
+
+ if ((action->drop == 0) &&
+ (action->color >= e_RTE_METER_COLORS))
+ return -1;
+ }
+ }
+
+ /* Port */
+ if (port_update && (params->port_id >= p->n_ports_out))
+ return -1;
+
+ return 0;
+}
+
+int
+app_pipeline_fa_flow_config(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t flow_id,
+ uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params)
+{
+ struct app_pipeline_fa *p;
+ struct app_pipeline_fa_flow *flow;
+
+ struct pipeline_fa_flow_config_msg_req *req;
+ struct pipeline_fa_flow_config_msg_rsp *rsp;
+
+ uint32_t i, mask;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ ((meter_update_mask == 0) &&
+ (policer_update_mask == 0) &&
+ (port_update == 0)) ||
+ (meter_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
+ (policer_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
+ (params == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ if (flow_params_check(p,
+ meter_update_mask,
+ policer_update_mask,
+ port_update,
+ params) != 0)
+ return -1;
+
+ flow_id %= p->params.n_flows;
+ flow = &p->flows[flow_id];
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FA_MSG_REQ_FLOW_CONFIG;
+ req->entry_ptr = flow->entry_ptr;
+ req->flow_id = flow_id;
+ req->meter_update_mask = meter_update_mask;
+ req->policer_update_mask = policer_update_mask;
+ req->port_update = port_update;
+ memcpy(&req->params, params, sizeof(*params));
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL)) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Commit flow */
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ if ((mask & meter_update_mask) == 0)
+ continue;
+
+ memcpy(&flow->params.m[i], &params->m[i], sizeof(params->m[i]));
+ }
+
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ if ((mask & policer_update_mask) == 0)
+ continue;
+
+ memcpy(&flow->params.p[i], &params->p[i], sizeof(params->p[i]));
+ }
+
+ if (port_update)
+ flow->params.port_id = params->port_id;
+
+ flow->entry_ptr = rsp->entry_ptr;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fa_flow_config_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t *flow_id,
+ uint32_t n_flows,
+ uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params)
+{
+ struct app_pipeline_fa *p;
+ struct pipeline_fa_flow_config_bulk_msg_req *req;
+ struct pipeline_fa_flow_config_bulk_msg_rsp *rsp;
+ void **req_entry_ptr;
+ uint32_t *req_flow_id;
+ uint32_t i;
+ int status;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (flow_id == NULL) ||
+ (n_flows == 0) ||
+ ((meter_update_mask == 0) &&
+ (policer_update_mask == 0) &&
+ (port_update == 0)) ||
+ (meter_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
+ (policer_update_mask >= (1 << PIPELINE_FA_N_TC_MAX)) ||
+ (params == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ for (i = 0; i < n_flows; i++) {
+ struct pipeline_fa_flow_params *flow_params = &params[i];
+
+ if (flow_params_check(p,
+ meter_update_mask,
+ policer_update_mask,
+ port_update,
+ flow_params) != 0)
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req_entry_ptr = (void **) rte_malloc(NULL,
+ n_flows * sizeof(void *),
+ RTE_CACHE_LINE_SIZE);
+ if (req_entry_ptr == NULL)
+ return -1;
+
+ req_flow_id = (uint32_t *) rte_malloc(NULL,
+ n_flows * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE);
+ if (req_flow_id == NULL) {
+ rte_free(req_entry_ptr);
+ return -1;
+ }
+
+ for (i = 0; i < n_flows; i++) {
+ uint32_t fid = flow_id[i] % p->params.n_flows;
+ struct app_pipeline_fa_flow *flow = &p->flows[fid];
+
+ req_flow_id[i] = fid;
+ req_entry_ptr[i] = flow->entry_ptr;
+ }
+
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ rte_free(req_flow_id);
+ rte_free(req_entry_ptr);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FA_MSG_REQ_FLOW_CONFIG_BULK;
+ req->entry_ptr = req_entry_ptr;
+ req->flow_id = req_flow_id;
+ req->n_flows = n_flows;
+ req->meter_update_mask = meter_update_mask;
+ req->policer_update_mask = policer_update_mask;
+ req->port_update = port_update;
+ req->params = params;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ rte_free(req_flow_id);
+ rte_free(req_entry_ptr);
+ return -1;
+ }
+
+ /* Read response */
+ status = (rsp->n_flows == n_flows) ? 0 : -1;
+
+ /* Commit flows */
+ for (i = 0; i < rsp->n_flows; i++) {
+ uint32_t fid = flow_id[i] % p->params.n_flows;
+ struct app_pipeline_fa_flow *flow = &p->flows[fid];
+ struct pipeline_fa_flow_params *flow_params = &params[i];
+ void *entry_ptr = req_entry_ptr[i];
+ uint32_t j, mask;
+
+ for (j = 0, mask = 1; j < PIPELINE_FA_N_TC_MAX;
+ j++, mask <<= 1) {
+ if ((mask & meter_update_mask) == 0)
+ continue;
+
+ memcpy(&flow->params.m[j],
+ &flow_params->m[j],
+ sizeof(flow_params->m[j]));
+ }
+
+ for (j = 0, mask = 1; j < PIPELINE_FA_N_TC_MAX;
+ j++, mask <<= 1) {
+ if ((mask & policer_update_mask) == 0)
+ continue;
+
+ memcpy(&flow->params.p[j],
+ &flow_params->p[j],
+ sizeof(flow_params->p[j]));
+ }
+
+ if (port_update)
+ flow->params.port_id = flow_params->port_id;
+
+ flow->entry_ptr = entry_ptr;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+ rte_free(req_flow_id);
+ rte_free(req_entry_ptr);
+
+ return status;
+}
+
+int
+app_pipeline_fa_dscp_config(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t dscp,
+ uint32_t traffic_class,
+ enum rte_meter_color color)
+{
+ struct app_pipeline_fa *p;
+
+ struct pipeline_fa_dscp_config_msg_req *req;
+ struct pipeline_fa_dscp_config_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (dscp >= PIPELINE_FA_N_DSCP) ||
+ (traffic_class >= PIPELINE_FA_N_TC_MAX) ||
+ (color >= e_RTE_METER_COLORS))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ if (p->params.dscp_enabled == 0)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FA_MSG_REQ_DSCP_CONFIG;
+ req->dscp = dscp;
+ req->traffic_class = traffic_class;
+ req->color = color;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Commit DSCP */
+ p->dscp[dscp].traffic_class = traffic_class;
+ p->dscp[dscp].color = color;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t flow_id,
+ uint32_t policer_id,
+ int clear,
+ struct pipeline_fa_policer_stats *stats)
+{
+ struct app_pipeline_fa *p;
+ struct app_pipeline_fa_flow *flow;
+
+ struct pipeline_fa_policer_stats_msg_req *req;
+ struct pipeline_fa_policer_stats_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if ((app == NULL) || (stats == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ flow_id %= p->params.n_flows;
+ flow = &p->flows[flow_id];
+
+ if ((policer_id >= p->params.n_meters_per_flow) ||
+ (flow->entry_ptr == NULL))
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FA_MSG_REQ_POLICER_STATS_READ;
+ req->entry_ptr = flow->entry_ptr;
+ req->policer_id = policer_id;
+ req->clear = clear;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ memcpy(stats, &rsp->stats, sizeof(*stats));
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+static const char *
+color_to_string(enum rte_meter_color color)
+{
+ switch (color) {
+ case e_RTE_METER_GREEN: return "G";
+ case e_RTE_METER_YELLOW: return "Y";
+ case e_RTE_METER_RED: return "R";
+ default: return "?";
+ }
+}
+
+static int
+string_to_color(char *s, enum rte_meter_color *c)
+{
+ if (strcmp(s, "G") == 0) {
+ *c = e_RTE_METER_GREEN;
+ return 0;
+ }
+
+ if (strcmp(s, "Y") == 0) {
+ *c = e_RTE_METER_YELLOW;
+ return 0;
+ }
+
+ if (strcmp(s, "R") == 0) {
+ *c = e_RTE_METER_RED;
+ return 0;
+ }
+
+ return -1;
+}
+
+static const char *
+policer_action_to_string(struct pipeline_fa_policer_action *a)
+{
+ if (a->drop)
+ return "D";
+
+ return color_to_string(a->color);
+}
+
+static int
+string_to_policer_action(char *s, struct pipeline_fa_policer_action *a)
+{
+ if (strcmp(s, "G") == 0) {
+ a->drop = 0;
+ a->color = e_RTE_METER_GREEN;
+ return 0;
+ }
+
+ if (strcmp(s, "Y") == 0) {
+ a->drop = 0;
+ a->color = e_RTE_METER_YELLOW;
+ return 0;
+ }
+
+ if (strcmp(s, "R") == 0) {
+ a->drop = 0;
+ a->color = e_RTE_METER_RED;
+ return 0;
+ }
+
+ if (strcmp(s, "D") == 0) {
+ a->drop = 1;
+ a->color = e_RTE_METER_GREEN;
+ return 0;
+ }
+
+ return -1;
+}
+
+static void
+print_flow(struct app_pipeline_fa *p,
+ uint32_t flow_id,
+ struct app_pipeline_fa_flow *flow)
+{
+ uint32_t i;
+
+ printf("Flow ID = %" PRIu32 "\n", flow_id);
+
+ for (i = 0; i < p->params.n_meters_per_flow; i++) {
+ struct rte_meter_trtcm_params *meter = &flow->params.m[i];
+ struct pipeline_fa_policer_params *policer = &flow->params.p[i];
+
+ printf("\ttrTCM [CIR = %" PRIu64
+ ", CBS = %" PRIu64 ", PIR = %" PRIu64
+ ", PBS = %" PRIu64 "] Policer [G : %s, Y : %s, R : %s]\n",
+ meter->cir,
+ meter->cbs,
+ meter->pir,
+ meter->pbs,
+ policer_action_to_string(&policer->action[e_RTE_METER_GREEN]),
+ policer_action_to_string(&policer->action[e_RTE_METER_YELLOW]),
+ policer_action_to_string(&policer->action[e_RTE_METER_RED]));
+ }
+
+ printf("\tPort %u (entry_ptr = %p)\n",
+ flow->params.port_id,
+ flow->entry_ptr);
+}
+
+
+static int
+app_pipeline_fa_flow_ls(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_fa *p;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ for (i = 0; i < p->params.n_flows; i++) {
+ struct app_pipeline_fa_flow *flow = &p->flows[i];
+
+ print_flow(p, i, flow);
+ }
+
+ return 0;
+}
+
+static int
+app_pipeline_fa_dscp_ls(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_fa *p;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id,
+ &pipeline_flow_actions);
+ if (p == NULL)
+ return -1;
+
+ if (p->params.dscp_enabled == 0)
+ return -1;
+
+ for (i = 0; i < RTE_DIM(p->dscp); i++) {
+ struct app_pipeline_fa_dscp *dscp = &p->dscp[i];
+
+ printf("DSCP = %2" PRIu32 ": Traffic class = %" PRIu32
+ ", Color = %s\n",
+ i,
+ dscp->traffic_class,
+ color_to_string(dscp->color));
+ }
+
+ return 0;
+}
+
+int
+app_pipeline_fa_load_file(char *filename,
+ uint32_t *flow_ids,
+ struct pipeline_fa_flow_params *p,
+ uint32_t *n_flows,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (flow_ids == NULL) ||
+ (p == NULL) ||
+ (n_flows == NULL) ||
+ (*n_flows == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_flows; l++) {
+ char *tokens[64];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error1;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+
+ if ((n_tokens != 64) ||
+ /* flow */
+ strcmp(tokens[0], "flow") ||
+ parser_read_uint32(&flow_ids[i], tokens[1]) ||
+
+ /* meter & policer 0 */
+ strcmp(tokens[2], "meter") ||
+ strcmp(tokens[3], "0") ||
+ strcmp(tokens[4], "trtcm") ||
+ parser_read_uint64(&p[i].m[0].cir, tokens[5]) ||
+ parser_read_uint64(&p[i].m[0].pir, tokens[6]) ||
+ parser_read_uint64(&p[i].m[0].cbs, tokens[7]) ||
+ parser_read_uint64(&p[i].m[0].pbs, tokens[8]) ||
+ strcmp(tokens[9], "policer") ||
+ strcmp(tokens[10], "0") ||
+ strcmp(tokens[11], "g") ||
+ string_to_policer_action(tokens[12],
+ &p[i].p[0].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[13], "y") ||
+ string_to_policer_action(tokens[14],
+ &p[i].p[0].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[15], "r") ||
+ string_to_policer_action(tokens[16],
+ &p[i].p[0].action[e_RTE_METER_RED]) ||
+
+ /* meter & policer 1 */
+ strcmp(tokens[17], "meter") ||
+ strcmp(tokens[18], "1") ||
+ strcmp(tokens[19], "trtcm") ||
+ parser_read_uint64(&p[i].m[1].cir, tokens[20]) ||
+ parser_read_uint64(&p[i].m[1].pir, tokens[21]) ||
+ parser_read_uint64(&p[i].m[1].cbs, tokens[22]) ||
+ parser_read_uint64(&p[i].m[1].pbs, tokens[23]) ||
+ strcmp(tokens[24], "policer") ||
+ strcmp(tokens[25], "1") ||
+ strcmp(tokens[26], "g") ||
+ string_to_policer_action(tokens[27],
+ &p[i].p[1].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[28], "y") ||
+ string_to_policer_action(tokens[29],
+ &p[i].p[1].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[30], "r") ||
+ string_to_policer_action(tokens[31],
+ &p[i].p[1].action[e_RTE_METER_RED]) ||
+
+ /* meter & policer 2 */
+ strcmp(tokens[32], "meter") ||
+ strcmp(tokens[33], "2") ||
+ strcmp(tokens[34], "trtcm") ||
+ parser_read_uint64(&p[i].m[2].cir, tokens[35]) ||
+ parser_read_uint64(&p[i].m[2].pir, tokens[36]) ||
+ parser_read_uint64(&p[i].m[2].cbs, tokens[37]) ||
+ parser_read_uint64(&p[i].m[2].pbs, tokens[38]) ||
+ strcmp(tokens[39], "policer") ||
+ strcmp(tokens[40], "2") ||
+ strcmp(tokens[41], "g") ||
+ string_to_policer_action(tokens[42],
+ &p[i].p[2].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[43], "y") ||
+ string_to_policer_action(tokens[44],
+ &p[i].p[2].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[45], "r") ||
+ string_to_policer_action(tokens[46],
+ &p[i].p[2].action[e_RTE_METER_RED]) ||
+
+ /* meter & policer 3 */
+ strcmp(tokens[47], "meter") ||
+ strcmp(tokens[48], "3") ||
+ strcmp(tokens[49], "trtcm") ||
+ parser_read_uint64(&p[i].m[3].cir, tokens[50]) ||
+ parser_read_uint64(&p[i].m[3].pir, tokens[51]) ||
+ parser_read_uint64(&p[i].m[3].cbs, tokens[52]) ||
+ parser_read_uint64(&p[i].m[3].pbs, tokens[53]) ||
+ strcmp(tokens[54], "policer") ||
+ strcmp(tokens[55], "3") ||
+ strcmp(tokens[56], "g") ||
+ string_to_policer_action(tokens[57],
+ &p[i].p[3].action[e_RTE_METER_GREEN]) ||
+ strcmp(tokens[58], "y") ||
+ string_to_policer_action(tokens[59],
+ &p[i].p[3].action[e_RTE_METER_YELLOW]) ||
+ strcmp(tokens[60], "r") ||
+ string_to_policer_action(tokens[61],
+ &p[i].p[3].action[e_RTE_METER_RED]) ||
+
+ /* port */
+ strcmp(tokens[62], "port") ||
+ parser_read_uint32(&p[i].port_id, tokens[63]))
+ goto error1;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_flows = i;
+ fclose(f);
+ return 0;
+
+error1:
+ *line = l;
+ fclose(f);
+ return -1;
+}
+
+/*
+ * action
+ *
+ * flow meter, policer and output port configuration:
+ * p <pipelineid> action flow <flowid> meter <meterid> trtcm <cir> <pir> <cbs> <pbs>
+ *
+ * p <pipelineid> action flow <flowid> policer <policerid> g <gaction> y <yaction> r <raction>
+ * where each of <gaction>, <yaction> and <raction> is one of the following:
+ *      G = recolor to green
+ *      Y = recolor to yellow
+ *      R = recolor to red
+ * D = drop
+ *
+ * p <pipelineid> action flow <flowid> port <portid>
+ *
+ * p <pipelineid> action flow bulk <file>
+ *
+ * flow policer stats read:
+ * p <pipelineid> action flow <flowid> stats
+ *
+ * flow ls:
+ * p <pipelineid> action flow ls
+ *
+ * dscp table configuration:
+ * p <pipelineid> action dscp <dscpid> class <classid> color <color>
+ *
+ * dscp table ls:
+ * p <pipelineid> action dscp ls
+ */
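
For the bulk command, each record in <file> is a single line laid out exactly as app_pipeline_fa_load_file() below parses it: a flow id, one meter/policer block per traffic class (four in total) and an output port, 64 tokens altogether. A sketch of one record with purely illustrative rates follows; it is wrapped here for readability but must occupy one line in the file:

	flow 0 meter 0 trtcm 1250000 1250000 1000 1000 policer 0 g G y Y r R
	       meter 1 trtcm 1250000 1250000 1000 1000 policer 1 g G y Y r R
	       meter 2 trtcm 1250000 1250000 1000 1000 policer 2 g G y Y r R
	       meter 3 trtcm 1250000 1250000 1000 1000 policer 3 g G y Y r D
	       port 0
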
+
+struct cmd_action_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t action_string;
+ cmdline_multi_string_t multi_string;
+};
+
+static void
+cmd_action_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_action_result *params = parsed_result;
+ struct app_params *app = data;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
+
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status != 0) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "action");
+ return;
+ }
+
+ /* action flow meter */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "meter") == 0)) {
+ struct pipeline_fa_flow_params flow_params;
+ uint32_t flow_id, meter_id;
+
+ if (n_tokens != 9) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow meter");
+ return;
+ }
+
+ memset(&flow_params, 0, sizeof(flow_params));
+
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
+
+ if (parser_read_uint32(&meter_id, tokens[3]) ||
+ (meter_id >= PIPELINE_FA_N_TC_MAX)) {
+ printf(CMD_MSG_INVALID_ARG, "meterid");
+ return;
+ }
+
+ if (strcmp(tokens[4], "trtcm")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "trtcm");
+ return;
+ }
+
+ if (parser_read_uint64(&flow_params.m[meter_id].cir, tokens[5])) {
+ printf(CMD_MSG_INVALID_ARG, "cir");
+ return;
+ }
+
+ if (parser_read_uint64(&flow_params.m[meter_id].pir, tokens[6])) {
+ printf(CMD_MSG_INVALID_ARG, "pir");
+ return;
+ }
+
+ if (parser_read_uint64(&flow_params.m[meter_id].cbs, tokens[7])) {
+ printf(CMD_MSG_INVALID_ARG, "cbs");
+ return;
+ }
+
+ if (parser_read_uint64(&flow_params.m[meter_id].pbs, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "pbs");
+ return;
+ }
+
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ flow_id,
+ 1 << meter_id,
+ 0,
+ 0,
+ &flow_params);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow meter");
+
+ return;
+ } /* action flow meter */
+
+ /* action flow policer */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "policer") == 0)) {
+ struct pipeline_fa_flow_params flow_params;
+ uint32_t flow_id, policer_id;
+
+ if (n_tokens != 10) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow policer");
+ return;
+ }
+
+ memset(&flow_params, 0, sizeof(flow_params));
+
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
+
+ if (parser_read_uint32(&policer_id, tokens[3]) ||
+ (policer_id >= PIPELINE_FA_N_TC_MAX)) {
+ printf(CMD_MSG_INVALID_ARG, "policerid");
+ return;
+ }
+
+ if (strcmp(tokens[4], "g")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "g");
+ return;
+ }
+
+ if (string_to_policer_action(tokens[5],
+ &flow_params.p[policer_id].action[e_RTE_METER_GREEN])) {
+ printf(CMD_MSG_INVALID_ARG, "gaction");
+ return;
+ }
+
+ if (strcmp(tokens[6], "y")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "y");
+ return;
+ }
+
+ if (string_to_policer_action(tokens[7],
+ &flow_params.p[policer_id].action[e_RTE_METER_YELLOW])) {
+ printf(CMD_MSG_INVALID_ARG, "yaction");
+ return;
+ }
+
+ if (strcmp(tokens[8], "r")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "r");
+ return;
+ }
+
+ if (string_to_policer_action(tokens[9],
+ &flow_params.p[policer_id].action[e_RTE_METER_RED])) {
+ printf(CMD_MSG_INVALID_ARG, "raction");
+ return;
+ }
+
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ flow_id,
+ 0,
+ 1 << policer_id,
+ 0,
+ &flow_params);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "action flow policer");
+
+ return;
+ } /* action flow policer */
+
+ /* action flow port */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "port") == 0)) {
+ struct pipeline_fa_flow_params flow_params;
+ uint32_t flow_id, port_id;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow port");
+ return;
+ }
+
+ memset(&flow_params, 0, sizeof(flow_params));
+
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ flow_params.port_id = port_id;
+
+ status = app_pipeline_fa_flow_config(app,
+ params->pipeline_id,
+ flow_id,
+ 0,
+ 0,
+ 1,
+ &flow_params);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow port");
+
+ return;
+ } /* action flow port */
+
+ /* action flow stats */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ strcmp(tokens[1], "bulk") &&
+ strcmp(tokens[1], "ls") &&
+ (strcmp(tokens[2], "stats") == 0)) {
+ struct pipeline_fa_policer_stats stats;
+ uint32_t flow_id, policer_id;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow stats");
+ return;
+ }
+
+ if (parser_read_uint32(&flow_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
+
+ for (policer_id = 0;
+ policer_id < PIPELINE_FA_N_TC_MAX;
+ policer_id++) {
+ status = app_pipeline_fa_flow_policer_stats_read(app,
+ params->pipeline_id,
+ flow_id,
+ policer_id,
+ 1,
+ &stats);
+ if (status != 0) {
+ printf(CMD_MSG_FAIL, "action flow stats");
+ return;
+ }
+
+ /* Display stats */
+ printf("\tPolicer: %" PRIu32
+ "\tPkts G: %" PRIu64
+ "\tPkts Y: %" PRIu64
+ "\tPkts R: %" PRIu64
+ "\tPkts D: %" PRIu64 "\n",
+ policer_id,
+ stats.n_pkts[e_RTE_METER_GREEN],
+ stats.n_pkts[e_RTE_METER_YELLOW],
+ stats.n_pkts[e_RTE_METER_RED],
+ stats.n_pkts_drop);
+ }
+
+ return;
+ } /* action flow stats */
+
+ /* action flow bulk */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ (strcmp(tokens[1], "bulk") == 0)) {
+ struct pipeline_fa_flow_params *flow_params;
+ uint32_t *flow_ids, n_flows, line;
+ char *filename;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow bulk");
+ return;
+ }
+
+ filename = tokens[2];
+
+ n_flows = APP_PIPELINE_FA_MAX_RECORDS_IN_FILE;
+ flow_ids = malloc(n_flows * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ return;
+ }
+
+ flow_params = malloc(n_flows * sizeof(struct pipeline_fa_flow_params));
+ if (flow_params == NULL) {
+ printf(CMD_MSG_OUT_OF_MEMORY);
+ free(flow_ids);
+ return;
+ }
+
+ status = app_pipeline_fa_load_file(filename,
+ flow_ids,
+ flow_params,
+ &n_flows,
+ &line);
+ if (status) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_params);
+ free(flow_ids);
+ return;
+ }
+
+ status = app_pipeline_fa_flow_config_bulk(app,
+ params->pipeline_id,
+ flow_ids,
+ n_flows,
+ 0xF,
+ 0xF,
+ 1,
+ flow_params);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow bulk");
+
+ free(flow_params);
+ free(flow_ids);
+ return;
+ } /* action flow bulk */
+
+ /* action flow ls */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "flow") == 0) &&
+ (strcmp(tokens[1], "ls") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action flow ls");
+ return;
+ }
+
+ status = app_pipeline_fa_flow_ls(app,
+ params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "action flow ls");
+
+ return;
+ } /* action flow ls */
+
+ /* action dscp */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "dscp") == 0) &&
+ strcmp(tokens[1], "ls")) {
+ uint32_t dscp_id, tc_id;
+ enum rte_meter_color color;
+
+ if (n_tokens != 6) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action dscp");
+ return;
+ }
+
+ if (parser_read_uint32(&dscp_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "dscpid");
+ return;
+ }
+
+ if (strcmp(tokens[2], "class")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "class");
+ return;
+ }
+
+ if (parser_read_uint32(&tc_id, tokens[3])) {
+ printf(CMD_MSG_INVALID_ARG, "classid");
+ return;
+ }
+
+ if (strcmp(tokens[4], "color")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "color");
+ return;
+ }
+
+ if (string_to_color(tokens[5], &color)) {
+ printf(CMD_MSG_INVALID_ARG, "colorid");
+ return;
+ }
+
+ status = app_pipeline_fa_dscp_config(app,
+ params->pipeline_id,
+ dscp_id,
+ tc_id,
+ color);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "action dscp");
+
+ return;
+ } /* action dscp */
+
+ /* action dscp ls */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "dscp") == 0) &&
+ (strcmp(tokens[1], "ls") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "action dscp ls");
+ return;
+ }
+
+ status = app_pipeline_fa_dscp_ls(app,
+ params->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "action dscp ls");
+
+ return;
+ } /* action dscp ls */
+
+ printf(CMD_MSG_FAIL, "action");
+}
+
+static cmdline_parse_token_string_t cmd_action_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_action_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_action_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_action_result, pipeline_id, UINT32);
+
+static cmdline_parse_token_string_t cmd_action_action_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_action_result, action_string, "action");
+
+static cmdline_parse_token_string_t cmd_action_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_action_result, multi_string,
+ TOKEN_STRING_MULTI);
+
+cmdline_parse_inst_t cmd_action = {
+ .f = cmd_action_parsed,
+ .data = NULL,
+ .help_str = "flow actions (meter, policer, policer stats, dscp table)",
+ .tokens = {
+ (void *) &cmd_action_p_string,
+ (void *) &cmd_action_pipeline_id,
+ (void *) &cmd_action_action_string,
+ (void *) &cmd_action_multi_string,
+ NULL,
+ },
+};
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_action,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_flow_actions_fe_ops = {
+ .f_init = app_pipeline_fa_init,
+ .f_post_init = NULL,
+ .f_free = app_pipeline_fa_free,
+ .f_track = app_pipeline_track_default,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_flow_actions = {
+ .name = "FLOW_ACTIONS",
+ .be_ops = &pipeline_flow_actions_be_ops,
+ .fe_ops = &pipeline_flow_actions_fe_ops,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
new file mode 100644
index 00000000..9c609741
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions.h
@@ -0,0 +1,89 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FLOW_ACTIONS_H__
+#define __INCLUDE_PIPELINE_FLOW_ACTIONS_H__
+
+#include <rte_meter.h>
+
+#include "pipeline.h"
+#include "pipeline_flow_actions_be.h"
+
+int
+app_pipeline_fa_flow_config(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t flow_id,
+ uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params);
+
+int
+app_pipeline_fa_flow_config_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t *flow_id,
+ uint32_t n_flows,
+ uint32_t meter_update_mask,
+ uint32_t policer_update_mask,
+ uint32_t port_update,
+ struct pipeline_fa_flow_params *params);
+
+int
+app_pipeline_fa_dscp_config(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t dscp,
+ uint32_t traffic_class,
+ enum rte_meter_color color);
+
+int
+app_pipeline_fa_flow_policer_stats_read(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t flow_id,
+ uint32_t policer_id,
+ int clear,
+ struct pipeline_fa_policer_stats *stats);
+
+#ifndef APP_PIPELINE_FA_MAX_RECORDS_IN_FILE
+#define APP_PIPELINE_FA_MAX_RECORDS_IN_FILE 65536
+#endif
+
+int
+app_pipeline_fa_load_file(char *filename,
+ uint32_t *flow_ids,
+ struct pipeline_fa_flow_params *p,
+ uint32_t *n_flows,
+ uint32_t *line);
+
+extern struct pipeline_type pipeline_flow_actions;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
new file mode 100644
index 00000000..11fcbb76
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.c
@@ -0,0 +1,989 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_table_array.h>
+#include <rte_byteorder.h>
+#include <rte_ip.h>
+
+#include "pipeline_actions_common.h"
+#include "pipeline_flow_actions_be.h"
+#include "parser.h"
+#include "hash_func.h"
+
+int
+pipeline_fa_flow_params_set_default(struct pipeline_fa_flow_params *params)
+{
+ uint32_t i;
+
+ if (params == NULL)
+ return -1;
+
+ for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++) {
+ struct rte_meter_trtcm_params *m = &params->m[i];
+
+ m->cir = 1;
+ m->cbs = 1;
+ m->pir = 1;
+ m->pbs = 2;
+ }
+
+ for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++) {
+ struct pipeline_fa_policer_params *p = &params->p[i];
+ uint32_t j;
+
+ for (j = 0; j < e_RTE_METER_COLORS; j++) {
+ struct pipeline_fa_policer_action *a = &p->action[j];
+
+ a->drop = 0;
+ a->color = (enum rte_meter_color) j;
+ }
+ }
+
+ params->port_id = 0;
+
+ return 0;
+}
+
+struct dscp_entry {
+ uint32_t traffic_class;
+ enum rte_meter_color color;
+};
+
+struct pipeline_flow_actions {
+ struct pipeline p;
+ struct pipeline_fa_params params;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_FA_MSG_REQS];
+
+ struct dscp_entry dscp[PIPELINE_FA_N_DSCP];
+} __rte_cache_aligned;
+
+static void *
+pipeline_fa_msg_req_custom_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_fa_msg_req_custom_handler,
+};
+
+static void *
+pipeline_fa_msg_req_flow_config_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fa_msg_req_flow_config_bulk_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fa_msg_req_dscp_config_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fa_msg_req_policer_stats_read_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_FA_MSG_REQ_FLOW_CONFIG] =
+ pipeline_fa_msg_req_flow_config_handler,
+ [PIPELINE_FA_MSG_REQ_FLOW_CONFIG_BULK] =
+ pipeline_fa_msg_req_flow_config_bulk_handler,
+ [PIPELINE_FA_MSG_REQ_DSCP_CONFIG] =
+ pipeline_fa_msg_req_dscp_config_handler,
+ [PIPELINE_FA_MSG_REQ_POLICER_STATS_READ] =
+ pipeline_fa_msg_req_policer_stats_read_handler,
+};
+
+/*
+ * Flow table
+ */
+struct meter_policer {
+ struct rte_meter_trtcm meter;
+ struct pipeline_fa_policer_params policer;
+ struct pipeline_fa_policer_stats stats;
+};
+
+struct flow_table_entry {
+ struct rte_pipeline_table_entry head;
+ struct meter_policer mp[PIPELINE_FA_N_TC_MAX];
+};
+
+static int
+flow_table_entry_set_meter(struct flow_table_entry *entry,
+ uint32_t meter_id,
+ struct pipeline_fa_flow_params *params)
+{
+ struct rte_meter_trtcm *meter = &entry->mp[meter_id].meter;
+ struct rte_meter_trtcm_params *meter_params = &params->m[meter_id];
+
+ return rte_meter_trtcm_config(meter, meter_params);
+}
+
+static void
+flow_table_entry_set_policer(struct flow_table_entry *entry,
+ uint32_t policer_id,
+ struct pipeline_fa_flow_params *params)
+{
+ struct pipeline_fa_policer_params *p0 = &entry->mp[policer_id].policer;
+ struct pipeline_fa_policer_params *p1 = &params->p[policer_id];
+
+ memcpy(p0, p1, sizeof(*p0));
+}
+
+static void
+flow_table_entry_set_port_id(struct pipeline_flow_actions *p,
+ struct flow_table_entry *entry,
+ struct pipeline_fa_flow_params *params)
+{
+ entry->head.action = RTE_PIPELINE_ACTION_PORT;
+ entry->head.port_id = p->p.port_out_id[params->port_id];
+}
+
+static int
+flow_table_entry_set_default(struct pipeline_flow_actions *p,
+ struct flow_table_entry *entry)
+{
+ struct pipeline_fa_flow_params params;
+ uint32_t i;
+
+ pipeline_fa_flow_params_set_default(&params);
+
+ memset(entry, 0, sizeof(*entry));
+
+ flow_table_entry_set_port_id(p, entry, &params);
+
+ for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++) {
+ int status;
+
+ status = flow_table_entry_set_meter(entry, i, &params);
+ if (status)
+ return status;
+ }
+
+ for (i = 0; i < PIPELINE_FA_N_TC_MAX; i++)
+ flow_table_entry_set_policer(entry, i, &params);
+
+ return 0;
+}
+
+static inline uint64_t
+pkt_work(
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry *table_entry,
+ void *arg,
+ uint64_t time)
+{
+ struct pipeline_flow_actions *p = arg;
+ struct flow_table_entry *entry =
+ (struct flow_table_entry *) table_entry;
+
+ struct ipv4_hdr *pkt_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, p->params.ip_hdr_offset);
+ enum rte_meter_color *pkt_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, p->params.color_offset);
+
+ /* Read (IP header) */
+ uint32_t total_length = rte_bswap16(pkt_ip->total_length);
+ uint32_t dscp = pkt_ip->type_of_service >> 2;
+
+ uint32_t tc = p->dscp[dscp].traffic_class;
+ enum rte_meter_color color = p->dscp[dscp].color;
+
+ struct rte_meter_trtcm *meter = &entry->mp[tc].meter;
+ struct pipeline_fa_policer_params *policer = &entry->mp[tc].policer;
+ struct pipeline_fa_policer_stats *stats = &entry->mp[tc].stats;
+
+ /* Read (entry), compute */
+ enum rte_meter_color color2 = rte_meter_trtcm_color_aware_check(meter,
+ time,
+ total_length,
+ color);
+
+ enum rte_meter_color color3 = policer->action[color2].color;
+ uint64_t drop = policer->action[color2].drop;
+
+ /* Read (entry), write (entry, color) */
+ stats->n_pkts[color3] += drop ^ 1LLU;
+ stats->n_pkts_drop += drop;
+ *pkt_color = color3;
+
+ return drop;
+}
+
+static inline uint64_t
+pkt4_work(
+ struct rte_mbuf **pkts,
+ struct rte_pipeline_table_entry **table_entries,
+ void *arg,
+ uint64_t time)
+{
+ struct pipeline_flow_actions *p = arg;
+
+ struct flow_table_entry *entry0 =
+ (struct flow_table_entry *) table_entries[0];
+ struct flow_table_entry *entry1 =
+ (struct flow_table_entry *) table_entries[1];
+ struct flow_table_entry *entry2 =
+ (struct flow_table_entry *) table_entries[2];
+ struct flow_table_entry *entry3 =
+ (struct flow_table_entry *) table_entries[3];
+
+ struct ipv4_hdr *pkt0_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p->params.ip_hdr_offset);
+ struct ipv4_hdr *pkt1_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p->params.ip_hdr_offset);
+ struct ipv4_hdr *pkt2_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p->params.ip_hdr_offset);
+ struct ipv4_hdr *pkt3_ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p->params.ip_hdr_offset);
+
+ enum rte_meter_color *pkt0_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p->params.color_offset);
+ enum rte_meter_color *pkt1_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p->params.color_offset);
+ enum rte_meter_color *pkt2_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p->params.color_offset);
+ enum rte_meter_color *pkt3_color = (enum rte_meter_color *)
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p->params.color_offset);
+
+ /* Read (IP header) */
+ uint32_t total_length0 = rte_bswap16(pkt0_ip->total_length);
+ uint32_t dscp0 = pkt0_ip->type_of_service >> 2;
+
+ uint32_t total_length1 = rte_bswap16(pkt1_ip->total_length);
+ uint32_t dscp1 = pkt1_ip->type_of_service >> 2;
+
+ uint32_t total_length2 = rte_bswap16(pkt2_ip->total_length);
+ uint32_t dscp2 = pkt2_ip->type_of_service >> 2;
+
+ uint32_t total_length3 = rte_bswap16(pkt3_ip->total_length);
+ uint32_t dscp3 = pkt3_ip->type_of_service >> 2;
+
+ uint32_t tc0 = p->dscp[dscp0].traffic_class;
+ enum rte_meter_color color0 = p->dscp[dscp0].color;
+
+ uint32_t tc1 = p->dscp[dscp1].traffic_class;
+ enum rte_meter_color color1 = p->dscp[dscp1].color;
+
+ uint32_t tc2 = p->dscp[dscp2].traffic_class;
+ enum rte_meter_color color2 = p->dscp[dscp2].color;
+
+ uint32_t tc3 = p->dscp[dscp3].traffic_class;
+ enum rte_meter_color color3 = p->dscp[dscp3].color;
+
+ struct rte_meter_trtcm *meter0 = &entry0->mp[tc0].meter;
+ struct pipeline_fa_policer_params *policer0 = &entry0->mp[tc0].policer;
+ struct pipeline_fa_policer_stats *stats0 = &entry0->mp[tc0].stats;
+
+ struct rte_meter_trtcm *meter1 = &entry1->mp[tc1].meter;
+ struct pipeline_fa_policer_params *policer1 = &entry1->mp[tc1].policer;
+ struct pipeline_fa_policer_stats *stats1 = &entry1->mp[tc1].stats;
+
+ struct rte_meter_trtcm *meter2 = &entry2->mp[tc2].meter;
+ struct pipeline_fa_policer_params *policer2 = &entry2->mp[tc2].policer;
+ struct pipeline_fa_policer_stats *stats2 = &entry2->mp[tc2].stats;
+
+ struct rte_meter_trtcm *meter3 = &entry3->mp[tc3].meter;
+ struct pipeline_fa_policer_params *policer3 = &entry3->mp[tc3].policer;
+ struct pipeline_fa_policer_stats *stats3 = &entry3->mp[tc3].stats;
+
+ /* Read (entry), compute, write (entry) */
+ enum rte_meter_color color2_0 = rte_meter_trtcm_color_aware_check(
+ meter0,
+ time,
+ total_length0,
+ color0);
+
+ enum rte_meter_color color2_1 = rte_meter_trtcm_color_aware_check(
+ meter1,
+ time,
+ total_length1,
+ color1);
+
+ enum rte_meter_color color2_2 = rte_meter_trtcm_color_aware_check(
+ meter2,
+ time,
+ total_length2,
+ color2);
+
+ enum rte_meter_color color2_3 = rte_meter_trtcm_color_aware_check(
+ meter3,
+ time,
+ total_length3,
+ color3);
+
+ enum rte_meter_color color3_0 = policer0->action[color2_0].color;
+ enum rte_meter_color color3_1 = policer1->action[color2_1].color;
+ enum rte_meter_color color3_2 = policer2->action[color2_2].color;
+ enum rte_meter_color color3_3 = policer3->action[color2_3].color;
+
+ uint64_t drop0 = policer0->action[color2_0].drop;
+ uint64_t drop1 = policer1->action[color2_1].drop;
+ uint64_t drop2 = policer2->action[color2_2].drop;
+ uint64_t drop3 = policer3->action[color2_3].drop;
+
+ /* Read (entry), write (entry, color) */
+ stats0->n_pkts[color3_0] += drop0 ^ 1LLU;
+ stats0->n_pkts_drop += drop0;
+
+ stats1->n_pkts[color3_1] += drop1 ^ 1LLU;
+ stats1->n_pkts_drop += drop1;
+
+ stats2->n_pkts[color3_2] += drop2 ^ 1LLU;
+ stats2->n_pkts_drop += drop2;
+
+ stats3->n_pkts[color3_3] += drop3 ^ 1LLU;
+ stats3->n_pkts_drop += drop3;
+
+ *pkt0_color = color3_0;
+ *pkt1_color = color3_1;
+ *pkt2_color = color3_2;
+ *pkt3_color = color3_3;
+
+ return drop0 | (drop1 << 1) | (drop2 << 2) | (drop3 << 3);
+}
+
+PIPELINE_TABLE_AH_HIT_DROP_TIME(fa_table_ah_hit, pkt_work, pkt4_work);
+
+static rte_pipeline_table_action_handler_hit
+get_fa_table_ah_hit(__rte_unused struct pipeline_flow_actions *p)
+{
+ return fa_table_ah_hit;
+}
+
+/*
+ * Argument parsing
+ */
+int
+pipeline_fa_parse_args(struct pipeline_fa_params *p,
+ struct pipeline_params *params)
+{
+ uint32_t n_flows_present = 0;
+ uint32_t n_meters_per_flow_present = 0;
+ uint32_t flow_id_offset_present = 0;
+ uint32_t ip_hdr_offset_present = 0;
+ uint32_t color_offset_present = 0;
+ uint32_t i;
+
+ /* Default values */
+ p->n_meters_per_flow = 1;
+ p->dscp_enabled = 0;
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ /* n_flows */
+ if (strcmp(arg_name, "n_flows") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_flows_present == 0, params->name,
+ arg_name);
+ n_flows_present = 1;
+
+ status = parser_read_uint32(&p->n_flows,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->n_flows != 0)), params->name,
+ arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* n_meters_per_flow */
+ if (strcmp(arg_name, "n_meters_per_flow") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_meters_per_flow_present == 0,
+ params->name, arg_name);
+ n_meters_per_flow_present = 1;
+
+ status = parser_read_uint32(&p->n_meters_per_flow,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->n_meters_per_flow != 0)),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
+ (p->n_meters_per_flow <=
+ PIPELINE_FA_N_TC_MAX)), params->name,
+ arg_name, arg_value);
+
+ continue;
+ }
+
+ /* flow_id_offset */
+ if (strcmp(arg_name, "flow_id_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ flow_id_offset_present == 0,
+ params->name, arg_name);
+ flow_id_offset_present = 1;
+
+ status = parser_read_uint32(&p->flow_id_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* ip_hdr_offset */
+ if (strcmp(arg_name, "ip_hdr_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ ip_hdr_offset_present == 0,
+ params->name, arg_name);
+ ip_hdr_offset_present = 1;
+
+ status = parser_read_uint32(&p->ip_hdr_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* color_offset */
+ if (strcmp(arg_name, "color_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ color_offset_present == 0, params->name,
+ arg_name);
+ color_offset_present = 1;
+
+ status = parser_read_uint32(&p->color_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dscp_enabled = 1;
+
+ continue;
+ }
+
+ /* Unknown argument */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ /* Check that mandatory arguments are present */
+ PIPELINE_PARSE_ERR_MANDATORY((n_flows_present), params->name,
+ "n_flows");
+ PIPELINE_PARSE_ERR_MANDATORY((flow_id_offset_present),
+ params->name, "flow_id_offset");
+ PIPELINE_PARSE_ERR_MANDATORY((ip_hdr_offset_present),
+ params->name, "ip_hdr_offset");
+ PIPELINE_PARSE_ERR_MANDATORY((color_offset_present), params->name,
+ "color_offset");
+
+ return 0;
+}
+
+static void
+dscp_init(struct pipeline_flow_actions *p)
+{
+ uint32_t i;
+
+ for (i = 0; i < PIPELINE_FA_N_DSCP; i++) {
+ p->dscp[i].traffic_class = 0;
+ p->dscp[i].color = e_RTE_METER_GREEN;
+ }
+}
+
+static void *pipeline_fa_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_flow_actions *p_fa;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if (params == NULL)
+ return NULL;
+
+ if (params->n_ports_in != params->n_ports_out)
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct pipeline_flow_actions));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+ p_fa = (struct pipeline_flow_actions *) p;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Flow actions");
+
+ /* Parse arguments */
+ if (pipeline_fa_parse_args(&p_fa->params, params)) {
+ rte_free(p);
+ return NULL;
+ }
+
+ dscp_init(p_fa);
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = params->name,
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Tables */
+ p->n_tables = 1;
+ {
+ struct rte_table_array_params table_array_params = {
+ .n_entries = p_fa->params.n_flows,
+ .offset = p_fa->params.flow_id_offset,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_array_ops,
+ .arg_create = &table_array_params,
+ .f_action_hit = get_fa_table_ah_hit(p_fa),
+ .f_action_miss = NULL,
+ .arg_ah = p_fa,
+ .action_data_size =
+ sizeof(struct flow_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Initialize table entries */
+ for (i = 0; i < p_fa->params.n_flows; i++) {
+ struct rte_table_array_key key = {
+ .pos = i,
+ };
+
+ struct flow_table_entry entry;
+ struct rte_pipeline_table_entry *entry_ptr;
+ int key_found, status;
+
+ flow_table_entry_set_default(p_fa, &entry);
+
+ status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &key,
+ (struct rte_pipeline_table_entry *) &entry,
+ &key_found,
+ &entry_ptr);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_fa->custom_handlers,
+ custom_handlers,
+ sizeof(p_fa->custom_handlers));
+
+ return p;
+}
+
+static int
+pipeline_fa_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_fa_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+void *
+pipeline_fa_msg_req_custom_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_actions *p_fa =
+ (struct pipeline_flow_actions *) p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_FA_MSG_REQS) ?
+ p_fa->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+void *
+pipeline_fa_msg_req_flow_config_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_actions *p_fa = (struct pipeline_flow_actions *) p;
+ struct pipeline_fa_flow_config_msg_req *req = msg;
+ struct pipeline_fa_flow_config_msg_rsp *rsp = msg;
+ struct flow_table_entry *entry;
+ uint32_t mask, i;
+
+ /* Set flow table entry to default if not configured before */
+ if (req->entry_ptr == NULL) {
+ struct rte_table_array_key key = {
+ .pos = req->flow_id % p_fa->params.n_flows,
+ };
+
+ struct flow_table_entry default_entry;
+
+ int key_found, status;
+
+ flow_table_entry_set_default(p_fa, &default_entry);
+
+ status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &key,
+ (struct rte_pipeline_table_entry *) &default_entry,
+ &key_found,
+ (struct rte_pipeline_table_entry **) &entry);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+ } else
+ entry = (struct flow_table_entry *) req->entry_ptr;
+
+ /* Meter */
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ int status;
+
+ if ((mask & req->meter_update_mask) == 0)
+ continue;
+
+ status = flow_table_entry_set_meter(entry, i, &req->params);
+ if (status) {
+ rsp->status = -1;
+ return rsp;
+ }
+ }
+
+ /* Policer */
+ for (i = 0, mask = 1; i < PIPELINE_FA_N_TC_MAX; i++, mask <<= 1) {
+ if ((mask & req->policer_update_mask) == 0)
+ continue;
+
+ flow_table_entry_set_policer(entry, i, &req->params);
+ }
+
+ /* Port */
+ if (req->port_update)
+ flow_table_entry_set_port_id(p_fa, entry, &req->params);
+
+ /* Response */
+ rsp->status = 0;
+ rsp->entry_ptr = (void *) entry;
+ return rsp;
+}
+
+void *
+pipeline_fa_msg_req_flow_config_bulk_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_actions *p_fa = (struct pipeline_flow_actions *) p;
+ struct pipeline_fa_flow_config_bulk_msg_req *req = msg;
+ struct pipeline_fa_flow_config_bulk_msg_rsp *rsp = msg;
+ uint32_t i;
+
+ for (i = 0; i < req->n_flows; i++) {
+ struct flow_table_entry *entry;
+ uint32_t j, mask;
+
+ /* Set flow table entry to default if not configured before */
+ if (req->entry_ptr[i] == NULL) {
+ struct rte_table_array_key key = {
+ .pos = req->flow_id[i] % p_fa->params.n_flows,
+ };
+
+ struct flow_table_entry entry_to_add;
+
+ int key_found, status;
+
+ flow_table_entry_set_default(p_fa, &entry_to_add);
+
+ status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &key,
+ (struct rte_pipeline_table_entry *) &entry_to_add,
+ &key_found,
+ (struct rte_pipeline_table_entry **) &entry);
+ if (status) {
+ rsp->n_flows = i;
+ return rsp;
+ }
+
+ req->entry_ptr[i] = (void *) entry;
+ } else
+ entry = (struct flow_table_entry *) req->entry_ptr[i];
+
+ /* Meter */
+ for (j = 0, mask = 1;
+ j < PIPELINE_FA_N_TC_MAX;
+ j++, mask <<= 1) {
+ int status;
+
+ if ((mask & req->meter_update_mask) == 0)
+ continue;
+
+ status = flow_table_entry_set_meter(entry,
+ j, &req->params[i]);
+ if (status) {
+ rsp->n_flows = i;
+ return rsp;
+ }
+ }
+
+ /* Policer */
+ for (j = 0, mask = 1;
+ j < PIPELINE_FA_N_TC_MAX;
+ j++, mask <<= 1) {
+ if ((mask & req->policer_update_mask) == 0)
+ continue;
+
+ flow_table_entry_set_policer(entry,
+ j, &req->params[i]);
+ }
+
+ /* Port */
+ if (req->port_update)
+ flow_table_entry_set_port_id(p_fa,
+ entry, &req->params[i]);
+ }
+
+ /* Response */
+ rsp->n_flows = i;
+ return rsp;
+}
+
+void *
+pipeline_fa_msg_req_dscp_config_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_actions *p_fa = (struct pipeline_flow_actions *) p;
+ struct pipeline_fa_dscp_config_msg_req *req = msg;
+ struct pipeline_fa_dscp_config_msg_rsp *rsp = msg;
+
+ /* Check request */
+ if ((req->dscp >= PIPELINE_FA_N_DSCP) ||
+ (req->traffic_class >= PIPELINE_FA_N_TC_MAX) ||
+ (req->color >= e_RTE_METER_COLORS)) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ p_fa->dscp[req->dscp].traffic_class = req->traffic_class;
+ p_fa->dscp[req->dscp].color = req->color;
+ rsp->status = 0;
+ return rsp;
+}
+
+void *
+pipeline_fa_msg_req_policer_stats_read_handler(__rte_unused struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_fa_policer_stats_msg_req *req = msg;
+ struct pipeline_fa_policer_stats_msg_rsp *rsp = msg;
+
+ struct flow_table_entry *entry = req->entry_ptr;
+ uint32_t policer_id = req->policer_id;
+ int clear = req->clear;
+
+ /* Check request */
+ if ((req->entry_ptr == NULL) ||
+ (req->policer_id >= PIPELINE_FA_N_TC_MAX)) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ memcpy(&rsp->stats,
+ &entry->mp[policer_id].stats,
+ sizeof(rsp->stats));
+ if (clear)
+ memset(&entry->mp[policer_id].stats,
+ 0, sizeof(entry->mp[policer_id].stats));
+ rsp->status = 0;
+ return rsp;
+}
+
+struct pipeline_be_ops pipeline_flow_actions_be_ops = {
+ .f_init = pipeline_fa_init,
+ .f_free = pipeline_fa_free,
+ .f_run = NULL,
+ .f_timer = pipeline_fa_timer,
+};
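Editorial note (not part of the patch): the per-packet policing step from pkt_work()/pkt4_work() above, restated in isolation. Nothing new is introduced here; the sketch only makes explicit that drop is either 0 or 1, so "drop ^ 1LLU" counts the packet under its final color only when it is kept, and that pkt4_work() packs four such drop bits into the mask returned through the PIPELINE_TABLE_AH_HIT_DROP_TIME handler.

/* Assumes rte_meter.h and pipeline_flow_actions_be.h are included. */
static inline uint64_t
police_one_packet(struct rte_meter_trtcm *meter,
	struct pipeline_fa_policer_params *policer,
	struct pipeline_fa_policer_stats *stats,
	uint64_t time,
	uint32_t pkt_len,
	enum rte_meter_color in_color,
	enum rte_meter_color *out_color)
{
	/* Color-aware trTCM check: the DSCP-derived input color is never improved */
	enum rte_meter_color meter_color =
		rte_meter_trtcm_color_aware_check(meter, time, pkt_len, in_color);

	/* Policer: the per-color action selects the final color and the drop decision */
	enum rte_meter_color final_color = policer->action[meter_color].color;
	uint64_t drop = policer->action[meter_color].drop;

	stats->n_pkts[final_color] += drop ^ 1LLU;   /* count only packets that are kept */
	stats->n_pkts_drop += drop;
	*out_color = final_color;

	return drop;   /* 1 = drop this packet */
}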
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h
new file mode 100644
index 00000000..456f2cca
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_actions_be.h
@@ -0,0 +1,168 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FLOW_ACTIONS_BE_H__
+#define __INCLUDE_PIPELINE_FLOW_ACTIONS_BE_H__
+
+#include <rte_meter.h>
+
+#include "pipeline_common_be.h"
+
+#ifndef PIPELINE_FA_N_TC_MAX
+#define PIPELINE_FA_N_TC_MAX 4
+#endif
+
+#define PIPELINE_FA_N_DSCP 64
+
+struct pipeline_fa_params {
+ uint32_t n_flows;
+ uint32_t n_meters_per_flow;
+ uint32_t flow_id_offset;
+ uint32_t ip_hdr_offset;
+ uint32_t color_offset;
+ uint32_t dscp_enabled;
+};
+
+int
+pipeline_fa_parse_args(struct pipeline_fa_params *p,
+ struct pipeline_params *params);
+
+struct pipeline_fa_policer_action {
+ uint32_t drop;
+ enum rte_meter_color color;
+};
+
+struct pipeline_fa_policer_params {
+ struct pipeline_fa_policer_action action[e_RTE_METER_COLORS];
+};
+
+struct pipeline_fa_flow_params {
+ struct rte_meter_trtcm_params m[PIPELINE_FA_N_TC_MAX];
+ struct pipeline_fa_policer_params p[PIPELINE_FA_N_TC_MAX];
+ uint32_t port_id;
+};
+
+int
+pipeline_fa_flow_params_set_default(struct pipeline_fa_flow_params *params);
+
+struct pipeline_fa_policer_stats {
+ uint64_t n_pkts[e_RTE_METER_COLORS];
+ uint64_t n_pkts_drop;
+};
+
+enum pipeline_fa_msg_req_type {
+ PIPELINE_FA_MSG_REQ_FLOW_CONFIG = 0,
+ PIPELINE_FA_MSG_REQ_FLOW_CONFIG_BULK,
+ PIPELINE_FA_MSG_REQ_DSCP_CONFIG,
+ PIPELINE_FA_MSG_REQ_POLICER_STATS_READ,
+ PIPELINE_FA_MSG_REQS,
+};
+
+/*
+ * MSG FLOW CONFIG
+ */
+struct pipeline_fa_flow_config_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fa_msg_req_type subtype;
+
+ void *entry_ptr;
+ uint32_t flow_id;
+
+ uint32_t meter_update_mask;
+ uint32_t policer_update_mask;
+ uint32_t port_update;
+ struct pipeline_fa_flow_params params;
+};
+
+struct pipeline_fa_flow_config_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG FLOW CONFIG BULK
+ */
+struct pipeline_fa_flow_config_bulk_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fa_msg_req_type subtype;
+
+ void **entry_ptr;
+ uint32_t *flow_id;
+ uint32_t n_flows;
+
+ uint32_t meter_update_mask;
+ uint32_t policer_update_mask;
+ uint32_t port_update;
+ struct pipeline_fa_flow_params *params;
+};
+
+struct pipeline_fa_flow_config_bulk_msg_rsp {
+ uint32_t n_flows;
+};
+
+/*
+ * MSG DSCP CONFIG
+ */
+struct pipeline_fa_dscp_config_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fa_msg_req_type subtype;
+
+ uint32_t dscp;
+ uint32_t traffic_class;
+ enum rte_meter_color color;
+};
+
+struct pipeline_fa_dscp_config_msg_rsp {
+ int status;
+};
+
+/*
+ * MSG POLICER STATS READ
+ */
+struct pipeline_fa_policer_stats_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fa_msg_req_type subtype;
+
+ void *entry_ptr;
+ uint32_t policer_id;
+ int clear;
+};
+
+struct pipeline_fa_policer_stats_msg_rsp {
+ int status;
+ struct pipeline_fa_policer_stats stats;
+};
+
+extern struct pipeline_be_ops pipeline_flow_actions_be_ops;
+
+#endif
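Editorial note (not part of the patch): a sketch of how a front end might fill a FLOW_CONFIG request for the message layout defined above. The app_msg_alloc()/app_msg_send_recv()/app_msg_free() pattern is borrowed from the flow-classification front end later in this patch; the pipeline id, timeout and meter value are illustrative assumptions.

static int
fa_configure_flow_sketch(struct app_params *app, uint32_t pipeline_id,
	uint32_t flow_id)
{
	struct pipeline_fa_flow_config_msg_req *req;
	struct pipeline_fa_flow_config_msg_rsp *rsp;
	int status;

	req = app_msg_alloc(app);
	if (req == NULL)
		return -1;

	req->type = PIPELINE_MSG_REQ_CUSTOM;
	req->subtype = PIPELINE_FA_MSG_REQ_FLOW_CONFIG;
	req->entry_ptr = NULL;          /* NULL: the handler installs defaults first */
	req->flow_id = flow_id;
	req->meter_update_mask = 1;     /* update the meter of traffic class 0 only */
	req->policer_update_mask = 0;
	req->port_update = 0;
	pipeline_fa_flow_params_set_default(&req->params);
	req->params.m[0].cir = 1000000; /* illustrative committed information rate */

	rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
	if (rsp == NULL)
		return -1;

	status = rsp->status;
	app_msg_free(app, rsp);
	return status;
}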
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
new file mode 100644
index 00000000..9ef50cc9
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
@@ -0,0 +1,1905 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_flow_classification.h"
+#include "hash_func.h"
+#include "parser.h"
+
+/*
+ * Key conversion
+ */
+
+struct pkt_key_qinq {
+ uint16_t ethertype_svlan;
+ uint16_t svlan;
+ uint16_t ethertype_cvlan;
+ uint16_t cvlan;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv4_5tuple {
+ uint8_t ttl;
+ uint8_t proto;
+ uint16_t checksum;
+ uint32_t ip_src;
+ uint32_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+} __attribute__((__packed__));
+
+struct pkt_key_ipv6_5tuple {
+ uint16_t payload_length;
+ uint8_t proto;
+ uint8_t hop_limit;
+ uint8_t ip_src[16];
+ uint8_t ip_dst[16];
+ uint16_t port_src;
+ uint16_t port_dst;
+} __attribute__((__packed__));
+
+static int
+app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
+ uint8_t *key_out,
+ uint32_t *signature)
+{
+ uint8_t buffer[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+ void *key_buffer = (key_out) ? key_out : buffer;
+
+ switch (key_in->type) {
+ case FLOW_KEY_QINQ:
+ {
+ struct pkt_key_qinq *qinq = key_buffer;
+
+ qinq->ethertype_svlan = 0;
+ qinq->svlan = rte_cpu_to_be_16(key_in->key.qinq.svlan);
+ qinq->ethertype_cvlan = 0;
+ qinq->cvlan = rte_cpu_to_be_16(key_in->key.qinq.cvlan);
+
+ if (signature)
+ *signature = (uint32_t) hash_default_key8(qinq, 8, 0);
+ return 0;
+ }
+
+ case FLOW_KEY_IPV4_5TUPLE:
+ {
+ struct pkt_key_ipv4_5tuple *ipv4 = key_buffer;
+
+ ipv4->ttl = 0;
+ ipv4->proto = key_in->key.ipv4_5tuple.proto;
+ ipv4->checksum = 0;
+ ipv4->ip_src = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_src);
+ ipv4->ip_dst = rte_cpu_to_be_32(key_in->key.ipv4_5tuple.ip_dst);
+ ipv4->port_src = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_src);
+ ipv4->port_dst = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_dst);
+
+ if (signature)
+ *signature = (uint32_t) hash_default_key16(ipv4, 16, 0);
+ return 0;
+ }
+
+ case FLOW_KEY_IPV6_5TUPLE:
+ {
+ struct pkt_key_ipv6_5tuple *ipv6 = key_buffer;
+
+ memset(ipv6, 0, 64);
+ ipv6->payload_length = 0;
+ ipv6->proto = key_in->key.ipv6_5tuple.proto;
+ ipv6->hop_limit = 0;
+ memcpy(&ipv6->ip_src, &key_in->key.ipv6_5tuple.ip_src, 16);
+ memcpy(&ipv6->ip_dst, &key_in->key.ipv6_5tuple.ip_dst, 16);
+ ipv6->port_src = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_src);
+ ipv6->port_dst = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_dst);
+
+ if (signature)
+ *signature = (uint32_t) hash_default_key64(ipv6, 64, 0);
+ return 0;
+ }
+
+ default:
+ return -1;
+ }
+}
+
+/*
+ * Flow classification pipeline
+ */
+
+struct app_pipeline_fc_flow {
+ struct pipeline_fc_key key;
+ uint32_t port_id;
+ uint32_t flow_id;
+ uint32_t signature;
+ void *entry_ptr;
+
+ TAILQ_ENTRY(app_pipeline_fc_flow) node;
+};
+
+#define N_BUCKETS 65536
+
+struct app_pipeline_fc {
+ /* Parameters */
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+
+ /* Flows */
+ TAILQ_HEAD(, app_pipeline_fc_flow) flows[N_BUCKETS];
+ uint32_t n_flows;
+
+ /* Default flow */
+ uint32_t default_flow_present;
+ uint32_t default_flow_port_id;
+ void *default_flow_entry_ptr;
+};
+
+static struct app_pipeline_fc_flow *
+app_pipeline_fc_flow_find(struct app_pipeline_fc *p,
+ struct pipeline_fc_key *key)
+{
+ struct app_pipeline_fc_flow *f;
+ uint32_t signature, bucket_id;
+
+ app_pipeline_fc_key_convert(key, NULL, &signature);
+ bucket_id = signature & (N_BUCKETS - 1);
+
+ TAILQ_FOREACH(f, &p->flows[bucket_id], node)
+ if ((signature == f->signature) &&
+ (memcmp(key,
+ &f->key,
+ sizeof(struct pipeline_fc_key)) == 0))
+ return f;
+
+ return NULL;
+}
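Editorial note: the front end shadows every configured flow in this 65536-bucket list so that lookups, deletions and "flow ls" never have to query the back-end table. Because N_BUCKETS is a power of two, the bucket index is simply the low 16 bits of the 32-bit key signature (for example, signature 0x9ABC1234 lands in bucket 0x1234), and the full signature stored with each flow is compared before the byte-wise key comparison to keep mismatches cheap.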
+
+static void*
+app_pipeline_fc_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct app_pipeline_fc *p;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct app_pipeline_fc));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+
+ for (i = 0; i < N_BUCKETS; i++)
+ TAILQ_INIT(&p->flows[i]);
+ p->n_flows = 0;
+
+ return (void *) p;
+}
+
+static int
+app_pipeline_fc_free(void *pipeline)
+{
+ struct app_pipeline_fc *p = pipeline;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ for (i = 0; i < N_BUCKETS; i++)
+ while (!TAILQ_EMPTY(&p->flows[i])) {
+ struct app_pipeline_fc_flow *flow;
+
+ flow = TAILQ_FIRST(&p->flows[i]);
+ TAILQ_REMOVE(&p->flows[i], flow, node);
+ rte_free(flow);
+ }
+
+ rte_free(p);
+ return 0;
+}
+
+static int
+app_pipeline_fc_key_check(struct pipeline_fc_key *key)
+{
+ switch (key->type) {
+ case FLOW_KEY_QINQ:
+ {
+ uint16_t svlan = key->key.qinq.svlan;
+ uint16_t cvlan = key->key.qinq.cvlan;
+
+ if ((svlan & 0xF000) ||
+ (cvlan & 0xF000))
+ return -1;
+
+ return 0;
+ }
+
+ case FLOW_KEY_IPV4_5TUPLE:
+ return 0;
+
+ case FLOW_KEY_IPV6_5TUPLE:
+ return 0;
+
+ default:
+ return -1;
+ }
+}
+
+int
+app_pipeline_fc_load_file_qinq(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (port_ids == NULL) ||
+ (flow_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ uint16_t svlan, cvlan;
+ uint32_t portid, flowid;
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error1;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 7) ||
+ strcmp(tokens[0], "qinq") ||
+ parser_read_uint16(&svlan, tokens[1]) ||
+ parser_read_uint16(&cvlan, tokens[2]) ||
+ strcmp(tokens[3], "port") ||
+ parser_read_uint32(&portid, tokens[4]) ||
+ strcmp(tokens[5], "id") ||
+ parser_read_uint32(&flowid, tokens[6]))
+ goto error1;
+
+ keys[i].type = FLOW_KEY_QINQ;
+ keys[i].key.qinq.svlan = svlan;
+ keys[i].key.qinq.cvlan = cvlan;
+
+ port_ids[i] = portid;
+ flow_ids[i] = flowid;
+
+ if (app_pipeline_fc_key_check(&keys[i]))
+ goto error1;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error1:
+ *line = l;
+ fclose(f);
+ return -1;
+}
+
+int
+app_pipeline_fc_load_file_ipv4(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (port_ids == NULL) ||
+ (flow_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ struct in_addr sipaddr, dipaddr;
+ uint16_t sport, dport;
+ uint8_t proto;
+ uint32_t portid, flowid;
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error2;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 10) ||
+ strcmp(tokens[0], "ipv4") ||
+ parse_ipv4_addr(tokens[1], &sipaddr) ||
+ parse_ipv4_addr(tokens[2], &dipaddr) ||
+ parser_read_uint16(&sport, tokens[3]) ||
+ parser_read_uint16(&dport, tokens[4]) ||
+ parser_read_uint8(&proto, tokens[5]) ||
+ strcmp(tokens[6], "port") ||
+ parser_read_uint32(&portid, tokens[7]) ||
+ strcmp(tokens[8], "id") ||
+ parser_read_uint32(&flowid, tokens[9]))
+ goto error2;
+
+ keys[i].type = FLOW_KEY_IPV4_5TUPLE;
+ keys[i].key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
+ keys[i].key.ipv4_5tuple.port_src = sport;
+ keys[i].key.ipv4_5tuple.port_dst = dport;
+ keys[i].key.ipv4_5tuple.proto = proto;
+
+ port_ids[i] = portid;
+ flow_ids[i] = flowid;
+
+ if (app_pipeline_fc_key_check(&keys[i]))
+ goto error2;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error2:
+ *line = l;
+ fclose(f);
+ return -1;
+}
+
+int
+app_pipeline_fc_load_file_ipv6(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line)
+{
+ FILE *f = NULL;
+ char file_buf[1024];
+ uint32_t i, l;
+
+ /* Check input arguments */
+ if ((filename == NULL) ||
+ (keys == NULL) ||
+ (port_ids == NULL) ||
+ (flow_ids == NULL) ||
+ (n_keys == NULL) ||
+ (*n_keys == 0) ||
+ (line == NULL)) {
+ if (line)
+ *line = 0;
+ return -1;
+ }
+
+ /* Open input file */
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ *line = 0;
+ return -1;
+ }
+
+ /* Read file */
+ for (i = 0, l = 1; i < *n_keys; l++) {
+ char *tokens[32];
+ uint32_t n_tokens = RTE_DIM(tokens);
+
+ struct in6_addr sipaddr, dipaddr;
+ uint16_t sport, dport;
+ uint8_t proto;
+ uint32_t portid, flowid;
+ int status;
+
+ if (fgets(file_buf, sizeof(file_buf), f) == NULL)
+ break;
+
+ status = parse_tokenize_string(file_buf, tokens, &n_tokens);
+ if (status)
+ goto error3;
+
+ if ((n_tokens == 0) || (tokens[0][0] == '#'))
+ continue;
+
+ if ((n_tokens != 10) ||
+ strcmp(tokens[0], "ipv6") ||
+ parse_ipv6_addr(tokens[1], &sipaddr) ||
+ parse_ipv6_addr(tokens[2], &dipaddr) ||
+ parser_read_uint16(&sport, tokens[3]) ||
+ parser_read_uint16(&dport, tokens[4]) ||
+ parser_read_uint8(&proto, tokens[5]) ||
+ strcmp(tokens[6], "port") ||
+ parser_read_uint32(&portid, tokens[7]) ||
+ strcmp(tokens[8], "id") ||
+ parser_read_uint32(&flowid, tokens[9]))
+ goto error3;
+
+ keys[i].type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(keys[i].key.ipv6_5tuple.ip_src,
+ sipaddr.s6_addr,
+ sizeof(sipaddr.s6_addr));
+ memcpy(keys[i].key.ipv6_5tuple.ip_dst,
+ dipaddr.s6_addr,
+ sizeof(dipaddr.s6_addr));
+ keys[i].key.ipv6_5tuple.port_src = sport;
+ keys[i].key.ipv6_5tuple.port_dst = dport;
+ keys[i].key.ipv6_5tuple.proto = proto;
+
+ port_ids[i] = portid;
+ flow_ids[i] = flowid;
+
+ if (app_pipeline_fc_key_check(&keys[i]))
+ goto error3;
+
+ i++;
+ }
+
+ /* Close file */
+ *n_keys = i;
+ fclose(f);
+ return 0;
+
+error3:
+ *line = l;
+ fclose(f);
+ return -1;
+}
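Editorial note (not part of the patch): from the tokenizing in the three loaders above, each line of a bulk-load file holds one flow in one of the following shapes, with lines starting with '#' treated as comments; the concrete values are illustrative:

	qinq 16 32 port 0 id 100
	ipv4 10.0.0.1 192.168.1.1 100 200 6 port 1 id 101
	ipv6 2001:db8::1 2001:db8::2 100 200 17 port 2 id 102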
+
+
+
+int
+app_pipeline_fc_add(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key,
+ uint32_t port_id,
+ uint32_t flow_id)
+{
+ struct app_pipeline_fc *p;
+ struct app_pipeline_fc_flow *flow;
+
+ struct pipeline_fc_add_msg_req *req;
+ struct pipeline_fc_add_msg_rsp *rsp;
+
+ uint32_t signature;
+ int new_flow;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ if (app_pipeline_fc_key_check(key) != 0)
+ return -1;
+
+ /* Find existing flow or allocate new flow */
+ flow = app_pipeline_fc_flow_find(p, key);
+ new_flow = (flow == NULL);
+ if (flow == NULL) {
+ flow = rte_malloc(NULL, sizeof(*flow), RTE_CACHE_LINE_SIZE);
+
+ if (flow == NULL)
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD;
+ app_pipeline_fc_key_convert(key, req->key, &signature);
+ req->port_id = port_id;
+ req->flow_id = flow_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ if (new_flow)
+ rte_free(flow);
+ return -1;
+ }
+
+ /* Read response and write flow */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL) ||
+ ((new_flow == 0) && (rsp->key_found == 0)) ||
+ ((new_flow == 1) && (rsp->key_found == 1))) {
+ app_msg_free(app, rsp);
+ if (new_flow)
+ rte_free(flow);
+ return -1;
+ }
+
+ memset(&flow->key, 0, sizeof(flow->key));
+ memcpy(&flow->key, key, sizeof(flow->key));
+ flow->port_id = port_id;
+ flow->flow_id = flow_id;
+ flow->signature = signature;
+ flow->entry_ptr = rsp->entry_ptr;
+
+ /* Commit rule */
+ if (new_flow) {
+ uint32_t bucket_id = signature & (N_BUCKETS - 1);
+
+ TAILQ_INSERT_TAIL(&p->flows[bucket_id], flow, node);
+ p->n_flows++;
+ }
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fc_add_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key,
+ uint32_t *port_id,
+ uint32_t *flow_id,
+ uint32_t n_keys)
+{
+ struct app_pipeline_fc *p;
+ struct pipeline_fc_add_bulk_msg_req *req;
+ struct pipeline_fc_add_bulk_msg_rsp *rsp;
+
+ struct app_pipeline_fc_flow **flow;
+ uint32_t *signature;
+ int *new_flow;
+ struct pipeline_fc_add_bulk_flow_req *flow_req;
+ struct pipeline_fc_add_bulk_flow_rsp *flow_rsp;
+
+ uint32_t i;
+ int status;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (port_id == NULL) ||
+ (flow_id == NULL) ||
+ (n_keys == 0))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ for (i = 0; i < n_keys; i++)
+ if (port_id[i] >= p->n_ports_out)
+ return -1;
+
+ for (i = 0; i < n_keys; i++)
+ if (app_pipeline_fc_key_check(&key[i]) != 0)
+ return -1;
+
+ /* Memory allocation */
+ flow = rte_malloc(NULL,
+ n_keys * sizeof(struct app_pipeline_fc_flow *),
+ RTE_CACHE_LINE_SIZE);
+ if (flow == NULL)
+ return -1;
+
+ signature = rte_malloc(NULL,
+ n_keys * sizeof(uint32_t),
+ RTE_CACHE_LINE_SIZE);
+ if (signature == NULL) {
+ rte_free(flow);
+ return -1;
+ }
+
+ new_flow = rte_malloc(
+ NULL,
+ n_keys * sizeof(int),
+ RTE_CACHE_LINE_SIZE);
+ if (new_flow == NULL) {
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ flow_req = rte_malloc(NULL,
+ n_keys * sizeof(struct pipeline_fc_add_bulk_flow_req),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_req == NULL) {
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ flow_rsp = rte_malloc(NULL,
+ n_keys * sizeof(struct pipeline_fc_add_bulk_flow_rsp),
+ RTE_CACHE_LINE_SIZE);
+ if (flow_rsp == NULL) {
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ /* Find existing flow or allocate new flow */
+ for (i = 0; i < n_keys; i++) {
+ flow[i] = app_pipeline_fc_flow_find(p, &key[i]);
+ new_flow[i] = (flow[i] == NULL);
+ if (flow[i] == NULL) {
+ flow[i] = rte_zmalloc(NULL,
+ sizeof(struct app_pipeline_fc_flow),
+ RTE_CACHE_LINE_SIZE);
+
+ if (flow[i] == NULL) {
+ uint32_t j;
+
+ for (j = 0; j < i; j++)
+ if (new_flow[j])
+ rte_free(flow[j]);
+
+ rte_free(flow_rsp);
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+ }
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ for (i = 0; i < n_keys; i++)
+ if (new_flow[i])
+ rte_free(flow[i]);
+
+ rte_free(flow_rsp);
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ app_pipeline_fc_key_convert(&key[i],
+ flow_req[i].key,
+ &signature[i]);
+ flow_req[i].port_id = port_id[i];
+ flow_req[i].flow_id = flow_id[i];
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD_BULK;
+ req->req = flow_req;
+ req->rsp = flow_rsp;
+ req->n_keys = n_keys;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, 10000);
+ if (rsp == NULL) {
+ for (i = 0; i < n_keys; i++)
+ if (new_flow[i])
+ rte_free(flow[i]);
+
+ rte_free(flow_rsp);
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+ return -1;
+ }
+
+ /* Read response */
+ status = 0;
+
+ for (i = 0; i < rsp->n_keys; i++)
+ if ((flow_rsp[i].entry_ptr == NULL) ||
+ ((new_flow[i] == 0) && (flow_rsp[i].key_found == 0)) ||
+ ((new_flow[i] == 1) && (flow_rsp[i].key_found == 1)))
+ status = -1;
+
+ if (rsp->n_keys < n_keys)
+ status = -1;
+
+ /* Commit flows */
+ for (i = 0; i < rsp->n_keys; i++) {
+ memcpy(&flow[i]->key, &key[i], sizeof(flow[i]->key));
+ flow[i]->port_id = port_id[i];
+ flow[i]->flow_id = flow_id[i];
+ flow[i]->signature = signature[i];
+ flow[i]->entry_ptr = flow_rsp[i].entry_ptr;
+
+ if (new_flow[i]) {
+ uint32_t bucket_id = signature[i] & (N_BUCKETS - 1);
+
+ TAILQ_INSERT_TAIL(&p->flows[bucket_id], flow[i], node);
+ p->n_flows++;
+ }
+ }
+
+ /* Free resources */
+ app_msg_free(app, rsp);
+
+ for (i = rsp->n_keys; i < n_keys; i++)
+ if (new_flow[i])
+ rte_free(flow[i]);
+
+ rte_free(flow_rsp);
+ rte_free(flow_req);
+ rte_free(new_flow);
+ rte_free(signature);
+ rte_free(flow);
+
+ return status;
+}
+
+int
+app_pipeline_fc_del(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key)
+{
+ struct app_pipeline_fc *p;
+ struct app_pipeline_fc_flow *flow;
+
+ struct pipeline_fc_del_msg_req *req;
+ struct pipeline_fc_del_msg_rsp *rsp;
+
+ uint32_t signature, bucket_id;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ if (app_pipeline_fc_key_check(key) != 0)
+ return -1;
+
+ /* Find rule */
+ flow = app_pipeline_fc_flow_find(p, key);
+ if (flow == NULL)
+ return 0;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_DEL;
+ app_pipeline_fc_key_convert(key, req->key, &signature);
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status || !rsp->key_found) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Remove rule */
+ bucket_id = signature & (N_BUCKETS - 1);
+ TAILQ_REMOVE(&p->flows[bucket_id], flow, node);
+ p->n_flows--;
+ rte_free(flow);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fc_add_default(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct app_pipeline_fc *p;
+
+ struct pipeline_fc_add_default_msg_req *req;
+ struct pipeline_fc_add_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_ADD_DEFAULT;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write flow */
+ if (rsp->status || (rsp->entry_ptr == NULL)) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ p->default_flow_port_id = port_id;
+ p->default_flow_entry_ptr = rsp->entry_ptr;
+
+ /* Commit default flow */
+ p->default_flow_present = 1;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_fc_del_default(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_fc *p;
+
+ struct pipeline_fc_del_default_msg_req *req;
+ struct pipeline_fc_del_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -EINVAL;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_FC_MSG_REQ_FLOW_DEL_DEFAULT;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Commit default flow */
+ p->default_flow_present = 0;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * Flow ls
+ */
+
+static void
+print_fc_qinq_flow(struct app_pipeline_fc_flow *flow)
+{
+ printf("(SVLAN = %" PRIu32 ", "
+ "CVLAN = %" PRIu32 ") => "
+ "Port = %" PRIu32 ", "
+ "Flow ID = %" PRIu32 ", "
+ "(signature = 0x%08" PRIx32 ", "
+ "entry_ptr = %p)\n",
+
+ flow->key.key.qinq.svlan,
+ flow->key.key.qinq.cvlan,
+ flow->port_id,
+ flow->flow_id,
+ flow->signature,
+ flow->entry_ptr);
+}
+
+static void
+print_fc_ipv4_5tuple_flow(struct app_pipeline_fc_flow *flow)
+{
+ printf("(SA = %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 ", "
+ "DA = %" PRIu32 ".%" PRIu32 ".%" PRIu32 ".%" PRIu32 ", "
+ "SP = %" PRIu32 ", "
+ "DP = %" PRIu32 ", "
+ "Proto = %" PRIu32 ") => "
+ "Port = %" PRIu32 ", "
+ "Flow ID = %" PRIu32 " "
+ "(signature = 0x%08" PRIx32 ", "
+ "entry_ptr = %p)\n",
+
+ (flow->key.key.ipv4_5tuple.ip_src >> 24) & 0xFF,
+ (flow->key.key.ipv4_5tuple.ip_src >> 16) & 0xFF,
+ (flow->key.key.ipv4_5tuple.ip_src >> 8) & 0xFF,
+ flow->key.key.ipv4_5tuple.ip_src & 0xFF,
+
+ (flow->key.key.ipv4_5tuple.ip_dst >> 24) & 0xFF,
+ (flow->key.key.ipv4_5tuple.ip_dst >> 16) & 0xFF,
+ (flow->key.key.ipv4_5tuple.ip_dst >> 8) & 0xFF,
+ flow->key.key.ipv4_5tuple.ip_dst & 0xFF,
+
+ flow->key.key.ipv4_5tuple.port_src,
+ flow->key.key.ipv4_5tuple.port_dst,
+
+ flow->key.key.ipv4_5tuple.proto,
+
+ flow->port_id,
+ flow->flow_id,
+ flow->signature,
+ flow->entry_ptr);
+}
+
+static void
+print_fc_ipv6_5tuple_flow(struct app_pipeline_fc_flow *flow)
+{
+ printf("(SA = %02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32 ", "
+ "DA = %02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32
+ ":%02" PRIx32 "%02" PRIx32 ":%02" PRIx32 "%02" PRIx32 ", "
+ "SP = %" PRIu32 ", "
+ "DP = %" PRIu32 " "
+ "Proto = %" PRIu32 " "
+ "=> Port = %" PRIu32 ", "
+ "Flow ID = %" PRIu32 " "
+ "(signature = 0x%08" PRIx32 ", "
+ "entry_ptr = %p)\n",
+
+ flow->key.key.ipv6_5tuple.ip_src[0],
+ flow->key.key.ipv6_5tuple.ip_src[1],
+ flow->key.key.ipv6_5tuple.ip_src[2],
+ flow->key.key.ipv6_5tuple.ip_src[3],
+ flow->key.key.ipv6_5tuple.ip_src[4],
+ flow->key.key.ipv6_5tuple.ip_src[5],
+ flow->key.key.ipv6_5tuple.ip_src[6],
+ flow->key.key.ipv6_5tuple.ip_src[7],
+ flow->key.key.ipv6_5tuple.ip_src[8],
+ flow->key.key.ipv6_5tuple.ip_src[9],
+ flow->key.key.ipv6_5tuple.ip_src[10],
+ flow->key.key.ipv6_5tuple.ip_src[11],
+ flow->key.key.ipv6_5tuple.ip_src[12],
+ flow->key.key.ipv6_5tuple.ip_src[13],
+ flow->key.key.ipv6_5tuple.ip_src[14],
+ flow->key.key.ipv6_5tuple.ip_src[15],
+
+ flow->key.key.ipv6_5tuple.ip_dst[0],
+ flow->key.key.ipv6_5tuple.ip_dst[1],
+ flow->key.key.ipv6_5tuple.ip_dst[2],
+ flow->key.key.ipv6_5tuple.ip_dst[3],
+ flow->key.key.ipv6_5tuple.ip_dst[4],
+ flow->key.key.ipv6_5tuple.ip_dst[5],
+ flow->key.key.ipv6_5tuple.ip_dst[6],
+ flow->key.key.ipv6_5tuple.ip_dst[7],
+ flow->key.key.ipv6_5tuple.ip_dst[8],
+ flow->key.key.ipv6_5tuple.ip_dst[9],
+ flow->key.key.ipv6_5tuple.ip_dst[10],
+ flow->key.key.ipv6_5tuple.ip_dst[11],
+ flow->key.key.ipv6_5tuple.ip_dst[12],
+ flow->key.key.ipv6_5tuple.ip_dst[13],
+ flow->key.key.ipv6_5tuple.ip_dst[14],
+ flow->key.key.ipv6_5tuple.ip_dst[15],
+
+ flow->key.key.ipv6_5tuple.port_src,
+ flow->key.key.ipv6_5tuple.port_dst,
+
+ flow->key.key.ipv6_5tuple.proto,
+
+ flow->port_id,
+ flow->flow_id,
+ flow->signature,
+ flow->entry_ptr);
+}
+
+static void
+print_fc_flow(struct app_pipeline_fc_flow *flow)
+{
+ switch (flow->key.type) {
+ case FLOW_KEY_QINQ:
+ print_fc_qinq_flow(flow);
+ break;
+
+ case FLOW_KEY_IPV4_5TUPLE:
+ print_fc_ipv4_5tuple_flow(flow);
+ break;
+
+ case FLOW_KEY_IPV6_5TUPLE:
+ print_fc_ipv6_5tuple_flow(flow);
+ break;
+ }
+}
+
+static int
+app_pipeline_fc_ls(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_fc *p;
+ struct app_pipeline_fc_flow *flow;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_flow_classification);
+ if (p == NULL)
+ return -1;
+
+ for (i = 0; i < N_BUCKETS; i++)
+ TAILQ_FOREACH(flow, &p->flows[i], node)
+ print_fc_flow(flow);
+
+ if (p->default_flow_present)
+ printf("Default flow: port %" PRIu32 " (entry ptr = %p)\n",
+ p->default_flow_port_id,
+ p->default_flow_entry_ptr);
+ else
+ printf("Default: DROP\n");
+
+ return 0;
+}
+/*
+ * flow
+ *
+ * flow add:
+ * p <pipelineid> flow add qinq <svlan> <cvlan> port <portid> id <flowid>
+ * p <pipelineid> flow add qinq bulk <file>
+ * p <pipelineid> flow add ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
+ * p <pipelineid> flow add ipv4 bulk <file>
+ * p <pipelineid> flow add ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto> port <portid> id <flowid>
+ * p <pipelineid> flow add ipv6 bulk <file>
+ *
+ * flow add default:
+ * p <pipelineid> flow add default <portid>
+ *
+ * flow del:
+ * p <pipelineid> flow del qinq <svlan> <cvlan>
+ * p <pipelineid> flow del ipv4 <sipaddr> <dipaddr> <sport> <dport> <proto>
+ * p <pipelineid> flow del ipv6 <sipaddr> <dipaddr> <sport> <dport> <proto>
+ *
+ * flow del default:
+ * p <pipelineid> flow del default
+ *
+ * flow ls:
+ * p <pipelineid> flow ls
+ */
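Editorial note (not part of the patch): a concrete session against pipeline 1 might therefore look as follows (all values illustrative):

	p 1 flow add qinq 16 32 port 0 id 100
	p 1 flow add ipv4 10.0.0.1 192.168.1.1 100 200 6 port 1 id 101
	p 1 flow add default 2
	p 1 flow ls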
+
+struct cmd_flow_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t flow_string;
+ cmdline_multi_string_t multi_string;
+};
+
+static void
+cmd_flow_parsed(void *parsed_result,
+ __attribute__((unused)) struct cmdline *cl,
+ void *data)
+{
+ struct cmd_flow_result *results = parsed_result;
+ struct app_params *app = data;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
+
+ status = parse_tokenize_string(results->multi_string, tokens, &n_tokens);
+ if (status) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "flow");
+ return;
+ }
+
+ /* flow add qinq */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "qinq") == 0) &&
+ strcmp(tokens[2], "bulk")) {
+ struct pipeline_fc_key key;
+ uint32_t svlan;
+ uint32_t cvlan;
+ uint32_t port_id;
+ uint32_t flow_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 8) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add qinq");
+ return;
+ }
+
+ if (parser_read_uint32(&svlan, tokens[2]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "svlan");
+ return;
+ }
+
+ if (parser_read_uint32(&cvlan, tokens[3]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "cvlan");
+ return;
+ }
+
+ if (strcmp(tokens[4], "port") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ if (strcmp(tokens[6], "id") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (parser_read_uint32(&flow_id, tokens[7]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
+
+ key.type = FLOW_KEY_QINQ;
+ key.key.qinq.svlan = svlan;
+ key.key.qinq.cvlan = cvlan;
+
+ status = app_pipeline_fc_add(app,
+ results->pipeline_id,
+ &key,
+ port_id,
+ flow_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add qinq");
+
+ return;
+ } /* flow add qinq */
+
+ /* flow add ipv4 */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0) &&
+ strcmp(tokens[2], "bulk")) {
+ struct pipeline_fc_key key;
+ struct in_addr sipaddr;
+ struct in_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+ uint32_t port_id;
+ uint32_t flow_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 11) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv4");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv4addr");
+ return;
+ }
+ if (parse_ipv4_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv4addr");
+ return;
+ }
+
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
+
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
+
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
+
+ if (strcmp(tokens[7], "port") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[8]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ if (strcmp(tokens[9], "id") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (parser_read_uint32(&flow_id, tokens[10]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
+
+ key.type = FLOW_KEY_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.port_src = sport;
+ key.key.ipv4_5tuple.port_dst = dport;
+ key.key.ipv4_5tuple.proto = proto;
+
+ status = app_pipeline_fc_add(app,
+ results->pipeline_id,
+ &key,
+ port_id,
+ flow_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv4");
+
+ return;
+ } /* flow add ipv4 */
+
+ /* flow add ipv6 */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv6") == 0) &&
+ strcmp(tokens[2], "bulk")) {
+ struct pipeline_fc_key key;
+ struct in6_addr sipaddr;
+ struct in6_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+ uint32_t port_id;
+ uint32_t flow_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 11) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv6");
+ return;
+ }
+
+ if (parse_ipv6_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv6addr");
+ return;
+ }
+ if (parse_ipv6_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv6addr");
+ return;
+ }
+
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
+
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
+
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
+
+ if (strcmp(tokens[7], "port") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[8]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ if (strcmp(tokens[9], "id") != 0) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "id");
+ return;
+ }
+
+ if (parser_read_uint32(&flow_id, tokens[10]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "flowid");
+ return;
+ }
+
+ key.type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(key.key.ipv6_5tuple.ip_src, (void *)&sipaddr, 16);
+ memcpy(key.key.ipv6_5tuple.ip_dst, (void *)&dipaddr, 16);
+ key.key.ipv6_5tuple.port_src = sport;
+ key.key.ipv6_5tuple.port_dst = dport;
+ key.key.ipv6_5tuple.proto = proto;
+
+ status = app_pipeline_fc_add(app,
+ results->pipeline_id,
+ &key,
+ port_id,
+ flow_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv6");
+
+ return;
+ } /* flow add ipv6 */
+
+ /* flow add qinq bulk */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "qinq") == 0) &&
+ (strcmp(tokens[2], "bulk") == 0)) {
+ struct pipeline_fc_key *keys;
+ uint32_t *port_ids, *flow_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add qinq bulk");
+ return;
+ }
+
+ filename = tokens[3];
+
+ n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
+ if (keys == NULL)
+ return;
+ memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
+
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ free(keys);
+ return;
+ }
+
+ flow_ids = malloc(n_keys * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ free(port_ids);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_fc_load_file_qinq(filename,
+ keys,
+ port_ids,
+ flow_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_fc_add_bulk(app,
+ results->pipeline_id,
+ keys,
+ port_ids,
+ flow_ids,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add qinq bulk");
+
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ } /* flow add qinq bulk */
+
+ /* flow add ipv4 bulk */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0) &&
+ (strcmp(tokens[2], "bulk") == 0)) {
+ struct pipeline_fc_key *keys;
+ uint32_t *port_ids, *flow_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv4 bulk");
+ return;
+ }
+
+ filename = tokens[3];
+
+ n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
+ if (keys == NULL)
+ return;
+ memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
+
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ free(keys);
+ return;
+ }
+
+ flow_ids = malloc(n_keys * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ free(port_ids);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_fc_load_file_ipv4(filename,
+ keys,
+ port_ids,
+ flow_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_fc_add_bulk(app,
+ results->pipeline_id,
+ keys,
+ port_ids,
+ flow_ids,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv4 bulk");
+
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ } /* flow add ipv4 bulk */
+
+ /* flow add ipv6 bulk */
+ if ((n_tokens >= 3) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "ipv6") == 0) &&
+ (strcmp(tokens[2], "bulk") == 0)) {
+ struct pipeline_fc_key *keys;
+ uint32_t *port_ids, *flow_ids, n_keys, line;
+ char *filename;
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add ipv6 bulk");
+ return;
+ }
+
+ filename = tokens[3];
+
+ n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE;
+ keys = malloc(n_keys * sizeof(struct pipeline_fc_key));
+ if (keys == NULL)
+ return;
+ memset(keys, 0, n_keys * sizeof(struct pipeline_fc_key));
+
+ port_ids = malloc(n_keys * sizeof(uint32_t));
+ if (port_ids == NULL) {
+ free(keys);
+ return;
+ }
+
+ flow_ids = malloc(n_keys * sizeof(uint32_t));
+ if (flow_ids == NULL) {
+ free(port_ids);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_fc_load_file_ipv6(filename,
+ keys,
+ port_ids,
+ flow_ids,
+ &n_keys,
+ &line);
+ if (status != 0) {
+ printf(CMD_MSG_FILE_ERR, filename, line);
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ }
+
+ status = app_pipeline_fc_add_bulk(app,
+ results->pipeline_id,
+ keys,
+ port_ids,
+ flow_ids,
+ n_keys);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add ipv6 bulk");
+
+ free(flow_ids);
+ free(port_ids);
+ free(keys);
+ return;
+ } /* flow add ipv6 bulk */
+
+	/* flow add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow add default");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[2]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ status = app_pipeline_fc_add_default(app,
+ results->pipeline_id,
+ port_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow add default");
+
+ return;
+ } /* flow add default */
+
+ /* flow del qinq */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "qinq") == 0)) {
+ struct pipeline_fc_key key;
+ uint32_t svlan;
+ uint32_t cvlan;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del qinq");
+ return;
+ }
+
+ if (parser_read_uint32(&svlan, tokens[2]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "svlan");
+ return;
+ }
+
+ if (parser_read_uint32(&cvlan, tokens[3]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "cvlan");
+ return;
+ }
+
+ key.type = FLOW_KEY_QINQ;
+ key.key.qinq.svlan = svlan;
+ key.key.qinq.cvlan = cvlan;
+
+ status = app_pipeline_fc_del(app,
+ results->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del qinq");
+
+ return;
+ } /* flow del qinq */
+
+ /* flow del ipv4 */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "ipv4") == 0)) {
+ struct pipeline_fc_key key;
+ struct in_addr sipaddr;
+ struct in_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 7) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del ipv4");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv4addr");
+ return;
+ }
+ if (parse_ipv4_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv4addr");
+ return;
+ }
+
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
+
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
+
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
+
+ key.type = FLOW_KEY_IPV4_5TUPLE;
+ key.key.ipv4_5tuple.ip_src = rte_be_to_cpu_32(sipaddr.s_addr);
+ key.key.ipv4_5tuple.ip_dst = rte_be_to_cpu_32(dipaddr.s_addr);
+ key.key.ipv4_5tuple.port_src = sport;
+ key.key.ipv4_5tuple.port_dst = dport;
+ key.key.ipv4_5tuple.proto = proto;
+
+ status = app_pipeline_fc_del(app,
+ results->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del ipv4");
+
+ return;
+ } /* flow del ipv4 */
+
+ /* flow del ipv6 */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "ipv6") == 0)) {
+ struct pipeline_fc_key key;
+ struct in6_addr sipaddr;
+ struct in6_addr dipaddr;
+ uint32_t sport;
+ uint32_t dport;
+ uint32_t proto;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 7) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del ipv6");
+ return;
+ }
+
+ if (parse_ipv6_addr(tokens[2], &sipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sipv6addr");
+ return;
+ }
+
+ if (parse_ipv6_addr(tokens[3], &dipaddr) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dipv6addr");
+ return;
+ }
+
+ if (parser_read_uint32(&sport, tokens[4]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "sport");
+ return;
+ }
+
+ if (parser_read_uint32(&dport, tokens[5]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "dport");
+ return;
+ }
+
+ if (parser_read_uint32(&proto, tokens[6]) != 0) {
+ printf(CMD_MSG_INVALID_ARG, "proto");
+ return;
+ }
+
+ key.type = FLOW_KEY_IPV6_5TUPLE;
+ memcpy(key.key.ipv6_5tuple.ip_src, &sipaddr, 16);
+ memcpy(key.key.ipv6_5tuple.ip_dst, &dipaddr, 16);
+ key.key.ipv6_5tuple.port_src = sport;
+ key.key.ipv6_5tuple.port_dst = dport;
+ key.key.ipv6_5tuple.proto = proto;
+
+ status = app_pipeline_fc_del(app,
+ results->pipeline_id,
+ &key);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del ipv6");
+
+ return;
+ } /* flow del ipv6 */
+
+	/* flow del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow del default");
+ return;
+ }
+
+ status = app_pipeline_fc_del_default(app,
+ results->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow del default");
+
+ return;
+ } /* flow del default */
+
+ /* flow ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "flow ls");
+ return;
+ }
+
+ status = app_pipeline_fc_ls(app, results->pipeline_id);
+ if (status)
+ printf(CMD_MSG_FAIL, "flow ls");
+
+ return;
+ } /* flow ls */
+
+ printf(CMD_MSG_MISMATCH_ARGS, "flow");
+}
+
+static cmdline_parse_token_string_t cmd_flow_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_flow_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_flow_result, pipeline_id, UINT32);
+
+static cmdline_parse_token_string_t cmd_flow_flow_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_result, flow_string, "flow");
+
+static cmdline_parse_token_string_t cmd_flow_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_flow_result, multi_string,
+ TOKEN_STRING_MULTI);
+
+static cmdline_parse_inst_t cmd_flow = {
+ .f = cmd_flow_parsed,
+ .data = NULL,
+ .help_str = "flow add / add bulk / add default / del / del default / ls",
+ .tokens = {
+ (void *) &cmd_flow_p_string,
+ (void *) &cmd_flow_pipeline_id,
+ (void *) &cmd_flow_flow_string,
+ (void *) &cmd_flow_multi_string,
+ NULL,
+ },
+};
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_flow,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_flow_classification_fe_ops = {
+ .f_init = app_pipeline_fc_init,
+ .f_post_init = NULL,
+ .f_free = app_pipeline_fc_free,
+ .f_track = app_pipeline_track_default,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_flow_classification = {
+ .name = "FLOW_CLASSIFICATION",
+ .be_ops = &pipeline_flow_classification_be_ops,
+ .fe_ops = &pipeline_flow_classification_fe_ops,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
new file mode 100644
index 00000000..6c5ed384
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification.h
@@ -0,0 +1,135 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_H__
+#define __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_H__
+
+#include "pipeline.h"
+#include "pipeline_flow_classification_be.h"
+
+enum flow_key_type {
+ FLOW_KEY_QINQ,
+ FLOW_KEY_IPV4_5TUPLE,
+ FLOW_KEY_IPV6_5TUPLE,
+};
+
+struct flow_key_qinq {
+ uint16_t svlan;
+ uint16_t cvlan;
+};
+
+struct flow_key_ipv4_5tuple {
+ uint32_t ip_src;
+ uint32_t ip_dst;
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint32_t proto;
+};
+
+struct flow_key_ipv6_5tuple {
+ uint8_t ip_src[16];
+ uint8_t ip_dst[16];
+ uint16_t port_src;
+ uint16_t port_dst;
+ uint32_t proto;
+};
+
+struct pipeline_fc_key {
+ enum flow_key_type type;
+ union {
+ struct flow_key_qinq qinq;
+ struct flow_key_ipv4_5tuple ipv4_5tuple;
+ struct flow_key_ipv6_5tuple ipv6_5tuple;
+ } key;
+};
+
+int
+app_pipeline_fc_add(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key,
+ uint32_t port_id,
+ uint32_t flow_id);
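+
+/*
+ * Illustrative usage sketch (not part of the API): how a front-end caller
+ * can add a single IPv4 5-tuple flow, mirroring the "flow add ipv4" CLI
+ * handler. Pipeline ID, port ID, flow ID and addresses are placeholders.
+ *
+ *	struct pipeline_fc_key key;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	key.type = FLOW_KEY_IPV4_5TUPLE;
+ *	key.key.ipv4_5tuple.ip_src = 0x0A000001;    (10.0.0.1, host byte order)
+ *	key.key.ipv4_5tuple.ip_dst = 0xC0A80001;    (192.168.0.1, host byte order)
+ *	key.key.ipv4_5tuple.port_src = 100;
+ *	key.key.ipv4_5tuple.port_dst = 200;
+ *	key.key.ipv4_5tuple.proto = 6;              (TCP)
+ *
+ *	if (app_pipeline_fc_add(app, 1, &key, 0, 100) != 0)
+ *		printf("flow add failed\n");
+ */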
+
+int
+app_pipeline_fc_add_bulk(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key,
+ uint32_t *port_id,
+ uint32_t *flow_id,
+ uint32_t n_keys);
+
+int
+app_pipeline_fc_del(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_fc_key *key);
+
+int
+app_pipeline_fc_add_default(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_fc_del_default(struct app_params *app,
+ uint32_t pipeline_id);
+
+#ifndef APP_PIPELINE_FC_MAX_FLOWS_IN_FILE
+#define APP_PIPELINE_FC_MAX_FLOWS_IN_FILE (16 * 1024 * 1024)
+#endif
+
+int
+app_pipeline_fc_load_file_qinq(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
+int
+app_pipeline_fc_load_file_ipv4(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
+
+int
+app_pipeline_fc_load_file_ipv6(char *filename,
+ struct pipeline_fc_key *keys,
+ uint32_t *port_ids,
+ uint32_t *flow_ids,
+ uint32_t *n_keys,
+ uint32_t *line);
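+
+/*
+ * Illustrative bulk-load sketch (placeholder pipeline ID and file name),
+ * following the same allocation pattern as the "flow add ... bulk" CLI
+ * handler: load up to APP_PIPELINE_FC_MAX_FLOWS_IN_FILE entries from a
+ * file, then push them to the pipeline in a single request.
+ *
+ *	uint32_t n_keys = APP_PIPELINE_FC_MAX_FLOWS_IN_FILE, line;
+ *	struct pipeline_fc_key *keys = calloc(n_keys, sizeof(*keys));
+ *	uint32_t *port_ids = calloc(n_keys, sizeof(*port_ids));
+ *	uint32_t *flow_ids = calloc(n_keys, sizeof(*flow_ids));
+ *
+ *	if (keys && port_ids && flow_ids &&
+ *	    (app_pipeline_fc_load_file_ipv4("flows.txt", keys, port_ids,
+ *		flow_ids, &n_keys, &line) == 0))
+ *		app_pipeline_fc_add_bulk(app, 1, keys, port_ids, flow_ids,
+ *			n_keys);
+ *
+ *	free(flow_ids);
+ *	free(port_ids);
+ *	free(keys);
+ */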
+
+extern struct pipeline_type pipeline_flow_classification;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
new file mode 100644
index 00000000..026f00cd
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
@@ -0,0 +1,789 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_table_hash.h>
+#include <rte_byteorder.h>
+#include <pipeline.h>
+
+#include "pipeline_flow_classification_be.h"
+#include "pipeline_actions_common.h"
+#include "parser.h"
+#include "hash_func.h"
+
+struct pipeline_flow_classification {
+ struct pipeline p;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_FC_MSG_REQS];
+
+ uint32_t n_flows;
+ uint32_t key_size;
+ uint32_t flow_id;
+
+ uint32_t key_offset;
+ uint32_t hash_offset;
+ uint8_t key_mask[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+ uint32_t key_mask_present;
+ uint32_t flow_id_offset;
+
+} __rte_cache_aligned;
+
+static void *
+pipeline_fc_msg_req_custom_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_fc_msg_req_custom_handler,
+};
+
+static void *
+pipeline_fc_msg_req_add_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fc_msg_req_add_bulk_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fc_msg_req_del_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fc_msg_req_add_default_handler(struct pipeline *p, void *msg);
+
+static void *
+pipeline_fc_msg_req_del_default_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_FC_MSG_REQ_FLOW_ADD] =
+ pipeline_fc_msg_req_add_handler,
+ [PIPELINE_FC_MSG_REQ_FLOW_ADD_BULK] =
+ pipeline_fc_msg_req_add_bulk_handler,
+ [PIPELINE_FC_MSG_REQ_FLOW_DEL] =
+ pipeline_fc_msg_req_del_handler,
+ [PIPELINE_FC_MSG_REQ_FLOW_ADD_DEFAULT] =
+ pipeline_fc_msg_req_add_default_handler,
+ [PIPELINE_FC_MSG_REQ_FLOW_DEL_DEFAULT] =
+ pipeline_fc_msg_req_del_default_handler,
+};
+
+/*
+ * Flow table
+ */
+struct flow_table_entry {
+ struct rte_pipeline_table_entry head;
+
+ uint32_t flow_id;
+ uint32_t pad;
+};
+
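+/* Key hash functions indexed by (key_size / 8) - 1, selected in pipeline_fc_init() */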
+rte_table_hash_op_hash hash_func[] = {
+ hash_default_key8,
+ hash_default_key16,
+ hash_default_key24,
+ hash_default_key32,
+ hash_default_key40,
+ hash_default_key48,
+ hash_default_key56,
+ hash_default_key64
+};
+
+/*
+ * Flow table AH - Write flow_id to packet meta-data
+ */
+static inline void
+pkt_work_flow_id(
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry *table_entry,
+ void *arg)
+{
+ struct pipeline_flow_classification *p_fc = arg;
+ uint32_t *flow_id_ptr =
+ RTE_MBUF_METADATA_UINT32_PTR(pkt, p_fc->flow_id_offset);
+ struct flow_table_entry *entry =
+ (struct flow_table_entry *) table_entry;
+
+ /* Read */
+ uint32_t flow_id = entry->flow_id;
+
+ /* Compute */
+
+ /* Write */
+ *flow_id_ptr = flow_id;
+}
+
+static inline void
+pkt4_work_flow_id(
+ struct rte_mbuf **pkts,
+ struct rte_pipeline_table_entry **table_entries,
+ void *arg)
+{
+ struct pipeline_flow_classification *p_fc = arg;
+
+ uint32_t *flow_id_ptr0 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[0], p_fc->flow_id_offset);
+ uint32_t *flow_id_ptr1 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[1], p_fc->flow_id_offset);
+ uint32_t *flow_id_ptr2 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[2], p_fc->flow_id_offset);
+ uint32_t *flow_id_ptr3 =
+ RTE_MBUF_METADATA_UINT32_PTR(pkts[3], p_fc->flow_id_offset);
+
+ struct flow_table_entry *entry0 =
+ (struct flow_table_entry *) table_entries[0];
+ struct flow_table_entry *entry1 =
+ (struct flow_table_entry *) table_entries[1];
+ struct flow_table_entry *entry2 =
+ (struct flow_table_entry *) table_entries[2];
+ struct flow_table_entry *entry3 =
+ (struct flow_table_entry *) table_entries[3];
+
+ /* Read */
+ uint32_t flow_id0 = entry0->flow_id;
+ uint32_t flow_id1 = entry1->flow_id;
+ uint32_t flow_id2 = entry2->flow_id;
+ uint32_t flow_id3 = entry3->flow_id;
+
+ /* Compute */
+
+ /* Write */
+ *flow_id_ptr0 = flow_id0;
+ *flow_id_ptr1 = flow_id1;
+ *flow_id_ptr2 = flow_id2;
+ *flow_id_ptr3 = flow_id3;
+}
+
+PIPELINE_TABLE_AH_HIT(fc_table_ah_hit,
+ pkt_work_flow_id, pkt4_work_flow_id);
+
+static rte_pipeline_table_action_handler_hit
+get_fc_table_ah_hit(struct pipeline_flow_classification *p)
+{
+ if (p->flow_id)
+ return fc_table_ah_hit;
+
+ return NULL;
+}
+
+/*
+ * Argument parsing
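+ *
+ * These arguments are read from the flow classification pipeline section
+ * of the application configuration file. Illustrative sketch only; the
+ * section name, offsets, sizes and mask below are placeholders that must
+ * match the actual packet meta-data layout:
+ *
+ *	[PIPELINE1]
+ *	type = FLOW_CLASSIFICATION
+ *	n_flows = 65536
+ *	key_size = 16
+ *	key_offset = 64
+ *	key_mask = 00FF0000FFFFFFFFFFFFFFFFFFFFFFFF
+ *	flowid_offset = 92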
+ */
+static int
+pipeline_fc_parse_args(struct pipeline_flow_classification *p,
+ struct pipeline_params *params)
+{
+ uint32_t n_flows_present = 0;
+ uint32_t key_offset_present = 0;
+ uint32_t key_size_present = 0;
+ uint32_t hash_offset_present = 0;
+ uint32_t key_mask_present = 0;
+ uint32_t flow_id_offset_present = 0;
+
+ uint32_t i;
+ char key_mask_str[PIPELINE_FC_FLOW_KEY_MAX_SIZE * 2 + 1];
+
+ p->hash_offset = 0;
+
+ /* default values */
+ p->flow_id = 0;
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ /* n_flows */
+ if (strcmp(arg_name, "n_flows") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_flows_present == 0, params->name,
+ arg_name);
+ n_flows_present = 1;
+
+ status = parser_read_uint32(&p->n_flows,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->n_flows != 0)), params->name,
+ arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* key_offset */
+ if (strcmp(arg_name, "key_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ key_offset_present == 0, params->name,
+ arg_name);
+ key_offset_present = 1;
+
+ status = parser_read_uint32(&p->key_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* key_size */
+ if (strcmp(arg_name, "key_size") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ key_size_present == 0, params->name,
+ arg_name);
+ key_size_present = 1;
+
+ status = parser_read_uint32(&p->key_size,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->key_size != 0) &&
+ (p->key_size % 8 == 0)),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
+ (p->key_size <=
+ PIPELINE_FC_FLOW_KEY_MAX_SIZE)),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* key_mask */
+ if (strcmp(arg_name, "key_mask") == 0) {
+ int mask_str_len = strlen(arg_value);
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ key_mask_present == 0,
+ params->name, arg_name);
+ key_mask_present = 1;
+
+ PIPELINE_ARG_CHECK((mask_str_len <=
+ (PIPELINE_FC_FLOW_KEY_MAX_SIZE * 2)),
+ "Parse error in section \"%s\": entry "
+ "\"%s\" is too long", params->name,
+ arg_name);
+
+ snprintf(key_mask_str, mask_str_len + 1, "%s",
+ arg_value);
+
+ continue;
+ }
+
+ /* hash_offset */
+ if (strcmp(arg_name, "hash_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ hash_offset_present == 0, params->name,
+ arg_name);
+ hash_offset_present = 1;
+
+ status = parser_read_uint32(&p->hash_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* flow_id_offset */
+ if (strcmp(arg_name, "flowid_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ flow_id_offset_present == 0, params->name,
+ arg_name);
+ flow_id_offset_present = 1;
+
+ status = parser_read_uint32(&p->flow_id_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->flow_id = 1;
+
+ continue;
+ }
+
+ /* Unknown argument */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ /* Check that mandatory arguments are present */
+ PIPELINE_PARSE_ERR_MANDATORY((n_flows_present), params->name,
+ "n_flows");
+ PIPELINE_PARSE_ERR_MANDATORY((key_offset_present), params->name,
+ "key_offset");
+ PIPELINE_PARSE_ERR_MANDATORY((key_size_present), params->name,
+ "key_size");
+
+ if (key_mask_present) {
+ uint32_t key_size = p->key_size;
+ int status;
+
+ PIPELINE_ARG_CHECK(((key_size == 8) || (key_size == 16)),
+ "Parse error in section \"%s\": entry key_mask "
+ "only allowed for key_size of 8 or 16 bytes",
+ params->name);
+
+ PIPELINE_ARG_CHECK((strlen(key_mask_str) ==
+ (key_size * 2)), "Parse error in section "
+ "\"%s\": key_mask should have exactly %u hex "
+ "digits", params->name, (key_size * 2));
+
+ PIPELINE_ARG_CHECK((hash_offset_present == 0), "Parse "
+ "error in section \"%s\": entry hash_offset only "
+ "allowed when key_mask is not present",
+ params->name);
+
+ status = parse_hex_string(key_mask_str, p->key_mask,
+ &p->key_size);
+
+ PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
+ (key_size == p->key_size)), params->name,
+ "key_mask", key_mask_str);
+ }
+
+ p->key_mask_present = key_mask_present;
+
+ return 0;
+}
+
+static void *pipeline_fc_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_flow_classification *p_fc;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if (params == NULL)
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(
+ sizeof(struct pipeline_flow_classification));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+ p_fc = (struct pipeline_flow_classification *) p;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Flow classification");
+
+ /* Parse arguments */
+	if (pipeline_fc_parse_args(p_fc, params)) {
+		rte_free(p);
+		return NULL;
+	}
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = params->name,
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Tables */
+ p->n_tables = 1;
+ {
+ struct rte_table_hash_key8_ext_params
+ table_hash_key8_params = {
+ .n_entries = p_fc->n_flows,
+ .n_entries_ext = p_fc->n_flows,
+ .signature_offset = p_fc->hash_offset,
+ .key_offset = p_fc->key_offset,
+ .f_hash = hash_func[(p_fc->key_size / 8) - 1],
+ .key_mask = (p_fc->key_mask_present) ?
+ p_fc->key_mask : NULL,
+ .seed = 0,
+ };
+
+ struct rte_table_hash_key16_ext_params
+ table_hash_key16_params = {
+ .n_entries = p_fc->n_flows,
+ .n_entries_ext = p_fc->n_flows,
+ .signature_offset = p_fc->hash_offset,
+ .key_offset = p_fc->key_offset,
+ .f_hash = hash_func[(p_fc->key_size / 8) - 1],
+ .key_mask = (p_fc->key_mask_present) ?
+ p_fc->key_mask : NULL,
+ .seed = 0,
+ };
+
+ struct rte_table_hash_ext_params
+ table_hash_params = {
+ .key_size = p_fc->key_size,
+ .n_keys = p_fc->n_flows,
+ .n_buckets = p_fc->n_flows / 4,
+ .n_buckets_ext = p_fc->n_flows / 4,
+ .f_hash = hash_func[(p_fc->key_size / 8) - 1],
+ .seed = 0,
+ .signature_offset = p_fc->hash_offset,
+ .key_offset = p_fc->key_offset,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = NULL, /* set below */
+ .arg_create = NULL, /* set below */
+ .f_action_hit = get_fc_table_ah_hit(p_fc),
+ .f_action_miss = NULL,
+ .arg_ah = p_fc,
+ .action_data_size = sizeof(struct flow_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
+ switch (p_fc->key_size) {
+ case 8:
+ if (p_fc->hash_offset != 0) {
+ table_params.ops =
+ &rte_table_hash_key8_ext_ops;
+ } else {
+ table_params.ops =
+ &rte_table_hash_key8_ext_dosig_ops;
+ }
+ table_params.arg_create = &table_hash_key8_params;
+ break;
+
+ case 16:
+ if (p_fc->hash_offset != 0) {
+ table_params.ops =
+ &rte_table_hash_key16_ext_ops;
+ } else {
+ table_params.ops =
+ &rte_table_hash_key16_ext_dosig_ops;
+ }
+ table_params.arg_create = &table_hash_key16_params;
+ break;
+
+ default:
+ table_params.ops = &rte_table_hash_ext_ops;
+ table_params.arg_create = &table_hash_params;
+ }
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_fc->custom_handlers,
+ custom_handlers,
+ sizeof(p_fc->custom_handlers));
+
+ return p;
+}
+
+static int
+pipeline_fc_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_fc_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+static void *
+pipeline_fc_msg_req_custom_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_flow_classification *p_fc =
+ (struct pipeline_flow_classification *) p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_FC_MSG_REQS) ?
+ p_fc->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+static void *
+pipeline_fc_msg_req_add_handler(struct pipeline *p, void *msg)
+{
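+	/* Request and response share the same message buffer (in-place reply) */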
+ struct pipeline_fc_add_msg_req *req = msg;
+ struct pipeline_fc_add_msg_rsp *rsp = msg;
+
+ struct flow_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+ .flow_id = req->flow_id,
+ };
+
+ rsp->status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &req->key,
+ (struct rte_pipeline_table_entry *) &entry,
+ &rsp->key_found,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+static void *
+pipeline_fc_msg_req_add_bulk_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_fc_add_bulk_msg_req *req = msg;
+ struct pipeline_fc_add_bulk_msg_rsp *rsp = msg;
+ uint32_t i;
+
+ for (i = 0; i < req->n_keys; i++) {
+ struct pipeline_fc_add_bulk_flow_req *flow_req = &req->req[i];
+ struct pipeline_fc_add_bulk_flow_rsp *flow_rsp = &req->rsp[i];
+
+ struct flow_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[flow_req->port_id]},
+ },
+ .flow_id = flow_req->flow_id,
+ };
+
+ int status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &flow_req->key,
+ (struct rte_pipeline_table_entry *) &entry,
+ &flow_rsp->key_found,
+ (struct rte_pipeline_table_entry **)
+ &flow_rsp->entry_ptr);
+
+ if (status)
+ break;
+ }
+
+ rsp->n_keys = i;
+
+ return rsp;
+}
+
+static void *
+pipeline_fc_msg_req_del_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_fc_del_msg_req *req = msg;
+ struct pipeline_fc_del_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ p->table_id[0],
+ &req->key,
+ &rsp->key_found,
+ NULL);
+
+ return rsp;
+}
+
+static void *
+pipeline_fc_msg_req_add_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_fc_add_default_msg_req *req = msg;
+ struct pipeline_fc_add_default_msg_rsp *rsp = msg;
+
+ struct flow_table_entry default_entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+
+ .flow_id = 0,
+ };
+
+ rsp->status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[0],
+ (struct rte_pipeline_table_entry *) &default_entry,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+static void *
+pipeline_fc_msg_req_del_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_fc_del_default_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ p->table_id[0],
+ NULL);
+
+ return rsp;
+}
+
+struct pipeline_be_ops pipeline_flow_classification_be_ops = {
+ .f_init = pipeline_fc_init,
+ .f_free = pipeline_fc_free,
+ .f_run = NULL,
+ .f_timer = pipeline_fc_timer,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
new file mode 100644
index 00000000..d8129b21
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.h
@@ -0,0 +1,142 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_BE_H__
+#define __INCLUDE_PIPELINE_FLOW_CLASSIFICATION_BE_H__
+
+#include "pipeline_common_be.h"
+
+enum pipeline_fc_msg_req_type {
+ PIPELINE_FC_MSG_REQ_FLOW_ADD = 0,
+ PIPELINE_FC_MSG_REQ_FLOW_ADD_BULK,
+ PIPELINE_FC_MSG_REQ_FLOW_DEL,
+ PIPELINE_FC_MSG_REQ_FLOW_ADD_DEFAULT,
+ PIPELINE_FC_MSG_REQ_FLOW_DEL_DEFAULT,
+ PIPELINE_FC_MSG_REQS,
+};
+
+#ifndef PIPELINE_FC_FLOW_KEY_MAX_SIZE
+#define PIPELINE_FC_FLOW_KEY_MAX_SIZE 64
+#endif
+
+/*
+ * MSG ADD
+ */
+struct pipeline_fc_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+
+ uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+
+ uint32_t port_id;
+ uint32_t flow_id;
+};
+
+struct pipeline_fc_add_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ADD BULK
+ */
+struct pipeline_fc_add_bulk_flow_req {
+ uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+ uint32_t port_id;
+ uint32_t flow_id;
+};
+
+struct pipeline_fc_add_bulk_flow_rsp {
+ int key_found;
+ void *entry_ptr;
+};
+
+struct pipeline_fc_add_bulk_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+
+ struct pipeline_fc_add_bulk_flow_req *req;
+ struct pipeline_fc_add_bulk_flow_rsp *rsp;
+ uint32_t n_keys;
+};
+
+struct pipeline_fc_add_bulk_msg_rsp {
+ uint32_t n_keys;
+};
+
+/*
+ * MSG DEL
+ */
+struct pipeline_fc_del_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+
+ uint8_t key[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+};
+
+struct pipeline_fc_del_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/*
+ * MSG ADD DEFAULT
+ */
+struct pipeline_fc_add_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+
+ uint32_t port_id;
+};
+
+struct pipeline_fc_add_default_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG DEL DEFAULT
+ */
+struct pipeline_fc_del_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_fc_msg_req_type subtype;
+};
+
+struct pipeline_fc_del_default_msg_rsp {
+ int status;
+};
+
+extern struct pipeline_be_ops pipeline_flow_classification_be_ops;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master.c
new file mode 100644
index 00000000..aab58a27
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master.c
@@ -0,0 +1,49 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pipeline_master.h"
+#include "pipeline_master_be.h"
+
+static struct pipeline_fe_ops pipeline_master_fe_ops = {
+ .f_init = NULL,
+ .f_post_init = NULL,
+ .f_free = NULL,
+ .f_track = NULL,
+ .cmds = NULL,
+};
+
+struct pipeline_type pipeline_master = {
+ .name = "MASTER",
+ .be_ops = &pipeline_master_be_ops,
+ .fe_ops = &pipeline_master_fe_ops,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master.h
new file mode 100644
index 00000000..3fe3030f
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master.h
@@ -0,0 +1,41 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_MASTER_H__
+#define __INCLUDE_PIPELINE_MASTER_H__
+
+#include "pipeline.h"
+
+extern struct pipeline_type pipeline_master;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master_be.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master_be.c
new file mode 100644
index 00000000..9a7c8c13
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master_be.c
@@ -0,0 +1,170 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include "app.h"
+#include "pipeline_master_be.h"
+
+struct pipeline_master {
+ struct app_params *app;
+ struct cmdline *cl;
+ int post_init_done;
+ int script_file_done;
+} __rte_cache_aligned;
+
+static void*
+pipeline_init(__rte_unused struct pipeline_params *params, void *arg)
+{
+ struct app_params *app = (struct app_params *) arg;
+ struct pipeline_master *p;
+ uint32_t size;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_master));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->app = app;
+
+ p->cl = cmdline_stdin_new(app->cmds, "pipeline> ");
+ if (p->cl == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+
+ p->post_init_done = 0;
+ p->script_file_done = 0;
+ if (app->script_file == NULL)
+ p->script_file_done = 1;
+
+ return (void *) p;
+}
+
+static int
+pipeline_free(void *pipeline)
+{
+ struct pipeline_master *p = (struct pipeline_master *) pipeline;
+
+ if (p == NULL)
+ return -EINVAL;
+
+ cmdline_stdin_exit(p->cl);
+ rte_free(p);
+
+ return 0;
+}
+
+static int
+pipeline_run(void *pipeline)
+{
+ struct pipeline_master *p = (struct pipeline_master *) pipeline;
+ struct app_params *app = p->app;
+ int status;
+#ifdef RTE_LIBRTE_KNI
+ uint32_t i;
+#endif /* RTE_LIBRTE_KNI */
+
+ /* Application post-init phase */
+ if (p->post_init_done == 0) {
+ app_post_init(app);
+
+ p->post_init_done = 1;
+ }
+
+ /* Run startup script file */
+ if (p->script_file_done == 0) {
+ struct app_params *app = p->app;
+ int fd = open(app->script_file, O_RDONLY);
+
+ if (fd < 0)
+ printf("Cannot open CLI script file \"%s\"\n",
+ app->script_file);
+ else {
+ struct cmdline *file_cl;
+
+ printf("Running CLI script file \"%s\" ...\n",
+ app->script_file);
+			file_cl = cmdline_new(p->cl->ctx, "", fd, 1);
+			if (file_cl != NULL)
+				cmdline_interact(file_cl);
+			close(fd);
+ }
+
+ p->script_file_done = 1;
+ }
+
+ /* Command Line Interface (CLI) */
+ status = cmdline_poll(p->cl);
+ if (status < 0)
+ rte_panic("CLI poll error (%" PRId32 ")\n", status);
+ else if (status == RDLINE_EXITED) {
+ cmdline_stdin_exit(p->cl);
+ rte_exit(0, "Bye!\n");
+ }
+
+#ifdef RTE_LIBRTE_KNI
+ /* Handle KNI requests from Linux kernel */
+ for (i = 0; i < app->n_pktq_kni; i++)
+ rte_kni_handle_request(app->kni[i]);
+#endif /* RTE_LIBRTE_KNI */
+
+ return 0;
+}
+
+static int
+pipeline_timer(__rte_unused void *pipeline)
+{
+ return 0;
+}
+
+struct pipeline_be_ops pipeline_master_be_ops = {
+ .f_init = pipeline_init,
+ .f_free = pipeline_free,
+ .f_run = pipeline_run,
+ .f_timer = pipeline_timer,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master_be.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master_be.h
new file mode 100644
index 00000000..00b71fe8
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_master_be.h
@@ -0,0 +1,41 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_MASTER_BE_H__
+#define __INCLUDE_PIPELINE_MASTER_BE_H__
+
+#include "pipeline_common_be.h"
+
+extern struct pipeline_be_ops pipeline_master_be_ops;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough.c
new file mode 100644
index 00000000..2c9eb2e3
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough.c
@@ -0,0 +1,74 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pipeline_passthrough.h"
+#include "pipeline_passthrough_be.h"
+
+static int
+app_pipeline_passthrough_track(struct pipeline_params *p,
+ uint32_t port_in,
+ uint32_t *port_out)
+{
+ struct pipeline_passthrough_params pp;
+ int status;
+
+ /* Check input arguments */
+ if ((p == NULL) ||
+ (port_in >= p->n_ports_in) ||
+ (port_out == NULL))
+ return -1;
+
+ status = pipeline_passthrough_parse_args(&pp, p);
+ if (status)
+ return -1;
+
+ if (pp.dma_hash_lb_enabled)
+ return -1;
+
+ *port_out = port_in / (p->n_ports_in / p->n_ports_out);
+ return 0;
+}
+
+static struct pipeline_fe_ops pipeline_passthrough_fe_ops = {
+ .f_init = NULL,
+ .f_post_init = NULL,
+ .f_free = NULL,
+ .f_track = app_pipeline_passthrough_track,
+ .cmds = NULL,
+};
+
+struct pipeline_type pipeline_passthrough = {
+ .name = "PASS-THROUGH",
+ .be_ops = &pipeline_passthrough_be_ops,
+ .fe_ops = &pipeline_passthrough_fe_ops,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough.h
new file mode 100644
index 00000000..420a8768
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough.h
@@ -0,0 +1,41 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_H__
+#define __INCLUDE_PIPELINE_PASSTHROUGH_H__
+
+#include "pipeline.h"
+
+extern struct pipeline_type pipeline_passthrough;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
new file mode 100644
index 00000000..7ab0afed
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
@@ -0,0 +1,958 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_byteorder.h>
+#include <rte_table_stub.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_passthrough_be.h"
+#include "pipeline_actions_common.h"
+#include "parser.h"
+#include "hash_func.h"
+
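+/*
+ * Descriptive note (added): each swap field is processed in 64-bit chunks,
+ * so the flattened per-chunk arrays below need room for up to
+ * SWAP_N_FIELDS_MAX * (SWAP_FIELD_SIZE_MAX / 8) entries.
+ */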
+#define SWAP_DIM (PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX * \
+ (PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX / sizeof(uint64_t)))
+
+struct pipeline_passthrough {
+ struct pipeline p;
+ struct pipeline_passthrough_params params;
+ rte_table_hash_op_hash f_hash;
+ uint32_t swap_field0_offset[SWAP_DIM];
+ uint32_t swap_field1_offset[SWAP_DIM];
+ uint64_t swap_field_mask[SWAP_DIM];
+ uint32_t swap_n_fields;
+} __rte_cache_aligned;
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_msg_req_invalid_handler,
+};
+
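+/*
+ * Descriptive note (added): single-packet DMA action. Copies dma_size bytes
+ * of packet meta-data from dma_src_offset to dma_dst_offset through
+ * dma_src_mask, optionally computes a hash over the copied block and stores
+ * it at dma_hash_offset, and, when hash-based load balancing is enabled,
+ * steers the packet to an output port derived from that hash.
+ * pkt4_work_dma() below is the 4-packet variant of the same action.
+ */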
+static inline __attribute__((always_inline)) void
+pkt_work_dma(
+ struct rte_mbuf *pkt,
+ void *arg,
+ uint32_t dma_size,
+ uint32_t hash_enabled,
+ uint32_t lb_hash,
+ uint32_t port_out_pow2)
+{
+ struct pipeline_passthrough *p = arg;
+
+ uint64_t *dma_dst = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ p->params.dma_dst_offset);
+ uint64_t *dma_src = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ p->params.dma_src_offset);
+ uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
+ uint32_t *dma_hash = RTE_MBUF_METADATA_UINT32_PTR(pkt,
+ p->params.dma_hash_offset);
+ uint32_t i;
+
+ /* Read (dma_src), compute (dma_dst), write (dma_dst) */
+ for (i = 0; i < (dma_size / 8); i++)
+ dma_dst[i] = dma_src[i] & dma_mask[i];
+
+ /* Read (dma_dst), compute (hash), write (hash) */
+ if (hash_enabled) {
+ uint32_t hash = p->f_hash(dma_dst, dma_size, 0);
+ *dma_hash = hash;
+
+ if (lb_hash) {
+ uint32_t port_out;
+
+ if (port_out_pow2)
+ port_out
+ = hash & (p->p.n_ports_out - 1);
+ else
+ port_out
+ = hash % p->p.n_ports_out;
+
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out, pkt);
+ }
+ }
+}
+
+static inline __attribute__((always_inline)) void
+pkt4_work_dma(
+ struct rte_mbuf **pkts,
+ void *arg,
+ uint32_t dma_size,
+ uint32_t hash_enabled,
+ uint32_t lb_hash,
+ uint32_t port_out_pow2)
+{
+ struct pipeline_passthrough *p = arg;
+
+ uint64_t *dma_dst0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ p->params.dma_dst_offset);
+ uint64_t *dma_dst1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ p->params.dma_dst_offset);
+ uint64_t *dma_dst2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ p->params.dma_dst_offset);
+ uint64_t *dma_dst3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ p->params.dma_dst_offset);
+
+ uint64_t *dma_src0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ p->params.dma_src_offset);
+ uint64_t *dma_src1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ p->params.dma_src_offset);
+ uint64_t *dma_src2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ p->params.dma_src_offset);
+ uint64_t *dma_src3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ p->params.dma_src_offset);
+
+ uint64_t *dma_mask = (uint64_t *) p->params.dma_src_mask;
+
+ uint32_t *dma_hash0 = RTE_MBUF_METADATA_UINT32_PTR(pkts[0],
+ p->params.dma_hash_offset);
+ uint32_t *dma_hash1 = RTE_MBUF_METADATA_UINT32_PTR(pkts[1],
+ p->params.dma_hash_offset);
+ uint32_t *dma_hash2 = RTE_MBUF_METADATA_UINT32_PTR(pkts[2],
+ p->params.dma_hash_offset);
+ uint32_t *dma_hash3 = RTE_MBUF_METADATA_UINT32_PTR(pkts[3],
+ p->params.dma_hash_offset);
+
+ uint32_t i;
+
+ /* Read (dma_src), compute (dma_dst), write (dma_dst) */
+ for (i = 0; i < (dma_size / 8); i++) {
+ dma_dst0[i] = dma_src0[i] & dma_mask[i];
+ dma_dst1[i] = dma_src1[i] & dma_mask[i];
+ dma_dst2[i] = dma_src2[i] & dma_mask[i];
+ dma_dst3[i] = dma_src3[i] & dma_mask[i];
+ }
+
+ /* Read (dma_dst), compute (hash), write (hash) */
+ if (hash_enabled) {
+ uint32_t hash0 = p->f_hash(dma_dst0, dma_size, 0);
+ uint32_t hash1 = p->f_hash(dma_dst1, dma_size, 0);
+ uint32_t hash2 = p->f_hash(dma_dst2, dma_size, 0);
+ uint32_t hash3 = p->f_hash(dma_dst3, dma_size, 0);
+
+ *dma_hash0 = hash0;
+ *dma_hash1 = hash1;
+ *dma_hash2 = hash2;
+ *dma_hash3 = hash3;
+
+ if (lb_hash) {
+ uint32_t port_out0, port_out1, port_out2, port_out3;
+
+ if (port_out_pow2) {
+ port_out0
+ = hash0 & (p->p.n_ports_out - 1);
+ port_out1
+ = hash1 & (p->p.n_ports_out - 1);
+ port_out2
+ = hash2 & (p->p.n_ports_out - 1);
+ port_out3
+ = hash3 & (p->p.n_ports_out - 1);
+ } else {
+ port_out0
+ = hash0 % p->p.n_ports_out;
+ port_out1
+ = hash1 % p->p.n_ports_out;
+ port_out2
+ = hash2 % p->p.n_ports_out;
+ port_out3
+ = hash3 % p->p.n_ports_out;
+ }
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out0, pkts[0]);
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out1, pkts[1]);
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out2, pkts[2]);
+ rte_pipeline_port_out_packet_insert(p->p.p,
+ port_out3, pkts[3]);
+ }
+ }
+}
+
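+/*
+ * Descriptive note (added): single-packet SWAP action. For every configured
+ * swap field, exchanges the masked 64-bit meta-data words located at
+ * swap_field0_offset[i] and swap_field1_offset[i]; the offsets and masks
+ * are pre-expanded by pipeline_passthrough_swap_convert().
+ */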
+static inline __attribute__((always_inline)) void
+pkt_work_swap(
+ struct rte_mbuf *pkt,
+ void *arg)
+{
+ struct pipeline_passthrough *p = arg;
+ uint32_t i;
+
+ /* Read(field0, field1), compute(field0, field1), write(field0, field1) */
+ for (i = 0; i < p->swap_n_fields; i++) {
+ uint64_t *field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ p->swap_field0_offset[i]);
+ uint64_t *field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ p->swap_field1_offset[i]);
+ uint64_t mask = p->swap_field_mask[i];
+
+ uint64_t field0 = *field0_ptr;
+ uint64_t field1 = *field1_ptr;
+
+ *field0_ptr = (field0 & (~mask)) + (field1 & mask);
+ *field1_ptr = (field0 & mask) + (field1 & (~mask));
+ }
+}
+
+static inline __attribute__((always_inline)) void
+pkt4_work_swap(
+ struct rte_mbuf **pkts,
+ void *arg)
+{
+ struct pipeline_passthrough *p = arg;
+ uint32_t i;
+
+ /* Read(field0, field1), compute(field0, field1), write(field0, field1) */
+ for (i = 0; i < p->swap_n_fields; i++) {
+ uint64_t *pkt0_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ p->swap_field0_offset[i]);
+ uint64_t *pkt1_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ p->swap_field0_offset[i]);
+ uint64_t *pkt2_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ p->swap_field0_offset[i]);
+ uint64_t *pkt3_field0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ p->swap_field0_offset[i]);
+
+ uint64_t *pkt0_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ p->swap_field1_offset[i]);
+ uint64_t *pkt1_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ p->swap_field1_offset[i]);
+ uint64_t *pkt2_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ p->swap_field1_offset[i]);
+ uint64_t *pkt3_field1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ p->swap_field1_offset[i]);
+
+ uint64_t mask = p->swap_field_mask[i];
+
+ uint64_t pkt0_field0 = *pkt0_field0_ptr;
+ uint64_t pkt1_field0 = *pkt1_field0_ptr;
+ uint64_t pkt2_field0 = *pkt2_field0_ptr;
+ uint64_t pkt3_field0 = *pkt3_field0_ptr;
+
+ uint64_t pkt0_field1 = *pkt0_field1_ptr;
+ uint64_t pkt1_field1 = *pkt1_field1_ptr;
+ uint64_t pkt2_field1 = *pkt2_field1_ptr;
+ uint64_t pkt3_field1 = *pkt3_field1_ptr;
+
+ *pkt0_field0_ptr = (pkt0_field0 & (~mask)) + (pkt0_field1 & mask);
+ *pkt1_field0_ptr = (pkt1_field0 & (~mask)) + (pkt1_field1 & mask);
+ *pkt2_field0_ptr = (pkt2_field0 & (~mask)) + (pkt2_field1 & mask);
+ *pkt3_field0_ptr = (pkt3_field0 & (~mask)) + (pkt3_field1 & mask);
+
+ *pkt0_field1_ptr = (pkt0_field0 & mask) + (pkt0_field1 & (~mask));
+ *pkt1_field1_ptr = (pkt1_field0 & mask) + (pkt1_field1 & (~mask));
+ *pkt2_field1_ptr = (pkt2_field0 & mask) + (pkt2_field1 & (~mask));
+ *pkt3_field1_ptr = (pkt3_field0 & mask) + (pkt3_field1 & (~mask));
+ }
+}
+
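+/*
+ * Descriptive note (added): the macros below stamp out specialized
+ * single-packet and 4-packet DMA handlers for every
+ * (dma_size, hash_enabled, lb_hash, port_pow2) combination, together with
+ * the matching input port action handlers; the right specialization is
+ * selected at init time by get_port_in_ah().
+ */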
+#define PKT_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
+static inline void \
+pkt_work_dma_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2( \
+ struct rte_mbuf *pkt, \
+ void *arg) \
+{ \
+ pkt_work_dma(pkt, arg, dma_size, hash_enabled, lb_hash, port_pow2); \
+}
+
+#define PKT4_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
+static inline void \
+pkt4_work_dma_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2( \
+ struct rte_mbuf **pkts, \
+ void *arg) \
+{ \
+ pkt4_work_dma(pkts, arg, dma_size, hash_enabled, lb_hash, port_pow2); \
+}
+
+#define port_in_ah_dma(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT4_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
+PIPELINE_PORT_IN_AH(port_in_ah_dma_size##dma_size##_hash \
+ ##hash_enabled##_lb##lb_hash##_pw##port_pow2, \
+ pkt_work_dma_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2, \
+ pkt4_work_dma_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2)
+
+
+#define port_in_ah_lb(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
+PKT4_WORK_DMA(dma_size, hash_enabled, lb_hash, port_pow2) \
+PIPELINE_PORT_IN_AH_HIJACK_ALL( \
+ port_in_ah_lb_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2, \
+ pkt_work_dma_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2, \
+ pkt4_work_dma_size##dma_size##_hash##hash_enabled \
+ ##_lb##lb_hash##_pw##port_pow2)
+
+PIPELINE_PORT_IN_AH(port_in_ah_swap, pkt_work_swap, pkt4_work_swap)
+
+
+/* Port in AH DMA(dma_size, hash_enabled, lb_hash, port_pow2) */
+
+port_in_ah_dma(8, 0, 0, 0)
+port_in_ah_dma(8, 1, 0, 0)
+port_in_ah_lb(8, 1, 1, 0)
+port_in_ah_lb(8, 1, 1, 1)
+
+port_in_ah_dma(16, 0, 0, 0)
+port_in_ah_dma(16, 1, 0, 0)
+port_in_ah_lb(16, 1, 1, 0)
+port_in_ah_lb(16, 1, 1, 1)
+
+port_in_ah_dma(24, 0, 0, 0)
+port_in_ah_dma(24, 1, 0, 0)
+port_in_ah_lb(24, 1, 1, 0)
+port_in_ah_lb(24, 1, 1, 1)
+
+port_in_ah_dma(32, 0, 0, 0)
+port_in_ah_dma(32, 1, 0, 0)
+port_in_ah_lb(32, 1, 1, 0)
+port_in_ah_lb(32, 1, 1, 1)
+
+port_in_ah_dma(40, 0, 0, 0)
+port_in_ah_dma(40, 1, 0, 0)
+port_in_ah_lb(40, 1, 1, 0)
+port_in_ah_lb(40, 1, 1, 1)
+
+port_in_ah_dma(48, 0, 0, 0)
+port_in_ah_dma(48, 1, 0, 0)
+port_in_ah_lb(48, 1, 1, 0)
+port_in_ah_lb(48, 1, 1, 1)
+
+port_in_ah_dma(56, 0, 0, 0)
+port_in_ah_dma(56, 1, 0, 0)
+port_in_ah_lb(56, 1, 1, 0)
+port_in_ah_lb(56, 1, 1, 1)
+
+port_in_ah_dma(64, 0, 0, 0)
+port_in_ah_dma(64, 1, 0, 0)
+port_in_ah_lb(64, 1, 1, 0)
+port_in_ah_lb(64, 1, 1, 1)
+
+static rte_pipeline_port_in_action_handler
+get_port_in_ah(struct pipeline_passthrough *p)
+{
+ if ((p->params.dma_enabled == 0) &&
+ (p->params.swap_enabled == 0))
+ return NULL;
+
+ if (p->params.swap_enabled)
+ return port_in_ah_swap;
+
+ if (p->params.dma_hash_enabled) {
+ if (p->params.dma_hash_lb_enabled) {
+ if (rte_is_power_of_2(p->p.n_ports_out))
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_lb_size8_hash1_lb1_pw1;
+ case 16: return port_in_ah_lb_size16_hash1_lb1_pw1;
+ case 24: return port_in_ah_lb_size24_hash1_lb1_pw1;
+ case 32: return port_in_ah_lb_size32_hash1_lb1_pw1;
+ case 40: return port_in_ah_lb_size40_hash1_lb1_pw1;
+ case 48: return port_in_ah_lb_size48_hash1_lb1_pw1;
+ case 56: return port_in_ah_lb_size56_hash1_lb1_pw1;
+ case 64: return port_in_ah_lb_size64_hash1_lb1_pw1;
+ default: return NULL;
+ }
+ else
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_lb_size8_hash1_lb1_pw0;
+ case 16: return port_in_ah_lb_size16_hash1_lb1_pw0;
+ case 24: return port_in_ah_lb_size24_hash1_lb1_pw0;
+ case 32: return port_in_ah_lb_size32_hash1_lb1_pw0;
+ case 40: return port_in_ah_lb_size40_hash1_lb1_pw0;
+ case 48: return port_in_ah_lb_size48_hash1_lb1_pw0;
+ case 56: return port_in_ah_lb_size56_hash1_lb1_pw0;
+ case 64: return port_in_ah_lb_size64_hash1_lb1_pw0;
+ default: return NULL;
+ }
+ } else
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_dma_size8_hash1_lb0_pw0;
+ case 16: return port_in_ah_dma_size16_hash1_lb0_pw0;
+ case 24: return port_in_ah_dma_size24_hash1_lb0_pw0;
+ case 32: return port_in_ah_dma_size32_hash1_lb0_pw0;
+ case 40: return port_in_ah_dma_size40_hash1_lb0_pw0;
+ case 48: return port_in_ah_dma_size48_hash1_lb0_pw0;
+ case 56: return port_in_ah_dma_size56_hash1_lb0_pw0;
+ case 64: return port_in_ah_dma_size64_hash1_lb0_pw0;
+ default: return NULL;
+ }
+ } else
+ switch (p->params.dma_size) {
+
+ case 8: return port_in_ah_dma_size8_hash0_lb0_pw0;
+ case 16: return port_in_ah_dma_size16_hash0_lb0_pw0;
+ case 24: return port_in_ah_dma_size24_hash0_lb0_pw0;
+ case 32: return port_in_ah_dma_size32_hash0_lb0_pw0;
+ case 40: return port_in_ah_dma_size40_hash0_lb0_pw0;
+ case 48: return port_in_ah_dma_size48_hash0_lb0_pw0;
+ case 56: return port_in_ah_dma_size56_hash0_lb0_pw0;
+ case 64: return port_in_ah_dma_size64_hash0_lb0_pw0;
+ default: return NULL;
+ }
+}
+
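+/*
+ * Descriptive note (added): parses the pass-through pipeline arguments
+ * (dma_dst_offset, dma_src_offset, dma_size, dma_src_mask, dma_hash_offset,
+ * lb, swap) and validates that the DMA, hash, load-balancing and swap
+ * options are used in a consistent combination.
+ */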
+int
+pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
+ struct pipeline_params *params)
+{
+ uint32_t dma_dst_offset_present = 0;
+ uint32_t dma_src_offset_present = 0;
+ uint32_t dma_src_mask_present = 0;
+ char dma_mask_str[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2 + 1];
+ uint32_t dma_size_present = 0;
+ uint32_t dma_hash_offset_present = 0;
+ uint32_t dma_hash_lb_present = 0;
+ uint32_t i;
+
+ /* default values */
+ p->dma_enabled = 0;
+ p->dma_hash_enabled = 0;
+ p->dma_hash_lb_enabled = 0;
+ memset(p->dma_src_mask, 0xFF, sizeof(p->dma_src_mask));
+ p->swap_enabled = 0;
+ p->swap_n_fields = 0;
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ /* dma_dst_offset */
+ if (strcmp(arg_name, "dma_dst_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_dst_offset_present == 0, params->name,
+ arg_name);
+ dma_dst_offset_present = 1;
+
+ status = parser_read_uint32(&p->dma_dst_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_src_offset */
+ if (strcmp(arg_name, "dma_src_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_src_offset_present == 0, params->name,
+ arg_name);
+ dma_src_offset_present = 1;
+
+ status = parser_read_uint32(&p->dma_src_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_size */
+ if (strcmp(arg_name, "dma_size") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_size_present == 0, params->name,
+ arg_name);
+ dma_size_present = 1;
+
+ status = parser_read_uint32(&p->dma_size,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->dma_size != 0) &&
+ ((p->dma_size % 8) == 0)),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG(((status != -ERANGE) &&
+ (p->dma_size <=
+ PIPELINE_PASSTHROUGH_DMA_SIZE_MAX)),
+ params->name, arg_name, arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_src_mask */
+ if (strcmp(arg_name, "dma_src_mask") == 0) {
+ int mask_str_len = strlen(arg_value);
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_src_mask_present == 0,
+ params->name, arg_name);
+ dma_src_mask_present = 1;
+
+ PIPELINE_ARG_CHECK((mask_str_len <=
+ (PIPELINE_PASSTHROUGH_DMA_SIZE_MAX * 2)),
+ "Parse error in section \"%s\": entry "
+ "\"%s\" too long", params->name,
+ arg_name);
+
+ snprintf(dma_mask_str, mask_str_len + 1,
+ "%s", arg_value);
+
+ p->dma_enabled = 1;
+
+ continue;
+ }
+
+ /* dma_hash_offset */
+ if (strcmp(arg_name, "dma_hash_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_hash_offset_present == 0,
+ params->name, arg_name);
+ dma_hash_offset_present = 1;
+
+ status = parser_read_uint32(&p->dma_hash_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ p->dma_hash_enabled = 1;
+
+ continue;
+ }
+
+ /* load_balance mode */
+ if (strcmp(arg_name, "lb") == 0) {
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dma_hash_lb_present == 0,
+ params->name, arg_name);
+ dma_hash_lb_present = 1;
+
+ if (strcmp(arg_value, "hash") &&
+ strcmp(arg_value, "HASH"))
+
+ PIPELINE_PARSE_ERR_INV_VAL(0,
+ params->name,
+ arg_name,
+ arg_value);
+
+ p->dma_hash_lb_enabled = 1;
+
+ continue;
+ }
+
+ /* swap */
+ if (strcmp(arg_name, "swap") == 0) {
+ uint32_t a, b, n_args;
+ int len;
+
+ n_args = sscanf(arg_value, "%" SCNu32 " %" SCNu32 "%n",
+ &a, &b, &len);
+ PIPELINE_PARSE_ERR_INV_VAL(((n_args == 2) &&
+ ((size_t) len == strlen(arg_value))),
+ params->name, arg_name, arg_value);
+
+ p->swap_field0_offset[p->swap_n_fields] = a;
+ p->swap_field1_offset[p->swap_n_fields] = b;
+ p->swap_n_fields++;
+ p->swap_enabled = 1;
+
+ continue;
+ }
+
+ /* any other */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ /* Check correlations between arguments */
+ PIPELINE_ARG_CHECK((p->dma_enabled + p->swap_enabled < 2),
+ "Parse error in section \"%s\": DMA and SWAP actions are both enabled",
+ params->name);
+ PIPELINE_ARG_CHECK((dma_dst_offset_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_dst_offset\"", params->name);
+ PIPELINE_ARG_CHECK((dma_src_offset_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_src_offset\"", params->name);
+ PIPELINE_ARG_CHECK((dma_size_present == p->dma_enabled),
+ "Parse error in section \"%s\": missing entry "
+ "\"dma_size\"", params->name);
+ PIPELINE_ARG_CHECK((p->dma_hash_enabled <= p->dma_enabled),
+ "Parse error in section \"%s\": missing all DMA entries",
+ params->name);
+ PIPELINE_ARG_CHECK((p->dma_hash_lb_enabled <= p->dma_hash_enabled),
+		"Parse error in section \"%s\": missing all DMA hash entries",
+ params->name);
+
+ if (dma_src_mask_present) {
+ uint32_t dma_size = p->dma_size;
+ int status;
+
+ PIPELINE_ARG_CHECK((strlen(dma_mask_str) ==
+ (dma_size * 2)), "Parse error in section "
+ "\"%s\": dma_src_mask should have exactly %u hex "
+ "digits", params->name, (dma_size * 2));
+
+ status = parse_hex_string(dma_mask_str, p->dma_src_mask,
+ &p->dma_size);
+
+ PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
+ (dma_size == p->dma_size)), params->name,
+ "dma_src_mask", dma_mask_str);
+ }
+
+ if (p->dma_hash_lb_enabled)
+ PIPELINE_ARG_CHECK((params->n_ports_out > 1),
+ "Parse error in section \"%s\": entry \"lb\" not "
+ "allowed for single output port pipeline",
+ params->name);
+ else
+ PIPELINE_ARG_CHECK(((params->n_ports_in >= params->n_ports_out)
+ && ((params->n_ports_in % params->n_ports_out) == 0)),
+ "Parse error in section \"%s\": n_ports_in needs to be "
+ "a multiple of n_ports_out (lb mode disabled)",
+ params->name);
+
+ return 0;
+}
+
+static rte_table_hash_op_hash
+get_hash_function(struct pipeline_passthrough *p)
+{
+ switch (p->params.dma_size) {
+
+ case 8: return hash_default_key8;
+ case 16: return hash_default_key16;
+ case 24: return hash_default_key24;
+ case 32: return hash_default_key32;
+ case 40: return hash_default_key40;
+ case 48: return hash_default_key48;
+ case 56: return hash_default_key56;
+ case 64: return hash_default_key64;
+ default: return NULL;
+ }
+}
+
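+/*
+ * Descriptive note (added): converts the user-specified swap field byte
+ * ranges into the flattened 64-bit (offset0, offset1, mask) tuples consumed
+ * by the swap work functions; a partial trailing chunk gets a mask covering
+ * only its remaining bytes.
+ */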
+static int
+pipeline_passthrough_swap_convert(struct pipeline_passthrough *p)
+{
+ uint32_t i;
+
+ p->swap_n_fields = 0;
+
+ for (i = 0; i < p->params.swap_n_fields; i++) {
+ uint32_t offset0 = p->params.swap_field0_offset[i];
+ uint32_t offset1 = p->params.swap_field1_offset[i];
+ uint32_t size = offset1 - offset0;
+ uint32_t j;
+
+ /* Check */
+ if ((offset0 >= offset1) ||
+ (size > PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX) ||
+ (p->swap_n_fields >= SWAP_DIM))
+ return -1;
+
+ for (j = 0; j < (size / sizeof(uint64_t)); j++) {
+ p->swap_field0_offset[p->swap_n_fields] = offset0;
+ p->swap_field1_offset[p->swap_n_fields] = offset1;
+ p->swap_field_mask[p->swap_n_fields] = UINT64_MAX;
+ p->swap_n_fields++;
+ offset0 += sizeof(uint64_t);
+ offset1 += sizeof(uint64_t);
+ }
+ if (size % sizeof(uint64_t)) {
+ uint32_t n_bits = (size % sizeof(uint64_t)) * 8;
+
+ p->swap_field0_offset[p->swap_n_fields] = offset0;
+ p->swap_field1_offset[p->swap_n_fields] = offset1;
+ p->swap_field_mask[p->swap_n_fields] =
+ RTE_LEN2MASK(n_bits, uint64_t);
+ p->swap_n_fields++;
+ }
+ }
+
+ return 0;
+}
+
+static void*
+pipeline_passthrough_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_passthrough *p_pt;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_passthrough));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+	if (p == NULL)
+		return NULL;
+	p_pt = (struct pipeline_passthrough *) p;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Pass-through");
+
+ /* Parse arguments */
+	if (pipeline_passthrough_parse_args(&p_pt->params, params)) {
+		rte_free(p);
+		return NULL;
+	}
+	if (pipeline_passthrough_swap_convert(p_pt)) {
+		rte_free(p);
+		return NULL;
+	}
+ p_pt->f_hash = get_hash_function(p_pt);
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = "PASS-THROUGH",
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+ p->n_tables = p->n_ports_in;
+
+	/* Input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = get_port_in_ah(p_pt),
+ .arg_ah = p_pt,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_stub_ops,
+ .arg_create = NULL,
+ .f_action_hit = NULL,
+ .f_action_miss = NULL,
+ .arg_ah = NULL,
+ .action_data_size = 0,
+ };
+
+ int status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Add entries to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ uint32_t port_out_id = (p_pt->params.dma_hash_lb_enabled == 0) ?
+ (i / (p->n_ports_in / p->n_ports_out)) :
+ 0;
+
+ struct rte_pipeline_table_entry default_entry = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[port_out_id]},
+ };
+
+ struct rte_pipeline_table_entry *default_entry_ptr;
+
+ int status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[i],
+ &default_entry,
+ &default_entry_ptr);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+
+ return p;
+}
+
+static int
+pipeline_passthrough_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_passthrough_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+struct pipeline_be_ops pipeline_passthrough_be_ops = {
+ .f_init = pipeline_passthrough_init,
+ .f_free = pipeline_passthrough_free,
+ .f_run = NULL,
+ .f_timer = pipeline_passthrough_timer,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h
new file mode 100644
index 00000000..decb2684
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_passthrough_be.h
@@ -0,0 +1,73 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
+#define __INCLUDE_PIPELINE_PASSTHROUGH_BE_H__
+
+#include "pipeline_common_be.h"
+
+#define PIPELINE_PASSTHROUGH_DMA_SIZE_MAX 64
+
+#ifndef PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX
+#define PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX 8
+#endif
+
+#ifndef PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX
+#define PIPELINE_PASSTHROUGH_SWAP_FIELD_SIZE_MAX 16
+#endif
+
+struct pipeline_passthrough_params {
+ uint32_t dma_enabled;
+ uint32_t dma_dst_offset;
+ uint32_t dma_src_offset;
+ uint8_t dma_src_mask[PIPELINE_PASSTHROUGH_DMA_SIZE_MAX];
+ uint32_t dma_size;
+
+ uint32_t dma_hash_enabled;
+ uint32_t dma_hash_offset;
+
+ uint32_t dma_hash_lb_enabled;
+
+ uint32_t swap_enabled;
+ uint32_t swap_field0_offset[PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX];
+ uint32_t swap_field1_offset[PIPELINE_PASSTHROUGH_SWAP_N_FIELDS_MAX];
+ uint32_t swap_n_fields;
+};
+
+int
+pipeline_passthrough_parse_args(struct pipeline_passthrough_params *p,
+ struct pipeline_params *params);
+
+extern struct pipeline_be_ops pipeline_passthrough_be_ops;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing.c
new file mode 100644
index 00000000..3deaff9c
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing.c
@@ -0,0 +1,1642 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+
+#include "app.h"
+#include "pipeline_common_fe.h"
+#include "pipeline_routing.h"
+#include "parser.h"
+
+struct app_pipeline_routing_route {
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data data;
+ void *entry_ptr;
+
+ TAILQ_ENTRY(app_pipeline_routing_route) node;
+};
+
+struct app_pipeline_routing_arp_entry {
+ struct pipeline_routing_arp_key key;
+ struct ether_addr macaddr;
+ void *entry_ptr;
+
+ TAILQ_ENTRY(app_pipeline_routing_arp_entry) node;
+};
+
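+/*
+ * Descriptive note (added): front-end (master thread) view of a routing
+ * pipeline: shadow lists of the routes and ARP entries installed in the
+ * back-end tables, the default route/ARP entry state, and the LINK
+ * associated with each output port.
+ */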
+struct pipeline_routing {
+ /* Parameters */
+ struct app_params *app;
+ uint32_t pipeline_id;
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ struct pipeline_routing_params rp;
+
+ /* Links */
+ uint32_t link_id[PIPELINE_MAX_PORT_OUT];
+
+ /* Routes */
+ TAILQ_HEAD(, app_pipeline_routing_route) routes;
+ uint32_t n_routes;
+
+ uint32_t default_route_present;
+ uint32_t default_route_port_id;
+ void *default_route_entry_ptr;
+
+ /* ARP entries */
+ TAILQ_HEAD(, app_pipeline_routing_arp_entry) arp_entries;
+ uint32_t n_arp_entries;
+
+ uint32_t default_arp_entry_present;
+ uint32_t default_arp_entry_port_id;
+ void *default_arp_entry_ptr;
+};
+
+static int
+app_pipeline_routing_find_link(struct pipeline_routing *p,
+ uint32_t link_id,
+ uint32_t *port_id)
+{
+ uint32_t i;
+
+ for (i = 0; i < p->n_ports_out; i++)
+ if (p->link_id[i] == link_id) {
+ *port_id = i;
+ return 0;
+ }
+
+ return -1;
+}
+
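+/*
+ * Descriptive note (added): LINK up/down callback. On link up, it installs
+ * a route for the directly attached subnet (when ARP is enabled) and a /32
+ * local termination route for the link IP address; on link down, it removes
+ * them again.
+ */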
+static void
+app_pipeline_routing_link_op(__rte_unused struct app_params *app,
+ uint32_t link_id,
+ uint32_t up,
+ void *arg)
+{
+ struct pipeline_routing_route_key key0, key1;
+ struct pipeline_routing *p = arg;
+ struct app_link_params *lp;
+ uint32_t port_id, netmask;
+ int status;
+
+ if (app == NULL)
+ return;
+
+ APP_PARAM_FIND_BY_ID(app->link_params, "LINK", link_id, lp);
+ if (lp == NULL)
+ return;
+
+ status = app_pipeline_routing_find_link(p,
+ link_id,
+ &port_id);
+ if (status)
+ return;
+
+ netmask = (~0U) << (32 - lp->depth);
+
+ /* Local network (directly attached network) */
+ key0.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key0.key.ipv4.ip = lp->ip & netmask;
+ key0.key.ipv4.depth = lp->depth;
+
+ /* Local termination */
+ key1.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key1.key.ipv4.ip = lp->ip;
+ key1.key.ipv4.depth = 32;
+
+ if (up) {
+ struct pipeline_routing_route_data data0, data1;
+
+ /* Local network (directly attached network) */
+ memset(&data0, 0, sizeof(data0));
+ data0.flags = PIPELINE_ROUTING_ROUTE_LOCAL |
+ PIPELINE_ROUTING_ROUTE_ARP;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ)
+ data0.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) {
+ data0.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
+ data0.l2.mpls.n_labels = 1;
+ }
+ data0.port_id = port_id;
+
+ if (p->rp.n_arp_entries)
+ app_pipeline_routing_add_route(app,
+ p->pipeline_id,
+ &key0,
+ &data0);
+
+ /* Local termination */
+ memset(&data1, 0, sizeof(data1));
+ data1.flags = PIPELINE_ROUTING_ROUTE_LOCAL |
+ PIPELINE_ROUTING_ROUTE_ARP;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ)
+ data1.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
+ if (p->rp.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) {
+ data1.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
+ data1.l2.mpls.n_labels = 1;
+ }
+ data1.port_id = p->rp.port_local_dest;
+
+ app_pipeline_routing_add_route(app,
+ p->pipeline_id,
+ &key1,
+ &data1);
+ } else {
+ /* Local network (directly attached network) */
+ if (p->rp.n_arp_entries)
+ app_pipeline_routing_delete_route(app,
+ p->pipeline_id,
+ &key0);
+
+ /* Local termination */
+ app_pipeline_routing_delete_route(app,
+ p->pipeline_id,
+ &key1);
+ }
+}
+
+static int
+app_pipeline_routing_set_link_op(
+ struct app_params *app,
+ struct pipeline_routing *p)
+{
+ uint32_t port_id;
+
+ for (port_id = 0; port_id < p->n_ports_out; port_id++) {
+ struct app_link_params *link;
+ uint32_t link_id;
+ int status;
+
+ link = app_pipeline_track_pktq_out_to_link(app,
+ p->pipeline_id,
+ port_id);
+ if (link == NULL)
+ continue;
+
+ link_id = link - app->link_params;
+ p->link_id[port_id] = link_id;
+
+ status = app_link_set_op(app,
+ link_id,
+ p->pipeline_id,
+ app_pipeline_routing_link_op,
+ (void *) p);
+ if (status)
+ return status;
+ }
+
+ return 0;
+}
+
+static void *
+app_pipeline_routing_init(struct pipeline_params *params,
+ void *arg)
+{
+ struct app_params *app = (struct app_params *) arg;
+ struct pipeline_routing *p;
+ uint32_t pipeline_id, size;
+ int status;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ APP_PARAM_GET_ID(params, "PIPELINE", pipeline_id);
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_routing));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (p == NULL)
+ return NULL;
+
+ /* Initialization */
+ p->app = app;
+ p->pipeline_id = pipeline_id;
+ p->n_ports_in = params->n_ports_in;
+ p->n_ports_out = params->n_ports_out;
+
+ status = pipeline_routing_parse_args(&p->rp, params);
+ if (status) {
+ rte_free(p);
+ return NULL;
+ }
+ TAILQ_INIT(&p->routes);
+ p->n_routes = 0;
+
+ TAILQ_INIT(&p->arp_entries);
+ p->n_arp_entries = 0;
+
+ app_pipeline_routing_set_link_op(app, p);
+
+ return p;
+}
+
+static int
+app_pipeline_routing_post_init(void *pipeline)
+{
+ struct pipeline_routing *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ return app_pipeline_routing_set_macaddr(p->app, p->pipeline_id);
+}
+
+static int
+app_pipeline_routing_free(void *pipeline)
+{
+ struct pipeline_routing *p = pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ while (!TAILQ_EMPTY(&p->routes)) {
+ struct app_pipeline_routing_route *route;
+
+ route = TAILQ_FIRST(&p->routes);
+ TAILQ_REMOVE(&p->routes, route, node);
+ rte_free(route);
+ }
+
+ while (!TAILQ_EMPTY(&p->arp_entries)) {
+ struct app_pipeline_routing_arp_entry *arp_entry;
+
+ arp_entry = TAILQ_FIRST(&p->arp_entries);
+ TAILQ_REMOVE(&p->arp_entries, arp_entry, node);
+ rte_free(arp_entry);
+ }
+
+ rte_free(p);
+ return 0;
+}
+
+static struct app_pipeline_routing_route *
+app_pipeline_routing_find_route(struct pipeline_routing *p,
+ const struct pipeline_routing_route_key *key)
+{
+ struct app_pipeline_routing_route *it, *found;
+
+ found = NULL;
+ TAILQ_FOREACH(it, &p->routes, node) {
+ if ((key->type == it->key.type) &&
+ (key->key.ipv4.ip == it->key.key.ipv4.ip) &&
+ (key->key.ipv4.depth == it->key.key.ipv4.depth)) {
+ found = it;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static struct app_pipeline_routing_arp_entry *
+app_pipeline_routing_find_arp_entry(struct pipeline_routing *p,
+ const struct pipeline_routing_arp_key *key)
+{
+ struct app_pipeline_routing_arp_entry *it, *found;
+
+ found = NULL;
+ TAILQ_FOREACH(it, &p->arp_entries, node) {
+ if ((key->type == it->key.type) &&
+ (key->key.ipv4.port_id == it->key.key.ipv4.port_id) &&
+ (key->key.ipv4.ip == it->key.key.ipv4.ip)) {
+ found = it;
+ break;
+ }
+ }
+
+ return found;
+}
+
+static void
+print_route(const struct app_pipeline_routing_route *route)
+{
+ if (route->key.type == PIPELINE_ROUTING_ROUTE_IPV4) {
+ const struct pipeline_routing_route_key_ipv4 *key =
+ &route->key.key.ipv4;
+
+ printf("IP Prefix = %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32 "/%" PRIu32
+ " => (Port = %" PRIu32,
+
+ (key->ip >> 24) & 0xFF,
+ (key->ip >> 16) & 0xFF,
+ (key->ip >> 8) & 0xFF,
+ key->ip & 0xFF,
+
+ key->depth,
+ route->data.port_id);
+
+ if (route->data.flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ printf(", Local");
+ else if (route->data.flags & PIPELINE_ROUTING_ROUTE_ARP)
+ printf(
+ ", Next Hop IP = %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32,
+
+ (route->data.ethernet.ip >> 24) & 0xFF,
+ (route->data.ethernet.ip >> 16) & 0xFF,
+ (route->data.ethernet.ip >> 8) & 0xFF,
+ route->data.ethernet.ip & 0xFF);
+ else
+ printf(
+ ", Next Hop HWaddress = %02" PRIx32
+ ":%02" PRIx32 ":%02" PRIx32
+ ":%02" PRIx32 ":%02" PRIx32
+ ":%02" PRIx32,
+
+ route->data.ethernet.macaddr.addr_bytes[0],
+ route->data.ethernet.macaddr.addr_bytes[1],
+ route->data.ethernet.macaddr.addr_bytes[2],
+ route->data.ethernet.macaddr.addr_bytes[3],
+ route->data.ethernet.macaddr.addr_bytes[4],
+ route->data.ethernet.macaddr.addr_bytes[5]);
+
+ if (route->data.flags & PIPELINE_ROUTING_ROUTE_QINQ)
+ printf(", QinQ SVLAN = %" PRIu32 " CVLAN = %" PRIu32,
+ route->data.l2.qinq.svlan,
+ route->data.l2.qinq.cvlan);
+
+ if (route->data.flags & PIPELINE_ROUTING_ROUTE_MPLS) {
+ uint32_t i;
+
+ printf(", MPLS labels");
+ for (i = 0; i < route->data.l2.mpls.n_labels; i++)
+ printf(" %" PRIu32,
+ route->data.l2.mpls.labels[i]);
+ }
+
+ printf(")\n");
+ }
+}
+
+static void
+print_arp_entry(const struct app_pipeline_routing_arp_entry *entry)
+{
+ printf("(Port = %" PRIu32 ", IP = %" PRIu32 ".%" PRIu32
+ ".%" PRIu32 ".%" PRIu32
+ ") => HWaddress = %02" PRIx32 ":%02" PRIx32 ":%02" PRIx32
+ ":%02" PRIx32 ":%02" PRIx32 ":%02" PRIx32 "\n",
+
+ entry->key.key.ipv4.port_id,
+ (entry->key.key.ipv4.ip >> 24) & 0xFF,
+ (entry->key.key.ipv4.ip >> 16) & 0xFF,
+ (entry->key.key.ipv4.ip >> 8) & 0xFF,
+ entry->key.key.ipv4.ip & 0xFF,
+
+ entry->macaddr.addr_bytes[0],
+ entry->macaddr.addr_bytes[1],
+ entry->macaddr.addr_bytes[2],
+ entry->macaddr.addr_bytes[3],
+ entry->macaddr.addr_bytes[4],
+ entry->macaddr.addr_bytes[5]);
+}
+
+static int
+app_pipeline_routing_route_ls(struct app_params *app, uint32_t pipeline_id)
+{
+ struct pipeline_routing *p;
+ struct app_pipeline_routing_route *it;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -EINVAL;
+
+ TAILQ_FOREACH(it, &p->routes, node)
+ print_route(it);
+
+ if (p->default_route_present)
+ printf("Default route: port %" PRIu32 " (entry ptr = %p)\n",
+ p->default_route_port_id,
+ p->default_route_entry_ptr);
+ else
+ printf("Default: DROP\n");
+
+ return 0;
+}
+
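+/*
+ * Descriptive note (added): adds (or updates) a route. Validates the key
+ * and data, finds or allocates the front-end shadow entry, sends a
+ * ROUTE_ADD request to the back-end pipeline thread and, on success,
+ * commits the shadow entry to the local route list.
+ */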
+int
+app_pipeline_routing_add_route(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_route_key *key,
+ struct pipeline_routing_route_data *data)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_route_add_msg_req *req;
+ struct pipeline_routing_route_add_msg_rsp *rsp;
+
+ struct app_pipeline_routing_route *entry;
+
+ int new_entry;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (data == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ switch (key->type) {
+ case PIPELINE_ROUTING_ROUTE_IPV4:
+ {
+ uint32_t depth = key->key.ipv4.depth;
+ uint32_t netmask;
+
+ /* key */
+ if ((depth == 0) || (depth > 32))
+ return -1;
+
+ netmask = (~0U) << (32 - depth);
+ key->key.ipv4.ip &= netmask;
+
+ /* data */
+ if (data->port_id >= p->n_ports_out)
+ return -1;
+
+ /* Valid range of VLAN tags 12 bits */
+ if (data->flags & PIPELINE_ROUTING_ROUTE_QINQ)
+ if ((data->l2.qinq.svlan & 0xF000) ||
+ (data->l2.qinq.cvlan & 0xF000))
+ return -1;
+
+ /* Max number of MPLS labels supported */
+ if (data->flags & PIPELINE_ROUTING_ROUTE_MPLS) {
+ uint32_t i;
+
+ if (data->l2.mpls.n_labels >
+ PIPELINE_ROUTING_MPLS_LABELS_MAX)
+ return -1;
+
+ /* Max MPLS label value 20 bits */
+ for (i = 0; i < data->l2.mpls.n_labels; i++)
+ if (data->l2.mpls.labels[i] & 0xFFF00000)
+ return -1;
+ }
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ /* Find existing rule or allocate new rule */
+ entry = app_pipeline_routing_find_route(p, key);
+ new_entry = (entry == NULL);
+ if (entry == NULL) {
+ entry = rte_malloc(NULL, sizeof(*entry), RTE_CACHE_LINE_SIZE);
+
+ if (entry == NULL)
+ return -1;
+ }
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD;
+ memcpy(&req->key, key, sizeof(*key));
+ memcpy(&req->data, data, sizeof(*data));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ /* Read response and write entry */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL) ||
+ ((new_entry == 0) && (rsp->key_found == 0)) ||
+ ((new_entry == 1) && (rsp->key_found == 1))) {
+ app_msg_free(app, rsp);
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ memcpy(&entry->key, key, sizeof(*key));
+ memcpy(&entry->data, data, sizeof(*data));
+ entry->entry_ptr = rsp->entry_ptr;
+
+ /* Commit entry */
+ if (new_entry) {
+ TAILQ_INSERT_TAIL(&p->routes, entry, node);
+ p->n_routes++;
+ }
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+ return 0;
+}
+
+int
+app_pipeline_routing_delete_route(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_route_key *key)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_route_delete_msg_req *req;
+ struct pipeline_routing_route_delete_msg_rsp *rsp;
+
+ struct app_pipeline_routing_route *entry;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ switch (key->type) {
+ case PIPELINE_ROUTING_ROUTE_IPV4:
+ {
+ uint32_t depth = key->key.ipv4.depth;
+ uint32_t netmask;
+
+ /* key */
+ if ((depth == 0) || (depth > 32))
+ return -1;
+
+ netmask = (~0U) << (32 - depth);
+ key->key.ipv4.ip &= netmask;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ /* Find rule */
+ entry = app_pipeline_routing_find_route(p, key);
+ if (entry == NULL)
+ return 0;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL;
+ memcpy(&req->key, key, sizeof(*key));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status || !rsp->key_found) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Remove route */
+ TAILQ_REMOVE(&p->routes, entry, node);
+ p->n_routes--;
+ rte_free(entry);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_routing_add_default_route(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_route_add_default_msg_req *req;
+ struct pipeline_routing_route_add_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD_DEFAULT;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write route */
+ if (rsp->status || (rsp->entry_ptr == NULL)) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ p->default_route_port_id = port_id;
+ p->default_route_entry_ptr = rsp->entry_ptr;
+
+ /* Commit route */
+ p->default_route_present = 1;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_routing_delete_default_route(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_delete_default_msg_req *req;
+ struct pipeline_routing_arp_delete_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL_DEFAULT;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write route */
+ if (rsp->status) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Commit route */
+ p->default_route_present = 0;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+static int
+app_pipeline_routing_arp_ls(struct app_params *app, uint32_t pipeline_id)
+{
+ struct pipeline_routing *p;
+ struct app_pipeline_routing_arp_entry *it;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -EINVAL;
+
+ TAILQ_FOREACH(it, &p->arp_entries, node)
+ print_arp_entry(it);
+
+ if (p->default_arp_entry_present)
+ printf("Default entry: port %" PRIu32 " (entry ptr = %p)\n",
+ p->default_arp_entry_port_id,
+ p->default_arp_entry_ptr);
+ else
+ printf("Default: DROP\n");
+
+ return 0;
+}
+
+int
+app_pipeline_routing_add_arp_entry(struct app_params *app, uint32_t pipeline_id,
+ struct pipeline_routing_arp_key *key,
+ struct ether_addr *macaddr)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_add_msg_req *req;
+ struct pipeline_routing_arp_add_msg_rsp *rsp;
+
+ struct app_pipeline_routing_arp_entry *entry;
+
+ int new_entry;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL) ||
+ (macaddr == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ switch (key->type) {
+ case PIPELINE_ROUTING_ARP_IPV4:
+ {
+ uint32_t port_id = key->key.ipv4.port_id;
+
+ /* key */
+ if (port_id >= p->n_ports_out)
+ return -1;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ /* Find existing entry or allocate new */
+ entry = app_pipeline_routing_find_arp_entry(p, key);
+ new_entry = (entry == NULL);
+ if (entry == NULL) {
+ entry = rte_malloc(NULL, sizeof(*entry), RTE_CACHE_LINE_SIZE);
+
+ if (entry == NULL)
+ return -1;
+ }
+
+ /* Message buffer allocation */
+ req = app_msg_alloc(app);
+ if (req == NULL) {
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_ADD;
+ memcpy(&req->key, key, sizeof(*key));
+ ether_addr_copy(macaddr, &req->macaddr);
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL) {
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ /* Read response and write entry */
+ if (rsp->status ||
+ (rsp->entry_ptr == NULL) ||
+ ((new_entry == 0) && (rsp->key_found == 0)) ||
+ ((new_entry == 1) && (rsp->key_found == 1))) {
+ app_msg_free(app, rsp);
+ if (new_entry)
+ rte_free(entry);
+ return -1;
+ }
+
+ memcpy(&entry->key, key, sizeof(*key));
+ ether_addr_copy(macaddr, &entry->macaddr);
+ entry->entry_ptr = rsp->entry_ptr;
+
+ /* Commit entry */
+ if (new_entry) {
+ TAILQ_INSERT_TAIL(&p->arp_entries, entry, node);
+ p->n_arp_entries++;
+ }
+
+ /* Message buffer free */
+ app_msg_free(app, rsp);
+ return 0;
+}
+
+int
+app_pipeline_routing_delete_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_arp_key *key)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_delete_msg_req *req;
+ struct pipeline_routing_arp_delete_msg_rsp *rsp;
+
+ struct app_pipeline_routing_arp_entry *entry;
+
+ /* Check input arguments */
+ if ((app == NULL) ||
+ (key == NULL))
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -EINVAL;
+
+ switch (key->type) {
+ case PIPELINE_ROUTING_ARP_IPV4:
+ {
+ uint32_t port_id = key->key.ipv4.port_id;
+
+ /* key */
+ if (port_id >= p->n_ports_out)
+ return -1;
+ }
+ break;
+
+ default:
+ return -1;
+ }
+
+ /* Find rule */
+ entry = app_pipeline_routing_find_arp_entry(p, key);
+ if (entry == NULL)
+ return 0;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_DEL;
+ memcpy(&req->key, key, sizeof(*key));
+
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response */
+ if (rsp->status || !rsp->key_found) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ /* Remove entry */
+ TAILQ_REMOVE(&p->arp_entries, entry, node);
+ p->n_arp_entries--;
+ rte_free(entry);
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_routing_add_default_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_add_default_msg_req *req;
+ struct pipeline_routing_arp_add_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -1;
+
+ if (port_id >= p->n_ports_out)
+ return -1;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT;
+ req->port_id = port_id;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ /* Read response and write entry */
+ if (rsp->status || rsp->entry_ptr == NULL) {
+ app_msg_free(app, rsp);
+ return -1;
+ }
+
+ p->default_arp_entry_port_id = port_id;
+ p->default_arp_entry_ptr = rsp->entry_ptr;
+
+ /* Commit entry */
+ p->default_arp_entry_present = 1;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct pipeline_routing *p;
+
+ struct pipeline_routing_arp_delete_default_msg_req *req;
+ struct pipeline_routing_arp_delete_default_msg_rsp *rsp;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -1;
+
+ p = app_pipeline_data_fe(app, pipeline_id, &pipeline_routing);
+ if (p == NULL)
+ return -EINVAL;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT;
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -ETIMEDOUT;
+
+ /* Read response and write entry */
+	if (rsp->status) {
+		int status = rsp->status;
+
+		app_msg_free(app, rsp);
+		return status;
+	}
+
+ /* Commit entry */
+ p->default_arp_entry_present = 0;
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+int
+app_pipeline_routing_set_macaddr(struct app_params *app,
+ uint32_t pipeline_id)
+{
+ struct app_pipeline_params *p;
+ struct pipeline_routing_set_macaddr_msg_req *req;
+ struct pipeline_routing_set_macaddr_msg_rsp *rsp;
+ uint32_t port_id;
+
+ /* Check input arguments */
+ if (app == NULL)
+ return -EINVAL;
+
+ APP_PARAM_FIND_BY_ID(app->pipeline_params, "PIPELINE", pipeline_id, p);
+ if (p == NULL)
+ return -EINVAL;
+
+ /* Allocate and write request */
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -ENOMEM;
+
+ req->type = PIPELINE_MSG_REQ_CUSTOM;
+ req->subtype = PIPELINE_ROUTING_MSG_REQ_SET_MACADDR;
+
+ memset(req->macaddr, 0, sizeof(req->macaddr));
+ for (port_id = 0; port_id < p->n_pktq_out; port_id++) {
+ struct app_link_params *link;
+
+ link = app_pipeline_track_pktq_out_to_link(app,
+ pipeline_id,
+ port_id);
+ if (link)
+ req->macaddr[port_id] = link->mac_addr;
+ }
+
+ /* Send request and wait for response */
+ rsp = app_msg_send_recv(app, pipeline_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -ETIMEDOUT;
+
+ /* Read response and write entry */
+	if (rsp->status) {
+		int status = rsp->status;
+
+		app_msg_free(app, rsp);
+		return status;
+	}
+
+ /* Free response */
+ app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * route
+ *
+ * route add (ARP = ON/OFF, MPLS = ON/OFF, QINQ = ON/OFF):
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr> qinq <svlan> <cvlan>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr> qinq <svlan> <cvlan>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhmacaddr> mpls <mpls labels>
+ * p <pipelineid> route add <ipaddr> <depth> port <portid> ether <nhipaddr> mpls <mpls labels>
+ *
+ * route add default:
+ * p <pipelineid> route add default <portid>
+ *
+ * route del:
+ * p <pipelineid> route del <ipaddr> <depth>
+ *
+ * route del default:
+ * p <pipelineid> route del default
+ *
+ * route ls:
+ * p <pipelineid> route ls
+ */
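+
+/*
+ * Illustrative invocations only (not part of the original help text),
+ * following the syntax above:
+ *   p 1 route add 10.0.0.0 24 port 0 ether a0:b1:c2:d3:e4:f5
+ *   p 1 route add 10.1.0.0 16 port 1 ether 192.168.0.1
+ *   p 1 route add default 2
+ *   p 1 route del 10.0.0.0 24
+ *   p 1 route ls
+ */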
+
+struct cmd_route_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t route_string;
+ cmdline_multi_string_t multi_string;
+};
+
+static void
+cmd_route_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_route_result *params = parsed_result;
+ struct app_params *app = data;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
+
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status != 0) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "route");
+ return;
+ }
+
+ /* route add */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_route_key key;
+ struct pipeline_routing_route_data route_data;
+ struct in_addr ipv4, nh_ipv4;
+ struct ether_addr mac_addr;
+ uint32_t depth, port_id, svlan, cvlan, i;
+ uint32_t mpls_labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
+ uint32_t n_labels = RTE_DIM(mpls_labels);
+
+ memset(&key, 0, sizeof(key));
+ memset(&route_data, 0, sizeof(route_data));
+
+ if (n_tokens < 7) {
+ printf(CMD_MSG_NOT_ENOUGH_ARGS, "route add");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[1], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
+
+ if (parser_read_uint32(&depth, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "depth");
+ return;
+ }
+
+ if (strcmp(tokens[3], "port")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "port");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[4])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ if (strcmp(tokens[5], "ether")) {
+ printf(CMD_MSG_ARG_NOT_FOUND, "ether");
+ return;
+ }
+
+ if (parse_mac_addr(tokens[6], &mac_addr)) {
+ if (parse_ipv4_addr(tokens[6], &nh_ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "nhmacaddr or nhipaddr");
+ return;
+ }
+
+ route_data.flags |= PIPELINE_ROUTING_ROUTE_ARP;
+ }
+
+ if (n_tokens > 7) {
+ if (strcmp(tokens[7], "mpls") == 0) {
+ if (n_tokens != 9) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route add mpls");
+ return;
+ }
+
+ if (parse_mpls_labels(tokens[8], mpls_labels, &n_labels)) {
+ printf(CMD_MSG_INVALID_ARG, "mpls labels");
+ return;
+ }
+
+ route_data.flags |= PIPELINE_ROUTING_ROUTE_MPLS;
+ } else if (strcmp(tokens[7], "qinq") == 0) {
+ if (n_tokens != 10) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route add qinq");
+ return;
+ }
+
+ if (parser_read_uint32(&svlan, tokens[8])) {
+ printf(CMD_MSG_INVALID_ARG, "svlan");
+ return;
+ }
+ if (parser_read_uint32(&cvlan, tokens[9])) {
+ printf(CMD_MSG_INVALID_ARG, "cvlan");
+ return;
+ }
+
+ route_data.flags |= PIPELINE_ROUTING_ROUTE_QINQ;
+ } else {
+ printf(CMD_MSG_ARG_NOT_FOUND, "mpls or qinq");
+ return;
+ }
+ }
+
+ switch (route_data.flags) {
+ case 0:
+ route_data.port_id = port_id;
+ route_data.ethernet.macaddr = mac_addr;
+ break;
+
+ case PIPELINE_ROUTING_ROUTE_ARP:
+ route_data.port_id = port_id;
+ route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
+ break;
+
+ case PIPELINE_ROUTING_ROUTE_MPLS:
+ route_data.port_id = port_id;
+ route_data.ethernet.macaddr = mac_addr;
+ for (i = 0; i < n_labels; i++)
+ route_data.l2.mpls.labels[i] = mpls_labels[i];
+ route_data.l2.mpls.n_labels = n_labels;
+ break;
+
+ case PIPELINE_ROUTING_ROUTE_MPLS | PIPELINE_ROUTING_ROUTE_ARP:
+ route_data.port_id = port_id;
+ route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
+ for (i = 0; i < n_labels; i++)
+ route_data.l2.mpls.labels[i] = mpls_labels[i];
+ route_data.l2.mpls.n_labels = n_labels;
+ break;
+
+ case PIPELINE_ROUTING_ROUTE_QINQ:
+ route_data.port_id = port_id;
+ route_data.ethernet.macaddr = mac_addr;
+ route_data.l2.qinq.svlan = svlan;
+ route_data.l2.qinq.cvlan = cvlan;
+ break;
+
+ case PIPELINE_ROUTING_ROUTE_QINQ | PIPELINE_ROUTING_ROUTE_ARP:
+ default:
+ route_data.port_id = port_id;
+ route_data.ethernet.ip = rte_be_to_cpu_32(nh_ipv4.s_addr);
+ route_data.l2.qinq.svlan = svlan;
+ route_data.l2.qinq.cvlan = cvlan;
+ break;
+ }
+
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+ key.key.ipv4.depth = depth;
+
+ status = app_pipeline_routing_add_route(app,
+ params->p,
+ &key,
+ &route_data);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route add");
+
+ return;
+ } /* route add */
+
+ /* route add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route add default");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ status = app_pipeline_routing_add_default_route(app,
+ params->p,
+ port_id);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route add default");
+
+ return;
+ } /* route add default */
+
+	/* route del */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_route_key key;
+ struct in_addr ipv4;
+ uint32_t depth;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route del");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[1], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
+
+ if (parser_read_uint32(&depth, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "depth");
+ return;
+ }
+
+ key.type = PIPELINE_ROUTING_ROUTE_IPV4;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+ key.key.ipv4.depth = depth;
+
+ status = app_pipeline_routing_delete_route(app, params->p, &key);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route del");
+
+ return;
+ } /* route del */
+
+ /* route del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route del default");
+ return;
+ }
+
+ status = app_pipeline_routing_delete_default_route(app,
+ params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route del default");
+
+ return;
+ } /* route del default */
+
+ /* route ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "route ls");
+ return;
+ }
+
+ status = app_pipeline_routing_route_ls(app, params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "route ls");
+
+ return;
+ } /* route ls */
+
+ printf(CMD_MSG_MISMATCH_ARGS, "route");
+}
+
+static cmdline_parse_token_string_t cmd_route_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_route_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_route_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_route_route_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_result, route_string, "route");
+
+static cmdline_parse_token_string_t cmd_route_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_route_result, multi_string,
+ TOKEN_STRING_MULTI);
+
+static cmdline_parse_inst_t cmd_route = {
+ .f = cmd_route_parsed,
+ .data = NULL,
+ .help_str = "route add / add default / del / del default / ls",
+ .tokens = {
+ (void *)&cmd_route_p_string,
+ (void *)&cmd_route_p,
+ (void *)&cmd_route_route_string,
+ (void *)&cmd_route_multi_string,
+ NULL,
+ },
+};
+
+/*
+ * arp
+ *
+ * arp add:
+ * p <pipelineid> arp add <portid> <ipaddr> <macaddr>
+ *
+ * arp add default:
+ * p <pipelineid> arp add default <portid>
+ *
+ * arp del:
+ * p <pipelineid> arp del <portid> <ipaddr>
+ *
+ * arp del default:
+ * p <pipelineid> arp del default
+ *
+ * arp ls:
+ * p <pipelineid> arp ls
+ */
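+
+/*
+ * Illustrative invocations only (not part of the original help text),
+ * following the syntax above:
+ *   p 1 arp add 0 192.168.0.1 a0:b1:c2:d3:e4:f5
+ *   p 1 arp add default 2
+ *   p 1 arp del 0 192.168.0.1
+ *   p 1 arp ls
+ */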
+
+struct cmd_arp_result {
+ cmdline_fixed_string_t p_string;
+ uint32_t p;
+ cmdline_fixed_string_t arp_string;
+ cmdline_multi_string_t multi_string;
+};
+
+static void
+cmd_arp_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_arp_result *params = parsed_result;
+ struct app_params *app = data;
+
+ char *tokens[16];
+ uint32_t n_tokens = RTE_DIM(tokens);
+ int status;
+
+ status = parse_tokenize_string(params->multi_string, tokens, &n_tokens);
+ if (status != 0) {
+ printf(CMD_MSG_TOO_MANY_ARGS, "arp");
+ return;
+ }
+
+ /* arp add */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_arp_key key;
+ struct in_addr ipv4;
+ struct ether_addr mac_addr;
+ uint32_t port_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 4) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp add");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[2], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
+
+ if (parse_mac_addr(tokens[3], &mac_addr)) {
+ printf(CMD_MSG_INVALID_ARG, "macaddr");
+ return;
+ }
+
+ key.type = PIPELINE_ROUTING_ARP_IPV4;
+ key.key.ipv4.port_id = port_id;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+
+ status = app_pipeline_routing_add_arp_entry(app,
+ params->p,
+ &key,
+ &mac_addr);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp add");
+
+ return;
+ } /* arp add */
+
+ /* arp add default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "add") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ uint32_t port_id;
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp add default");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[2])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ status = app_pipeline_routing_add_default_arp_entry(app,
+ params->p,
+ port_id);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp add default");
+
+ return;
+ } /* arp add default */
+
+	/* arp del */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ strcmp(tokens[1], "default")) {
+ struct pipeline_routing_arp_key key;
+ struct in_addr ipv4;
+ uint32_t port_id;
+
+ memset(&key, 0, sizeof(key));
+
+ if (n_tokens != 3) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp del");
+ return;
+ }
+
+ if (parser_read_uint32(&port_id, tokens[1])) {
+ printf(CMD_MSG_INVALID_ARG, "portid");
+ return;
+ }
+
+ if (parse_ipv4_addr(tokens[2], &ipv4)) {
+ printf(CMD_MSG_INVALID_ARG, "ipaddr");
+ return;
+ }
+
+ key.type = PIPELINE_ROUTING_ARP_IPV4;
+ key.key.ipv4.ip = rte_be_to_cpu_32(ipv4.s_addr);
+ key.key.ipv4.port_id = port_id;
+
+ status = app_pipeline_routing_delete_arp_entry(app,
+ params->p,
+ &key);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp del");
+
+ return;
+ } /* arp del */
+
+ /* arp del default */
+ if ((n_tokens >= 2) &&
+ (strcmp(tokens[0], "del") == 0) &&
+ (strcmp(tokens[1], "default") == 0)) {
+ if (n_tokens != 2) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp del default");
+ return;
+ }
+
+ status = app_pipeline_routing_delete_default_arp_entry(app,
+ params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp del default");
+
+ return;
+ } /* arp del default */
+
+ /* arp ls */
+ if ((n_tokens >= 1) && (strcmp(tokens[0], "ls") == 0)) {
+ if (n_tokens != 1) {
+ printf(CMD_MSG_MISMATCH_ARGS, "arp ls");
+ return;
+ }
+
+ status = app_pipeline_routing_arp_ls(app, params->p);
+ if (status != 0)
+ printf(CMD_MSG_FAIL, "arp ls");
+
+ return;
+ } /* arp ls */
+
+	printf(CMD_MSG_MISMATCH_ARGS, "arp");
+}
+
+static cmdline_parse_token_string_t cmd_arp_p_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_result, p_string, "p");
+
+static cmdline_parse_token_num_t cmd_arp_p =
+ TOKEN_NUM_INITIALIZER(struct cmd_arp_result, p, UINT32);
+
+static cmdline_parse_token_string_t cmd_arp_arp_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_result, arp_string, "arp");
+
+static cmdline_parse_token_string_t cmd_arp_multi_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_arp_result, multi_string,
+ TOKEN_STRING_MULTI);
+
+static cmdline_parse_inst_t cmd_arp = {
+ .f = cmd_arp_parsed,
+ .data = NULL,
+ .help_str = "arp add / add default / del / del default / ls",
+ .tokens = {
+ (void *)&cmd_arp_p_string,
+ (void *)&cmd_arp_p,
+ (void *)&cmd_arp_arp_string,
+ (void *)&cmd_arp_multi_string,
+ NULL,
+ },
+};
+
+static cmdline_parse_ctx_t pipeline_cmds[] = {
+ (cmdline_parse_inst_t *)&cmd_route,
+ (cmdline_parse_inst_t *)&cmd_arp,
+ NULL,
+};
+
+static struct pipeline_fe_ops pipeline_routing_fe_ops = {
+ .f_init = app_pipeline_routing_init,
+ .f_post_init = app_pipeline_routing_post_init,
+ .f_free = app_pipeline_routing_free,
+ .f_track = app_pipeline_track_default,
+ .cmds = pipeline_cmds,
+};
+
+struct pipeline_type pipeline_routing = {
+ .name = "ROUTING",
+ .be_ops = &pipeline_routing_be_ops,
+ .fe_ops = &pipeline_routing_fe_ops,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing.h
new file mode 100644
index 00000000..0197449b
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing.h
@@ -0,0 +1,100 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_ROUTING_H__
+#define __INCLUDE_PIPELINE_ROUTING_H__
+
+#include "pipeline.h"
+#include "pipeline_routing_be.h"
+
+/*
+ * Route
+ */
+
+int
+app_pipeline_routing_add_route(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_route_key *key,
+ struct pipeline_routing_route_data *data);
+
+int
+app_pipeline_routing_delete_route(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_route_key *key);
+
+int
+app_pipeline_routing_add_default_route(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_routing_delete_default_route(struct app_params *app,
+ uint32_t pipeline_id);
+
+/*
+ * ARP
+ */
+
+int
+app_pipeline_routing_add_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_arp_key *key,
+ struct ether_addr *macaddr);
+
+int
+app_pipeline_routing_delete_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ struct pipeline_routing_arp_key *key);
+
+int
+app_pipeline_routing_add_default_arp_entry(struct app_params *app,
+ uint32_t pipeline_id,
+ uint32_t port_id);
+
+int
+app_pipeline_routing_delete_default_arp_entry(struct app_params *app,
+ uint32_t pipeline_id);
+
+/*
+ * SETTINGS
+ */
+int
+app_pipeline_routing_set_macaddr(struct app_params *app,
+ uint32_t pipeline_id);
+
+/*
+ * Pipeline type
+ */
+extern struct pipeline_type pipeline_routing;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing_be.c b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing_be.c
new file mode 100644
index 00000000..21ac7888
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing_be.c
@@ -0,0 +1,1992 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ip.h>
+#include <rte_byteorder.h>
+#include <rte_table_lpm.h>
+#include <rte_table_hash.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_routing_be.h"
+#include "pipeline_actions_common.h"
+#include "parser.h"
+#include "hash_func.h"
+
+#define MPLS_LABEL(label, exp, s, ttl) \
+ (((((uint64_t) (label)) & 0xFFFFFLLU) << 12) | \
+ ((((uint64_t) (exp)) & 0x7LLU) << 9) | \
+ ((((uint64_t) (s)) & 0x1LLU) << 8) | \
+ (((uint64_t) (ttl)) & 0xFFLU))
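+
+/*
+ * MPLS_LABEL() packs one 32-bit MPLS shim header:
+ * Label (20 bits) | EXP (3 bits) | S (1 bit) | TTL (8 bits).
+ * For example, MPLS_LABEL(16, 0, 1, 64) evaluates to 0x10140,
+ * i.e. (16 << 12) | (1 << 8) | 64. The MPLS color-marking handlers
+ * below build a 64-bit mask from two such words (EXP field only) and
+ * OR it into the precomputed label stack slabs.
+ */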
+
+#define RTE_SCHED_PORT_HIERARCHY(subport, pipe, \
+ traffic_class, queue, color) \
+ ((((uint64_t) (queue)) & 0x3) | \
+ ((((uint64_t) (traffic_class)) & 0x3) << 2) | \
+ ((((uint64_t) (color)) & 0x3) << 4) | \
+ ((((uint64_t) (subport)) & 0xFFFF) << 16) | \
+ ((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))
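+
+/*
+ * Bit layout of the 64-bit sched word built by RTE_SCHED_PORT_HIERARCHY():
+ * queue [1:0], traffic class [3:2], color [5:4], subport [31:16],
+ * pipe [63:32]. The QinQ scheduling handlers below split it into
+ * pkt->hash.sched.lo / pkt->hash.sched.hi.
+ */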
+
+
+/* Network Byte Order (NBO) */
+#define SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr, ethertype) \
+ (((uint64_t) macaddr) | (((uint64_t) rte_cpu_to_be_16(ethertype)) << 48))
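+
+/*
+ * Packs the 48-bit source MAC address (already laid out in network byte
+ * order in the low six bytes of "macaddr") together with the big-endian
+ * Ethernet type in the two most significant bytes of a single 64-bit slab.
+ */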
+
+#ifndef PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s
+#define PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s 256
+#endif
+
+struct pipeline_routing {
+ struct pipeline p;
+ struct pipeline_routing_params params;
+ pipeline_msg_req_handler custom_handlers[PIPELINE_ROUTING_MSG_REQS];
+ uint64_t macaddr[PIPELINE_MAX_PORT_OUT];
+} __rte_cache_aligned;
+
+/*
+ * Message handlers
+ */
+static void *
+pipeline_routing_msg_req_custom_handler(struct pipeline *p, void *msg);
+
+static pipeline_msg_req_handler handlers[] = {
+ [PIPELINE_MSG_REQ_PING] =
+ pipeline_msg_req_ping_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_IN] =
+ pipeline_msg_req_stats_port_in_handler,
+ [PIPELINE_MSG_REQ_STATS_PORT_OUT] =
+ pipeline_msg_req_stats_port_out_handler,
+ [PIPELINE_MSG_REQ_STATS_TABLE] =
+ pipeline_msg_req_stats_table_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_ENABLE] =
+ pipeline_msg_req_port_in_enable_handler,
+ [PIPELINE_MSG_REQ_PORT_IN_DISABLE] =
+ pipeline_msg_req_port_in_disable_handler,
+ [PIPELINE_MSG_REQ_CUSTOM] =
+ pipeline_routing_msg_req_custom_handler,
+};
+
+static void *
+pipeline_routing_msg_req_route_add_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_route_del_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_route_add_default_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_route_del_default_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_arp_add_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_arp_del_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_arp_add_default_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p,
+ void *msg);
+
+static void *
+pipeline_routing_msg_req_set_macaddr_handler(struct pipeline *p,
+ void *msg);
+
+static pipeline_msg_req_handler custom_handlers[] = {
+ [PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD] =
+ pipeline_routing_msg_req_route_add_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL] =
+ pipeline_routing_msg_req_route_del_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD_DEFAULT] =
+ pipeline_routing_msg_req_route_add_default_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL_DEFAULT] =
+ pipeline_routing_msg_req_route_del_default_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ARP_ADD] =
+ pipeline_routing_msg_req_arp_add_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ARP_DEL] =
+ pipeline_routing_msg_req_arp_del_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT] =
+ pipeline_routing_msg_req_arp_add_default_handler,
+ [PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT] =
+ pipeline_routing_msg_req_arp_del_default_handler,
+ [PIPELINE_ROUTING_MSG_REQ_SET_MACADDR] =
+ pipeline_routing_msg_req_set_macaddr_handler,
+};
+
+/*
+ * Routing table
+ */
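+/*
+ * Each route entry precomputes the full output L2 encapsulation (Ethernet,
+ * optionally with QinQ tags or an MPLS label stack) as up to four 64-bit
+ * slabs. On lookup hit, the action handler copies the slabs to the
+ * precomputed slab_offset[] locations in the packet and adjusts data_off,
+ * data_len and pkt_len using data_offset and ether_l2_length, so the
+ * output header is prepended largely by copying precomputed words.
+ */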
+struct routing_table_entry {
+ struct rte_pipeline_table_entry head;
+ uint32_t flags;
+ uint32_t port_id; /* Output port ID */
+ uint32_t ip; /* Next hop IP address (only valid for remote routes) */
+
+ /* ether_l2 */
+ uint16_t data_offset;
+ uint16_t ether_l2_length;
+ uint64_t slab[4];
+ uint16_t slab_offset[4];
+};
+
+struct layout {
+ uint16_t a;
+ uint32_t b;
+ uint16_t c;
+} __attribute__((__packed__));
+
+#define MACADDR_DST_WRITE(slab_ptr, slab) \
+{ \
+ struct layout *dst = (struct layout *) (slab_ptr); \
+ struct layout *src = (struct layout *) &(slab); \
+ \
+ dst->b = src->b; \
+ dst->c = src->c; \
+}
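+
+/*
+ * Copies only fields b and c (bytes 2..7) of the packed layout, i.e. the
+ * six bytes of the destination MAC address, while leaving the first two
+ * bytes at slab_ptr untouched. Callers therefore point slab_ptr two bytes
+ * before the start of the Ethernet header.
+ */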
+
+static inline __attribute__((always_inline)) void
+pkt_work_routing(
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry *table_entry,
+ void *arg,
+ int arp,
+ int qinq,
+ int qinq_sched,
+ int mpls,
+ int mpls_color_mark)
+{
+ struct pipeline_routing *p_rt = arg;
+
+ struct routing_table_entry *entry =
+ (struct routing_table_entry *) table_entry;
+
+ struct ipv4_hdr *ip = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, p_rt->params.ip_hdr_offset);
+
+ enum rte_meter_color pkt_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkt, p_rt->params.color_offset);
+
+ struct pipeline_routing_arp_key_ipv4 *arp_key =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkt, p_rt->params.arp_key_offset);
+
+ uint64_t *slab0_ptr, *slab1_ptr, *slab2_ptr, *slab3_ptr, sched;
+ uint32_t ip_da, nh_ip, port_id;
+ uint16_t total_length, data_offset, ether_l2_length;
+
+ /* Read */
+ total_length = rte_bswap16(ip->total_length);
+ ip_da = ip->dst_addr;
+ data_offset = entry->data_offset;
+ ether_l2_length = entry->ether_l2_length;
+ slab0_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[0]);
+ slab1_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[1]);
+ slab2_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[2]);
+ slab3_ptr = RTE_MBUF_METADATA_UINT64_PTR(pkt, entry->slab_offset[3]);
+
+ if (arp) {
+ port_id = entry->port_id;
+ nh_ip = entry->ip;
+ if (entry->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip = ip_da;
+ }
+
+ /* Compute */
+ total_length += ether_l2_length;
+
+ if (qinq && qinq_sched) {
+ uint32_t dscp = ip->type_of_service >> 2;
+ uint32_t svlan, cvlan, tc, tc_q;
+
+ if (qinq_sched == 1) {
+ uint64_t slab_qinq = rte_bswap64(entry->slab[0]);
+
+ svlan = (slab_qinq >> 48) & 0xFFF;
+ cvlan = (slab_qinq >> 16) & 0xFFF;
+ tc = (dscp >> 2) & 0x3;
+ tc_q = dscp & 0x3;
+ } else {
+ uint32_t ip_src = rte_bswap32(ip->src_addr);
+
+ svlan = 0;
+ cvlan = (ip_src >> 16) & 0xFFF;
+ tc = (ip_src >> 2) & 0x3;
+ tc_q = ip_src & 0x3;
+ }
+ sched = RTE_SCHED_PORT_HIERARCHY(svlan,
+ cvlan,
+ tc,
+ tc_q,
+ e_RTE_METER_GREEN);
+ }
+
+ /* Write */
+ pkt->data_off = data_offset;
+ pkt->data_len = total_length;
+ pkt->pkt_len = total_length;
+
+ if ((qinq == 0) && (mpls == 0)) {
+ *slab0_ptr = entry->slab[0];
+
+ if (arp == 0)
+ MACADDR_DST_WRITE(slab1_ptr, entry->slab[1]);
+ }
+
+ if (qinq) {
+ *slab0_ptr = entry->slab[0];
+ *slab1_ptr = entry->slab[1];
+
+ if (arp == 0)
+ MACADDR_DST_WRITE(slab2_ptr, entry->slab[2]);
+
+ if (qinq_sched) {
+ pkt->hash.sched.lo = sched & 0xFFFFFFFF;
+ pkt->hash.sched.hi = sched >> 32;
+ }
+ }
+
+ if (mpls) {
+ if (mpls_color_mark) {
+ uint64_t mpls_exp = rte_bswap64(
+ (MPLS_LABEL(0, pkt_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt_color, 0, 0));
+
+ *slab0_ptr = entry->slab[0] | mpls_exp;
+ *slab1_ptr = entry->slab[1] | mpls_exp;
+ *slab2_ptr = entry->slab[2];
+ } else {
+ *slab0_ptr = entry->slab[0];
+ *slab1_ptr = entry->slab[1];
+ *slab2_ptr = entry->slab[2];
+ }
+
+ if (arp == 0)
+ MACADDR_DST_WRITE(slab3_ptr, entry->slab[3]);
+ }
+
+ if (arp) {
+ arp_key->port_id = port_id;
+ arp_key->ip = nh_ip;
+ }
+}
+
+static inline __attribute__((always_inline)) void
+pkt4_work_routing(
+ struct rte_mbuf **pkts,
+ struct rte_pipeline_table_entry **table_entries,
+ void *arg,
+ int arp,
+ int qinq,
+ int qinq_sched,
+ int mpls,
+ int mpls_color_mark)
+{
+ struct pipeline_routing *p_rt = arg;
+
+ struct routing_table_entry *entry0 =
+ (struct routing_table_entry *) table_entries[0];
+ struct routing_table_entry *entry1 =
+ (struct routing_table_entry *) table_entries[1];
+ struct routing_table_entry *entry2 =
+ (struct routing_table_entry *) table_entries[2];
+ struct routing_table_entry *entry3 =
+ (struct routing_table_entry *) table_entries[3];
+
+ struct ipv4_hdr *ip0 = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[0],
+ p_rt->params.ip_hdr_offset);
+ struct ipv4_hdr *ip1 = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[1],
+ p_rt->params.ip_hdr_offset);
+ struct ipv4_hdr *ip2 = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[2],
+ p_rt->params.ip_hdr_offset);
+ struct ipv4_hdr *ip3 = (struct ipv4_hdr *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[3],
+ p_rt->params.ip_hdr_offset);
+
+ enum rte_meter_color pkt0_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkts[0], p_rt->params.color_offset);
+ enum rte_meter_color pkt1_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkts[1], p_rt->params.color_offset);
+ enum rte_meter_color pkt2_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkts[2], p_rt->params.color_offset);
+ enum rte_meter_color pkt3_color = (enum rte_meter_color)
+ RTE_MBUF_METADATA_UINT32(pkts[3], p_rt->params.color_offset);
+
+ struct pipeline_routing_arp_key_ipv4 *arp_key0 =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[0],
+ p_rt->params.arp_key_offset);
+ struct pipeline_routing_arp_key_ipv4 *arp_key1 =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[1],
+ p_rt->params.arp_key_offset);
+ struct pipeline_routing_arp_key_ipv4 *arp_key2 =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[2],
+ p_rt->params.arp_key_offset);
+ struct pipeline_routing_arp_key_ipv4 *arp_key3 =
+ (struct pipeline_routing_arp_key_ipv4 *)
+ RTE_MBUF_METADATA_UINT8_PTR(pkts[3],
+ p_rt->params.arp_key_offset);
+
+ uint64_t *slab0_ptr0, *slab1_ptr0, *slab2_ptr0, *slab3_ptr0;
+ uint64_t *slab0_ptr1, *slab1_ptr1, *slab2_ptr1, *slab3_ptr1;
+ uint64_t *slab0_ptr2, *slab1_ptr2, *slab2_ptr2, *slab3_ptr2;
+ uint64_t *slab0_ptr3, *slab1_ptr3, *slab2_ptr3, *slab3_ptr3;
+ uint64_t sched0, sched1, sched2, sched3;
+
+ uint32_t ip_da0, nh_ip0, port_id0;
+ uint32_t ip_da1, nh_ip1, port_id1;
+ uint32_t ip_da2, nh_ip2, port_id2;
+ uint32_t ip_da3, nh_ip3, port_id3;
+
+ uint16_t total_length0, data_offset0, ether_l2_length0;
+ uint16_t total_length1, data_offset1, ether_l2_length1;
+ uint16_t total_length2, data_offset2, ether_l2_length2;
+ uint16_t total_length3, data_offset3, ether_l2_length3;
+
+ /* Read */
+ total_length0 = rte_bswap16(ip0->total_length);
+ total_length1 = rte_bswap16(ip1->total_length);
+ total_length2 = rte_bswap16(ip2->total_length);
+ total_length3 = rte_bswap16(ip3->total_length);
+
+ ip_da0 = ip0->dst_addr;
+ ip_da1 = ip1->dst_addr;
+ ip_da2 = ip2->dst_addr;
+ ip_da3 = ip3->dst_addr;
+
+ data_offset0 = entry0->data_offset;
+ data_offset1 = entry1->data_offset;
+ data_offset2 = entry2->data_offset;
+ data_offset3 = entry3->data_offset;
+
+ ether_l2_length0 = entry0->ether_l2_length;
+ ether_l2_length1 = entry1->ether_l2_length;
+ ether_l2_length2 = entry2->ether_l2_length;
+ ether_l2_length3 = entry3->ether_l2_length;
+
+ slab0_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ entry0->slab_offset[0]);
+ slab1_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ entry0->slab_offset[1]);
+ slab2_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ entry0->slab_offset[2]);
+ slab3_ptr0 = RTE_MBUF_METADATA_UINT64_PTR(pkts[0],
+ entry0->slab_offset[3]);
+
+ slab0_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ entry1->slab_offset[0]);
+ slab1_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ entry1->slab_offset[1]);
+ slab2_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ entry1->slab_offset[2]);
+ slab3_ptr1 = RTE_MBUF_METADATA_UINT64_PTR(pkts[1],
+ entry1->slab_offset[3]);
+
+ slab0_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ entry2->slab_offset[0]);
+ slab1_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ entry2->slab_offset[1]);
+ slab2_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ entry2->slab_offset[2]);
+ slab3_ptr2 = RTE_MBUF_METADATA_UINT64_PTR(pkts[2],
+ entry2->slab_offset[3]);
+
+ slab0_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ entry3->slab_offset[0]);
+ slab1_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ entry3->slab_offset[1]);
+ slab2_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ entry3->slab_offset[2]);
+ slab3_ptr3 = RTE_MBUF_METADATA_UINT64_PTR(pkts[3],
+ entry3->slab_offset[3]);
+
+ if (arp) {
+ port_id0 = entry0->port_id;
+ nh_ip0 = entry0->ip;
+ if (entry0->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip0 = ip_da0;
+
+ port_id1 = entry1->port_id;
+ nh_ip1 = entry1->ip;
+ if (entry1->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip1 = ip_da1;
+
+ port_id2 = entry2->port_id;
+ nh_ip2 = entry2->ip;
+ if (entry2->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip2 = ip_da2;
+
+ port_id3 = entry3->port_id;
+ nh_ip3 = entry3->ip;
+ if (entry3->flags & PIPELINE_ROUTING_ROUTE_LOCAL)
+ nh_ip3 = ip_da3;
+ }
+
+ /* Compute */
+ total_length0 += ether_l2_length0;
+ total_length1 += ether_l2_length1;
+ total_length2 += ether_l2_length2;
+ total_length3 += ether_l2_length3;
+
+ if (qinq && qinq_sched) {
+ uint32_t dscp0 = ip0->type_of_service >> 2;
+ uint32_t dscp1 = ip1->type_of_service >> 2;
+ uint32_t dscp2 = ip2->type_of_service >> 2;
+ uint32_t dscp3 = ip3->type_of_service >> 2;
+ uint32_t svlan0, cvlan0, tc0, tc_q0;
+ uint32_t svlan1, cvlan1, tc1, tc_q1;
+ uint32_t svlan2, cvlan2, tc2, tc_q2;
+ uint32_t svlan3, cvlan3, tc3, tc_q3;
+
+ if (qinq_sched == 1) {
+ uint64_t slab_qinq0 = rte_bswap64(entry0->slab[0]);
+ uint64_t slab_qinq1 = rte_bswap64(entry1->slab[0]);
+ uint64_t slab_qinq2 = rte_bswap64(entry2->slab[0]);
+ uint64_t slab_qinq3 = rte_bswap64(entry3->slab[0]);
+
+ svlan0 = (slab_qinq0 >> 48) & 0xFFF;
+ svlan1 = (slab_qinq1 >> 48) & 0xFFF;
+ svlan2 = (slab_qinq2 >> 48) & 0xFFF;
+ svlan3 = (slab_qinq3 >> 48) & 0xFFF;
+
+ cvlan0 = (slab_qinq0 >> 16) & 0xFFF;
+ cvlan1 = (slab_qinq1 >> 16) & 0xFFF;
+ cvlan2 = (slab_qinq2 >> 16) & 0xFFF;
+ cvlan3 = (slab_qinq3 >> 16) & 0xFFF;
+
+ tc0 = (dscp0 >> 2) & 0x3;
+ tc1 = (dscp1 >> 2) & 0x3;
+ tc2 = (dscp2 >> 2) & 0x3;
+ tc3 = (dscp3 >> 2) & 0x3;
+
+ tc_q0 = dscp0 & 0x3;
+ tc_q1 = dscp1 & 0x3;
+ tc_q2 = dscp2 & 0x3;
+ tc_q3 = dscp3 & 0x3;
+ } else {
+ uint32_t ip_src0 = rte_bswap32(ip0->src_addr);
+ uint32_t ip_src1 = rte_bswap32(ip1->src_addr);
+ uint32_t ip_src2 = rte_bswap32(ip2->src_addr);
+ uint32_t ip_src3 = rte_bswap32(ip3->src_addr);
+
+ svlan0 = 0;
+ svlan1 = 0;
+ svlan2 = 0;
+ svlan3 = 0;
+
+ cvlan0 = (ip_src0 >> 16) & 0xFFF;
+ cvlan1 = (ip_src1 >> 16) & 0xFFF;
+ cvlan2 = (ip_src2 >> 16) & 0xFFF;
+ cvlan3 = (ip_src3 >> 16) & 0xFFF;
+
+ tc0 = (ip_src0 >> 2) & 0x3;
+ tc1 = (ip_src1 >> 2) & 0x3;
+ tc2 = (ip_src2 >> 2) & 0x3;
+ tc3 = (ip_src3 >> 2) & 0x3;
+
+ tc_q0 = ip_src0 & 0x3;
+ tc_q1 = ip_src1 & 0x3;
+ tc_q2 = ip_src2 & 0x3;
+ tc_q3 = ip_src3 & 0x3;
+ }
+
+ sched0 = RTE_SCHED_PORT_HIERARCHY(svlan0,
+ cvlan0,
+ tc0,
+ tc_q0,
+ e_RTE_METER_GREEN);
+ sched1 = RTE_SCHED_PORT_HIERARCHY(svlan1,
+ cvlan1,
+ tc1,
+ tc_q1,
+ e_RTE_METER_GREEN);
+ sched2 = RTE_SCHED_PORT_HIERARCHY(svlan2,
+ cvlan2,
+ tc2,
+ tc_q2,
+ e_RTE_METER_GREEN);
+ sched3 = RTE_SCHED_PORT_HIERARCHY(svlan3,
+ cvlan3,
+ tc3,
+ tc_q3,
+ e_RTE_METER_GREEN);
+
+ }
+
+ /* Write */
+ pkts[0]->data_off = data_offset0;
+ pkts[1]->data_off = data_offset1;
+ pkts[2]->data_off = data_offset2;
+ pkts[3]->data_off = data_offset3;
+
+ pkts[0]->data_len = total_length0;
+ pkts[1]->data_len = total_length1;
+ pkts[2]->data_len = total_length2;
+ pkts[3]->data_len = total_length3;
+
+ pkts[0]->pkt_len = total_length0;
+ pkts[1]->pkt_len = total_length1;
+ pkts[2]->pkt_len = total_length2;
+ pkts[3]->pkt_len = total_length3;
+
+ if ((qinq == 0) && (mpls == 0)) {
+ *slab0_ptr0 = entry0->slab[0];
+ *slab0_ptr1 = entry1->slab[0];
+ *slab0_ptr2 = entry2->slab[0];
+ *slab0_ptr3 = entry3->slab[0];
+
+ if (arp == 0) {
+ MACADDR_DST_WRITE(slab1_ptr0, entry0->slab[1]);
+ MACADDR_DST_WRITE(slab1_ptr1, entry1->slab[1]);
+ MACADDR_DST_WRITE(slab1_ptr2, entry2->slab[1]);
+ MACADDR_DST_WRITE(slab1_ptr3, entry3->slab[1]);
+ }
+ }
+
+ if (qinq) {
+ *slab0_ptr0 = entry0->slab[0];
+ *slab0_ptr1 = entry1->slab[0];
+ *slab0_ptr2 = entry2->slab[0];
+ *slab0_ptr3 = entry3->slab[0];
+
+ *slab1_ptr0 = entry0->slab[1];
+ *slab1_ptr1 = entry1->slab[1];
+ *slab1_ptr2 = entry2->slab[1];
+ *slab1_ptr3 = entry3->slab[1];
+
+ if (arp == 0) {
+ MACADDR_DST_WRITE(slab2_ptr0, entry0->slab[2]);
+ MACADDR_DST_WRITE(slab2_ptr1, entry1->slab[2]);
+ MACADDR_DST_WRITE(slab2_ptr2, entry2->slab[2]);
+ MACADDR_DST_WRITE(slab2_ptr3, entry3->slab[2]);
+ }
+
+ if (qinq_sched) {
+ pkts[0]->hash.sched.lo = sched0 & 0xFFFFFFFF;
+ pkts[0]->hash.sched.hi = sched0 >> 32;
+ pkts[1]->hash.sched.lo = sched1 & 0xFFFFFFFF;
+ pkts[1]->hash.sched.hi = sched1 >> 32;
+ pkts[2]->hash.sched.lo = sched2 & 0xFFFFFFFF;
+ pkts[2]->hash.sched.hi = sched2 >> 32;
+ pkts[3]->hash.sched.lo = sched3 & 0xFFFFFFFF;
+ pkts[3]->hash.sched.hi = sched3 >> 32;
+ }
+ }
+
+ if (mpls) {
+ if (mpls_color_mark) {
+ uint64_t mpls_exp0 = rte_bswap64(
+ (MPLS_LABEL(0, pkt0_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt0_color, 0, 0));
+ uint64_t mpls_exp1 = rte_bswap64(
+ (MPLS_LABEL(0, pkt1_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt1_color, 0, 0));
+ uint64_t mpls_exp2 = rte_bswap64(
+ (MPLS_LABEL(0, pkt2_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt2_color, 0, 0));
+ uint64_t mpls_exp3 = rte_bswap64(
+ (MPLS_LABEL(0, pkt3_color, 0, 0) << 32) |
+ MPLS_LABEL(0, pkt3_color, 0, 0));
+
+ *slab0_ptr0 = entry0->slab[0] | mpls_exp0;
+ *slab0_ptr1 = entry1->slab[0] | mpls_exp1;
+ *slab0_ptr2 = entry2->slab[0] | mpls_exp2;
+ *slab0_ptr3 = entry3->slab[0] | mpls_exp3;
+
+ *slab1_ptr0 = entry0->slab[1] | mpls_exp0;
+ *slab1_ptr1 = entry1->slab[1] | mpls_exp1;
+ *slab1_ptr2 = entry2->slab[1] | mpls_exp2;
+ *slab1_ptr3 = entry3->slab[1] | mpls_exp3;
+
+ *slab2_ptr0 = entry0->slab[2];
+ *slab2_ptr1 = entry1->slab[2];
+ *slab2_ptr2 = entry2->slab[2];
+ *slab2_ptr3 = entry3->slab[2];
+ } else {
+ *slab0_ptr0 = entry0->slab[0];
+ *slab0_ptr1 = entry1->slab[0];
+ *slab0_ptr2 = entry2->slab[0];
+ *slab0_ptr3 = entry3->slab[0];
+
+ *slab1_ptr0 = entry0->slab[1];
+ *slab1_ptr1 = entry1->slab[1];
+ *slab1_ptr2 = entry2->slab[1];
+ *slab1_ptr3 = entry3->slab[1];
+
+ *slab2_ptr0 = entry0->slab[2];
+ *slab2_ptr1 = entry1->slab[2];
+ *slab2_ptr2 = entry2->slab[2];
+ *slab2_ptr3 = entry3->slab[2];
+ }
+
+ if (arp == 0) {
+ MACADDR_DST_WRITE(slab3_ptr0, entry0->slab[3]);
+ MACADDR_DST_WRITE(slab3_ptr1, entry1->slab[3]);
+ MACADDR_DST_WRITE(slab3_ptr2, entry2->slab[3]);
+ MACADDR_DST_WRITE(slab3_ptr3, entry3->slab[3]);
+ }
+ }
+
+ if (arp) {
+ arp_key0->port_id = port_id0;
+ arp_key1->port_id = port_id1;
+ arp_key2->port_id = port_id2;
+ arp_key3->port_id = port_id3;
+
+ arp_key0->ip = nh_ip0;
+ arp_key1->ip = nh_ip1;
+ arp_key2->ip = nh_ip2;
+ arp_key3->ip = nh_ip3;
+ }
+}
+
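+/*
+ * The macros below stamp out one specialized table hit action handler per
+ * (encap, qinq_sched, mpls_color_mark, ARP on/off) combination. Because
+ * pkt_work_routing()/pkt4_work_routing() are always_inline and receive the
+ * feature flags as compile-time constants, the compiler drops the unused
+ * branches from each generated handler.
+ */
+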
+#define PKT_WORK_ROUTING_ETHERNET(arp) \
+static inline void \
+pkt_work_routing_ether_arp##arp( \
+ struct rte_mbuf *pkt, \
+ struct rte_pipeline_table_entry *table_entry, \
+ void *arg) \
+{ \
+ pkt_work_routing(pkt, table_entry, arg, arp, 0, 0, 0, 0);\
+}
+
+#define PKT4_WORK_ROUTING_ETHERNET(arp) \
+static inline void \
+pkt4_work_routing_ether_arp##arp( \
+ struct rte_mbuf **pkts, \
+ struct rte_pipeline_table_entry **table_entries, \
+ void *arg) \
+{ \
+ pkt4_work_routing(pkts, table_entries, arg, arp, 0, 0, 0, 0);\
+}
+
+#define routing_table_ah_hit_ether(arp) \
+PKT_WORK_ROUTING_ETHERNET(arp) \
+PKT4_WORK_ROUTING_ETHERNET(arp) \
+PIPELINE_TABLE_AH_HIT(routing_table_ah_hit_ether_arp##arp, \
+ pkt_work_routing_ether_arp##arp, \
+ pkt4_work_routing_ether_arp##arp)
+
+routing_table_ah_hit_ether(0)
+routing_table_ah_hit_ether(1)
+
+#define PKT_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
+static inline void \
+pkt_work_routing_ether_qinq_sched##sched##_arp##arp( \
+ struct rte_mbuf *pkt, \
+ struct rte_pipeline_table_entry *table_entry, \
+ void *arg) \
+{ \
+ pkt_work_routing(pkt, table_entry, arg, arp, 1, sched, 0, 0);\
+}
+
+#define PKT4_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
+static inline void \
+pkt4_work_routing_ether_qinq_sched##sched##_arp##arp( \
+ struct rte_mbuf **pkts, \
+ struct rte_pipeline_table_entry **table_entries, \
+ void *arg) \
+{ \
+ pkt4_work_routing(pkts, table_entries, arg, arp, 1, sched, 0, 0);\
+}
+
+#define routing_table_ah_hit_ether_qinq(sched, arp) \
+PKT_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
+PKT4_WORK_ROUTING_ETHERNET_QINQ(sched, arp) \
+PIPELINE_TABLE_AH_HIT(routing_table_ah_hit_ether_qinq_sched##sched##_arp##arp,\
+ pkt_work_routing_ether_qinq_sched##sched##_arp##arp, \
+ pkt4_work_routing_ether_qinq_sched##sched##_arp##arp)
+
+routing_table_ah_hit_ether_qinq(0, 0)
+routing_table_ah_hit_ether_qinq(1, 0)
+routing_table_ah_hit_ether_qinq(2, 0)
+routing_table_ah_hit_ether_qinq(0, 1)
+routing_table_ah_hit_ether_qinq(1, 1)
+routing_table_ah_hit_ether_qinq(2, 1)
+
+#define PKT_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
+static inline void \
+pkt_work_routing_ether_mpls_color##color##_arp##arp( \
+ struct rte_mbuf *pkt, \
+ struct rte_pipeline_table_entry *table_entry, \
+ void *arg) \
+{ \
+ pkt_work_routing(pkt, table_entry, arg, arp, 0, 0, 1, color);\
+}
+
+#define PKT4_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
+static inline void \
+pkt4_work_routing_ether_mpls_color##color##_arp##arp( \
+ struct rte_mbuf **pkts, \
+ struct rte_pipeline_table_entry **table_entries, \
+ void *arg) \
+{ \
+ pkt4_work_routing(pkts, table_entries, arg, arp, 0, 0, 1, color);\
+}
+
+#define routing_table_ah_hit_ether_mpls(color, arp) \
+PKT_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
+PKT4_WORK_ROUTING_ETHERNET_MPLS(color, arp) \
+PIPELINE_TABLE_AH_HIT(routing_table_ah_hit_ether_mpls_color##color##_arp##arp,\
+ pkt_work_routing_ether_mpls_color##color##_arp##arp, \
+ pkt4_work_routing_ether_mpls_color##color##_arp##arp)
+
+routing_table_ah_hit_ether_mpls(0, 0)
+routing_table_ah_hit_ether_mpls(1, 0)
+routing_table_ah_hit_ether_mpls(0, 1)
+routing_table_ah_hit_ether_mpls(1, 1)
+
+static rte_pipeline_table_action_handler_hit
+get_routing_table_ah_hit(struct pipeline_routing *p)
+{
+ if (p->params.dbg_ah_disable)
+ return NULL;
+
+ switch (p->params.encap) {
+ case PIPELINE_ROUTING_ENCAP_ETHERNET:
+ return (p->params.n_arp_entries) ?
+ routing_table_ah_hit_ether_arp1 :
+ routing_table_ah_hit_ether_arp0;
+
+ case PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ:
+ if (p->params.n_arp_entries)
+ switch (p->params.qinq_sched) {
+ case 0:
+ return routing_table_ah_hit_ether_qinq_sched0_arp1;
+ case 1:
+ return routing_table_ah_hit_ether_qinq_sched1_arp1;
+ case 2:
+ return routing_table_ah_hit_ether_qinq_sched2_arp1;
+ default:
+ return NULL;
+ }
+ else
+ switch (p->params.qinq_sched) {
+ case 0:
+ return routing_table_ah_hit_ether_qinq_sched0_arp0;
+ case 1:
+ return routing_table_ah_hit_ether_qinq_sched1_arp0;
+ case 2:
+ return routing_table_ah_hit_ether_qinq_sched2_arp0;
+ default:
+ return NULL;
+ }
+
+	case PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS:
+		if (p->params.n_arp_entries)
+			return (p->params.mpls_color_mark) ?
+				routing_table_ah_hit_ether_mpls_color1_arp1 :
+				routing_table_ah_hit_ether_mpls_color0_arp1;
+
+		return (p->params.mpls_color_mark) ?
+			routing_table_ah_hit_ether_mpls_color1_arp0 :
+			routing_table_ah_hit_ether_mpls_color0_arp0;
+
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * ARP table
+ */
+struct arp_table_entry {
+ struct rte_pipeline_table_entry head;
+ uint64_t macaddr;
+};
+
+/**
+ * ARP table AH
+ */
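+
+/*
+ * The slab pointer is taken two bytes before the frame start (data_off - 2)
+ * so that MACADDR_DST_WRITE() lands exactly on the six-byte destination MAC
+ * at the beginning of the Ethernet header.
+ */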
+static inline void
+pkt_work_arp(
+ struct rte_mbuf *pkt,
+ struct rte_pipeline_table_entry *table_entry,
+ __rte_unused void *arg)
+{
+ struct arp_table_entry *entry = (struct arp_table_entry *) table_entry;
+
+ /* Read */
+ uint64_t macaddr_dst = entry->macaddr;
+ uint64_t *slab_ptr = (uint64_t *) ((char *) pkt->buf_addr +
+ (pkt->data_off - 2));
+
+ /* Compute */
+
+ /* Write */
+ MACADDR_DST_WRITE(slab_ptr, macaddr_dst);
+}
+
+static inline void
+pkt4_work_arp(
+ struct rte_mbuf **pkts,
+ struct rte_pipeline_table_entry **table_entries,
+ __rte_unused void *arg)
+{
+ struct arp_table_entry *entry0 =
+ (struct arp_table_entry *) table_entries[0];
+ struct arp_table_entry *entry1 =
+ (struct arp_table_entry *) table_entries[1];
+ struct arp_table_entry *entry2 =
+ (struct arp_table_entry *) table_entries[2];
+ struct arp_table_entry *entry3 =
+ (struct arp_table_entry *) table_entries[3];
+
+ /* Read */
+ uint64_t macaddr_dst0 = entry0->macaddr;
+ uint64_t macaddr_dst1 = entry1->macaddr;
+ uint64_t macaddr_dst2 = entry2->macaddr;
+ uint64_t macaddr_dst3 = entry3->macaddr;
+
+ uint64_t *slab_ptr0 = (uint64_t *) ((char *) pkts[0]->buf_addr +
+ (pkts[0]->data_off - 2));
+ uint64_t *slab_ptr1 = (uint64_t *) ((char *) pkts[1]->buf_addr +
+ (pkts[1]->data_off - 2));
+ uint64_t *slab_ptr2 = (uint64_t *) ((char *) pkts[2]->buf_addr +
+ (pkts[2]->data_off - 2));
+ uint64_t *slab_ptr3 = (uint64_t *) ((char *) pkts[3]->buf_addr +
+ (pkts[3]->data_off - 2));
+
+ /* Compute */
+
+ /* Write */
+ MACADDR_DST_WRITE(slab_ptr0, macaddr_dst0);
+ MACADDR_DST_WRITE(slab_ptr1, macaddr_dst1);
+ MACADDR_DST_WRITE(slab_ptr2, macaddr_dst2);
+ MACADDR_DST_WRITE(slab_ptr3, macaddr_dst3);
+}
+
+PIPELINE_TABLE_AH_HIT(arp_table_ah_hit,
+ pkt_work_arp,
+ pkt4_work_arp);
+
+static rte_pipeline_table_action_handler_hit
+get_arp_table_ah_hit(struct pipeline_routing *p)
+{
+ if (p->params.dbg_ah_disable)
+ return NULL;
+
+ return arp_table_ah_hit;
+}
+
+/*
+ * Argument parsing
+ */
+int
+pipeline_routing_parse_args(struct pipeline_routing_params *p,
+ struct pipeline_params *params)
+{
+ uint32_t n_routes_present = 0;
+ uint32_t port_local_dest_present = 0;
+ uint32_t encap_present = 0;
+ uint32_t qinq_sched_present = 0;
+ uint32_t mpls_color_mark_present = 0;
+ uint32_t n_arp_entries_present = 0;
+ uint32_t ip_hdr_offset_present = 0;
+ uint32_t arp_key_offset_present = 0;
+ uint32_t color_offset_present = 0;
+ uint32_t dbg_ah_disable_present = 0;
+ uint32_t i;
+
+ /* default values */
+ p->n_routes = PIPELINE_ROUTING_N_ROUTES_DEFAULT;
+ p->port_local_dest = params->n_ports_out - 1;
+ p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET;
+ p->qinq_sched = 0;
+ p->mpls_color_mark = 0;
+ p->n_arp_entries = 0;
+ p->dbg_ah_disable = 0;
+
+ for (i = 0; i < params->n_args; i++) {
+ char *arg_name = params->args_name[i];
+ char *arg_value = params->args_value[i];
+
+ /* n_routes */
+ if (strcmp(arg_name, "n_routes") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_routes_present == 0, params->name,
+ arg_name);
+ n_routes_present = 1;
+
+ status = parser_read_uint32(&p->n_routes,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status != -EINVAL) &&
+ (p->n_routes != 0)), params->name,
+ arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+ /* port_local_dest */
+ if (strcmp(arg_name, "port_local_dest") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ port_local_dest_present == 0, params->name,
+ arg_name);
+ port_local_dest_present = 1;
+
+ status = parser_read_uint32(&p->port_local_dest,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL(((status == 0) &&
+ (p->port_local_dest < params->n_ports_out)),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* encap */
+ if (strcmp(arg_name, "encap") == 0) {
+ PIPELINE_PARSE_ERR_DUPLICATE(encap_present == 0,
+ params->name, arg_name);
+ encap_present = 1;
+
+ /* ethernet */
+ if (strcmp(arg_value, "ethernet") == 0) {
+ p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET;
+ continue;
+ }
+
+ /* ethernet_qinq */
+ if (strcmp(arg_value, "ethernet_qinq") == 0) {
+ p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ;
+ continue;
+ }
+
+ /* ethernet_mpls */
+ if (strcmp(arg_value, "ethernet_mpls") == 0) {
+ p->encap = PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS;
+ continue;
+ }
+
+ /* any other */
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+ }
+
+ /* qinq_sched */
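+		/*
+		 * Accepted values: a boolean argument or the string "test",
+		 * which selects qinq_sched = 2; in that mode the scheduling
+		 * fields are derived from the source IP address instead of
+		 * the QinQ tags (see pkt_work_routing()).
+		 */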
+ if (strcmp(arg_name, "qinq_sched") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ qinq_sched_present == 0, params->name,
+ arg_name);
+ qinq_sched_present = 1;
+
+ status = parser_read_arg_bool(arg_value);
+ if (status == -EINVAL) {
+ if (strcmp(arg_value, "test") == 0) {
+ p->qinq_sched = 2;
+ continue;
+ }
+ } else {
+ p->qinq_sched = status;
+ continue;
+ }
+
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+ }
+
+ /* mpls_color_mark */
+ if (strcmp(arg_name, "mpls_color_mark") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ mpls_color_mark_present == 0,
+ params->name, arg_name);
+ mpls_color_mark_present = 1;
+
+
+ status = parser_read_arg_bool(arg_value);
+ if (status >= 0) {
+ p->mpls_color_mark = status;
+ continue;
+ }
+
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+ }
+
+ /* n_arp_entries */
+ if (strcmp(arg_name, "n_arp_entries") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ n_arp_entries_present == 0, params->name,
+ arg_name);
+ n_arp_entries_present = 1;
+
+ status = parser_read_uint32(&p->n_arp_entries,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* ip_hdr_offset */
+ if (strcmp(arg_name, "ip_hdr_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ ip_hdr_offset_present == 0, params->name,
+ arg_name);
+ ip_hdr_offset_present = 1;
+
+ status = parser_read_uint32(&p->ip_hdr_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* arp_key_offset */
+ if (strcmp(arg_name, "arp_key_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ arp_key_offset_present == 0, params->name,
+ arg_name);
+ arp_key_offset_present = 1;
+
+ status = parser_read_uint32(&p->arp_key_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* color_offset */
+ if (strcmp(arg_name, "color_offset") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ color_offset_present == 0, params->name,
+ arg_name);
+ color_offset_present = 1;
+
+ status = parser_read_uint32(&p->color_offset,
+ arg_value);
+ PIPELINE_PARSE_ERR_INV_VAL((status != -EINVAL),
+ params->name, arg_name, arg_value);
+ PIPELINE_PARSE_ERR_OUT_RNG((status != -ERANGE),
+ params->name, arg_name, arg_value);
+
+ continue;
+ }
+
+ /* debug */
+ if (strcmp(arg_name, "dbg_ah_disable") == 0) {
+ int status;
+
+ PIPELINE_PARSE_ERR_DUPLICATE(
+ dbg_ah_disable_present == 0, params->name,
+ arg_name);
+ dbg_ah_disable_present = 1;
+
+ status = parser_read_arg_bool(arg_value);
+ if (status >= 0) {
+ p->dbg_ah_disable = status;
+ continue;
+ }
+
+ PIPELINE_PARSE_ERR_INV_VAL(0, params->name,
+ arg_name, arg_value);
+
+ continue;
+ }
+
+ /* any other */
+ PIPELINE_PARSE_ERR_INV_ENT(0, params->name, arg_name);
+ }
+
+ /* Check that mandatory arguments are present */
+ PIPELINE_PARSE_ERR_MANDATORY(ip_hdr_offset_present, params->name,
+ "ip_hdr_offset");
+
+ /* Check relations between arguments */
+ switch (p->encap) {
+ case PIPELINE_ROUTING_ENCAP_ETHERNET:
+ PIPELINE_ARG_CHECK((!p->qinq_sched), "Parse error in "
+ "section \"%s\": encap = ethernet, therefore "
+ "qinq_sched = yes/test is not allowed",
+ params->name);
+ PIPELINE_ARG_CHECK((!p->mpls_color_mark), "Parse error "
+ "in section \"%s\": encap = ethernet, therefore "
+ "mpls_color_mark = yes is not allowed",
+ params->name);
+ PIPELINE_ARG_CHECK((!color_offset_present), "Parse error "
+ "in section \"%s\": encap = ethernet, therefore "
+ "color_offset is not allowed",
+ params->name);
+ break;
+
+ case PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ:
+ PIPELINE_ARG_CHECK((!p->mpls_color_mark), "Parse error "
+ "in section \"%s\": encap = ethernet_qinq, "
+ "therefore mpls_color_mark = yes is not allowed",
+ params->name);
+ PIPELINE_ARG_CHECK((!color_offset_present), "Parse error "
+ "in section \"%s\": encap = ethernet_qinq, "
+ "therefore color_offset is not allowed",
+ params->name);
+ break;
+
+ case PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS:
+ PIPELINE_ARG_CHECK((!p->qinq_sched), "Parse error in "
+ "section \"%s\": encap = ethernet_mpls, therefore "
+ "qinq_sched = yes/test is not allowed",
+ params->name);
+ break;
+ }
+
+ PIPELINE_ARG_CHECK((!(p->n_arp_entries &&
+ (!arp_key_offset_present))), "Parse error in section "
+ "\"%s\": n_arp_entries is set while "
+ "arp_key_offset is not set", params->name);
+
+ PIPELINE_ARG_CHECK((!((p->n_arp_entries == 0) &&
+ arp_key_offset_present)), "Parse error in section "
+ "\"%s\": arp_key_offset present while "
+ "n_arp_entries is not set", params->name);
+
+ return 0;
+}
+
+static void *
+pipeline_routing_init(struct pipeline_params *params,
+ __rte_unused void *arg)
+{
+ struct pipeline *p;
+ struct pipeline_routing *p_rt;
+ uint32_t size, i;
+
+ /* Check input arguments */
+ if ((params == NULL) ||
+ (params->n_ports_in == 0) ||
+ (params->n_ports_out == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct pipeline_routing));
+ p = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ p_rt = (struct pipeline_routing *) p;
+ if (p == NULL)
+ return NULL;
+
+ strcpy(p->name, params->name);
+ p->log_level = params->log_level;
+
+ PLOG(p, HIGH, "Routing");
+
+	/* Parse arguments */
+	if (pipeline_routing_parse_args(&p_rt->params, params)) {
+		rte_free(p);
+		return NULL;
+	}
+
+ /* Pipeline */
+ {
+ struct rte_pipeline_params pipeline_params = {
+ .name = params->name,
+ .socket_id = params->socket_id,
+ .offset_port_id = 0,
+ };
+
+ p->p = rte_pipeline_create(&pipeline_params);
+ if (p->p == NULL) {
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Input ports */
+ p->n_ports_in = params->n_ports_in;
+ for (i = 0; i < p->n_ports_in; i++) {
+ struct rte_pipeline_port_in_params port_params = {
+ .ops = pipeline_port_in_params_get_ops(
+ &params->port_in[i]),
+ .arg_create = pipeline_port_in_params_convert(
+ &params->port_in[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ .burst_size = params->port_in[i].burst_size,
+ };
+
+ int status = rte_pipeline_port_in_create(p->p,
+ &port_params,
+ &p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Output ports */
+ p->n_ports_out = params->n_ports_out;
+ for (i = 0; i < p->n_ports_out; i++) {
+ struct rte_pipeline_port_out_params port_params = {
+ .ops = pipeline_port_out_params_get_ops(
+ &params->port_out[i]),
+ .arg_create = pipeline_port_out_params_convert(
+ &params->port_out[i]),
+ .f_action = NULL,
+ .arg_ah = NULL,
+ };
+
+ int status = rte_pipeline_port_out_create(p->p,
+ &port_params,
+ &p->port_out_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Routing table */
+ p->n_tables = 1;
+ {
+ struct rte_table_lpm_params table_lpm_params = {
+ .name = p->name,
+ .n_rules = p_rt->params.n_routes,
+ .number_tbl8s = PIPELINE_ROUTING_LPM_TABLE_NUMBER_TABLE8s,
+ .flags = 0,
+ .entry_unique_size = sizeof(struct routing_table_entry),
+ .offset = p_rt->params.ip_hdr_offset +
+ __builtin_offsetof(struct ipv4_hdr, dst_addr),
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_lpm_ops,
+ .arg_create = &table_lpm_params,
+ .f_action_hit = get_routing_table_ah_hit(p_rt),
+ .f_action_miss = NULL,
+ .arg_ah = p_rt,
+ .action_data_size =
+ sizeof(struct routing_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* ARP table configuration */
+ if (p_rt->params.n_arp_entries) {
+ struct rte_table_hash_key8_ext_params table_arp_params = {
+ .n_entries = p_rt->params.n_arp_entries,
+ .n_entries_ext = p_rt->params.n_arp_entries,
+ .f_hash = hash_default_key8,
+ .seed = 0,
+ .signature_offset = 0, /* Unused */
+ .key_offset = p_rt->params.arp_key_offset,
+ };
+
+ struct rte_pipeline_table_params table_params = {
+ .ops = &rte_table_hash_key8_ext_dosig_ops,
+ .arg_create = &table_arp_params,
+ .f_action_hit = get_arp_table_ah_hit(p_rt),
+ .f_action_miss = NULL,
+ .arg_ah = p_rt,
+ .action_data_size = sizeof(struct arp_table_entry) -
+ sizeof(struct rte_pipeline_table_entry),
+ };
+
+ int status;
+
+ status = rte_pipeline_table_create(p->p,
+ &table_params,
+ &p->table_id[1]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ p->n_tables++;
+ }
+
+ /* Connecting input ports to tables */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_connect_to_table(p->p,
+ p->port_in_id[i],
+ p->table_id[0]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Enable input ports */
+ for (i = 0; i < p->n_ports_in; i++) {
+ int status = rte_pipeline_port_in_enable(p->p,
+ p->port_in_id[i]);
+
+ if (status) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+ }
+
+ /* Check pipeline consistency */
+ if (rte_pipeline_check(p->p) < 0) {
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return NULL;
+ }
+
+ /* Message queues */
+ p->n_msgq = params->n_msgq;
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_in[i] = params->msgq_in[i];
+ for (i = 0; i < p->n_msgq; i++)
+ p->msgq_out[i] = params->msgq_out[i];
+
+ /* Message handlers */
+ memcpy(p->handlers, handlers, sizeof(p->handlers));
+ memcpy(p_rt->custom_handlers,
+ custom_handlers,
+ sizeof(p_rt->custom_handlers));
+
+ return p;
+}
+
+static int
+pipeline_routing_free(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ /* Free resources */
+ rte_pipeline_free(p->p);
+ rte_free(p);
+ return 0;
+}
+
+static int
+pipeline_routing_timer(void *pipeline)
+{
+ struct pipeline *p = (struct pipeline *) pipeline;
+
+ pipeline_msg_req_handle(p);
+ rte_pipeline_flush(p->p);
+
+ return 0;
+}
+
+void *
+pipeline_routing_msg_req_custom_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
+ struct pipeline_custom_msg_req *req = msg;
+ pipeline_msg_req_handler f_handle;
+
+ f_handle = (req->subtype < PIPELINE_ROUTING_MSG_REQS) ?
+ p_rt->custom_handlers[req->subtype] :
+ pipeline_msg_req_invalid_handler;
+
+ if (f_handle == NULL)
+ f_handle = pipeline_msg_req_invalid_handler;
+
+ return f_handle(p, req);
+}
+
+void *
+pipeline_routing_msg_req_route_add_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
+ struct pipeline_routing_route_add_msg_req *req = msg;
+ struct pipeline_routing_route_add_msg_rsp *rsp = msg;
+
+ struct rte_table_lpm_key key = {
+ .ip = req->key.key.ipv4.ip,
+ .depth = req->key.key.ipv4.depth,
+ };
+
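+	/*
+	 * Two candidate entry templates for the LPM route table: entry_arp0
+	 * sends the packet straight to the output port (ARP table disabled),
+	 * while entry_arp1 chains to the ARP table (table_id[1]) so that the
+	 * next hop MAC address can be resolved at run time.
+	 */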
+ struct routing_table_entry entry_arp0 = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->data.port_id]},
+ },
+
+ .flags = req->data.flags,
+ .port_id = req->data.port_id,
+ .ip = 0,
+ .data_offset = 0,
+ .ether_l2_length = 0,
+ .slab = {0},
+ .slab_offset = {0},
+ };
+
+ struct routing_table_entry entry_arp1 = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_TABLE,
+ {.table_id = p->table_id[1]},
+ },
+
+ .flags = req->data.flags,
+ .port_id = req->data.port_id,
+ .ip = rte_bswap32(req->data.ethernet.ip),
+ .data_offset = 0,
+ .ether_l2_length = 0,
+ .slab = {0},
+ .slab_offset = {0},
+ };
+
+ struct rte_pipeline_table_entry *entry = (p_rt->params.n_arp_entries) ?
+ (struct rte_pipeline_table_entry *) &entry_arp1 :
+ (struct rte_pipeline_table_entry *) &entry_arp0;
+
+ if ((req->key.type != PIPELINE_ROUTING_ROUTE_IPV4) ||
+ ((p_rt->params.n_arp_entries == 0) &&
+ (req->data.flags & PIPELINE_ROUTING_ROUTE_ARP)) ||
+ (p_rt->params.n_arp_entries &&
+ ((req->data.flags & PIPELINE_ROUTING_ROUTE_ARP) == 0)) ||
+ ((p_rt->params.encap != PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
+ (req->data.flags & PIPELINE_ROUTING_ROUTE_QINQ)) ||
+ ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
+ ((req->data.flags & PIPELINE_ROUTING_ROUTE_QINQ) == 0)) ||
+ ((p_rt->params.encap != PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
+ (req->data.flags & PIPELINE_ROUTING_ROUTE_MPLS)) ||
+ ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
+ ((req->data.flags & PIPELINE_ROUTING_ROUTE_MPLS) == 0))) {
+ rsp->status = -1;
+ return rsp;
+ }
+
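+	/*
+	 * The blocks below pre-build the output L2 header as 8-byte slabs:
+	 * each slab[i] is written slab_offset[i] bytes into the packet buffer,
+	 * while data_offset and ether_l2_length record where the rewritten
+	 * frame starts and how long the generated L2 encapsulation is.
+	 */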
+ /* Ether - ARP off */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
+ (p_rt->params.n_arp_entries == 0)) {
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
+ uint64_t macaddr_dst;
+ uint64_t ethertype = ETHER_TYPE_IPv4;
+
+ macaddr_dst = *((uint64_t *)&(req->data.ethernet.macaddr));
+ macaddr_dst = rte_bswap64(macaddr_dst << 16);
+
+ entry_arp0.slab[0] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype);
+ entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(macaddr_dst);
+ entry_arp0.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
+
+ entry_arp0.data_offset = entry_arp0.slab_offset[1] + 2
+ - sizeof(struct rte_mbuf);
+ entry_arp0.ether_l2_length = 14;
+ }
+
+ /* Ether - ARP on */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET) &&
+ p_rt->params.n_arp_entries) {
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
+ uint64_t ethertype = ETHER_TYPE_IPv4;
+
+ entry_arp1.slab[0] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype);
+ entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.data_offset = entry_arp1.slab_offset[0] - 6
+ - sizeof(struct rte_mbuf);
+ entry_arp1.ether_l2_length = 14;
+ }
+
+ /* Ether QinQ - ARP off */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
+ (p_rt->params.n_arp_entries == 0)) {
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
+ uint64_t macaddr_dst;
+ uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
+ uint64_t ethertype_vlan = 0x8100;
+ uint64_t ethertype_qinq = 0x9100;
+ uint64_t svlan = req->data.l2.qinq.svlan;
+ uint64_t cvlan = req->data.l2.qinq.cvlan;
+
+ macaddr_dst = *((uint64_t *)&(req->data.ethernet.macaddr));
+ macaddr_dst = rte_bswap64(macaddr_dst << 16);
+
+ entry_arp0.slab[0] = rte_bswap64((svlan << 48) |
+ (ethertype_vlan << 32) |
+ (cvlan << 16) |
+ ethertype_ipv4);
+ entry_arp0.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_qinq);
+ entry_arp0.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
+
+ entry_arp0.slab[2] = rte_bswap64(macaddr_dst);
+ entry_arp0.slab_offset[2] = p_rt->params.ip_hdr_offset - 3 * 8;
+
+ entry_arp0.data_offset = entry_arp0.slab_offset[2] + 2
+ - sizeof(struct rte_mbuf);
+ entry_arp0.ether_l2_length = 22;
+ }
+
+ /* Ether QinQ - ARP on */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ) &&
+ p_rt->params.n_arp_entries) {
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
+ uint64_t ethertype_ipv4 = ETHER_TYPE_IPv4;
+ uint64_t ethertype_vlan = 0x8100;
+ uint64_t ethertype_qinq = 0x9100;
+ uint64_t svlan = req->data.l2.qinq.svlan;
+ uint64_t cvlan = req->data.l2.qinq.cvlan;
+
+ entry_arp1.slab[0] = rte_bswap64((svlan << 48) |
+ (ethertype_vlan << 32) |
+ (cvlan << 16) |
+ ethertype_ipv4);
+ entry_arp1.slab_offset[0] = p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_qinq);
+ entry_arp1.slab_offset[1] = p_rt->params.ip_hdr_offset - 2 * 8;
+
+ entry_arp1.data_offset = entry_arp1.slab_offset[1] - 6
+ - sizeof(struct rte_mbuf);
+ entry_arp1.ether_l2_length = 22;
+ }
+
+ /* Ether MPLS - ARP off */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
+ (p_rt->params.n_arp_entries == 0)) {
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
+ uint64_t macaddr_dst;
+ uint64_t ethertype_mpls = 0x8847;
+
+ uint64_t label0 = req->data.l2.mpls.labels[0];
+ uint64_t label1 = req->data.l2.mpls.labels[1];
+ uint64_t label2 = req->data.l2.mpls.labels[2];
+ uint64_t label3 = req->data.l2.mpls.labels[3];
+ uint32_t n_labels = req->data.l2.mpls.n_labels;
+
+ macaddr_dst = *((uint64_t *)&(req->data.ethernet.macaddr));
+ macaddr_dst = rte_bswap64(macaddr_dst << 16);
+
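+		/*
+		 * Build the MPLS label stack in 8-byte slabs; the third
+		 * MPLS_LABEL() argument sets the bottom-of-stack bit, so it
+		 * is 1 only for the innermost (last) label of the stack.
+		 */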
+ switch (n_labels) {
+ case 1:
+ entry_arp0.slab[0] = 0;
+ entry_arp0.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(
+ MPLS_LABEL(label0, 0, 1, 0));
+ entry_arp0.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 8;
+ break;
+
+ case 2:
+ entry_arp0.slab[0] = 0;
+ entry_arp0.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(
+ (MPLS_LABEL(label0, 0, 0, 0) << 32) |
+ MPLS_LABEL(label1, 0, 1, 0));
+ entry_arp0.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 8;
+ break;
+
+ case 3:
+ entry_arp0.slab[0] = rte_bswap64(
+ (MPLS_LABEL(label1, 0, 0, 0) << 32) |
+ MPLS_LABEL(label2, 0, 1, 0));
+ entry_arp0.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(
+ MPLS_LABEL(label0, 0, 0, 0));
+ entry_arp0.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 2 * 8;
+ break;
+
+ case 4:
+ entry_arp0.slab[0] = rte_bswap64(
+ (MPLS_LABEL(label2, 0, 0, 0) << 32) |
+ MPLS_LABEL(label3, 0, 1, 0));
+ entry_arp0.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp0.slab[1] = rte_bswap64(
+ (MPLS_LABEL(label0, 0, 0, 0) << 32) |
+ MPLS_LABEL(label1, 0, 0, 0));
+ entry_arp0.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 2 * 8;
+ break;
+
+ default:
+ rsp->status = -1;
+ return rsp;
+ }
+
+ entry_arp0.slab[2] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_mpls);
+ entry_arp0.slab_offset[2] = p_rt->params.ip_hdr_offset -
+ (n_labels * 4 + 8);
+
+ entry_arp0.slab[3] = rte_bswap64(macaddr_dst);
+ entry_arp0.slab_offset[3] = p_rt->params.ip_hdr_offset -
+ (n_labels * 4 + 2 * 8);
+
+ entry_arp0.data_offset = entry_arp0.slab_offset[3] + 2
+ - sizeof(struct rte_mbuf);
+ entry_arp0.ether_l2_length = n_labels * 4 + 14;
+ }
+
+ /* Ether MPLS - ARP on */
+ if ((p_rt->params.encap == PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS) &&
+ p_rt->params.n_arp_entries) {
+ uint64_t macaddr_src = p_rt->macaddr[req->data.port_id];
+ uint64_t ethertype_mpls = 0x8847;
+
+ uint64_t label0 = req->data.l2.mpls.labels[0];
+ uint64_t label1 = req->data.l2.mpls.labels[1];
+ uint64_t label2 = req->data.l2.mpls.labels[2];
+ uint64_t label3 = req->data.l2.mpls.labels[3];
+ uint32_t n_labels = req->data.l2.mpls.n_labels;
+
+ switch (n_labels) {
+ case 1:
+ entry_arp1.slab[0] = 0;
+ entry_arp1.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] = rte_bswap64(
+ MPLS_LABEL(label0, 0, 1, 0));
+ entry_arp1.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 8;
+ break;
+
+ case 2:
+ entry_arp1.slab[0] = 0;
+ entry_arp1.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] = rte_bswap64(
+ (MPLS_LABEL(label0, 0, 0, 0) << 32) |
+ MPLS_LABEL(label1, 0, 1, 0));
+ entry_arp1.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 8;
+ break;
+
+ case 3:
+ entry_arp1.slab[0] = rte_bswap64(
+ (MPLS_LABEL(label1, 0, 0, 0) << 32) |
+ MPLS_LABEL(label2, 0, 1, 0));
+ entry_arp1.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] = rte_bswap64(
+ MPLS_LABEL(label0, 0, 0, 0));
+ entry_arp1.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 2 * 8;
+ break;
+
+ case 4:
+ entry_arp1.slab[0] = rte_bswap64(
+ (MPLS_LABEL(label2, 0, 0, 0) << 32) |
+ MPLS_LABEL(label3, 0, 1, 0));
+ entry_arp1.slab_offset[0] =
+ p_rt->params.ip_hdr_offset - 8;
+
+ entry_arp1.slab[1] = rte_bswap64(
+ (MPLS_LABEL(label0, 0, 0, 0) << 32) |
+ MPLS_LABEL(label1, 0, 0, 0));
+ entry_arp1.slab_offset[1] =
+ p_rt->params.ip_hdr_offset - 2 * 8;
+ break;
+
+ default:
+ rsp->status = -1;
+ return rsp;
+ }
+
+ entry_arp1.slab[2] =
+ SLAB_NBO_MACADDRSRC_ETHERTYPE(macaddr_src, ethertype_mpls);
+ entry_arp1.slab_offset[2] = p_rt->params.ip_hdr_offset -
+ (n_labels * 4 + 8);
+
+ entry_arp1.data_offset = entry_arp1.slab_offset[2] - 6
+ - sizeof(struct rte_mbuf);
+ entry_arp1.ether_l2_length = n_labels * 4 + 14;
+ }
+
+ rsp->status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[0],
+ &key,
+ entry,
+ &rsp->key_found,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_route_del_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_route_delete_msg_req *req = msg;
+ struct pipeline_routing_route_delete_msg_rsp *rsp = msg;
+
+ struct rte_table_lpm_key key = {
+ .ip = req->key.key.ipv4.ip,
+ .depth = req->key.key.ipv4.depth,
+ };
+
+ if (req->key.type != PIPELINE_ROUTING_ROUTE_IPV4) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ p->table_id[0],
+ &key,
+ &rsp->key_found,
+ NULL);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_route_add_default_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_routing_route_add_default_msg_req *req = msg;
+ struct pipeline_routing_route_add_default_msg_rsp *rsp = msg;
+
+ struct routing_table_entry default_entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+
+ .flags = 0,
+ .port_id = 0,
+ .ip = 0,
+ };
+
+ rsp->status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[0],
+ (struct rte_pipeline_table_entry *) &default_entry,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_route_del_default_handler(struct pipeline *p,
+ void *msg)
+{
+ struct pipeline_routing_route_delete_default_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ p->table_id[0],
+ NULL);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_arp_add_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_arp_add_msg_req *req = msg;
+ struct pipeline_routing_arp_add_msg_rsp *rsp = msg;
+
+ struct pipeline_routing_arp_key_ipv4 key = {
+ .port_id = req->key.key.ipv4.port_id,
+ .ip = rte_bswap32(req->key.key.ipv4.ip),
+ };
+
+ struct arp_table_entry entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->key.key.ipv4.port_id]},
+ },
+
+ .macaddr = 0, /* set below */
+ };
+
+ if (req->key.type != PIPELINE_ROUTING_ARP_IPV4) {
+ rsp->status = -1;
+ return rsp;
+ }
+
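+	/*
+	 * Pack the 48-bit MAC address into a 64-bit word; on little-endian
+	 * hosts the shift moves the six address bytes into the upper bytes.
+	 */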
+ entry.macaddr = *((uint64_t *)&(req->macaddr));
+ entry.macaddr = entry.macaddr << 16;
+
+ rsp->status = rte_pipeline_table_entry_add(p->p,
+ p->table_id[1],
+ &key,
+ (struct rte_pipeline_table_entry *) &entry,
+ &rsp->key_found,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_arp_del_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_arp_delete_msg_req *req = msg;
+ struct pipeline_routing_arp_delete_msg_rsp *rsp = msg;
+
+ struct pipeline_routing_arp_key_ipv4 key = {
+ .port_id = req->key.key.ipv4.port_id,
+ .ip = rte_bswap32(req->key.key.ipv4.ip),
+ };
+
+ if (req->key.type != PIPELINE_ROUTING_ARP_IPV4) {
+ rsp->status = -1;
+ return rsp;
+ }
+
+ rsp->status = rte_pipeline_table_entry_delete(p->p,
+ p->table_id[1],
+ &key,
+ &rsp->key_found,
+ NULL);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_arp_add_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_arp_add_default_msg_req *req = msg;
+ struct pipeline_routing_arp_add_default_msg_rsp *rsp = msg;
+
+ struct arp_table_entry default_entry = {
+ .head = {
+ .action = RTE_PIPELINE_ACTION_PORT,
+ {.port_id = p->port_out_id[req->port_id]},
+ },
+
+ .macaddr = 0,
+ };
+
+ rsp->status = rte_pipeline_table_default_entry_add(p->p,
+ p->table_id[1],
+ (struct rte_pipeline_table_entry *) &default_entry,
+ (struct rte_pipeline_table_entry **) &rsp->entry_ptr);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_arp_del_default_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing_arp_delete_default_msg_rsp *rsp = msg;
+
+ rsp->status = rte_pipeline_table_default_entry_delete(p->p,
+ p->table_id[1],
+ NULL);
+
+ return rsp;
+}
+
+void *
+pipeline_routing_msg_req_set_macaddr_handler(struct pipeline *p, void *msg)
+{
+ struct pipeline_routing *p_rt = (struct pipeline_routing *) p;
+ struct pipeline_routing_set_macaddr_msg_req *req = msg;
+ struct pipeline_routing_set_macaddr_msg_rsp *rsp = msg;
+ uint32_t port_id;
+
+ for (port_id = 0; port_id < p->n_ports_out; port_id++)
+ p_rt->macaddr[port_id] = req->macaddr[port_id];
+
+ rsp->status = 0;
+
+ return rsp;
+}
+
+struct pipeline_be_ops pipeline_routing_be_ops = {
+ .f_init = pipeline_routing_init,
+ .f_free = pipeline_routing_free,
+ .f_run = NULL,
+ .f_timer = pipeline_routing_timer,
+};
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing_be.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing_be.h
new file mode 100644
index 00000000..12763427
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline/pipeline_routing_be.h
@@ -0,0 +1,312 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_ROUTING_BE_H__
+#define __INCLUDE_PIPELINE_ROUTING_BE_H__
+
+#include <rte_ether.h>
+
+#include "pipeline_common_be.h"
+
+/*
+ * Pipeline argument parsing
+ */
+#ifndef PIPELINE_ROUTING_N_ROUTES_DEFAULT
+#define PIPELINE_ROUTING_N_ROUTES_DEFAULT 4096
+#endif
+
+enum pipeline_routing_encap {
+ PIPELINE_ROUTING_ENCAP_ETHERNET = 0,
+ PIPELINE_ROUTING_ENCAP_ETHERNET_QINQ,
+ PIPELINE_ROUTING_ENCAP_ETHERNET_MPLS,
+};
+
+struct pipeline_routing_params {
+ /* routing */
+ uint32_t n_routes;
+ uint32_t port_local_dest;
+
+ /* routing packet encapsulation */
+ enum pipeline_routing_encap encap;
+ uint32_t qinq_sched;
+ uint32_t mpls_color_mark;
+
+ /* arp */
+ uint32_t n_arp_entries;
+
+ /* packet buffer offsets */
+ uint32_t ip_hdr_offset;
+ uint32_t arp_key_offset;
+ uint32_t color_offset;
+
+ /* debug */
+ uint32_t dbg_ah_disable;
+};
+
+int
+pipeline_routing_parse_args(struct pipeline_routing_params *p,
+ struct pipeline_params *params);
+
+/*
+ * Route
+ */
+enum pipeline_routing_route_key_type {
+ PIPELINE_ROUTING_ROUTE_IPV4,
+};
+
+struct pipeline_routing_route_key_ipv4 {
+ uint32_t ip;
+ uint32_t depth;
+};
+
+struct pipeline_routing_route_key {
+ enum pipeline_routing_route_key_type type;
+ union {
+ struct pipeline_routing_route_key_ipv4 ipv4;
+ } key;
+};
+
+enum pipeline_routing_route_flags {
+ PIPELINE_ROUTING_ROUTE_LOCAL = 1 << 0, /* 0 = remote; 1 = local */
+ PIPELINE_ROUTING_ROUTE_ARP = 1 << 1, /* 0 = ARP OFF; 1 = ARP ON */
+ PIPELINE_ROUTING_ROUTE_QINQ = 1 << 2, /* 0 = QINQ OFF; 1 = QINQ ON */
+ PIPELINE_ROUTING_ROUTE_MPLS = 1 << 3, /* 0 = MPLS OFF; 1 = MPLS ON */
+};
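+
+/*
+ * Example (illustrative): a route over QinQ encapsulation that is resolved
+ * through the ARP table sets both PIPELINE_ROUTING_ROUTE_ARP and
+ * PIPELINE_ROUTING_ROUTE_QINQ, matching the consistency checks performed
+ * by the route add handler.
+ */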
+
+#define PIPELINE_ROUTING_MPLS_LABELS_MAX 4
+
+struct pipeline_routing_route_data {
+ uint32_t flags;
+ uint32_t port_id; /* Output port ID */
+
+ union {
+ /* Next hop IP (valid only when ARP is enabled) */
+ uint32_t ip;
+
+		/* Next hop MAC address (valid only when ARP is disabled) */
+ struct ether_addr macaddr;
+ } ethernet;
+
+ union {
+ struct {
+ uint16_t svlan;
+ uint16_t cvlan;
+ } qinq;
+
+ struct {
+ uint32_t labels[PIPELINE_ROUTING_MPLS_LABELS_MAX];
+ uint32_t n_labels;
+ } mpls;
+ } l2;
+};
+
+/*
+ * ARP
+ */
+enum pipeline_routing_arp_key_type {
+ PIPELINE_ROUTING_ARP_IPV4,
+};
+
+struct pipeline_routing_arp_key_ipv4 {
+ uint32_t port_id;
+ uint32_t ip;
+};
+
+struct pipeline_routing_arp_key {
+ enum pipeline_routing_arp_key_type type;
+ union {
+ struct pipeline_routing_arp_key_ipv4 ipv4;
+ } key;
+};
+
+/*
+ * Messages
+ */
+enum pipeline_routing_msg_req_type {
+ PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD,
+ PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL,
+ PIPELINE_ROUTING_MSG_REQ_ROUTE_ADD_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQ_ROUTE_DEL_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQ_ARP_ADD,
+ PIPELINE_ROUTING_MSG_REQ_ARP_DEL,
+ PIPELINE_ROUTING_MSG_REQ_ARP_ADD_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQ_ARP_DEL_DEFAULT,
+ PIPELINE_ROUTING_MSG_REQ_SET_MACADDR,
+ PIPELINE_ROUTING_MSG_REQS
+};
+
+/*
+ * MSG ROUTE ADD
+ */
+struct pipeline_routing_route_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_routing_route_key key;
+
+ /* data */
+ struct pipeline_routing_route_data data;
+};
+
+struct pipeline_routing_route_add_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ROUTE DELETE
+ */
+struct pipeline_routing_route_delete_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_routing_route_key key;
+};
+
+struct pipeline_routing_route_delete_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/*
+ * MSG ROUTE ADD DEFAULT
+ */
+struct pipeline_routing_route_add_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* data */
+ uint32_t port_id;
+};
+
+struct pipeline_routing_route_add_default_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ROUTE DELETE DEFAULT
+ */
+struct pipeline_routing_route_delete_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+};
+
+struct pipeline_routing_route_delete_default_msg_rsp {
+ int status;
+};
+
+/*
+ * MSG ARP ADD
+ */
+struct pipeline_routing_arp_add_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_routing_arp_key key;
+
+ /* data */
+ struct ether_addr macaddr;
+};
+
+struct pipeline_routing_arp_add_msg_rsp {
+ int status;
+ int key_found;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ARP DELETE
+ */
+struct pipeline_routing_arp_delete_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* key */
+ struct pipeline_routing_arp_key key;
+};
+
+struct pipeline_routing_arp_delete_msg_rsp {
+ int status;
+ int key_found;
+};
+
+/*
+ * MSG ARP ADD DEFAULT
+ */
+struct pipeline_routing_arp_add_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ /* data */
+ uint32_t port_id;
+};
+
+struct pipeline_routing_arp_add_default_msg_rsp {
+ int status;
+ void *entry_ptr;
+};
+
+/*
+ * MSG ARP DELETE DEFAULT
+ */
+struct pipeline_routing_arp_delete_default_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+};
+
+struct pipeline_routing_arp_delete_default_msg_rsp {
+ int status;
+};
+
+/*
+ * MSG SET MACADDR
+ */
+struct pipeline_routing_set_macaddr_msg_req {
+ enum pipeline_msg_req_type type;
+ enum pipeline_routing_msg_req_type subtype;
+
+ uint64_t macaddr[PIPELINE_MAX_PORT_OUT];
+};
+
+struct pipeline_routing_set_macaddr_msg_rsp {
+ int status;
+};
+
+extern struct pipeline_be_ops pipeline_routing_be_ops;
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/pipeline_be.h b/src/seastar/dpdk/examples/ip_pipeline/pipeline_be.h
new file mode 100644
index 00000000..0cfcc809
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/pipeline_be.h
@@ -0,0 +1,351 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __INCLUDE_PIPELINE_BE_H__
+#define __INCLUDE_PIPELINE_BE_H__
+
+#include <rte_port_ethdev.h>
+#include <rte_port_ring.h>
+#include <rte_port_frag.h>
+#include <rte_port_ras.h>
+#include <rte_port_sched.h>
+#include <rte_port_fd.h>
+#include <rte_port_source_sink.h>
+#ifdef RTE_LIBRTE_KNI
+#include <rte_port_kni.h>
+#endif
+#include <rte_pipeline.h>
+
+enum pipeline_port_in_type {
+ PIPELINE_PORT_IN_ETHDEV_READER,
+ PIPELINE_PORT_IN_RING_READER,
+ PIPELINE_PORT_IN_RING_MULTI_READER,
+ PIPELINE_PORT_IN_RING_READER_IPV4_FRAG,
+ PIPELINE_PORT_IN_RING_READER_IPV6_FRAG,
+ PIPELINE_PORT_IN_SCHED_READER,
+ PIPELINE_PORT_IN_FD_READER,
+ PIPELINE_PORT_IN_KNI_READER,
+ PIPELINE_PORT_IN_SOURCE,
+};
+
+struct pipeline_port_in_params {
+ enum pipeline_port_in_type type;
+ union {
+ struct rte_port_ethdev_reader_params ethdev;
+ struct rte_port_ring_reader_params ring;
+ struct rte_port_ring_multi_reader_params ring_multi;
+ struct rte_port_ring_reader_ipv4_frag_params ring_ipv4_frag;
+ struct rte_port_ring_reader_ipv6_frag_params ring_ipv6_frag;
+ struct rte_port_sched_reader_params sched;
+ struct rte_port_fd_reader_params fd;
+#ifdef RTE_LIBRTE_KNI
+ struct rte_port_kni_reader_params kni;
+#endif
+ struct rte_port_source_params source;
+ } params;
+ uint32_t burst_size;
+};
+
+static inline void *
+pipeline_port_in_params_convert(struct pipeline_port_in_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_IN_ETHDEV_READER:
+ return (void *) &p->params.ethdev;
+ case PIPELINE_PORT_IN_RING_READER:
+ return (void *) &p->params.ring;
+ case PIPELINE_PORT_IN_RING_MULTI_READER:
+ return (void *) &p->params.ring_multi;
+ case PIPELINE_PORT_IN_RING_READER_IPV4_FRAG:
+ return (void *) &p->params.ring_ipv4_frag;
+ case PIPELINE_PORT_IN_RING_READER_IPV6_FRAG:
+ return (void *) &p->params.ring_ipv6_frag;
+ case PIPELINE_PORT_IN_SCHED_READER:
+ return (void *) &p->params.sched;
+ case PIPELINE_PORT_IN_FD_READER:
+ return (void *) &p->params.fd;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_IN_KNI_READER:
+ return (void *) &p->params.kni;
+#endif
+ case PIPELINE_PORT_IN_SOURCE:
+ return (void *) &p->params.source;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct rte_port_in_ops *
+pipeline_port_in_params_get_ops(struct pipeline_port_in_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_IN_ETHDEV_READER:
+ return &rte_port_ethdev_reader_ops;
+ case PIPELINE_PORT_IN_RING_READER:
+ return &rte_port_ring_reader_ops;
+ case PIPELINE_PORT_IN_RING_MULTI_READER:
+ return &rte_port_ring_multi_reader_ops;
+ case PIPELINE_PORT_IN_RING_READER_IPV4_FRAG:
+ return &rte_port_ring_reader_ipv4_frag_ops;
+ case PIPELINE_PORT_IN_RING_READER_IPV6_FRAG:
+ return &rte_port_ring_reader_ipv6_frag_ops;
+ case PIPELINE_PORT_IN_SCHED_READER:
+ return &rte_port_sched_reader_ops;
+ case PIPELINE_PORT_IN_FD_READER:
+ return &rte_port_fd_reader_ops;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_IN_KNI_READER:
+ return &rte_port_kni_reader_ops;
+#endif
+ case PIPELINE_PORT_IN_SOURCE:
+ return &rte_port_source_ops;
+ default:
+ return NULL;
+ }
+}
+
+enum pipeline_port_out_type {
+ PIPELINE_PORT_OUT_ETHDEV_WRITER,
+ PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP,
+ PIPELINE_PORT_OUT_RING_WRITER,
+ PIPELINE_PORT_OUT_RING_MULTI_WRITER,
+ PIPELINE_PORT_OUT_RING_WRITER_NODROP,
+ PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP,
+ PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS,
+ PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS,
+ PIPELINE_PORT_OUT_SCHED_WRITER,
+ PIPELINE_PORT_OUT_FD_WRITER,
+ PIPELINE_PORT_OUT_KNI_WRITER,
+ PIPELINE_PORT_OUT_KNI_WRITER_NODROP,
+ PIPELINE_PORT_OUT_SINK,
+};
+
+struct pipeline_port_out_params {
+ enum pipeline_port_out_type type;
+ union {
+ struct rte_port_ethdev_writer_params ethdev;
+ struct rte_port_ethdev_writer_nodrop_params ethdev_nodrop;
+ struct rte_port_ring_writer_params ring;
+ struct rte_port_ring_multi_writer_params ring_multi;
+ struct rte_port_ring_writer_nodrop_params ring_nodrop;
+ struct rte_port_ring_multi_writer_nodrop_params ring_multi_nodrop;
+ struct rte_port_ring_writer_ipv4_ras_params ring_ipv4_ras;
+ struct rte_port_ring_writer_ipv6_ras_params ring_ipv6_ras;
+ struct rte_port_sched_writer_params sched;
+ struct rte_port_fd_writer_params fd;
+#ifdef RTE_LIBRTE_KNI
+ struct rte_port_kni_writer_params kni;
+ struct rte_port_kni_writer_nodrop_params kni_nodrop;
+#endif
+ struct rte_port_sink_params sink;
+ } params;
+};
+
+static inline void *
+pipeline_port_out_params_convert(struct pipeline_port_out_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER:
+ return (void *) &p->params.ethdev;
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP:
+ return (void *) &p->params.ethdev_nodrop;
+ case PIPELINE_PORT_OUT_RING_WRITER:
+ return (void *) &p->params.ring;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER:
+ return (void *) &p->params.ring_multi;
+ case PIPELINE_PORT_OUT_RING_WRITER_NODROP:
+ return (void *) &p->params.ring_nodrop;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP:
+ return (void *) &p->params.ring_multi_nodrop;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS:
+ return (void *) &p->params.ring_ipv4_ras;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS:
+ return (void *) &p->params.ring_ipv6_ras;
+ case PIPELINE_PORT_OUT_SCHED_WRITER:
+ return (void *) &p->params.sched;
+ case PIPELINE_PORT_OUT_FD_WRITER:
+ return (void *) &p->params.fd;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_OUT_KNI_WRITER:
+ return (void *) &p->params.kni;
+ case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
+ return (void *) &p->params.kni_nodrop;
+#endif
+ case PIPELINE_PORT_OUT_SINK:
+ return (void *) &p->params.sink;
+ default:
+ return NULL;
+ }
+}
+
+static inline struct rte_port_out_ops *
+pipeline_port_out_params_get_ops(struct pipeline_port_out_params *p)
+{
+ switch (p->type) {
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER:
+ return &rte_port_ethdev_writer_ops;
+ case PIPELINE_PORT_OUT_ETHDEV_WRITER_NODROP:
+ return &rte_port_ethdev_writer_nodrop_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER:
+ return &rte_port_ring_writer_ops;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER:
+ return &rte_port_ring_multi_writer_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER_NODROP:
+ return &rte_port_ring_writer_nodrop_ops;
+ case PIPELINE_PORT_OUT_RING_MULTI_WRITER_NODROP:
+ return &rte_port_ring_multi_writer_nodrop_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV4_RAS:
+ return &rte_port_ring_writer_ipv4_ras_ops;
+ case PIPELINE_PORT_OUT_RING_WRITER_IPV6_RAS:
+ return &rte_port_ring_writer_ipv6_ras_ops;
+ case PIPELINE_PORT_OUT_SCHED_WRITER:
+ return &rte_port_sched_writer_ops;
+ case PIPELINE_PORT_OUT_FD_WRITER:
+ return &rte_port_fd_writer_ops;
+#ifdef RTE_LIBRTE_KNI
+ case PIPELINE_PORT_OUT_KNI_WRITER:
+ return &rte_port_kni_writer_ops;
+ case PIPELINE_PORT_OUT_KNI_WRITER_NODROP:
+ return &rte_port_kni_writer_nodrop_ops;
+#endif
+ case PIPELINE_PORT_OUT_SINK:
+ return &rte_port_sink_ops;
+ default:
+ return NULL;
+ }
+}
+
+#ifndef PIPELINE_NAME_SIZE
+#define PIPELINE_NAME_SIZE 64
+#endif
+
+#ifndef PIPELINE_TYPE_SIZE
+#define PIPELINE_TYPE_SIZE 64
+#endif
+
+#ifndef PIPELINE_MAX_PORT_IN
+#define PIPELINE_MAX_PORT_IN 64
+#endif
+
+#ifndef PIPELINE_MAX_PORT_OUT
+#define PIPELINE_MAX_PORT_OUT 64
+#endif
+
+#ifndef PIPELINE_MAX_TABLES
+#define PIPELINE_MAX_TABLES 16
+#endif
+
+#ifndef PIPELINE_MAX_MSGQ_IN
+#define PIPELINE_MAX_MSGQ_IN 16
+#endif
+
+#ifndef PIPELINE_MAX_MSGQ_OUT
+#define PIPELINE_MAX_MSGQ_OUT 16
+#endif
+
+#ifndef PIPELINE_MAX_ARGS
+#define PIPELINE_MAX_ARGS 64
+#endif
+
+struct pipeline_params {
+ char name[PIPELINE_NAME_SIZE];
+ char type[PIPELINE_TYPE_SIZE];
+
+ struct pipeline_port_in_params port_in[PIPELINE_MAX_PORT_IN];
+ struct pipeline_port_out_params port_out[PIPELINE_MAX_PORT_OUT];
+ struct rte_ring *msgq_in[PIPELINE_MAX_MSGQ_IN];
+ struct rte_ring *msgq_out[PIPELINE_MAX_MSGQ_OUT];
+
+ uint32_t n_ports_in;
+ uint32_t n_ports_out;
+ uint32_t n_msgq;
+
+ int socket_id;
+
+ char *args_name[PIPELINE_MAX_ARGS];
+ char *args_value[PIPELINE_MAX_ARGS];
+ uint32_t n_args;
+
+ uint32_t log_level;
+};
+
+/*
+ * Pipeline type back-end operations
+ */
+
+typedef void* (*pipeline_be_op_init)(struct pipeline_params *params,
+ void *arg);
+
+typedef int (*pipeline_be_op_free)(void *pipeline);
+
+typedef int (*pipeline_be_op_run)(void *pipeline);
+
+typedef int (*pipeline_be_op_timer)(void *pipeline);
+
+struct pipeline_be_ops {
+ pipeline_be_op_init f_init;
+ pipeline_be_op_free f_free;
+ pipeline_be_op_run f_run;
+ pipeline_be_op_timer f_timer;
+};
+
+/* Pipeline specific config parse error messages */
+#define PIPELINE_ARG_CHECK(exp, fmt, ...) \
+do { \
+ if (!(exp)) { \
+ fprintf(stderr, fmt "\n", ## __VA_ARGS__); \
+ return -1; \
+ } \
+} while (0)
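+
+/*
+ * Usage sketch (illustrative only):
+ *
+ *   PIPELINE_ARG_CHECK(ip_hdr_offset_present, "Parse error in section "
+ *       "\"%s\": mandatory entry \"ip_hdr_offset\" is missing", params->name);
+ *
+ * When the condition is false, the macro prints the message to stderr and
+ * makes the calling parse function return -1.
+ */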
+
+#define PIPELINE_PARSE_ERR_INV_VAL(exp, section, entry, val) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": entry \"%s\" " \
+ "has invalid value (\"%s\")", section, entry, val)
+
+#define PIPELINE_PARSE_ERR_OUT_RNG(exp, section, entry, val) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": entry \"%s\" " \
+ "value is out of range (\"%s\")", section, entry, val)
+
+#define PIPELINE_PARSE_ERR_DUPLICATE(exp, section, entry) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": duplicated " \
+ "entry \"%s\"", section, entry)
+
+#define PIPELINE_PARSE_ERR_INV_ENT(exp, section, entry) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": invalid entry " \
+ "\"%s\"", section, entry)
+
+#define PIPELINE_PARSE_ERR_MANDATORY(exp, section, entry) \
+PIPELINE_ARG_CHECK(exp, "Parse error in section \"%s\": mandatory " \
+ "entry \"%s\" is missing", section, entry)
+
+#endif
diff --git a/src/seastar/dpdk/examples/ip_pipeline/thread.c b/src/seastar/dpdk/examples/ip_pipeline/thread.c
new file mode 100644
index 00000000..a0f1f12f
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/thread.c
@@ -0,0 +1,322 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_pipeline.h>
+
+#include "pipeline_common_be.h"
+#include "app.h"
+#include "thread.h"
+
+#if APP_THREAD_HEADROOM_STATS_COLLECT
+
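+/*
+ * With headroom statistics enabled, any pipeline run that returns zero
+ * packets is counted as idle time: its cycles are added to the thread's
+ * headroom_cycles, which thread_headroom_update() later turns into a ratio.
+ */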
+#define PIPELINE_RUN_REGULAR(thread, pipeline) \
+do { \
+ uint64_t t0 = rte_rdtsc_precise(); \
+ int n_pkts = rte_pipeline_run(pipeline->p); \
+ \
+ if (n_pkts == 0) { \
+ uint64_t t1 = rte_rdtsc_precise(); \
+ \
+ thread->headroom_cycles += t1 - t0; \
+ } \
+} while (0)
+
+
+#define PIPELINE_RUN_CUSTOM(thread, data) \
+do { \
+ uint64_t t0 = rte_rdtsc_precise(); \
+ int n_pkts = data->f_run(data->be); \
+ \
+ if (n_pkts == 0) { \
+ uint64_t t1 = rte_rdtsc_precise(); \
+ \
+ thread->headroom_cycles += t1 - t0; \
+ } \
+} while (0)
+
+#else
+
+#define PIPELINE_RUN_REGULAR(thread, pipeline) \
+ rte_pipeline_run(pipeline->p)
+
+#define PIPELINE_RUN_CUSTOM(thread, data) \
+ data->f_run(data->be)
+
+#endif
+
+static inline void *
+thread_msg_recv(struct rte_ring *r)
+{
+ void *msg;
+ int status = rte_ring_sc_dequeue(r, &msg);
+
+ if (status != 0)
+ return NULL;
+
+ return msg;
+}
+
+static inline void
+thread_msg_send(struct rte_ring *r,
+ void *msg)
+{
+ int status;
+
+ do {
+ status = rte_ring_sp_enqueue(r, msg);
+ } while (status == -ENOBUFS);
+}
+
+static int
+thread_pipeline_enable(struct app_thread_data *t,
+ struct thread_pipeline_enable_msg_req *req)
+{
+ struct app_thread_pipeline_data *p;
+
+ if (req->f_run == NULL) {
+ if (t->n_regular >= APP_MAX_THREAD_PIPELINES)
+ return -1;
+ } else {
+ if (t->n_custom >= APP_MAX_THREAD_PIPELINES)
+ return -1;
+ }
+
+ p = (req->f_run == NULL) ?
+ &t->regular[t->n_regular] :
+ &t->custom[t->n_custom];
+
+ p->pipeline_id = req->pipeline_id;
+ p->be = req->be;
+ p->f_run = req->f_run;
+ p->f_timer = req->f_timer;
+ p->timer_period = req->timer_period;
+ p->deadline = 0;
+
+ if (req->f_run == NULL)
+ t->n_regular++;
+ else
+ t->n_custom++;
+
+ return 0;
+}
+
+static int
+thread_pipeline_disable(struct app_thread_data *t,
+ struct thread_pipeline_disable_msg_req *req)
+{
+ uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
+ uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
+ uint32_t i;
+
+ /* search regular pipelines of current thread */
+ for (i = 0; i < n_regular; i++) {
+ if (t->regular[i].pipeline_id != req->pipeline_id)
+ continue;
+
+ if (i < n_regular - 1)
+ memcpy(&t->regular[i],
+ &t->regular[i+1],
+ (n_regular - 1 - i) * sizeof(struct app_thread_pipeline_data));
+
+ n_regular--;
+ t->n_regular = n_regular;
+
+ return 0;
+ }
+
+ /* search custom pipelines of current thread */
+ for (i = 0; i < n_custom; i++) {
+ if (t->custom[i].pipeline_id != req->pipeline_id)
+ continue;
+
+ if (i < n_custom - 1)
+ memcpy(&t->custom[i],
+ &t->custom[i+1],
+ (n_custom - 1 - i) * sizeof(struct app_thread_pipeline_data));
+
+ n_custom--;
+ t->n_custom = n_custom;
+
+ return 0;
+ }
+
+ /* return if pipeline not found */
+ return -1;
+}
+
+static int
+thread_msg_req_handle(struct app_thread_data *t)
+{
+ void *msg_ptr;
+ struct thread_msg_req *req;
+ struct thread_msg_rsp *rsp;
+
+ msg_ptr = thread_msg_recv(t->msgq_in);
+ req = msg_ptr;
+ rsp = msg_ptr;
+
+ if (req != NULL)
+ switch (req->type) {
+ case THREAD_MSG_REQ_PIPELINE_ENABLE: {
+ rsp->status = thread_pipeline_enable(t,
+ (struct thread_pipeline_enable_msg_req *) req);
+ thread_msg_send(t->msgq_out, rsp);
+ break;
+ }
+
+ case THREAD_MSG_REQ_PIPELINE_DISABLE: {
+ rsp->status = thread_pipeline_disable(t,
+ (struct thread_pipeline_disable_msg_req *) req);
+ thread_msg_send(t->msgq_out, rsp);
+ break;
+ }
+
+ case THREAD_MSG_REQ_HEADROOM_READ: {
+ struct thread_headroom_read_msg_rsp *rsp =
+ (struct thread_headroom_read_msg_rsp *)
+ req;
+
+ rsp->headroom_ratio = t->headroom_ratio;
+ rsp->status = 0;
+ thread_msg_send(t->msgq_out, rsp);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void
+thread_headroom_update(struct app_thread_data *t, uint64_t time)
+{
+ uint64_t time_diff = time - t->headroom_time;
+
+ t->headroom_ratio =
+ ((double) t->headroom_cycles) / ((double) time_diff);
+
+ t->headroom_cycles = 0;
+ t->headroom_time = rte_rdtsc_precise();
+}
+
+int
+app_thread(void *arg)
+{
+ struct app_params *app = (struct app_params *) arg;
+ uint32_t core_id = rte_lcore_id(), i, j;
+ struct app_thread_data *t = &app->thread_data[core_id];
+
+ for (i = 0; ; i++) {
+ uint32_t n_regular = RTE_MIN(t->n_regular, RTE_DIM(t->regular));
+ uint32_t n_custom = RTE_MIN(t->n_custom, RTE_DIM(t->custom));
+
+ /* Run regular pipelines */
+ for (j = 0; j < n_regular; j++) {
+ struct app_thread_pipeline_data *data = &t->regular[j];
+ struct pipeline *p = data->be;
+
+ PIPELINE_RUN_REGULAR(t, p);
+ }
+
+ /* Run custom pipelines */
+ for (j = 0; j < n_custom; j++) {
+ struct app_thread_pipeline_data *data = &t->custom[j];
+
+ PIPELINE_RUN_CUSTOM(t, data);
+ }
+
+		/* Timers: check deadlines only once every 16 iterations */
+ if ((i & 0xF) == 0) {
+ uint64_t time = rte_get_tsc_cycles();
+ uint64_t t_deadline = UINT64_MAX;
+
+ if (time < t->deadline)
+ continue;
+
+ /* Timer for regular pipelines */
+ for (j = 0; j < n_regular; j++) {
+ struct app_thread_pipeline_data *data =
+ &t->regular[j];
+ uint64_t p_deadline = data->deadline;
+
+ if (p_deadline <= time) {
+ data->f_timer(data->be);
+ p_deadline = time + data->timer_period;
+ data->deadline = p_deadline;
+ }
+
+ if (p_deadline < t_deadline)
+ t_deadline = p_deadline;
+ }
+
+ /* Timer for custom pipelines */
+ for (j = 0; j < n_custom; j++) {
+ struct app_thread_pipeline_data *data =
+ &t->custom[j];
+ uint64_t p_deadline = data->deadline;
+
+ if (p_deadline <= time) {
+ data->f_timer(data->be);
+ p_deadline = time + data->timer_period;
+ data->deadline = p_deadline;
+ }
+
+ if (p_deadline < t_deadline)
+ t_deadline = p_deadline;
+ }
+
+ /* Timer for thread message request */
+ {
+ uint64_t deadline = t->thread_req_deadline;
+
+ if (deadline <= time) {
+ thread_msg_req_handle(t);
+ thread_headroom_update(t, time);
+ deadline = time + t->timer_period;
+ t->thread_req_deadline = deadline;
+ }
+
+ if (deadline < t_deadline)
+ t_deadline = deadline;
+ }
+
+ t->deadline = t_deadline;
+ }
+ }
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/examples/ip_pipeline/thread.h b/src/seastar/dpdk/examples/ip_pipeline/thread.h
new file mode 100644
index 00000000..e52b22e6
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/thread.h
@@ -0,0 +1,98 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THREAD_H_
+#define THREAD_H_
+
+#include "app.h"
+#include "pipeline_be.h"
+
+enum thread_msg_req_type {
+ THREAD_MSG_REQ_PIPELINE_ENABLE = 0,
+ THREAD_MSG_REQ_PIPELINE_DISABLE,
+ THREAD_MSG_REQ_HEADROOM_READ,
+ THREAD_MSG_REQS
+};
+
+struct thread_msg_req {
+ enum thread_msg_req_type type;
+};
+
+struct thread_msg_rsp {
+ int status;
+};
+
+/*
+ * PIPELINE ENABLE
+ */
+struct thread_pipeline_enable_msg_req {
+ enum thread_msg_req_type type;
+
+ uint32_t pipeline_id;
+ void *be;
+ pipeline_be_op_run f_run;
+ pipeline_be_op_timer f_timer;
+ uint64_t timer_period;
+};
+
+struct thread_pipeline_enable_msg_rsp {
+ int status;
+};
+
+/*
+ * PIPELINE DISABLE
+ */
+struct thread_pipeline_disable_msg_req {
+ enum thread_msg_req_type type;
+
+ uint32_t pipeline_id;
+};
+
+struct thread_pipeline_disable_msg_rsp {
+ int status;
+};
+
+/*
+ * THREAD HEADROOM
+ */
+struct thread_headroom_read_msg_req {
+ enum thread_msg_req_type type;
+};
+
+struct thread_headroom_read_msg_rsp {
+ int status;
+
+ double headroom_ratio;
+};
+
+#endif /* THREAD_H_ */
diff --git a/src/seastar/dpdk/examples/ip_pipeline/thread_fe.c b/src/seastar/dpdk/examples/ip_pipeline/thread_fe.c
new file mode 100644
index 00000000..4590c2b5
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/thread_fe.c
@@ -0,0 +1,457 @@
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_malloc.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+
+#include "thread.h"
+#include "thread_fe.h"
+#include "pipeline.h"
+#include "pipeline_common_fe.h"
+#include "app.h"
+
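+/*
+ * Send a request on the target thread's input message queue, then busy-wait
+ * for the response on its output queue; returns NULL if timeout_ms expires
+ * first (timeout_ms == 0 means wait indefinitely).
+ */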
+static inline void *
+thread_msg_send_recv(struct app_params *app,
+ uint32_t socket_id, uint32_t core_id, uint32_t ht_id,
+ void *msg,
+ uint32_t timeout_ms)
+{
+ struct rte_ring *r_req = app_thread_msgq_in_get(app,
+ socket_id, core_id, ht_id);
+ struct rte_ring *r_rsp = app_thread_msgq_out_get(app,
+ socket_id, core_id, ht_id);
+ uint64_t hz = rte_get_tsc_hz();
+ void *msg_recv;
+ uint64_t deadline;
+ int status;
+
+ /* send */
+ do {
+ status = rte_ring_sp_enqueue(r_req, (void *) msg);
+ } while (status == -ENOBUFS);
+
+ /* recv */
+ deadline = (timeout_ms) ?
+ (rte_rdtsc() + ((hz * timeout_ms) / 1000)) :
+ UINT64_MAX;
+
+ do {
+ if (rte_rdtsc() > deadline)
+ return NULL;
+
+ status = rte_ring_sc_dequeue(r_rsp, &msg_recv);
+ } while (status != 0);
+
+ return msg_recv;
+}
+
+int
+app_pipeline_enable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id)
+{
+ struct thread_pipeline_enable_msg_req *req;
+ struct thread_pipeline_enable_msg_rsp *rsp;
+ int thread_id;
+ struct app_pipeline_data *p;
+ struct app_pipeline_params *p_params;
+ struct pipeline_type *p_type;
+ int status;
+
+ if (app == NULL)
+ return -1;
+
+ thread_id = cpu_core_map_get_lcore_id(app->core_map,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
+ return -1;
+
+ if (app_pipeline_data(app, pipeline_id) == NULL)
+ return -1;
+
+ p = &app->pipeline_data[pipeline_id];
+ p_params = &app->pipeline_params[pipeline_id];
+ p_type = app_pipeline_type_find(app, p_params->type);
+
+ if (p_type == NULL)
+ return -1;
+
+ if (p->enabled == 1)
+ return -1;
+
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = THREAD_MSG_REQ_PIPELINE_ENABLE;
+ req->pipeline_id = pipeline_id;
+ req->be = p->be;
+ req->f_run = p_type->be_ops->f_run;
+ req->f_timer = p_type->be_ops->f_timer;
+ req->timer_period = p->timer_period;
+
+ rsp = thread_msg_send_recv(app,
+ socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+ if (rsp == NULL)
+ return -1;
+
+ status = rsp->status;
+ app_msg_free(app, rsp);
+
+ if (status != 0)
+ return -1;
+
+ p->enabled = 1;
+ return 0;
+}
+
+int
+app_pipeline_disable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id)
+{
+ struct thread_pipeline_disable_msg_req *req;
+ struct thread_pipeline_disable_msg_rsp *rsp;
+ int thread_id;
+ struct app_pipeline_data *p;
+ int status;
+
+ if (app == NULL)
+ return -1;
+
+ thread_id = cpu_core_map_get_lcore_id(app->core_map,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
+ return -1;
+
+ if (app_pipeline_data(app, pipeline_id) == NULL)
+ return -1;
+
+ p = &app->pipeline_data[pipeline_id];
+
+ if (p->enabled == 0)
+ return -1;
+
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = THREAD_MSG_REQ_PIPELINE_DISABLE;
+ req->pipeline_id = pipeline_id;
+
+ rsp = thread_msg_send_recv(app,
+ socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+
+ if (rsp == NULL)
+ return -1;
+
+ status = rsp->status;
+ app_msg_free(app, rsp);
+
+ if (status != 0)
+ return -1;
+
+ p->enabled = 0;
+ return 0;
+}
+
+int
+app_thread_headroom(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id)
+{
+ struct thread_headroom_read_msg_req *req;
+ struct thread_headroom_read_msg_rsp *rsp;
+ int thread_id;
+ int status;
+
+ if (app == NULL)
+ return -1;
+
+ thread_id = cpu_core_map_get_lcore_id(app->core_map,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if ((thread_id < 0) || !app_core_is_enabled(app, thread_id))
+ return -1;
+
+ req = app_msg_alloc(app);
+ if (req == NULL)
+ return -1;
+
+ req->type = THREAD_MSG_REQ_HEADROOM_READ;
+
+ rsp = thread_msg_send_recv(app,
+ socket_id, core_id, hyper_th_id, req, MSG_TIMEOUT_DEFAULT);
+
+ if (rsp == NULL)
+ return -1;
+
+	status = rsp->status;
+
+	if (status != 0) {
+		app_msg_free(app, rsp);
+		return -1;
+	}
+
+	printf("%.3f%%\n", rsp->headroom_ratio * 100);
+
+	app_msg_free(app, rsp);
+
+ return 0;
+}
+
+/*
+ * pipeline enable
+ */
+
+struct cmd_pipeline_enable_result {
+ cmdline_fixed_string_t t_string;
+ cmdline_fixed_string_t t_id_string;
+ cmdline_fixed_string_t pipeline_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t enable_string;
+};
+
+static void
+cmd_pipeline_enable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_pipeline_enable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ uint32_t core_id, socket_id, hyper_th_id;
+
+ if (parse_pipeline_core(&socket_id,
+ &core_id,
+ &hyper_th_id,
+ params->t_id_string) != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = app_pipeline_enable(app,
+ socket_id,
+ core_id,
+ hyper_th_id,
+ params->pipeline_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+static cmdline_parse_token_string_t cmd_pipeline_enable_t_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_string, "t");
+
+static cmdline_parse_token_string_t cmd_pipeline_enable_t_id_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, t_id_string,
+ NULL);
+
+static cmdline_parse_token_string_t cmd_pipeline_enable_pipeline_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_string,
+ "pipeline");
+
+static cmdline_parse_token_num_t cmd_pipeline_enable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_pipeline_enable_result, pipeline_id,
+ UINT32);
+
+static cmdline_parse_token_string_t cmd_pipeline_enable_enable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_enable_result, enable_string,
+ "enable");
+
+static cmdline_parse_inst_t cmd_pipeline_enable = {
+ .f = cmd_pipeline_enable_parsed,
+ .data = NULL,
+ .help_str = "Enable pipeline on specified core",
+ .tokens = {
+ (void *)&cmd_pipeline_enable_t_string,
+ (void *)&cmd_pipeline_enable_t_id_string,
+ (void *)&cmd_pipeline_enable_pipeline_string,
+ (void *)&cmd_pipeline_enable_pipeline_id,
+ (void *)&cmd_pipeline_enable_enable_string,
+ NULL,
+ },
+};
+
+/*
+ * pipeline disable
+ */
+
+struct cmd_pipeline_disable_result {
+ cmdline_fixed_string_t t_string;
+ cmdline_fixed_string_t t_id_string;
+ cmdline_fixed_string_t pipeline_string;
+ uint32_t pipeline_id;
+ cmdline_fixed_string_t disable_string;
+};
+
+static void
+cmd_pipeline_disable_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_pipeline_disable_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ uint32_t core_id, socket_id, hyper_th_id;
+
+ if (parse_pipeline_core(&socket_id,
+ &core_id,
+ &hyper_th_id,
+ params->t_id_string) != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = app_pipeline_disable(app,
+ socket_id,
+ core_id,
+ hyper_th_id,
+ params->pipeline_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+static cmdline_parse_token_string_t cmd_pipeline_disable_t_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_string, "t");
+
+static cmdline_parse_token_string_t cmd_pipeline_disable_t_id_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, t_id_string,
+ NULL);
+
+static cmdline_parse_token_string_t cmd_pipeline_disable_pipeline_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result,
+ pipeline_string, "pipeline");
+
+static cmdline_parse_token_num_t cmd_pipeline_disable_pipeline_id =
+ TOKEN_NUM_INITIALIZER(struct cmd_pipeline_disable_result, pipeline_id,
+ UINT32);
+
+static cmdline_parse_token_string_t cmd_pipeline_disable_disable_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_pipeline_disable_result, disable_string,
+ "disable");
+
+static cmdline_parse_inst_t cmd_pipeline_disable = {
+ .f = cmd_pipeline_disable_parsed,
+ .data = NULL,
+ .help_str = "Disable pipeline on specified core",
+ .tokens = {
+ (void *)&cmd_pipeline_disable_t_string,
+ (void *)&cmd_pipeline_disable_t_id_string,
+ (void *)&cmd_pipeline_disable_pipeline_string,
+ (void *)&cmd_pipeline_disable_pipeline_id,
+ (void *)&cmd_pipeline_disable_disable_string,
+ NULL,
+ },
+};
+
+
+/*
+ * thread headroom
+ */
+
+struct cmd_thread_headroom_result {
+ cmdline_fixed_string_t t_string;
+ cmdline_fixed_string_t t_id_string;
+ cmdline_fixed_string_t headroom_string;
+};
+
+static void
+cmd_thread_headroom_parsed(
+ void *parsed_result,
+ __rte_unused struct cmdline *cl,
+ void *data)
+{
+ struct cmd_thread_headroom_result *params = parsed_result;
+ struct app_params *app = data;
+ int status;
+ uint32_t core_id, socket_id, hyper_th_id;
+
+ if (parse_pipeline_core(&socket_id,
+ &core_id,
+ &hyper_th_id,
+ params->t_id_string) != 0) {
+ printf("Command failed\n");
+ return;
+ }
+
+ status = app_thread_headroom(app,
+ socket_id,
+ core_id,
+ hyper_th_id);
+
+ if (status != 0)
+ printf("Command failed\n");
+}
+
+static cmdline_parse_token_string_t cmd_thread_headroom_t_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+ t_string, "t");
+
+static cmdline_parse_token_string_t cmd_thread_headroom_t_id_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+ t_id_string, NULL);
+
+static cmdline_parse_token_string_t cmd_thread_headroom_headroom_string =
+ TOKEN_STRING_INITIALIZER(struct cmd_thread_headroom_result,
+ headroom_string, "headroom");
+
+static cmdline_parse_inst_t cmd_thread_headroom = {
+ .f = cmd_thread_headroom_parsed,
+ .data = NULL,
+ .help_str = "Display thread headroom",
+ .tokens = {
+ (void *)&cmd_thread_headroom_t_string,
+ (void *)&cmd_thread_headroom_t_id_string,
+ (void *)&cmd_thread_headroom_headroom_string,
+ NULL,
+ },
+};
+
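+/*
+ * NULL-terminated list of the thread management commands, registered with
+ * the application CLI by app_pipeline_thread_cmd_push().
+ */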
+static cmdline_parse_ctx_t thread_cmds[] = {
+ (cmdline_parse_inst_t *) &cmd_pipeline_enable,
+ (cmdline_parse_inst_t *) &cmd_pipeline_disable,
+ (cmdline_parse_inst_t *) &cmd_thread_headroom,
+ NULL,
+};
+
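+/*
+ * Add the thread commands to the application CLI command array and bind
+ * the application context as the callback data for each of them.
+ * Returns -ENOMEM when the command array does not have enough free slots.
+ */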
+int
+app_pipeline_thread_cmd_push(struct app_params *app)
+{
+ uint32_t n_cmds, i;
+
+ /* Check for available slots in the application commands array */
+ n_cmds = RTE_DIM(thread_cmds) - 1;
+ if (n_cmds > APP_MAX_CMDS - app->n_cmds)
+ return -ENOMEM;
+
+ /* Push thread commands into the application */
+ memcpy(&app->cmds[app->n_cmds], thread_cmds,
+ n_cmds * sizeof(cmdline_parse_ctx_t));
+
+ for (i = 0; i < n_cmds; i++)
+ app->cmds[app->n_cmds + i]->data = app;
+
+ app->n_cmds += n_cmds;
+ app->cmds[app->n_cmds] = NULL;
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/examples/ip_pipeline/thread_fe.h b/src/seastar/dpdk/examples/ip_pipeline/thread_fe.h
new file mode 100644
index 00000000..2fd4ee8e
--- /dev/null
+++ b/src/seastar/dpdk/examples/ip_pipeline/thread_fe.h
@@ -0,0 +1,101 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef THREAD_FE_H_
+#define THREAD_FE_H_
+
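+/*
+ * Look up, by name, the request (REQ) message queue used to post requests
+ * to the thread running on the given socket/core/hyper-thread.
+ * Returns NULL when no such message queue has been configured.
+ */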
+static inline struct rte_ring *
+app_thread_msgq_in_get(struct app_params *app,
+ uint32_t socket_id, uint32_t core_id, uint32_t ht_id)
+{
+ char msgq_name[32];
+ ssize_t param_idx;
+
+ snprintf(msgq_name, sizeof(msgq_name),
+ "MSGQ-REQ-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ socket_id,
+ core_id,
+ (ht_id) ? "h" : "");
+ param_idx = APP_PARAM_FIND(app->msgq_params, msgq_name);
+
+ if (param_idx < 0)
+ return NULL;
+
+ return app->msgq[param_idx];
+}
+
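+/*
+ * Look up, by name, the response (RSP) message queue used to read replies
+ * from the thread running on the given socket/core/hyper-thread.
+ * Returns NULL when no such message queue has been configured.
+ */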
+static inline struct rte_ring *
+app_thread_msgq_out_get(struct app_params *app,
+ uint32_t socket_id, uint32_t core_id, uint32_t ht_id)
+{
+ char msgq_name[32];
+ ssize_t param_idx;
+
+ snprintf(msgq_name, sizeof(msgq_name),
+ "MSGQ-RSP-CORE-s%" PRIu32 "c%" PRIu32 "%s",
+ socket_id,
+ core_id,
+ (ht_id) ? "h" : "");
+ param_idx = APP_PARAM_FIND(app->msgq_params, msgq_name);
+
+ if (param_idx < 0)
+ return NULL;
+
+ return app->msgq[param_idx];
+}
+
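+/*
+ * Front-end API: register the thread CLI commands with the application and
+ * request enable/disable/headroom operations on a packet processing thread
+ * identified by (socket, core, hyper-thread).
+ */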
+int
+app_pipeline_thread_cmd_push(struct app_params *app);
+
+int
+app_pipeline_enable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id);
+
+int
+app_pipeline_disable(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id,
+ uint32_t pipeline_id);
+
+int
+app_thread_headroom(struct app_params *app,
+ uint32_t socket_id,
+ uint32_t core_id,
+ uint32_t hyper_th_id);
+
+#endif /* THREAD_FE_H_ */