author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /src/seastar/dpdk/lib/librte_pipeline
parent     Initial commit.
Adding upstream version 18.2.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/seastar/dpdk/lib/librte_pipeline')
-rw-r--r--  src/seastar/dpdk/lib/librte_pipeline/Makefile                   31
-rw-r--r--  src/seastar/dpdk/lib/librte_pipeline/meson.build                 8
-rw-r--r--  src/seastar/dpdk/lib/librte_pipeline/rte_pipeline.c           1618
-rw-r--r--  src/seastar/dpdk/lib/librte_pipeline/rte_pipeline.h            848
-rw-r--r--  src/seastar/dpdk/lib/librte_pipeline/rte_pipeline_version.map   76
-rw-r--r--  src/seastar/dpdk/lib/librte_pipeline/rte_port_in_action.c      531
-rw-r--r--  src/seastar/dpdk/lib/librte_pipeline/rte_port_in_action.h      301
-rw-r--r--  src/seastar/dpdk/lib/librte_pipeline/rte_table_action.c       3474
-rw-r--r--  src/seastar/dpdk/lib/librte_pipeline/rte_table_action.h       1124
9 files changed, 8011 insertions(+), 0 deletions(-)
diff --git a/src/seastar/dpdk/lib/librte_pipeline/Makefile b/src/seastar/dpdk/lib/librte_pipeline/Makefile
new file mode 100644
index 000000000..cf265503f
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_pipeline/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2010-2016 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pipeline.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_mbuf -lrte_table
+LDLIBS += -lrte_port -lrte_meter -lrte_sched -lrte_cryptodev
+
+EXPORT_MAP := rte_pipeline_version.map
+
+LIBABIVER := 3
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := rte_pipeline.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += rte_port_in_action.c
+SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) += rte_table_action.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_PIPELINE)-include += rte_pipeline.h rte_port_in_action.h rte_table_action.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/lib/librte_pipeline/meson.build b/src/seastar/dpdk/lib/librte_pipeline/meson.build
new file mode 100644
index 000000000..04e5f5179
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_pipeline/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+version = 3
+allow_experimental_apis = true
+sources = files('rte_pipeline.c', 'rte_port_in_action.c', 'rte_table_action.c')
+headers = files('rte_pipeline.h', 'rte_port_in_action.h', 'rte_table_action.h')
+deps += ['port', 'table', 'meter', 'sched', 'cryptodev']
diff --git a/src/seastar/dpdk/lib/librte_pipeline/rte_pipeline.c b/src/seastar/dpdk/lib/librte_pipeline/rte_pipeline.c
new file mode 100644
index 000000000..f5f397d29
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_pipeline/rte_pipeline.c
@@ -0,0 +1,1618 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#include <string.h>
+#include <stdio.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+
+#include "rte_pipeline.h"
+
+#define RTE_TABLE_INVALID UINT32_MAX
+
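+/*
+ * Statistics macros: when RTE_PIPELINE_STATS_COLLECT is defined at build
+ * time, these maintain the action-handler and table drop counters;
+ * otherwise they expand to nothing, so the fast path carries no stats
+ * overhead.
+ */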
+#ifdef RTE_PIPELINE_STATS_COLLECT
+
+#define RTE_PIPELINE_STATS_AH_DROP_WRITE(p, mask) \
+ ({ (p)->n_pkts_ah_drop = __builtin_popcountll(mask); })
+
+#define RTE_PIPELINE_STATS_AH_DROP_READ(p, counter) \
+ ({ (counter) += (p)->n_pkts_ah_drop; (p)->n_pkts_ah_drop = 0; })
+
+#define RTE_PIPELINE_STATS_TABLE_DROP0(p) \
+ ({ (p)->pkts_drop_mask = (p)->action_mask0[RTE_PIPELINE_ACTION_DROP]; })
+
+#define RTE_PIPELINE_STATS_TABLE_DROP1(p, counter) \
+({ \
+ uint64_t mask = (p)->action_mask0[RTE_PIPELINE_ACTION_DROP]; \
+ mask ^= (p)->pkts_drop_mask; \
+ (counter) += __builtin_popcountll(mask); \
+})
+
+#else
+
+#define RTE_PIPELINE_STATS_AH_DROP_WRITE(p, mask)
+#define RTE_PIPELINE_STATS_AH_DROP_READ(p, counter)
+#define RTE_PIPELINE_STATS_TABLE_DROP0(p)
+#define RTE_PIPELINE_STATS_TABLE_DROP1(p, counter)
+
+#endif
+
+struct rte_port_in {
+ /* Input parameters */
+ struct rte_port_in_ops ops;
+ rte_pipeline_port_in_action_handler f_action;
+ void *arg_ah;
+ uint32_t burst_size;
+
+ /* The table to which this port is connected */
+ uint32_t table_id;
+
+ /* Handle to low-level port */
+ void *h_port;
+
+ /* List of enabled ports */
+ struct rte_port_in *next;
+
+ /* Statistics */
+ uint64_t n_pkts_dropped_by_ah;
+};
+
+struct rte_port_out {
+ /* Input parameters */
+ struct rte_port_out_ops ops;
+ rte_pipeline_port_out_action_handler f_action;
+ void *arg_ah;
+
+ /* Handle to low-level port */
+ void *h_port;
+
+ /* Statistics */
+ uint64_t n_pkts_dropped_by_ah;
+};
+
+struct rte_table {
+ /* Input parameters */
+ struct rte_table_ops ops;
+ rte_pipeline_table_action_handler_hit f_action_hit;
+ rte_pipeline_table_action_handler_miss f_action_miss;
+ void *arg_ah;
+ struct rte_pipeline_table_entry *default_entry;
+ uint32_t entry_size;
+
+ uint32_t table_next_id;
+ uint32_t table_next_id_valid;
+
+ /* Handle to the low-level table object */
+ void *h_table;
+
+ /* Statistics */
+ uint64_t n_pkts_dropped_by_lkp_hit_ah;
+ uint64_t n_pkts_dropped_by_lkp_miss_ah;
+ uint64_t n_pkts_dropped_lkp_hit;
+ uint64_t n_pkts_dropped_lkp_miss;
+};
+
+#define RTE_PIPELINE_MAX_NAME_SZ 124
+
+struct rte_pipeline {
+ /* Input parameters */
+ char name[RTE_PIPELINE_MAX_NAME_SZ];
+ int socket_id;
+ uint32_t offset_port_id;
+
+ /* Internal tables */
+ struct rte_port_in ports_in[RTE_PIPELINE_PORT_IN_MAX];
+ struct rte_port_out ports_out[RTE_PIPELINE_PORT_OUT_MAX];
+ struct rte_table tables[RTE_PIPELINE_TABLE_MAX];
+
+ /* Occupancy of internal tables */
+ uint32_t num_ports_in;
+ uint32_t num_ports_out;
+ uint32_t num_tables;
+
+ /* List of enabled ports */
+ uint64_t enabled_port_in_mask;
+ struct rte_port_in *port_in_next;
+
+ /* Pipeline run structures */
+ struct rte_mbuf *pkts[RTE_PORT_IN_BURST_SIZE_MAX];
+ struct rte_pipeline_table_entry *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+ uint64_t action_mask0[RTE_PIPELINE_ACTIONS];
+ uint64_t action_mask1[RTE_PIPELINE_ACTIONS];
+ uint64_t pkts_mask;
+ uint64_t n_pkts_ah_drop;
+ uint64_t pkts_drop_mask;
+} __rte_cache_aligned;
+
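+/*
+ * Circular bit scan helpers over the 64-bit mask of enabled input ports:
+ * rte_mask_get_next() returns the position of the first set bit after
+ * position pos (wrapping around), while rte_mask_get_prev() returns the
+ * position of the first set bit before pos (also wrapping around). For
+ * example, with mask = 0x5 (bits 0 and 2 set), next(pos = 0) is 2 and
+ * prev(pos = 0) is also 2.
+ */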
+static inline uint32_t
+rte_mask_get_next(uint64_t mask, uint32_t pos)
+{
+ uint64_t mask_rot = (mask << ((63 - pos) & 0x3F)) |
+ (mask >> ((pos + 1) & 0x3F));
+ return (__builtin_ctzll(mask_rot) - (63 - pos)) & 0x3F;
+}
+
+static inline uint32_t
+rte_mask_get_prev(uint64_t mask, uint32_t pos)
+{
+ uint64_t mask_rot = (mask >> (pos & 0x3F)) |
+ (mask << ((64 - pos) & 0x3F));
+ return ((63 - __builtin_clzll(mask_rot)) + pos) & 0x3F;
+}
+
+static void
+rte_pipeline_table_free(struct rte_table *table);
+
+static void
+rte_pipeline_port_in_free(struct rte_port_in *port);
+
+static void
+rte_pipeline_port_out_free(struct rte_port_out *port);
+
+/*
+ * Pipeline
+ *
+ */
+static int
+rte_pipeline_check_params(struct rte_pipeline_params *params)
+{
+ if (params == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Incorrect value for parameter params\n", __func__);
+ return -EINVAL;
+ }
+
+ /* name */
+ if (params->name == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Incorrect value for parameter name\n", __func__);
+ return -EINVAL;
+ }
+
+ /* socket */
+ if (params->socket_id < 0) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Incorrect value for parameter socket_id\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct rte_pipeline *
+rte_pipeline_create(struct rte_pipeline_params *params)
+{
+ struct rte_pipeline *p;
+ int status;
+
+ /* Check input parameters */
+ status = rte_pipeline_check_params(params);
+ if (status != 0) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Pipeline params check failed (%d)\n",
+ __func__, status);
+ return NULL;
+ }
+
+ /* Allocate memory for the pipeline on requested socket */
+ p = rte_zmalloc_socket("PIPELINE", sizeof(struct rte_pipeline),
+ RTE_CACHE_LINE_SIZE, params->socket_id);
+
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Pipeline memory allocation failed\n", __func__);
+ return NULL;
+ }
+
+ /* Save input parameters */
+ strlcpy(p->name, params->name, RTE_PIPELINE_MAX_NAME_SZ);
+ p->socket_id = params->socket_id;
+ p->offset_port_id = params->offset_port_id;
+
+ /* Initialize pipeline internal data structure */
+ p->num_ports_in = 0;
+ p->num_ports_out = 0;
+ p->num_tables = 0;
+ p->enabled_port_in_mask = 0;
+ p->port_in_next = NULL;
+ p->pkts_mask = 0;
+ p->n_pkts_ah_drop = 0;
+
+ return p;
+}
+
+int
+rte_pipeline_free(struct rte_pipeline *p)
+{
+ uint32_t i;
+
+ /* Check input parameters */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: rte_pipeline parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Free input ports */
+ for (i = 0; i < p->num_ports_in; i++) {
+ struct rte_port_in *port = &p->ports_in[i];
+
+ rte_pipeline_port_in_free(port);
+ }
+
+ /* Free tables */
+ for (i = 0; i < p->num_tables; i++) {
+ struct rte_table *table = &p->tables[i];
+
+ rte_pipeline_table_free(table);
+ }
+
+ /* Free output ports */
+ for (i = 0; i < p->num_ports_out; i++) {
+ struct rte_port_out *port = &p->ports_out[i];
+
+ rte_pipeline_port_out_free(port);
+ }
+
+ /* Free pipeline memory */
+ rte_free(p);
+
+ return 0;
+}
+
+/*
+ * Table
+ *
+ */
+static int
+rte_table_check_params(struct rte_pipeline *p,
+ struct rte_pipeline_table_params *params,
+ uint32_t *table_id)
+{
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (params == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (table_id == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: table_id parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* ops */
+ if (params->ops == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params->ops is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_create == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_create function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_lookup == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_lookup function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Do we have room for one more table? */
+ if (p->num_tables == RTE_PIPELINE_TABLE_MAX) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Incorrect value for num_tables parameter\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+rte_pipeline_table_create(struct rte_pipeline *p,
+ struct rte_pipeline_table_params *params,
+ uint32_t *table_id)
+{
+ struct rte_table *table;
+ struct rte_pipeline_table_entry *default_entry;
+ void *h_table;
+ uint32_t entry_size, id;
+ int status;
+
+ /* Check input arguments */
+ status = rte_table_check_params(p, params, table_id);
+ if (status != 0)
+ return status;
+
+ id = p->num_tables;
+ table = &p->tables[id];
+
+ /* Allocate space for the default table entry */
+ entry_size = sizeof(struct rte_pipeline_table_entry) +
+ params->action_data_size;
+ default_entry = rte_zmalloc_socket(
+ "PIPELINE", entry_size, RTE_CACHE_LINE_SIZE, p->socket_id);
+ if (default_entry == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Failed to allocate default entry\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Create the table */
+ h_table = params->ops->f_create(params->arg_create, p->socket_id,
+ entry_size);
+ if (h_table == NULL) {
+ rte_free(default_entry);
+ RTE_LOG(ERR, PIPELINE, "%s: Table creation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Commit current table to the pipeline */
+ p->num_tables++;
+ *table_id = id;
+
+ /* Save input parameters */
+ memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
+ table->f_action_hit = params->f_action_hit;
+ table->f_action_miss = params->f_action_miss;
+ table->arg_ah = params->arg_ah;
+ table->entry_size = entry_size;
+
+ /* Clear the lookup miss actions (to be set later through API) */
+ table->default_entry = default_entry;
+ table->default_entry->action = RTE_PIPELINE_ACTION_DROP;
+
+ /* Initialize table internal data structure */
+ table->h_table = h_table;
+ table->table_next_id = 0;
+ table->table_next_id_valid = 0;
+
+ return 0;
+}
+
+void
+rte_pipeline_table_free(struct rte_table *table)
+{
+ if (table->ops.f_free != NULL)
+ table->ops.f_free(table->h_table);
+
+ rte_free(table->default_entry);
+}
+
+int
+rte_pipeline_table_default_entry_add(struct rte_pipeline *p,
+ uint32_t table_id,
+ struct rte_pipeline_table_entry *default_entry,
+ struct rte_pipeline_table_entry **default_entry_ptr)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (default_entry == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: default_entry parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if ((default_entry->action == RTE_PIPELINE_ACTION_TABLE) &&
+ table->table_next_id_valid &&
+ (default_entry->table_id != table->table_next_id)) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Tree-like topologies not allowed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Set the lookup miss actions */
+ if ((default_entry->action == RTE_PIPELINE_ACTION_TABLE) &&
+ (table->table_next_id_valid == 0)) {
+ table->table_next_id = default_entry->table_id;
+ table->table_next_id_valid = 1;
+ }
+
+ memcpy(table->default_entry, default_entry, table->entry_size);
+
+ *default_entry_ptr = table->default_entry;
+ return 0;
+}
+
+int
+rte_pipeline_table_default_entry_delete(struct rte_pipeline *p,
+ uint32_t table_id,
+ struct rte_pipeline_table_entry *entry)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: pipeline parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ /* Save the current contents of the default entry */
+ if (entry)
+ memcpy(entry, table->default_entry, table->entry_size);
+
+ /* Clear the lookup miss actions */
+ memset(table->default_entry, 0, table->entry_size);
+ table->default_entry->action = RTE_PIPELINE_ACTION_DROP;
+
+ return 0;
+}
+
+int
+rte_pipeline_table_entry_add(struct rte_pipeline *p,
+ uint32_t table_id,
+ void *key,
+ struct rte_pipeline_table_entry *entry,
+ int *key_found,
+ struct rte_pipeline_table_entry **entry_ptr)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (key == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: key parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (entry == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: entry parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if (table->ops.f_add == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: f_add function pointer NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if ((entry->action == RTE_PIPELINE_ACTION_TABLE) &&
+ table->table_next_id_valid &&
+ (entry->table_id != table->table_next_id)) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Tree-like topologies not allowed\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Add entry */
+ if ((entry->action == RTE_PIPELINE_ACTION_TABLE) &&
+ (table->table_next_id_valid == 0)) {
+ table->table_next_id = entry->table_id;
+ table->table_next_id_valid = 1;
+ }
+
+ return (table->ops.f_add)(table->h_table, key, (void *) entry,
+ key_found, (void **) entry_ptr);
+}
+
+int
+rte_pipeline_table_entry_delete(struct rte_pipeline *p,
+ uint32_t table_id,
+ void *key,
+ int *key_found,
+ struct rte_pipeline_table_entry *entry)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (key == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: key parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if (table->ops.f_delete == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_delete function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ return (table->ops.f_delete)(table->h_table, key, key_found, entry);
+}
+
+int rte_pipeline_table_entry_add_bulk(struct rte_pipeline *p,
+ uint32_t table_id,
+ void **keys,
+ struct rte_pipeline_table_entry **entries,
+ uint32_t n_keys,
+ int *key_found,
+ struct rte_pipeline_table_entry **entries_ptr)
+{
+ struct rte_table *table;
+ uint32_t i;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (keys == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: keys parameter is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (entries == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: entries parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if (table->ops.f_add_bulk == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: f_add_bulk function pointer NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < n_keys; i++) {
+ if ((entries[i]->action == RTE_PIPELINE_ACTION_TABLE) &&
+ table->table_next_id_valid &&
+ (entries[i]->table_id != table->table_next_id)) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Tree-like topologies not allowed\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ /* Add entry */
+ for (i = 0; i < n_keys; i++) {
+ if ((entries[i]->action == RTE_PIPELINE_ACTION_TABLE) &&
+ (table->table_next_id_valid == 0)) {
+ table->table_next_id = entries[i]->table_id;
+ table->table_next_id_valid = 1;
+ }
+ }
+
+ return (table->ops.f_add_bulk)(table->h_table, keys, (void **) entries,
+ n_keys, key_found, (void **) entries_ptr);
+}
+
+int rte_pipeline_table_entry_delete_bulk(struct rte_pipeline *p,
+ uint32_t table_id,
+ void **keys,
+ uint32_t n_keys,
+ int *key_found,
+ struct rte_pipeline_table_entry **entries)
+{
+ struct rte_table *table;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (keys == NULL) {
+		RTE_LOG(ERR, PIPELINE, "%s: keys parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table_id %d out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+
+ if (table->ops.f_delete_bulk == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+			"%s: f_delete_bulk function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ return (table->ops.f_delete_bulk)(table->h_table, keys, n_keys, key_found,
+ (void **) entries);
+}
+
+/*
+ * Port
+ *
+ */
+static int
+rte_pipeline_port_in_check_params(struct rte_pipeline *p,
+ struct rte_pipeline_port_in_params *params,
+ uint32_t *port_id)
+{
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (params == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params parameter NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (port_id == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: port_id parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* ops */
+ if (params->ops == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params->ops parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_create == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_create function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_rx == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: f_rx function pointer NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* burst_size */
+ if ((params->burst_size == 0) ||
+ (params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX)) {
+ RTE_LOG(ERR, PIPELINE, "%s: invalid value for burst_size\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Do we have room for one more port? */
+ if (p->num_ports_in == RTE_PIPELINE_PORT_IN_MAX) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: invalid value for num_ports_in\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+rte_pipeline_port_out_check_params(struct rte_pipeline *p,
+ struct rte_pipeline_port_out_params *params,
+ uint32_t *port_id)
+{
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params parameter NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (port_id == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: port_id parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* ops */
+ if (params->ops == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: params->ops parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_create == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_create function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_tx == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_tx function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_tx_bulk == NULL) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: f_tx_bulk function pointer NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Do we have room for one more port? */
+ if (p->num_ports_out == RTE_PIPELINE_PORT_OUT_MAX) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: invalid value for num_ports_out\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+rte_pipeline_port_in_create(struct rte_pipeline *p,
+ struct rte_pipeline_port_in_params *params,
+ uint32_t *port_id)
+{
+ struct rte_port_in *port;
+ void *h_port;
+ uint32_t id;
+ int status;
+
+ /* Check input arguments */
+ status = rte_pipeline_port_in_check_params(p, params, port_id);
+ if (status != 0)
+ return status;
+
+ id = p->num_ports_in;
+ port = &p->ports_in[id];
+
+ /* Create the port */
+ h_port = params->ops->f_create(params->arg_create, p->socket_id);
+ if (h_port == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: Port creation failed\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Commit current port to the pipeline */
+ p->num_ports_in++;
+ *port_id = id;
+
+ /* Save input parameters */
+ memcpy(&port->ops, params->ops, sizeof(struct rte_port_in_ops));
+ port->f_action = params->f_action;
+ port->arg_ah = params->arg_ah;
+ port->burst_size = params->burst_size;
+
+ /* Initialize port internal data structure */
+ port->table_id = RTE_TABLE_INVALID;
+ port->h_port = h_port;
+ port->next = NULL;
+
+ return 0;
+}
+
+void
+rte_pipeline_port_in_free(struct rte_port_in *port)
+{
+ if (port->ops.f_free != NULL)
+ port->ops.f_free(port->h_port);
+}
+
+int
+rte_pipeline_port_out_create(struct rte_pipeline *p,
+ struct rte_pipeline_port_out_params *params,
+ uint32_t *port_id)
+{
+ struct rte_port_out *port;
+ void *h_port;
+ uint32_t id;
+ int status;
+
+ /* Check input arguments */
+ status = rte_pipeline_port_out_check_params(p, params, port_id);
+ if (status != 0)
+ return status;
+
+ id = p->num_ports_out;
+ port = &p->ports_out[id];
+
+ /* Create the port */
+ h_port = params->ops->f_create(params->arg_create, p->socket_id);
+ if (h_port == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: Port creation failed\n", __func__);
+ return -EINVAL;
+ }
+
+	/* Commit current port to the pipeline */
+ p->num_ports_out++;
+ *port_id = id;
+
+ /* Save input parameters */
+ memcpy(&port->ops, params->ops, sizeof(struct rte_port_out_ops));
+ port->f_action = params->f_action;
+ port->arg_ah = params->arg_ah;
+
+ /* Initialize port internal data structure */
+ port->h_port = h_port;
+
+ return 0;
+}
+
+void
+rte_pipeline_port_out_free(struct rte_port_out *port)
+{
+ if (port->ops.f_free != NULL)
+ port->ops.f_free(port->h_port);
+}
+
+int
+rte_pipeline_port_in_connect_to_table(struct rte_pipeline *p,
+ uint32_t port_id,
+ uint32_t table_id)
+{
+ struct rte_port_in *port;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_in) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: port IN ID %u is out of range\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Table ID %u is out of range\n",
+ __func__, table_id);
+ return -EINVAL;
+ }
+
+ port = &p->ports_in[port_id];
+ port->table_id = table_id;
+
+ return 0;
+}
+
+int
+rte_pipeline_port_in_enable(struct rte_pipeline *p, uint32_t port_id)
+{
+ struct rte_port_in *port, *port_prev, *port_next;
+ uint64_t port_mask;
+ uint32_t port_prev_id, port_next_id;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_in) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: port IN ID %u is out of range\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+
+ port = &p->ports_in[port_id];
+
+ /* Return if current input port is already enabled */
+ port_mask = 1LLU << port_id;
+ if (p->enabled_port_in_mask & port_mask)
+ return 0;
+
+ p->enabled_port_in_mask |= port_mask;
+
+ /* Add current input port to the pipeline chain of enabled ports */
+ port_prev_id = rte_mask_get_prev(p->enabled_port_in_mask, port_id);
+ port_next_id = rte_mask_get_next(p->enabled_port_in_mask, port_id);
+
+ port_prev = &p->ports_in[port_prev_id];
+ port_next = &p->ports_in[port_next_id];
+
+ port_prev->next = port;
+ port->next = port_next;
+
+ /* Check if list of enabled ports was previously empty */
+ if (p->enabled_port_in_mask == port_mask)
+ p->port_in_next = port;
+
+ return 0;
+}
+
+int
+rte_pipeline_port_in_disable(struct rte_pipeline *p, uint32_t port_id)
+{
+ struct rte_port_in *port, *port_prev, *port_next;
+ uint64_t port_mask;
+ uint32_t port_prev_id, port_next_id;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_in) {
+ RTE_LOG(ERR, PIPELINE, "%s: port IN ID %u is out of range\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+
+ port = &p->ports_in[port_id];
+
+ /* Return if current input port is already disabled */
+ port_mask = 1LLU << port_id;
+ if ((p->enabled_port_in_mask & port_mask) == 0)
+ return 0;
+
+ p->enabled_port_in_mask &= ~port_mask;
+
+ /* Return if no other enabled ports */
+ if (p->enabled_port_in_mask == 0) {
+ p->port_in_next = NULL;
+
+ return 0;
+ }
+
+ /* Add current input port to the pipeline chain of enabled ports */
+ port_prev_id = rte_mask_get_prev(p->enabled_port_in_mask, port_id);
+ port_next_id = rte_mask_get_next(p->enabled_port_in_mask, port_id);
+
+ port_prev = &p->ports_in[port_prev_id];
+ port_next = &p->ports_in[port_next_id];
+
+ port_prev->next = port_next;
+
+ /* Check if the port which has just been disabled is next to serve */
+ if (port == p->port_in_next)
+ p->port_in_next = port_next;
+
+ return 0;
+}
+
+/*
+ * Pipeline run-time
+ *
+ */
+int
+rte_pipeline_check(struct rte_pipeline *p)
+{
+ uint32_t port_in_id;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check that pipeline has at least one input port, one table and one
+ output port */
+ if (p->num_ports_in == 0) {
+ RTE_LOG(ERR, PIPELINE, "%s: must have at least 1 input port\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (p->num_tables == 0) {
+ RTE_LOG(ERR, PIPELINE, "%s: must have at least 1 table\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (p->num_ports_out == 0) {
+ RTE_LOG(ERR, PIPELINE, "%s: must have at least 1 output port\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Check that all input ports are connected */
+ for (port_in_id = 0; port_in_id < p->num_ports_in; port_in_id++) {
+ struct rte_port_in *port_in = &p->ports_in[port_in_id];
+
+ if (port_in->table_id == RTE_TABLE_INVALID) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: Port IN ID %u is not connected\n",
+ __func__, port_in_id);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static inline void
+rte_pipeline_compute_masks(struct rte_pipeline *p, uint64_t pkts_mask)
+{
+ p->action_mask1[RTE_PIPELINE_ACTION_DROP] = 0;
+ p->action_mask1[RTE_PIPELINE_ACTION_PORT] = 0;
+ p->action_mask1[RTE_PIPELINE_ACTION_PORT_META] = 0;
+ p->action_mask1[RTE_PIPELINE_ACTION_TABLE] = 0;
+
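+	/*
+	 * (pkts_mask & (pkts_mask + 1)) == 0 holds when pkts_mask is a
+	 * contiguous run of 1s starting at bit 0 (e.g. 0x3F), so the valid
+	 * packets can be walked with a simple counter instead of testing
+	 * every bit. The same fast/slow path split is used by the action
+	 * handlers below.
+	 */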
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+ uint32_t pos = p->entries[i]->action;
+
+ p->action_mask1[pos] |= pkt_mask;
+ }
+ } else {
+ uint32_t i;
+
+ for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+ uint32_t pos;
+
+ if ((pkt_mask & pkts_mask) == 0)
+ continue;
+
+ pos = p->entries[i]->action;
+ p->action_mask1[pos] |= pkt_mask;
+ }
+ }
+}
+
+static inline void
+rte_pipeline_action_handler_port_bulk(struct rte_pipeline *p,
+ uint64_t pkts_mask, uint32_t port_id)
+{
+ struct rte_port_out *port_out = &p->ports_out[port_id];
+
+ p->pkts_mask = pkts_mask;
+
+ /* Output port user actions */
+ if (port_out->f_action != NULL) {
+ port_out->f_action(p, p->pkts, pkts_mask, port_out->arg_ah);
+
+ RTE_PIPELINE_STATS_AH_DROP_READ(p,
+ port_out->n_pkts_dropped_by_ah);
+ }
+
+ /* Output port TX */
+ if (p->pkts_mask != 0)
+ port_out->ops.f_tx_bulk(port_out->h_port,
+ p->pkts,
+ p->pkts_mask);
+}
+
+static inline void
+rte_pipeline_action_handler_port(struct rte_pipeline *p, uint64_t pkts_mask)
+{
+ p->pkts_mask = pkts_mask;
+
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = p->pkts[i];
+ uint32_t port_out_id = p->entries[i]->port_id;
+ struct rte_port_out *port_out =
+ &p->ports_out[port_out_id];
+
+ /* Output port user actions */
+ if (port_out->f_action == NULL) /* Output port TX */
+ port_out->ops.f_tx(port_out->h_port, pkt);
+ else {
+ uint64_t pkt_mask = 1LLU << i;
+
+ port_out->f_action(p,
+ p->pkts,
+ pkt_mask,
+ port_out->arg_ah);
+
+ RTE_PIPELINE_STATS_AH_DROP_READ(p,
+ port_out->n_pkts_dropped_by_ah);
+
+ /* Output port TX */
+ if (pkt_mask & p->pkts_mask)
+ port_out->ops.f_tx(port_out->h_port,
+ pkt);
+ }
+ }
+ } else {
+ uint32_t i;
+
+ for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+ struct rte_mbuf *pkt;
+ struct rte_port_out *port_out;
+ uint32_t port_out_id;
+
+ if ((pkt_mask & pkts_mask) == 0)
+ continue;
+
+ pkt = p->pkts[i];
+ port_out_id = p->entries[i]->port_id;
+ port_out = &p->ports_out[port_out_id];
+
+ /* Output port user actions */
+ if (port_out->f_action == NULL) /* Output port TX */
+ port_out->ops.f_tx(port_out->h_port, pkt);
+ else {
+ port_out->f_action(p,
+ p->pkts,
+ pkt_mask,
+ port_out->arg_ah);
+
+ RTE_PIPELINE_STATS_AH_DROP_READ(p,
+ port_out->n_pkts_dropped_by_ah);
+
+ /* Output port TX */
+ if (pkt_mask & p->pkts_mask)
+ port_out->ops.f_tx(port_out->h_port,
+ pkt);
+ }
+ }
+ }
+}
+
+static inline void
+rte_pipeline_action_handler_port_meta(struct rte_pipeline *p,
+ uint64_t pkts_mask)
+{
+ p->pkts_mask = pkts_mask;
+
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = p->pkts[i];
+ uint32_t port_out_id =
+ RTE_MBUF_METADATA_UINT32(pkt,
+ p->offset_port_id);
+ struct rte_port_out *port_out = &p->ports_out[
+ port_out_id];
+
+ /* Output port user actions */
+ if (port_out->f_action == NULL) /* Output port TX */
+ port_out->ops.f_tx(port_out->h_port, pkt);
+ else {
+ uint64_t pkt_mask = 1LLU << i;
+
+ port_out->f_action(p,
+ p->pkts,
+ pkt_mask,
+ port_out->arg_ah);
+
+ RTE_PIPELINE_STATS_AH_DROP_READ(p,
+ port_out->n_pkts_dropped_by_ah);
+
+ /* Output port TX */
+ if (pkt_mask & p->pkts_mask)
+ port_out->ops.f_tx(port_out->h_port,
+ pkt);
+ }
+ }
+ } else {
+ uint32_t i;
+
+ for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+ struct rte_mbuf *pkt;
+ struct rte_port_out *port_out;
+ uint32_t port_out_id;
+
+ if ((pkt_mask & pkts_mask) == 0)
+ continue;
+
+ pkt = p->pkts[i];
+ port_out_id = RTE_MBUF_METADATA_UINT32(pkt,
+ p->offset_port_id);
+ port_out = &p->ports_out[port_out_id];
+
+ /* Output port user actions */
+ if (port_out->f_action == NULL) /* Output port TX */
+ port_out->ops.f_tx(port_out->h_port, pkt);
+ else {
+ port_out->f_action(p,
+ p->pkts,
+ pkt_mask,
+ port_out->arg_ah);
+
+ RTE_PIPELINE_STATS_AH_DROP_READ(p,
+ port_out->n_pkts_dropped_by_ah);
+
+ /* Output port TX */
+ if (pkt_mask & p->pkts_mask)
+ port_out->ops.f_tx(port_out->h_port,
+ pkt);
+ }
+ }
+ }
+}
+
+static inline void
+rte_pipeline_action_handler_drop(struct rte_pipeline *p, uint64_t pkts_mask)
+{
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++)
+ rte_pktmbuf_free(p->pkts[i]);
+ } else {
+ uint32_t i;
+
+ for (i = 0; i < RTE_PORT_IN_BURST_SIZE_MAX; i++) {
+ uint64_t pkt_mask = 1LLU << i;
+
+ if ((pkt_mask & pkts_mask) == 0)
+ continue;
+
+ rte_pktmbuf_free(p->pkts[i]);
+ }
+ }
+}
+
+int
+rte_pipeline_run(struct rte_pipeline *p)
+{
+ struct rte_port_in *port_in = p->port_in_next;
+ uint32_t n_pkts, table_id;
+
+ if (port_in == NULL)
+ return 0;
+
+ /* Input port RX */
+ n_pkts = port_in->ops.f_rx(port_in->h_port, p->pkts,
+ port_in->burst_size);
+ if (n_pkts == 0) {
+ p->port_in_next = port_in->next;
+ return 0;
+ }
+
+ p->pkts_mask = RTE_LEN2MASK(n_pkts, uint64_t);
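+	/*
+	 * action_mask0[] accumulates, per reserved action, the packets that
+	 * selected that action while the burst walks the table chain; the
+	 * accumulated DROP, PORT and PORT_META masks are drained by the
+	 * handlers after the table loop (the lookup-miss default PORT action
+	 * is special-cased and transmitted in bulk inside the loop).
+	 */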
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] = 0;
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT] = 0;
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT_META] = 0;
+ p->action_mask0[RTE_PIPELINE_ACTION_TABLE] = 0;
+
+ /* Input port user actions */
+ if (port_in->f_action != NULL) {
+ port_in->f_action(p, p->pkts, n_pkts, port_in->arg_ah);
+
+ RTE_PIPELINE_STATS_AH_DROP_READ(p,
+ port_in->n_pkts_dropped_by_ah);
+ }
+
+ /* Table */
+ for (table_id = port_in->table_id; p->pkts_mask != 0; ) {
+ struct rte_table *table;
+ uint64_t lookup_hit_mask, lookup_miss_mask;
+
+ /* Lookup */
+ table = &p->tables[table_id];
+ table->ops.f_lookup(table->h_table, p->pkts, p->pkts_mask,
+ &lookup_hit_mask, (void **) p->entries);
+ lookup_miss_mask = p->pkts_mask & (~lookup_hit_mask);
+
+ /* Lookup miss */
+ if (lookup_miss_mask != 0) {
+ struct rte_pipeline_table_entry *default_entry =
+ table->default_entry;
+
+ p->pkts_mask = lookup_miss_mask;
+
+ /* Table user actions */
+ if (table->f_action_miss != NULL) {
+ table->f_action_miss(p,
+ p->pkts,
+ lookup_miss_mask,
+ default_entry,
+ table->arg_ah);
+
+ RTE_PIPELINE_STATS_AH_DROP_READ(p,
+ table->n_pkts_dropped_by_lkp_miss_ah);
+ }
+
+ /* Table reserved actions */
+ if ((default_entry->action == RTE_PIPELINE_ACTION_PORT) &&
+ (p->pkts_mask != 0))
+ rte_pipeline_action_handler_port_bulk(p,
+ p->pkts_mask,
+ default_entry->port_id);
+ else {
+ uint32_t pos = default_entry->action;
+
+ RTE_PIPELINE_STATS_TABLE_DROP0(p);
+
+ p->action_mask0[pos] |= p->pkts_mask;
+
+ RTE_PIPELINE_STATS_TABLE_DROP1(p,
+ table->n_pkts_dropped_lkp_miss);
+ }
+ }
+
+ /* Lookup hit */
+ if (lookup_hit_mask != 0) {
+ p->pkts_mask = lookup_hit_mask;
+
+ /* Table user actions */
+ if (table->f_action_hit != NULL) {
+ table->f_action_hit(p,
+ p->pkts,
+ lookup_hit_mask,
+ p->entries,
+ table->arg_ah);
+
+ RTE_PIPELINE_STATS_AH_DROP_READ(p,
+ table->n_pkts_dropped_by_lkp_hit_ah);
+ }
+
+ /* Table reserved actions */
+ RTE_PIPELINE_STATS_TABLE_DROP0(p);
+ rte_pipeline_compute_masks(p, p->pkts_mask);
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |=
+ p->action_mask1[
+ RTE_PIPELINE_ACTION_DROP];
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT] |=
+ p->action_mask1[
+ RTE_PIPELINE_ACTION_PORT];
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT_META] |=
+ p->action_mask1[
+ RTE_PIPELINE_ACTION_PORT_META];
+ p->action_mask0[RTE_PIPELINE_ACTION_TABLE] |=
+ p->action_mask1[
+ RTE_PIPELINE_ACTION_TABLE];
+
+ RTE_PIPELINE_STATS_TABLE_DROP1(p,
+ table->n_pkts_dropped_lkp_hit);
+ }
+
+ /* Prepare for next iteration */
+ p->pkts_mask = p->action_mask0[RTE_PIPELINE_ACTION_TABLE];
+ table_id = table->table_next_id;
+ p->action_mask0[RTE_PIPELINE_ACTION_TABLE] = 0;
+ }
+
+ /* Table reserved action PORT */
+ rte_pipeline_action_handler_port(p,
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT]);
+
+ /* Table reserved action PORT META */
+ rte_pipeline_action_handler_port_meta(p,
+ p->action_mask0[RTE_PIPELINE_ACTION_PORT_META]);
+
+ /* Table reserved action DROP */
+ rte_pipeline_action_handler_drop(p,
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP]);
+
+ /* Pick candidate for next port IN to serve */
+ p->port_in_next = port_in->next;
+
+ return (int) n_pkts;
+}
+
+int
+rte_pipeline_flush(struct rte_pipeline *p)
+{
+ uint32_t port_id;
+
+ /* Check input arguments */
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ for (port_id = 0; port_id < p->num_ports_out; port_id++) {
+ struct rte_port_out *port = &p->ports_out[port_id];
+
+ if (port->ops.f_flush != NULL)
+ port->ops.f_flush(port->h_port);
+ }
+
+ return 0;
+}
+
+int
+rte_pipeline_port_out_packet_insert(struct rte_pipeline *p,
+ uint32_t port_id, struct rte_mbuf *pkt)
+{
+ struct rte_port_out *port_out = &p->ports_out[port_id];
+
+ port_out->ops.f_tx(port_out->h_port, pkt); /* Output port TX */
+
+ return 0;
+}
+
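+/*
+ * The two helpers below are intended to be called from within action
+ * handlers, while p->pkts_mask describes the burst being processed:
+ * hijack removes the selected packets from further pipeline processing
+ * without freeing them (ownership passes to the caller), while drop also
+ * removes them but queues them for the reserved DROP action, so the
+ * pipeline frees them.
+ */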
+int rte_pipeline_ah_packet_hijack(struct rte_pipeline *p,
+ uint64_t pkts_mask)
+{
+ pkts_mask &= p->pkts_mask;
+ p->pkts_mask &= ~pkts_mask;
+
+ return 0;
+}
+
+int rte_pipeline_ah_packet_drop(struct rte_pipeline *p,
+ uint64_t pkts_mask)
+{
+ pkts_mask &= p->pkts_mask;
+ p->pkts_mask &= ~pkts_mask;
+ p->action_mask0[RTE_PIPELINE_ACTION_DROP] |= pkts_mask;
+
+ RTE_PIPELINE_STATS_AH_DROP_WRITE(p, pkts_mask);
+ return 0;
+}
+
+int rte_pipeline_port_in_stats_read(struct rte_pipeline *p, uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats, int clear)
+{
+ struct rte_port_in *port;
+ int retval;
+
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_in) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: port IN ID %u is out of range\n",
+ __func__, port_id);
+ return -EINVAL;
+ }
+
+ port = &p->ports_in[port_id];
+
+ if (port->ops.f_stats != NULL) {
+ retval = port->ops.f_stats(port->h_port, &stats->stats, clear);
+ if (retval)
+ return retval;
+ } else if (stats != NULL)
+ memset(&stats->stats, 0, sizeof(stats->stats));
+
+ if (stats != NULL)
+ stats->n_pkts_dropped_by_ah = port->n_pkts_dropped_by_ah;
+
+ if (clear != 0)
+ port->n_pkts_dropped_by_ah = 0;
+
+ return 0;
+}
+
+int rte_pipeline_port_out_stats_read(struct rte_pipeline *p, uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats, int clear)
+{
+ struct rte_port_out *port;
+ int retval;
+
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (port_id >= p->num_ports_out) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: port OUT ID %u is out of range\n", __func__, port_id);
+ return -EINVAL;
+ }
+
+ port = &p->ports_out[port_id];
+ if (port->ops.f_stats != NULL) {
+ retval = port->ops.f_stats(port->h_port, &stats->stats, clear);
+ if (retval != 0)
+ return retval;
+ } else if (stats != NULL)
+ memset(&stats->stats, 0, sizeof(stats->stats));
+
+ if (stats != NULL)
+ stats->n_pkts_dropped_by_ah = port->n_pkts_dropped_by_ah;
+
+ if (clear != 0)
+ port->n_pkts_dropped_by_ah = 0;
+
+ return 0;
+}
+
+int rte_pipeline_table_stats_read(struct rte_pipeline *p, uint32_t table_id,
+ struct rte_pipeline_table_stats *stats, int clear)
+{
+ struct rte_table *table;
+ int retval;
+
+ if (p == NULL) {
+ RTE_LOG(ERR, PIPELINE, "%s: pipeline parameter NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (table_id >= p->num_tables) {
+ RTE_LOG(ERR, PIPELINE,
+ "%s: table %u is out of range\n", __func__, table_id);
+ return -EINVAL;
+ }
+
+ table = &p->tables[table_id];
+ if (table->ops.f_stats != NULL) {
+ retval = table->ops.f_stats(table->h_table, &stats->stats, clear);
+ if (retval != 0)
+ return retval;
+ } else if (stats != NULL)
+ memset(&stats->stats, 0, sizeof(stats->stats));
+
+ if (stats != NULL) {
+ stats->n_pkts_dropped_by_lkp_hit_ah =
+ table->n_pkts_dropped_by_lkp_hit_ah;
+ stats->n_pkts_dropped_by_lkp_miss_ah =
+ table->n_pkts_dropped_by_lkp_miss_ah;
+ stats->n_pkts_dropped_lkp_hit = table->n_pkts_dropped_lkp_hit;
+ stats->n_pkts_dropped_lkp_miss = table->n_pkts_dropped_lkp_miss;
+ }
+
+ if (clear != 0) {
+ table->n_pkts_dropped_by_lkp_hit_ah = 0;
+ table->n_pkts_dropped_by_lkp_miss_ah = 0;
+ table->n_pkts_dropped_lkp_hit = 0;
+ table->n_pkts_dropped_lkp_miss = 0;
+ }
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/lib/librte_pipeline/rte_pipeline.h b/src/seastar/dpdk/lib/librte_pipeline/rte_pipeline.h
new file mode 100644
index 000000000..3cfb6868f
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_pipeline/rte_pipeline.h
@@ -0,0 +1,848 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
+ */
+
+#ifndef __INCLUDE_RTE_PIPELINE_H__
+#define __INCLUDE_RTE_PIPELINE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @file
+ * RTE Pipeline
+ *
+ * This tool is part of the DPDK Packet Framework tool suite and provides
+ * a standard methodology (logically similar to OpenFlow) for rapid development
+ * of complex packet processing pipelines out of ports, tables and actions.
+ *
+ * <B>Basic operation.</B> A pipeline is constructed by connecting its input
+ * ports to its output ports through a chain of lookup tables. As a result
+ * of the lookup operation into the current table, one of the table entries
+ * (or the default table entry, in case of lookup miss) is identified to
+ * provide the actions to be executed on the current packet and the
+ * associated action
+ * meta-data. The behavior of user actions is defined through the configurable
+ * table action handler, while the reserved actions define the next hop for the
+ * current packet (either another table, an output port or packet drop) and are
+ * handled transparently by the framework.
+ *
+ * <B>Initialization and run-time flows.</B> Once all the pipeline elements
+ * (input ports, tables, output ports) have been created, input ports connected
+ * to tables, table action handlers configured, tables populated with the
+ * initial set of entries (actions and action meta-data) and input ports
+ * enabled, the pipeline runs automatically, pushing packets from input ports
+ * to tables and output ports. At each table, the identified user actions are
+ * being executed, resulting in action meta-data (stored in the table entry)
+ * and packet meta-data (stored with the packet descriptor) being updated. The
+ * pipeline tables can have further updates and input ports can be disabled or
+ * enabled later on as required.
+ *
+ * <B>Multi-core scaling.</B> Typically, each CPU core will run its own
+ * pipeline instance. Complex application-level pipelines can be implemented by
+ * interconnecting multiple CPU core-level pipelines in tree-like topologies,
+ * as the same port devices (e.g. SW rings) can serve as output ports for the
+ * pipeline running on CPU core A, as well as input ports for the pipeline
+ * running on CPU core B. This approach enables the application development
+ * using the pipeline (CPU cores connected serially), cluster/run-to-completion
+ * (CPU cores connected in parallel) or mixed (pipeline of CPU core clusters)
+ * programming models.
+ *
+ * <B>Thread safety.</B> It is possible to have multiple pipelines running on
+ * the same CPU core, but it is not allowed (for thread safety reasons) to have
+ * multiple CPU cores running the same pipeline instance.
+ *
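+ * <B>Example.</B> Minimal setup with one input port, one table and one
+ * output port (an illustrative sketch only: the port/table creation
+ * parameters are elided, as they depend on the port and table types
+ * selected by the application):
+ *
+ * @code
+ * struct rte_pipeline_params pp = {
+ *	.name = "PIPELINE0",
+ *	.socket_id = 0,
+ *	.offset_port_id = 0,
+ * };
+ * struct rte_pipeline *p = rte_pipeline_create(&pp);
+ * uint32_t port_in_id, port_out_id, table_id;
+ *
+ * // rte_pipeline_port_in_create(p, &port_in_params, &port_in_id);
+ * // rte_pipeline_port_out_create(p, &port_out_params, &port_out_id);
+ * // rte_pipeline_table_create(p, &table_params, &table_id);
+ * // table entries are then populated via rte_pipeline_table_entry_add()
+ *
+ * rte_pipeline_port_in_connect_to_table(p, port_in_id, table_id);
+ * rte_pipeline_port_in_enable(p, port_in_id);
+ *
+ * if (rte_pipeline_check(p) == 0)
+ *	for ( ; ; ) {
+ *		rte_pipeline_run(p);
+ *		rte_pipeline_flush(p);
+ *	}
+ * @endcode
+ *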
+ ***/
+
+#include <stdint.h>
+
+#include <rte_port.h>
+#include <rte_table.h>
+#include <rte_common.h>
+
+struct rte_mbuf;
+
+/*
+ * Pipeline
+ *
+ */
+/** Opaque data type for pipeline */
+struct rte_pipeline;
+
+/** Parameters for pipeline creation */
+struct rte_pipeline_params {
+ /** Pipeline name */
+ const char *name;
+
+ /** CPU socket ID where memory for the pipeline and its elements (ports
+ and tables) should be allocated */
+ int socket_id;
+
+ /** Offset within packet meta-data to port_id to be used by action
+ "Send packet to output port read from packet meta-data". Has to be
+ 4-byte aligned. */
+ uint32_t offset_port_id;
+};
+
+/** Pipeline port in stats. */
+struct rte_pipeline_port_in_stats {
+ /** Port in stats. */
+ struct rte_port_in_stats stats;
+
+ /** Number of packets dropped by action handler. */
+ uint64_t n_pkts_dropped_by_ah;
+};
+
+/** Pipeline port out stats. */
+struct rte_pipeline_port_out_stats {
+ /** Port out stats. */
+ struct rte_port_out_stats stats;
+
+ /** Number of packets dropped by action handler. */
+ uint64_t n_pkts_dropped_by_ah;
+};
+
+/** Pipeline table stats. */
+struct rte_pipeline_table_stats {
+ /** Table stats. */
+ struct rte_table_stats stats;
+
+ /** Number of packets dropped by lookup hit action handler. */
+ uint64_t n_pkts_dropped_by_lkp_hit_ah;
+
+ /** Number of packets dropped by lookup miss action handler. */
+ uint64_t n_pkts_dropped_by_lkp_miss_ah;
+
+	/** Number of packets dropped by the pipeline on behalf of this
+	 * table, based on the DROP action specified in the matched table
+	 * entry (lookup hit). */
+	uint64_t n_pkts_dropped_lkp_hit;
+
+	/** Number of packets dropped by the pipeline on behalf of this
+	 * table, based on the DROP action specified in the table default
+	 * entry (lookup miss). */
+	uint64_t n_pkts_dropped_lkp_miss;
+};
+
+/**
+ * Pipeline create
+ *
+ * @param params
+ * Parameters for pipeline creation
+ * @return
+ * Handle to pipeline instance on success or NULL otherwise
+ */
+struct rte_pipeline *rte_pipeline_create(struct rte_pipeline_params *params);
+
+/**
+ * Pipeline free
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_free(struct rte_pipeline *p);
+
+/**
+ * Pipeline consistency check
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_check(struct rte_pipeline *p);
+
+/**
+ * Pipeline run
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @return
+ * Number of packets read and processed
+ */
+int rte_pipeline_run(struct rte_pipeline *p);
+
+/**
+ * Pipeline flush
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_flush(struct rte_pipeline *p);
+
+/*
+ * Actions
+ *
+ */
+/** Reserved actions */
+enum rte_pipeline_action {
+ /** Drop the packet */
+ RTE_PIPELINE_ACTION_DROP = 0,
+
+ /** Send packet to output port */
+ RTE_PIPELINE_ACTION_PORT,
+
+ /** Send packet to output port read from packet meta-data */
+ RTE_PIPELINE_ACTION_PORT_META,
+
+ /** Send packet to table */
+ RTE_PIPELINE_ACTION_TABLE,
+
+ /** Number of reserved actions */
+ RTE_PIPELINE_ACTIONS
+};
+
+/*
+ * Table
+ *
+ */
+/** Maximum number of tables allowed for any given pipeline instance. The
+ value of this parameter cannot be changed. */
+#define RTE_PIPELINE_TABLE_MAX 64
+
+/**
+ * Head format for the table entry of any pipeline table. For any given
+ * pipeline table, all table entries should have the same size and format. For
+ * any given pipeline table, the table entry has to start with a head of this
+ * structure, which contains the reserved actions and their associated
+ * meta-data, and then optionally continues with user actions and their
+ * associated meta-data. As all the currently defined reserved actions are
+ * mutually exclusive, only one reserved action can be set per table entry.
+ */
+struct rte_pipeline_table_entry {
+ /** Reserved action */
+ enum rte_pipeline_action action;
+
+ RTE_STD_C11
+ union {
+ /** Output port ID (meta-data for "Send packet to output port"
+ action) */
+ uint32_t port_id;
+ /** Table ID (meta-data for "Send packet to table" action) */
+ uint32_t table_id;
+ };
+ /** Start of table entry area for user defined actions and meta-data */
+ __extension__ uint8_t action_data[0];
+};
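+
+/*
+ * Illustrative sketch (the user meta-data layout below is hypothetical):
+ * user action data is overlaid on the zero-length action_data[] field, so
+ * the action_data_size passed at table creation time must cover it:
+ *
+ *	struct my_action_data {
+ *		uint32_t next_hop_ip;
+ *	};
+ *
+ *	// entry points to storage of at least sizeof(*entry) +
+ *	// sizeof(struct my_action_data), as allocated by the pipeline
+ *	entry->action = RTE_PIPELINE_ACTION_PORT;
+ *	entry->port_id = 0;
+ *	((struct my_action_data *) entry->action_data)->next_hop_ip = ip;
+ */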
+
+/**
+ * Pipeline table action handler on lookup hit
+ *
+ * The action handler can decide to drop packets by resetting the associated
+ * packet bit in the pkts_mask parameter. In this case, the action handler is
+ * required not to free the packet buffer, which will be freed eventually by
+ * the pipeline.
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param pkts_mask
+ * 64-bit bitmask specifying which packets in the input burst are valid. When
+ * pkts_mask bit n is set, then element n of pkts array is pointing to a
+ * valid packet and element n of entries array is pointing to a valid table
+ * entry associated with the packet, with the association typically done by
+ * the table lookup operation. Otherwise, element n of pkts array and element
+ * n of entries array will not be accessed.
+ * @param entries
+ * Set of table entries specified as array of up to 64 pointers to struct
+ * rte_pipeline_table_entry
+ * @param arg
+ * Opaque parameter registered by the user at the pipeline table creation
+ * time
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_pipeline_table_action_handler_hit)(
+ struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_pipeline_table_entry **entries,
+ void *arg);
+
+/**
+ * Pipeline table action handler on lookup miss
+ *
+ * The action handler can decide to drop packets by resetting the associated
+ * packet bit in the pkts_mask parameter. In this case, the action handler is
+ * required not to free the packet buffer, which will be freed eventually by
+ * the pipeline.
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param pkts_mask
+ * 64-bit bitmask specifying which packets in the input burst are valid. When
+ * pkts_mask bit n is set, then element n of pkts array is pointing to a
+ * valid packet. Otherwise, element n of pkts array will not be accessed.
+ * @param entry
+ * Single table entry associated with all the valid packets from the input
+ * burst, specified as pointer to struct rte_pipeline_table_entry.
+ * This entry is the pipeline table default entry that is associated by the
+ * table lookup operation with the input packets that have resulted in lookup
+ * miss.
+ * @param arg
+ * Opaque parameter registered by the user at the pipeline table creation
+ * time
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_pipeline_table_action_handler_miss)(
+ struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_pipeline_table_entry *entry,
+ void *arg);
+
+/** Parameters for pipeline table creation. Action handlers have to be either
+ both enabled or both disabled (they can be disabled by setting them to
+ NULL). */
+struct rte_pipeline_table_params {
+ /** Table operations (specific to each table type) */
+ struct rte_table_ops *ops;
+ /** Opaque param to be passed to the table create operation when
+ invoked */
+ void *arg_create;
+ /** Callback function to execute the user actions on input packets in
+ case of lookup hit */
+ rte_pipeline_table_action_handler_hit f_action_hit;
+ /** Callback function to execute the user actions on input packets in
+ case of lookup miss */
+ rte_pipeline_table_action_handler_miss f_action_miss;
+
+ /** Opaque parameter to be passed to lookup hit and/or lookup miss
+ action handlers when invoked */
+ void *arg_ah;
+ /** Memory size to be reserved per table entry for storing the user
+ actions and their meta-data */
+ uint32_t action_data_size;
+};
+
+/**
+ * Pipeline table create
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param params
+ * Parameters for pipeline table creation
+ * @param table_id
+ *   Table ID. Valid only within the scope of table IDs of the current
+ *   pipeline. Set only on successful invocation.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_create(struct rte_pipeline *p,
+ struct rte_pipeline_table_params *params,
+ uint32_t *table_id);
+
+/**
+ * Pipeline table default entry add
+ *
+ * The contents of the table default entry are updated with the provided actions
+ * and meta-data. When the default entry is not configured (by using this
+ * function), the built-in default entry has the action "Drop" and meta-data
+ * set to all-zeros.
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param default_entry
+ * New contents for the table default entry
+ * @param default_entry_ptr
+ * On successful invocation, pointer to the default table entry which can be
+ * used for further read-write accesses to this table entry. This pointer
+ * is valid until the default entry is deleted or re-added.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_default_entry_add(struct rte_pipeline *p,
+ uint32_t table_id,
+ struct rte_pipeline_table_entry *default_entry,
+ struct rte_pipeline_table_entry **default_entry_ptr);
+
+/**
+ * Pipeline table default entry delete
+ *
+ * The new contents of the table default entry are set to the reserved
+ * action "Drop the packet" with meta-data cleared (i.e. set to all-zeros).
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param entry
+ *   On successful invocation, when entry points to a valid buffer, the
+ *   previous contents of the table default entry (as they were just before
+ *   the delete operation) are copied to this buffer
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_default_entry_delete(struct rte_pipeline *p,
+ uint32_t table_id,
+ struct rte_pipeline_table_entry *entry);
+
+/**
+ * Pipeline table entry add
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param key
+ * Table entry key
+ * @param entry
+ * New contents for the table entry identified by key
+ * @param key_found
+ * On successful invocation, set to TRUE (value different than 0) if key was
+ * already present in the table before the add operation and to FALSE (value
+ * 0) if not
+ * @param entry_ptr
+ * On successful invocation, pointer to the table entry associated with key.
+ * This can be used for further read-write accesses to this table entry and
+ * is valid until the key is deleted from the table or re-added (usually for
+ * associating different actions and/or action meta-data to the current key)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_entry_add(struct rte_pipeline *p,
+ uint32_t table_id,
+ void *key,
+ struct rte_pipeline_table_entry *entry,
+ int *key_found,
+ struct rte_pipeline_table_entry **entry_ptr);
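+
+/*
+ * Illustrative sketch (the key layout and values are hypothetical; the key
+ * format is defined by the table type in use):
+ *
+ *	struct rte_pipeline_table_entry entry = {
+ *		.action = RTE_PIPELINE_ACTION_PORT,
+ *		.port_id = 0,
+ *	};
+ *	int key_found;
+ *	struct rte_pipeline_table_entry *entry_ptr;
+ *
+ *	int status = rte_pipeline_table_entry_add(p, table_id, &key, &entry,
+ *		&key_found, &entry_ptr);
+ */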
+
+/**
+ * Pipeline table entry delete
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param key
+ * Table entry key
+ * @param key_found
+ * On successful invocation, set to TRUE (value different than 0) if key was
+ * found in the table before the delete operation and to FALSE (value 0) if
+ * not
+ * @param entry
+ *   On successful invocation, when key is found in the table and entry
+ *   points to a valid buffer, the table entry contents (as they were
+ *   before the delete was performed) are copied to this buffer
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_entry_delete(struct rte_pipeline *p,
+ uint32_t table_id,
+ void *key,
+ int *key_found,
+ struct rte_pipeline_table_entry *entry);
+
+/**
+ * Pipeline table entry add bulk
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param keys
+ * Array containing table entry keys
+ * @param entries
+ * Array containing new contents for every table entry identified by key
+ * @param n_keys
+ * Number of keys to add
+ * @param key_found
+ * On successful invocation, key_found for every item in the array is set
+ * to TRUE (non-zero value) if the corresponding key was already present in
+ * the table before the add operation and to FALSE (value 0) if not
+ * @param entries_ptr
+ * On successful invocation, the array *entries_ptr stores a pointer to
+ * every table entry associated with the corresponding key. These pointers
+ * can be used for further read-write accesses to the table entries and
+ * remain valid until the key is deleted from the table or re-added
+ * (usually for associating different actions and/or action meta-data to
+ * the current key)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_entry_add_bulk(struct rte_pipeline *p,
+ uint32_t table_id,
+ void **keys,
+ struct rte_pipeline_table_entry **entries,
+ uint32_t n_keys,
+ int *key_found,
+ struct rte_pipeline_table_entry **entries_ptr);
+
+/**
+ * Pipeline table entry delete bulk
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @param keys
+ * Array containing table entry keys
+ * @param n_keys
+ * Number of keys to delete
+ * @param key_found
+ * On successful invocation, key_found for every item in the array is set
+ * to TRUE (non-zero value) if the corresponding key was found in the table
+ * before the delete operation and to FALSE (value 0) if not
+ * @param entries
+ * If the entries pointer is NULL, it is ignored for every entry found.
+ * Otherwise, after successful invocation, for each key found in the table,
+ * the table entry contents (as they were before the delete was performed)
+ * are copied to the corresponding buffer.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_entry_delete_bulk(struct rte_pipeline *p,
+ uint32_t table_id,
+ void **keys,
+ uint32_t n_keys,
+ int *key_found,
+ struct rte_pipeline_table_entry **entries);
+
+/**
+ * Read pipeline table stats.
+ *
+ * This function reads table statistics identified by *table_id* of given
+ * pipeline *p*.
+ *
+ * @param p
+ * Handle to pipeline instance.
+ * @param table_id
+ * Table ID for which the stats will be returned.
+ * @param stats
+ * Statistics buffer.
+ * @param clear
+ * If non-zero, the stats are cleared after the read.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_table_stats_read(struct rte_pipeline *p, uint32_t table_id,
+ struct rte_pipeline_table_stats *stats, int clear);
+
+/*
+ * Port IN
+ *
+ */
+/** Maximum number of input ports allowed for any given pipeline instance. The
+ value of this parameter cannot be changed. */
+#define RTE_PIPELINE_PORT_IN_MAX 64
+
+/**
+ * Pipeline input port action handler
+ *
+ * The action handler can decide to drop packets by resetting the associated
+ * packet bit in the pkts_mask parameter. In this case, the action handler is
+ * required not to free the packet buffer, which will be freed eventually by
+ * the pipeline.
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param n
+ * Number of packets in the input burst. This parameter specifies that
+ * elements 0 to (n-1) of pkts array are valid.
+ * @param arg
+ * Opaque parameter registered by the user at the pipeline table creation
+ * time
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_pipeline_port_in_action_handler)(
+ struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint32_t n,
+ void *arg);
+
+/** Parameters for pipeline input port creation */
+struct rte_pipeline_port_in_params {
+ /** Input port operations (specific to each table type) */
+ struct rte_port_in_ops *ops;
+ /** Opaque parameter to be passed to create operation when invoked */
+ void *arg_create;
+
+ /** Callback function to execute the user actions on input packets.
+ Disabled if set to NULL. */
+ rte_pipeline_port_in_action_handler f_action;
+ /** Opaque parameter to be passed to the action handler when invoked */
+ void *arg_ah;
+
+	/** Recommended burst size for the RX operation (in number of packets) */
+ uint32_t burst_size;
+};
+
+/**
+ * Pipeline input port create
+ *
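 * A possible usage sketch (illustrative only; it assumes a ring-based input
 * port from librte_port, with "ring" previously created via rte_ring_create()
 * and "status" defined by the application):
 *
 * @code
 * struct rte_port_ring_reader_params ring_params = {
 *	.ring = ring,
 * };
 * struct rte_pipeline_port_in_params params = {
 *	.ops = &rte_port_ring_reader_ops,
 *	.arg_create = &ring_params,
 *	.f_action = NULL,
 *	.arg_ah = NULL,
 *	.burst_size = 32,
 * };
 * uint32_t port_id;
 *
 * status = rte_pipeline_port_in_create(p, &params, &port_id);
 * @endcode
 *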
+ * @param p
+ * Handle to pipeline instance
+ * @param params
+ * Parameters for pipeline input port creation
+ * @param port_id
+ * Input port ID. Valid only within the scope of input port IDs of the
+ * current pipeline. Only returned after a successful invocation.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_create(struct rte_pipeline *p,
+ struct rte_pipeline_port_in_params *params,
+ uint32_t *port_id);
+
+/**
+ * Pipeline input port connect to table
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param port_id
+ * Port ID (returned by previous invocation of pipeline input port create)
+ * @param table_id
+ * Table ID (returned by previous invocation of pipeline table create)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_connect_to_table(struct rte_pipeline *p,
+ uint32_t port_id,
+ uint32_t table_id);
+
+/**
+ * Pipeline input port enable
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param port_id
+ * Port ID (returned by previous invocation of pipeline input port create)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_enable(struct rte_pipeline *p,
+ uint32_t port_id);
+
+/**
+ * Pipeline input port disable
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param port_id
+ * Port ID (returned by previous invocation of pipeline input port create)
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_disable(struct rte_pipeline *p,
+ uint32_t port_id);
+
+/**
+ * Read pipeline port in stats.
+ *
+ * This function reads port in statistics identified by *port_id* of given
+ * pipeline *p*.
+ *
+ * @param p
+ * Handle to pipeline instance.
+ * @param port_id
+ * Port ID for which the stats will be returned.
+ * @param stats
+ * Statistics buffer.
+ * @param clear
+ * If non-zero, the stats are cleared after the read.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_in_stats_read(struct rte_pipeline *p, uint32_t port_id,
+ struct rte_pipeline_port_in_stats *stats, int clear);
+
+/*
+ * Port OUT
+ *
+ */
+/** Maximum number of output ports allowed for any given pipeline instance. The
+ value of this parameter cannot be changed. */
+#define RTE_PIPELINE_PORT_OUT_MAX 64
+
+/**
+ * Pipeline output port action handler
+ *
+ * The action handler can decide to drop packets by resetting the associated
+ * packet bit in the pkts_mask parameter. In this case, the action handler is
+ * required not to free the packet buffer, which will be freed eventually by
+ * the pipeline.
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param pkts
+ * Burst of input packets specified as array of up to 64 pointers to struct
+ * rte_mbuf
+ * @param pkts_mask
+ * 64-bit bitmask specifying which packets in the input burst are valid. When
+ * pkts_mask bit n is set, then element n of pkts array is pointing to a
+ * valid packet. Otherwise, element n of pkts array will not be accessed.
+ * @param arg
+ * Opaque parameter registered by the user at the pipeline table creation
+ * time
+ * @return
+ * 0 on success, error code otherwise
+ */
+typedef int (*rte_pipeline_port_out_action_handler)(
+ struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ void *arg);
+
+/** Parameters for pipeline output port creation. The action handler is
+disabled by setting it to NULL. */
+struct rte_pipeline_port_out_params {
+ /** Output port operations (specific to each table type) */
+ struct rte_port_out_ops *ops;
+ /** Opaque parameter to be passed to create operation when invoked */
+ void *arg_create;
+
+	/** Callback function to execute the user actions on the burst of
+	packets to be sent out. Disabled if set to NULL. */
+ rte_pipeline_port_out_action_handler f_action;
+ /** Opaque parameter to be passed to the action handler when invoked */
+ void *arg_ah;
+};
+
+/**
+ * Pipeline output port create
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param params
+ * Parameters for pipeline output port creation
+ * @param port_id
+ * Output port ID. Valid only within the scope of output port IDs of the
+ * current pipeline. Only returned after a successful invocation.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_out_create(struct rte_pipeline *p,
+ struct rte_pipeline_port_out_params *params,
+ uint32_t *port_id);
+
+/**
+ * Read pipeline port out stats.
+ *
+ * This function reads port out statistics identified by *port_id* of given
+ * pipeline *p*.
+ *
+ * @param p
+ * Handle to pipeline instance.
+ * @param port_id
+ * Port ID for which the stats will be returned.
+ * @param stats
+ * Statistics buffer.
+ * @param clear
+ * If non-zero, the stats are cleared after the read.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_out_stats_read(struct rte_pipeline *p, uint32_t port_id,
+ struct rte_pipeline_port_out_stats *stats, int clear);
+
+/*
+ * Functions to be called as part of the port IN/OUT or table action handlers
+ *
+ */
+/**
+ * Action handler packet insert to output port
+ *
+ * This function can be called by any input/output port or table action handler
+ * to send a packet out through one of the pipeline output ports. This packet is
+ * generated by the action handler, i.e. this packet is not part of the burst of
+ * packets read from one of the pipeline input ports and currently processed by
+ * the pipeline (this packet is not an element of the pkts array input parameter
+ * of the action handler).
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param port_id
+ * Output port ID (returned by previous invocation of pipeline output port
+ * create) to send the packet specified by pkt
+ * @param pkt
+ * New packet generated by the action handler
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_port_out_packet_insert(struct rte_pipeline *p,
+ uint32_t port_id,
+ struct rte_mbuf *pkt);
+
+#define rte_pipeline_ah_port_out_packet_insert \
+ rte_pipeline_port_out_packet_insert
+
+/**
+ * Action handler packet hijack
+ *
+ * This function can be called by any input/output port or table action handler
+ * to hijack selected packets from the burst of packets read from one of the
+ * pipeline input ports and currently processed by the pipeline. The hijacked
+ * packets are removed from any further pipeline processing, with the action
+ * handler now having the full ownership for these packets.
+ *
+ * The action handler can further send the hijacked packets out through any
+ * pipeline output port by calling the rte_pipeline_ah_port_out_packet_insert()
+ * function. The action handler can also drop these packets by calling the
+ * rte_pktmbuf_free() function, although a better alternative is provided by
+ * the action handler using the rte_pipeline_ah_packet_drop() function.
+ *
+ * @param p
+ * Handle to pipeline instance
+ * @param pkts_mask
+ * 64-bit bitmask specifying which of the packets handed over for processing
+ * to the action handler is to be hijacked by the action handler. When
+ * pkts_mask bit n is set, then element n of the pkts array (input argument to
+ * the action handler) is hijacked.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_ah_packet_hijack(struct rte_pipeline *p,
+ uint64_t pkts_mask);
+
+/**
+ * Action handler packet drop
+ *
+ * This function is called by the pipeline action handlers (port in/out, table)
+ * to drop the packets selected using packet mask.
+ *
+ * This function can be called by any input/output port or table action handler
+ * to drop selected packets from the burst of packets read from one of the
+ * pipeline input ports and currently processed by the pipeline. The dropped
+ * packets are removed from any further pipeline processing and the packet
+ * buffers are eventually freed to their buffer pool.
+ *
+ * This function updates the drop statistics counters correctly, therefore the
+ * recommended approach for dropping packets by the action handlers is to call
+ * this function as opposed to the action handler hijacking the packets first
+ * and then dropping them invisibly to the pipeline (by using the
+ * rte_pktmbuf_free() function).
+ *
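 * A minimal sketch of an input port action handler built on this function
 * (illustrative only; it drops every second packet of the burst):
 *
 * @code
 * static int
 * my_port_in_ah(struct rte_pipeline *p, struct rte_mbuf **pkts, uint32_t n,
 *	void *arg)
 * {
 *	uint64_t drop_mask = 0;
 *	uint32_t i;
 *
 *	(void) pkts;
 *	(void) arg;
 *
 *	for (i = 1; i < n; i += 2)
 *		drop_mask |= 1LLU << i;
 *
 *	return rte_pipeline_ah_packet_drop(p, drop_mask);
 * }
 * @endcode
 *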
+ * @param p
+ * Handle to pipeline instance
+ * @param pkts_mask
+ * 64-bit bitmask specifying which of the packets handed over for processing
+ * to the action handler is to be dropped by the action handler. When
+ * pkts_mask bit n is set, then element n of the pkts array (input argument to
+ * the action handler) is dropped.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int rte_pipeline_ah_packet_drop(struct rte_pipeline *p,
+ uint64_t pkts_mask);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/seastar/dpdk/lib/librte_pipeline/rte_pipeline_version.map b/src/seastar/dpdk/lib/librte_pipeline/rte_pipeline_version.map
new file mode 100644
index 000000000..420f065d6
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_pipeline/rte_pipeline_version.map
@@ -0,0 +1,76 @@
+DPDK_2.0 {
+ global:
+
+ rte_pipeline_check;
+ rte_pipeline_create;
+ rte_pipeline_flush;
+ rte_pipeline_free;
+ rte_pipeline_port_in_connect_to_table;
+ rte_pipeline_port_in_create;
+ rte_pipeline_port_in_disable;
+ rte_pipeline_port_in_enable;
+ rte_pipeline_port_out_create;
+ rte_pipeline_port_out_packet_insert;
+ rte_pipeline_run;
+ rte_pipeline_table_create;
+ rte_pipeline_table_default_entry_add;
+ rte_pipeline_table_default_entry_delete;
+ rte_pipeline_table_entry_add;
+ rte_pipeline_table_entry_delete;
+
+ local: *;
+};
+
+DPDK_2.1 {
+ global:
+
+ rte_pipeline_port_in_stats_read;
+ rte_pipeline_port_out_stats_read;
+ rte_pipeline_table_stats_read;
+
+} DPDK_2.0;
+
+DPDK_2.2 {
+ global:
+
+ rte_pipeline_table_entry_add_bulk;
+ rte_pipeline_table_entry_delete_bulk;
+
+} DPDK_2.1;
+
+DPDK_16.04 {
+ global:
+
+ rte_pipeline_ah_packet_hijack;
+ rte_pipeline_ah_packet_drop;
+
+} DPDK_2.2;
+
+EXPERIMENTAL {
+ global:
+
+ rte_port_in_action_apply;
+ rte_port_in_action_create;
+ rte_port_in_action_free;
+ rte_port_in_action_params_get;
+ rte_port_in_action_profile_action_register;
+ rte_port_in_action_profile_create;
+ rte_port_in_action_profile_free;
+ rte_port_in_action_profile_freeze;
+ rte_table_action_apply;
+ rte_table_action_create;
+ rte_table_action_dscp_table_update;
+ rte_table_action_free;
+ rte_table_action_meter_profile_add;
+ rte_table_action_meter_profile_delete;
+ rte_table_action_meter_read;
+ rte_table_action_profile_action_register;
+ rte_table_action_profile_create;
+ rte_table_action_profile_free;
+ rte_table_action_profile_freeze;
+ rte_table_action_table_params_get;
+ rte_table_action_stats_read;
+ rte_table_action_time_read;
+ rte_table_action_ttl_read;
+ rte_table_action_crypto_sym_session_get;
+};
diff --git a/src/seastar/dpdk/lib/librte_pipeline/rte_port_in_action.c b/src/seastar/dpdk/lib/librte_pipeline/rte_port_in_action.c
new file mode 100644
index 000000000..e3b00df8d
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_pipeline/rte_port_in_action.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+
+#include "rte_port_in_action.h"
+
+/**
+ * RTE_PORT_IN_ACTION_FLTR
+ */
+static int
+fltr_cfg_check(struct rte_port_in_action_fltr_config *cfg)
+{
+ if (cfg == NULL)
+ return -1;
+
+ return 0;
+}
+
+struct fltr_data {
+ uint32_t port_id;
+};
+
+static void
+fltr_init(struct fltr_data *data,
+ struct rte_port_in_action_fltr_config *cfg)
+{
+ data->port_id = cfg->port_id;
+}
+
+static int
+fltr_apply(struct fltr_data *data,
+ struct rte_port_in_action_fltr_params *p)
+{
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ data->port_id = p->port_id;
+
+ return 0;
+}
+
+/**
+ * RTE_PORT_IN_ACTION_LB
+ */
+static int
+lb_cfg_check(struct rte_port_in_action_lb_config *cfg)
+{
+ if ((cfg == NULL) ||
+ (cfg->key_size < RTE_PORT_IN_ACTION_LB_KEY_SIZE_MIN) ||
+ (cfg->key_size > RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX) ||
+ (!rte_is_power_of_2(cfg->key_size)) ||
+ (cfg->f_hash == NULL))
+ return -1;
+
+ return 0;
+}
+
+struct lb_data {
+ uint32_t port_id[RTE_PORT_IN_ACTION_LB_TABLE_SIZE];
+};
+
+static void
+lb_init(struct lb_data *data,
+ struct rte_port_in_action_lb_config *cfg)
+{
+ memcpy(data->port_id, cfg->port_id, sizeof(cfg->port_id));
+}
+
+static int
+lb_apply(struct lb_data *data,
+ struct rte_port_in_action_lb_params *p)
+{
+ /* Check input arguments */
+ if (p == NULL)
+ return -1;
+
+ memcpy(data->port_id, p->port_id, sizeof(p->port_id));
+
+ return 0;
+}
+
+/**
+ * Action profile
+ */
+static int
+action_valid(enum rte_port_in_action_type action)
+{
+ switch (action) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ case RTE_PORT_IN_ACTION_LB:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+#define RTE_PORT_IN_ACTION_MAX 64
+
+struct ap_config {
+ uint64_t action_mask;
+ struct rte_port_in_action_fltr_config fltr;
+ struct rte_port_in_action_lb_config lb;
+};
+
+static size_t
+action_cfg_size(enum rte_port_in_action_type action)
+{
+ switch (action) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ return sizeof(struct rte_port_in_action_fltr_config);
+ case RTE_PORT_IN_ACTION_LB:
+ return sizeof(struct rte_port_in_action_lb_config);
+ default:
+ return 0;
+ }
+}
+
+static void*
+action_cfg_get(struct ap_config *ap_config,
+ enum rte_port_in_action_type type)
+{
+ switch (type) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ return &ap_config->fltr;
+
+ case RTE_PORT_IN_ACTION_LB:
+ return &ap_config->lb;
+
+ default:
+ return NULL;
+ }
+}
+
+static void
+action_cfg_set(struct ap_config *ap_config,
+ enum rte_port_in_action_type type,
+ void *action_cfg)
+{
+ void *dst = action_cfg_get(ap_config, type);
+
+ if (dst)
+ memcpy(dst, action_cfg, action_cfg_size(type));
+
+ ap_config->action_mask |= 1LLU << type;
+}
+
+struct ap_data {
+ size_t offset[RTE_PORT_IN_ACTION_MAX];
+ size_t total_size;
+};
+
+static size_t
+action_data_size(enum rte_port_in_action_type action,
+ struct ap_config *ap_config __rte_unused)
+{
+ switch (action) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ return sizeof(struct fltr_data);
+
+ case RTE_PORT_IN_ACTION_LB:
+ return sizeof(struct lb_data);
+
+ default:
+ return 0;
+ }
+}
+
+static void
+action_data_offset_set(struct ap_data *ap_data,
+ struct ap_config *ap_config)
+{
+ uint64_t action_mask = ap_config->action_mask;
+ size_t offset;
+ uint32_t action;
+
+ memset(ap_data->offset, 0, sizeof(ap_data->offset));
+
+ offset = 0;
+ for (action = 0; action < RTE_PORT_IN_ACTION_MAX; action++)
+ if (action_mask & (1LLU << action)) {
+ ap_data->offset[action] = offset;
+ offset += action_data_size((enum rte_port_in_action_type)action,
+ ap_config);
+ }
+
+ ap_data->total_size = offset;
+}
+
+struct rte_port_in_action_profile {
+ struct ap_config cfg;
+ struct ap_data data;
+ int frozen;
+};
+
+struct rte_port_in_action_profile *
+rte_port_in_action_profile_create(uint32_t socket_id)
+{
+ struct rte_port_in_action_profile *ap;
+
+ /* Memory allocation */
+ ap = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_port_in_action_profile),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (ap == NULL)
+ return NULL;
+
+ return ap;
+}
+
+int
+rte_port_in_action_profile_action_register(struct rte_port_in_action_profile *profile,
+ enum rte_port_in_action_type type,
+ void *action_config)
+{
+ int status;
+
+ /* Check input arguments */
+ if ((profile == NULL) ||
+ profile->frozen ||
+ (action_valid(type) == 0) ||
+ (profile->cfg.action_mask & (1LLU << type)) ||
+ ((action_cfg_size(type) == 0) && action_config) ||
+ (action_cfg_size(type) && (action_config == NULL)))
+ return -EINVAL;
+
+ switch (type) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ status = fltr_cfg_check(action_config);
+ break;
+
+ case RTE_PORT_IN_ACTION_LB:
+ status = lb_cfg_check(action_config);
+ break;
+
+ default:
+ status = 0;
+ break;
+ }
+
+ if (status)
+ return status;
+
+ /* Action enable */
+ action_cfg_set(&profile->cfg, type, action_config);
+
+ return 0;
+}
+
+int
+rte_port_in_action_profile_freeze(struct rte_port_in_action_profile *profile)
+{
+	if (profile == NULL)
+		return -EINVAL;
+
+	if (profile->frozen)
+ return -EBUSY;
+
+ action_data_offset_set(&profile->data, &profile->cfg);
+ profile->frozen = 1;
+
+ return 0;
+}
+
+int
+rte_port_in_action_profile_free(struct rte_port_in_action_profile *profile)
+{
+ if (profile == NULL)
+ return 0;
+
+	/* Allocated with rte_zmalloc_socket(), so release with rte_free() */
+	rte_free(profile);
+ return 0;
+}
+
+/**
+ * Action
+ */
+struct rte_port_in_action {
+ struct ap_config cfg;
+ struct ap_data data;
+ uint8_t memory[0] __rte_cache_aligned;
+};
+
+static __rte_always_inline void *
+action_data_get(struct rte_port_in_action *action,
+ enum rte_port_in_action_type type)
+{
+ size_t offset = action->data.offset[type];
+
+ return &action->memory[offset];
+}
+
+static void
+action_data_init(struct rte_port_in_action *action,
+ enum rte_port_in_action_type type)
+{
+ void *data = action_data_get(action, type);
+
+ switch (type) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ fltr_init(data, &action->cfg.fltr);
+ return;
+
+ case RTE_PORT_IN_ACTION_LB:
+ lb_init(data, &action->cfg.lb);
+ return;
+
+ default:
+ return;
+ }
+}
+
+struct rte_port_in_action *
+rte_port_in_action_create(struct rte_port_in_action_profile *profile,
+ uint32_t socket_id)
+{
+ struct rte_port_in_action *action;
+ size_t size;
+ uint32_t i;
+
+ /* Check input arguments */
+ if ((profile == NULL) ||
+ (profile->frozen == 0))
+ return NULL;
+
+ /* Memory allocation */
+ size = sizeof(struct rte_port_in_action) + profile->data.total_size;
+ size = RTE_CACHE_LINE_ROUNDUP(size);
+
+ action = rte_zmalloc_socket(NULL,
+ size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (action == NULL)
+ return NULL;
+
+ /* Initialization */
+ memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
+ memcpy(&action->data, &profile->data, sizeof(profile->data));
+
+ for (i = 0; i < RTE_PORT_IN_ACTION_MAX; i++)
+ if (action->cfg.action_mask & (1LLU << i))
+ action_data_init(action,
+ (enum rte_port_in_action_type)i);
+
+ return action;
+}
+
+int
+rte_port_in_action_apply(struct rte_port_in_action *action,
+ enum rte_port_in_action_type type,
+ void *action_params)
+{
+ void *action_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ (action_valid(type) == 0) ||
+ ((action->cfg.action_mask & (1LLU << type)) == 0) ||
+ (action_params == NULL))
+ return -EINVAL;
+
+ /* Data update */
+ action_data = action_data_get(action, type);
+
+ switch (type) {
+ case RTE_PORT_IN_ACTION_FLTR:
+ return fltr_apply(action_data,
+ action_params);
+
+ case RTE_PORT_IN_ACTION_LB:
+ return lb_apply(action_data,
+ action_params);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+ah_filter_on_match(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts,
+ void *arg)
+{
+ struct rte_port_in_action *action = arg;
+ struct rte_port_in_action_fltr_config *cfg = &action->cfg.fltr;
+ uint64_t *key_mask = (uint64_t *) cfg->key_mask;
+ uint64_t *key = (uint64_t *) cfg->key;
+ uint32_t key_offset = cfg->key_offset;
+ struct fltr_data *data = action_data_get(action,
+ RTE_PORT_IN_ACTION_FLTR);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+ uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ key_offset);
+
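		/* 16-byte key match: (pkt_key & key_mask) == key, computed as
		 * two 64-bit masked XORs OR-ed together (zero means match).
		 */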
+ uint64_t xor0 = (pkt_key[0] & key_mask[0]) ^ key[0];
+ uint64_t xor1 = (pkt_key[1] & key_mask[1]) ^ key[1];
+ uint64_t or = xor0 | xor1;
+
+ if (or == 0) {
+ rte_pipeline_ah_packet_hijack(p, 1LLU << i);
+ rte_pipeline_port_out_packet_insert(p,
+ data->port_id, pkt);
+ }
+ }
+
+ return 0;
+}
+
+static int
+ah_filter_on_mismatch(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts,
+ void *arg)
+{
+ struct rte_port_in_action *action = arg;
+ struct rte_port_in_action_fltr_config *cfg = &action->cfg.fltr;
+ uint64_t *key_mask = (uint64_t *) cfg->key_mask;
+ uint64_t *key = (uint64_t *) cfg->key;
+ uint32_t key_offset = cfg->key_offset;
+ struct fltr_data *data = action_data_get(action,
+ RTE_PORT_IN_ACTION_FLTR);
+ uint32_t i;
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+ uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(pkt,
+ key_offset);
+
+ uint64_t xor0 = (pkt_key[0] & key_mask[0]) ^ key[0];
+ uint64_t xor1 = (pkt_key[1] & key_mask[1]) ^ key[1];
+ uint64_t or = xor0 | xor1;
+
+ if (or) {
+ rte_pipeline_ah_packet_hijack(p, 1LLU << i);
+ rte_pipeline_port_out_packet_insert(p,
+ data->port_id, pkt);
+ }
+ }
+
+ return 0;
+}
+
+static int
+ah_lb(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint32_t n_pkts,
+ void *arg)
+{
+ struct rte_port_in_action *action = arg;
+ struct rte_port_in_action_lb_config *cfg = &action->cfg.lb;
+ struct lb_data *data = action_data_get(action, RTE_PORT_IN_ACTION_LB);
+ uint64_t pkt_mask = RTE_LEN2MASK(n_pkts, uint64_t);
+ uint32_t i;
+
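	/* Take ownership of the whole burst up front, then re-insert each
	 * packet on the output port selected by hashing its key.
	 */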
+ rte_pipeline_ah_packet_hijack(p, pkt_mask);
+
+ for (i = 0; i < n_pkts; i++) {
+ struct rte_mbuf *pkt = pkts[i];
+ uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(pkt,
+ cfg->key_offset);
+
+ uint64_t digest = cfg->f_hash(pkt_key,
+ cfg->key_mask,
+ cfg->key_size,
+ cfg->seed);
+ uint64_t pos = digest & (RTE_PORT_IN_ACTION_LB_TABLE_SIZE - 1);
+ uint32_t port_id = data->port_id[pos];
+
+ rte_pipeline_port_out_packet_insert(p, port_id, pkt);
+ }
+
+ return 0;
+}
+
+static rte_pipeline_port_in_action_handler
+ah_selector(struct rte_port_in_action *action)
+{
+ if (action->cfg.action_mask == 0)
+ return NULL;
+
+ if (action->cfg.action_mask == 1LLU << RTE_PORT_IN_ACTION_FLTR)
+ return (action->cfg.fltr.filter_on_match) ?
+ ah_filter_on_match : ah_filter_on_mismatch;
+
+ if (action->cfg.action_mask == 1LLU << RTE_PORT_IN_ACTION_LB)
+ return ah_lb;
+
+ return NULL;
+}
+
+int
+rte_port_in_action_params_get(struct rte_port_in_action *action,
+ struct rte_pipeline_port_in_params *params)
+{
+ rte_pipeline_port_in_action_handler f_action;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ (params == NULL))
+ return -EINVAL;
+
+ f_action = ah_selector(action);
+
+ /* Fill in params */
+ params->f_action = f_action;
+ params->arg_ah = (f_action) ? action : NULL;
+
+ return 0;
+}
+
+int
+rte_port_in_action_free(struct rte_port_in_action *action)
+{
+ if (action == NULL)
+ return 0;
+
+ rte_free(action);
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/lib/librte_pipeline/rte_port_in_action.h b/src/seastar/dpdk/lib/librte_pipeline/rte_port_in_action.h
new file mode 100644
index 000000000..0a85e4e03
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_pipeline/rte_port_in_action.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_RTE_PORT_IN_ACTION_H__
+#define __INCLUDE_RTE_PORT_IN_ACTION_H__
+
+/**
+ * @file
+ * RTE Pipeline Input Port Actions
+ *
+ * This API provides a common set of actions for pipeline input ports to speed
+ * up application development.
+ *
+ * Each pipeline input port can be assigned an action handler to be executed
+ * on every input packet during the pipeline execution. The pipeline library
+ * allows the user to define his own input port actions by providing customized
+ * input port action handler. While the user can still follow this process, this
+ * API is intended to provide a quicker development alternative for a set of
+ * predefined actions.
+ *
+ * The typical steps to use this API are:
+ * - Define an input port action profile. This is a configuration template that
+ * can potentially be shared by multiple input ports from the same or
+ * different pipelines, with different input ports from the same pipeline
+ * able to use different action profiles. For every input port using a given
+ * action profile, the profile defines the set of actions and the action
+ * configuration to be executed by the input port. API functions:
+ * rte_port_in_action_profile_create(),
+ * rte_port_in_action_profile_action_register(),
+ * rte_port_in_action_profile_freeze().
+ *
+ * - Instantiate the input port action profile to create input port action
+ * objects. Each pipeline input port has its own action object.
+ * API functions: rte_port_in_action_create().
+ *
+ * - Use the input port action object to generate the input port action handler
+ * invoked by the pipeline. API functions:
+ * rte_port_in_action_params_get().
+ *
+ * - Use the input port action object to generate the internal data structures
+ * used by the input port action handler based on given action parameters.
+ * API functions: rte_port_in_action_apply().
+ *
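 * A possible usage sketch (illustrative only; error checks are omitted and
 * the load balance configuration "lb_cfg" is assumed to be filled in by the
 * application):
 *
 * @code
 * struct rte_port_in_action_profile *ap;
 * struct rte_port_in_action *a;
 * struct rte_pipeline_port_in_params params;
 *
 * ap = rte_port_in_action_profile_create(0);
 * rte_port_in_action_profile_action_register(ap, RTE_PORT_IN_ACTION_LB,
 *	&lb_cfg);
 * rte_port_in_action_profile_freeze(ap);
 *
 * a = rte_port_in_action_create(ap, 0);
 * rte_port_in_action_params_get(a, &params);
 * @endcode
 *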
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+#include <rte_table_hash.h>
+
+#include "rte_pipeline.h"
+
+/** Input port actions. */
+enum rte_port_in_action_type {
+ /** Filter selected input packets. */
+ RTE_PORT_IN_ACTION_FLTR = 0,
+
+ /** Load balance. */
+ RTE_PORT_IN_ACTION_LB,
+};
+
+/**
+ * RTE_PORT_IN_ACTION_FLTR
+ */
+/** Filter key size (number of bytes) */
+#define RTE_PORT_IN_ACTION_FLTR_KEY_SIZE 16
+
+/** Filter action configuration (per action profile). */
+struct rte_port_in_action_fltr_config {
+ /** Key offset within the input packet buffer. Offset 0 points to the
+ * first byte of the MBUF structure.
+ */
+ uint32_t key_offset;
+
+ /** Key mask. */
+ uint8_t key_mask[RTE_PORT_IN_ACTION_FLTR_KEY_SIZE];
+
+ /** Key value. */
+ uint8_t key[RTE_PORT_IN_ACTION_FLTR_KEY_SIZE];
+
+ /** When non-zero, all the input packets that match the *key* (with the
+ * *key_mask* applied) are sent to the pipeline output port *port_id*.
+ * When zero, all the input packets that do NOT match the *key* (with
+ * *key_mask* applied) are sent to the pipeline output port *port_id*.
+ */
+ int filter_on_match;
+
+ /** Pipeline output port ID to send the filtered input packets to.
+ * Can be updated later.
+ *
+ * @see struct rte_port_in_action_fltr_params
+ */
+ uint32_t port_id;
+};
+
+/** Filter action parameters (per action). */
+struct rte_port_in_action_fltr_params {
+ /** Pipeline output port ID to send the filtered input packets to. */
+ uint32_t port_id;
+};
+
+/**
+ * RTE_PORT_IN_ACTION_LB
+ */
+/** Load balance key size min (number of bytes). */
+#define RTE_PORT_IN_ACTION_LB_KEY_SIZE_MIN 8
+
+/** Load balance key size max (number of bytes). */
+#define RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX 64
+
+/** Load balance table size. */
+#define RTE_PORT_IN_ACTION_LB_TABLE_SIZE 16
+
+/** Load balance action configuration (per action profile). */
+struct rte_port_in_action_lb_config {
+ /** Key size (number of bytes). */
+ uint32_t key_size;
+
+ /** Key offset within the input packet buffer. Offset 0 points to the
+ * first byte of the MBUF structure.
+ */
+ uint32_t key_offset;
+
+	/** Key mask (only the first *key_size* bytes are valid). */
+ uint8_t key_mask[RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX];
+
+ /** Hash function. */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed value for *f_hash*. */
+ uint64_t seed;
+
+ /** Table defining the weight of each pipeline output port. The weights
+ * are set in 1/RTE_PORT_IN_ACTION_LB_TABLE_SIZE increments. To assign a
+ * weight of N/RTE_PORT_IN_ACTION_LB_TABLE_SIZE to a given output port
+ * (0 <= N <= RTE_PORT_IN_ACTION_LB_TABLE_SIZE), the output port needs
+ * to show up exactly N times in this table. Can be updated later.
+ *
+ * @see struct rte_port_in_action_lb_params
+ */
+ uint32_t port_id[RTE_PORT_IN_ACTION_LB_TABLE_SIZE];
+};
+
+/** Load balance action parameters (per action). */
+struct rte_port_in_action_lb_params {
+ /** Table defining the weight of each pipeline output port. The weights
+ * are set in 1/RTE_PORT_IN_ACTION_LB_TABLE_SIZE increments. To assign a
+ * weight of N/RTE_PORT_IN_ACTION_LB_TABLE_SIZE to a given output port
+ * (0 <= N <= RTE_PORT_IN_ACTION_LB_TABLE_SIZE), the output port needs
+ * to show up exactly N times in this table.
+ */
+ uint32_t port_id[RTE_PORT_IN_ACTION_LB_TABLE_SIZE];
+};
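+
+/*
+ * Example (illustrative): with RTE_PORT_IN_ACTION_LB_TABLE_SIZE equal to 16,
+ * a 3:1 load split between output ports 0 and 1 is expressed by listing
+ * port 0 in 12 table positions and port 1 in the remaining 4:
+ *
+ *	.port_id = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1}
+ */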
+
+/**
+ * Input port action profile.
+ */
+struct rte_port_in_action_profile;
+
+/**
+ * Input port action profile create.
+ *
+ * @param[in] socket_id
+ * CPU socket ID for the internal data structures memory allocation.
+ * @return
+ * Input port action profile handle on success, NULL otherwise.
+ */
+struct rte_port_in_action_profile * __rte_experimental
+rte_port_in_action_profile_create(uint32_t socket_id);
+
+/**
+ * Input port action profile free.
+ *
+ * @param[in] profile
+ * Input port action profile handle (needs to be valid).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_profile_free(struct rte_port_in_action_profile *profile);
+
+/**
+ * Input port action profile action register.
+ *
+ * @param[in] profile
+ * Input port action profile handle (needs to be valid and not in frozen
+ * state).
+ * @param[in] type
+ * Specific input port action to be registered for *profile*.
+ * @param[in] action_config
+ * Configuration for the *type* action.
+ * If struct rte_port_in_action_*type*_config is defined, it needs to point to
+ * a valid instance of this structure, otherwise it needs to be set to NULL.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_profile_action_register(
+ struct rte_port_in_action_profile *profile,
+ enum rte_port_in_action_type type,
+ void *action_config);
+
+/**
+ * Input port action profile freeze.
+ *
+ * Once this function is called successfully, the given profile enters the
+ * frozen state with the following immediate effects: no more actions can be
+ * registered for this profile, and the profile can now be instantiated to
+ * create input port action objects.
+ *
+ * @param[in] profile
+ * Input port profile action handle (needs to be valid and not in frozen
+ * state).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ *
+ * @see rte_port_in_action_create()
+ */
+int __rte_experimental
+rte_port_in_action_profile_freeze(struct rte_port_in_action_profile *profile);
+
+/**
+ * Input port action.
+ */
+struct rte_port_in_action;
+
+/**
+ * Input port action create.
+ *
+ * Instantiates the given input port action profile to create an input port
+ * action object.
+ *
+ * @param[in] profile
+ * Input port profile action handle (needs to be valid and in frozen state).
+ * @param[in] socket_id
+ * CPU socket ID where the internal data structures required by the new input
+ * port action object should be allocated.
+ * @return
+ * Handle to input port action object on success, NULL on error.
+ */
+struct rte_port_in_action * __rte_experimental
+rte_port_in_action_create(struct rte_port_in_action_profile *profile,
+ uint32_t socket_id);
+
+/**
+ * Input port action free.
+ *
+ * @param[in] action
+ * Handle to input port action object (needs to be valid).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_free(struct rte_port_in_action *action);
+
+/**
+ * Input port params get.
+ *
+ * @param[in] action
+ * Handle to input port action object (needs to be valid).
+ * @param[inout] params
+ * Pipeline input port parameters (needs to be pre-allocated).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_params_get(struct rte_port_in_action *action,
+ struct rte_pipeline_port_in_params *params);
+
+/**
+ * Input port action apply.
+ *
+ * @param[in] action
+ * Handle to input port action object (needs to be valid).
+ * @param[in] type
+ * Specific input port action previously registered for the input port action
+ * profile of the *action* object.
+ * @param[in] action_params
+ * Parameters for the *type* action.
+ * If struct rte_port_in_action_*type*_params is defined, it needs to point to
+ * a valid instance of this structure, otherwise it needs to be set to NULL.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_port_in_action_apply(struct rte_port_in_action *action,
+ enum rte_port_in_action_type type,
+ void *action_params);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_PORT_IN_ACTION_H__ */
diff --git a/src/seastar/dpdk/lib/librte_pipeline/rte_table_action.c b/src/seastar/dpdk/lib/librte_pipeline/rte_table_action.c
new file mode 100644
index 000000000..9a65f3ded
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_pipeline/rte_table_action.c
@@ -0,0 +1,3474 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
+ */
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_esp.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_table_action.h"
+
+#define rte_htons rte_cpu_to_be_16
+#define rte_htonl rte_cpu_to_be_32
+
+#define rte_ntohs rte_be_to_cpu_16
+#define rte_ntohl rte_be_to_cpu_32
+
+/**
+ * RTE_TABLE_ACTION_FWD
+ */
+#define fwd_data rte_pipeline_table_entry
+
+static int
+fwd_apply(struct fwd_data *data,
+ struct rte_table_action_fwd_params *p)
+{
+ data->action = p->action;
+
+ if (p->action == RTE_PIPELINE_ACTION_PORT)
+ data->port_id = p->id;
+
+ if (p->action == RTE_PIPELINE_ACTION_TABLE)
+ data->table_id = p->id;
+
+ return 0;
+}
+
+/**
+ * RTE_TABLE_ACTION_LB
+ */
+static int
+lb_cfg_check(struct rte_table_action_lb_config *cfg)
+{
+ if ((cfg == NULL) ||
+ (cfg->key_size < RTE_TABLE_ACTION_LB_KEY_SIZE_MIN) ||
+ (cfg->key_size > RTE_TABLE_ACTION_LB_KEY_SIZE_MAX) ||
+ (!rte_is_power_of_2(cfg->key_size)) ||
+ (cfg->f_hash == NULL))
+ return -1;
+
+ return 0;
+}
+
+struct lb_data {
+ uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
+} __attribute__((__packed__));
+
+static int
+lb_apply(struct lb_data *data,
+ struct rte_table_action_lb_params *p)
+{
+ memcpy(data->out, p->out, sizeof(data->out));
+
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_lb(struct rte_mbuf *mbuf,
+ struct lb_data *data,
+ struct rte_table_action_lb_config *cfg)
+{
+ uint8_t *pkt_key = RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->key_offset);
+ uint32_t *out = RTE_MBUF_METADATA_UINT32_PTR(mbuf, cfg->out_offset);
+ uint64_t digest, pos;
+ uint32_t out_val;
+
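	/* Hash the packet key, use the low bits of the digest to index the
	 * output value table, and write the selected value into the packet
	 * meta-data at out_offset.
	 */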
+ digest = cfg->f_hash(pkt_key,
+ cfg->key_mask,
+ cfg->key_size,
+ cfg->seed);
+ pos = digest & (RTE_TABLE_ACTION_LB_TABLE_SIZE - 1);
+ out_val = data->out[pos];
+
+ *out = out_val;
+}
+
+/**
+ * RTE_TABLE_ACTION_MTR
+ */
+static int
+mtr_cfg_check(struct rte_table_action_mtr_config *mtr)
+{
+ if ((mtr->alg == RTE_TABLE_ACTION_METER_SRTCM) ||
+ ((mtr->n_tc != 1) && (mtr->n_tc != 4)) ||
+ (mtr->n_bytes_enabled != 0))
+ return -ENOTSUP;
+ return 0;
+}
+
+struct mtr_trtcm_data {
+ struct rte_meter_trtcm trtcm;
+ uint64_t stats[RTE_COLORS];
+} __attribute__((__packed__));
+
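+/* Each per-color stats word doubles as configuration storage, as encoded by
+ * the accessors below:
+ *	bits [1:0]: policer output color
+ *	bit 2: policer drop flag
+ *	bits [7:3]: meter profile ID (GREEN word only)
+ *	bits [63:8]: packet counter
+ */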
+#define MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data) \
+ (((data)->stats[RTE_COLOR_GREEN] & 0xF8LLU) >> 3)
+
+static void
+mtr_trtcm_data_meter_profile_id_set(struct mtr_trtcm_data *data,
+ uint32_t profile_id)
+{
+ data->stats[RTE_COLOR_GREEN] &= ~0xF8LLU;
+ data->stats[RTE_COLOR_GREEN] |= (profile_id % 32) << 3;
+}
+
+#define MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color)\
+ (((data)->stats[(color)] & 4LLU) >> 2)
+
+#define MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color)\
+ ((enum rte_color)((data)->stats[(color)] & 3LLU))
+
+static void
+mtr_trtcm_data_policer_action_set(struct mtr_trtcm_data *data,
+ enum rte_color color,
+ enum rte_table_action_policer action)
+{
+ if (action == RTE_TABLE_ACTION_POLICER_DROP) {
+ data->stats[color] |= 4LLU;
+ } else {
+ data->stats[color] &= ~7LLU;
+ data->stats[color] |= color & 3LLU;
+ }
+}
+
+static uint64_t
+mtr_trtcm_data_stats_get(struct mtr_trtcm_data *data,
+ enum rte_color color)
+{
+ return data->stats[color] >> 8;
+}
+
+static void
+mtr_trtcm_data_stats_reset(struct mtr_trtcm_data *data,
+ enum rte_color color)
+{
+ data->stats[color] &= 0xFFLU;
+}
+
+#define MTR_TRTCM_DATA_STATS_INC(data, color) \
+ ((data)->stats[(color)] += (1LLU << 8))
+
+static size_t
+mtr_data_size(struct rte_table_action_mtr_config *mtr)
+{
+ return mtr->n_tc * sizeof(struct mtr_trtcm_data);
+}
+
+struct dscp_table_entry_data {
+ enum rte_color color;
+ uint16_t tc;
+ uint16_t tc_queue;
+};
+
+struct dscp_table_data {
+ struct dscp_table_entry_data entry[64];
+};
+
+struct meter_profile_data {
+ struct rte_meter_trtcm_profile profile;
+ uint32_t profile_id;
+ int valid;
+};
+
+static struct meter_profile_data *
+meter_profile_data_find(struct meter_profile_data *mp,
+ uint32_t mp_size,
+ uint32_t profile_id)
+{
+ uint32_t i;
+
+ for (i = 0; i < mp_size; i++) {
+ struct meter_profile_data *mp_data = &mp[i];
+
+ if (mp_data->valid && (mp_data->profile_id == profile_id))
+ return mp_data;
+ }
+
+ return NULL;
+}
+
+static struct meter_profile_data *
+meter_profile_data_find_unused(struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ uint32_t i;
+
+ for (i = 0; i < mp_size; i++) {
+ struct meter_profile_data *mp_data = &mp[i];
+
+ if (!mp_data->valid)
+ return mp_data;
+ }
+
+ return NULL;
+}
+
+static int
+mtr_apply_check(struct rte_table_action_mtr_params *p,
+ struct rte_table_action_mtr_config *cfg,
+ struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ uint32_t i;
+
+ if (p->tc_mask > RTE_LEN2MASK(cfg->n_tc, uint32_t))
+ return -EINVAL;
+
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
+ struct meter_profile_data *mp_data;
+
+ if ((p->tc_mask & (1LLU << i)) == 0)
+ continue;
+
+ mp_data = meter_profile_data_find(mp,
+ mp_size,
+ p_tc->meter_profile_id);
+ if (!mp_data)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mtr_apply(struct mtr_trtcm_data *data,
+ struct rte_table_action_mtr_params *p,
+ struct rte_table_action_mtr_config *cfg,
+ struct meter_profile_data *mp,
+ uint32_t mp_size)
+{
+ uint32_t i;
+ int status;
+
+ /* Check input arguments */
+ status = mtr_apply_check(p, cfg, mp, mp_size);
+ if (status)
+ return status;
+
+ /* Apply */
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct rte_table_action_mtr_tc_params *p_tc = &p->mtr[i];
+ struct mtr_trtcm_data *data_tc = &data[i];
+ struct meter_profile_data *mp_data;
+
+ if ((p->tc_mask & (1LLU << i)) == 0)
+ continue;
+
+ /* Find profile */
+ mp_data = meter_profile_data_find(mp,
+ mp_size,
+ p_tc->meter_profile_id);
+ if (!mp_data)
+ return -EINVAL;
+
+ memset(data_tc, 0, sizeof(*data_tc));
+
+ /* Meter object */
+ status = rte_meter_trtcm_config(&data_tc->trtcm,
+ &mp_data->profile);
+ if (status)
+ return status;
+
+ /* Meter profile */
+ mtr_trtcm_data_meter_profile_id_set(data_tc,
+ mp_data - mp);
+
+ /* Policer actions */
+ mtr_trtcm_data_policer_action_set(data_tc,
+ RTE_COLOR_GREEN,
+ p_tc->policer[RTE_COLOR_GREEN]);
+
+ mtr_trtcm_data_policer_action_set(data_tc,
+ RTE_COLOR_YELLOW,
+ p_tc->policer[RTE_COLOR_YELLOW]);
+
+ mtr_trtcm_data_policer_action_set(data_tc,
+ RTE_COLOR_RED,
+ p_tc->policer[RTE_COLOR_RED]);
+ }
+
+ return 0;
+}
+
+static __rte_always_inline uint64_t
+pkt_work_mtr(struct rte_mbuf *mbuf,
+ struct mtr_trtcm_data *data,
+ struct dscp_table_data *dscp_table,
+ struct meter_profile_data *mp,
+ uint64_t time,
+ uint32_t dscp,
+ uint16_t total_length)
+{
+ uint64_t drop_mask;
+ struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
+ enum rte_color color_in, color_meter, color_policer;
+ uint32_t tc, mp_id;
+
+ tc = dscp_entry->tc;
+ color_in = dscp_entry->color;
+ data += tc;
+ mp_id = MTR_TRTCM_DATA_METER_PROFILE_ID_GET(data);
+
+ /* Meter */
+ color_meter = rte_meter_trtcm_color_aware_check(
+ &data->trtcm,
+ &mp[mp_id].profile,
+ time,
+ total_length,
+ color_in);
+
+ /* Stats */
+ MTR_TRTCM_DATA_STATS_INC(data, color_meter);
+
+ /* Police */
+ drop_mask = MTR_TRTCM_DATA_POLICER_ACTION_DROP_GET(data, color_meter);
+ color_policer =
+ MTR_TRTCM_DATA_POLICER_ACTION_COLOR_GET(data, color_meter);
+ rte_mbuf_sched_color_set(mbuf, (uint8_t)color_policer);
+
+ return drop_mask;
+}
+
+/**
+ * RTE_TABLE_ACTION_TM
+ */
+static int
+tm_cfg_check(struct rte_table_action_tm_config *tm)
+{
+ if ((tm->n_subports_per_port == 0) ||
+ (rte_is_power_of_2(tm->n_subports_per_port) == 0) ||
+ (tm->n_subports_per_port > UINT16_MAX) ||
+ (tm->n_pipes_per_subport == 0) ||
+ (rte_is_power_of_2(tm->n_pipes_per_subport) == 0))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct tm_data {
+ uint32_t queue_id;
+ uint32_t reserved;
+} __attribute__((__packed__));
+
+static int
+tm_apply_check(struct rte_table_action_tm_params *p,
+ struct rte_table_action_tm_config *cfg)
+{
+ if ((p->subport_id >= cfg->n_subports_per_port) ||
+ (p->pipe_id >= cfg->n_pipes_per_subport))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+tm_apply(struct tm_data *data,
+ struct rte_table_action_tm_params *p,
+ struct rte_table_action_tm_config *cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = tm_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ /* Apply */
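	/* Pre-compute the upper bits of the hierarchical queue ID:
	 * [subport | pipe | tc (2 bits) | tc queue (2 bits)]. The low 4 bits
	 * are filled in per packet by pkt_work_tm() from the DSCP table.
	 */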
+ data->queue_id = p->subport_id <<
+ (__builtin_ctz(cfg->n_pipes_per_subport) + 4) |
+ p->pipe_id << 4;
+
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_tm(struct rte_mbuf *mbuf,
+ struct tm_data *data,
+ struct dscp_table_data *dscp_table,
+ uint32_t dscp)
+{
+ struct dscp_table_entry_data *dscp_entry = &dscp_table->entry[dscp];
+ uint32_t queue_id = data->queue_id |
+ (dscp_entry->tc << 2) |
+ dscp_entry->tc_queue;
+ rte_mbuf_sched_set(mbuf, queue_id, dscp_entry->tc,
+ (uint8_t)dscp_entry->color);
+}
+
+/**
+ * RTE_TABLE_ACTION_ENCAP
+ */
+static int
+encap_valid(enum rte_table_action_encap_type encap)
+{
+ switch (encap) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ case RTE_TABLE_ACTION_ENCAP_VXLAN:
+ case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int
+encap_cfg_check(struct rte_table_action_encap_config *encap)
+{
+ if ((encap->encap_mask == 0) ||
+ (__builtin_popcountll(encap->encap_mask) != 1))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct encap_ether_data {
+ struct ether_hdr ether;
+} __attribute__((__packed__));
+
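+/* Pack PCP (3 bits), DEI (1 bit) and VID (12 bits) into a host-order
+ * 802.1Q tag control word.
+ */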
+#define VLAN(pcp, dei, vid) \
+ ((uint16_t)((((uint64_t)(pcp)) & 0x7LLU) << 13) | \
+ ((((uint64_t)(dei)) & 0x1LLU) << 12) | \
+ (((uint64_t)(vid)) & 0xFFFLLU)) \
+
+struct encap_vlan_data {
+ struct ether_hdr ether;
+ struct vlan_hdr vlan;
+} __attribute__((__packed__));
+
+struct encap_qinq_data {
+ struct ether_hdr ether;
+ struct vlan_hdr svlan;
+ struct vlan_hdr cvlan;
+} __attribute__((__packed__));
+
+#define ETHER_TYPE_MPLS_UNICAST 0x8847
+
+#define ETHER_TYPE_MPLS_MULTICAST 0x8848
+
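+/* Pack label (20 bits), TC (3 bits), bottom-of-stack flag S (1 bit) and
+ * TTL (8 bits) into a host-order MPLS label stack entry.
+ */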
+#define MPLS(label, tc, s, ttl) \
+ ((uint32_t)(((((uint64_t)(label)) & 0xFFFFFLLU) << 12) |\
+ ((((uint64_t)(tc)) & 0x7LLU) << 9) | \
+ ((((uint64_t)(s)) & 0x1LLU) << 8) | \
+ (((uint64_t)(ttl)) & 0xFFLLU)))
+
+struct encap_mpls_data {
+ struct ether_hdr ether;
+ uint32_t mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
+ uint32_t mpls_count;
+} __attribute__((__packed__));
+
+#define PPP_PROTOCOL_IP 0x0021
+
+struct pppoe_ppp_hdr {
+ uint16_t ver_type_code;
+ uint16_t session_id;
+ uint16_t length;
+ uint16_t protocol;
+} __attribute__((__packed__));
+
+struct encap_pppoe_data {
+ struct ether_hdr ether;
+ struct pppoe_ppp_hdr pppoe_ppp;
+} __attribute__((__packed__));
+
+#define IP_PROTO_UDP 17
+
+struct encap_vxlan_ipv4_data {
+ struct ether_hdr ether;
+ struct ipv4_hdr ipv4;
+ struct udp_hdr udp;
+ struct vxlan_hdr vxlan;
+} __attribute__((__packed__));
+
+struct encap_vxlan_ipv4_vlan_data {
+ struct ether_hdr ether;
+ struct vlan_hdr vlan;
+ struct ipv4_hdr ipv4;
+ struct udp_hdr udp;
+ struct vxlan_hdr vxlan;
+} __attribute__((__packed__));
+
+struct encap_vxlan_ipv6_data {
+ struct ether_hdr ether;
+ struct ipv6_hdr ipv6;
+ struct udp_hdr udp;
+ struct vxlan_hdr vxlan;
+} __attribute__((__packed__));
+
+struct encap_vxlan_ipv6_vlan_data {
+ struct ether_hdr ether;
+ struct vlan_hdr vlan;
+ struct ipv6_hdr ipv6;
+ struct udp_hdr udp;
+ struct vxlan_hdr vxlan;
+} __attribute__((__packed__));
+
+struct encap_qinq_pppoe_data {
+ struct ether_hdr ether;
+ struct vlan_hdr svlan;
+ struct vlan_hdr cvlan;
+ struct pppoe_ppp_hdr pppoe_ppp;
+} __attribute__((__packed__));
+
+static size_t
+encap_data_size(struct rte_table_action_encap_config *encap)
+{
+ switch (encap->encap_mask) {
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
+ return sizeof(struct encap_ether_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
+ return sizeof(struct encap_vlan_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
+ return sizeof(struct encap_qinq_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
+ return sizeof(struct encap_mpls_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return sizeof(struct encap_pppoe_data);
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
+		if (encap->vxlan.ip_version) {
+			if (encap->vxlan.vlan)
+				return sizeof(struct encap_vxlan_ipv4_vlan_data);
+			else
+				return sizeof(struct encap_vxlan_ipv4_data);
+		} else {
+			if (encap->vxlan.vlan)
+				return sizeof(struct encap_vxlan_ipv6_vlan_data);
+			else
+				return sizeof(struct encap_vxlan_ipv6_data);
+		}
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ return sizeof(struct encap_qinq_pppoe_data);
+
+ default:
+ return 0;
+ }
+}
+
+static int
+encap_apply_check(struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg)
+{
+ if ((encap_valid(p->type) == 0) ||
+ ((cfg->encap_mask & (1LLU << p->type)) == 0))
+ return -EINVAL;
+
+ switch (p->type) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ if ((p->mpls.mpls_count == 0) ||
+ (p->mpls.mpls_count > RTE_TABLE_ACTION_MPLS_LABELS_MAX))
+ return -EINVAL;
+
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_VXLAN:
+ return 0;
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+encap_ether_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_common_config *common_cfg)
+{
+ struct encap_ether_data *d = data;
+ uint16_t ethertype = (common_cfg->ip_version) ?
+ ETHER_TYPE_IPv4 :
+ ETHER_TYPE_IPv6;
+
+ /* Ethernet */
+ ether_addr_copy(&p->ether.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->ether.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ethertype);
+
+ return 0;
+}
+
+static int
+encap_vlan_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_common_config *common_cfg)
+{
+ struct encap_vlan_data *d = data;
+ uint16_t ethertype = (common_cfg->ip_version) ?
+ ETHER_TYPE_IPv4 :
+ ETHER_TYPE_IPv6;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+
+ /* VLAN */
+ d->vlan.vlan_tci = rte_htons(VLAN(p->vlan.vlan.pcp,
+ p->vlan.vlan.dei,
+ p->vlan.vlan.vid));
+ d->vlan.eth_proto = rte_htons(ethertype);
+
+ return 0;
+}
+
+static int
+encap_qinq_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_common_config *common_cfg)
+{
+ struct encap_qinq_data *d = data;
+ uint16_t ethertype = (common_cfg->ip_version) ?
+ ETHER_TYPE_IPv4 :
+ ETHER_TYPE_IPv6;
+
+ /* Ethernet */
+ ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_QINQ);
+
+ /* SVLAN */
+ d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
+ p->qinq.svlan.dei,
+ p->qinq.svlan.vid));
+ d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
+
+ /* CVLAN */
+ d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
+ p->qinq.cvlan.dei,
+ p->qinq.cvlan.vid));
+ d->cvlan.eth_proto = rte_htons(ethertype);
+
+ return 0;
+}
+
+static int
+encap_qinq_pppoe_apply(void *data,
+ struct rte_table_action_encap_params *p)
+{
+ struct encap_qinq_pppoe_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->qinq.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->qinq.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+
+ /* SVLAN */
+ d->svlan.vlan_tci = rte_htons(VLAN(p->qinq.svlan.pcp,
+ p->qinq.svlan.dei,
+ p->qinq.svlan.vid));
+ d->svlan.eth_proto = rte_htons(ETHER_TYPE_VLAN);
+
+ /* CVLAN */
+ d->cvlan.vlan_tci = rte_htons(VLAN(p->qinq.cvlan.pcp,
+ p->qinq.cvlan.dei,
+ p->qinq.cvlan.vid));
+ d->cvlan.eth_proto = rte_htons(ETHER_TYPE_PPPOE_SESSION);
+
+ /* PPPoE and PPP*/
+ d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
+ d->pppoe_ppp.session_id = rte_htons(p->qinq_pppoe.pppoe.session_id);
+ d->pppoe_ppp.length = 0; /* not pre-computed */
+ d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
+
+ return 0;
+}
+
+static int
+encap_mpls_apply(void *data,
+ struct rte_table_action_encap_params *p)
+{
+ struct encap_mpls_data *d = data;
+ uint16_t ethertype = (p->mpls.unicast) ?
+ ETHER_TYPE_MPLS_UNICAST :
+ ETHER_TYPE_MPLS_MULTICAST;
+ uint32_t i;
+
+ /* Ethernet */
+ ether_addr_copy(&p->mpls.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->mpls.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ethertype);
+
+ /* MPLS */
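	/* Push all labels with the bottom-of-stack flag cleared, except the
	 * last one, which carries S = 1.
	 */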
+ for (i = 0; i < p->mpls.mpls_count - 1; i++)
+ d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
+ p->mpls.mpls[i].tc,
+ 0,
+ p->mpls.mpls[i].ttl));
+
+ d->mpls[i] = rte_htonl(MPLS(p->mpls.mpls[i].label,
+ p->mpls.mpls[i].tc,
+ 1,
+ p->mpls.mpls[i].ttl));
+
+ d->mpls_count = p->mpls.mpls_count;
+ return 0;
+}
+
+static int
+encap_pppoe_apply(void *data,
+ struct rte_table_action_encap_params *p)
+{
+ struct encap_pppoe_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->pppoe.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->pppoe.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_PPPOE_SESSION);
+
+ /* PPPoE and PPP*/
+ d->pppoe_ppp.ver_type_code = rte_htons(0x1100);
+ d->pppoe_ppp.session_id = rte_htons(p->pppoe.pppoe.session_id);
+ d->pppoe_ppp.length = 0; /* not pre-computed */
+ d->pppoe_ppp.protocol = rte_htons(PPP_PROTOCOL_IP);
+
+ return 0;
+}
+
+static int
+encap_vxlan_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg)
+{
+ if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
+ (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
+ (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
+ (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
+ (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
+ return -1;
+
+	if (cfg->vxlan.ip_version) {
+		if (cfg->vxlan.vlan) {
+ struct encap_vxlan_ipv4_vlan_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+
+ /* VLAN */
+ d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
+ p->vxlan.vlan.dei,
+ p->vxlan.vlan.vid));
+ d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv4);
+
+ /* IPv4*/
+ d->ipv4.version_ihl = 0x45;
+ d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
+ d->ipv4.total_length = 0; /* not pre-computed */
+ d->ipv4.packet_id = 0;
+ d->ipv4.fragment_offset = 0;
+ d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
+ d->ipv4.next_proto_id = IP_PROTO_UDP;
+ d->ipv4.hdr_checksum = 0;
+ d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
+ d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
+
+ d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ } else {
+ struct encap_vxlan_ipv4_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_IPv4);
+
+ /* IPv4*/
+ d->ipv4.version_ihl = 0x45;
+ d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
+ d->ipv4.total_length = 0; /* not pre-computed */
+ d->ipv4.packet_id = 0;
+ d->ipv4.fragment_offset = 0;
+ d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
+ d->ipv4.next_proto_id = IP_PROTO_UDP;
+ d->ipv4.hdr_checksum = 0;
+ d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
+ d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
+
+ d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ }
+ else
+ if (cfg->vxlan.vlan) {
+ struct encap_vxlan_ipv6_vlan_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+
+ /* VLAN */
+ d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
+ p->vxlan.vlan.dei,
+ p->vxlan.vlan.vid));
+ d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv6);
+
+		/* IPv6 */
+ d->ipv6.vtc_flow = rte_htonl((6 << 28) |
+ (p->vxlan.ipv6.dscp << 22) |
+ p->vxlan.ipv6.flow_label);
+ d->ipv6.payload_len = 0; /* not pre-computed */
+ d->ipv6.proto = IP_PROTO_UDP;
+ d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
+ memcpy(d->ipv6.src_addr,
+ p->vxlan.ipv6.sa,
+ sizeof(p->vxlan.ipv6.sa));
+ memcpy(d->ipv6.dst_addr,
+ p->vxlan.ipv6.da,
+ sizeof(p->vxlan.ipv6.da));
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ } else {
+ struct encap_vxlan_ipv6_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_IPv6);
+
+		/* IPv6 */
+ d->ipv6.vtc_flow = rte_htonl((6 << 28) |
+ (p->vxlan.ipv6.dscp << 22) |
+ p->vxlan.ipv6.flow_label);
+ d->ipv6.payload_len = 0; /* not pre-computed */
+ d->ipv6.proto = IP_PROTO_UDP;
+ d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
+ memcpy(d->ipv6.src_addr,
+ p->vxlan.ipv6.sa,
+ sizeof(p->vxlan.ipv6.sa));
+ memcpy(d->ipv6.dst_addr,
+ p->vxlan.ipv6.da,
+ sizeof(p->vxlan.ipv6.da));
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ }
+}
+
+static int
+encap_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg,
+ struct rte_table_action_common_config *common_cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = encap_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ switch (p->type) {
+ case RTE_TABLE_ACTION_ENCAP_ETHER:
+ return encap_ether_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_VLAN:
+ return encap_vlan_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ:
+ return encap_qinq_apply(data, p, common_cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_MPLS:
+ return encap_mpls_apply(data, p);
+
+ case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ return encap_pppoe_apply(data, p);
+
+ case RTE_TABLE_ACTION_ENCAP_VXLAN:
+ return encap_vxlan_apply(data, p, cfg);
+
+ case RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ return encap_qinq_pppoe_apply(data, p);
+
+ default:
+ return -EINVAL;
+ }
+}
+
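+
+/*
+ * Incremental IPv4 checksum update (RFC 1624 style): the template
+ * stores the checksum computed with total_length == 0, so per packet
+ * only the real total length has to be folded in with one's complement
+ * arithmetic. Worked example (hypothetical values): with a stored
+ * checksum of 0xB861 and a total_length of 0x0073,
+ *   ~0xB861 = 0x479E, 0x479E + 0x0073 = 0x4811, ~0x4811 = 0xB7EE,
+ * which matches recomputing the checksum over the patched header.
+ */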
+static __rte_always_inline uint16_t
+encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
+ uint16_t total_length)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Add total length (one's complement logic) */
+ cksum1 += total_length;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
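+
+/*
+ * Prepends n bytes in front of the current packet data by stepping the
+ * destination pointer back into the mbuf headroom before the copy; the
+ * caller must guarantee that enough headroom exists at the configured
+ * data offset.
+ */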
+static __rte_always_inline void *
+encap(void *dst, const void *src, size_t n)
+{
+ dst = ((uint8_t *) dst) - n;
+ return rte_memcpy(dst, src, n);
+}
+
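+
+/*
+ * Per-packet VXLAN-over-IPv4 encapsulation: the pre-built header
+ * template is prepended in a single copy, then only the three
+ * length-dependent fields are patched: the IPv4 total length, its
+ * incrementally updated header checksum and the UDP datagram length.
+ */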
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv4_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv4_data *vxlan_pkt;
+ uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv4_total_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr) +
+ sizeof(struct ipv4_hdr));
+ ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
+ rte_htons(ipv4_total_length));
+ udp_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
+ vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
+ uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv4_total_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr) +
+ sizeof(struct ipv4_hdr));
+ ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
+ rte_htons(ipv4_total_length));
+ udp_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
+ vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv6_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv6_data *vxlan_pkt;
+ uint16_t ether_length, ipv6_payload_length, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv6_payload_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+ udp_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
+ uint16_t ether_length, ipv6_payload_length, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv6_payload_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+ udp_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
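+
+/*
+ * cfg->encap_mask is validated at profile registration time to hold
+ * exactly one bit (a single encap type per action profile), so the
+ * switch below can dispatch directly on the mask value instead of an
+ * enum.
+ */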
+static __rte_always_inline void
+pkt_work_encap(struct rte_mbuf *mbuf,
+ void *data,
+ struct rte_table_action_encap_config *cfg,
+ void *ip,
+ uint16_t total_length,
+ uint32_t ip_offset)
+{
+ switch (cfg->encap_mask) {
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER:
+ encap(ip, data, sizeof(struct encap_ether_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_ether_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_ether_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN:
+ encap(ip, data, sizeof(struct encap_vlan_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_vlan_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_vlan_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ:
+ encap(ip, data, sizeof(struct encap_qinq_data));
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_qinq_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_qinq_data);
+ break;
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS:
+ {
+ struct encap_mpls_data *mpls = data;
+ size_t size = sizeof(struct ether_hdr) +
+ mpls->mpls_count * 4;
+
+ encap(ip, data, size);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) + size);
+ mbuf->pkt_len = mbuf->data_len = total_length + size;
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
+ {
+ struct encap_pppoe_data *pppoe =
+ encap(ip, data, sizeof(struct encap_pppoe_data));
+ pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_pppoe_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_pppoe_data);
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE:
+ {
+ struct encap_qinq_pppoe_data *qinq_pppoe =
+ encap(ip, data, sizeof(struct encap_qinq_pppoe_data));
+ qinq_pppoe->pppoe_ppp.length = rte_htons(total_length + 2);
+ mbuf->data_off = ip_offset - (sizeof(struct rte_mbuf) +
+ sizeof(struct encap_qinq_pppoe_data));
+ mbuf->pkt_len = mbuf->data_len = total_length +
+ sizeof(struct encap_qinq_pppoe_data);
+ break;
+ }
+
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
+ {
+ if (cfg->vxlan.ip_version)
+ if (cfg->vxlan.vlan)
+ pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
+ else
+ pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
+ else
+ if (cfg->vxlan.vlan)
+ pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
+ else
+ pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
+		break;
+	}
+
+ default:
+ break;
+ }
+}
+
+/**
+ * RTE_TABLE_ACTION_NAT
+ */
+static int
+nat_cfg_check(struct rte_table_action_nat_config *nat)
+{
+ if ((nat->proto != 0x06) &&
+ (nat->proto != 0x11))
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct nat_ipv4_data {
+ uint32_t addr;
+ uint16_t port;
+} __attribute__((__packed__));
+
+struct nat_ipv6_data {
+ uint8_t addr[16];
+ uint16_t port;
+} __attribute__((__packed__));
+
+static size_t
+nat_data_size(struct rte_table_action_nat_config *nat __rte_unused,
+ struct rte_table_action_common_config *common)
+{
+ int ip_version = common->ip_version;
+
+ return (ip_version) ?
+ sizeof(struct nat_ipv4_data) :
+ sizeof(struct nat_ipv6_data);
+}
+
+static int
+nat_apply_check(struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ if ((p->ip_version && (cfg->ip_version == 0)) ||
+ ((p->ip_version == 0) && cfg->ip_version))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nat_apply(void *data,
+ struct rte_table_action_nat_params *p,
+ struct rte_table_action_common_config *cfg)
+{
+ int status;
+
+ /* Check input arguments */
+ status = nat_apply_check(p, cfg);
+ if (status)
+ return status;
+
+ /* Apply */
+ if (p->ip_version) {
+ struct nat_ipv4_data *d = data;
+
+ d->addr = rte_htonl(p->addr.ipv4);
+ d->port = rte_htons(p->port);
+ } else {
+ struct nat_ipv6_data *d = data;
+
+ memcpy(d->addr, p->addr.ipv6, sizeof(d->addr));
+ d->port = rte_htons(p->port);
+ }
+
+ return 0;
+}
+
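+
+/*
+ * Incremental checksum maintenance for NAT rewrites: the old address
+ * (and port) words are subtracted and the new ones added in one's
+ * complement arithmetic, folding the carries twice so the sum fits back
+ * into 16 bits. This avoids a full IPv4/TCP/UDP checksum recompute per
+ * packet.
+ */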
+static __rte_always_inline uint16_t
+nat_ipv4_checksum_update(uint16_t cksum0,
+ uint32_t ip0,
+ uint32_t ip1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 (one's complement logic) */
+ cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 (one's complement logic) */
+ cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline uint16_t
+nat_ipv4_tcp_udp_checksum_update(uint16_t cksum0,
+ uint32_t ip0,
+ uint32_t ip1,
+ uint16_t port0,
+ uint16_t port1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 and port 0 (one's complement logic) */
+ cksum1 -= (ip0 >> 16) + (ip0 & 0xFFFF) + port0;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 and port1 (one's complement logic) */
+ cksum1 += (ip1 >> 16) + (ip1 & 0xFFFF) + port1;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline uint16_t
+nat_ipv6_tcp_udp_checksum_update(uint16_t cksum0,
+ uint16_t *ip0,
+ uint16_t *ip1,
+ uint16_t port0,
+ uint16_t port1)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Subtract ip0 and port 0 (one's complement logic) */
+ cksum1 -= ip0[0] + ip0[1] + ip0[2] + ip0[3] +
+ ip0[4] + ip0[5] + ip0[6] + ip0[7] + port0;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ /* Add ip1 and port1 (one's complement logic) */
+ cksum1 += ip1[0] + ip1[1] + ip1[2] + ip1[3] +
+ ip1[4] + ip1[5] + ip1[6] + ip1[7] + port1;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
+static __rte_always_inline void
+pkt_ipv4_work_nat(struct ipv4_hdr *ip,
+ struct nat_ipv4_data *data,
+ struct rte_table_action_nat_config *cfg)
+{
+ if (cfg->source_nat) {
+ if (cfg->proto == 0x6) {
+ struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ uint16_t ip_cksum, tcp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->src_addr,
+ data->addr);
+
+ tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
+ ip->src_addr,
+ data->addr,
+ tcp->src_port,
+ data->port);
+
+ ip->src_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ tcp->src_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ uint16_t ip_cksum, udp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->src_addr,
+ data->addr);
+
+ udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
+ ip->src_addr,
+ data->addr,
+ udp->src_port,
+ data->port);
+
+ ip->src_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ udp->src_port = data->port;
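+			/* A zero UDP checksum over IPv4 means "no checksum
+			 * present" (RFC 768), so it must not be rewritten.
+			 */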
+ if (udp->dgram_cksum)
+ udp->dgram_cksum = udp_cksum;
+ }
+ } else {
+ if (cfg->proto == 0x6) {
+ struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ uint16_t ip_cksum, tcp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->dst_addr,
+ data->addr);
+
+ tcp_cksum = nat_ipv4_tcp_udp_checksum_update(tcp->cksum,
+ ip->dst_addr,
+ data->addr,
+ tcp->dst_port,
+ data->port);
+
+ ip->dst_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ tcp->dst_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ uint16_t ip_cksum, udp_cksum;
+
+ ip_cksum = nat_ipv4_checksum_update(ip->hdr_checksum,
+ ip->dst_addr,
+ data->addr);
+
+ udp_cksum = nat_ipv4_tcp_udp_checksum_update(udp->dgram_cksum,
+ ip->dst_addr,
+ data->addr,
+ udp->dst_port,
+ data->port);
+
+ ip->dst_addr = data->addr;
+ ip->hdr_checksum = ip_cksum;
+ udp->dst_port = data->port;
+ if (udp->dgram_cksum)
+ udp->dgram_cksum = udp_cksum;
+ }
+ }
+}
+
+static __rte_always_inline void
+pkt_ipv6_work_nat(struct ipv6_hdr *ip,
+ struct nat_ipv6_data *data,
+ struct rte_table_action_nat_config *cfg)
+{
+ if (cfg->source_nat) {
+ if (cfg->proto == 0x6) {
+ struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ uint16_t tcp_cksum;
+
+ tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
+ (uint16_t *)ip->src_addr,
+ (uint16_t *)data->addr,
+ tcp->src_port,
+ data->port);
+
+ rte_memcpy(ip->src_addr, data->addr, 16);
+ tcp->src_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ uint16_t udp_cksum;
+
+ udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
+ (uint16_t *)ip->src_addr,
+ (uint16_t *)data->addr,
+ udp->src_port,
+ data->port);
+
+ rte_memcpy(ip->src_addr, data->addr, 16);
+ udp->src_port = data->port;
+ udp->dgram_cksum = udp_cksum;
+ }
+ } else {
+ if (cfg->proto == 0x6) {
+ struct tcp_hdr *tcp = (struct tcp_hdr *) &ip[1];
+ uint16_t tcp_cksum;
+
+ tcp_cksum = nat_ipv6_tcp_udp_checksum_update(tcp->cksum,
+ (uint16_t *)ip->dst_addr,
+ (uint16_t *)data->addr,
+ tcp->dst_port,
+ data->port);
+
+ rte_memcpy(ip->dst_addr, data->addr, 16);
+ tcp->dst_port = data->port;
+ tcp->cksum = tcp_cksum;
+ } else {
+ struct udp_hdr *udp = (struct udp_hdr *) &ip[1];
+ uint16_t udp_cksum;
+
+ udp_cksum = nat_ipv6_tcp_udp_checksum_update(udp->dgram_cksum,
+ (uint16_t *)ip->dst_addr,
+ (uint16_t *)data->addr,
+ udp->dst_port,
+ data->port);
+
+ rte_memcpy(ip->dst_addr, data->addr, 16);
+ udp->dst_port = data->port;
+ udp->dgram_cksum = udp_cksum;
+ }
+ }
+}
+
+/**
+ * RTE_TABLE_ACTION_TTL
+ */
+static int
+ttl_cfg_check(struct rte_table_action_ttl_config *ttl)
+{
+ if (ttl->drop == 0)
+ return -ENOTSUP;
+
+ return 0;
+}
+
+struct ttl_data {
+ uint32_t n_packets;
+} __attribute__((__packed__));
+
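+
+/*
+ * The 32-bit n_packets field packs two things: bit 0 is the TTL
+ * decrement flag (0 = keep TTL, 1 = decrement by one) and bits 31..1
+ * count the packets dropped on TTL expiry, which is why the stats
+ * accessors shift by one and the reset preserves bit 0.
+ */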
+#define TTL_INIT(data, decrement) \
+ ((data)->n_packets = (decrement) ? 1 : 0)
+
+#define TTL_DEC_GET(data) \
+ ((uint8_t)((data)->n_packets & 1))
+
+#define TTL_STATS_RESET(data) \
+ ((data)->n_packets = ((data)->n_packets & 1))
+
+#define TTL_STATS_READ(data) \
+ ((data)->n_packets >> 1)
+
+#define TTL_STATS_ADD(data, value) \
+ ((data)->n_packets = \
+ (((((data)->n_packets >> 1) + (value)) << 1) | \
+ ((data)->n_packets & 1)))
+
+static int
+ttl_apply(void *data,
+ struct rte_table_action_ttl_params *p)
+{
+ struct ttl_data *d = data;
+
+ TTL_INIT(d, p->decrement);
+
+ return 0;
+}
+
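+
+/*
+ * The TTL decrement is branchless (ttl_diff is 0 or 1, straight from
+ * the table entry flag) and the header checksum is patched with an
+ * incremental adjustment (RFC 1141 style) instead of being recomputed.
+ */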
+static __rte_always_inline uint64_t
+pkt_ipv4_work_ttl(struct ipv4_hdr *ip,
+ struct ttl_data *data)
+{
+ uint32_t drop;
+ uint16_t cksum = ip->hdr_checksum;
+ uint8_t ttl = ip->time_to_live;
+ uint8_t ttl_diff = TTL_DEC_GET(data);
+
+ cksum += ttl_diff;
+ ttl -= ttl_diff;
+
+ ip->hdr_checksum = cksum;
+ ip->time_to_live = ttl;
+
+ drop = (ttl == 0) ? 1 : 0;
+ TTL_STATS_ADD(data, drop);
+
+ return drop;
+}
+
+static __rte_always_inline uint64_t
+pkt_ipv6_work_ttl(struct ipv6_hdr *ip,
+ struct ttl_data *data)
+{
+ uint32_t drop;
+ uint8_t ttl = ip->hop_limits;
+ uint8_t ttl_diff = TTL_DEC_GET(data);
+
+ ttl -= ttl_diff;
+
+ ip->hop_limits = ttl;
+
+ drop = (ttl == 0) ? 1 : 0;
+ TTL_STATS_ADD(data, drop);
+
+ return drop;
+}
+
+/**
+ * RTE_TABLE_ACTION_STATS
+ */
+static int
+stats_cfg_check(struct rte_table_action_stats_config *stats)
+{
+ if ((stats->n_packets_enabled == 0) && (stats->n_bytes_enabled == 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+struct stats_data {
+ uint64_t n_packets;
+ uint64_t n_bytes;
+} __attribute__((__packed__));
+
+static int
+stats_apply(struct stats_data *data,
+ struct rte_table_action_stats_params *p)
+{
+ data->n_packets = p->n_packets;
+ data->n_bytes = p->n_bytes;
+
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_stats(struct stats_data *data,
+ uint16_t total_length)
+{
+ data->n_packets++;
+ data->n_bytes += total_length;
+}
+
+/**
+ * RTE_TABLE_ACTION_TIME
+ */
+struct time_data {
+ uint64_t time;
+} __attribute__((__packed__));
+
+static int
+time_apply(struct time_data *data,
+ struct rte_table_action_time_params *p)
+{
+ data->time = p->time;
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_time(struct time_data *data,
+ uint64_t time)
+{
+ data->time = time;
+}
+
+
+/**
+ * RTE_TABLE_ACTION_CRYPTO
+ */
+
+#define CRYPTO_OP_MASK_CIPHER 0x1
+#define CRYPTO_OP_MASK_AUTH 0x2
+#define CRYPTO_OP_MASK_AEAD 0x4
+
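+
+/*
+ * The crypto operation, the symmetric op and the per-packet IV/AAD
+ * scratch area live back to back inside the mbuf metadata at
+ * cfg->op_offset, so a table hit can assemble a complete rte_crypto_op
+ * in place with no per-packet allocation.
+ */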
+struct crypto_op_sym_iv_aad {
+ struct rte_crypto_op op;
+ struct rte_crypto_sym_op sym_op;
+ union {
+ struct {
+ uint8_t cipher_iv[
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ uint8_t auth_iv[
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ } cipher_auth;
+
+ struct {
+ uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
+ } aead_iv_aad;
+
+ } iv_aad;
+};
+
+struct sym_crypto_data {
+
+ union {
+ struct {
+
+ /** Length of cipher iv. */
+ uint16_t cipher_iv_len;
+
+ /** Offset from start of IP header to the cipher iv. */
+ uint16_t cipher_iv_data_offset;
+
+ /** Length of cipher iv to be updated in the mbuf. */
+ uint16_t cipher_iv_update_len;
+
+ /** Offset from start of IP header to the auth iv. */
+ uint16_t auth_iv_data_offset;
+
+ /** Length of auth iv in the mbuf. */
+ uint16_t auth_iv_len;
+
+ /** Length of auth iv to be updated in the mbuf. */
+ uint16_t auth_iv_update_len;
+
+ } cipher_auth;
+ struct {
+
+ /** Length of iv. */
+ uint16_t iv_len;
+
+ /** Offset from start of IP header to the aead iv. */
+ uint16_t iv_data_offset;
+
+ /** Length of iv to be updated in the mbuf. */
+ uint16_t iv_update_len;
+
+ /** Length of aad */
+ uint16_t aad_len;
+
+ /** Offset from start of IP header to the aad. */
+ uint16_t aad_data_offset;
+
+		/** Length of aad to be updated in the mbuf. */
+ uint16_t aad_update_len;
+
+ } aead;
+ };
+
+ /** Offset from start of IP header to the data. */
+ uint16_t data_offset;
+
+ /** Digest length. */
+ uint16_t digest_len;
+
+ /** block size */
+ uint16_t block_size;
+
+ /** Mask of crypto operation */
+ uint16_t op_mask;
+
+ /** Session pointer. */
+ struct rte_cryptodev_sym_session *session;
+
+ /** Direction of crypto, encrypt or decrypt */
+ uint16_t direction;
+
+	/** Private data to store cipher iv / aad. */
+ uint8_t iv_aad_data[32];
+
+} __attribute__((__packed__));
+
+static int
+sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
+{
+ if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id))
+ return -EINVAL;
+ if (cfg->mp_create == NULL || cfg->mp_init == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
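+
+/*
+ * Scans the device capability list for the cipher/AEAD algorithm's
+ * block size, which is needed later to pad encrypted payloads up to a
+ * block multiple.
+ */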
+static int
+get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
+{
+ struct rte_cryptodev_info dev_info;
+ const struct rte_cryptodev_capabilities *cap;
+ uint32_t i;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+
+ for (i = 0; dev_info.capabilities[i].op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ i++) {
+ cap = &dev_info.capabilities[i];
+
+ if (cap->sym.xform_type != xform->type)
+ continue;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (cap->sym.cipher.algo == xform->cipher.algo))
+ return cap->sym.cipher.block_size;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
+ (cap->sym.aead.algo == xform->aead.algo))
+ return cap->sym.aead.block_size;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
+ break;
+ }
+
+ return -1;
+}
+
+static int
+sym_crypto_apply(struct sym_crypto_data *data,
+ struct rte_table_action_sym_crypto_config *cfg,
+ struct rte_table_action_sym_crypto_params *p)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ struct rte_crypto_sym_xform *xform = p->xform;
+ struct rte_cryptodev_sym_session *session;
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+
+ while (xform) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ cipher_xform = &xform->cipher;
+
+ if (cipher_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+ return -ENOMEM;
+ if (cipher_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
+ return -EINVAL;
+
+ ret = get_block_size(xform, cfg->cryptodev_id);
+ if (ret < 0)
+ return -1;
+ data->block_size = (uint16_t)ret;
+ data->op_mask |= CRYPTO_OP_MASK_CIPHER;
+
+ data->cipher_auth.cipher_iv_len =
+ cipher_xform->iv.length;
+ data->cipher_auth.cipher_iv_data_offset = (uint16_t)
+ p->cipher_auth.cipher_iv_update.offset;
+ data->cipher_auth.cipher_iv_update_len = (uint16_t)
+ p->cipher_auth.cipher_iv_update.length;
+
+ rte_memcpy(data->iv_aad_data,
+ p->cipher_auth.cipher_iv.val,
+ p->cipher_auth.cipher_iv.length);
+
+ data->direction = cipher_xform->op;
+
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = &xform->auth;
+ if (auth_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+ return -ENOMEM;
+ data->op_mask |= CRYPTO_OP_MASK_AUTH;
+
+ data->cipher_auth.auth_iv_len = auth_xform->iv.length;
+ data->cipher_auth.auth_iv_data_offset = (uint16_t)
+ p->cipher_auth.auth_iv_update.offset;
+ data->cipher_auth.auth_iv_update_len = (uint16_t)
+ p->cipher_auth.auth_iv_update.length;
+ data->digest_len = auth_xform->digest_length;
+
+ data->direction = (auth_xform->op ==
+ RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = &xform->aead;
+
+ if ((aead_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
+ aead_xform->aad_length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
+ return -EINVAL;
+ if (aead_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
+ return -EINVAL;
+
+ ret = get_block_size(xform, cfg->cryptodev_id);
+ if (ret < 0)
+ return -1;
+ data->block_size = (uint16_t)ret;
+ data->op_mask |= CRYPTO_OP_MASK_AEAD;
+
+ data->digest_len = aead_xform->digest_length;
+ data->aead.iv_len = aead_xform->iv.length;
+ data->aead.aad_len = aead_xform->aad_length;
+
+ data->aead.iv_data_offset = (uint16_t)
+ p->aead.iv_update.offset;
+ data->aead.iv_update_len = (uint16_t)
+ p->aead.iv_update.length;
+ data->aead.aad_data_offset = (uint16_t)
+ p->aead.aad_update.offset;
+ data->aead.aad_update_len = (uint16_t)
+ p->aead.aad_update.length;
+
+ rte_memcpy(data->iv_aad_data,
+ p->aead.iv.val,
+ p->aead.iv.length);
+
+ rte_memcpy(data->iv_aad_data + p->aead.iv.length,
+ p->aead.aad.val,
+ p->aead.aad.length);
+
+ data->direction = (aead_xform->op ==
+ RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ } else
+ return -EINVAL;
+
+ xform = xform->next;
+ }
+
+ if (auth_xform && auth_xform->iv.length) {
+ if (cipher_xform) {
+ if (auth_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
+ cipher_xform->iv.length)
+ return -EINVAL;
+
+ rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
+ p->cipher_auth.auth_iv.val,
+ p->cipher_auth.auth_iv.length);
+ } else {
+ rte_memcpy(data->iv_aad_data,
+ p->cipher_auth.auth_iv.val,
+ p->cipher_auth.auth_iv.length);
+ }
+ }
+
+ session = rte_cryptodev_sym_session_create(cfg->mp_create);
+ if (!session)
+ return -ENOMEM;
+
+ ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
+ p->xform, cfg->mp_init);
+ if (ret < 0) {
+ rte_cryptodev_sym_session_free(session);
+ return ret;
+ }
+
+ data->data_offset = (uint16_t)p->data_offset;
+ data->session = session;
+
+ return 0;
+}
+
+static __rte_always_inline uint64_t
+pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
+ struct rte_table_action_sym_crypto_config *cfg,
+ uint16_t ip_offset)
+{
+ struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
+ RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
+ struct rte_crypto_op *op = &crypto_op->op;
+ struct rte_crypto_sym_op *sym = op->sym;
+ uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
+ uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
+
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ sym->m_src = mbuf;
+ sym->m_dst = NULL;
+ sym->session = data->session;
+
+ /** pad the packet */
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
+ data->block_size) - payload_len;
+
+ if (unlikely(rte_pktmbuf_append(mbuf, append_len +
+ data->digest_len) == NULL))
+ return 1;
+
+ payload_len += append_len;
+ } else
+ payload_len -= data->digest_len;
+
+ if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
+ /** prepare cipher op */
+ uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
+
+ sym->cipher.data.length = payload_len;
+ sym->cipher.data.offset = data->data_offset - pkt_offset;
+
+ if (data->cipher_auth.cipher_iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->cipher_auth.cipher_iv_data_offset
+ + ip_offset);
+
+			/** For encryption, update the packet iv field;
+			 * otherwise refresh the iv_aad_data copy from the
+			 * packet.
+			 **/
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data->iv_aad_data,
+ data->cipher_auth.cipher_iv_update_len);
+ else
+ rte_memcpy(data->iv_aad_data, pkt_iv,
+ data->cipher_auth.cipher_iv_update_len);
+ }
+
+ /** write iv */
+ rte_memcpy(iv, data->iv_aad_data,
+ data->cipher_auth.cipher_iv_len);
+ }
+
+ if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
+		/** Authentication always starts from the IP header. */
+ sym->auth.data.offset = ip_offset - pkt_offset;
+ sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
+ data->digest_len;
+ sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+ uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+ data->digest_len);
+ sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+
+ if (data->cipher_auth.auth_iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->cipher_auth.auth_iv_data_offset
+ + ip_offset);
+ uint8_t *data_iv = data->iv_aad_data +
+ data->cipher_auth.cipher_iv_len;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data_iv,
+ data->cipher_auth.auth_iv_update_len);
+ else
+ rte_memcpy(data_iv, pkt_iv,
+ data->cipher_auth.auth_iv_update_len);
+ }
+
+ if (data->cipher_auth.auth_iv_len) {
+ /** prepare cipher op */
+ uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
+
+ rte_memcpy(iv, data->iv_aad_data +
+ data->cipher_auth.cipher_iv_len,
+ data->cipher_auth.auth_iv_len);
+ }
+ }
+
+ if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
+ uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
+ uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
+
+ sym->aead.aad.data = aad;
+ sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
+ sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+ uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+ data->digest_len);
+ sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+ sym->aead.data.offset = data->data_offset - pkt_offset;
+ sym->aead.data.length = payload_len;
+
+ if (data->aead.iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->aead.iv_data_offset + ip_offset);
+ uint8_t *data_iv = data->iv_aad_data;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data_iv,
+ data->aead.iv_update_len);
+ else
+ rte_memcpy(data_iv, pkt_iv,
+ data->aead.iv_update_len);
+ }
+
+ rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
+
+ if (data->aead.aad_update_len) {
+ uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->aead.aad_data_offset + ip_offset);
+ uint8_t *data_aad = data->iv_aad_data +
+ data->aead.iv_len;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+				rte_memcpy(pkt_aad, data_aad,
+						data->aead.aad_update_len);
+			else
+				rte_memcpy(data_aad, pkt_aad,
+						data->aead.aad_update_len);
+ }
+
+ rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
+ data->aead.aad_len);
+ }
+
+ return 0;
+}
+
+/**
+ * RTE_TABLE_ACTION_TAG
+ */
+struct tag_data {
+ uint32_t tag;
+} __attribute__((__packed__));
+
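+
+/*
+ * The 32-bit tag is written to the mbuf flow director slot
+ * (hash.fdir.hi) and advertised via PKT_RX_FDIR | PKT_RX_FDIR_ID, so
+ * later stages can consume it exactly like a hardware FDIR match ID.
+ */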
+static int
+tag_apply(struct tag_data *data,
+ struct rte_table_action_tag_params *p)
+{
+ data->tag = p->tag;
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_tag(struct rte_mbuf *mbuf,
+ struct tag_data *data)
+{
+ mbuf->hash.fdir.hi = data->tag;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+}
+
+static __rte_always_inline void
+pkt4_work_tag(struct rte_mbuf *mbuf0,
+ struct rte_mbuf *mbuf1,
+ struct rte_mbuf *mbuf2,
+ struct rte_mbuf *mbuf3,
+ struct tag_data *data0,
+ struct tag_data *data1,
+ struct tag_data *data2,
+ struct tag_data *data3)
+{
+ mbuf0->hash.fdir.hi = data0->tag;
+ mbuf1->hash.fdir.hi = data1->tag;
+ mbuf2->hash.fdir.hi = data2->tag;
+ mbuf3->hash.fdir.hi = data3->tag;
+
+ mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+}
+
+/**
+ * RTE_TABLE_ACTION_DECAP
+ */
+struct decap_data {
+ uint16_t n;
+} __attribute__((__packed__));
+
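+
+/*
+ * Decapsulation strips n bytes from the front of the frame by advancing
+ * data_off and shrinking both length fields; this assumes the headers
+ * to strip live in the first (and only) mbuf segment.
+ */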
+static int
+decap_apply(struct decap_data *data,
+ struct rte_table_action_decap_params *p)
+{
+ data->n = p->n;
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_decap(struct rte_mbuf *mbuf,
+ struct decap_data *data)
+{
+ uint16_t data_off = mbuf->data_off;
+ uint16_t data_len = mbuf->data_len;
+ uint32_t pkt_len = mbuf->pkt_len;
+ uint16_t n = data->n;
+
+ mbuf->data_off = data_off + n;
+ mbuf->data_len = data_len - n;
+ mbuf->pkt_len = pkt_len - n;
+}
+
+static __rte_always_inline void
+pkt4_work_decap(struct rte_mbuf *mbuf0,
+ struct rte_mbuf *mbuf1,
+ struct rte_mbuf *mbuf2,
+ struct rte_mbuf *mbuf3,
+ struct decap_data *data0,
+ struct decap_data *data1,
+ struct decap_data *data2,
+ struct decap_data *data3)
+{
+ uint16_t data_off0 = mbuf0->data_off;
+ uint16_t data_len0 = mbuf0->data_len;
+ uint32_t pkt_len0 = mbuf0->pkt_len;
+
+ uint16_t data_off1 = mbuf1->data_off;
+ uint16_t data_len1 = mbuf1->data_len;
+ uint32_t pkt_len1 = mbuf1->pkt_len;
+
+ uint16_t data_off2 = mbuf2->data_off;
+ uint16_t data_len2 = mbuf2->data_len;
+ uint32_t pkt_len2 = mbuf2->pkt_len;
+
+ uint16_t data_off3 = mbuf3->data_off;
+ uint16_t data_len3 = mbuf3->data_len;
+ uint32_t pkt_len3 = mbuf3->pkt_len;
+
+ uint16_t n0 = data0->n;
+ uint16_t n1 = data1->n;
+ uint16_t n2 = data2->n;
+ uint16_t n3 = data3->n;
+
+ mbuf0->data_off = data_off0 + n0;
+ mbuf0->data_len = data_len0 - n0;
+ mbuf0->pkt_len = pkt_len0 - n0;
+
+ mbuf1->data_off = data_off1 + n1;
+ mbuf1->data_len = data_len1 - n1;
+ mbuf1->pkt_len = pkt_len1 - n1;
+
+ mbuf2->data_off = data_off2 + n2;
+ mbuf2->data_len = data_len2 - n2;
+ mbuf2->pkt_len = pkt_len2 - n2;
+
+ mbuf3->data_off = data_off3 + n3;
+ mbuf3->data_len = data_len3 - n3;
+ mbuf3->pkt_len = pkt_len3 - n3;
+}
+
+/**
+ * Action profile
+ */
+static int
+action_valid(enum rte_table_action_type action)
+{
+ switch (action) {
+ case RTE_TABLE_ACTION_FWD:
+ case RTE_TABLE_ACTION_LB:
+ case RTE_TABLE_ACTION_MTR:
+ case RTE_TABLE_ACTION_TM:
+ case RTE_TABLE_ACTION_ENCAP:
+ case RTE_TABLE_ACTION_NAT:
+ case RTE_TABLE_ACTION_TTL:
+ case RTE_TABLE_ACTION_STATS:
+ case RTE_TABLE_ACTION_TIME:
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ case RTE_TABLE_ACTION_TAG:
+ case RTE_TABLE_ACTION_DECAP:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+#define RTE_TABLE_ACTION_MAX 64
+
+struct ap_config {
+ uint64_t action_mask;
+ struct rte_table_action_common_config common;
+ struct rte_table_action_lb_config lb;
+ struct rte_table_action_mtr_config mtr;
+ struct rte_table_action_tm_config tm;
+ struct rte_table_action_encap_config encap;
+ struct rte_table_action_nat_config nat;
+ struct rte_table_action_ttl_config ttl;
+ struct rte_table_action_stats_config stats;
+ struct rte_table_action_sym_crypto_config sym_crypto;
+};
+
+static size_t
+action_cfg_size(enum rte_table_action_type action)
+{
+ switch (action) {
+ case RTE_TABLE_ACTION_LB:
+ return sizeof(struct rte_table_action_lb_config);
+ case RTE_TABLE_ACTION_MTR:
+ return sizeof(struct rte_table_action_mtr_config);
+ case RTE_TABLE_ACTION_TM:
+ return sizeof(struct rte_table_action_tm_config);
+ case RTE_TABLE_ACTION_ENCAP:
+ return sizeof(struct rte_table_action_encap_config);
+ case RTE_TABLE_ACTION_NAT:
+ return sizeof(struct rte_table_action_nat_config);
+ case RTE_TABLE_ACTION_TTL:
+ return sizeof(struct rte_table_action_ttl_config);
+ case RTE_TABLE_ACTION_STATS:
+ return sizeof(struct rte_table_action_stats_config);
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ return sizeof(struct rte_table_action_sym_crypto_config);
+ default:
+ return 0;
+ }
+}
+
+static void*
+action_cfg_get(struct ap_config *ap_config,
+ enum rte_table_action_type type)
+{
+ switch (type) {
+ case RTE_TABLE_ACTION_LB:
+ return &ap_config->lb;
+
+ case RTE_TABLE_ACTION_MTR:
+ return &ap_config->mtr;
+
+ case RTE_TABLE_ACTION_TM:
+ return &ap_config->tm;
+
+ case RTE_TABLE_ACTION_ENCAP:
+ return &ap_config->encap;
+
+ case RTE_TABLE_ACTION_NAT:
+ return &ap_config->nat;
+
+ case RTE_TABLE_ACTION_TTL:
+ return &ap_config->ttl;
+
+ case RTE_TABLE_ACTION_STATS:
+ return &ap_config->stats;
+
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ return &ap_config->sym_crypto;
+ default:
+ return NULL;
+ }
+}
+
+static void
+action_cfg_set(struct ap_config *ap_config,
+ enum rte_table_action_type type,
+ void *action_cfg)
+{
+ void *dst = action_cfg_get(ap_config, type);
+
+ if (dst)
+ memcpy(dst, action_cfg, action_cfg_size(type));
+
+ ap_config->action_mask |= 1LLU << type;
+}
+
+struct ap_data {
+ size_t offset[RTE_TABLE_ACTION_MAX];
+ size_t total_size;
+};
+
+static size_t
+action_data_size(enum rte_table_action_type action,
+ struct ap_config *ap_config)
+{
+ switch (action) {
+ case RTE_TABLE_ACTION_FWD:
+ return sizeof(struct fwd_data);
+
+ case RTE_TABLE_ACTION_LB:
+ return sizeof(struct lb_data);
+
+ case RTE_TABLE_ACTION_MTR:
+ return mtr_data_size(&ap_config->mtr);
+
+ case RTE_TABLE_ACTION_TM:
+ return sizeof(struct tm_data);
+
+ case RTE_TABLE_ACTION_ENCAP:
+ return encap_data_size(&ap_config->encap);
+
+ case RTE_TABLE_ACTION_NAT:
+ return nat_data_size(&ap_config->nat,
+ &ap_config->common);
+
+ case RTE_TABLE_ACTION_TTL:
+ return sizeof(struct ttl_data);
+
+ case RTE_TABLE_ACTION_STATS:
+ return sizeof(struct stats_data);
+
+ case RTE_TABLE_ACTION_TIME:
+ return sizeof(struct time_data);
+
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ return (sizeof(struct sym_crypto_data));
+
+ case RTE_TABLE_ACTION_TAG:
+ return sizeof(struct tag_data);
+
+ case RTE_TABLE_ACTION_DECAP:
+ return sizeof(struct decap_data);
+
+ default:
+ return 0;
+ }
+}
+
+
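+/*
+ * Per-table-entry action data is one contiguous blob: each enabled
+ * action is assigned a slice here, in ascending action type order, and
+ * total_size tells the caller how much room to reserve per table entry.
+ */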
+static void
+action_data_offset_set(struct ap_data *ap_data,
+ struct ap_config *ap_config)
+{
+ uint64_t action_mask = ap_config->action_mask;
+ size_t offset;
+ uint32_t action;
+
+ memset(ap_data->offset, 0, sizeof(ap_data->offset));
+
+ offset = 0;
+ for (action = 0; action < RTE_TABLE_ACTION_MAX; action++)
+ if (action_mask & (1LLU << action)) {
+ ap_data->offset[action] = offset;
+ offset += action_data_size((enum rte_table_action_type)action,
+ ap_config);
+ }
+
+ ap_data->total_size = offset;
+}
+
+struct rte_table_action_profile {
+ struct ap_config cfg;
+ struct ap_data data;
+ int frozen;
+};
+
+struct rte_table_action_profile *
+rte_table_action_profile_create(struct rte_table_action_common_config *common)
+{
+ struct rte_table_action_profile *ap;
+
+ /* Check input arguments */
+ if (common == NULL)
+ return NULL;
+
+ /* Memory allocation */
+ ap = calloc(1, sizeof(struct rte_table_action_profile));
+ if (ap == NULL)
+ return NULL;
+
+ /* Initialization */
+ memcpy(&ap->cfg.common, common, sizeof(*common));
+
+ return ap;
+}
+
+
+int
+rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
+ enum rte_table_action_type type,
+ void *action_config)
+{
+ int status;
+
+ /* Check input arguments */
+ if ((profile == NULL) ||
+ profile->frozen ||
+ (action_valid(type) == 0) ||
+ (profile->cfg.action_mask & (1LLU << type)) ||
+ ((action_cfg_size(type) == 0) && action_config) ||
+ (action_cfg_size(type) && (action_config == NULL)))
+ return -EINVAL;
+
+ switch (type) {
+ case RTE_TABLE_ACTION_LB:
+ status = lb_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_MTR:
+ status = mtr_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_TM:
+ status = tm_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_ENCAP:
+ status = encap_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_NAT:
+ status = nat_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_TTL:
+ status = ttl_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_STATS:
+ status = stats_cfg_check(action_config);
+ break;
+
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ status = sym_crypto_cfg_check(action_config);
+ break;
+
+ default:
+ status = 0;
+ break;
+ }
+
+ if (status)
+ return status;
+
+ /* Action enable */
+ action_cfg_set(&profile->cfg, type, action_config);
+
+ return 0;
+}
+
+int
+rte_table_action_profile_freeze(struct rte_table_action_profile *profile)
+{
+ if (profile->frozen)
+ return -EBUSY;
+
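+	/* The forward action is mandatory, so it is force-enabled before
+	 * the per-action data offsets are fixed for good.
+	 */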
+ profile->cfg.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD;
+ action_data_offset_set(&profile->data, &profile->cfg);
+ profile->frozen = 1;
+
+ return 0;
+}
+
+int
+rte_table_action_profile_free(struct rte_table_action_profile *profile)
+{
+ if (profile == NULL)
+ return 0;
+
+ free(profile);
+ return 0;
+}
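+
+/*
+ * Typical profile lifecycle (minimal sketch; the ip_offset value below
+ * is just an illustrative placeholder):
+ *
+ *	struct rte_table_action_common_config common = {
+ *		.ip_version = 1,	// IPv4
+ *		.ip_offset = 270,	// hypothetical metadata offset
+ *	};
+ *	struct rte_table_action_profile *ap;
+ *
+ *	ap = rte_table_action_profile_create(&common);
+ *	rte_table_action_profile_action_register(ap,
+ *		RTE_TABLE_ACTION_FWD, NULL);
+ *	rte_table_action_profile_freeze(ap);
+ *	// ... rte_table_action_create(ap, socket_id) per socket ...
+ *	rte_table_action_profile_free(ap);
+ */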
+
+/**
+ * Action
+ */
+#define METER_PROFILES_MAX 32
+
+struct rte_table_action {
+ struct ap_config cfg;
+ struct ap_data data;
+ struct dscp_table_data dscp_table;
+ struct meter_profile_data mp[METER_PROFILES_MAX];
+};
+
+struct rte_table_action *
+rte_table_action_create(struct rte_table_action_profile *profile,
+ uint32_t socket_id)
+{
+ struct rte_table_action *action;
+
+ /* Check input arguments */
+ if ((profile == NULL) ||
+ (profile->frozen == 0))
+ return NULL;
+
+ /* Memory allocation */
+ action = rte_zmalloc_socket(NULL,
+ sizeof(struct rte_table_action),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (action == NULL)
+ return NULL;
+
+ /* Initialization */
+ memcpy(&action->cfg, &profile->cfg, sizeof(profile->cfg));
+ memcpy(&action->data, &profile->data, sizeof(profile->data));
+
+ return action;
+}
+
+static __rte_always_inline void *
+action_data_get(void *data,
+ struct rte_table_action *action,
+ enum rte_table_action_type type)
+{
+ size_t offset = action->data.offset[type];
+ uint8_t *data_bytes = data;
+
+ return &data_bytes[offset];
+}
+
+int
+rte_table_action_apply(struct rte_table_action *action,
+ void *data,
+ enum rte_table_action_type type,
+ void *action_params)
+{
+ void *action_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ (data == NULL) ||
+ (action_valid(type) == 0) ||
+ ((action->cfg.action_mask & (1LLU << type)) == 0) ||
+ (action_params == NULL))
+ return -EINVAL;
+
+ /* Data update */
+ action_data = action_data_get(data, action, type);
+
+ switch (type) {
+ case RTE_TABLE_ACTION_FWD:
+ return fwd_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_LB:
+ return lb_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_MTR:
+ return mtr_apply(action_data,
+ action_params,
+ &action->cfg.mtr,
+ action->mp,
+ RTE_DIM(action->mp));
+
+ case RTE_TABLE_ACTION_TM:
+ return tm_apply(action_data,
+ action_params,
+ &action->cfg.tm);
+
+ case RTE_TABLE_ACTION_ENCAP:
+ return encap_apply(action_data,
+ action_params,
+ &action->cfg.encap,
+ &action->cfg.common);
+
+ case RTE_TABLE_ACTION_NAT:
+ return nat_apply(action_data,
+ action_params,
+ &action->cfg.common);
+
+ case RTE_TABLE_ACTION_TTL:
+ return ttl_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_STATS:
+ return stats_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_TIME:
+ return time_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ return sym_crypto_apply(action_data,
+ &action->cfg.sym_crypto,
+ action_params);
+
+ case RTE_TABLE_ACTION_TAG:
+ return tag_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_DECAP:
+ return decap_apply(action_data,
+ action_params);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int
+rte_table_action_dscp_table_update(struct rte_table_action *action,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *table)
+{
+ uint32_t i;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
+ (1LLU << RTE_TABLE_ACTION_TM))) == 0) ||
+ (dscp_mask == 0) ||
+ (table == NULL))
+ return -EINVAL;
+
+ for (i = 0; i < RTE_DIM(table->entry); i++) {
+ struct dscp_table_entry_data *data =
+ &action->dscp_table.entry[i];
+ struct rte_table_action_dscp_table_entry *entry =
+ &table->entry[i];
+
+ if ((dscp_mask & (1LLU << i)) == 0)
+ continue;
+
+ data->color = entry->color;
+ data->tc = entry->tc_id;
+ data->tc_queue = entry->tc_queue_id;
+ }
+
+ return 0;
+}
+
+int
+rte_table_action_meter_profile_add(struct rte_table_action *action,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile)
+{
+ struct meter_profile_data *mp_data;
+	int status;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
+ (profile == NULL))
+ return -EINVAL;
+
+ if (profile->alg != RTE_TABLE_ACTION_METER_TRTCM)
+ return -ENOTSUP;
+
+ mp_data = meter_profile_data_find(action->mp,
+ RTE_DIM(action->mp),
+ meter_profile_id);
+ if (mp_data)
+ return -EEXIST;
+
+ mp_data = meter_profile_data_find_unused(action->mp,
+ RTE_DIM(action->mp));
+ if (!mp_data)
+ return -ENOSPC;
+
+ /* Install new profile */
+ status = rte_meter_trtcm_profile_config(&mp_data->profile,
+ &profile->trtcm);
+ if (status)
+ return status;
+
+ mp_data->profile_id = meter_profile_id;
+ mp_data->valid = 1;
+
+ return 0;
+}
+
+int
+rte_table_action_meter_profile_delete(struct rte_table_action *action,
+ uint32_t meter_profile_id)
+{
+ struct meter_profile_data *mp_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0))
+ return -EINVAL;
+
+ mp_data = meter_profile_data_find(action->mp,
+ RTE_DIM(action->mp),
+ meter_profile_id);
+ if (!mp_data)
+ return 0;
+
+ /* Uninstall profile */
+ mp_data->valid = 0;
+
+ return 0;
+}
+
+int
+rte_table_action_meter_read(struct rte_table_action *action,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear)
+{
+ struct mtr_trtcm_data *mtr_data;
+ uint32_t i;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) ||
+ (data == NULL) ||
+ (tc_mask > RTE_LEN2MASK(action->cfg.mtr.n_tc, uint32_t)))
+ return -EINVAL;
+
+ mtr_data = action_data_get(data, action, RTE_TABLE_ACTION_MTR);
+
+ /* Read */
+ if (stats) {
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct rte_table_action_mtr_counters_tc *dst =
+ &stats->stats[i];
+ struct mtr_trtcm_data *src = &mtr_data[i];
+
+ if ((tc_mask & (1 << i)) == 0)
+ continue;
+
+ dst->n_packets[RTE_COLOR_GREEN] =
+ mtr_trtcm_data_stats_get(src, RTE_COLOR_GREEN);
+
+ dst->n_packets[RTE_COLOR_YELLOW] =
+ mtr_trtcm_data_stats_get(src, RTE_COLOR_YELLOW);
+
+ dst->n_packets[RTE_COLOR_RED] =
+ mtr_trtcm_data_stats_get(src, RTE_COLOR_RED);
+
+ dst->n_packets_valid = 1;
+ dst->n_bytes_valid = 0;
+ }
+
+ stats->tc_mask = tc_mask;
+ }
+
+ /* Clear */
+ if (clear)
+ for (i = 0; i < RTE_TABLE_ACTION_TC_MAX; i++) {
+ struct mtr_trtcm_data *src = &mtr_data[i];
+
+ if ((tc_mask & (1 << i)) == 0)
+ continue;
+
+ mtr_trtcm_data_stats_reset(src, RTE_COLOR_GREEN);
+ mtr_trtcm_data_stats_reset(src, RTE_COLOR_YELLOW);
+ mtr_trtcm_data_stats_reset(src, RTE_COLOR_RED);
+ }
+
+
+ return 0;
+}
+
+int
+rte_table_action_ttl_read(struct rte_table_action *action,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear)
+{
+ struct ttl_data *ttl_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask &
+ (1LLU << RTE_TABLE_ACTION_TTL)) == 0) ||
+ (data == NULL))
+ return -EINVAL;
+
+ ttl_data = action_data_get(data, action, RTE_TABLE_ACTION_TTL);
+
+ /* Read */
+ if (stats)
+ stats->n_packets = TTL_STATS_READ(ttl_data);
+
+ /* Clear */
+ if (clear)
+ TTL_STATS_RESET(ttl_data);
+
+ return 0;
+}
+
+int
+rte_table_action_stats_read(struct rte_table_action *action,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear)
+{
+ struct stats_data *stats_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask &
+ (1LLU << RTE_TABLE_ACTION_STATS)) == 0) ||
+ (data == NULL))
+ return -EINVAL;
+
+ stats_data = action_data_get(data, action,
+ RTE_TABLE_ACTION_STATS);
+
+ /* Read */
+ if (stats) {
+ stats->n_packets = stats_data->n_packets;
+ stats->n_bytes = stats_data->n_bytes;
+ stats->n_packets_valid = 1;
+ stats->n_bytes_valid = 1;
+ }
+
+ /* Clear */
+ if (clear) {
+ stats_data->n_packets = 0;
+ stats_data->n_bytes = 0;
+ }
+
+ return 0;
+}
+
+int
+rte_table_action_time_read(struct rte_table_action *action,
+ void *data,
+ uint64_t *timestamp)
+{
+ struct time_data *time_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask &
+ (1LLU << RTE_TABLE_ACTION_TIME)) == 0) ||
+ (data == NULL) ||
+ (timestamp == NULL))
+ return -EINVAL;
+
+ time_data = action_data_get(data, action, RTE_TABLE_ACTION_TIME);
+
+ /* Read */
+ *timestamp = time_data->time;
+
+ return 0;
+}
+
+struct rte_cryptodev_sym_session *
+rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
+ void *data)
+{
+ struct sym_crypto_data *sym_crypto_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask &
+ (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
+ (data == NULL))
+ return NULL;
+
+ sym_crypto_data = action_data_get(data, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+
+ return sym_crypto_data->session;
+}
+
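+
+/*
+ * Per-packet action core: each action enabled in the profile is applied
+ * in a fixed order (LB, MTR, TM, DECAP, ENCAP, NAT, TTL, STATS, TIME,
+ * SYM_CRYPTO, TAG), each one reading its own slice of the table entry's
+ * action data; the returned mask accumulates packets to drop.
+ */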
+static __rte_always_inline uint64_t
+pkt_work(struct rte_mbuf *mbuf,
+ struct rte_pipeline_table_entry *table_entry,
+ uint64_t time,
+ struct rte_table_action *action,
+ struct ap_config *cfg)
+{
+ uint64_t drop_mask = 0;
+
+ uint32_t ip_offset = action->cfg.common.ip_offset;
+ void *ip = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ip_offset);
+
+ uint32_t dscp;
+ uint16_t total_length;
+
+ if (cfg->common.ip_version) {
+ struct ipv4_hdr *hdr = ip;
+
+ dscp = hdr->type_of_service >> 2;
+ total_length = rte_ntohs(hdr->total_length);
+ } else {
+ struct ipv6_hdr *hdr = ip;
+
+		/* DSCP = top 6 bits of the Traffic Class (vtc_flow 27..22) */
+		dscp = (rte_ntohl(hdr->vtc_flow) & 0x0FC00000) >> 22;
+ total_length =
+ rte_ntohs(hdr->payload_len) + sizeof(struct ipv6_hdr);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_LB);
+
+ pkt_work_lb(mbuf,
+ data,
+ &cfg->lb);
+ }
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_MTR);
+
+ drop_mask |= pkt_work_mtr(mbuf,
+ data,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp,
+ total_length);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_TM);
+
+ pkt_work_tm(mbuf,
+ data,
+ &action->dscp_table,
+ dscp);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ void *data = action_data_get(table_entry,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+
+ pkt_work_decap(mbuf, data);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
+
+ pkt_work_encap(mbuf,
+ data,
+ &cfg->encap,
+ ip,
+ total_length,
+ ip_offset);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_NAT);
+
+ if (cfg->common.ip_version)
+ pkt_ipv4_work_nat(ip, data, &cfg->nat);
+ else
+ pkt_ipv6_work_nat(ip, data, &cfg->nat);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_TTL);
+
+ if (cfg->common.ip_version)
+ drop_mask |= pkt_ipv4_work_ttl(ip, data);
+ else
+ drop_mask |= pkt_ipv6_work_ttl(ip, data);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_STATS);
+
+ pkt_work_stats(data, total_length);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ void *data =
+ action_data_get(table_entry, action, RTE_TABLE_ACTION_TIME);
+
+ pkt_work_time(data, time);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ void *data = action_data_get(table_entry, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+
+ drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
+ ip_offset);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ void *data = action_data_get(table_entry,
+ action,
+ RTE_TABLE_ACTION_TAG);
+
+ pkt_work_tag(mbuf, data);
+ }
+
+ return drop_mask;
+}
+
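+
+/*
+ * Four-packet variant of pkt_work: the same action chain, manually
+ * unrolled so header loads and action data accesses for four packets
+ * can be issued independently of each other.
+ */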
+static __rte_always_inline uint64_t
+pkt4_work(struct rte_mbuf **mbufs,
+ struct rte_pipeline_table_entry **table_entries,
+ uint64_t time,
+ struct rte_table_action *action,
+ struct ap_config *cfg)
+{
+ uint64_t drop_mask0 = 0;
+ uint64_t drop_mask1 = 0;
+ uint64_t drop_mask2 = 0;
+ uint64_t drop_mask3 = 0;
+
+ struct rte_mbuf *mbuf0 = mbufs[0];
+ struct rte_mbuf *mbuf1 = mbufs[1];
+ struct rte_mbuf *mbuf2 = mbufs[2];
+ struct rte_mbuf *mbuf3 = mbufs[3];
+
+ struct rte_pipeline_table_entry *table_entry0 = table_entries[0];
+ struct rte_pipeline_table_entry *table_entry1 = table_entries[1];
+ struct rte_pipeline_table_entry *table_entry2 = table_entries[2];
+ struct rte_pipeline_table_entry *table_entry3 = table_entries[3];
+
+ uint32_t ip_offset = action->cfg.common.ip_offset;
+ void *ip0 = RTE_MBUF_METADATA_UINT32_PTR(mbuf0, ip_offset);
+ void *ip1 = RTE_MBUF_METADATA_UINT32_PTR(mbuf1, ip_offset);
+ void *ip2 = RTE_MBUF_METADATA_UINT32_PTR(mbuf2, ip_offset);
+ void *ip3 = RTE_MBUF_METADATA_UINT32_PTR(mbuf3, ip_offset);
+
+ uint32_t dscp0, dscp1, dscp2, dscp3;
+ uint16_t total_length0, total_length1, total_length2, total_length3;
+
+ if (cfg->common.ip_version) {
+ struct ipv4_hdr *hdr0 = ip0;
+ struct ipv4_hdr *hdr1 = ip1;
+ struct ipv4_hdr *hdr2 = ip2;
+ struct ipv4_hdr *hdr3 = ip3;
+
+ dscp0 = hdr0->type_of_service >> 2;
+ dscp1 = hdr1->type_of_service >> 2;
+ dscp2 = hdr2->type_of_service >> 2;
+ dscp3 = hdr3->type_of_service >> 2;
+
+ total_length0 = rte_ntohs(hdr0->total_length);
+ total_length1 = rte_ntohs(hdr1->total_length);
+ total_length2 = rte_ntohs(hdr2->total_length);
+ total_length3 = rte_ntohs(hdr3->total_length);
+ } else {
+ struct ipv6_hdr *hdr0 = ip0;
+ struct ipv6_hdr *hdr1 = ip1;
+ struct ipv6_hdr *hdr2 = ip2;
+ struct ipv6_hdr *hdr3 = ip3;
+
+		dscp0 = (rte_ntohl(hdr0->vtc_flow) & 0x0FC00000) >> 22;
+		dscp1 = (rte_ntohl(hdr1->vtc_flow) & 0x0FC00000) >> 22;
+		dscp2 = (rte_ntohl(hdr2->vtc_flow) & 0x0FC00000) >> 22;
+		dscp3 = (rte_ntohl(hdr3->vtc_flow) & 0x0FC00000) >> 22;
+
+ total_length0 =
+ rte_ntohs(hdr0->payload_len) + sizeof(struct ipv6_hdr);
+ total_length1 =
+ rte_ntohs(hdr1->payload_len) + sizeof(struct ipv6_hdr);
+ total_length2 =
+ rte_ntohs(hdr2->payload_len) + sizeof(struct ipv6_hdr);
+ total_length3 =
+ rte_ntohs(hdr3->payload_len) + sizeof(struct ipv6_hdr);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_LB);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_LB);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_LB);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_LB);
+
+ pkt_work_lb(mbuf0,
+ data0,
+ &cfg->lb);
+
+ pkt_work_lb(mbuf1,
+ data1,
+ &cfg->lb);
+
+ pkt_work_lb(mbuf2,
+ data2,
+ &cfg->lb);
+
+ pkt_work_lb(mbuf3,
+ data3,
+ &cfg->lb);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_MTR);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_MTR);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_MTR);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_MTR);
+
+ drop_mask0 |= pkt_work_mtr(mbuf0,
+ data0,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp0,
+ total_length0);
+
+ drop_mask1 |= pkt_work_mtr(mbuf1,
+ data1,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp1,
+ total_length1);
+
+ drop_mask2 |= pkt_work_mtr(mbuf2,
+ data2,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp2,
+ total_length2);
+
+ drop_mask3 |= pkt_work_mtr(mbuf3,
+ data3,
+ &action->dscp_table,
+ action->mp,
+ time,
+ dscp3,
+ total_length3);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_TM);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_TM);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_TM);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_TM);
+
+ pkt_work_tm(mbuf0,
+ data0,
+ &action->dscp_table,
+ dscp0);
+
+ pkt_work_tm(mbuf1,
+ data1,
+ &action->dscp_table,
+ dscp1);
+
+ pkt_work_tm(mbuf2,
+ data2,
+ &action->dscp_table,
+ dscp2);
+
+ pkt_work_tm(mbuf3,
+ data3,
+ &action->dscp_table,
+ dscp3);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ void *data0 = action_data_get(table_entry0,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+ void *data1 = action_data_get(table_entry1,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+ void *data2 = action_data_get(table_entry2,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+ void *data3 = action_data_get(table_entry3,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+
+ pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
+ data0, data1, data2, data3);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_ENCAP);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_ENCAP);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_ENCAP);
+
+ pkt_work_encap(mbuf0,
+ data0,
+ &cfg->encap,
+ ip0,
+ total_length0,
+ ip_offset);
+
+ pkt_work_encap(mbuf1,
+ data1,
+ &cfg->encap,
+ ip1,
+ total_length1,
+ ip_offset);
+
+ pkt_work_encap(mbuf2,
+ data2,
+ &cfg->encap,
+ ip2,
+ total_length2,
+ ip_offset);
+
+ pkt_work_encap(mbuf3,
+ data3,
+ &cfg->encap,
+ ip3,
+ total_length3,
+ ip_offset);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_NAT);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_NAT);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_NAT);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_NAT);
+
+ if (cfg->common.ip_version) {
+ pkt_ipv4_work_nat(ip0, data0, &cfg->nat);
+ pkt_ipv4_work_nat(ip1, data1, &cfg->nat);
+ pkt_ipv4_work_nat(ip2, data2, &cfg->nat);
+ pkt_ipv4_work_nat(ip3, data3, &cfg->nat);
+ } else {
+ pkt_ipv6_work_nat(ip0, data0, &cfg->nat);
+ pkt_ipv6_work_nat(ip1, data1, &cfg->nat);
+ pkt_ipv6_work_nat(ip2, data2, &cfg->nat);
+ pkt_ipv6_work_nat(ip3, data3, &cfg->nat);
+ }
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_TTL);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_TTL);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_TTL);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_TTL);
+
+ if (cfg->common.ip_version) {
+ drop_mask0 |= pkt_ipv4_work_ttl(ip0, data0);
+ drop_mask1 |= pkt_ipv4_work_ttl(ip1, data1);
+ drop_mask2 |= pkt_ipv4_work_ttl(ip2, data2);
+ drop_mask3 |= pkt_ipv4_work_ttl(ip3, data3);
+ } else {
+ drop_mask0 |= pkt_ipv6_work_ttl(ip0, data0);
+ drop_mask1 |= pkt_ipv6_work_ttl(ip1, data1);
+ drop_mask2 |= pkt_ipv6_work_ttl(ip2, data2);
+ drop_mask3 |= pkt_ipv6_work_ttl(ip3, data3);
+ }
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_STATS);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_STATS);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_STATS);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_STATS);
+
+ pkt_work_stats(data0, total_length0);
+ pkt_work_stats(data1, total_length1);
+ pkt_work_stats(data2, total_length2);
+ pkt_work_stats(data3, total_length3);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) {
+ void *data0 =
+ action_data_get(table_entry0, action, RTE_TABLE_ACTION_TIME);
+ void *data1 =
+ action_data_get(table_entry1, action, RTE_TABLE_ACTION_TIME);
+ void *data2 =
+ action_data_get(table_entry2, action, RTE_TABLE_ACTION_TIME);
+ void *data3 =
+ action_data_get(table_entry3, action, RTE_TABLE_ACTION_TIME);
+
+ pkt_work_time(data0, time);
+ pkt_work_time(data1, time);
+ pkt_work_time(data2, time);
+ pkt_work_time(data3, time);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ void *data0 = action_data_get(table_entry0, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+ void *data1 = action_data_get(table_entry1, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+ void *data2 = action_data_get(table_entry2, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+ void *data3 = action_data_get(table_entry3, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+
+ drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
+ ip_offset);
+ drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
+ ip_offset);
+ drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
+ ip_offset);
+ drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
+ ip_offset);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ void *data0 = action_data_get(table_entry0,
+ action,
+ RTE_TABLE_ACTION_TAG);
+ void *data1 = action_data_get(table_entry1,
+ action,
+ RTE_TABLE_ACTION_TAG);
+ void *data2 = action_data_get(table_entry2,
+ action,
+ RTE_TABLE_ACTION_TAG);
+ void *data3 = action_data_get(table_entry3,
+ action,
+ RTE_TABLE_ACTION_TAG);
+
+ pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
+ data0, data1, data2, data3);
+ }
+
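+	/* Fold the four per-packet drop flags into bit positions 0..3. */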
+ return drop_mask0 |
+ (drop_mask1 << 1) |
+ (drop_mask2 << 2) |
+ (drop_mask3 << 3);
+}
+
+static __rte_always_inline int
+ah(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_pipeline_table_entry **entries,
+ struct rte_table_action *action,
+ struct ap_config *cfg)
+{
+ uint64_t pkts_drop_mask = 0;
+ uint64_t time = 0;
+
+ if (cfg->action_mask & ((1LLU << RTE_TABLE_ACTION_MTR) |
+ (1LLU << RTE_TABLE_ACTION_TIME)))
+ time = rte_rdtsc();
+
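+	/* Fast path: when pkts_mask is contiguous starting from bit 0 (i.e.
+	 * pkts_mask + 1 is a power of two), process the packets four at a
+	 * time; otherwise walk the sparse mask one bit at a time.
+	 */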
+ if ((pkts_mask & (pkts_mask + 1)) == 0) {
+ uint64_t n_pkts = __builtin_popcountll(pkts_mask);
+ uint32_t i;
+
+ for (i = 0; i < (n_pkts & (~0x3LLU)); i += 4) {
+ uint64_t drop_mask;
+
+ drop_mask = pkt4_work(&pkts[i],
+ &entries[i],
+ time,
+ action,
+ cfg);
+
+ pkts_drop_mask |= drop_mask << i;
+ }
+
+ for ( ; i < n_pkts; i++) {
+ uint64_t drop_mask;
+
+ drop_mask = pkt_work(pkts[i],
+ entries[i],
+ time,
+ action,
+ cfg);
+
+ pkts_drop_mask |= drop_mask << i;
+ }
+ } else
+ for ( ; pkts_mask; ) {
+ uint32_t pos = __builtin_ctzll(pkts_mask);
+ uint64_t pkt_mask = 1LLU << pos;
+ uint64_t drop_mask;
+
+ drop_mask = pkt_work(pkts[pos],
+ entries[pos],
+ time,
+ action,
+ cfg);
+
+ pkts_mask &= ~pkt_mask;
+ pkts_drop_mask |= drop_mask << pos;
+ }
+
+ rte_pipeline_ah_packet_drop(p, pkts_drop_mask);
+
+ return 0;
+}
+
+static int
+ah_default(struct rte_pipeline *p,
+ struct rte_mbuf **pkts,
+ uint64_t pkts_mask,
+ struct rte_pipeline_table_entry **entries,
+ void *arg)
+{
+ struct rte_table_action *action = arg;
+
+ return ah(p,
+ pkts,
+ pkts_mask,
+ entries,
+ action,
+ &action->cfg);
+}
+
+static rte_pipeline_table_action_handler_hit
+ah_selector(struct rte_table_action *action)
+{
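+	/* When forwarding is the only enabled action, it is handled directly
+	 * by the pipeline table entry, so no custom action handler is needed.
+	 */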
+ if (action->cfg.action_mask == (1LLU << RTE_TABLE_ACTION_FWD))
+ return NULL;
+
+ return ah_default;
+}
+
+int
+rte_table_action_table_params_get(struct rte_table_action *action,
+ struct rte_pipeline_table_params *params)
+{
+ rte_pipeline_table_action_handler_hit f_action_hit;
+ uint32_t total_size;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ (params == NULL))
+ return -EINVAL;
+
+ f_action_hit = ah_selector(action);
+ total_size = rte_align32pow2(action->data.total_size);
+
+ /* Fill in params */
+ params->f_action_hit = f_action_hit;
+ params->f_action_miss = NULL;
+ params->arg_ah = (f_action_hit) ? action : NULL;
+ params->action_data_size = total_size -
+ sizeof(struct rte_pipeline_table_entry);
+
+ return 0;
+}
+
+int
+rte_table_action_free(struct rte_table_action *action)
+{
+ if (action == NULL)
+ return 0;
+
+ rte_free(action);
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/lib/librte_pipeline/rte_table_action.h b/src/seastar/dpdk/lib/librte_pipeline/rte_table_action.h
new file mode 100644
index 000000000..cf6eeaa30
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_pipeline/rte_table_action.h
@@ -0,0 +1,1124 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef __INCLUDE_RTE_TABLE_ACTION_H__
+#define __INCLUDE_RTE_TABLE_ACTION_H__
+
+/**
+ * @file
+ * RTE Pipeline Table Actions
+ *
+ * This API provides a common set of actions for pipeline tables to speed up
+ * application development.
+ *
+ * Each match-action rule added to a pipeline table has associated data that
+ * stores the action context. This data is input to the table action handler
+ * called for every input packet that hits the rule as part of the table lookup
+ * during the pipeline execution. The pipeline library allows users to define
+ * their own table actions by providing customized table action handlers (for
+ * table lookup) and complete freedom in setting the rules and their data
+ * (table rule add/delete). While users can still follow this process, this
+ * API is intended to provide a quicker development alternative for a set of
+ * predefined actions.
+ *
+ * The typical steps to use this API are:
+ * - Define a table action profile. This is a configuration template that can
+ * potentially be shared by multiple tables from the same or different
+ * pipelines, with different tables from the same pipeline likely to use
+ * different action profiles. For every table using a given action profile,
+ * the profile defines the set of actions and the action configuration to be
+ * implemented for all the table rules. API functions:
+ * rte_table_action_profile_create(),
+ * rte_table_action_profile_action_register(),
+ * rte_table_action_profile_freeze().
+ *
+ * - Instantiate the table action profile to create table action objects. Each
+ * pipeline table has its own table action object. API functions:
+ * rte_table_action_create().
+ *
+ * - Use the table action object to generate the pipeline table action handlers
+ * (invoked by the pipeline table lookup operation). API functions:
+ * rte_table_action_table_params_get().
+ *
+ * - Use the table action object to generate the rule data (for the pipeline
+ * table rule add operation) based on given action parameters. API functions:
+ * rte_table_action_apply().
+ *
+ * - Use the table action object to read action data (e.g. stats counters) for
+ * any given rule. API functions: rte_table_action_XYZ_read().
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ */
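+
+/*
+ * Illustrative sketch of the typical steps listed above. This is not part of
+ * the API: the IP offset value and the choice of a stats-only profile are
+ * assumptions made for this example, and error handling is omitted.
+ *
+ * @code
+ * struct rte_table_action_common_config common = {
+ *	.ip_version = 1, // IPv4
+ *	.ip_offset = sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM +
+ *		sizeof(struct ether_hdr), // assumed packet layout
+ * };
+ * struct rte_table_action_stats_config stats_config = {
+ *	.n_packets_enabled = 1,
+ *	.n_bytes_enabled = 1,
+ * };
+ * struct rte_table_action_profile *profile;
+ * struct rte_table_action *action;
+ * struct rte_pipeline_table_params params;
+ *
+ * profile = rte_table_action_profile_create(&common);
+ * rte_table_action_profile_action_register(profile, RTE_TABLE_ACTION_STATS,
+ *	&stats_config);
+ * rte_table_action_profile_freeze(profile);
+ *
+ * action = rte_table_action_create(profile, rte_socket_id());
+ * rte_table_action_table_params_get(action, &params);
+ * @endcode
+ */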
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_compat.h>
+#include <rte_ether.h>
+#include <rte_meter.h>
+#include <rte_table_hash.h>
+
+#include "rte_pipeline.h"
+
+/** Table actions. */
+enum rte_table_action_type {
+ /** Forward to next pipeline table, output port or drop. */
+ RTE_TABLE_ACTION_FWD = 0,
+
+ /** Load balance. */
+ RTE_TABLE_ACTION_LB,
+
+ /** Traffic Metering and Policing. */
+ RTE_TABLE_ACTION_MTR,
+
+ /** Traffic Management. */
+ RTE_TABLE_ACTION_TM,
+
+ /** Packet encapsulations. */
+ RTE_TABLE_ACTION_ENCAP,
+
+ /** Network Address Translation (NAT). */
+ RTE_TABLE_ACTION_NAT,
+
+ /** Time to Live (TTL) update. */
+ RTE_TABLE_ACTION_TTL,
+
+ /** Statistics. */
+ RTE_TABLE_ACTION_STATS,
+
+ /** Timestamp. */
+ RTE_TABLE_ACTION_TIME,
+
+ /** Crypto. */
+ RTE_TABLE_ACTION_SYM_CRYPTO,
+
+ /** Tag. */
+ RTE_TABLE_ACTION_TAG,
+
+ /** Packet decapsulations. */
+ RTE_TABLE_ACTION_DECAP,
+};
+
+/** Common action configuration (per table action profile). */
+struct rte_table_action_common_config {
+ /** Input packet Internet Protocol (IP) version. Non-zero for IPv4, zero
+ * for IPv6.
+ */
+ int ip_version;
+
+ /** IP header offset within the input packet buffer. Offset 0 points to
+ * the first byte of the MBUF structure.
+ */
+ uint32_t ip_offset;
+};
+
+/**
+ * RTE_TABLE_ACTION_FWD
+ */
+/** Forward action parameters (per table rule). */
+struct rte_table_action_fwd_params {
+ /** Forward action. */
+ enum rte_pipeline_action action;
+
+ /** Pipeline table ID or output port ID. */
+ uint32_t id;
+};
+
+/**
+ * RTE_TABLE_ACTION_LB
+ */
+/** Load balance key size min (number of bytes). */
+#define RTE_TABLE_ACTION_LB_KEY_SIZE_MIN 8
+
+/** Load balance key size max (number of bytes). */
+#define RTE_TABLE_ACTION_LB_KEY_SIZE_MAX 64
+
+/** Load balance table size. */
+#define RTE_TABLE_ACTION_LB_TABLE_SIZE 8
+
+/** Load balance action configuration (per table action profile). */
+struct rte_table_action_lb_config {
+ /** Key size (number of bytes). */
+ uint32_t key_size;
+
+ /** Key offset within the input packet buffer. Offset 0 points to the
+ * first byte of the MBUF structure.
+ */
+ uint32_t key_offset;
+
+ /** Key mask (*key_size* bytes are valid). */
+ uint8_t key_mask[RTE_TABLE_ACTION_LB_KEY_SIZE_MAX];
+
+ /** Hash function. */
+ rte_table_hash_op_hash f_hash;
+
+ /** Seed value for *f_hash*. */
+ uint64_t seed;
+
+ /** Output value offset within the input packet buffer. Offset 0 points
+ * to the first byte of the MBUF structure.
+ */
+ uint32_t out_offset;
+};
+
+/** Load balance action parameters (per table rule). */
+struct rte_table_action_lb_params {
+ /** Table defining the output values and their weights. The weights are
+ * set in 1/RTE_TABLE_ACTION_LB_TABLE_SIZE increments. To assign a
+ * weight of N/RTE_TABLE_ACTION_LB_TABLE_SIZE to a given output value
+ * (0 <= N <= RTE_TABLE_ACTION_LB_TABLE_SIZE), the same output value
+ * needs to show up exactly N times in this table.
+ */
+ uint32_t out[RTE_TABLE_ACTION_LB_TABLE_SIZE];
+};
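+
+/*
+ * Illustrative example: with RTE_TABLE_ACTION_LB_TABLE_SIZE equal to 8, the
+ * rule parameters below assign weight 4/8 to output value 10, weight 2/8 to
+ * output value 11 and weight 1/8 each to output values 12 and 13 (the output
+ * values are made up for this sketch):
+ *
+ * @code
+ * struct rte_table_action_lb_params p = {
+ *	.out = { 10, 10, 10, 10, 11, 11, 12, 13 },
+ * };
+ * @endcode
+ */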
+
+/**
+ * RTE_TABLE_ACTION_MTR
+ */
+/** Max number of traffic classes (TCs). */
+#define RTE_TABLE_ACTION_TC_MAX 4
+
+/** Max number of queues per traffic class. */
+#define RTE_TABLE_ACTION_TC_QUEUE_MAX 4
+
+/** Differentiated Services Code Point (DSCP) translation table entry. */
+struct rte_table_action_dscp_table_entry {
+ /** Traffic class. Used by the meter or the traffic management actions.
+ * Has to be strictly smaller than *RTE_TABLE_ACTION_TC_MAX*. Traffic
+ * class 0 is the highest priority.
+ */
+ uint32_t tc_id;
+
+ /** Traffic class queue. Used by the traffic management action. Has to
+ * be strictly smaller than *RTE_TABLE_ACTION_TC_QUEUE_MAX*.
+ */
+ uint32_t tc_queue_id;
+
+ /** Packet color. Used by the meter action as the packet input color
+ * for the color aware mode of the traffic metering algorithm.
+ */
+ enum rte_color color;
+};
+
+/** DSCP translation table. */
+struct rte_table_action_dscp_table {
+ /** Array of DSCP table entries */
+ struct rte_table_action_dscp_table_entry entry[64];
+};
+
+/** Supported traffic metering algorithms. */
+enum rte_table_action_meter_algorithm {
+ /** Single Rate Three Color Marker (srTCM) - IETF RFC 2697. */
+ RTE_TABLE_ACTION_METER_SRTCM,
+
+ /** Two Rate Three Color Marker (trTCM) - IETF RFC 2698. */
+ RTE_TABLE_ACTION_METER_TRTCM,
+};
+
+/** Traffic metering profile (configuration template). */
+struct rte_table_action_meter_profile {
+ /** Traffic metering algorithm. */
+ enum rte_table_action_meter_algorithm alg;
+
+ RTE_STD_C11
+ union {
+ /** Only valid when *alg* is set to srTCM - IETF RFC 2697. */
+ struct rte_meter_srtcm_params srtcm;
+
+ /** Only valid when *alg* is set to trTCM - IETF RFC 2698. */
+ struct rte_meter_trtcm_params trtcm;
+ };
+};
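+
+/*
+ * Illustrative example: srTCM profile (IETF RFC 2697) with made-up rate and
+ * burst sizes, to be registered under a caller-chosen profile ID via
+ * rte_table_action_meter_profile_add():
+ *
+ * @code
+ * struct rte_table_action_meter_profile mp = {
+ *	.alg = RTE_TABLE_ACTION_METER_SRTCM,
+ *	.srtcm = {
+ *		.cir = 1250000, // committed information rate (bytes/second)
+ *		.cbs = 2048,    // committed burst size (bytes)
+ *		.ebs = 2048,    // excess burst size (bytes)
+ *	},
+ * };
+ * @endcode
+ */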
+
+/** Policer actions. */
+enum rte_table_action_policer {
+ /** Recolor the packet as green. */
+ RTE_TABLE_ACTION_POLICER_COLOR_GREEN = 0,
+
+ /** Recolor the packet as yellow. */
+ RTE_TABLE_ACTION_POLICER_COLOR_YELLOW,
+
+ /** Recolor the packet as red. */
+ RTE_TABLE_ACTION_POLICER_COLOR_RED,
+
+ /** Drop the packet. */
+ RTE_TABLE_ACTION_POLICER_DROP,
+
+ /** Number of policer actions. */
+ RTE_TABLE_ACTION_POLICER_MAX
+};
+
+/** Meter action configuration per traffic class. */
+struct rte_table_action_mtr_tc_params {
+ /** Meter profile ID. */
+ uint32_t meter_profile_id;
+
+ /** Policer actions. */
+ enum rte_table_action_policer policer[RTE_COLORS];
+};
+
+/** Meter action statistics counters per traffic class. */
+struct rte_table_action_mtr_counters_tc {
+ /** Number of packets per color at the output of the traffic metering
+ * and before the policer actions are executed. Only valid when
+ * *n_packets_valid* is non-zero.
+ */
+ uint64_t n_packets[RTE_COLORS];
+
+ /** Number of packet bytes per color at the output of the traffic
+ * metering and before the policer actions are executed. Only valid when
+ * *n_bytes_valid* is non-zero.
+ */
+ uint64_t n_bytes[RTE_COLORS];
+
+ /** When non-zero, the *n_packets* field is valid. */
+ int n_packets_valid;
+
+ /** When non-zero, the *n_bytes* field is valid. */
+ int n_bytes_valid;
+};
+
+/** Meter action configuration (per table action profile). */
+struct rte_table_action_mtr_config {
+ /** Meter algorithm. */
+ enum rte_table_action_meter_algorithm alg;
+
+ /** Number of traffic classes. Each traffic class has its own traffic
+ * meter and policer instances. Needs to be equal to either 1 or to
+ * *RTE_TABLE_ACTION_TC_MAX*.
+ */
+ uint32_t n_tc;
+
+ /** When non-zero, the *n_packets* meter stats counter is enabled,
+ * otherwise it is disabled.
+ *
+ * @see struct rte_table_action_mtr_counters_tc
+ */
+ int n_packets_enabled;
+
+ /** When non-zero, the *n_bytes* meter stats counter is enabled,
+ * otherwise it is disabled.
+ *
+ * @see struct rte_table_action_mtr_counters_tc
+ */
+ int n_bytes_enabled;
+};
+
+/** Meter action parameters (per table rule). */
+struct rte_table_action_mtr_params {
+ /** Traffic meter and policer parameters for each of the *tc_mask*
+ * traffic classes.
+ */
+ struct rte_table_action_mtr_tc_params mtr[RTE_TABLE_ACTION_TC_MAX];
+
+ /** Bit mask defining which traffic class parameters are valid in *mtr*.
+ * If bit N is set in *tc_mask*, then parameters for traffic class N are
+ * valid in *mtr*.
+ */
+ uint32_t tc_mask;
+};
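+
+/*
+ * Illustrative example: meter only traffic class 0 (tc_mask = 1), using meter
+ * profile ID 0 (assumed to be already added) and dropping red packets:
+ *
+ * @code
+ * struct rte_table_action_mtr_params p = {
+ *	.mtr[0] = {
+ *		.meter_profile_id = 0,
+ *		.policer = {
+ *			[RTE_COLOR_GREEN] = RTE_TABLE_ACTION_POLICER_COLOR_GREEN,
+ *			[RTE_COLOR_YELLOW] = RTE_TABLE_ACTION_POLICER_COLOR_YELLOW,
+ *			[RTE_COLOR_RED] = RTE_TABLE_ACTION_POLICER_DROP,
+ *		},
+ *	},
+ *	.tc_mask = 1,
+ * };
+ * @endcode
+ */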
+
+/** Meter action statistics counters (per table rule). */
+struct rte_table_action_mtr_counters {
+ /** Stats counters for each of the *tc_mask* traffic classes. */
+ struct rte_table_action_mtr_counters_tc stats[RTE_TABLE_ACTION_TC_MAX];
+
+	/** Bit mask defining which traffic class stats counters are valid in
+	 * *stats*. If bit N is set in *tc_mask*, then the stats counters for
+	 * traffic class N are valid in *stats*.
+	 */
+ uint32_t tc_mask;
+};
+
+/**
+ * RTE_TABLE_ACTION_TM
+ */
+/** Traffic management action configuration (per table action profile). */
+struct rte_table_action_tm_config {
+ /** Number of subports per port. */
+ uint32_t n_subports_per_port;
+
+ /** Number of pipes per subport. */
+ uint32_t n_pipes_per_subport;
+};
+
+/** Traffic management action parameters (per table rule). */
+struct rte_table_action_tm_params {
+ /** Subport ID. */
+ uint32_t subport_id;
+
+ /** Pipe ID. */
+ uint32_t pipe_id;
+};
+
+/**
+ * RTE_TABLE_ACTION_ENCAP
+ */
+/** Supported packet encapsulation types. */
+enum rte_table_action_encap_type {
+ /** IP -> { Ether | IP } */
+ RTE_TABLE_ACTION_ENCAP_ETHER = 0,
+
+ /** IP -> { Ether | VLAN | IP } */
+ RTE_TABLE_ACTION_ENCAP_VLAN,
+
+ /** IP -> { Ether | S-VLAN | C-VLAN | IP } */
+ RTE_TABLE_ACTION_ENCAP_QINQ,
+
+ /** IP -> { Ether | MPLS | IP } */
+ RTE_TABLE_ACTION_ENCAP_MPLS,
+
+ /** IP -> { Ether | PPPoE | PPP | IP } */
+ RTE_TABLE_ACTION_ENCAP_PPPOE,
+
+ /** Ether -> { Ether | IP | UDP | VXLAN | Ether }
+ * Ether -> { Ether | VLAN | IP | UDP | VXLAN | Ether }
+ */
+ RTE_TABLE_ACTION_ENCAP_VXLAN,
+
+ /** IP -> { Ether | S-VLAN | C-VLAN | PPPoE | PPP | IP } */
+ RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE,
+};
+
+/** Pre-computed Ethernet header fields for encapsulation action. */
+struct rte_table_action_ether_hdr {
+ struct ether_addr da; /**< Destination address. */
+ struct ether_addr sa; /**< Source address. */
+};
+
+/** Pre-computed VLAN header fields for encapsulation action. */
+struct rte_table_action_vlan_hdr {
+ uint8_t pcp; /**< Priority Code Point (PCP). */
+ uint8_t dei; /**< Drop Eligibility Indicator (DEI). */
+ uint16_t vid; /**< VLAN Identifier (VID). */
+};
+
+/** Pre-computed MPLS header fields for encapsulation action. */
+struct rte_table_action_mpls_hdr {
+ uint32_t label; /**< Label. */
+ uint8_t tc; /**< Traffic Class (TC). */
+ uint8_t ttl; /**< Time to Live (TTL). */
+};
+
+/** Pre-computed PPPoE header fields for encapsulation action. */
+struct rte_table_action_pppoe_hdr {
+ uint16_t session_id; /**< Session ID. */
+};
+
+/** Pre-computed IPv4 header fields for encapsulation action. */
+struct rte_table_action_ipv4_header {
+ uint32_t sa; /**< Source address. */
+ uint32_t da; /**< Destination address. */
+ uint8_t dscp; /**< DiffServ Code Point (DSCP). */
+ uint8_t ttl; /**< Time To Live (TTL). */
+};
+
+/** Pre-computed IPv6 header fields for encapsulation action. */
+struct rte_table_action_ipv6_header {
+ uint8_t sa[16]; /**< Source address. */
+ uint8_t da[16]; /**< Destination address. */
+ uint32_t flow_label; /**< Flow label. */
+ uint8_t dscp; /**< DiffServ Code Point (DSCP). */
+ uint8_t hop_limit; /**< Hop Limit (HL). */
+};
+
+/** Pre-computed UDP header fields for encapsulation action. */
+struct rte_table_action_udp_header {
+ uint16_t sp; /**< Source port. */
+ uint16_t dp; /**< Destination port. */
+};
+
+/** Pre-computed VXLAN header fields for encapsulation action. */
+struct rte_table_action_vxlan_hdr {
+ uint32_t vni; /**< VXLAN Network Identifier (VNI). */
+};
+
+/** Ether encap parameters. */
+struct rte_table_action_encap_ether_params {
+ struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+};
+
+/** VLAN encap parameters. */
+struct rte_table_action_encap_vlan_params {
+ struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+ struct rte_table_action_vlan_hdr vlan; /**< VLAN header. */
+};
+
+/** QinQ encap parameters. */
+struct rte_table_action_encap_qinq_params {
+ struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+ struct rte_table_action_vlan_hdr svlan; /**< Service VLAN header. */
+ struct rte_table_action_vlan_hdr cvlan; /**< Customer VLAN header. */
+};
+
+/** Max number of MPLS labels per output packet for MPLS encapsulation. */
+#ifndef RTE_TABLE_ACTION_MPLS_LABELS_MAX
+#define RTE_TABLE_ACTION_MPLS_LABELS_MAX 4
+#endif
+
+/** MPLS encap parameters. */
+struct rte_table_action_encap_mpls_params {
+ /** Ethernet header. */
+ struct rte_table_action_ether_hdr ether;
+
+ /** MPLS header. */
+ struct rte_table_action_mpls_hdr mpls[RTE_TABLE_ACTION_MPLS_LABELS_MAX];
+
+ /** Number of MPLS labels in MPLS header. */
+ uint32_t mpls_count;
+
+ /** Non-zero for MPLS unicast, zero for MPLS multicast. */
+ int unicast;
+};
+
+/** PPPoE encap parameters. */
+struct rte_table_action_encap_pppoe_params {
+ struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+ struct rte_table_action_pppoe_hdr pppoe; /**< PPPoE/PPP headers. */
+};
+
+/** VXLAN encap parameters. */
+struct rte_table_action_encap_vxlan_params {
+ struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+ struct rte_table_action_vlan_hdr vlan; /**< VLAN header. */
+
+ RTE_STD_C11
+ union {
+ struct rte_table_action_ipv4_header ipv4; /**< IPv4 header. */
+ struct rte_table_action_ipv6_header ipv6; /**< IPv6 header. */
+ };
+
+ struct rte_table_action_udp_header udp; /**< UDP header. */
+ struct rte_table_action_vxlan_hdr vxlan; /**< VXLAN header. */
+};
+
+/** Encap action configuration (per table action profile). */
+struct rte_table_action_encap_config {
+ /** Bit mask defining the set of packet encapsulations enabled for the
+	 * current table action profile. If bit N is set in *encap_mask*, then
+	 * packet encapsulation N is enabled, otherwise it is disabled.
+ *
+ * @see enum rte_table_action_encap_type
+ */
+ uint64_t encap_mask;
+
+ /** Encapsulation type specific configuration. */
+ RTE_STD_C11
+ union {
+ struct {
+ /** Input packet to be encapsulated: offset within the
+ * input packet buffer to the start of the Ethernet
+ * frame to be encapsulated. Offset 0 points to the
+ * first byte of the MBUF structure.
+ */
+ uint32_t data_offset;
+
+ /** Encapsulation header: non-zero when encapsulation
+ * header includes a VLAN tag, zero otherwise.
+ */
+ int vlan;
+
+ /** Encapsulation header: IP version of the IP header
+ * within the encapsulation header. Non-zero for IPv4,
+ * zero for IPv6.
+ */
+ int ip_version;
+ } vxlan; /**< VXLAN specific configuration. */
+ };
+};
+
+/** QinQ_PPPoE encap parameters. */
+struct rte_table_encap_ether_qinq_pppoe {
+	struct rte_table_action_ether_hdr ether; /**< Ethernet header. */
+ struct rte_table_action_vlan_hdr svlan; /**< Service VLAN header. */
+ struct rte_table_action_vlan_hdr cvlan; /**< Customer VLAN header. */
+ struct rte_table_action_pppoe_hdr pppoe; /**< PPPoE/PPP headers. */
+};
+
+/** Encap action parameters (per table rule). */
+struct rte_table_action_encap_params {
+ /** Encapsulation type. */
+ enum rte_table_action_encap_type type;
+
+ RTE_STD_C11
+ union {
+ /** Only valid when *type* is set to Ether. */
+ struct rte_table_action_encap_ether_params ether;
+
+ /** Only valid when *type* is set to VLAN. */
+ struct rte_table_action_encap_vlan_params vlan;
+
+ /** Only valid when *type* is set to QinQ. */
+ struct rte_table_action_encap_qinq_params qinq;
+
+ /** Only valid when *type* is set to MPLS. */
+ struct rte_table_action_encap_mpls_params mpls;
+
+ /** Only valid when *type* is set to PPPoE. */
+ struct rte_table_action_encap_pppoe_params pppoe;
+
+ /** Only valid when *type* is set to VXLAN. */
+ struct rte_table_action_encap_vxlan_params vxlan;
+
+ /** Only valid when *type* is set to QinQ_PPPoE. */
+ struct rte_table_encap_ether_qinq_pppoe qinq_pppoe;
+ };
+};
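+
+/*
+ * Illustrative example: per-rule parameters for the VLAN encapsulation
+ * (IP -> { Ether | VLAN | IP }); the MAC addresses and the VLAN ID are made
+ * up for this sketch:
+ *
+ * @code
+ * struct rte_table_action_encap_params p = {
+ *	.type = RTE_TABLE_ACTION_ENCAP_VLAN,
+ *	.vlan = {
+ *		.ether = {
+ *			.da = { .addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
+ *			.sa = { .addr_bytes = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee } },
+ *		},
+ *		.vlan = { .pcp = 0, .dei = 0, .vid = 100 },
+ *	},
+ * };
+ * @endcode
+ */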
+
+/**
+ * RTE_TABLE_ACTION_NAT
+ */
+/** NAT action configuration (per table action profile). */
+struct rte_table_action_nat_config {
+ /** When non-zero, the IP source address and L4 protocol source port are
+ * translated. When zero, the IP destination address and L4 protocol
+ * destination port are translated.
+ */
+ int source_nat;
+
+ /** Layer 4 protocol, for example TCP (0x06) or UDP (0x11). The checksum
+	 * field is computed differently and placed at a different header offset
+ * by each layer 4 protocol.
+ */
+ uint8_t proto;
+};
+
+/** NAT action parameters (per table rule). */
+struct rte_table_action_nat_params {
+ /** IP version for *addr*: non-zero for IPv4, zero for IPv6. */
+ int ip_version;
+
+ /** IP address. */
+ union {
+ /** IPv4 address; only valid when *ip_version* is non-zero. */
+ uint32_t ipv4;
+
+ /** IPv6 address; only valid when *ip_version* is set to 0. */
+ uint8_t ipv6[16];
+ } addr;
+
+ /** Port. */
+ uint16_t port;
+};
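+
+/*
+ * Illustrative example: rewrite the IPv4 destination address and the TCP
+ * destination port; this assumes a profile configured with source_nat = 0
+ * and proto = 0x06, and the address and port values are made up:
+ *
+ * @code
+ * struct rte_table_action_nat_params p = {
+ *	.ip_version = 1,
+ *	.addr.ipv4 = 0xC0A8000A, // 192.168.0.10
+ *	.port = 8080,
+ * };
+ * @endcode
+ */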
+
+/**
+ * RTE_TABLE_ACTION_TTL
+ */
+/** TTL action configuration (per table action profile). */
+struct rte_table_action_ttl_config {
+ /** When non-zero, the input packets whose updated IPv4 Time to Live
+ * (TTL) field or IPv6 Hop Limit (HL) field is zero are dropped.
+ * When zero, the input packets whose updated IPv4 TTL field or IPv6 HL
+ * field is zero are forwarded as usual (typically for debugging
+ * purpose).
+ */
+ int drop;
+
+ /** When non-zero, the *n_packets* stats counter for TTL action is
+ * enabled, otherwise disabled.
+ *
+ * @see struct rte_table_action_ttl_counters
+ */
+ int n_packets_enabled;
+};
+
+/** TTL action parameters (per table rule). */
+struct rte_table_action_ttl_params {
+ /** When non-zero, decrement the IPv4 TTL field and update the checksum
+ * field, or decrement the IPv6 HL field. When zero, the IPv4 TTL field
+ * or the IPv6 HL field is not changed.
+ */
+ int decrement;
+};
+
+/** TTL action statistics counters (per table rule). */
+struct rte_table_action_ttl_counters {
+ /** Number of IPv4 packets whose updated TTL field is zero or IPv6
+ * packets whose updated HL field is zero.
+ */
+ uint64_t n_packets;
+};
+
+/**
+ * RTE_TABLE_ACTION_STATS
+ */
+/** Stats action configuration (per table action profile). */
+struct rte_table_action_stats_config {
+ /** When non-zero, the *n_packets* stats counter is enabled, otherwise
+ * disabled.
+ *
+ * @see struct rte_table_action_stats_counters
+ */
+ int n_packets_enabled;
+
+ /** When non-zero, the *n_bytes* stats counter is enabled, otherwise
+ * disabled.
+ *
+ * @see struct rte_table_action_stats_counters
+ */
+ int n_bytes_enabled;
+};
+
+/** Stats action parameters (per table rule). */
+struct rte_table_action_stats_params {
+ /** Initial value for the *n_packets* stats counter. Typically set to 0.
+ *
+ * @see struct rte_table_action_stats_counters
+ */
+ uint64_t n_packets;
+
+ /** Initial value for the *n_bytes* stats counter. Typically set to 0.
+ *
+ * @see struct rte_table_action_stats_counters
+ */
+ uint64_t n_bytes;
+};
+
+/** Stats action counters (per table rule). */
+struct rte_table_action_stats_counters {
+ /** Number of packets. Valid only when *n_packets_valid* is non-zero. */
+ uint64_t n_packets;
+
+ /** Number of bytes. Valid only when *n_bytes_valid* is non-zero. */
+ uint64_t n_bytes;
+
+ /** When non-zero, the *n_packets* field is valid, otherwise invalid. */
+ int n_packets_valid;
+
+ /** When non-zero, the *n_bytes* field is valid, otherwise invalid. */
+ int n_bytes_valid;
+};
+
+/**
+ * RTE_TABLE_ACTION_TIME
+ */
+/** Timestamp action parameters (per table rule). */
+struct rte_table_action_time_params {
+ /** Initial timestamp value. Typically set to current time. */
+ uint64_t time;
+};
+
+/**
+ * RTE_TABLE_ACTION_SYM_CRYPTO
+ */
+#ifndef RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX
+#define RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX (16)
+#endif
+
+#ifndef RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX
+#define RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX (16)
+#endif
+
+#ifndef RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET
+#define RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET \
+ (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
+#endif
+
+/** Common action structure to store a data item's value, length and offset. */
+struct rte_table_action_vlo {
+	uint8_t *val;    /**< Data value. */
+	uint32_t length; /**< Data length (number of bytes). */
+	uint32_t offset; /**< Data offset. */
+};
+
+/** Symmetric crypto action configuration (per table action profile). */
+struct rte_table_action_sym_crypto_config {
+ /** Target Cryptodev ID. */
+ uint8_t cryptodev_id;
+
+ /**
+ * Offset to rte_crypto_op structure within the input packet buffer.
+ * Offset 0 points to the first byte of the MBUF structure.
+ */
+ uint32_t op_offset;
+
+ /** The mempool for creating cryptodev sessions. */
+ struct rte_mempool *mp_create;
+
+ /** The mempool for initializing cryptodev sessions. */
+ struct rte_mempool *mp_init;
+};
+
+/** Symmetric Crypto action parameters (per table rule). */
+struct rte_table_action_sym_crypto_params {
+
+	/** Xform pointer that contains all the relevant session information. */
+ struct rte_crypto_sym_xform *xform;
+
+ /**
+ * Offset within the input packet buffer to the first byte of data
+ * to be processed by the crypto unit. Offset 0 points to the first
+ * byte of the MBUF structure.
+ */
+ uint32_t data_offset;
+
+ union {
+ struct {
+ /** Cipher iv data. */
+ struct rte_table_action_vlo cipher_iv;
+
+			/** Cipher iv update data. */
+ struct rte_table_action_vlo cipher_iv_update;
+
+ /** Auth iv data. */
+ struct rte_table_action_vlo auth_iv;
+
+			/** Auth iv update data. */
+ struct rte_table_action_vlo auth_iv_update;
+
+ } cipher_auth;
+
+ struct {
+ /** AEAD AAD data. */
+ struct rte_table_action_vlo aad;
+
+ /** AEAD iv data. */
+ struct rte_table_action_vlo iv;
+
+			/** AEAD AAD update data. */
+ struct rte_table_action_vlo aad_update;
+
+			/** AEAD iv update data. */
+ struct rte_table_action_vlo iv_update;
+
+ } aead;
+ };
+};
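+
+/*
+ * Illustrative sketch: per-rule parameters for an AES-CBC encrypt-only rule.
+ * The key/IV buffers, their sizes and the data offset are assumptions made
+ * for this example:
+ *
+ * @code
+ * uint8_t key[16], iv[16]; // filled in by the application
+ *
+ * struct rte_crypto_sym_xform cipher_xform = {
+ *	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *	.cipher = {
+ *		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ *		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ *		.key = { .data = key, .length = sizeof(key) },
+ *		.iv = { .offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET,
+ *			.length = sizeof(iv) },
+ *	},
+ * };
+ * struct rte_table_action_sym_crypto_params p = {
+ *	.xform = &cipher_xform,
+ *	.data_offset = 320, // made-up offset to the start of the payload
+ *	.cipher_auth.cipher_iv = { .val = iv, .length = sizeof(iv) },
+ * };
+ * @endcode
+ */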
+
+/**
+ * RTE_TABLE_ACTION_TAG
+ */
+/** Tag action parameters (per table rule). */
+struct rte_table_action_tag_params {
+ /** Tag to be attached to the input packet. */
+ uint32_t tag;
+};
+
+/**
+ * RTE_TABLE_ACTION_DECAP
+ */
+/** Decap action parameters (per table rule). */
+struct rte_table_action_decap_params {
+ /** Number of bytes to be removed from the start of the packet. */
+ uint16_t n;
+};
+
+/**
+ * Table action profile.
+ */
+struct rte_table_action_profile;
+
+/**
+ * Table action profile create.
+ *
+ * @param[in] common
+ * Common action configuration.
+ * @return
+ * Table action profile handle on success, NULL otherwise.
+ */
+struct rte_table_action_profile * __rte_experimental
+rte_table_action_profile_create(struct rte_table_action_common_config *common);
+
+/**
+ * Table action profile free.
+ *
+ * @param[in] profile
+ *   Table action profile handle (needs to be valid).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_profile_free(struct rte_table_action_profile *profile);
+
+/**
+ * Table action profile action register.
+ *
+ * @param[in] profile
+ *   Table action profile handle (needs to be valid and not in frozen state).
+ * @param[in] type
+ * Specific table action to be registered for *profile*.
+ * @param[in] action_config
+ * Configuration for the *type* action.
+ * If struct rte_table_action_*type*_config is defined by the Table Action
+ * API, it needs to point to a valid instance of this structure, otherwise it
+ * needs to be set to NULL.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_profile_action_register(struct rte_table_action_profile *profile,
+ enum rte_table_action_type type,
+ void *action_config);
+
+/**
+ * Table action profile freeze.
+ *
+ * Once this function is called successfully, the given profile enters the
+ * frozen state, with the following immediate effects: no more actions can be
+ * registered for this profile, and the profile can now be instantiated to
+ * create table action objects.
+ *
+ * @param[in] profile
+ *   Table action profile handle (needs to be valid and not in frozen state).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ *
+ * @see rte_table_action_create()
+ */
+int __rte_experimental
+rte_table_action_profile_freeze(struct rte_table_action_profile *profile);
+
+/**
+ * Table action.
+ */
+struct rte_table_action;
+
+/**
+ * Table action create.
+ *
+ * Instantiates the given table action profile to create a table action object.
+ *
+ * @param[in] profile
+ *   Table action profile handle (needs to be valid and in frozen state).
+ * @param[in] socket_id
+ * CPU socket ID where the internal data structures required by the new table
+ * action object should be allocated.
+ * @return
+ * Handle to table action object on success, NULL on error.
+ *
+ * @see rte_table_action_profile_freeze()
+ */
+struct rte_table_action * __rte_experimental
+rte_table_action_create(struct rte_table_action_profile *profile,
+ uint32_t socket_id);
+
+/**
+ * Table action free.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_free(struct rte_table_action *action);
+
+/**
+ * Table action table params get.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[inout] params
+ * Pipeline table parameters (needs to be pre-allocated).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_table_params_get(struct rte_table_action *action,
+ struct rte_pipeline_table_params *params);
+
+/**
+ * Table action apply.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) to apply action *type* on.
+ * @param[in] type
+ * Specific table action previously registered for the table action profile of
+ * the *action* object.
+ * @param[in] action_params
+ * Parameters for the *type* action.
+ * If struct rte_table_action_*type*_params is defined by the Table Action
+ * API, it needs to point to a valid instance of this structure, otherwise it
+ * needs to be set to NULL.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_apply(struct rte_table_action *action,
+ void *data,
+ enum rte_table_action_type type,
+ void *action_params);
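+
+/*
+ * Illustrative sketch: build the action data of a rule that forwards to
+ * output port 0 and has the stats action enabled. *action* is assumed to be
+ * a valid table action object, and the rule data buffer size and the port ID
+ * are made up for this example:
+ *
+ * @code
+ * uint8_t rule_data[256]; // >= entry size + params->action_data_size
+ * struct rte_table_action_fwd_params fwd = {
+ *	.action = RTE_PIPELINE_ACTION_PORT,
+ *	.id = 0,
+ * };
+ * struct rte_table_action_stats_params stats = {
+ *	.n_packets = 0,
+ *	.n_bytes = 0,
+ * };
+ *
+ * rte_table_action_apply(action, rule_data, RTE_TABLE_ACTION_FWD, &fwd);
+ * rte_table_action_apply(action, rule_data, RTE_TABLE_ACTION_STATS, &stats);
+ * @endcode
+ */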
+
+/**
+ * Table action DSCP table update.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] dscp_mask
+ * 64-bit mask defining the DSCP table entries to be updated. If bit N is set
+ * in this bit mask, then DSCP table entry N is to be updated, otherwise not.
+ * @param[in] table
+ * DSCP table.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_dscp_table_update(struct rte_table_action *action,
+ uint64_t dscp_mask,
+ struct rte_table_action_dscp_table *table);
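+
+/*
+ * Illustrative example: map all 64 DSCP values to traffic class 0, queue 0,
+ * color green, for a valid table action object *action*:
+ *
+ * @code
+ * struct rte_table_action_dscp_table t;
+ * uint32_t i;
+ *
+ * for (i = 0; i < 64; i++) {
+ *	t.entry[i].tc_id = 0;
+ *	t.entry[i].tc_queue_id = 0;
+ *	t.entry[i].color = RTE_COLOR_GREEN;
+ * }
+ * rte_table_action_dscp_table_update(action, UINT64_MAX, &t);
+ * @endcode
+ */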
+
+/**
+ * Table action meter profile add.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] meter_profile_id
+ * Meter profile ID to be used for the *profile* once it is successfully added
+ * to the *action* object (needs to be unused by the set of meter profiles
+ * currently registered for the *action* object).
+ * @param[in] profile
+ * Meter profile to be added.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_meter_profile_add(struct rte_table_action *action,
+ uint32_t meter_profile_id,
+ struct rte_table_action_meter_profile *profile);
+
+/**
+ * Table action meter profile delete.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] meter_profile_id
+ * Meter profile ID of the meter profile to be deleted from the *action*
+ * object (needs to be valid for the *action* object).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_meter_profile_delete(struct rte_table_action *action,
+ uint32_t meter_profile_id);
+
+/**
+ * Table action meter read.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) with meter action previously
+ * applied on it.
+ * @param[in] tc_mask
+ * Bit mask defining which traffic classes should have the meter stats
+ * counters read from *data* and stored into *stats*. If bit N is set in this
+ * bit mask, then traffic class N is part of this operation, otherwise it is
+ * not. If bit N is set in this bit mask, then traffic class N must be one of
+ * the traffic classes that are enabled for the meter action in the table
+ * action profile used by the *action* object.
+ * @param[inout] stats
+ * When non-NULL, it points to the area where the meter stats counters read
+ * from *data* are saved. Only the meter stats counters for the *tc_mask*
+ * traffic classes are read and stored to *stats*.
+ * @param[in] clear
+ * When non-zero, the meter stats counters are cleared (i.e. set to zero),
+ * otherwise the counters are not modified. When the read operation is enabled
+ * (*stats* is non-NULL), the clear operation is performed after the read
+ * operation is completed.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_meter_read(struct rte_table_action *action,
+ void *data,
+ uint32_t tc_mask,
+ struct rte_table_action_mtr_counters *stats,
+ int clear);
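+
+/*
+ * Illustrative example: read and clear the traffic class 0 meter counters of
+ * a rule whose action data is pointed to by *rule_data* (an assumed name):
+ *
+ * @code
+ * struct rte_table_action_mtr_counters counters;
+ *
+ * rte_table_action_meter_read(action, rule_data, 1, &counters, 1);
+ * @endcode
+ */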
+
+/**
+ * Table action TTL read.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) with TTL action previously
+ * applied on it.
+ * @param[inout] stats
+ * When non-NULL, it points to the area where the TTL stats counters read from
+ * *data* are saved.
+ * @param[in] clear
+ * When non-zero, the TTL stats counters are cleared (i.e. set to zero),
+ * otherwise the counters are not modified. When the read operation is enabled
+ * (*stats* is non-NULL), the clear operation is performed after the read
+ * operation is completed.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_ttl_read(struct rte_table_action *action,
+ void *data,
+ struct rte_table_action_ttl_counters *stats,
+ int clear);
+
+/**
+ * Table action stats read.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) with stats action previously
+ * applied on it.
+ * @param[inout] stats
+ * When non-NULL, it points to the area where the stats counters read from
+ * *data* are saved.
+ * @param[in] clear
+ * When non-zero, the stats counters are cleared (i.e. set to zero), otherwise
+ * the counters are not modified. When the read operation is enabled (*stats*
+ * is non-NULL), the clear operation is performed after the read operation is
+ * completed.
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_stats_read(struct rte_table_action *action,
+ void *data,
+ struct rte_table_action_stats_counters *stats,
+ int clear);
+
+/**
+ * Table action timestamp read.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) with timestamp action
+ * previously applied on it.
+ * @param[inout] timestamp
+ * Pre-allocated memory where the timestamp read from *data* is saved (has to
+ * be non-NULL).
+ * @return
+ * Zero on success, non-zero error code otherwise.
+ */
+int __rte_experimental
+rte_table_action_time_read(struct rte_table_action *action,
+ void *data,
+ uint64_t *timestamp);
+
+/**
+ * Table action cryptodev symmetric session get.
+ *
+ * @param[in] action
+ * Handle to table action object (needs to be valid).
+ * @param[in] data
+ * Data byte array (typically table rule data) with sym crypto action.
+ * @return
+ * The pointer to the session on success, NULL otherwise.
+ */
+struct rte_cryptodev_sym_session *__rte_experimental
+rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
+ void *data);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __INCLUDE_RTE_TABLE_ACTION_H__ */