path: root/src/seastar/dpdk/lib/librte_flow_classify
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/seastar/dpdk/lib/librte_flow_classify
parent    Initial commit.
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/seastar/dpdk/lib/librte_flow_classify')
-rw-r--r--  src/seastar/dpdk/lib/librte_flow_classify/Makefile                          26
-rw-r--r--  src/seastar/dpdk/lib/librte_flow_classify/meson.build                        7
-rw-r--r--  src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify.c              681
-rw-r--r--  src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify.h              279
-rw-r--r--  src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.c        535
-rw-r--r--  src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.h         59
-rw-r--r--  src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_version.map     13
7 files changed, 1600 insertions, 0 deletions
diff --git a/src/seastar/dpdk/lib/librte_flow_classify/Makefile b/src/seastar/dpdk/lib/librte_flow_classify/Makefile
new file mode 100644
index 000000000..fe9fc47ab
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_flow_classify/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_flow_classify.a
+
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+EXPORT_MAP := rte_flow_classify_version.map
+
+LIBABIVER := 1
+
+LDLIBS += -lrte_eal -lrte_ethdev -lrte_net -lrte_table -lrte_acl
+
+# all sources are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify.c
+SRCS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += rte_flow_classify_parse.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY)-include := rte_flow_classify.h
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/seastar/dpdk/lib/librte_flow_classify/meson.build b/src/seastar/dpdk/lib/librte_flow_classify/meson.build
new file mode 100644
index 000000000..d7e487475
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_flow_classify/meson.build
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+allow_experimental_apis = true
+sources = files('rte_flow_classify.c', 'rte_flow_classify_parse.c')
+headers = files('rte_flow_classify.h')
+deps += ['net', 'table']
diff --git a/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify.c b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify.c
new file mode 100644
index 000000000..24f7f7aa0
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify.c
@@ -0,0 +1,681 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <rte_string_fns.h>
+#include <rte_compat.h>
+#include <rte_flow_classify.h>
+#include "rte_flow_classify_parse.h"
+#include <rte_flow_driver.h>
+#include <rte_table_acl.h>
+#include <stdbool.h>
+
+int librte_flow_classify_logtype;
+
+static uint32_t unique_id = 1;
+
+enum rte_flow_classify_table_type table_type
+ = RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE;
+
+struct rte_flow_classify_table_entry {
+ /* meta-data for classify rule */
+ uint32_t rule_id;
+
+ /* Flow action */
+ struct classify_action action;
+};
+
+struct rte_cls_table {
+ /* Input parameters */
+ struct rte_table_ops ops;
+ uint32_t entry_size;
+ enum rte_flow_classify_table_type type;
+
+ /* Handle to the low-level table object */
+ void *h_table;
+};
+
+#define RTE_FLOW_CLASSIFIER_MAX_NAME_SZ 256
+
+struct rte_flow_classifier {
+ /* Input parameters */
+ char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ];
+ int socket_id;
+
+ /* Internal */
+ /* ntuple_filter */
+ struct rte_eth_ntuple_filter ntuple_filter;
+
+ /* classifier tables */
+ struct rte_cls_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX];
+ uint32_t table_mask;
+ uint32_t num_tables;
+
+ uint16_t nb_pkts;
+ struct rte_flow_classify_table_entry
+ *entries[RTE_PORT_IN_BURST_SIZE_MAX];
+} __rte_cache_aligned;
+
+enum {
+ PROTO_FIELD_IPV4,
+ SRC_FIELD_IPV4,
+ DST_FIELD_IPV4,
+ SRCP_FIELD_IPV4,
+ DSTP_FIELD_IPV4,
+ NUM_FIELDS_IPV4
+};
+
+struct acl_keys {
+ struct rte_table_acl_rule_add_params key_add; /* add key */
+ struct rte_table_acl_rule_delete_params key_del; /* delete key */
+};
+
+struct classify_rules {
+ enum rte_flow_classify_rule_type type;
+ union {
+ struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
+ } u;
+};
+
+struct rte_flow_classify_rule {
+ uint32_t id; /* unique ID of classify rule */
+ enum rte_flow_classify_table_type tbl_type; /* rule table */
+ struct classify_rules rules; /* union of rules */
+ union {
+ struct acl_keys key;
+ } u;
+ int key_found; /* rule key found in table */
+ struct rte_flow_classify_table_entry entry; /* rule meta data */
+ void *entry_ptr; /* handle to the table entry for rule meta data */
+};
+
+int __rte_experimental
+rte_flow_classify_validate(
+ struct rte_flow_classifier *cls,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items;
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0;
+ uint32_t i = 0;
+ int ret;
+
+ if (error == NULL)
+ return -EINVAL;
+
+ if (cls == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: rte_flow_classifier parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -EINVAL;
+ }
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -EINVAL;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -EINVAL;
+ }
+
+ memset(&cls->ntuple_filter, 0, sizeof(cls->ntuple_filter));
+
+ /* Count the non-void items in the pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
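+ /* count one more for the END item */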
+ item_num++;
+
+ items = malloc(item_num * sizeof(struct rte_flow_item));
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for pattern items.");
+ return -ENOMEM;
+ }
+
+ memset(items, 0, item_num * sizeof(struct rte_flow_item));
+ classify_pattern_skip_void_item(items, pattern);
+
+ parse_filter = classify_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ free(items);
+ return -EINVAL;
+ }
+
+ ret = parse_filter(attr, items, actions, &cls->ntuple_filter, error);
+ free(items);
+ return ret;
+}
+
+
+#define uint32_t_to_char(ip, a, b, c, d) do {\
+ *a = (unsigned char)(ip >> 24 & 0xff);\
+ *b = (unsigned char)(ip >> 16 & 0xff);\
+ *c = (unsigned char)(ip >> 8 & 0xff);\
+ *d = (unsigned char)(ip & 0xff);\
+ } while (0)
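+/* e.g. uint32_t_to_char(0xC0A80114, &a, &b, &c, &d) yields 192.168.1.20 */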
+
+static inline void
+print_acl_ipv4_key_add(struct rte_table_acl_rule_add_params *key)
+{
+ unsigned char a, b, c, d;
+
+ printf("%s: 0x%02hhx/0x%hhx ", __func__,
+ key->field_value[PROTO_FIELD_IPV4].value.u8,
+ key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
+
+ uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
+ &a, &b, &c, &d);
+ printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+ key->field_value[SRC_FIELD_IPV4].mask_range.u32);
+
+ uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
+ &a, &b, &c, &d);
+ printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+ key->field_value[DST_FIELD_IPV4].mask_range.u32);
+
+ printf("%hu : 0x%x %hu : 0x%x",
+ key->field_value[SRCP_FIELD_IPV4].value.u16,
+ key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
+ key->field_value[DSTP_FIELD_IPV4].value.u16,
+ key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
+
+ printf(" priority: 0x%x\n", key->priority);
+}
+
+static inline void
+print_acl_ipv4_key_delete(struct rte_table_acl_rule_delete_params *key)
+{
+ unsigned char a, b, c, d;
+
+ printf("%s: 0x%02hhx/0x%hhx ", __func__,
+ key->field_value[PROTO_FIELD_IPV4].value.u8,
+ key->field_value[PROTO_FIELD_IPV4].mask_range.u8);
+
+ uint32_t_to_char(key->field_value[SRC_FIELD_IPV4].value.u32,
+ &a, &b, &c, &d);
+ printf(" %hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+ key->field_value[SRC_FIELD_IPV4].mask_range.u32);
+
+ uint32_t_to_char(key->field_value[DST_FIELD_IPV4].value.u32,
+ &a, &b, &c, &d);
+ printf("%hhu.%hhu.%hhu.%hhu/0x%x ", a, b, c, d,
+ key->field_value[DST_FIELD_IPV4].mask_range.u32);
+
+ printf("%hu : 0x%x %hu : 0x%x\n",
+ key->field_value[SRCP_FIELD_IPV4].value.u16,
+ key->field_value[SRCP_FIELD_IPV4].mask_range.u16,
+ key->field_value[DSTP_FIELD_IPV4].value.u16,
+ key->field_value[DSTP_FIELD_IPV4].mask_range.u16);
+}
+
+static int
+rte_flow_classifier_check_params(struct rte_flow_classifier_params *params)
+{
+ if (params == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: Incorrect value for parameter params\n", __func__);
+ return -EINVAL;
+ }
+
+ /* name */
+ if (params->name == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: Incorrect value for parameter name\n", __func__);
+ return -EINVAL;
+ }
+
+ /* socket */
+ if (params->socket_id < 0) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: Incorrect value for parameter socket_id\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct rte_flow_classifier * __rte_experimental
+rte_flow_classifier_create(struct rte_flow_classifier_params *params)
+{
+ struct rte_flow_classifier *cls;
+ int ret;
+
+ /* Check input parameters */
+ ret = rte_flow_classifier_check_params(params);
+ if (ret != 0) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: flow classifier params check failed (%d)\n",
+ __func__, ret);
+ return NULL;
+ }
+
+ /* Allocate memory for the flow classifier */
+ cls = rte_zmalloc_socket("FLOW_CLASSIFIER",
+ sizeof(struct rte_flow_classifier),
+ RTE_CACHE_LINE_SIZE, params->socket_id);
+
+ if (cls == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: flow classifier memory allocation failed\n",
+ __func__);
+ return NULL;
+ }
+
+ /* Save input parameters */
+ strlcpy(cls->name, params->name, RTE_FLOW_CLASSIFIER_MAX_NAME_SZ);
+
+ cls->socket_id = params->socket_id;
+
+ return cls;
+}
+
+static void
+rte_flow_classify_table_free(struct rte_cls_table *table)
+{
+ if (table->ops.f_free != NULL)
+ table->ops.f_free(table->h_table);
+}
+
+int __rte_experimental
+rte_flow_classifier_free(struct rte_flow_classifier *cls)
+{
+ uint32_t i;
+
+ /* Check input parameters */
+ if (cls == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: rte_flow_classifier parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Free tables */
+ for (i = 0; i < cls->num_tables; i++) {
+ struct rte_cls_table *table = &cls->tables[i];
+
+ rte_flow_classify_table_free(table);
+ }
+
+ /* Free flow classifier memory */
+ rte_free(cls);
+
+ return 0;
+}
+
+static int
+rte_table_check_params(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_table_params *params)
+{
+ if (cls == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: flow classifier parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (params == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params parameter is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* ops */
+ if (params->ops == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR, "%s: params->ops is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_create == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: f_create function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (params->ops->f_lookup == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: f_lookup function pointer is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Do we have room for one more table? */
+ if (cls->num_tables == RTE_FLOW_CLASSIFY_TABLE_MAX) {
+ RTE_FLOW_CLASSIFY_LOG(ERR,
+ "%s: Incorrect value for num_tables parameter\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int __rte_experimental
+rte_flow_classify_table_create(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_table_params *params)
+{
+ struct rte_cls_table *table;
+ void *h_table;
+ uint32_t entry_size;
+ int ret;
+
+ /* Check input arguments */
+ ret = rte_table_check_params(cls, params);
+ if (ret != 0)
+ return ret;
+
+ /* calculate table entry size */
+ entry_size = sizeof(struct rte_flow_classify_table_entry);
+
+ /* Create the table */
+ h_table = params->ops->f_create(params->arg_create, cls->socket_id,
+ entry_size);
+ if (h_table == NULL) {
+ RTE_FLOW_CLASSIFY_LOG(ERR, "%s: Table creation failed\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* Commit current table to the classifier */
+ table = &cls->tables[cls->num_tables];
+ table->type = params->type;
+ cls->num_tables++;
+
+ /* Save input parameters */
+ memcpy(&table->ops, params->ops, sizeof(struct rte_table_ops));
+
+ /* Initialize table internal data structure */
+ table->entry_size = entry_size;
+ table->h_table = h_table;
+
+ return 0;
+}
+
+static struct rte_flow_classify_rule *
+allocate_acl_ipv4_5tuple_rule(struct rte_flow_classifier *cls)
+{
+ struct rte_flow_classify_rule *rule;
+ int log_level;
+
+ rule = malloc(sizeof(struct rte_flow_classify_rule));
+ if (!rule)
+ return rule;
+
+ memset(rule, 0, sizeof(struct rte_flow_classify_rule));
+ rule->id = unique_id++;
+ rule->rules.type = RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE;
+
+ /* key add values */
+ rule->u.key.key_add.priority = cls->ntuple_filter.priority;
+ rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].mask_range.u8 =
+ cls->ntuple_filter.proto_mask;
+ rule->u.key.key_add.field_value[PROTO_FIELD_IPV4].value.u8 =
+ cls->ntuple_filter.proto;
+ rule->rules.u.ipv4_5tuple.proto = cls->ntuple_filter.proto;
+ rule->rules.u.ipv4_5tuple.proto_mask = cls->ntuple_filter.proto_mask;
+
+ rule->u.key.key_add.field_value[SRC_FIELD_IPV4].mask_range.u32 =
+ cls->ntuple_filter.src_ip_mask;
+ rule->u.key.key_add.field_value[SRC_FIELD_IPV4].value.u32 =
+ cls->ntuple_filter.src_ip;
+ rule->rules.u.ipv4_5tuple.src_ip_mask = cls->ntuple_filter.src_ip_mask;
+ rule->rules.u.ipv4_5tuple.src_ip = cls->ntuple_filter.src_ip;
+
+ rule->u.key.key_add.field_value[DST_FIELD_IPV4].mask_range.u32 =
+ cls->ntuple_filter.dst_ip_mask;
+ rule->u.key.key_add.field_value[DST_FIELD_IPV4].value.u32 =
+ cls->ntuple_filter.dst_ip;
+ rule->rules.u.ipv4_5tuple.dst_ip_mask = cls->ntuple_filter.dst_ip_mask;
+ rule->rules.u.ipv4_5tuple.dst_ip = cls->ntuple_filter.dst_ip;
+
+ rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].mask_range.u16 =
+ cls->ntuple_filter.src_port_mask;
+ rule->u.key.key_add.field_value[SRCP_FIELD_IPV4].value.u16 =
+ cls->ntuple_filter.src_port;
+ rule->rules.u.ipv4_5tuple.src_port_mask =
+ cls->ntuple_filter.src_port_mask;
+ rule->rules.u.ipv4_5tuple.src_port = cls->ntuple_filter.src_port;
+
+ rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].mask_range.u16 =
+ cls->ntuple_filter.dst_port_mask;
+ rule->u.key.key_add.field_value[DSTP_FIELD_IPV4].value.u16 =
+ cls->ntuple_filter.dst_port;
+ rule->rules.u.ipv4_5tuple.dst_port_mask =
+ cls->ntuple_filter.dst_port_mask;
+ rule->rules.u.ipv4_5tuple.dst_port = cls->ntuple_filter.dst_port;
+
+ log_level = rte_log_get_level(librte_flow_classify_logtype);
+
+ if (log_level == RTE_LOG_DEBUG)
+ print_acl_ipv4_key_add(&rule->u.key.key_add);
+
+ /* key delete values */
+ memcpy(&rule->u.key.key_del.field_value[PROTO_FIELD_IPV4],
+ &rule->u.key.key_add.field_value[PROTO_FIELD_IPV4],
+ NUM_FIELDS_IPV4 * sizeof(struct rte_acl_field));
+
+ if (log_level == RTE_LOG_DEBUG)
+ print_acl_ipv4_key_delete(&rule->u.key.key_del);
+
+ return rule;
+}
+
+struct rte_flow_classify_rule * __rte_experimental
+rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ int *key_found,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_classify_rule *rule;
+ struct rte_flow_classify_table_entry *table_entry;
+ struct classify_action *action;
+ uint32_t i;
+ int ret;
+
+ if (!error)
+ return NULL;
+
+ if (key_found == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "NULL key_found.");
+ return NULL;
+ }
+
+ /* parse attr, pattern and actions */
+ ret = rte_flow_classify_validate(cls, attr, pattern, actions, error);
+ if (ret < 0)
+ return NULL;
+
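+ /* rte_flow_classify_validate() above ran the pattern parser, which
+  * set the global table_type used to select the rule type here
+  */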
+ switch (table_type) {
+ case RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE:
+ rule = allocate_acl_ipv4_5tuple_rule(cls);
+ if (!rule)
+ return NULL;
+ rule->tbl_type = table_type;
+ cls->table_mask |= table_type;
+ break;
+ default:
+ return NULL;
+ }
+
+ action = classify_get_flow_action();
+ table_entry = &rule->entry;
+ table_entry->rule_id = rule->id;
+ table_entry->action.action_mask = action->action_mask;
+
+ /* Copy actions */
+ if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
+ memcpy(&table_entry->action.act.counter, &action->act.counter,
+ sizeof(table_entry->action.act.counter));
+ }
+ if (action->action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_MARK)) {
+ memcpy(&table_entry->action.act.mark, &action->act.mark,
+ sizeof(table_entry->action.act.mark));
+ }
+
+ for (i = 0; i < cls->num_tables; i++) {
+ struct rte_cls_table *table = &cls->tables[i];
+
+ if (table->type == table_type) {
+ if (table->ops.f_add != NULL) {
+ ret = table->ops.f_add(
+ table->h_table,
+ &rule->u.key.key_add,
+ &rule->entry,
+ &rule->key_found,
+ &rule->entry_ptr);
+ if (ret) {
+ free(rule);
+ return NULL;
+ }
+
+ *key_found = rule->key_found;
+ }
+
+ return rule;
+ }
+ }
+ free(rule);
+ return NULL;
+}
+
+int __rte_experimental
+rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_rule *rule)
+{
+ uint32_t i;
+ int ret = -EINVAL;
+
+ if (!cls || !rule)
+ return ret;
+ enum rte_flow_classify_table_type tbl_type = rule->tbl_type;
+
+ for (i = 0; i < cls->num_tables; i++) {
+ struct rte_cls_table *table = &cls->tables[i];
+
+ if (table->type == tbl_type) {
+ if (table->ops.f_delete != NULL) {
+ ret = table->ops.f_delete(table->h_table,
+ &rule->u.key.key_del,
+ &rule->key_found,
+ &rule->entry);
+
+ return ret;
+ }
+ }
+ }
+ free(rule);
+ return ret;
+}
+
+static int
+flow_classifier_lookup(struct rte_flow_classifier *cls,
+ struct rte_cls_table *table,
+ struct rte_mbuf **pkts,
+ const uint16_t nb_pkts)
+{
+ int ret = -EINVAL;
+ uint64_t pkts_mask;
+ uint64_t lookup_hit_mask;
+
+ pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t);
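+ /* one bit per packet: e.g. nb_pkts == 3 gives pkts_mask == 0x7 */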
+ ret = table->ops.f_lookup(table->h_table,
+ pkts, pkts_mask, &lookup_hit_mask,
+ (void **)cls->entries);
+
+ if (!ret && lookup_hit_mask)
+ cls->nb_pkts = nb_pkts;
+ else
+ cls->nb_pkts = 0;
+
+ return ret;
+}
+
+static int
+action_apply(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_rule *rule,
+ struct rte_flow_classify_stats *stats)
+{
+ struct rte_flow_classify_ipv4_5tuple_stats *ntuple_stats;
+ struct rte_flow_classify_table_entry *entry = &rule->entry;
+ uint64_t count = 0;
+ uint32_t action_mask = entry->action.action_mask;
+ int i, ret = -EINVAL;
+
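+ /* for the COUNT action, count the packets whose matched table entry
+  * (filled in by the preceding lookup) belongs to this rule
+  */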
+ if (action_mask & (1LLU << RTE_FLOW_ACTION_TYPE_COUNT)) {
+ for (i = 0; i < cls->nb_pkts; i++) {
+ if (rule->id == cls->entries[i]->rule_id)
+ count++;
+ }
+ if (count) {
+ ret = 0;
+ ntuple_stats = stats->stats;
+ ntuple_stats->counter1 = count;
+ ntuple_stats->ipv4_5tuple = rule->rules.u.ipv4_5tuple;
+ }
+ }
+ return ret;
+}
+
+int __rte_experimental
+rte_flow_classifier_query(struct rte_flow_classifier *cls,
+ struct rte_mbuf **pkts,
+ const uint16_t nb_pkts,
+ struct rte_flow_classify_rule *rule,
+ struct rte_flow_classify_stats *stats)
+{
+ enum rte_flow_classify_table_type tbl_type;
+ uint32_t i;
+ int ret = -EINVAL;
+
+ if (!cls || !rule || !stats || !pkts || nb_pkts == 0)
+ return ret;
+
+ tbl_type = rule->tbl_type;
+ for (i = 0; i < cls->num_tables; i++) {
+ struct rte_cls_table *table = &cls->tables[i];
+
+ if (table->type == tbl_type) {
+ ret = flow_classifier_lookup(cls, table,
+ pkts, nb_pkts);
+ if (!ret) {
+ ret = action_apply(cls, rule, stats);
+ return ret;
+ }
+ }
+ }
+ return ret;
+}
+
+RTE_INIT(librte_flow_classify_init_log)
+{
+ librte_flow_classify_logtype =
+ rte_log_register("lib.flow_classify");
+ if (librte_flow_classify_logtype >= 0)
+ rte_log_set_level(librte_flow_classify_logtype, RTE_LOG_INFO);
+}
diff --git a/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify.h b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify.h
new file mode 100644
index 000000000..01e88e543
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_FLOW_CLASSIFY_H_
+#define _RTE_FLOW_CLASSIFY_H_
+
+/**
+ * @file
+ *
+ * RTE Flow Classify Library
+ *
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * This library provides flow record information with some measured properties.
+ *
+ * The application should define the flow and the measurement criteria
+ * (action) for it.
+ *
+ * The library doesn't maintain any flow records itself; instead, flow
+ * information is returned to the upper layer only for the given packets.
+ *
+ * It is the application's responsibility to call rte_flow_classifier_query()
+ * for a burst of packets, just after receiving them or before transmitting
+ * them.
+ * The application should provide the flow type it is interested in and the
+ * measurement to apply to that flow via the rte_flow_classify_table_entry_add()
+ * API, and should provide the rte_flow_classifier object and the storage to
+ * put results in for the rte_flow_classifier_query() API.
+ *
+ * Usage:
+ * - application calls rte_flow_classifier_create() to create an
+ * rte_flow_classifier object.
+ * - application calls rte_flow_classify_table_create() to create a table
+ * in the rte_flow_classifier object.
+ * - application calls rte_flow_classify_table_entry_add() to add a rule to
+ * the table in the rte_flow_classifier object.
+ * - application calls rte_flow_classifier_query() in a polling manner,
+ * preferably after rte_eth_rx_burst(). This will cause the library to
+ * match packet information to flow information with some measurements.
+ * - rte_flow_classifier object can be destroyed when it is no longer needed
+ * with rte_flow_classifier_free()
+ */
+
+#include <rte_compat.h>
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_acl.h>
+#include <rte_table_acl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int librte_flow_classify_logtype;
+
+#define RTE_FLOW_CLASSIFY_LOG(level, ...) \
+ rte_log(RTE_LOG_ ## level, \
+ librte_flow_classify_logtype, \
+ RTE_FMT("%s(): " RTE_FMT_HEAD(__VA_ARGS__,), \
+ __func__, \
+ RTE_FMT_TAIL(__VA_ARGS__,)))
+
+#ifndef RTE_FLOW_CLASSIFY_TABLE_MAX
+#define RTE_FLOW_CLASSIFY_TABLE_MAX 32
+#endif
+
+/** Opaque data type for flow classifier */
+struct rte_flow_classifier;
+
+/** Opaque data type for flow classify rule */
+struct rte_flow_classify_rule;
+
+/** Flow classify rule type */
+enum rte_flow_classify_rule_type {
+ /** no type */
+ RTE_FLOW_CLASSIFY_RULE_TYPE_NONE,
+ /** IPv4 5tuple type */
+ RTE_FLOW_CLASSIFY_RULE_TYPE_IPV4_5TUPLE,
+};
+
+/** Flow classify table type */
+enum rte_flow_classify_table_type {
+ /** No type */
+ RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE = 1 << 0,
+ /** ACL IP4 5TUPLE */
+ RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE = 1 << 1,
+ /** ACL VLAN IP4 5TUPLE */
+ RTE_FLOW_CLASSIFY_TABLE_ACL_VLAN_IP4_5TUPLE = 1 << 2,
+ /** ACL QinQ IP4 5TUPLE */
+ RTE_FLOW_CLASSIFY_TABLE_ACL_QINQ_IP4_5TUPLE = 1 << 3,
+};
+
+/** Parameters for flow classifier creation */
+struct rte_flow_classifier_params {
+ /** flow classifier name */
+ const char *name;
+
+ /** CPU socket ID where memory for the flow classifier and its
+  * elements (tables) should be allocated
+  */
+ int socket_id;
+};
+
+/** Parameters for table creation */
+struct rte_flow_classify_table_params {
+ /** Table operations (specific to each table type) */
+ struct rte_table_ops *ops;
+
+ /** Opaque param to be passed to the table create operation */
+ void *arg_create;
+
+ /** Classifier table type */
+ enum rte_flow_classify_table_type type;
+};
+
+/** IPv4 5-tuple data */
+struct rte_flow_classify_ipv4_5tuple {
+ uint32_t dst_ip; /**< Destination IP address in big endian. */
+ uint32_t dst_ip_mask; /**< Mask of destination IP address. */
+ uint32_t src_ip; /**< Source IP address in big endian. */
+ uint32_t src_ip_mask; /**< Mask of source IP address. */
+ uint16_t dst_port; /**< Destination port in big endian. */
+ uint16_t dst_port_mask; /**< Mask of destination port. */
+ uint16_t src_port; /**< Source Port in big endian. */
+ uint16_t src_port_mask; /**< Mask of source port. */
+ uint8_t proto; /**< L4 protocol. */
+ uint8_t proto_mask; /**< Mask of L4 protocol. */
+};
+
+/**
+ * Flow stats
+ *
+ * For the count action, stats can be returned by the query API.
+ *
+ * Storage for stats is provided by application.
+ */
+struct rte_flow_classify_stats {
+ void *stats;
+};
+
+struct rte_flow_classify_ipv4_5tuple_stats {
+ /** count of packets that match IPv4 5tuple pattern */
+ uint64_t counter1;
+ /** IPv4 5tuple data */
+ struct rte_flow_classify_ipv4_5tuple ipv4_5tuple;
+};
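+
+/*
+ * For an IPv4 5-tuple rule the application typically provides the storage
+ * behind rte_flow_classify_stats.stats itself, e.g. (a sketch):
+ *
+ *     struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
+ *     struct rte_flow_classify_stats stats = { .stats = &ntuple_stats };
+ */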
+
+/**
+ * Flow classifier create
+ *
+ * @param params
+ * Parameters for flow classifier creation
+ * @return
+ * Handle to flow classifier instance on success or NULL otherwise
+ */
+struct rte_flow_classifier * __rte_experimental
+rte_flow_classifier_create(struct rte_flow_classifier_params *params);
+
+/**
+ * Flow classifier free
+ *
+ * @param cls
+ * Handle to flow classifier instance
+ * @return
+ * 0 on success, error code otherwise
+ */
+int __rte_experimental
+rte_flow_classifier_free(struct rte_flow_classifier *cls);
+
+/**
+ * Flow classify table create
+ *
+ * @param cls
+ * Handle to flow classifier instance
+ * @param params
+ * Parameters for flow_classify table creation
+ * @return
+ * 0 on success, error code otherwise
+ */
+int __rte_experimental
+rte_flow_classify_table_create(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_table_params *params);
+
+/**
+ * Flow classify validate
+ *
+ * @param cls
+ * Handle to flow classifier instance
+ * @param[in] attr
+ * Flow rule attributes
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Structure
+ * initialised in case of error only.
+ * @return
+ * 0 on success, error code otherwise
+ */
+int __rte_experimental
+rte_flow_classify_validate(struct rte_flow_classifier *cls,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+/**
+ * Add a flow classify rule to the flow_classifier table.
+ *
+ * @param[in] cls
+ * Flow classifier handle
+ * @param[in] attr
+ * Flow rule attributes
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] key_found
+ * Returns 1 if the rule is already present, 0 otherwise.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL. Structure
+ * initialised in case of error only.
+ * @return
+ * A valid handle in case of success, NULL otherwise.
+ */
+struct rte_flow_classify_rule * __rte_experimental
+rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ int *key_found,
+ struct rte_flow_error *error);
+
+/**
+ * Delete a flow classify rule from the flow_classifier table.
+ *
+ * @param[in] cls
+ * Flow classifier handle
+ * @param[in] rule
+ * Flow classify rule
+ * @return
+ * 0 on success, error code otherwise.
+ */
+int __rte_experimental
+rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls,
+ struct rte_flow_classify_rule *rule);
+
+/**
+ * Query flow classifier for given rule.
+ *
+ * @param[in] cls
+ * Flow classifier handle
+ * @param[in] pkts
+ * Pointer to packets to process
+ * @param[in] nb_pkts
+ * Number of packets to process
+ * @param[in] rule
+ * Flow classify rule
+ * @param[out] stats
+ *   Flow classify stats (storage is provided by the application)
+ *
+ * @return
+ * 0 on success, error code otherwise.
+ */
+int __rte_experimental
+rte_flow_classifier_query(struct rte_flow_classifier *cls,
+ struct rte_mbuf **pkts,
+ const uint16_t nb_pkts,
+ struct rte_flow_classify_rule *rule,
+ struct rte_flow_classify_stats *stats);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FLOW_CLASSIFY_H_ */
diff --git a/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.c b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.c
new file mode 100644
index 000000000..f65ceaf7c
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.c
@@ -0,0 +1,535 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <rte_flow_classify.h>
+#include "rte_flow_classify_parse.h"
+#include <rte_flow_driver.h>
+
+struct classify_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
+};
+
+static struct classify_action action;
+
+/* Pattern for IPv4 5-tuple UDP filter */
+static enum rte_flow_item_type pattern_ntuple_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern for IPv4 5-tuple TCP filter */
+static enum rte_flow_item_type pattern_ntuple_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern for IPv4 5-tuple SCTP filter */
+static enum rte_flow_item_type pattern_ntuple_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static int
+classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error);
+
+static struct classify_valid_pattern classify_supported_patterns[] = {
+ /* ntuple */
+ { pattern_ntuple_1, classify_parse_ntuple_filter },
+ { pattern_ntuple_2, classify_parse_ntuple_filter },
+ { pattern_ntuple_3, classify_parse_ntuple_filter },
+};
+
+struct classify_action *
+classify_get_flow_action(void)
+{
+ return &action;
+}
+
+/* Find the first VOID or non-VOID item pointer */
+const struct rte_flow_item *
+classify_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
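+/* e.g. pattern [ETH, VOID, IPV4, VOID, UDP, END] is copied to items as
+ * [ETH, IPV4, UDP, END]
+ */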
+void
+classify_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = classify_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = classify_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+classify_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find the parse filter function matching the given pattern, if any */
+parse_filter_t
+classify_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(classify_supported_patterns); i++) {
+ if (classify_match_pattern(classify_supported_patterns[i].items,
+ pattern)) {
+ parse_filter =
+ classify_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+#define FLOW_RULE_MIN_PRIORITY 8
+#define FLOW_RULE_MAX_PRIORITY 0
+
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
+ do {\
+ item = pattern + index;\
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+ index++;\
+ item = pattern + index;\
+ } \
+ } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)\
+ do {\
+ act = actions + index;\
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+ index++;\
+ act = actions + index;\
+ } \
+ } while (0)
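+/* e.g. with pattern[] = { VOID, ETH, ... } and index == 0,
+ * NEXT_ITEM_OF_PATTERN leaves item pointing at the ETH entry and index == 1
+ */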
+
+/**
+ * Please be aware that all the parsers share an assumption:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order.
+ * Because the pattern is used to describe packets,
+ * the packets should normally use network order.
+ */
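+
+/*
+ * e.g. a UDP destination port of 80 must be given as rte_cpu_to_be_16(80)
+ * in an item's spec and mask, while attr->priority stays in CPU order.
+ */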
+
+/**
+ * Parse the rule to see if it is an n-tuple rule, and extract the n-tuple
+ * filter info if it is.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP, TCP or SCTP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action must be COUNT or MARK.
+ * The next not void action must be COUNT, MARK or END.
+ * pattern example:
+ * ITEM     Spec                       Mask
+ * ETH      NULL                       NULL
+ * IPV4     src_addr 192.168.1.20      0xFFFFFFFF
+ *          dst_addr 192.167.3.50      0xFFFFFFFF
+ *          next_proto_id   17         0xFF
+ * UDP/TCP/ src_port        80         0xFFFF
+ * SCTP     dst_port        80         0xFFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ */
+static int
+classify_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_action_count *count;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index;
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item can be MAC or IPv4 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* The last point for a range is not supported */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item,
+ "Not supported last point for range");
+ return -EINVAL;
+
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+ /* check if the next not void item is IPv4 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+ }
+
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -EINVAL;
+ }
+ /* The last point for a range is not supported */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -EINVAL;
+
+ }
+
+ ipv4_mask = item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* check if the next not void item is TCP or UDP or SCTP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ /* get the TCP/UDP info */
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -EINVAL;
+ }
+
+ /* The last point for a range is not supported */
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -EINVAL;
+
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ tcp_spec = item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ udp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ } else {
+ sctp_mask = item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -EINVAL;
+ }
+
+ table_type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -EINVAL;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -EINVAL;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -EINVAL;
+ }
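+ /* In rte_flow, 0 is the highest priority; priorities beyond
+  * FLOW_RULE_MIN_PRIORITY (8) are clamped to FLOW_RULE_MAX_PRIORITY (0)
+  */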
+ filter->priority = (uint16_t)attr->priority;
+ if (attr->priority > FLOW_RULE_MIN_PRIORITY)
+ filter->priority = FLOW_RULE_MAX_PRIORITY;
+
+ /* parse action */
+ index = 0;
+
+ /**
+  * n-tuple only supports COUNT and MARK;
+  * check if the first not void action is COUNT or MARK.
+  */
+ memset(&action, 0, sizeof(action));
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
+ count = act->conf;
+ memcpy(&action.act.counter, count, sizeof(action.act.counter));
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
+ mark_spec = act->conf;
+ memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
+ break;
+ default:
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -EINVAL;
+ }
+
+ /* check if the next not void item is MARK or COUNT or END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ switch (act->type) {
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_COUNT;
+ count = act->conf;
+ memcpy(&action.act.counter, count, sizeof(action.act.counter));
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ action.action_mask |= 1LLU << RTE_FLOW_ACTION_TYPE_MARK;
+ mark_spec = act->conf;
+ memcpy(&action.act.mark, mark_spec, sizeof(action.act.mark));
+ break;
+ case RTE_FLOW_ACTION_TYPE_END:
+ return 0;
+ default:
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -EINVAL;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.h b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.h
new file mode 100644
index 000000000..365a07bd6
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_parse.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _RTE_FLOW_CLASSIFY_PARSE_H_
+#define _RTE_FLOW_CLASSIFY_PARSE_H_
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern enum rte_flow_classify_table_type table_type;
+
+struct classify_action {
+ /* Flow action mask */
+ uint64_t action_mask;
+
+ struct action {
+ /** Integer value to return with packets */
+ struct rte_flow_action_mark mark;
+ /** Flow rule counter */
+ struct rte_flow_query_count counter;
+ } act;
+};
+
+typedef int (*parse_filter_t)(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error);
+
+/* Skip all VOID items of the pattern */
+void
+classify_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern);
+
+/* Find the first VOID or non-VOID item pointer */
+const struct rte_flow_item *
+classify_find_first_item(const struct rte_flow_item *item, bool is_void);
+
+/* Find the parse filter function matching the given pattern, if any */
+parse_filter_t
+classify_find_parse_filter_func(struct rte_flow_item *pattern);
+
+/* get action data */
+struct classify_action *
+classify_get_flow_action(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FLOW_CLASSIFY_PARSE_H_ */
diff --git a/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_version.map b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_version.map
new file mode 100644
index 000000000..49bc25c6a
--- /dev/null
+++ b/src/seastar/dpdk/lib/librte_flow_classify/rte_flow_classify_version.map
@@ -0,0 +1,13 @@
+EXPERIMENTAL {
+ global:
+
+ rte_flow_classifier_create;
+ rte_flow_classifier_free;
+ rte_flow_classifier_query;
+ rte_flow_classify_table_create;
+ rte_flow_classify_table_entry_add;
+ rte_flow_classify_table_entry_delete;
+ rte_flow_classify_validate;
+
+ local: *;
+};