diff options
Diffstat (limited to 'src/spdk/dpdk/drivers/net/softnic')
23 files changed, 21472 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/softnic/Makefile b/src/spdk/dpdk/drivers/net/softnic/Makefile new file mode 100644 index 000000000..dabbe13a5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/Makefile @@ -0,0 +1,54 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_softnic.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +LDLIBS += -lrte_pipeline -lrte_port -lrte_table +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_bus_vdev + +EXPORT_MAP := rte_pmd_softnic_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_mempool.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_swq.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_link.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tm.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_tap.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_action.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_pipeline.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_thread.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cli.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_meter.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cryptodev.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += parser.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += conn.c + +# +# Export include files +# +SYMLINK-y-include += rte_eth_softnic.h + +ifneq ($(CONFIG_RTE_EXEC_ENV_LINUX),y) +$(info Softnic PMD can only operate in a linux environment, \ +please change the definition of the RTE_TARGET environment variable) +all: +clean: +else + +include $(RTE_SDK)/mk/rte.lib.mk + +endif diff --git a/src/spdk/dpdk/drivers/net/softnic/conn.c b/src/spdk/dpdk/drivers/net/softnic/conn.c new file mode 100644 index 000000000..8b6658088 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/conn.c @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <string.h> +#include <stdlib.h> +#include <stdio.h> +#include <unistd.h> +#include <sys/types.h> + +#include <sys/socket.h> + +#include <sys/epoll.h> +#include <netinet/in.h> +#include <arpa/inet.h> +#include <errno.h> + +#include "conn.h" + +#define MSG_CMD_TOO_LONG "Command too long." 
+ +struct softnic_conn { + char *welcome; + char *prompt; + char *buf; + char *msg_in; + char *msg_out; + size_t buf_size; + size_t msg_in_len_max; + size_t msg_out_len_max; + size_t msg_in_len; + int fd_server; + int fd_client_group; + softnic_conn_msg_handle_t msg_handle; + void *msg_handle_arg; +}; + +struct softnic_conn * +softnic_conn_init(struct softnic_conn_params *p) +{ + struct sockaddr_in server_address; + struct softnic_conn *conn; + int fd_server, fd_client_group, status; + + memset(&server_address, 0, sizeof(server_address)); + + /* Check input arguments */ + if (p == NULL || + p->welcome == NULL || + p->prompt == NULL || + p->addr == NULL || + p->buf_size == 0 || + p->msg_in_len_max == 0 || + p->msg_out_len_max == 0 || + p->msg_handle == NULL) + return NULL; + + status = inet_aton(p->addr, &server_address.sin_addr); + if (status == 0) + return NULL; + + /* Memory allocation */ + conn = calloc(1, sizeof(struct softnic_conn)); + if (conn == NULL) + return NULL; + + conn->welcome = calloc(1, CONN_WELCOME_LEN_MAX + 1); + conn->prompt = calloc(1, CONN_PROMPT_LEN_MAX + 1); + conn->buf = calloc(1, p->buf_size); + conn->msg_in = calloc(1, p->msg_in_len_max + 1); + conn->msg_out = calloc(1, p->msg_out_len_max + 1); + + if (conn->welcome == NULL || + conn->prompt == NULL || + conn->buf == NULL || + conn->msg_in == NULL || + conn->msg_out == NULL) { + softnic_conn_free(conn); + return NULL; + } + + /* Server socket */ + server_address.sin_family = AF_INET; + server_address.sin_port = htons(p->port); + + fd_server = socket(AF_INET, + SOCK_STREAM | SOCK_NONBLOCK, + 0); + if (fd_server == -1) { + softnic_conn_free(conn); + return NULL; + } + + status = bind(fd_server, + (struct sockaddr *)&server_address, + sizeof(server_address)); + if (status == -1) { + softnic_conn_free(conn); + close(fd_server); + return NULL; + } + + status = listen(fd_server, 16); + if (status == -1) { + softnic_conn_free(conn); + close(fd_server); + return NULL; + } + + /* Client group */ + fd_client_group = epoll_create(1); + if (fd_client_group == -1) { + softnic_conn_free(conn); + close(fd_server); + return NULL; + } + + /* Fill in */ + strncpy(conn->welcome, p->welcome, CONN_WELCOME_LEN_MAX); + strncpy(conn->prompt, p->prompt, CONN_PROMPT_LEN_MAX); + conn->buf_size = p->buf_size; + conn->msg_in_len_max = p->msg_in_len_max; + conn->msg_out_len_max = p->msg_out_len_max; + conn->msg_in_len = 0; + conn->fd_server = fd_server; + conn->fd_client_group = fd_client_group; + conn->msg_handle = p->msg_handle; + conn->msg_handle_arg = p->msg_handle_arg; + + return conn; +} + +void +softnic_conn_free(struct softnic_conn *conn) +{ + if (conn == NULL) + return; + + if (conn->fd_client_group) + close(conn->fd_client_group); + + if (conn->fd_server) + close(conn->fd_server); + + free(conn->msg_out); + free(conn->msg_in); + free(conn->prompt); + free(conn->welcome); + free(conn); +} + +int +softnic_conn_poll_for_conn(struct softnic_conn *conn) +{ + struct sockaddr_in client_address; + struct epoll_event event; + socklen_t client_address_length; + int fd_client, status; + + /* Check input arguments */ + if (conn == NULL) + return -1; + + /* Server socket */ + client_address_length = sizeof(client_address); + fd_client = accept4(conn->fd_server, + (struct sockaddr *)&client_address, + &client_address_length, + SOCK_NONBLOCK); + if (fd_client == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + return 0; + + return -1; + } + + /* Client group */ + event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP; + event.data.fd = fd_client; + + 
status = epoll_ctl(conn->fd_client_group, + EPOLL_CTL_ADD, + fd_client, + &event); + if (status == -1) { + close(fd_client); + return -1; + } + + /* Client */ + status = write(fd_client, + conn->welcome, + strlen(conn->welcome)); + if (status == -1) { + close(fd_client); + return -1; + } + + status = write(fd_client, + conn->prompt, + strlen(conn->prompt)); + if (status == -1) { + close(fd_client); + return -1; + } + + return 0; +} + +static int +data_event_handle(struct softnic_conn *conn, + int fd_client) +{ + ssize_t len, i, status; + + /* Read input message */ + + len = read(fd_client, + conn->buf, + conn->buf_size); + if (len == -1) { + if (errno == EAGAIN || errno == EWOULDBLOCK) + return 0; + + return -1; + } + if (len == 0) + return 0; + + /* Handle input messages */ + for (i = 0; i < len; i++) { + if (conn->buf[i] == '\n') { + size_t n; + + conn->msg_in[conn->msg_in_len] = 0; + conn->msg_out[0] = 0; + + conn->msg_handle(conn->msg_in, + conn->msg_out, + conn->msg_out_len_max, + conn->msg_handle_arg); + + n = strlen(conn->msg_out); + if (n) { + status = write(fd_client, + conn->msg_out, + n); + if (status == -1) + return status; + } + + conn->msg_in_len = 0; + } else if (conn->msg_in_len < conn->msg_in_len_max) { + conn->msg_in[conn->msg_in_len] = conn->buf[i]; + conn->msg_in_len++; + } else { + status = write(fd_client, + MSG_CMD_TOO_LONG, + strlen(MSG_CMD_TOO_LONG)); + if (status == -1) + return status; + + conn->msg_in_len = 0; + } + } + + /* Write prompt */ + status = write(fd_client, + conn->prompt, + strlen(conn->prompt)); + if (status == -1) + return status; + + return 0; +} + +static int +control_event_handle(struct softnic_conn *conn, + int fd_client) +{ + int status; + + status = epoll_ctl(conn->fd_client_group, + EPOLL_CTL_DEL, + fd_client, + NULL); + if (status == -1) + return -1; + + status = close(fd_client); + if (status == -1) + return -1; + + return 0; +} + +int +softnic_conn_poll_for_msg(struct softnic_conn *conn) +{ + struct epoll_event event; + int fd_client, status, status_data = 0, status_control = 0; + + /* Check input arguments */ + if (conn == NULL) + return -1; + + /* Client group */ + status = epoll_wait(conn->fd_client_group, + &event, + 1, + 0); + if (status == -1) + return -1; + if (status == 0) + return 0; + + fd_client = event.data.fd; + + /* Data available */ + if (event.events & EPOLLIN) + status_data = data_event_handle(conn, fd_client); + + /* Control events */ + if (event.events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)) + status_control = control_event_handle(conn, fd_client); + + if (status_data || status_control) + return -1; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/conn.h b/src/spdk/dpdk/drivers/net/softnic/conn.h new file mode 100644 index 000000000..631edeef3 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/conn.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#ifndef __INCLUDE_CONN_H__ +#define __INCLUDE_CONN_H__ + +#include <stdint.h> + +struct softnic_conn; + +#ifndef CONN_WELCOME_LEN_MAX +#define CONN_WELCOME_LEN_MAX 1024 +#endif + +#ifndef CONN_PROMPT_LEN_MAX +#define CONN_PROMPT_LEN_MAX 16 +#endif + +typedef void (*softnic_conn_msg_handle_t)(char *msg_in, + char *msg_out, + size_t msg_out_len_max, + void *arg); + +struct softnic_conn_params { + const char *welcome; + const char *prompt; + const char *addr; + uint16_t port; + size_t buf_size; + size_t msg_in_len_max; + size_t msg_out_len_max; + softnic_conn_msg_handle_t msg_handle; + void 
*msg_handle_arg; +}; + +struct softnic_conn * +softnic_conn_init(struct softnic_conn_params *p); + +void +softnic_conn_free(struct softnic_conn *conn); + +int +softnic_conn_poll_for_conn(struct softnic_conn *conn); + +int +softnic_conn_poll_for_msg(struct softnic_conn *conn); + +#endif diff --git a/src/spdk/dpdk/drivers/net/softnic/firmware.cli b/src/spdk/dpdk/drivers/net/softnic/firmware.cli new file mode 100644 index 000000000..300cf6e33 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/firmware.cli @@ -0,0 +1,21 @@ +; SPDX-License-Identifier: BSD-3-Clause +; Copyright(c) 2018 Intel Corporation + +link LINK dev 0000:02:00.0 + +pipeline RX period 10 offset_port_id 0 +pipeline RX port in bsz 32 link LINK rxq 0 +pipeline RX port out bsz 32 swq RXQ0 +pipeline RX table match stub +pipeline RX port in 0 table 0 +pipeline RX table 0 rule add match default action fwd port 0 + +pipeline TX period 10 offset_port_id 0 +pipeline TX port in bsz 32 swq TXQ0 +pipeline TX port out bsz 32 link LINK txq 0 +pipeline TX table match stub +pipeline TX port in 0 table 0 +pipeline TX table 0 rule add match default action fwd port 0 + +thread 1 pipeline RX enable +thread 1 pipeline TX enable diff --git a/src/spdk/dpdk/drivers/net/softnic/meson.build b/src/spdk/dpdk/drivers/net/softnic/meson.build new file mode 100644 index 000000000..96c003e15 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/meson.build @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Intel Corporation + +if not is_linux + build = false + reason = 'only supported on linux' +endif +install_headers('rte_eth_softnic.h') +sources = files('rte_eth_softnic_tm.c', + 'rte_eth_softnic.c', + 'rte_eth_softnic_mempool.c', + 'rte_eth_softnic_swq.c', + 'rte_eth_softnic_link.c', + 'rte_eth_softnic_tap.c', + 'rte_eth_softnic_action.c', + 'rte_eth_softnic_pipeline.c', + 'rte_eth_softnic_thread.c', + 'rte_eth_softnic_cli.c', + 'rte_eth_softnic_flow.c', + 'rte_eth_softnic_meter.c', + 'rte_eth_softnic_cryptodev.c', + 'parser.c', + 'conn.c') +deps += ['pipeline', 'port', 'table', 'sched', 'cryptodev'] diff --git a/src/spdk/dpdk/drivers/net/softnic/parser.c b/src/spdk/dpdk/drivers/net/softnic/parser.c new file mode 100644 index 000000000..dc15ec8aa --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/parser.c @@ -0,0 +1,703 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2016 Intel Corporation. + * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org> + * All rights reserved. + */ + +/* For inet_pton4() and inet_pton6() functions: + * + * Copyright (c) 1996 by Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS + * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE + * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS + * SOFTWARE. 
+ */ + +#include <stdint.h> +#include <stdlib.h> +#include <stdio.h> +#include <ctype.h> +#include <getopt.h> +#include <errno.h> +#include <stdarg.h> +#include <string.h> +#include <libgen.h> +#include <unistd.h> +#include <sys/wait.h> + +#include <rte_errno.h> + +#include "parser.h" + +static uint32_t +get_hex_val(char c) +{ + switch (c) { + case '0': case '1': case '2': case '3': case '4': case '5': + case '6': case '7': case '8': case '9': + return c - '0'; + case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': + return c - 'A' + 10; + case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': + return c - 'a' + 10; + default: + return 0; + } +} + +int +softnic_parser_read_arg_bool(const char *p) +{ + p = skip_white_spaces(p); + int result = -EINVAL; + + if (((p[0] == 'y') && (p[1] == 'e') && (p[2] == 's')) || + ((p[0] == 'Y') && (p[1] == 'E') && (p[2] == 'S'))) { + p += 3; + result = 1; + } + + if (((p[0] == 'o') && (p[1] == 'n')) || + ((p[0] == 'O') && (p[1] == 'N'))) { + p += 2; + result = 1; + } + + if (((p[0] == 'n') && (p[1] == 'o')) || + ((p[0] == 'N') && (p[1] == 'O'))) { + p += 2; + result = 0; + } + + if (((p[0] == 'o') && (p[1] == 'f') && (p[2] == 'f')) || + ((p[0] == 'O') && (p[1] == 'F') && (p[2] == 'F'))) { + p += 3; + result = 0; + } + + p = skip_white_spaces(p); + + if (p[0] != '\0') + return -EINVAL; + + return result; +} + +int +softnic_parser_read_int32(int32_t *value, const char *p) +{ + char *next; + int32_t val; + + p = skip_white_spaces(p); + if (!isdigit(*p)) + return -EINVAL; + + val = strtol(p, &next, 10); + if (p == next) + return -EINVAL; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint64(uint64_t *value, const char *p) +{ + char *next; + uint64_t val; + + p = skip_white_spaces(p); + if (!isdigit(*p)) + return -EINVAL; + + val = strtoul(p, &next, 10); + if (p == next) + return -EINVAL; + + p = next; + switch (*p) { + case 'T': + val *= 1024ULL; + /* fall through */ + case 'G': + val *= 1024ULL; + /* fall through */ + case 'M': + val *= 1024ULL; + /* fall through */ + case 'k': + case 'K': + val *= 1024ULL; + p++; + break; + } + + p = skip_white_spaces(p); + if (*p != '\0') + return -EINVAL; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint64_hex(uint64_t *value, const char *p) +{ + char *next; + uint64_t val; + + p = skip_white_spaces(p); + + val = strtoul(p, &next, 16); + if (p == next) + return -EINVAL; + + p = skip_white_spaces(next); + if (*p != '\0') + return -EINVAL; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint32(uint32_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT32_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint32_hex(uint32_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64_hex(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT32_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint16(uint16_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT16_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint16_hex(uint16_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64_hex(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT16_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int 
+softnic_parser_read_uint8(uint8_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT8_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parser_read_uint8_hex(uint8_t *value, const char *p) +{ + uint64_t val = 0; + int ret = softnic_parser_read_uint64_hex(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT8_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +int +softnic_parse_tokenize_string(char *string, char *tokens[], uint32_t *n_tokens) +{ + uint32_t i; + + if (string == NULL || + tokens == NULL || + (*n_tokens < 1)) + return -EINVAL; + + for (i = 0; i < *n_tokens; i++) { + tokens[i] = strtok_r(string, PARSE_DELIMITER, &string); + if (tokens[i] == NULL) + break; + } + + if (i == *n_tokens && + strtok_r(string, PARSE_DELIMITER, &string) != NULL) + return -E2BIG; + + *n_tokens = i; + return 0; +} + +int +softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size) +{ + char *c; + uint32_t len, i; + + /* Check input parameters */ + if (src == NULL || + dst == NULL || + size == NULL || + (*size == 0)) + return -1; + + len = strlen(src); + if (((len & 3) != 0) || + (len > (*size) * 2)) + return -1; + *size = len / 2; + + for (c = src; *c != 0; c++) { + if ((((*c) >= '0') && ((*c) <= '9')) || + (((*c) >= 'A') && ((*c) <= 'F')) || + (((*c) >= 'a') && ((*c) <= 'f'))) + continue; + + return -1; + } + + /* Convert chars to bytes */ + for (i = 0; i < *size; i++) + dst[i] = get_hex_val(src[2 * i]) * 16 + + get_hex_val(src[2 * i + 1]); + + return 0; +} + +int +softnic_parse_mpls_labels(char *string, uint32_t *labels, uint32_t *n_labels) +{ + uint32_t n_max_labels = *n_labels, count = 0; + + /* Check for void list of labels */ + if (strcmp(string, "<void>") == 0) { + *n_labels = 0; + return 0; + } + + /* At least one label should be present */ + for ( ; (*string != '\0'); ) { + char *next; + int value; + + if (count >= n_max_labels) + return -1; + + if (count > 0) { + if (string[0] != ':') + return -1; + + string++; + } + + value = strtol(string, &next, 10); + if (next == string) + return -1; + string = next; + + labels[count++] = (uint32_t)value; + } + + *n_labels = count; + return 0; +} + +#define INADDRSZ 4 +#define IN6ADDRSZ 16 + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. + * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr(digits, ch); + if (pch != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return 0; + if (!saw_digit) { + if (++octets > 4) + return 0; + saw_digit = 1; + } + *tp = (unsigned char)new; + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return 0; + *++tp = 0; + saw_digit = 0; + } else + return 0; + } + if (octets < 4) + return 0; + + memcpy(dst, tmp, INADDRSZ); + return 1; +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. 
+ * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[IN6ADDRSZ], *tp = 0, *endp = 0, *colonp = 0; + const char *xdigits = 0, *curtok = 0; + int ch = 0, saw_xdigit = 0, count_xdigit = 0; + unsigned int val = 0; + unsigned int dbloct_count = 0; + + memset((tp = tmp), '\0', IN6ADDRSZ); + endp = tp + IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. */ + if (*src == ':') + if (*++src != ':') + return 0; + curtok = src; + saw_xdigit = count_xdigit = 0; + val = 0; + + while ((ch = *src++) != '\0') { + const char *pch; + + pch = strchr((xdigits = xdigits_l), ch); + if (pch == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + if (count_xdigit >= 4) + return 0; + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return 0; + saw_xdigit = 1; + count_xdigit++; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return 0; + colonp = tp; + continue; + } else if (*src == '\0') { + return 0; + } + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char)((val >> 8) & 0xff); + *tp++ = (unsigned char)(val & 0xff); + saw_xdigit = 0; + count_xdigit = 0; + val = 0; + dbloct_count++; + continue; + } + if (ch == '.' && ((tp + INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += INADDRSZ; + saw_xdigit = 0; + dbloct_count += 2; + break; /* '\0' was seen by inet_pton4(). */ + } + return 0; + } + if (saw_xdigit) { + if (tp + sizeof(int16_t) > endp) + return 0; + *tp++ = (unsigned char)((val >> 8) & 0xff); + *tp++ = (unsigned char)(val & 0xff); + dbloct_count++; + } + if (colonp != NULL) { + /* if we already have 8 double octets, having a colon means error */ + if (dbloct_count == 8) + return 0; + + /* Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. 
+ */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[-i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return 0; + memcpy(dst, tmp, IN6ADDRSZ); + return 1; +} + +static struct rte_ether_addr * +my_ether_aton(const char *a) +{ + int i; + char *end; + unsigned long o[RTE_ETHER_ADDR_LEN]; + static struct rte_ether_addr ether_addr; + + i = 0; + do { + errno = 0; + o[i] = strtoul(a, &end, 16); + if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0)) + return NULL; + a = end + 1; + } while (++i != sizeof(o) / sizeof(o[0]) && end[0] != 0); + + /* Junk at the end of line */ + if (end[0] != 0) + return NULL; + + /* Support the format XX:XX:XX:XX:XX:XX */ + if (i == RTE_ETHER_ADDR_LEN) { + while (i-- != 0) { + if (o[i] > UINT8_MAX) + return NULL; + ether_addr.addr_bytes[i] = (uint8_t)o[i]; + } + /* Support the format XXXX:XXXX:XXXX */ + } else if (i == RTE_ETHER_ADDR_LEN / 2) { + while (i-- != 0) { + if (o[i] > UINT16_MAX) + return NULL; + ether_addr.addr_bytes[i * 2] = (uint8_t)(o[i] >> 8); + ether_addr.addr_bytes[i * 2 + 1] = (uint8_t)(o[i] & 0xff); + } + /* unknown format */ + } else + return NULL; + + return (struct rte_ether_addr *)ðer_addr; +} + +int +softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4) +{ + if (strlen(token) >= INET_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton4(token, (unsigned char *)ipv4) != 1) + return -EINVAL; + + return 0; +} + +int +softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6) +{ + if (strlen(token) >= INET6_ADDRSTRLEN) + return -EINVAL; + + if (inet_pton6(token, (unsigned char *)ipv6) != 1) + return -EINVAL; + + return 0; +} + +int +softnic_parse_mac_addr(const char *token, struct rte_ether_addr *addr) +{ + struct rte_ether_addr *tmp; + + tmp = my_ether_aton(token); + if (tmp == NULL) + return -1; + + memcpy(addr, tmp, sizeof(struct rte_ether_addr)); + return 0; +} + +int +softnic_parse_cpu_core(const char *entry, + struct softnic_cpu_core_params *p) +{ + size_t num_len; + char num[8]; + + uint32_t s = 0, c = 0, h = 0, val; + uint8_t s_parsed = 0, c_parsed = 0, h_parsed = 0; + const char *next = skip_white_spaces(entry); + char type; + + if (p == NULL) + return -EINVAL; + + /* Expect <CORE> or [sX][cY][h]. At least one parameter is required. */ + while (*next != '\0') { + /* If everything parsed nothing should left */ + if (s_parsed && c_parsed && h_parsed) + return -EINVAL; + + type = *next; + switch (type) { + case 's': + case 'S': + if (s_parsed || c_parsed || h_parsed) + return -EINVAL; + s_parsed = 1; + next++; + break; + case 'c': + case 'C': + if (c_parsed || h_parsed) + return -EINVAL; + c_parsed = 1; + next++; + break; + case 'h': + case 'H': + if (h_parsed) + return -EINVAL; + h_parsed = 1; + next++; + break; + default: + /* If it start from digit it must be only core id. 
*/ + if (!isdigit(*next) || s_parsed || c_parsed || h_parsed) + return -EINVAL; + + type = 'C'; + } + + for (num_len = 0; *next != '\0'; next++, num_len++) { + if (num_len == RTE_DIM(num)) + return -EINVAL; + + if (!isdigit(*next)) + break; + + num[num_len] = *next; + } + + if (num_len == 0 && type != 'h' && type != 'H') + return -EINVAL; + + if (num_len != 0 && (type == 'h' || type == 'H')) + return -EINVAL; + + num[num_len] = '\0'; + val = strtol(num, NULL, 10); + + h = 0; + switch (type) { + case 's': + case 'S': + s = val; + break; + case 'c': + case 'C': + c = val; + break; + case 'h': + case 'H': + h = 1; + break; + } + } + + p->socket_id = s; + p->core_id = c; + p->thread_id = h; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/parser.h b/src/spdk/dpdk/drivers/net/softnic/parser.h new file mode 100644 index 000000000..6f408b248 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/parser.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +#ifndef __INCLUDE_SOFTNIC_PARSER_H__ +#define __INCLUDE_SOFTNIC_PARSER_H__ + +#include <stdint.h> + +#include <rte_ip.h> +#include <rte_ether.h> + +#define PARSE_DELIMITER " \f\n\r\t\v" + +#define skip_white_spaces(pos) \ +({ \ + __typeof__(pos) _p = (pos); \ + for ( ; isspace(*_p); _p++) \ + ; \ + _p; \ +}) + +static inline size_t +skip_digits(const char *src) +{ + size_t i; + + for (i = 0; isdigit(src[i]); i++) + ; + + return i; +} + +int softnic_parser_read_arg_bool(const char *p); + +int softnic_parser_read_int32(int32_t *value, const char *p); + +int softnic_parser_read_uint64(uint64_t *value, const char *p); +int softnic_parser_read_uint32(uint32_t *value, const char *p); +int softnic_parser_read_uint16(uint16_t *value, const char *p); +int softnic_parser_read_uint8(uint8_t *value, const char *p); + +int softnic_parser_read_uint64_hex(uint64_t *value, const char *p); +int softnic_parser_read_uint32_hex(uint32_t *value, const char *p); +int softnic_parser_read_uint16_hex(uint16_t *value, const char *p); +int softnic_parser_read_uint8_hex(uint8_t *value, const char *p); + +int softnic_parse_hex_string(char *src, uint8_t *dst, uint32_t *size); + +int softnic_parse_ipv4_addr(const char *token, struct in_addr *ipv4); +int softnic_parse_ipv6_addr(const char *token, struct in6_addr *ipv6); +int softnic_parse_mac_addr(const char *token, struct rte_ether_addr *addr); +int softnic_parse_mpls_labels(char *string, + uint32_t *labels, uint32_t *n_labels); + +struct softnic_cpu_core_params { + uint32_t socket_id; + uint32_t core_id; + uint32_t thread_id; +}; + +int softnic_parse_cpu_core(const char *entry, + struct softnic_cpu_core_params *p); + +int softnic_parse_tokenize_string(char *string, + char *tokens[], uint32_t *n_tokens); + +#endif diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c new file mode 100644 index 000000000..11723778f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.c @@ -0,0 +1,718 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +#include <rte_ethdev_driver.h> +#include <rte_ethdev_vdev.h> +#include <rte_malloc.h> +#include <rte_bus_vdev.h> +#include <rte_kvargs.h> +#include <rte_errno.h> +#include <rte_ring.h> +#include <rte_tm_driver.h> +#include <rte_mtr_driver.h> + +#include "rte_eth_softnic.h" +#include "rte_eth_softnic_internals.h" + +#define PMD_PARAM_FIRMWARE 
"firmware" +#define PMD_PARAM_CONN_PORT "conn_port" +#define PMD_PARAM_CPU_ID "cpu_id" +#define PMD_PARAM_SC "sc" +#define PMD_PARAM_TM_N_QUEUES "tm_n_queues" +#define PMD_PARAM_TM_QSIZE0 "tm_qsize0" +#define PMD_PARAM_TM_QSIZE1 "tm_qsize1" +#define PMD_PARAM_TM_QSIZE2 "tm_qsize2" +#define PMD_PARAM_TM_QSIZE3 "tm_qsize3" +#define PMD_PARAM_TM_QSIZE4 "tm_qsize4" +#define PMD_PARAM_TM_QSIZE5 "tm_qsize5" +#define PMD_PARAM_TM_QSIZE6 "tm_qsize6" +#define PMD_PARAM_TM_QSIZE7 "tm_qsize7" +#define PMD_PARAM_TM_QSIZE8 "tm_qsize8" +#define PMD_PARAM_TM_QSIZE9 "tm_qsize9" +#define PMD_PARAM_TM_QSIZE10 "tm_qsize10" +#define PMD_PARAM_TM_QSIZE11 "tm_qsize11" +#define PMD_PARAM_TM_QSIZE12 "tm_qsize12" + + +static const char * const pmd_valid_args[] = { + PMD_PARAM_FIRMWARE, + PMD_PARAM_CONN_PORT, + PMD_PARAM_CPU_ID, + PMD_PARAM_SC, + PMD_PARAM_TM_N_QUEUES, + PMD_PARAM_TM_QSIZE0, + PMD_PARAM_TM_QSIZE1, + PMD_PARAM_TM_QSIZE2, + PMD_PARAM_TM_QSIZE3, + PMD_PARAM_TM_QSIZE4, + PMD_PARAM_TM_QSIZE5, + PMD_PARAM_TM_QSIZE6, + PMD_PARAM_TM_QSIZE7, + PMD_PARAM_TM_QSIZE8, + PMD_PARAM_TM_QSIZE9, + PMD_PARAM_TM_QSIZE10, + PMD_PARAM_TM_QSIZE11, + PMD_PARAM_TM_QSIZE12, + NULL +}; + +static const char welcome[] = + "\n" + "Welcome to Soft NIC!\n" + "\n"; + +static const char prompt[] = "softnic> "; + +static const struct softnic_conn_params conn_params_default = { + .welcome = welcome, + .prompt = prompt, + .addr = "0.0.0.0", + .port = 0, + .buf_size = 1024 * 1024, + .msg_in_len_max = 1024, + .msg_out_len_max = 1024 * 1024, + .msg_handle = softnic_cli_process, + .msg_handle_arg = NULL, +}; + +static int pmd_softnic_logtype; + +#define PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, pmd_softnic_logtype, \ + "%s(): " fmt "\n", __func__, ##args) + +static int +pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused, + struct rte_eth_dev_info *dev_info) +{ + dev_info->max_rx_pktlen = UINT32_MAX; + dev_info->max_rx_queues = UINT16_MAX; + dev_info->max_tx_queues = UINT16_MAX; + + return 0; +} + +static int +pmd_dev_configure(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +pmd_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool __rte_unused) +{ + char name[NAME_SIZE]; + struct pmd_internals *p = dev->data->dev_private; + struct softnic_swq *swq; + + struct softnic_swq_params params = { + .size = nb_rx_desc, + }; + + snprintf(name, sizeof(name), "RXQ%u", rx_queue_id); + + swq = softnic_swq_create(p, + name, + ¶ms); + if (swq == NULL) + return -1; + + dev->data->rx_queues[rx_queue_id] = swq->r; + return 0; +} + +static int +pmd_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + char name[NAME_SIZE]; + struct pmd_internals *p = dev->data->dev_private; + struct softnic_swq *swq; + + struct softnic_swq_params params = { + .size = nb_tx_desc, + }; + + snprintf(name, sizeof(name), "TXQ%u", tx_queue_id); + + swq = softnic_swq_create(p, + name, + ¶ms); + if (swq == NULL) + return -1; + + dev->data->tx_queues[tx_queue_id] = swq->r; + return 0; +} + +static int +pmd_dev_start(struct rte_eth_dev *dev) +{ + struct pmd_internals *p = dev->data->dev_private; + int status; + + /* Firmware */ + status = softnic_cli_script_process(p, + p->params.firmware, + conn_params_default.msg_in_len_max, + 
conn_params_default.msg_out_len_max); + if (status) + return status; + + /* Link UP */ + dev->data->dev_link.link_status = ETH_LINK_UP; + + return 0; +} + +static void +pmd_dev_stop(struct rte_eth_dev *dev) +{ + struct pmd_internals *p = dev->data->dev_private; + + /* Link DOWN */ + dev->data->dev_link.link_status = ETH_LINK_DOWN; + + /* Firmware */ + softnic_pipeline_disable_all(p); + softnic_pipeline_free(p); + softnic_table_action_profile_free(p); + softnic_port_in_action_profile_free(p); + softnic_tap_free(p); + softnic_tmgr_free(p); + softnic_link_free(p); + softnic_softnic_swq_free_keep_rxq_txq(p); + softnic_mempool_free(p); + + tm_hierarchy_free(p); + softnic_mtr_free(p); +} + +static void +pmd_dev_close(struct rte_eth_dev *dev __rte_unused) +{ + return; +} + +static int +pmd_link_update(struct rte_eth_dev *dev __rte_unused, + int wait_to_complete __rte_unused) +{ + return 0; +} + +static int +pmd_filter_ctrl(struct rte_eth_dev *dev __rte_unused, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + if (filter_type == RTE_ETH_FILTER_GENERIC && + filter_op == RTE_ETH_FILTER_GET) { + *(const void **)arg = &pmd_flow_ops; + return 0; + } + + return -ENOTSUP; +} + +static int +pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) +{ + *(const struct rte_tm_ops **)arg = &pmd_tm_ops; + + return 0; +} + +static int +pmd_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg) +{ + *(const struct rte_mtr_ops **)arg = &pmd_mtr_ops; + + return 0; +} + +static const struct eth_dev_ops pmd_ops = { + .dev_configure = pmd_dev_configure, + .dev_start = pmd_dev_start, + .dev_stop = pmd_dev_stop, + .dev_close = pmd_dev_close, + .link_update = pmd_link_update, + .dev_infos_get = pmd_dev_infos_get, + .rx_queue_setup = pmd_rx_queue_setup, + .tx_queue_setup = pmd_tx_queue_setup, + .filter_ctrl = pmd_filter_ctrl, + .tm_ops_get = pmd_tm_ops_get, + .mtr_ops_get = pmd_mtr_ops_get, +}; + +static uint16_t +pmd_rx_pkt_burst(void *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return (uint16_t)rte_ring_sc_dequeue_burst(rxq, + (void **)rx_pkts, + nb_pkts, + NULL); +} + +static uint16_t +pmd_tx_pkt_burst(void *txq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + return (uint16_t)rte_ring_sp_enqueue_burst(txq, + (void **)tx_pkts, + nb_pkts, + NULL); +} + +static void * +pmd_init(struct pmd_params *params) +{ + struct pmd_internals *p; + int status; + + p = rte_zmalloc_socket(params->name, + sizeof(struct pmd_internals), + 0, + params->cpu_id); + if (p == NULL) + return NULL; + + /* Params */ + memcpy(&p->params, params, sizeof(p->params)); + + /* Resources */ + tm_hierarchy_init(p); + softnic_mtr_init(p); + + softnic_mempool_init(p); + softnic_swq_init(p); + softnic_link_init(p); + softnic_tmgr_init(p); + softnic_tap_init(p); + softnic_cryptodev_init(p); + softnic_port_in_action_profile_init(p); + softnic_table_action_profile_init(p); + softnic_pipeline_init(p); + + status = softnic_thread_init(p); + if (status) { + rte_free(p); + return NULL; + } + + if (params->conn_port) { + struct softnic_conn_params conn_params; + + memcpy(&conn_params, &conn_params_default, sizeof(conn_params)); + conn_params.port = p->params.conn_port; + conn_params.msg_handle_arg = p; + + p->conn = softnic_conn_init(&conn_params); + if (p->conn == NULL) { + softnic_thread_free(p); + rte_free(p); + return NULL; + } + } + + return p; +} + +static void +pmd_free(struct pmd_internals *p) +{ + if (p == NULL) + return; + + if (p->params.conn_port) + softnic_conn_free(p->conn); + + 
softnic_thread_free(p); + softnic_pipeline_free(p); + softnic_table_action_profile_free(p); + softnic_port_in_action_profile_free(p); + softnic_tap_free(p); + softnic_tmgr_free(p); + softnic_link_free(p); + softnic_swq_free(p); + softnic_mempool_free(p); + + tm_hierarchy_free(p); + softnic_mtr_free(p); + + rte_free(p); +} + +static struct rte_ether_addr eth_addr = { + .addr_bytes = {0}, +}; + +static int +pmd_ethdev_register(struct rte_vdev_device *vdev, + struct pmd_params *params, + void *dev_private) +{ + struct rte_eth_dev *dev; + + /* Ethdev entry allocation */ + dev = rte_eth_dev_allocate(params->name); + if (!dev) + return -ENOMEM; + + /* dev */ + dev->rx_pkt_burst = pmd_rx_pkt_burst; + dev->tx_pkt_burst = pmd_tx_pkt_burst; + dev->tx_pkt_prepare = NULL; + dev->dev_ops = &pmd_ops; + dev->device = &vdev->device; + + /* dev->data */ + dev->data->dev_private = dev_private; + dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G; + dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; + dev->data->dev_link.link_autoneg = ETH_LINK_FIXED; + dev->data->dev_link.link_status = ETH_LINK_DOWN; + dev->data->mac_addrs = ð_addr; + dev->data->promiscuous = 1; + dev->data->kdrv = RTE_KDRV_NONE; + dev->data->numa_node = params->cpu_id; + + rte_eth_dev_probing_finish(dev); + + return 0; +} + +static int +get_string(const char *key __rte_unused, const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(char **)extra_args = strdup(value); + + if (!*(char **)extra_args) + return -ENOMEM; + + return 0; +} + +static int +get_uint32(const char *key __rte_unused, const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(uint32_t *)extra_args = strtoull(value, NULL, 0); + + return 0; +} + +static int +get_uint16(const char *key __rte_unused, const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(uint16_t *)extra_args = strtoull(value, NULL, 0); + + return 0; +} + +static int +pmd_parse_args(struct pmd_params *p, const char *params) +{ + struct rte_kvargs *kvlist; + int ret = 0; + + kvlist = rte_kvargs_parse(params, pmd_valid_args); + if (kvlist == NULL) + return -EINVAL; + + /* Set default values */ + memset(p, 0, sizeof(*p)); + p->firmware = SOFTNIC_FIRMWARE; + p->cpu_id = SOFTNIC_CPU_ID; + p->sc = SOFTNIC_SC; + p->tm.n_queues = SOFTNIC_TM_N_QUEUES; + p->tm.qsize[0] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[1] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[2] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[3] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[4] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[5] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[6] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[7] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[8] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[9] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[10] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[11] = SOFTNIC_TM_QUEUE_SIZE; + p->tm.qsize[12] = SOFTNIC_TM_QUEUE_SIZE; + + /* Firmware script (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_FIRMWARE, + &get_string, &p->firmware); + if (ret < 0) + goto out_free; + } + + /* Connection listening port (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_CONN_PORT) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_CONN_PORT, + &get_uint16, &p->conn_port); + if (ret < 0) + goto out_free; + } + + /* CPU ID (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_CPU_ID) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_CPU_ID, + &get_uint32, &p->cpu_id); + if (ret < 
0) + goto out_free; + } + + /* Service cores (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_SC) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_SC, + &get_uint32, &p->sc); + if (ret < 0) + goto out_free; + } + + /* TM number of queues (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_N_QUEUES) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_N_QUEUES, + &get_uint32, &p->tm.n_queues); + if (ret < 0) + goto out_free; + } + + /* TM queue size 0 .. 3 (optional) */ + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE0) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE0, + &get_uint32, &p->tm.qsize[0]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE1) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE1, + &get_uint32, &p->tm.qsize[1]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE2) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE2, + &get_uint32, &p->tm.qsize[2]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE3) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE3, + &get_uint32, &p->tm.qsize[3]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE4) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE4, + &get_uint32, &p->tm.qsize[4]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE5) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE5, + &get_uint32, &p->tm.qsize[5]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE6) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE6, + &get_uint32, &p->tm.qsize[6]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE7) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE7, + &get_uint32, &p->tm.qsize[7]); + if (ret < 0) + goto out_free; + } + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE8) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE8, + &get_uint32, &p->tm.qsize[8]); + if (ret < 0) + goto out_free; + } + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE9) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE9, + &get_uint32, &p->tm.qsize[9]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE10) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE10, + &get_uint32, &p->tm.qsize[10]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE11) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE11, + &get_uint32, &p->tm.qsize[11]); + if (ret < 0) + goto out_free; + } + + if (rte_kvargs_count(kvlist, PMD_PARAM_TM_QSIZE12) == 1) { + ret = rte_kvargs_process(kvlist, PMD_PARAM_TM_QSIZE12, + &get_uint32, &p->tm.qsize[12]); + if (ret < 0) + goto out_free; + } + +out_free: + rte_kvargs_free(kvlist); + return ret; +} + +static int +pmd_probe(struct rte_vdev_device *vdev) +{ + struct pmd_params p; + const char *params; + int status = 0; + + void *dev_private; + const char *name = rte_vdev_device_name(vdev); + + PMD_LOG(INFO, "Probing device \"%s\"", name); + + /* Parse input arguments */ + params = rte_vdev_device_args(vdev); + if (!params) + return -EINVAL; + + status = pmd_parse_args(&p, params); + if (status) + return status; + + p.name = name; + + /* Allocate and initialize soft ethdev private data */ + dev_private = pmd_init(&p); + if (dev_private == NULL) + return 
-ENOMEM; + + /* Register soft ethdev */ + PMD_LOG(INFO, "Creating soft ethdev \"%s\"", p.name); + + status = pmd_ethdev_register(vdev, &p, dev_private); + if (status) { + pmd_free(dev_private); + return status; + } + + return 0; +} + +static int +pmd_remove(struct rte_vdev_device *vdev) +{ + struct rte_eth_dev *dev = NULL; + + if (!vdev) + return -EINVAL; + + PMD_LOG(INFO, "Removing device \"%s\"", rte_vdev_device_name(vdev)); + + /* Find the ethdev entry */ + dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev)); + if (dev == NULL) + return -ENODEV; + + /* Free device data structures*/ + pmd_free(dev->data->dev_private); + dev->data->dev_private = NULL; /* already freed */ + dev->data->mac_addrs = NULL; /* statically allocated */ + rte_eth_dev_release_port(dev); + + return 0; +} + +static struct rte_vdev_driver pmd_softnic_drv = { + .probe = pmd_probe, + .remove = pmd_remove, +}; + +RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv); +RTE_PMD_REGISTER_PARAM_STRING(net_softnic, + PMD_PARAM_FIRMWARE "=<string> " + PMD_PARAM_CONN_PORT "=<uint16> " + PMD_PARAM_CPU_ID "=<uint32> " + PMD_PARAM_TM_N_QUEUES "=<uint32> " + PMD_PARAM_TM_QSIZE0 "=<uint32> " + PMD_PARAM_TM_QSIZE1 "=<uint32> " + PMD_PARAM_TM_QSIZE2 "=<uint32> " + PMD_PARAM_TM_QSIZE3 "=<uint32>" + PMD_PARAM_TM_QSIZE4 "=<uint32> " + PMD_PARAM_TM_QSIZE5 "=<uint32> " + PMD_PARAM_TM_QSIZE6 "=<uint32> " + PMD_PARAM_TM_QSIZE7 "=<uint32> " + PMD_PARAM_TM_QSIZE8 "=<uint32> " + PMD_PARAM_TM_QSIZE9 "=<uint32> " + PMD_PARAM_TM_QSIZE10 "=<uint32> " + PMD_PARAM_TM_QSIZE11 "=<uint32>" + PMD_PARAM_TM_QSIZE12 "=<uint32>" +); + + +RTE_INIT(pmd_softnic_init_log) +{ + pmd_softnic_logtype = rte_log_register("pmd.net.softnic"); + if (pmd_softnic_logtype >= 0) + rte_log_set_level(pmd_softnic_logtype, RTE_LOG_NOTICE); +} + +int +rte_pmd_softnic_manage(uint16_t port_id) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + struct pmd_internals *softnic; + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); +#endif + + softnic = dev->data->dev_private; + + softnic_conn_poll_for_conn(softnic->conn); + + softnic_conn_poll_for_msg(softnic->conn); + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h new file mode 100644 index 000000000..3f0116177 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef __INCLUDE_RTE_ETH_SOFTNIC_H__ +#define __INCLUDE_RTE_ETH_SOFTNIC_H__ + +#include <stdint.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/** Firmware. */ +#ifndef SOFTNIC_FIRMWARE +#define SOFTNIC_FIRMWARE "firmware.cli" +#endif + +/** TCP connection port (0 = no connectivity). */ +#ifndef SOFTNIC_CONN_PORT +#define SOFTNIC_CONN_PORT 0 +#endif + +/** NUMA node ID. */ +#ifndef SOFTNIC_CPU_ID +#define SOFTNIC_CPU_ID 0 +#endif + +/** + * Service cores: + * + * 0 = The current device is run explicitly by the application. The firmware + * creates one or several pipelines for the current device and maps them to + * CPU cores that should not be service cores. The application is required + * to call rte_pmd_softnic_run() for the current device on each of these CPU + * cores in order to make the current device work. + * + * 1 = The current device is run on the service cores transparently to the + * application. 
The firmware creates one or several pipelines for the + * current device and maps them to CPU cores that should be service cores. + * Each of these service cores is calling rte_pmd_softnic_run() for the + * current device in order to make the current device work. The application + * is not allowed to call rte_pmd_softnic_run() for the current device. + */ +#ifndef SOFTNIC_SC +#define SOFTNIC_SC 1 +#endif + +/** Traffic Manager: Number of scheduler queues. */ +#ifndef SOFTNIC_TM_N_QUEUES +#define SOFTNIC_TM_N_QUEUES (64 * 1024) +#endif + +/** Traffic Manager: Scheduler queue size (per traffic class). */ +#ifndef SOFTNIC_TM_QUEUE_SIZE +#define SOFTNIC_TM_QUEUE_SIZE 64 +#endif + +/** + * Soft NIC run. + * + * @param port_id + * Port ID of the Soft NIC device. + * @return + * Zero on success, error code otherwise. + */ +int +rte_pmd_softnic_run(uint16_t port_id); + +/** + * Soft NIC manage. + * + * @param port_id + * Port ID of the Soft NIC device. + * @return + * Zero on success, error code otherwise. + */ +__rte_experimental +int +rte_pmd_softnic_manage(uint16_t port_id); + +#ifdef __cplusplus +} +#endif + +#endif /* __INCLUDE_RTE_ETH_SOFTNIC_H__ */ diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c new file mode 100644 index 000000000..92c744dc9 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_action.c @@ -0,0 +1,422 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +#include <rte_string_fns.h> +#include <rte_table_hash_func.h> + +#include "rte_eth_softnic_internals.h" + +/** + * Input port + */ +int +softnic_port_in_action_profile_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->port_in_action_profile_list); + + return 0; +} + +void +softnic_port_in_action_profile_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_port_in_action_profile *profile; + + profile = TAILQ_FIRST(&p->port_in_action_profile_list); + if (profile == NULL) + break; + + TAILQ_REMOVE(&p->port_in_action_profile_list, profile, node); + free(profile); + } +} + +struct softnic_port_in_action_profile * +softnic_port_in_action_profile_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_port_in_action_profile *profile; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(profile, &p->port_in_action_profile_list, node) + if (strcmp(profile->name, name) == 0) + return profile; + + return NULL; +} + +struct softnic_port_in_action_profile * +softnic_port_in_action_profile_create(struct pmd_internals *p, + const char *name, + struct softnic_port_in_action_profile_params *params) +{ + struct softnic_port_in_action_profile *profile; + struct rte_port_in_action_profile *ap; + int status; + + /* Check input params */ + if (name == NULL || + softnic_port_in_action_profile_find(p, name) || + params == NULL) + return NULL; + + if ((params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) && + params->lb.f_hash == NULL) { + switch (params->lb.key_size) { + case 8: + params->lb.f_hash = rte_table_hash_crc_key8; + break; + + case 16: + params->lb.f_hash = rte_table_hash_crc_key16; + break; + + case 24: + params->lb.f_hash = rte_table_hash_crc_key24; + break; + + case 32: + params->lb.f_hash = rte_table_hash_crc_key32; + break; + + case 40: + params->lb.f_hash = rte_table_hash_crc_key40; + break; + + case 48: + params->lb.f_hash = rte_table_hash_crc_key48; + break; + + case 56: + params->lb.f_hash 
= rte_table_hash_crc_key56; + break; + + case 64: + params->lb.f_hash = rte_table_hash_crc_key64; + break; + + default: + return NULL; + } + + params->lb.seed = 0; + } + + /* Resource */ + ap = rte_port_in_action_profile_create(0); + if (ap == NULL) + return NULL; + + if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_FLTR)) { + status = rte_port_in_action_profile_action_register(ap, + RTE_PORT_IN_ACTION_FLTR, + ¶ms->fltr); + + if (status) { + rte_port_in_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_PORT_IN_ACTION_LB)) { + status = rte_port_in_action_profile_action_register(ap, + RTE_PORT_IN_ACTION_LB, + ¶ms->lb); + + if (status) { + rte_port_in_action_profile_free(ap); + return NULL; + } + } + + status = rte_port_in_action_profile_freeze(ap); + if (status) { + rte_port_in_action_profile_free(ap); + return NULL; + } + + /* Node allocation */ + profile = calloc(1, sizeof(struct softnic_port_in_action_profile)); + if (profile == NULL) { + rte_port_in_action_profile_free(ap); + return NULL; + } + + /* Node fill in */ + strlcpy(profile->name, name, sizeof(profile->name)); + memcpy(&profile->params, params, sizeof(*params)); + profile->ap = ap; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->port_in_action_profile_list, profile, node); + + return profile; +} + +/** + * Table + */ +int +softnic_table_action_profile_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->table_action_profile_list); + + return 0; +} + +void +softnic_table_action_profile_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_table_action_profile *profile; + + profile = TAILQ_FIRST(&p->table_action_profile_list); + if (profile == NULL) + break; + + TAILQ_REMOVE(&p->table_action_profile_list, profile, node); + free(profile); + } +} + +struct softnic_table_action_profile * +softnic_table_action_profile_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_table_action_profile *profile; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(profile, &p->table_action_profile_list, node) + if (strcmp(profile->name, name) == 0) + return profile; + + return NULL; +} + +struct softnic_table_action_profile * +softnic_table_action_profile_create(struct pmd_internals *p, + const char *name, + struct softnic_table_action_profile_params *params) +{ + struct softnic_table_action_profile *profile; + struct rte_table_action_profile *ap; + int status; + + /* Check input params */ + if (name == NULL || + softnic_table_action_profile_find(p, name) || + params == NULL || + ((params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) == 0)) + return NULL; + + if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) && + params->lb.f_hash == NULL) { + switch (params->lb.key_size) { + case 8: + params->lb.f_hash = rte_table_hash_crc_key8; + break; + + case 16: + params->lb.f_hash = rte_table_hash_crc_key16; + break; + + case 24: + params->lb.f_hash = rte_table_hash_crc_key24; + break; + + case 32: + params->lb.f_hash = rte_table_hash_crc_key32; + break; + + case 40: + params->lb.f_hash = rte_table_hash_crc_key40; + break; + + case 48: + params->lb.f_hash = rte_table_hash_crc_key48; + break; + + case 56: + params->lb.f_hash = rte_table_hash_crc_key56; + break; + + case 64: + params->lb.f_hash = rte_table_hash_crc_key64; + break; + + default: + return NULL; + } + + params->lb.seed = 0; + } + + /* Resource */ + ap = rte_table_action_profile_create(¶ms->common); + if (ap == NULL) + return NULL; + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) { + status = 
rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_FWD, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_LB, + ¶ms->lb); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_MTR, + ¶ms->mtr); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TM, + ¶ms->tm); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_ENCAP, + ¶ms->encap); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_NAT, + ¶ms->nat); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TTL, + ¶ms->ttl); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_STATS, + ¶ms->stats); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TIME, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_TAG, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_DECAP, + NULL); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + if (params->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) { + status = rte_table_action_profile_action_register(ap, + RTE_TABLE_ACTION_SYM_CRYPTO, + ¶ms->sym_crypto); + + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + } + + status = rte_table_action_profile_freeze(ap); + if (status) { + rte_table_action_profile_free(ap); + return NULL; + } + + /* Node allocation */ + profile = calloc(1, sizeof(struct softnic_table_action_profile)); + if (profile == NULL) { + rte_table_action_profile_free(ap); + return NULL; + } + + /* Node fill in */ + strlcpy(profile->name, name, sizeof(profile->name)); + memcpy(&profile->params, params, sizeof(*params)); + profile->ap = ap; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->table_action_profile_list, profile, node); + + return profile; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c new file mode 100644 index 000000000..932ec15f4 --- /dev/null +++ 
b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cli.c @@ -0,0 +1,6571 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <stdio.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_string_fns.h> +#include <rte_cryptodev.h> + +#include "rte_eth_softnic_internals.h" +#include "parser.h" + +#ifndef CMD_MAX_TOKENS +#define CMD_MAX_TOKENS 256 +#endif + +#define MSG_OUT_OF_MEMORY "Not enough memory.\n" +#define MSG_CMD_UNKNOWN "Unknown command \"%s\".\n" +#define MSG_CMD_UNIMPLEM "Command \"%s\" not implemented.\n" +#define MSG_ARG_NOT_ENOUGH "Not enough arguments for command \"%s\".\n" +#define MSG_ARG_TOO_MANY "Too many arguments for command \"%s\".\n" +#define MSG_ARG_MISMATCH "Wrong number of arguments for command \"%s\".\n" +#define MSG_ARG_NOT_FOUND "Argument \"%s\" not found.\n" +#define MSG_ARG_INVALID "Invalid value for argument \"%s\".\n" +#define MSG_FILE_ERR "Error in file \"%s\" at line %u.\n" +#define MSG_FILE_NOT_ENOUGH "Not enough rules in file \"%s\".\n" +#define MSG_CMD_FAIL "Command \"%s\" failed.\n" + +static int +is_comment(char *in) +{ + if ((strlen(in) && index("!#%;", in[0])) || + (strncmp(in, "//", 2) == 0) || + (strncmp(in, "--", 2) == 0)) + return 1; + + return 0; +} + +/** + * mempool <mempool_name> + * buffer <buffer_size> + * pool <pool_size> + * cache <cache_size> + */ +static void +cmd_mempool(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_mempool_params p; + char *name; + struct softnic_mempool *mempool; + + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "buffer") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buffer"); + return; + } + + if (softnic_parser_read_uint32(&p.buffer_size, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "buffer_size"); + return; + } + + if (strcmp(tokens[4], "pool") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pool"); + return; + } + + if (softnic_parser_read_uint32(&p.pool_size, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "pool_size"); + return; + } + + if (strcmp(tokens[6], "cache") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cache"); + return; + } + + if (softnic_parser_read_uint32(&p.cache_size, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "cache_size"); + return; + } + + mempool = softnic_mempool_create(softnic, name, &p); + if (mempool == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * link <link_name> + * dev <device_name> | port <port_id> + */ +static void +cmd_link(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_link_params p; + struct softnic_link *link; + char *name; + + memset(&p, 0, sizeof(p)); + + if (n_tokens != 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + name = tokens[1]; + + if (strcmp(tokens[2], "dev") == 0) { + p.dev_name = tokens[3]; + } else if (strcmp(tokens[2], "port") == 0) { + p.dev_name = NULL; + + if (softnic_parser_read_uint16(&p.port_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dev or port"); + return; + } + + link = softnic_link_create(softnic, name, &p); + if (link == NULL) { + 
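+/*
+ * Sketch (illustration only): every cmd_*() handler in this file
+ * receives an argv-style token array cut out of one CLI line, e.g.
+ * "mempool MEMPOOL0 buffer 2304 pool 32768 cache 256" arrives as
+ * n_tokens = 8 with tokens[0] = "mempool". A minimal tokenizer with
+ * the same contract, assuming plain whitespace separation and the
+ * CMD_MAX_TOKENS bound above:
+ */
+#include <string.h>
+
+static uint32_t
+cli_tokenize(char *line, char **tokens, uint32_t tokens_max)
+{
+	uint32_t n = 0;
+	char *save = NULL, *t;
+
+	for (t = strtok_r(line, " \t\r\n", &save);
+	     t != NULL && n < tokens_max;
+	     t = strtok_r(NULL, " \t\r\n", &save))
+		tokens[n++] = t; /* tokens point into the mutated line */
+
+	return n;
+}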
snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * swq <swq_name> + * size <size> + */ +static void +cmd_swq(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_swq_params p; + char *name; + struct softnic_swq *swq; + + if (n_tokens != 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.size, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "size"); + return; + } + + swq = softnic_swq_create(softnic, name, &p); + if (swq == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr shaper profile + * id <profile_id> + * rate <tb_rate> size <tb_size> + * adj <packet_length_adjust> + */ +static void +cmd_tmgr_shaper_profile(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_shaper_params sp; + struct rte_tm_error error; + uint32_t shaper_profile_id; + uint16_t port_id; + int status; + + memset(&sp, 0, sizeof(struct rte_tm_shaper_params)); + + if (n_tokens != 11) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[2], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (strcmp(tokens[3], "id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); + return; + } + + if (softnic_parser_read_uint32(&shaper_profile_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "profile_id"); + return; + } + + if (strcmp(tokens[5], "rate") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rate"); + return; + } + + if (softnic_parser_read_uint64(&sp.peak.rate, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tb_rate"); + return; + } + + if (strcmp(tokens[7], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint64(&sp.peak.size, tokens[8]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tb_size"); + return; + } + + if (strcmp(tokens[9], "adj") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "adj"); + return; + } + + if (softnic_parser_read_int32(&sp.pkt_length_adjust, tokens[10]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "packet_length_adjust"); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return; + + status = rte_tm_shaper_profile_add(port_id, shaper_profile_id, &sp, &error); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr shared shaper + * id <shared_shaper_id> + * profile <shaper_profile_id> + */ +static void +cmd_tmgr_shared_shaper(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_error error; + uint32_t shared_shaper_id, shaper_profile_id; + uint16_t port_id; + int status; + + if (n_tokens != 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "shared") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared"); + return; + } + + if (strcmp(tokens[2], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if 
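+/*
+ * Example: "tmgr shaper profile id 0 rate 1250000 size 1000000 adj 24"
+ * programs a 10 Mbit/s shaper (rte_tm rates are in bytes/s) with a
+ * 1,000,000-byte token bucket; adj 24 matches
+ * RTE_SCHED_FRAME_OVERHEAD_DEFAULT, i.e. 8 B preamble + 12 B
+ * inter-frame gap + 4 B FCS of Ethernet framing overhead per packet.
+ */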
(strcmp(tokens[3], "id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); + return; + } + + if (softnic_parser_read_uint32(&shared_shaper_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id"); + return; + } + + if (strcmp(tokens[5], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (softnic_parser_read_uint32(&shaper_profile_id, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id"); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return; + + status = rte_tm_shared_shaper_add_update(port_id, + shared_shaper_id, + shaper_profile_id, + &error); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr node + * id <node_id> + * parent <parent_node_id | none> + * priority <priority> + * weight <weight> + * [shaper profile <shaper_profile_id>] + * [shared shaper <shared_shaper_id>] + * [nonleaf sp <n_sp_priorities>] + */ +static void +cmd_tmgr_node(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_error error; + struct rte_tm_node_params np; + uint32_t node_id, parent_node_id, priority, weight, shared_shaper_id; + uint16_t port_id; + int status; + + memset(&np, 0, sizeof(struct rte_tm_node_params)); + np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + np.nonleaf.n_sp_priorities = 1; + + if (n_tokens < 10) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "node") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "node"); + return; + } + + if (strcmp(tokens[2], "id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "id"); + return; + } + + if (softnic_parser_read_uint32(&node_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "node_id"); + return; + } + + if (strcmp(tokens[4], "parent") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "parent"); + return; + } + + if (strcmp(tokens[5], "none") == 0) + parent_node_id = RTE_TM_NODE_ID_NULL; + else { + if (softnic_parser_read_uint32(&parent_node_id, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "parent_node_id"); + return; + } + } + + if (strcmp(tokens[6], "priority") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority"); + return; + } + + if (softnic_parser_read_uint32(&priority, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "priority"); + return; + } + + if (strcmp(tokens[8], "weight") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight"); + return; + } + + if (softnic_parser_read_uint32(&weight, tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "weight"); + return; + } + + tokens += 10; + n_tokens -= 10; + + if (n_tokens >= 2 && + (strcmp(tokens[0], "shaper") == 0) && + (strcmp(tokens[1], "profile") == 0)) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); + return; + } + + if (strcmp(tokens[2], "none") == 0) { + np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + } else { + if (softnic_parser_read_uint32(&np.shaper_profile_id, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shaper_profile_id"); + return; + } + } + + tokens += 3; + n_tokens -= 3; + } /* shaper profile */ + + if (n_tokens >= 2 && + (strcmp(tokens[0], "shared") == 0) && + (strcmp(tokens[1], "shaper") == 0)) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); + return; + } + + 
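+/*
+ * Example: "tmgr node id 1000 parent none priority 0 weight 1 shaper
+ * profile 0" creates a root node, while "tmgr node id 0 parent 1000
+ * priority 0 weight 1" attaches a child that keeps the default
+ * (none) shaper. Each optional clause is consumed left to right,
+ * three tokens at a time.
+ */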
if (softnic_parser_read_uint32(&shared_shaper_id, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared_shaper_id"); + return; + } + + np.shared_shaper_id = &shared_shaper_id; + np.n_shared_shapers = 1; + + tokens += 3; + n_tokens -= 3; + } /* shared shaper */ + + if (n_tokens >= 2 && + (strcmp(tokens[0], "nonleaf") == 0) && + (strcmp(tokens[1], "sp") == 0)) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "tmgr node"); + return; + } + + if (softnic_parser_read_uint32(&np.nonleaf.n_sp_priorities, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_sp_priorities"); + return; + } + + tokens += 3; + n_tokens -= 3; + } /* nonleaf sp <n_sp_priorities> */ + + if (n_tokens) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status != 0) + return; + + status = rte_tm_node_add(port_id, + node_id, + parent_node_id, + priority, + weight, + RTE_TM_NODE_LEVEL_ID_ANY, + &np, + &error); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +static uint32_t +root_node_id(uint32_t n_spp, + uint32_t n_pps) +{ + uint32_t n_queues = n_spp * n_pps * RTE_SCHED_QUEUES_PER_PIPE; + uint32_t n_tc = n_spp * n_pps * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_pipes = n_spp * n_pps; + + return n_queues + n_tc + n_pipes + n_spp; +} + +static uint32_t +subport_node_id(uint32_t n_spp, + uint32_t n_pps, + uint32_t subport_id) +{ + uint32_t n_pipes = n_spp * n_pps; + uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; + + return n_queues + n_tc + n_pipes + subport_id; +} + +static uint32_t +pipe_node_id(uint32_t n_spp, + uint32_t n_pps, + uint32_t subport_id, + uint32_t pipe_id) +{ + uint32_t n_pipes = n_spp * n_pps; + uint32_t n_tc = n_pipes * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; + + return n_queues + + n_tc + + pipe_id + + subport_id * n_pps; +} + +static uint32_t +tc_node_id(uint32_t n_spp, + uint32_t n_pps, + uint32_t subport_id, + uint32_t pipe_id, + uint32_t tc_id) +{ + uint32_t n_pipes = n_spp * n_pps; + uint32_t n_queues = n_pipes * RTE_SCHED_QUEUES_PER_PIPE; + + return n_queues + + tc_id + + (pipe_id + subport_id * n_pps) * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; +} + +static uint32_t +queue_node_id(uint32_t n_spp __rte_unused, + uint32_t n_pps, + uint32_t subport_id, + uint32_t pipe_id, + uint32_t tc_id, + uint32_t queue_id) +{ + return queue_id + tc_id + + (pipe_id + subport_id * n_pps) * RTE_SCHED_QUEUES_PER_PIPE; +} + +struct tmgr_hierarchy_default_params { + uint32_t n_spp; /**< Number of subports per port. */ + uint32_t n_pps; /**< Number of pipes per subport. 
*/ + + struct { + uint32_t port; + uint32_t subport; + uint32_t pipe; + uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + } shaper_profile_id; + + struct { + uint32_t tc[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + uint32_t tc_valid[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + } shared_shaper_id; + + struct { + uint32_t queue[RTE_SCHED_QUEUES_PER_PIPE]; + } weight; +}; + +static int +tmgr_hierarchy_default(struct pmd_internals *softnic, + struct tmgr_hierarchy_default_params *params) +{ + struct rte_tm_node_params root_node_params = { + .shaper_profile_id = params->shaper_profile_id.port, + .nonleaf = { + .n_sp_priorities = 1, + }, + }; + + struct rte_tm_node_params subport_node_params = { + .shaper_profile_id = params->shaper_profile_id.subport, + .nonleaf = { + .n_sp_priorities = 1, + }, + }; + + struct rte_tm_node_params pipe_node_params = { + .shaper_profile_id = params->shaper_profile_id.pipe, + .nonleaf = { + .n_sp_priorities = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + }, + }; + + uint32_t *shared_shaper_id = + (uint32_t *)calloc(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + sizeof(uint32_t)); + + if (shared_shaper_id == NULL) + return -1; + + memcpy(shared_shaper_id, params->shared_shaper_id.tc, + sizeof(params->shared_shaper_id.tc)); + + struct rte_tm_node_params tc_node_params[] = { + [0] = { + .shaper_profile_id = params->shaper_profile_id.tc[0], + .shared_shaper_id = &shared_shaper_id[0], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[0]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [1] = { + .shaper_profile_id = params->shaper_profile_id.tc[1], + .shared_shaper_id = &shared_shaper_id[1], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[1]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [2] = { + .shaper_profile_id = params->shaper_profile_id.tc[2], + .shared_shaper_id = &shared_shaper_id[2], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[2]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [3] = { + .shaper_profile_id = params->shaper_profile_id.tc[3], + .shared_shaper_id = &shared_shaper_id[3], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[3]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [4] = { + .shaper_profile_id = params->shaper_profile_id.tc[4], + .shared_shaper_id = &shared_shaper_id[4], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[4]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [5] = { + .shaper_profile_id = params->shaper_profile_id.tc[5], + .shared_shaper_id = &shared_shaper_id[5], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[5]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [6] = { + .shaper_profile_id = params->shaper_profile_id.tc[6], + .shared_shaper_id = &shared_shaper_id[6], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[6]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [7] = { + .shaper_profile_id = params->shaper_profile_id.tc[7], + .shared_shaper_id = &shared_shaper_id[7], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[7]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [8] = { + .shaper_profile_id = params->shaper_profile_id.tc[8], + .shared_shaper_id = &shared_shaper_id[8], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[8]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [9] = { + .shaper_profile_id = params->shaper_profile_id.tc[9], + .shared_shaper_id = &shared_shaper_id[9], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[9]) ? 
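+/*
+ * Note: every ".n_shared_shapers = (&params->shared_shaper_id
+ * .tc_valid[i]) ? 1 : 0" initializer in this array tests the address
+ * of the flag, which is never NULL, so it always evaluates to 1
+ * regardless of the "none" CLI setting.
+ *
+ * Worked example of the node id helpers above, for spp 1 and pps
+ * 4096 (with RTE_SCHED_QUEUES_PER_PIPE = 16 and
+ * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE = 13 in this snapshot):
+ * n_pipes = 4096, n_queues = 4096 * 16 = 65536, n_tc = 4096 * 13 =
+ * 53248, so queue ids occupy [0, 65535], tc ids [65536, 118783],
+ * pipe ids [118784, 122879], the subport id is 122880 and the root
+ * id is 122881.
+ */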
1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [10] = { + .shaper_profile_id = params->shaper_profile_id.tc[10], + .shared_shaper_id = &shared_shaper_id[10], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[10]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [11] = { + .shaper_profile_id = params->shaper_profile_id.tc[11], + .shared_shaper_id = &shared_shaper_id[11], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[11]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + + [12] = { + .shaper_profile_id = params->shaper_profile_id.tc[12], + .shared_shaper_id = &shared_shaper_id[12], + .n_shared_shapers = + (¶ms->shared_shaper_id.tc_valid[12]) ? 1 : 0, + .nonleaf = { + .n_sp_priorities = 1, + }, + }, + }; + + struct rte_tm_node_params queue_node_params = { + .shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE, + }; + + struct rte_tm_error error; + uint32_t n_spp = params->n_spp, n_pps = params->n_pps, s; + int status; + uint16_t port_id; + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return -1; + + /* Hierarchy level 0: Root node */ + status = rte_tm_node_add(port_id, + root_node_id(n_spp, n_pps), + RTE_TM_NODE_ID_NULL, + 0, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &root_node_params, + &error); + if (status) + return -1; + + /* Hierarchy level 1: Subport nodes */ + for (s = 0; s < params->n_spp; s++) { + uint32_t p; + + status = rte_tm_node_add(port_id, + subport_node_id(n_spp, n_pps, s), + root_node_id(n_spp, n_pps), + 0, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &subport_node_params, + &error); + if (status) + return -1; + + /* Hierarchy level 2: Pipe nodes */ + for (p = 0; p < params->n_pps; p++) { + uint32_t t; + + status = rte_tm_node_add(port_id, + pipe_node_id(n_spp, n_pps, s, p), + subport_node_id(n_spp, n_pps, s), + 0, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &pipe_node_params, + &error); + if (status) + return -1; + + /* Hierarchy level 3: Traffic class nodes */ + for (t = 0; t < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; t++) { + uint32_t q; + + status = rte_tm_node_add(port_id, + tc_node_id(n_spp, n_pps, s, p, t), + pipe_node_id(n_spp, n_pps, s, p), + t, + 1, + RTE_TM_NODE_LEVEL_ID_ANY, + &tc_node_params[t], + &error); + if (status) + return -1; + + /* Hierarchy level 4: Queue nodes */ + if (t < RTE_SCHED_TRAFFIC_CLASS_BE) { + /* Strict-priority traffic class queues */ + q = 0; + status = rte_tm_node_add(port_id, + queue_node_id(n_spp, n_pps, s, p, t, q), + tc_node_id(n_spp, n_pps, s, p, t), + 0, + params->weight.queue[q], + RTE_TM_NODE_LEVEL_ID_ANY, + &queue_node_params, + &error); + if (status) + return -1; + + continue; + } + /* Best-effort traffic class queues */ + for (q = 0; q < RTE_SCHED_BE_QUEUES_PER_PIPE; q++) { + status = rte_tm_node_add(port_id, + queue_node_id(n_spp, n_pps, s, p, t, q), + tc_node_id(n_spp, n_pps, s, p, t), + 0, + params->weight.queue[q], + RTE_TM_NODE_LEVEL_ID_ANY, + &queue_node_params, + &error); + if (status) + return -1; + } + } /* TC */ + } /* Pipe */ + } /* Subport */ + + return 0; +} + + +/** + * tmgr hierarchy-default + * spp <n_subports_per_port> + * pps <n_pipes_per_subport> + * shaper profile + * port <profile_id> + * subport <profile_id> + * pipe <profile_id> + * tc0 <profile_id> + * tc1 <profile_id> + * tc2 <profile_id> + * tc3 <profile_id> + * tc4 <profile_id> + * tc5 <profile_id> + * tc6 <profile_id> + * tc7 <profile_id> + * tc8 <profile_id> + * tc9 <profile_id> + * tc10 <profile_id> + * tc11 <profile_id> + * tc12 <profile_id> + * shared shaper + * tc0 <id | none> + * tc1 
<id | none> + * tc2 <id | none> + * tc3 <id | none> + * tc4 <id | none> + * tc5 <id | none> + * tc6 <id | none> + * tc7 <id | none> + * tc8 <id | none> + * tc9 <id | none> + * tc10 <id | none> + * tc11 <id | none> + * tc12 <id | none> + * weight + * queue <q12> ... <q15> + */ +static void +cmd_tmgr_hierarchy_default(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct tmgr_hierarchy_default_params p; + int i, j, status; + + memset(&p, 0, sizeof(p)); + + if (n_tokens != 74) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "hierarchy-default") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy-default"); + return; + } + + if (strcmp(tokens[2], "spp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp"); + return; + } + + if (softnic_parser_read_uint32(&p.n_spp, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_subports_per_port"); + return; + } + + if (strcmp(tokens[4], "pps") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps"); + return; + } + + if (softnic_parser_read_uint32(&p.n_pps, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_pipes_per_subport"); + return; + } + + /* Shaper profile */ + + if (strcmp(tokens[6], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[7], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (strcmp(tokens[8], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.port, tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port profile id"); + return; + } + + if (strcmp(tokens[10], "subport") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "subport"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.subport, tokens[11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "subport profile id"); + return; + } + + if (strcmp(tokens[12], "pipe") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipe"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.pipe, tokens[13]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "pipe_profile_id"); + return; + } + + if (strcmp(tokens[14], "tc0") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[0], tokens[15]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc0 profile id"); + return; + } + + if (strcmp(tokens[16], "tc1") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[1], tokens[17]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc1 profile id"); + return; + } + + if (strcmp(tokens[18], "tc2") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[2], tokens[19]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc2 profile id"); + return; + } + + if (strcmp(tokens[20], "tc3") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[3], tokens[21]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc3 profile id"); + return; + } + + if (strcmp(tokens[22], "tc4") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4"); + return; + } + + if 
(softnic_parser_read_uint32(&p.shaper_profile_id.tc[4], tokens[23]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc4 profile id"); + return; + } + + if (strcmp(tokens[24], "tc5") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc5"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[5], tokens[25]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc5 profile id"); + return; + } + + if (strcmp(tokens[26], "tc6") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[6], tokens[27]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc6 profile id"); + return; + } + + if (strcmp(tokens[28], "tc7") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[7], tokens[29]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc7 profile id"); + return; + } + + if (strcmp(tokens[30], "tc8") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[8], tokens[31]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc8 profile id"); + return; + } + + if (strcmp(tokens[32], "tc9") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[9], tokens[33]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc9 profile id"); + return; + } + + if (strcmp(tokens[34], "tc10") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[10], tokens[35]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc10 profile id"); + return; + } + + if (strcmp(tokens[36], "tc11") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc11"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[11], tokens[37]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc11 profile id"); + return; + } + + if (strcmp(tokens[38], "tc12") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12"); + return; + } + + if (softnic_parser_read_uint32(&p.shaper_profile_id.tc[12], tokens[39]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "tc12 profile id"); + return; + } + + /* Shared shaper */ + + if (strcmp(tokens[40], "shared") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shared"); + return; + } + + if (strcmp(tokens[41], "shaper") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "shaper"); + return; + } + + if (strcmp(tokens[42], "tc0") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc0"); + return; + } + + if (strcmp(tokens[43], "none") == 0) + p.shared_shaper_id.tc_valid[0] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[0], + tokens[43]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc0"); + return; + } + + p.shared_shaper_id.tc_valid[0] = 1; + } + + if (strcmp(tokens[44], "tc1") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc1"); + return; + } + + if (strcmp(tokens[45], "none") == 0) + p.shared_shaper_id.tc_valid[1] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[1], + tokens[45]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc1"); + return; + } + + p.shared_shaper_id.tc_valid[1] = 1; + } + + if (strcmp(tokens[46], "tc2") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc2"); + return; + } + + if (strcmp(tokens[47], "none") == 0) + p.shared_shaper_id.tc_valid[2] = 0; 
+ else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[2], + tokens[47]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc2"); + return; + } + + p.shared_shaper_id.tc_valid[2] = 1; + } + + if (strcmp(tokens[48], "tc3") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc3"); + return; + } + + if (strcmp(tokens[49], "none") == 0) + p.shared_shaper_id.tc_valid[3] = 0; + else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[3], + tokens[49]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc3"); + return; + } + + p.shared_shaper_id.tc_valid[3] = 1; + } + + if (strcmp(tokens[50], "tc4") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc4"); + return; + } + + if (strcmp(tokens[51], "none") == 0) { + p.shared_shaper_id.tc_valid[4] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[4], + tokens[51]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc4"); + return; + } + + p.shared_shaper_id.tc_valid[4] = 1; + } + + if (strcmp(tokens[52], "tc5") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc5"); + return; + } + + if (strcmp(tokens[53], "none") == 0) { + p.shared_shaper_id.tc_valid[5] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[5], + tokens[53]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc5"); + return; + } + + p.shared_shaper_id.tc_valid[5] = 1; + } + + if (strcmp(tokens[54], "tc6") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc6"); + return; + } + + if (strcmp(tokens[55], "none") == 0) { + p.shared_shaper_id.tc_valid[6] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[6], + tokens[55]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc6"); + return; + } + + p.shared_shaper_id.tc_valid[6] = 1; + } + + if (strcmp(tokens[56], "tc7") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc7"); + return; + } + + if (strcmp(tokens[57], "none") == 0) { + p.shared_shaper_id.tc_valid[7] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[7], + tokens[57]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc7"); + return; + } + + p.shared_shaper_id.tc_valid[7] = 1; + } + + if (strcmp(tokens[58], "tc8") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc8"); + return; + } + + if (strcmp(tokens[59], "none") == 0) { + p.shared_shaper_id.tc_valid[8] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[8], + tokens[59]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc8"); + return; + } + + p.shared_shaper_id.tc_valid[8] = 1; + } + + if (strcmp(tokens[60], "tc9") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc9"); + return; + } + + if (strcmp(tokens[61], "none") == 0) { + p.shared_shaper_id.tc_valid[9] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[9], + tokens[61]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc9"); + return; + } + + p.shared_shaper_id.tc_valid[9] = 1; + } + + if (strcmp(tokens[62], "tc10") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc10"); + return; + } + + if (strcmp(tokens[63], "none") == 0) { + p.shared_shaper_id.tc_valid[10] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[10], + tokens[63]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc10"); + return; + } + + p.shared_shaper_id.tc_valid[10] = 1; + } + + if (strcmp(tokens[64], "tc11") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, 
"tc11"); + return; + } + + if (strcmp(tokens[65], "none") == 0) { + p.shared_shaper_id.tc_valid[11] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[11], + tokens[65]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc11"); + return; + } + + p.shared_shaper_id.tc_valid[11] = 1; + } + + if (strcmp(tokens[66], "tc12") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc12"); + return; + } + + if (strcmp(tokens[67], "none") == 0) { + p.shared_shaper_id.tc_valid[12] = 0; + } else { + if (softnic_parser_read_uint32(&p.shared_shaper_id.tc[12], + tokens[67]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "shared shaper tc12"); + return; + } + + p.shared_shaper_id.tc_valid[12] = 1; + } + + /* Weight */ + + if (strcmp(tokens[68], "weight") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "weight"); + return; + } + + if (strcmp(tokens[69], "queue") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "queue"); + return; + } + + for (i = 0, j = 0; i < 16; i++) { + if (i < RTE_SCHED_TRAFFIC_CLASS_BE) { + p.weight.queue[i] = 1; + } else { + if (softnic_parser_read_uint32(&p.weight.queue[i], + tokens[70 + j]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "weight queue"); + return; + } + j++; + } + } + + status = tmgr_hierarchy_default(softnic, &p); + if (status != 0) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr hierarchy commit + */ +static void +cmd_tmgr_hierarchy_commit(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_tm_error error; + uint16_t port_id; + int status; + + if (n_tokens != 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "hierarchy") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "hierarchy"); + return; + } + + if (strcmp(tokens[2], "commit") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "commit"); + return; + } + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status != 0) + return; + + status = rte_tm_hierarchy_commit(port_id, 1, &error); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tmgr <tmgr_name> + */ +static void +cmd_tmgr(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *name; + struct softnic_tmgr_port *tmgr_port; + + if (n_tokens != 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + tmgr_port = softnic_tmgr_port_create(softnic, name); + if (tmgr_port == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * tap <tap_name> + */ +static void +cmd_tap(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *name; + struct softnic_tap *tap; + + if (n_tokens != 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + tap = softnic_tap_create(softnic, name); + if (tap == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * cryptodev <tap_name> dev <device_name> | dev_id <device_id> + * queue <n_queues> <queue_size> max_sessions <n_sessions> + **/ + +static void +cmd_cryptodev(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_cryptodev_params params; + char *name; + + memset(¶ms, 0, sizeof(params)); + if (n_tokens != 9) { + 
snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "dev") == 0) + params.dev_name = tokens[3]; + else if (strcmp(tokens[2], "dev_id") == 0) { + if (softnic_parser_read_uint32(¶ms.dev_id, tokens[3]) < 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "dev_id"); + return; + } + } else { + snprintf(out, out_size, MSG_ARG_INVALID, + "cryptodev"); + return; + } + + if (strcmp(tokens[4], "queue")) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "4"); + return; + } + + if (softnic_parser_read_uint32(¶ms.n_queues, tokens[5]) < 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "q"); + return; + } + + if (softnic_parser_read_uint32(¶ms.queue_size, tokens[6]) < 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "queue_size"); + return; + } + + if (strcmp(tokens[7], "max_sessions")) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "4"); + return; + } + + if (softnic_parser_read_uint32(¶ms.session_pool_size, tokens[8]) + < 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "q"); + return; + } + + if (softnic_cryptodev_create(softnic, name, ¶ms) == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * port in action profile <profile_name> + * [filter match | mismatch offset <key_offset> mask <key_mask> key <key_value> port <port_id>] + * [balance offset <key_offset> mask <key_mask> port <port_id0> ... <port_id15>] + */ +static void +cmd_port_in_action_profile(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_port_in_action_profile_params p; + struct softnic_port_in_action_profile *ap; + char *name; + uint32_t t0; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (strcmp(tokens[2], "action") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action"); + return; + } + + if (strcmp(tokens[3], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + name = tokens[4]; + + t0 = 5; + + if (t0 < n_tokens && + (strcmp(tokens[t0], "filter") == 0)) { + uint32_t size; + + if (n_tokens < t0 + 10) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "port in action profile filter"); + return; + } + + if (strcmp(tokens[t0 + 1], "match") == 0) { + p.fltr.filter_on_match = 1; + } else if (strcmp(tokens[t0 + 1], "mismatch") == 0) { + p.fltr.filter_on_match = 0; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, "match or mismatch"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.fltr.key_offset, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 4], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE; + if ((softnic_parse_hex_string(tokens[t0 + 5], + p.fltr.key_mask, &size) != 0) || + size != RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 6], "key") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key"); + return; + } + + size = RTE_PORT_IN_ACTION_FLTR_KEY_SIZE; + if ((softnic_parse_hex_string(tokens[t0 + 7], + p.fltr.key, &size) != 0) || + size != 
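+/*
+ * The filter mask and key hex strings must decode to exactly
+ * RTE_PORT_IN_ACTION_FLTR_KEY_SIZE bytes (16, i.e. 32 hex digits),
+ * e.g. "mask FFFFFFFF000000000000000000000000".
+ */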
RTE_PORT_IN_ACTION_FLTR_KEY_SIZE) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_value"); + return; + } + + if (strcmp(tokens[t0 + 8], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&p.fltr.port_id, + tokens[t0 + 9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_FLTR; + t0 += 10; + } /* filter */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "balance") == 0)) { + uint32_t i; + + if (n_tokens < t0 + 22) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "port in action profile balance"); + return; + } + + if (strcmp(tokens[t0 + 1], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.lb.key_offset, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 3], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX; + if (softnic_parse_hex_string(tokens[t0 + 4], + p.lb.key_mask, &p.lb.key_size) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 5], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + for (i = 0; i < 16; i++) + if (softnic_parser_read_uint32(&p.lb.port_id[i], + tokens[t0 + 6 + i]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + p.action_mask |= 1LLU << RTE_PORT_IN_ACTION_LB; + t0 += 22; + } /* balance */ + + if (t0 < n_tokens) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + ap = softnic_port_in_action_profile_create(softnic, name, &p); + if (ap == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * table action profile <profile_name> + * ipv4 | ipv6 + * offset <ip_offset> + * fwd + * [balance offset <key_offset> mask <key_mask> outoffset <out_offset>] + * [meter srtcm | trtcm + * tc <n_tc> + * stats none | pkts | bytes | both] + * [tm spp <n_subports_per_port> pps <n_pipes_per_subport>] + * [encap ether | vlan | qinq | mpls | pppoe | qinq_pppoe | + * vxlan offset <ether_offset> ipv4 | ipv6 vlan on | off] + * [nat src | dst + * proto udp | tcp] + * [ttl drop | fwd + * stats none | pkts] + * [stats pkts | bytes | both] + * [time] + * [tag] + * [decap] + * + */ +static void +cmd_table_action_profile(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_table_action_profile_params p; + struct softnic_table_action_profile *ap; + char *name; + uint32_t t0; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "action") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "action"); + return; + } + + if (strcmp(tokens[2], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + name = tokens[3]; + + if (strcmp(tokens[4], "ipv4") == 0) { + p.common.ip_version = 1; + } else if (strcmp(tokens[4], "ipv6") == 0) { + p.common.ip_version = 0; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, "ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[5], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.common.ip_offset, + tokens[6]) != 0) { + snprintf(out, out_size, 
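+/*
+ * Minimal accepted form (8 tokens): "table action profile AP0 ipv4
+ * offset 270 fwd". Offsets are measured from the start of the mbuf;
+ * 270 is the customary value from the ip_pipeline examples, i.e.
+ * sizeof(struct rte_mbuf) (128) + RTE_PKTMBUF_HEADROOM (128) + a
+ * 14-byte Ethernet header (assumption: default headroom).
+ */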
MSG_ARG_INVALID, "ip_offset"); + return; + } + + if (strcmp(tokens[7], "fwd") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "fwd"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_FWD; + + t0 = 8; + if (t0 < n_tokens && + (strcmp(tokens[t0], "balance") == 0)) { + if (n_tokens < t0 + 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "table action profile balance"); + return; + } + + if (strcmp(tokens[t0 + 1], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.lb.key_offset, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 3], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + p.lb.key_size = RTE_PORT_IN_ACTION_LB_KEY_SIZE_MAX; + if (softnic_parse_hex_string(tokens[t0 + 4], + p.lb.key_mask, &p.lb.key_size) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 5], "outoffset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "outoffset"); + return; + } + + if (softnic_parser_read_uint32(&p.lb.out_offset, + tokens[t0 + 6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "out_offset"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_LB; + t0 += 7; + } /* balance */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "meter") == 0)) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile meter"); + return; + } + + if (strcmp(tokens[t0 + 1], "srtcm") == 0) { + p.mtr.alg = RTE_TABLE_ACTION_METER_SRTCM; + } else if (strcmp(tokens[t0 + 1], "trtcm") == 0) { + p.mtr.alg = RTE_TABLE_ACTION_METER_TRTCM; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "srtcm or trtcm"); + return; + } + + if (strcmp(tokens[t0 + 2], "tc") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "tc"); + return; + } + + if (softnic_parser_read_uint32(&p.mtr.n_tc, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_tc"); + return; + } + + if (strcmp(tokens[t0 + 4], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[t0 + 5], "none") == 0) { + p.mtr.n_packets_enabled = 0; + p.mtr.n_bytes_enabled = 0; + } else if (strcmp(tokens[t0 + 5], "pkts") == 0) { + p.mtr.n_packets_enabled = 1; + p.mtr.n_bytes_enabled = 0; + } else if (strcmp(tokens[t0 + 5], "bytes") == 0) { + p.mtr.n_packets_enabled = 0; + p.mtr.n_bytes_enabled = 1; + } else if (strcmp(tokens[t0 + 5], "both") == 0) { + p.mtr.n_packets_enabled = 1; + p.mtr.n_bytes_enabled = 1; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "none or pkts or bytes or both"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_MTR; + t0 += 6; + } /* meter */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "tm") == 0)) { + if (n_tokens < t0 + 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile tm"); + return; + } + + if (strcmp(tokens[t0 + 1], "spp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "spp"); + return; + } + + if (softnic_parser_read_uint32(&p.tm.n_subports_per_port, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "n_subports_per_port"); + return; + } + + if (strcmp(tokens[t0 + 3], "pps") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pps"); + return; + } + + if (softnic_parser_read_uint32(&p.tm.n_pipes_per_subport, + tokens[t0 + 4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "n_pipes_per_subport"); + return; + 
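+/*
+ * In the meter clause above, srtcm is the single-rate three color
+ * marker of RFC 2697 and trtcm the two-rate marker of RFC 2698; n_tc
+ * meters are instantiated per table entry. For the tm clause, spp and
+ * pps must match the geometry given to "tmgr hierarchy-default" so
+ * that the computed queue ids line up.
+ */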
} + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TM; + t0 += 5; + } /* tm */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "encap") == 0)) { + uint32_t n_extra_tokens = 0; + + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "action profile encap"); + return; + } + + if (strcmp(tokens[t0 + 1], "ether") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_ETHER; + } else if (strcmp(tokens[t0 + 1], "vlan") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VLAN; + } else if (strcmp(tokens[t0 + 1], "qinq") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ; + } else if (strcmp(tokens[t0 + 1], "mpls") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS; + } else if (strcmp(tokens[t0 + 1], "pppoe") == 0) { + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE; + } else if (strcmp(tokens[t0 + 1], "vxlan") == 0) { + if (n_tokens < t0 + 2 + 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "action profile encap vxlan"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "vxlan: offset"); + return; + } + + if (softnic_parser_read_uint32(&p.encap.vxlan.data_offset, + tokens[t0 + 2 + 1]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "vxlan: ether_offset"); + return; + } + + if (strcmp(tokens[t0 + 2 + 2], "ipv4") == 0) + p.encap.vxlan.ip_version = 1; + else if (strcmp(tokens[t0 + 2 + 2], "ipv6") == 0) + p.encap.vxlan.ip_version = 0; + else { + snprintf(out, out_size, MSG_ARG_INVALID, + "vxlan: ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[t0 + 2 + 3], "vlan") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "vxlan: vlan"); + return; + } + + if (strcmp(tokens[t0 + 2 + 4], "on") == 0) + p.encap.vxlan.vlan = 1; + else if (strcmp(tokens[t0 + 2 + 4], "off") == 0) + p.encap.vxlan.vlan = 0; + else { + snprintf(out, out_size, MSG_ARG_INVALID, + "vxlan: on or off"); + return; + } + + p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN; + n_extra_tokens = 5; + + } else if (strcmp(tokens[t0 + 1], "qinq_pppoe") == 0) { + p.encap.encap_mask = + 1LLU << RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE; + } else { + snprintf(out, out_size, MSG_ARG_MISMATCH, "encap"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP; + t0 += 2 + n_extra_tokens; + } /* encap */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "nat") == 0)) { + if (n_tokens < t0 + 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile nat"); + return; + } + + if (strcmp(tokens[t0 + 1], "src") == 0) { + p.nat.source_nat = 1; + } else if (strcmp(tokens[t0 + 1], "dst") == 0) { + p.nat.source_nat = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "src or dst"); + return; + } + + if (strcmp(tokens[t0 + 2], "proto") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "proto"); + return; + } + + if (strcmp(tokens[t0 + 3], "tcp") == 0) { + p.nat.proto = 0x06; + } else if (strcmp(tokens[t0 + 3], "udp") == 0) { + p.nat.proto = 0x11; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "tcp or udp"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_NAT; + t0 += 4; + } /* nat */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "ttl") == 0)) { + if (n_tokens < t0 + 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile ttl"); + return; + } + + if (strcmp(tokens[t0 + 1], "drop") == 0) { + p.ttl.drop = 1; + } else if (strcmp(tokens[t0 + 1], "fwd") == 0) { + p.ttl.drop = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "drop or 
fwd"); + return; + } + + if (strcmp(tokens[t0 + 2], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[t0 + 3], "none") == 0) { + p.ttl.n_packets_enabled = 0; + } else if (strcmp(tokens[t0 + 3], "pkts") == 0) { + p.ttl.n_packets_enabled = 1; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "none or pkts"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TTL; + t0 += 4; + } /* ttl */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "stats") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile stats"); + return; + } + + if (strcmp(tokens[t0 + 1], "pkts") == 0) { + p.stats.n_packets_enabled = 1; + p.stats.n_bytes_enabled = 0; + } else if (strcmp(tokens[t0 + 1], "bytes") == 0) { + p.stats.n_packets_enabled = 0; + p.stats.n_bytes_enabled = 1; + } else if (strcmp(tokens[t0 + 1], "both") == 0) { + p.stats.n_packets_enabled = 1; + p.stats.n_bytes_enabled = 1; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "pkts or bytes or both"); + return; + } + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_STATS; + t0 += 2; + } /* stats */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "time") == 0)) { + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TIME; + t0 += 1; + } /* time */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "tag") == 0)) { + p.action_mask |= 1LLU << RTE_TABLE_ACTION_TAG; + t0 += 1; + } /* tag */ + + if (t0 < n_tokens && + (strcmp(tokens[t0], "decap") == 0)) { + p.action_mask |= 1LLU << RTE_TABLE_ACTION_DECAP; + t0 += 1; + } /* decap */ + + if (t0 < n_tokens && (strcmp(tokens[t0], "sym_crypto") == 0)) { + struct softnic_cryptodev *cryptodev; + + if (n_tokens < t0 + 5 || + strcmp(tokens[t0 + 1], "dev") || + strcmp(tokens[t0 + 3], "offset")) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "table action profile sym_crypto"); + return; + } + + cryptodev = softnic_cryptodev_find(softnic, tokens[t0 + 2]); + if (cryptodev == NULL) { + snprintf(out, out_size, MSG_ARG_INVALID, + "table action profile sym_crypto"); + return; + } + + p.sym_crypto.cryptodev_id = cryptodev->dev_id; + + if (softnic_parser_read_uint32(&p.sym_crypto.op_offset, + tokens[t0 + 4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "table action profile sym_crypto"); + return; + } + + p.sym_crypto.mp_create = cryptodev->mp_create; + p.sym_crypto.mp_init = cryptodev->mp_init; + + p.action_mask |= 1LLU << RTE_TABLE_ACTION_SYM_CRYPTO; + + t0 += 5; + } /* sym_crypto */ + + if (t0 < n_tokens) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + ap = softnic_table_action_profile_create(softnic, name, &p); + if (ap == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> + * period <timer_period_ms> + * offset_port_id <offset_port_id> + */ +static void +cmd_pipeline(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct pipeline_params p; + char *name; + struct pipeline *pipeline; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + name = tokens[1]; + + if (strcmp(tokens[2], "period") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "period"); + return; + } + + if (softnic_parser_read_uint32(&p.timer_period_ms, + tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "timer_period_ms"); + return; + } + + if (strcmp(tokens[4], "offset_port_id") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, 
"offset_port_id"); + return; + } + + if (softnic_parser_read_uint32(&p.offset_port_id, + tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "offset_port_id"); + return; + } + + pipeline = softnic_pipeline_create(softnic, name, &p); + if (pipeline == NULL) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> port in + * bsz <burst_size> + * link <link_name> rxq <queue_id> + * | swq <swq_name> + * | tmgr <tmgr_name> + * | tap <tap_name> mempool <mempool_name> mtu <mtu> + * | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt> + * | cryptodev <cryptodev_name> rxq <queue_id> + * [action <port_in_action_profile_name>] + * [disabled] + */ +static void +cmd_pipeline_port_in(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_port_in_params p; + char *pipeline_name; + uint32_t t0; + int enabled, status; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (strcmp(tokens[4], "bsz") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz"); + return; + } + + if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "burst_size"); + return; + } + + t0 = 6; + + if (strcmp(tokens[t0], "link") == 0) { + if (n_tokens < t0 + 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in link"); + return; + } + + p.type = PORT_IN_RXQ; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + + if (strcmp(tokens[t0 + 2], "rxq") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq"); + return; + } + + if (softnic_parser_read_uint16(&p.rxq.queue_id, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "queue_id"); + return; + } + t0 += 4; + } else if (strcmp(tokens[t0], "swq") == 0) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in swq"); + return; + } + + p.type = PORT_IN_SWQ; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + + t0 += 2; + } else if (strcmp(tokens[t0], "tmgr") == 0) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in tmgr"); + return; + } + + p.type = PORT_IN_TMGR; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + + t0 += 2; + } else if (strcmp(tokens[t0], "tap") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in tap"); + return; + } + + p.type = PORT_IN_TAP; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + + if (strcmp(tokens[t0 + 2], "mempool") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "mempool"); + return; + } + + p.tap.mempool_name = tokens[t0 + 3]; + + if (strcmp(tokens[t0 + 4], "mtu") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "mtu"); + return; + } + + if (softnic_parser_read_uint32(&p.tap.mtu, + tokens[t0 + 5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "mtu"); + return; + } + + t0 += 6; + } else if (strcmp(tokens[t0], "source") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in source"); + return; + } + + p.type = PORT_IN_SOURCE; + + if (strcmp(tokens[t0 + 1], "mempool") != 0) { + 
snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "mempool"); + return; + } + + p.source.mempool_name = tokens[t0 + 2]; + + if (strcmp(tokens[t0 + 3], "file") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "file"); + return; + } + + p.source.file_name = tokens[t0 + 4]; + + if (strcmp(tokens[t0 + 5], "bpp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "bpp"); + return; + } + + if (softnic_parser_read_uint32(&p.source.n_bytes_per_pkt, + tokens[t0 + 6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "n_bytes_per_pkt"); + return; + } + + t0 += 7; + } else if (strcmp(tokens[t0], "cryptodev") == 0) { + if (n_tokens < t0 + 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port in cryptodev"); + return; + } + + p.type = PORT_IN_CRYPTODEV; + + strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name)); + if (softnic_parser_read_uint16(&p.rxq.queue_id, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "rxq"); + return; + } + + p.cryptodev.arg_callback = NULL; + p.cryptodev.f_callback = NULL; + + t0 += 4; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]); + return; + } + + if (n_tokens > t0 && + (strcmp(tokens[t0], "action") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "action"); + return; + } + + strlcpy(p.action_profile_name, tokens[t0 + 1], + sizeof(p.action_profile_name)); + + t0 += 2; + } + + enabled = 1; + if (n_tokens > t0 && + (strcmp(tokens[t0], "disabled") == 0)) { + enabled = 0; + + t0 += 1; + } + + if (n_tokens != t0) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = softnic_pipeline_port_in_create(softnic, + pipeline_name, + &p, + enabled); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> port out + * bsz <burst_size> + * link <link_name> txq <txq_id> + * | swq <swq_name> + * | tmgr <tmgr_name> + * | tap <tap_name> + * | sink [file <file_name> pkts <max_n_pkts>] + * | cryptodev <cryptodev_name> txq <txq_id> offset <crypto_op_offset> + */ +static void +cmd_pipeline_port_out(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_port_out_params p; + char *pipeline_name; + int status; + + memset(&p, 0, sizeof(p)); + + if (n_tokens < 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "out") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out"); + return; + } + + if (strcmp(tokens[4], "bsz") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "bsz"); + return; + } + + if (softnic_parser_read_uint32(&p.burst_size, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "burst_size"); + return; + } + + if (strcmp(tokens[6], "link") == 0) { + if (n_tokens != 10) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out link"); + return; + } + + p.type = PORT_OUT_TXQ; + + strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name)); + + if (strcmp(tokens[8], "txq") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq"); + return; + } + + if (softnic_parser_read_uint16(&p.txq.queue_id, + tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "queue_id"); + return; + } + } else if (strcmp(tokens[6], "swq") == 0) { + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline port out swq"); + return; + } 
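+/*
+ * Example (8 tokens): "pipeline PIPELINE0 port out bsz 32 swq SWQ0".
+ * A sink with no file arguments ("... port out bsz 32 sink", 7
+ * tokens) silently drops packets.
+ */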
+
+		p.type = PORT_OUT_SWQ;
+
+		strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name));
+	} else if (strcmp(tokens[6], "tmgr") == 0) {
+		if (n_tokens != 8) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH,
+				"pipeline port out tmgr");
+			return;
+		}
+
+		p.type = PORT_OUT_TMGR;
+
+		strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name));
+	} else if (strcmp(tokens[6], "tap") == 0) {
+		if (n_tokens != 8) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH,
+				"pipeline port out tap");
+			return;
+		}
+
+		p.type = PORT_OUT_TAP;
+
+		strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name));
+	} else if (strcmp(tokens[6], "sink") == 0) {
+		if ((n_tokens != 7) && (n_tokens != 11)) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH,
+				"pipeline port out sink");
+			return;
+		}
+
+		p.type = PORT_OUT_SINK;
+
+		if (n_tokens == 7) {
+			p.sink.file_name = NULL;
+			p.sink.max_n_pkts = 0;
+		} else {
+			if (strcmp(tokens[7], "file") != 0) {
+				snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+					"file");
+				return;
+			}
+
+			p.sink.file_name = tokens[8];
+
+			if (strcmp(tokens[9], "pkts") != 0) {
+				snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pkts");
+				return;
+			}
+
+			if (softnic_parser_read_uint32(&p.sink.max_n_pkts,
+				tokens[10]) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID, "max_n_pkts");
+				return;
+			}
+		}
+	} else if (strcmp(tokens[6], "cryptodev") == 0) {
+		if (n_tokens != 12) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH,
+				"pipeline port out cryptodev");
+			return;
+		}
+
+		p.type = PORT_OUT_CRYPTODEV;
+
+		strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name));
+
+		if (strcmp(tokens[8], "txq")) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH,
+				"pipeline port out cryptodev");
+			return;
+		}
+
+		if (softnic_parser_read_uint16(&p.cryptodev.queue_id, tokens[9])
+			!= 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+			return;
+		}
+
+		if (strcmp(tokens[10], "offset")) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH,
+				"pipeline port out cryptodev");
+			return;
+		}
+
+		if (softnic_parser_read_uint32(&p.cryptodev.op_offset,
+			tokens[11]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "op_offset");
+			return;
+		}
+	} else {
+		snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
+		return;
+	}
+
+	status = softnic_pipeline_port_out_create(softnic, pipeline_name, &p);
+	if (status) {
+		snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+		return;
+	}
+}
+
+/**
+ * pipeline <pipeline_name> table
+ *      match
+ *      acl
+ *          ipv4 | ipv6
+ *          offset <ip_header_offset>
+ *          size <n_rules>
+ *      | array
+ *          offset <key_offset>
+ *          size <n_keys>
+ *      | hash
+ *          ext | lru
+ *          key <key_size>
+ *          mask <key_mask>
+ *          offset <key_offset>
+ *          buckets <n_buckets>
+ *          size <n_keys>
+ *      | lpm
+ *          ipv4 | ipv6
+ *          offset <ip_header_offset>
+ *          size <n_rules>
+ *      | stub
+ *      [action <table_action_profile_name>]
+ */
+static void
+cmd_pipeline_table(struct pmd_internals *softnic,
+	char **tokens,
+	uint32_t n_tokens,
+	char *out,
+	size_t out_size)
+{
+	struct softnic_table_params p;
+	char *pipeline_name;
+	uint32_t t0;
+	int status;
+
+	memset(&p, 0, sizeof(p));
+
+	if (n_tokens < 5) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	pipeline_name = tokens[1];
+
+	if (strcmp(tokens[2], "table") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+		return;
+	}
+
+	if (strcmp(tokens[3], "match") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match");
+		return;
+	}
+
+	t0 = 4;
+	if (strcmp(tokens[t0], "acl") == 0) {
+		if (n_tokens < t0 + 6) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH,
+				"pipeline table acl");
+			return;
+		}
+
+		p.match_type = TABLE_ACL;
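+
+		/*
+		 * For the ACL table, ip_version is encoded as 1 for IPv4 and
+		 * 0 for IPv6 (see the token checks that follow). Illustrative
+		 * CLI line, with a hypothetical IP header offset and rule
+		 * count:
+		 *
+		 *   pipeline PIPELINE0 table match acl ipv4 offset 270 size 4096
+		 */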
+ + if (strcmp(tokens[t0 + 1], "ipv4") == 0) { + p.match.acl.ip_version = 1; + } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) { + p.match.acl.ip_version = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.acl.ip_header_offset, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "ip_header_offset"); + return; + } + + if (strcmp(tokens[t0 + 4], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.acl.n_rules, + tokens[t0 + 5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_rules"); + return; + } + + t0 += 6; + } else if (strcmp(tokens[t0], "array") == 0) { + if (n_tokens < t0 + 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table array"); + return; + } + + p.match_type = TABLE_ARRAY; + + if (strcmp(tokens[t0 + 1], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.array.key_offset, + tokens[t0 + 2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 3], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.array.n_keys, + tokens[t0 + 4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_keys"); + return; + } + + t0 += 5; + } else if (strcmp(tokens[t0], "hash") == 0) { + uint32_t key_mask_size = TABLE_RULE_MATCH_SIZE_MAX; + + if (n_tokens < t0 + 12) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table hash"); + return; + } + + p.match_type = TABLE_HASH; + + if (strcmp(tokens[t0 + 1], "ext") == 0) { + p.match.hash.extendable_bucket = 1; + } else if (strcmp(tokens[t0 + 1], "lru") == 0) { + p.match.hash.extendable_bucket = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ext or lru"); + return; + } + + if (strcmp(tokens[t0 + 2], "key") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "key"); + return; + } + + if ((softnic_parser_read_uint32(&p.match.hash.key_size, + tokens[t0 + 3]) != 0) || + p.match.hash.key_size == 0 || + p.match.hash.key_size > TABLE_RULE_MATCH_SIZE_MAX) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_size"); + return; + } + + if (strcmp(tokens[t0 + 4], "mask") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "mask"); + return; + } + + if ((softnic_parse_hex_string(tokens[t0 + 5], + p.match.hash.key_mask, &key_mask_size) != 0) || + key_mask_size != p.match.hash.key_size) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_mask"); + return; + } + + if (strcmp(tokens[t0 + 6], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.hash.key_offset, + tokens[t0 + 7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 8], "buckets") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "buckets"); + return; + } + + if (softnic_parser_read_uint32(&p.match.hash.n_buckets, + tokens[t0 + 9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_buckets"); + return; + } + + if (strcmp(tokens[t0 + 10], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.hash.n_keys, + tokens[t0 + 11]) != 0) { + snprintf(out, out_size, 
MSG_ARG_INVALID, "n_keys"); + return; + } + + t0 += 12; + } else if (strcmp(tokens[t0], "lpm") == 0) { + if (n_tokens < t0 + 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + "pipeline table lpm"); + return; + } + + p.match_type = TABLE_LPM; + + if (strcmp(tokens[t0 + 1], "ipv4") == 0) { + p.match.lpm.key_size = 4; + } else if (strcmp(tokens[t0 + 1], "ipv6") == 0) { + p.match.lpm.key_size = 16; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ipv4 or ipv6"); + return; + } + + if (strcmp(tokens[t0 + 2], "offset") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset"); + return; + } + + if (softnic_parser_read_uint32(&p.match.lpm.key_offset, + tokens[t0 + 3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key_offset"); + return; + } + + if (strcmp(tokens[t0 + 4], "size") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "size"); + return; + } + + if (softnic_parser_read_uint32(&p.match.lpm.n_rules, + tokens[t0 + 5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_rules"); + return; + } + + t0 += 6; + } else if (strcmp(tokens[t0], "stub") == 0) { + p.match_type = TABLE_STUB; + + t0 += 1; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]); + return; + } + + if (n_tokens > t0 && + (strcmp(tokens[t0], "action") == 0)) { + if (n_tokens < t0 + 2) { + snprintf(out, out_size, MSG_ARG_MISMATCH, "action"); + return; + } + + strlcpy(p.action_profile_name, tokens[t0 + 1], + sizeof(p.action_profile_name)); + + t0 += 2; + } + + if (n_tokens > t0) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = softnic_pipeline_table_create(softnic, pipeline_name, &p); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> port in <port_id> table <table_id> + */ +static void +cmd_pipeline_port_in_table(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t port_id, table_id; + int status; + + if (n_tokens != 7) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + status = softnic_pipeline_port_in_connect_to_table(softnic, + pipeline_name, + port_id, + table_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> port in <port_id> stats read [clear] + */ + +#define MSG_PIPELINE_PORT_IN_STATS \ + "Pkts in: %" PRIu64 "\n" \ + "Pkts dropped by AH: %" PRIu64 "\n" \ + "Pkts dropped by other: %" PRIu64 "\n" + +static void +cmd_pipeline_port_in_stats(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_pipeline_port_in_stats stats; + char *pipeline_name; + uint32_t port_id; + int clear, status; + + if (n_tokens != 7 && + n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = 
tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "stats") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats"); + return; + } + + if (strcmp(tokens[6], "read") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read"); + return; + } + + clear = 0; + if (n_tokens == 8) { + if (strcmp(tokens[7], "clear") != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "clear"); + return; + } + + clear = 1; + } + + status = softnic_pipeline_port_in_stats_read(softnic, + pipeline_name, + port_id, + &stats, + clear); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } + + snprintf(out, out_size, MSG_PIPELINE_PORT_IN_STATS, + stats.stats.n_pkts_in, + stats.n_pkts_dropped_by_ah, + stats.stats.n_pkts_drop); +} + +/** + * pipeline <pipeline_name> port in <port_id> enable + */ +static void +cmd_softnic_pipeline_port_in_enable(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t port_id; + int status; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "enable") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable"); + return; + } + + status = softnic_pipeline_port_in_enable(softnic, pipeline_name, port_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> port in <port_id> disable + */ +static void +cmd_softnic_pipeline_port_in_disable(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t port_id; + int status; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "port") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (strcmp(tokens[3], "in") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "in"); + return; + } + + if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + if (strcmp(tokens[5], "disable") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable"); + return; + } + + status = softnic_pipeline_port_in_disable(softnic, pipeline_name, port_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> port out <port_id> stats read [clear] + */ +#define MSG_PIPELINE_PORT_OUT_STATS \ + "Pkts in: %" PRIu64 "\n" \ + "Pkts dropped by AH: %" PRIu64 "\n" \ + "Pkts dropped by other: %" PRIu64 "\n" + +static void +cmd_pipeline_port_out_stats(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_pipeline_port_out_stats 
stats;
+	char *pipeline_name;
+	uint32_t port_id;
+	int clear, status;
+
+	if (n_tokens != 7 &&
+		n_tokens != 8) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	pipeline_name = tokens[1];
+
+	if (strcmp(tokens[2], "port") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port");
+		return;
+	}
+
+	if (strcmp(tokens[3], "out") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "out");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&port_id, tokens[4]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "port_id");
+		return;
+	}
+
+	if (strcmp(tokens[5], "stats") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+		return;
+	}
+
+	if (strcmp(tokens[6], "read") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+		return;
+	}
+
+	clear = 0;
+	if (n_tokens == 8) {
+		if (strcmp(tokens[7], "clear") != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+			return;
+		}
+
+		clear = 1;
+	}
+
+	status = softnic_pipeline_port_out_stats_read(softnic,
+		pipeline_name,
+		port_id,
+		&stats,
+		clear);
+	if (status) {
+		snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+		return;
+	}
+
+	snprintf(out, out_size, MSG_PIPELINE_PORT_OUT_STATS,
+		stats.stats.n_pkts_in,
+		stats.n_pkts_dropped_by_ah,
+		stats.stats.n_pkts_drop);
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> stats read [clear]
+ */
+#define MSG_PIPELINE_TABLE_STATS \
+	"Pkts in: %" PRIu64 "\n" \
+	"Pkts in with lookup miss: %" PRIu64 "\n" \
+	"Pkts in with lookup hit dropped by AH: %" PRIu64 "\n" \
+	"Pkts in with lookup hit dropped by others: %" PRIu64 "\n" \
+	"Pkts in with lookup miss dropped by AH: %" PRIu64 "\n" \
+	"Pkts in with lookup miss dropped by others: %" PRIu64 "\n"
+
+static void
+cmd_pipeline_table_stats(struct pmd_internals *softnic,
+	char **tokens,
+	uint32_t n_tokens,
+	char *out,
+	size_t out_size)
+{
+	struct rte_pipeline_table_stats stats;
+	char *pipeline_name;
+	uint32_t table_id;
+	int clear, status;
+
+	if (n_tokens != 6 &&
+		n_tokens != 7) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	pipeline_name = tokens[1];
+
+	if (strcmp(tokens[2], "table") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+		return;
+	}
+
+	if (strcmp(tokens[4], "stats") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "stats");
+		return;
+	}
+
+	if (strcmp(tokens[5], "read") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "read");
+		return;
+	}
+
+	clear = 0;
+	if (n_tokens == 7) {
+		if (strcmp(tokens[6], "clear") != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "clear");
+			return;
+		}
+
+		clear = 1;
+	}
+
+	status = softnic_pipeline_table_stats_read(softnic,
+		pipeline_name,
+		table_id,
+		&stats,
+		clear);
+	if (status) {
+		snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+		return;
+	}
+
+	snprintf(out, out_size, MSG_PIPELINE_TABLE_STATS,
+		stats.stats.n_pkts_in,
+		stats.stats.n_pkts_lookup_miss,
+		stats.n_pkts_dropped_by_lkp_hit_ah,
+		stats.n_pkts_dropped_lkp_hit,
+		stats.n_pkts_dropped_by_lkp_miss_ah,
+		stats.n_pkts_dropped_lkp_miss);
+}
+
+/**
+ * <match> ::=
+ *
+ * match
+ *    acl
+ *       priority <priority>
+ *       ipv4 | ipv6 <sa> <sa_depth> <da> <da_depth>
+ *       <sp0> <sp1> <dp0> <dp1> <proto>
+ *    | array <pos>
+ *    | hash
+ *       raw <key>
+ *       | ipv4_5tuple <sa> <da> <sp> <dp> <proto>
+ *       | ipv6_5tuple <sa> <da> <sp> <dp> <proto>
+ *       | ipv4_addr <addr>
+ *       | ipv6_addr <addr>
+ *       | qinq <svlan> <cvlan>
+ * | lpm + * ipv4 | ipv6 <addr> <depth> + */ +struct pkt_key_qinq { + uint16_t ethertype_svlan; + uint16_t svlan; + uint16_t ethertype_cvlan; + uint16_t cvlan; +} __rte_packed; + +struct pkt_key_ipv4_5tuple { + uint8_t time_to_live; + uint8_t proto; + uint16_t hdr_checksum; + uint32_t sa; + uint32_t da; + uint16_t sp; + uint16_t dp; +} __rte_packed; + +struct pkt_key_ipv6_5tuple { + uint16_t payload_length; + uint8_t proto; + uint8_t hop_limit; + uint8_t sa[16]; + uint8_t da[16]; + uint16_t sp; + uint16_t dp; +} __rte_packed; + +struct pkt_key_ipv4_addr { + uint32_t addr; +} __rte_packed; + +struct pkt_key_ipv6_addr { + uint8_t addr[16]; +} __rte_packed; + +static uint32_t +parse_match(char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size, + struct softnic_table_rule_match *m) +{ + memset(m, 0, sizeof(*m)); + + if (n_tokens < 2) + return 0; + + if (strcmp(tokens[0], "match") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "match"); + return 0; + } + + if (strcmp(tokens[1], "acl") == 0) { + if (n_tokens < 14) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } + + m->match_type = TABLE_ACL; + + if (strcmp(tokens[2], "priority") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "priority"); + return 0; + } + + if (softnic_parser_read_uint32(&m->match.acl.priority, + tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "priority"); + return 0; + } + + if (strcmp(tokens[4], "ipv4") == 0) { + struct in_addr saddr, daddr; + + m->match.acl.ip_version = 1; + + if (softnic_parse_ipv4_addr(tokens[5], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + m->match.acl.ipv4.sa = rte_be_to_cpu_32(saddr.s_addr); + + if (softnic_parse_ipv4_addr(tokens[7], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + m->match.acl.ipv4.da = rte_be_to_cpu_32(daddr.s_addr); + } else if (strcmp(tokens[4], "ipv6") == 0) { + struct in6_addr saddr, daddr; + + m->match.acl.ip_version = 0; + + if (softnic_parse_ipv6_addr(tokens[5], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + memcpy(m->match.acl.ipv6.sa, saddr.s6_addr, 16); + + if (softnic_parse_ipv6_addr(tokens[7], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + memcpy(m->match.acl.ipv6.da, daddr.s6_addr, 16); + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, + "ipv4 or ipv6"); + return 0; + } + + if (softnic_parser_read_uint32(&m->match.acl.sa_depth, + tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa_depth"); + return 0; + } + + if (softnic_parser_read_uint32(&m->match.acl.da_depth, + tokens[8]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da_depth"); + return 0; + } + + if (softnic_parser_read_uint16(&m->match.acl.sp0, tokens[9]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sp0"); + return 0; + } + + if (softnic_parser_read_uint16(&m->match.acl.sp1, tokens[10]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sp1"); + return 0; + } + + if (softnic_parser_read_uint16(&m->match.acl.dp0, tokens[11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "dp0"); + return 0; + } + + if (softnic_parser_read_uint16(&m->match.acl.dp1, tokens[12]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "dp1"); + return 0; + } + + if (softnic_parser_read_uint8(&m->match.acl.proto, tokens[13]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "proto"); + return 0; + } + + m->match.acl.proto_mask = 0xff; + + return 14; + } /* acl */ + + if (strcmp(tokens[1], 
"array") == 0) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } + + m->match_type = TABLE_ARRAY; + + if (softnic_parser_read_uint32(&m->match.array.pos, tokens[2]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "pos"); + return 0; + } + + return 3; + } /* array */ + + if (strcmp(tokens[1], "hash") == 0) { + if (n_tokens < 3) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return 0; + } + + m->match_type = TABLE_HASH; + + if (strcmp(tokens[2], "raw") == 0) { + uint32_t key_size = TABLE_RULE_MATCH_SIZE_MAX; + + if (n_tokens < 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_hex_string(tokens[3], + m->match.hash.key, &key_size) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "key"); + return 0; + } + + return 4; + } /* hash raw */ + + if (strcmp(tokens[2], "ipv4_5tuple") == 0) { + struct pkt_key_ipv4_5tuple *ipv4 = + (struct pkt_key_ipv4_5tuple *)m->match.hash.key; + struct in_addr saddr, daddr; + uint16_t sp, dp; + uint8_t proto; + + if (n_tokens < 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_ipv4_addr(tokens[3], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + + if (softnic_parse_ipv4_addr(tokens[4], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + + if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sp"); + return 0; + } + + if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "dp"); + return 0; + } + + if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "proto"); + return 0; + } + + ipv4->sa = saddr.s_addr; + ipv4->da = daddr.s_addr; + ipv4->sp = rte_cpu_to_be_16(sp); + ipv4->dp = rte_cpu_to_be_16(dp); + ipv4->proto = proto; + + return 8; + } /* hash ipv4_5tuple */ + + if (strcmp(tokens[2], "ipv6_5tuple") == 0) { + struct pkt_key_ipv6_5tuple *ipv6 = + (struct pkt_key_ipv6_5tuple *)m->match.hash.key; + struct in6_addr saddr, daddr; + uint16_t sp, dp; + uint8_t proto; + + if (n_tokens < 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_ipv6_addr(tokens[3], &saddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sa"); + return 0; + } + + if (softnic_parse_ipv6_addr(tokens[4], &daddr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "da"); + return 0; + } + + if (softnic_parser_read_uint16(&sp, tokens[5]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "sp"); + return 0; + } + + if (softnic_parser_read_uint16(&dp, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "dp"); + return 0; + } + + if (softnic_parser_read_uint8(&proto, tokens[7]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "proto"); + return 0; + } + + memcpy(ipv6->sa, saddr.s6_addr, 16); + memcpy(ipv6->da, daddr.s6_addr, 16); + ipv6->sp = rte_cpu_to_be_16(sp); + ipv6->dp = rte_cpu_to_be_16(dp); + ipv6->proto = proto; + + return 8; + } /* hash ipv6_5tuple */ + + if (strcmp(tokens[2], "ipv4_addr") == 0) { + struct pkt_key_ipv4_addr *ipv4_addr = + (struct pkt_key_ipv4_addr *)m->match.hash.key; + struct in_addr addr; + + if (n_tokens < 4) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return 0; + } + + if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "addr"); + return 0; + } + + ipv4_addr->addr = addr.s_addr; + + 
return 4;
+		} /* hash ipv4_addr */
+
+		if (strcmp(tokens[2], "ipv6_addr") == 0) {
+			struct pkt_key_ipv6_addr *ipv6_addr =
+				(struct pkt_key_ipv6_addr *)m->match.hash.key;
+			struct in6_addr addr;
+
+			if (n_tokens < 4) {
+				snprintf(out, out_size, MSG_ARG_MISMATCH,
+					tokens[0]);
+				return 0;
+			}
+
+			if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID,
+					"addr");
+				return 0;
+			}
+
+			memcpy(ipv6_addr->addr, addr.s6_addr, 16);
+
+			return 4;
+		} /* hash ipv6_addr */
+
+		if (strcmp(tokens[2], "qinq") == 0) {
+			struct pkt_key_qinq *qinq =
+				(struct pkt_key_qinq *)m->match.hash.key;
+			uint16_t svlan, cvlan;
+
+			if (n_tokens < 5) {
+				snprintf(out, out_size, MSG_ARG_MISMATCH,
+					tokens[0]);
+				return 0;
+			}
+
+			if ((softnic_parser_read_uint16(&svlan, tokens[3]) != 0) ||
+				svlan > 0xFFF) {
+				snprintf(out, out_size, MSG_ARG_INVALID,
+					"svlan");
+				return 0;
+			}
+
+			if ((softnic_parser_read_uint16(&cvlan, tokens[4]) != 0) ||
+				cvlan > 0xFFF) {
+				snprintf(out, out_size, MSG_ARG_INVALID,
+					"cvlan");
+				return 0;
+			}
+
+			qinq->svlan = rte_cpu_to_be_16(svlan);
+			qinq->cvlan = rte_cpu_to_be_16(cvlan);
+
+			return 5;
+		} /* hash qinq */
+
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return 0;
+	} /* hash */
+
+	if (strcmp(tokens[1], "lpm") == 0) {
+		if (n_tokens < 5) {
+			snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+			return 0;
+		}
+
+		m->match_type = TABLE_LPM;
+
+		if (strcmp(tokens[2], "ipv4") == 0) {
+			struct in_addr addr;
+
+			m->match.lpm.ip_version = 1;
+
+			if (softnic_parse_ipv4_addr(tokens[3], &addr) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID,
+					"addr");
+				return 0;
+			}
+
+			m->match.lpm.ipv4 = rte_be_to_cpu_32(addr.s_addr);
+		} else if (strcmp(tokens[2], "ipv6") == 0) {
+			struct in6_addr addr;
+
+			m->match.lpm.ip_version = 0;
+
+			if (softnic_parse_ipv6_addr(tokens[3], &addr) != 0) {
+				snprintf(out, out_size, MSG_ARG_INVALID,
+					"addr");
+				return 0;
+			}
+
+			memcpy(m->match.lpm.ipv6, addr.s6_addr, 16);
+		} else {
+			snprintf(out, out_size, MSG_ARG_MISMATCH,
+				"ipv4 or ipv6");
+			return 0;
+		}
+
+		if (softnic_parser_read_uint8(&m->match.lpm.depth, tokens[4]) != 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID, "depth");
+			return 0;
+		}
+
+		return 5;
+	} /* lpm */
+
+	snprintf(out, out_size, MSG_ARG_MISMATCH,
+		"acl or array or hash or lpm");
+	return 0;
+}
+
+/**
+ * table_action ::=
+ *
+ * action
+ *    fwd
+ *       drop
+ *       | port <port_id>
+ *       | meta
+ *       | table <table_id>
+ *    [balance <out0> ...
<out7>] + * [meter + * tc0 meter <meter_profile_id> policer g <pa> y <pa> r <pa> + * [tc1 meter <meter_profile_id> policer g <pa> y <pa> r <pa> + * tc2 meter <meter_profile_id> policer g <pa> y <pa> r <pa> + * tc3 meter <meter_profile_id> policer g <pa> y <pa> r <pa>]] + * [tm subport <subport_id> pipe <pipe_id>] + * [encap + * ether <da> <sa> + * | vlan <da> <sa> <pcp> <dei> <vid> + * | qinq <da> <sa> <pcp> <dei> <vid> <pcp> <dei> <vid> + * | qinq_pppoe <da> <sa> <pcp> <dei> <vid> <pcp> <dei> <vid> <session_id> + * | mpls unicast | multicast + * <da> <sa> + * label0 <label> <tc> <ttl> + * [label1 <label> <tc> <ttl> + * [label2 <label> <tc> <ttl> + * [label3 <label> <tc> <ttl>]]] + * | pppoe <da> <sa> <session_id>] + * | vxlan ether <da> <sa> + * [vlan <pcp> <dei> <vid>] + * ipv4 <sa> <da> <dscp> <ttl> + * | ipv6 <sa> <da> <flow_label> <dscp> <hop_limit> + * udp <sp> <dp> + * vxlan <vni>] + * [nat ipv4 | ipv6 <addr> <port>] + * [ttl dec | keep] + * [stats] + * [time] + * [tag <tag>] + * [decap <n>] + * [sym_crypto + * encrypt | decrypt + * type + * | cipher + * cipher_algo <algo> cipher_key <key> cipher_iv <iv> + * | cipher_auth + * cipher_algo <algo> cipher_key <key> cipher_iv <iv> + * auth_algo <algo> auth_key <key> digest_size <size> + * | aead + * aead_algo <algo> aead_key <key> aead_iv <iv> aead_aad <aad> + * digest_size <size> + * data_offset <data_offset>] + * + * where: + * <pa> ::= g | y | r | drop + */ +static uint32_t +parse_table_action_fwd(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + if (n_tokens == 0 || + (strcmp(tokens[0], "fwd") != 0)) + return 0; + + tokens++; + n_tokens--; + + if (n_tokens && (strcmp(tokens[0], "drop") == 0)) { + a->fwd.action = RTE_PIPELINE_ACTION_DROP; + a->action_mask |= 1 << RTE_TABLE_ACTION_FWD; + return 1 + 1; + } + + if (n_tokens && (strcmp(tokens[0], "port") == 0)) { + uint32_t id; + + if (n_tokens < 2 || + softnic_parser_read_uint32(&id, tokens[1])) + return 0; + + a->fwd.action = RTE_PIPELINE_ACTION_PORT; + a->fwd.id = id; + a->action_mask |= 1 << RTE_TABLE_ACTION_FWD; + return 1 + 2; + } + + if (n_tokens && (strcmp(tokens[0], "meta") == 0)) { + a->fwd.action = RTE_PIPELINE_ACTION_PORT_META; + a->action_mask |= 1 << RTE_TABLE_ACTION_FWD; + return 1 + 1; + } + + if (n_tokens && (strcmp(tokens[0], "table") == 0)) { + uint32_t id; + + if (n_tokens < 2 || + softnic_parser_read_uint32(&id, tokens[1])) + return 0; + + a->fwd.action = RTE_PIPELINE_ACTION_TABLE; + a->fwd.id = id; + a->action_mask |= 1 << RTE_TABLE_ACTION_FWD; + return 1 + 2; + } + + return 0; +} + +static uint32_t +parse_table_action_balance(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + uint32_t i; + + if (n_tokens == 0 || + (strcmp(tokens[0], "balance") != 0)) + return 0; + + tokens++; + n_tokens--; + + if (n_tokens < RTE_TABLE_ACTION_LB_TABLE_SIZE) + return 0; + + for (i = 0; i < RTE_TABLE_ACTION_LB_TABLE_SIZE; i++) + if (softnic_parser_read_uint32(&a->lb.out[i], tokens[i]) != 0) + return 0; + + a->action_mask |= 1 << RTE_TABLE_ACTION_LB; + return 1 + RTE_TABLE_ACTION_LB_TABLE_SIZE; +} + +static int +parse_policer_action(char *token, enum rte_table_action_policer *a) +{ + if (strcmp(token, "g") == 0) { + *a = RTE_TABLE_ACTION_POLICER_COLOR_GREEN; + return 0; + } + + if (strcmp(token, "y") == 0) { + *a = RTE_TABLE_ACTION_POLICER_COLOR_YELLOW; + return 0; + } + + if (strcmp(token, "r") == 0) { + *a = RTE_TABLE_ACTION_POLICER_COLOR_RED; + return 0; + } + + if (strcmp(token, "drop") == 0) { + *a = 
RTE_TABLE_ACTION_POLICER_DROP; + return 0; + } + + return -1; +} + +static uint32_t +parse_table_action_meter_tc(char **tokens, + uint32_t n_tokens, + struct rte_table_action_mtr_tc_params *mtr) +{ + if (n_tokens < 9 || + strcmp(tokens[0], "meter") || + softnic_parser_read_uint32(&mtr->meter_profile_id, tokens[1]) || + strcmp(tokens[2], "policer") || + strcmp(tokens[3], "g") || + parse_policer_action(tokens[4], &mtr->policer[RTE_COLOR_GREEN]) || + strcmp(tokens[5], "y") || + parse_policer_action(tokens[6], &mtr->policer[RTE_COLOR_YELLOW]) || + strcmp(tokens[7], "r") || + parse_policer_action(tokens[8], &mtr->policer[RTE_COLOR_RED])) + return 0; + + return 9; +} + +static uint32_t +parse_table_action_meter(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + if (n_tokens == 0 || + strcmp(tokens[0], "meter")) + return 0; + + tokens++; + n_tokens--; + + if (n_tokens < 10 || + strcmp(tokens[0], "tc0") || + (parse_table_action_meter_tc(tokens + 1, + n_tokens - 1, + &a->mtr.mtr[0]) == 0)) + return 0; + + tokens += 10; + n_tokens -= 10; + + if (n_tokens == 0 || + strcmp(tokens[0], "tc1")) { + a->mtr.tc_mask = 1; + a->action_mask |= 1 << RTE_TABLE_ACTION_MTR; + return 1 + 10; + } + + if (n_tokens < 30 || + (parse_table_action_meter_tc(tokens + 1, + n_tokens - 1, &a->mtr.mtr[1]) == 0) || + strcmp(tokens[10], "tc2") || + (parse_table_action_meter_tc(tokens + 11, + n_tokens - 11, &a->mtr.mtr[2]) == 0) || + strcmp(tokens[20], "tc3") || + (parse_table_action_meter_tc(tokens + 21, + n_tokens - 21, &a->mtr.mtr[3]) == 0)) + return 0; + + a->mtr.tc_mask = 0xF; + a->action_mask |= 1 << RTE_TABLE_ACTION_MTR; + return 1 + 10 + 3 * 10; +} + +static uint32_t +parse_table_action_tm(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + uint32_t subport_id, pipe_id; + + if (n_tokens < 5 || + strcmp(tokens[0], "tm") || + strcmp(tokens[1], "subport") || + softnic_parser_read_uint32(&subport_id, tokens[2]) || + strcmp(tokens[3], "pipe") || + softnic_parser_read_uint32(&pipe_id, tokens[4])) + return 0; + + a->tm.subport_id = subport_id; + a->tm.pipe_id = pipe_id; + a->action_mask |= 1 << RTE_TABLE_ACTION_TM; + return 5; +} + +static uint32_t +parse_table_action_encap(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + if (n_tokens == 0 || + strcmp(tokens[0], "encap")) + return 0; + + tokens++; + n_tokens--; + + /* ether */ + if (n_tokens && (strcmp(tokens[0], "ether") == 0)) { + if (n_tokens < 3 || + softnic_parse_mac_addr(tokens[1], &a->encap.ether.ether.da) || + softnic_parse_mac_addr(tokens[2], &a->encap.ether.ether.sa)) + return 0; + + a->encap.type = RTE_TABLE_ACTION_ENCAP_ETHER; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + 3; + } + + /* vlan */ + if (n_tokens && (strcmp(tokens[0], "vlan") == 0)) { + uint32_t pcp, dei, vid; + + if (n_tokens < 6 || + softnic_parse_mac_addr(tokens[1], &a->encap.vlan.ether.da) || + softnic_parse_mac_addr(tokens[2], &a->encap.vlan.ether.sa) || + softnic_parser_read_uint32(&pcp, tokens[3]) || + pcp > 0x7 || + softnic_parser_read_uint32(&dei, tokens[4]) || + dei > 0x1 || + softnic_parser_read_uint32(&vid, tokens[5]) || + vid > 0xFFF) + return 0; + + a->encap.vlan.vlan.pcp = pcp & 0x7; + a->encap.vlan.vlan.dei = dei & 0x1; + a->encap.vlan.vlan.vid = vid & 0xFFF; + a->encap.type = RTE_TABLE_ACTION_ENCAP_VLAN; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + 6; + } + + /* qinq */ + if (n_tokens && (strcmp(tokens[0], "qinq") == 0)) { + uint32_t svlan_pcp, svlan_dei, svlan_vid; + 
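+		/*
+		 * Grammar: encap qinq <da> <sa> <pcp> <dei> <vid> <pcp> <dei>
+		 * <vid>, i.e. outer (service) tag first, inner (customer) tag
+		 * second. Illustrative rule fragment, with placeholder MAC
+		 * addresses and VLAN values:
+		 *
+		 *   action fwd port 0 encap qinq aa:bb:cc:dd:ee:ff 00:11:22:33:44:55 0 0 100 0 0 200
+		 */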
uint32_t cvlan_pcp, cvlan_dei, cvlan_vid; + + if (n_tokens < 9 || + softnic_parse_mac_addr(tokens[1], &a->encap.qinq.ether.da) || + softnic_parse_mac_addr(tokens[2], &a->encap.qinq.ether.sa) || + softnic_parser_read_uint32(&svlan_pcp, tokens[3]) || + svlan_pcp > 0x7 || + softnic_parser_read_uint32(&svlan_dei, tokens[4]) || + svlan_dei > 0x1 || + softnic_parser_read_uint32(&svlan_vid, tokens[5]) || + svlan_vid > 0xFFF || + softnic_parser_read_uint32(&cvlan_pcp, tokens[6]) || + cvlan_pcp > 0x7 || + softnic_parser_read_uint32(&cvlan_dei, tokens[7]) || + cvlan_dei > 0x1 || + softnic_parser_read_uint32(&cvlan_vid, tokens[8]) || + cvlan_vid > 0xFFF) + return 0; + + a->encap.qinq.svlan.pcp = svlan_pcp & 0x7; + a->encap.qinq.svlan.dei = svlan_dei & 0x1; + a->encap.qinq.svlan.vid = svlan_vid & 0xFFF; + a->encap.qinq.cvlan.pcp = cvlan_pcp & 0x7; + a->encap.qinq.cvlan.dei = cvlan_dei & 0x1; + a->encap.qinq.cvlan.vid = cvlan_vid & 0xFFF; + a->encap.type = RTE_TABLE_ACTION_ENCAP_QINQ; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + 9; + } + + /* qinq_pppoe */ + if (n_tokens && (strcmp(tokens[0], "qinq_pppoe") == 0)) { + uint32_t svlan_pcp, svlan_dei, svlan_vid; + uint32_t cvlan_pcp, cvlan_dei, cvlan_vid; + + if (n_tokens < 10 || + softnic_parse_mac_addr(tokens[1], + &a->encap.qinq_pppoe.ether.da) || + softnic_parse_mac_addr(tokens[2], + &a->encap.qinq_pppoe.ether.sa) || + softnic_parser_read_uint32(&svlan_pcp, tokens[3]) || + svlan_pcp > 0x7 || + softnic_parser_read_uint32(&svlan_dei, tokens[4]) || + svlan_dei > 0x1 || + softnic_parser_read_uint32(&svlan_vid, tokens[5]) || + svlan_vid > 0xFFF || + softnic_parser_read_uint32(&cvlan_pcp, tokens[6]) || + cvlan_pcp > 0x7 || + softnic_parser_read_uint32(&cvlan_dei, tokens[7]) || + cvlan_dei > 0x1 || + softnic_parser_read_uint32(&cvlan_vid, tokens[8]) || + cvlan_vid > 0xFFF || + softnic_parser_read_uint16(&a->encap.qinq_pppoe.pppoe.session_id, + tokens[9])) + return 0; + + a->encap.qinq_pppoe.svlan.pcp = svlan_pcp & 0x7; + a->encap.qinq_pppoe.svlan.dei = svlan_dei & 0x1; + a->encap.qinq_pppoe.svlan.vid = svlan_vid & 0xFFF; + a->encap.qinq_pppoe.cvlan.pcp = cvlan_pcp & 0x7; + a->encap.qinq_pppoe.cvlan.dei = cvlan_dei & 0x1; + a->encap.qinq_pppoe.cvlan.vid = cvlan_vid & 0xFFF; + a->encap.type = RTE_TABLE_ACTION_ENCAP_QINQ_PPPOE; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + 10; + } + + /* mpls */ + if (n_tokens && (strcmp(tokens[0], "mpls") == 0)) { + uint32_t label, tc, ttl; + + if (n_tokens < 8) + return 0; + + if (strcmp(tokens[1], "unicast") == 0) + a->encap.mpls.unicast = 1; + else if (strcmp(tokens[1], "multicast") == 0) + a->encap.mpls.unicast = 0; + else + return 0; + + if (softnic_parse_mac_addr(tokens[2], &a->encap.mpls.ether.da) || + softnic_parse_mac_addr(tokens[3], &a->encap.mpls.ether.sa) || + strcmp(tokens[4], "label0") || + softnic_parser_read_uint32(&label, tokens[5]) || + label > 0xFFFFF || + softnic_parser_read_uint32(&tc, tokens[6]) || + tc > 0x7 || + softnic_parser_read_uint32(&ttl, tokens[7]) || + ttl > 0x3F) + return 0; + + a->encap.mpls.mpls[0].label = label; + a->encap.mpls.mpls[0].tc = tc; + a->encap.mpls.mpls[0].ttl = ttl; + + tokens += 8; + n_tokens -= 8; + + if (n_tokens == 0 || + strcmp(tokens[0], "label1")) { + a->encap.mpls.mpls_count = 1; + a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + 8; + } + + if (n_tokens < 4 || + softnic_parser_read_uint32(&label, tokens[1]) || + label > 0xFFFFF || + softnic_parser_read_uint32(&tc, tokens[2]) || + tc 
> 0x7 || + softnic_parser_read_uint32(&ttl, tokens[3]) || + ttl > 0x3F) + return 0; + + a->encap.mpls.mpls[1].label = label; + a->encap.mpls.mpls[1].tc = tc; + a->encap.mpls.mpls[1].ttl = ttl; + + tokens += 4; + n_tokens -= 4; + + if (n_tokens == 0 || + strcmp(tokens[0], "label2")) { + a->encap.mpls.mpls_count = 2; + a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + 8 + 4; + } + + if (n_tokens < 4 || + softnic_parser_read_uint32(&label, tokens[1]) || + label > 0xFFFFF || + softnic_parser_read_uint32(&tc, tokens[2]) || + tc > 0x7 || + softnic_parser_read_uint32(&ttl, tokens[3]) || + ttl > 0x3F) + return 0; + + a->encap.mpls.mpls[2].label = label; + a->encap.mpls.mpls[2].tc = tc; + a->encap.mpls.mpls[2].ttl = ttl; + + tokens += 4; + n_tokens -= 4; + + if (n_tokens == 0 || + strcmp(tokens[0], "label3")) { + a->encap.mpls.mpls_count = 3; + a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + 8 + 4 + 4; + } + + if (n_tokens < 4 || + softnic_parser_read_uint32(&label, tokens[1]) || + label > 0xFFFFF || + softnic_parser_read_uint32(&tc, tokens[2]) || + tc > 0x7 || + softnic_parser_read_uint32(&ttl, tokens[3]) || + ttl > 0x3F) + return 0; + + a->encap.mpls.mpls[3].label = label; + a->encap.mpls.mpls[3].tc = tc; + a->encap.mpls.mpls[3].ttl = ttl; + + a->encap.mpls.mpls_count = 4; + a->encap.type = RTE_TABLE_ACTION_ENCAP_MPLS; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + 8 + 4 + 4 + 4; + } + + /* pppoe */ + if (n_tokens && (strcmp(tokens[0], "pppoe") == 0)) { + if (n_tokens < 4 || + softnic_parse_mac_addr(tokens[1], &a->encap.pppoe.ether.da) || + softnic_parse_mac_addr(tokens[2], &a->encap.pppoe.ether.sa) || + softnic_parser_read_uint16(&a->encap.pppoe.pppoe.session_id, + tokens[3])) + return 0; + + a->encap.type = RTE_TABLE_ACTION_ENCAP_PPPOE; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + 4; + } + + /* vxlan */ + if (n_tokens && (strcmp(tokens[0], "vxlan") == 0)) { + uint32_t n = 0; + + n_tokens--; + tokens++; + n++; + + /* ether <da> <sa> */ + if ((n_tokens < 3) || + strcmp(tokens[0], "ether") || + softnic_parse_mac_addr(tokens[1], &a->encap.vxlan.ether.da) || + softnic_parse_mac_addr(tokens[2], &a->encap.vxlan.ether.sa)) + return 0; + + n_tokens -= 3; + tokens += 3; + n += 3; + + /* [vlan <pcp> <dei> <vid>] */ + if (strcmp(tokens[0], "vlan") == 0) { + uint32_t pcp, dei, vid; + + if ((n_tokens < 4) || + softnic_parser_read_uint32(&pcp, tokens[1]) || + (pcp > 7) || + softnic_parser_read_uint32(&dei, tokens[2]) || + (dei > 1) || + softnic_parser_read_uint32(&vid, tokens[3]) || + (vid > 0xFFF)) + return 0; + + a->encap.vxlan.vlan.pcp = pcp; + a->encap.vxlan.vlan.dei = dei; + a->encap.vxlan.vlan.vid = vid; + + n_tokens -= 4; + tokens += 4; + n += 4; + } + + /* ipv4 <sa> <da> <dscp> <ttl> + | ipv6 <sa> <da> <flow_label> <dscp> <hop_limit> */ + if (strcmp(tokens[0], "ipv4") == 0) { + struct in_addr sa, da; + uint8_t dscp, ttl; + + if ((n_tokens < 5) || + softnic_parse_ipv4_addr(tokens[1], &sa) || + softnic_parse_ipv4_addr(tokens[2], &da) || + softnic_parser_read_uint8(&dscp, tokens[3]) || + (dscp > 64) || + softnic_parser_read_uint8(&ttl, tokens[4])) + return 0; + + a->encap.vxlan.ipv4.sa = rte_be_to_cpu_32(sa.s_addr); + a->encap.vxlan.ipv4.da = rte_be_to_cpu_32(da.s_addr); + a->encap.vxlan.ipv4.dscp = dscp; + a->encap.vxlan.ipv4.ttl = ttl; + + n_tokens -= 5; + tokens += 5; + n += 5; + } else if (strcmp(tokens[0], "ipv6") == 0) { + struct in6_addr sa, da; + 
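+			/*
+			 * Per the grammar above: ipv6 <sa> <da> <flow_label>
+			 * <dscp> <hop_limit>, followed by the udp and vxlan
+			 * tokens. Illustrative fragment (placeholder
+			 * addresses; 4789 is the standard VXLAN UDP port):
+			 *
+			 *   ... ipv6 fd00::1 fd00::2 0 0 64 udp 4789 4789 vxlan 100
+			 */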
uint32_t flow_label; + uint8_t dscp, hop_limit; + + if ((n_tokens < 6) || + softnic_parse_ipv6_addr(tokens[1], &sa) || + softnic_parse_ipv6_addr(tokens[2], &da) || + softnic_parser_read_uint32(&flow_label, tokens[3]) || + softnic_parser_read_uint8(&dscp, tokens[4]) || + (dscp > 64) || + softnic_parser_read_uint8(&hop_limit, tokens[5])) + return 0; + + memcpy(a->encap.vxlan.ipv6.sa, sa.s6_addr, 16); + memcpy(a->encap.vxlan.ipv6.da, da.s6_addr, 16); + a->encap.vxlan.ipv6.flow_label = flow_label; + a->encap.vxlan.ipv6.dscp = dscp; + a->encap.vxlan.ipv6.hop_limit = hop_limit; + + n_tokens -= 6; + tokens += 6; + n += 6; + } else + return 0; + + /* udp <sp> <dp> */ + if ((n_tokens < 3) || + strcmp(tokens[0], "udp") || + softnic_parser_read_uint16(&a->encap.vxlan.udp.sp, tokens[1]) || + softnic_parser_read_uint16(&a->encap.vxlan.udp.dp, tokens[2])) + return 0; + + n_tokens -= 3; + tokens += 3; + n += 3; + + /* vxlan <vni> */ + if ((n_tokens < 2) || + strcmp(tokens[0], "vxlan") || + softnic_parser_read_uint32(&a->encap.vxlan.vxlan.vni, tokens[1]) || + (a->encap.vxlan.vxlan.vni > 0xFFFFFF)) + return 0; + + n_tokens -= 2; + tokens += 2; + n += 2; + + a->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN; + a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + return 1 + n; + } + + return 0; +} + +static uint32_t +parse_table_action_nat(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + if (n_tokens < 4 || + strcmp(tokens[0], "nat")) + return 0; + + if (strcmp(tokens[1], "ipv4") == 0) { + struct in_addr addr; + uint16_t port; + + if (softnic_parse_ipv4_addr(tokens[2], &addr) || + softnic_parser_read_uint16(&port, tokens[3])) + return 0; + + a->nat.ip_version = 1; + a->nat.addr.ipv4 = rte_be_to_cpu_32(addr.s_addr); + a->nat.port = port; + a->action_mask |= 1 << RTE_TABLE_ACTION_NAT; + return 4; + } + + if (strcmp(tokens[1], "ipv6") == 0) { + struct in6_addr addr; + uint16_t port; + + if (softnic_parse_ipv6_addr(tokens[2], &addr) || + softnic_parser_read_uint16(&port, tokens[3])) + return 0; + + a->nat.ip_version = 0; + memcpy(a->nat.addr.ipv6, addr.s6_addr, 16); + a->nat.port = port; + a->action_mask |= 1 << RTE_TABLE_ACTION_NAT; + return 4; + } + + return 0; +} + +static uint32_t +parse_table_action_ttl(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + if (n_tokens < 2 || + strcmp(tokens[0], "ttl")) + return 0; + + if (strcmp(tokens[1], "dec") == 0) + a->ttl.decrement = 1; + else if (strcmp(tokens[1], "keep") == 0) + a->ttl.decrement = 0; + else + return 0; + + a->action_mask |= 1 << RTE_TABLE_ACTION_TTL; + return 2; +} + +static uint32_t +parse_table_action_stats(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + if (n_tokens < 1 || + strcmp(tokens[0], "stats")) + return 0; + + a->stats.n_packets = 0; + a->stats.n_bytes = 0; + a->action_mask |= 1 << RTE_TABLE_ACTION_STATS; + return 1; +} + +static uint32_t +parse_table_action_time(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + if (n_tokens < 1 || + strcmp(tokens[0], "time")) + return 0; + + a->time.time = rte_rdtsc(); + a->action_mask |= 1 << RTE_TABLE_ACTION_TIME; + return 1; +} + +static void +parse_free_sym_crypto_param_data(struct rte_table_action_sym_crypto_params *p) +{ + struct rte_crypto_sym_xform *xform[2] = {NULL}; + uint32_t i; + + xform[0] = p->xform; + if (xform[0]) + xform[1] = xform[0]->next; + + for (i = 0; i < 2; i++) { + if (xform[i] == NULL) + continue; + + switch (xform[i]->type) { + case RTE_CRYPTO_SYM_XFORM_CIPHER: + if 
(p->cipher_auth.cipher_iv.val)
+			free(p->cipher_auth.cipher_iv.val);
+		if (p->cipher_auth.cipher_iv_update.val)
+			free(p->cipher_auth.cipher_iv_update.val);
+		break;
+	case RTE_CRYPTO_SYM_XFORM_AUTH:
+		if (p->cipher_auth.auth_iv.val)
+			free(p->cipher_auth.auth_iv.val);
+		if (p->cipher_auth.auth_iv_update.val)
+			free(p->cipher_auth.auth_iv_update.val);
+		break;
+	case RTE_CRYPTO_SYM_XFORM_AEAD:
+		if (p->aead.iv.val)
+			free(p->aead.iv.val);
+		if (p->aead.aad.val)
+			free(p->aead.aad.val);
+		break;
+	default:
+		continue;
+	}
+	}
+
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_cipher(struct rte_table_action_sym_crypto_params *p,
+	uint8_t *key, uint32_t max_key_len, char **tokens,
+	uint32_t n_tokens, uint32_t encrypt, uint32_t *used_n_tokens)
+{
+	struct rte_crypto_sym_xform *xform_cipher;
+	int status;
+	size_t len;
+
+	if (n_tokens < 7 || strcmp(tokens[1], "cipher_algo") ||
+			strcmp(tokens[3], "cipher_key") ||
+			strcmp(tokens[5], "cipher_iv"))
+		return NULL;
+
+	xform_cipher = calloc(1, sizeof(*xform_cipher));
+	if (xform_cipher == NULL)
+		return NULL;
+
+	xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	xform_cipher->cipher.op = encrypt ? RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+		RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+	/* cipher_algo */
+	status = rte_cryptodev_get_cipher_algo_enum(
+		&xform_cipher->cipher.algo, tokens[2]);
+	if (status < 0)
+		goto error_exit;
+
+	/* cipher_key */
+	len = strlen(tokens[4]);
+	if (len / 2 > max_key_len) {
+		status = -ENOMEM;
+		goto error_exit;
+	}
+
+	status = softnic_parse_hex_string(tokens[4], key, (uint32_t *)&len);
+	if (status < 0)
+		goto error_exit;
+
+	xform_cipher->cipher.key.data = key;
+	xform_cipher->cipher.key.length = (uint16_t)len;
+
+	/* cipher_iv */
+	len = strlen(tokens[6]);
+
+	p->cipher_auth.cipher_iv.val = calloc(1, len / 2 + 1);
+	if (p->cipher_auth.cipher_iv.val == NULL)
+		goto error_exit;
+
+	status = softnic_parse_hex_string(tokens[6],
+		p->cipher_auth.cipher_iv.val,
+		(uint32_t *)&len);
+	if (status < 0)
+		goto error_exit;
+
+	xform_cipher->cipher.iv.length = (uint16_t)len;
+	xform_cipher->cipher.iv.offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET;
+	p->cipher_auth.cipher_iv.length = (uint32_t)len;
+	*used_n_tokens = 7;
+
+	return xform_cipher;
+
+error_exit:
+	if (p->cipher_auth.cipher_iv.val) {
+		free(p->cipher_auth.cipher_iv.val);
+		p->cipher_auth.cipher_iv.val = NULL;
+	}
+
+	free(xform_cipher);
+
+	return NULL;
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_cipher_auth(struct rte_table_action_sym_crypto_params *p,
+	uint8_t *key, uint32_t max_key_len, char **tokens,
+	uint32_t n_tokens, uint32_t encrypt, uint32_t *used_n_tokens)
+{
+	struct rte_crypto_sym_xform *xform_cipher;
+	struct rte_crypto_sym_xform *xform_auth;
+	int status;
+	size_t len;
+
+	if (n_tokens < 13 ||
+		strcmp(tokens[7], "auth_algo") ||
+		strcmp(tokens[9], "auth_key") ||
+		strcmp(tokens[11], "digest_size"))
+		return NULL;
+
+	xform_auth = calloc(1, sizeof(*xform_auth));
+	if (xform_auth == NULL)
+		return NULL;
+
+	xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	xform_auth->auth.op = encrypt ?
RTE_CRYPTO_AUTH_OP_GENERATE : + RTE_CRYPTO_AUTH_OP_VERIFY; + + /* auth_algo */ + status = rte_cryptodev_get_auth_algo_enum(&xform_auth->auth.algo, + tokens[8]); + if (status < 0) + goto error_exit; + + /* auth_key */ + len = strlen(tokens[10]); + if (len / 2 > max_key_len) { + status = -ENOMEM; + goto error_exit; + } + + status = softnic_parse_hex_string(tokens[10], key, (uint32_t *)&len); + if (status < 0) + goto error_exit; + + xform_auth->auth.key.data = key; + xform_auth->auth.key.length = (uint16_t)len; + + key += xform_auth->auth.key.length; + max_key_len -= xform_auth->auth.key.length; + + if (strcmp(tokens[11], "digest_size")) + goto error_exit; + + status = softnic_parser_read_uint16(&xform_auth->auth.digest_length, + tokens[12]); + if (status < 0) + goto error_exit; + + xform_cipher = parse_table_action_cipher(p, key, max_key_len, tokens, 7, + encrypt, used_n_tokens); + if (xform_cipher == NULL) + goto error_exit; + + *used_n_tokens += 6; + + if (encrypt) { + xform_cipher->next = xform_auth; + return xform_cipher; + } else { + xform_auth->next = xform_cipher; + return xform_auth; + } + +error_exit: + if (p->cipher_auth.auth_iv.val) { + free(p->cipher_auth.auth_iv.val); + p->cipher_auth.auth_iv.val = 0; + } + + free(xform_auth); + + return NULL; +} + +static struct rte_crypto_sym_xform * +parse_table_action_aead(struct rte_table_action_sym_crypto_params *p, + uint8_t *key, uint32_t max_key_len, char **tokens, + uint32_t n_tokens, uint32_t encrypt, uint32_t *used_n_tokens) +{ + struct rte_crypto_sym_xform *xform_aead; + int status; + size_t len; + + if (n_tokens < 11 || strcmp(tokens[1], "aead_algo") || + strcmp(tokens[3], "aead_key") || + strcmp(tokens[5], "aead_iv") || + strcmp(tokens[7], "aead_aad") || + strcmp(tokens[9], "digest_size")) + return NULL; + + xform_aead = calloc(1, sizeof(*xform_aead)); + if (xform_aead == NULL) + return NULL; + + xform_aead->type = RTE_CRYPTO_SYM_XFORM_AEAD; + xform_aead->aead.op = encrypt ? 
RTE_CRYPTO_AEAD_OP_ENCRYPT : + RTE_CRYPTO_AEAD_OP_DECRYPT; + + /* aead_algo */ + status = rte_cryptodev_get_aead_algo_enum(&xform_aead->aead.algo, + tokens[2]); + if (status < 0) + goto error_exit; + + /* aead_key */ + len = strlen(tokens[4]); + if (len / 2 > max_key_len) { + status = -ENOMEM; + goto error_exit; + } + + status = softnic_parse_hex_string(tokens[4], key, (uint32_t *)&len); + if (status < 0) + goto error_exit; + + xform_aead->aead.key.data = key; + xform_aead->aead.key.length = (uint16_t)len; + + /* aead_iv */ + len = strlen(tokens[6]); + p->aead.iv.val = calloc(1, len / 2 + 1); + if (p->aead.iv.val == NULL) + goto error_exit; + + status = softnic_parse_hex_string(tokens[6], p->aead.iv.val, + (uint32_t *)&len); + if (status < 0) + goto error_exit; + + xform_aead->aead.iv.length = (uint16_t)len; + xform_aead->aead.iv.offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET; + p->aead.iv.length = (uint32_t)len; + + /* aead_aad */ + len = strlen(tokens[8]); + p->aead.aad.val = calloc(1, len / 2 + 1); + if (p->aead.aad.val == NULL) + goto error_exit; + + status = softnic_parse_hex_string(tokens[8], p->aead.aad.val, (uint32_t *)&len); + if (status < 0) + goto error_exit; + + xform_aead->aead.aad_length = (uint16_t)len; + p->aead.aad.length = (uint32_t)len; + + /* digest_size */ + status = softnic_parser_read_uint16(&xform_aead->aead.digest_length, + tokens[10]); + if (status < 0) + goto error_exit; + + *used_n_tokens = 11; + + return xform_aead; + +error_exit: + if (p->aead.iv.val) { + free(p->aead.iv.val); + p->aead.iv.val = NULL; + } + if (p->aead.aad.val) { + free(p->aead.aad.val); + p->aead.aad.val = NULL; + } + + free(xform_aead); + + return NULL; +} + + +static uint32_t +parse_table_action_sym_crypto(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + struct rte_table_action_sym_crypto_params *p = &a->sym_crypto; + struct rte_crypto_sym_xform *xform = NULL; + uint8_t *key = a->sym_crypto_key; + uint32_t max_key_len = SYM_CRYPTO_MAX_KEY_SIZE; + uint32_t used_n_tokens; + uint32_t encrypt; + int status; + + if ((n_tokens < 12) || + strcmp(tokens[0], "sym_crypto") || + strcmp(tokens[2], "type")) + return 0; + + memset(p, 0, sizeof(*p)); + + if (strcmp(tokens[1], "encrypt") == 0) + encrypt = 1; + else + encrypt = 0; + + status = softnic_parser_read_uint32(&p->data_offset, tokens[n_tokens - 1]); + if (status < 0) + return 0; + + if (strcmp(tokens[3], "cipher") == 0) { + tokens += 3; + n_tokens -= 3; + + xform = parse_table_action_cipher(p, key, max_key_len, tokens, + n_tokens, encrypt, &used_n_tokens); + } else if (strcmp(tokens[3], "cipher_auth") == 0) { + tokens += 3; + n_tokens -= 3; + + xform = parse_table_action_cipher_auth(p, key, max_key_len, + tokens, n_tokens, encrypt, &used_n_tokens); + } else if (strcmp(tokens[3], "aead") == 0) { + tokens += 3; + n_tokens -= 3; + + xform = parse_table_action_aead(p, key, max_key_len, tokens, + n_tokens, encrypt, &used_n_tokens); + } + + if (xform == NULL) + return 0; + + p->xform = xform; + + if (strcmp(tokens[used_n_tokens], "data_offset")) { + parse_free_sym_crypto_param_data(p); + return 0; + } + + a->action_mask |= 1 << RTE_TABLE_ACTION_SYM_CRYPTO; + + return used_n_tokens + 5; +} + +static uint32_t +parse_table_action_tag(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + if (n_tokens < 2 || + strcmp(tokens[0], "tag")) + return 0; + + if (softnic_parser_read_uint32(&a->tag.tag, tokens[1])) + return 0; + + a->action_mask |= 1 << RTE_TABLE_ACTION_TAG; + return 2; +} + +static uint32_t 
+parse_table_action_decap(char **tokens, + uint32_t n_tokens, + struct softnic_table_rule_action *a) +{ + if (n_tokens < 2 || + strcmp(tokens[0], "decap")) + return 0; + + if (softnic_parser_read_uint16(&a->decap.n, tokens[1])) + return 0; + + a->action_mask |= 1 << RTE_TABLE_ACTION_DECAP; + return 2; +} + +static uint32_t +parse_table_action(char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size, + struct softnic_table_rule_action *a) +{ + uint32_t n_tokens0 = n_tokens; + + memset(a, 0, sizeof(*a)); + + if (n_tokens < 2 || + strcmp(tokens[0], "action")) + return 0; + + tokens++; + n_tokens--; + + if (n_tokens && (strcmp(tokens[0], "fwd") == 0)) { + uint32_t n; + + n = parse_table_action_fwd(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action fwd"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "balance") == 0)) { + uint32_t n; + + n = parse_table_action_balance(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action balance"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "meter") == 0)) { + uint32_t n; + + n = parse_table_action_meter(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action meter"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "tm") == 0)) { + uint32_t n; + + n = parse_table_action_tm(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action tm"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "encap") == 0)) { + uint32_t n; + + n = parse_table_action_encap(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action encap"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "nat") == 0)) { + uint32_t n; + + n = parse_table_action_nat(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action nat"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "ttl") == 0)) { + uint32_t n; + + n = parse_table_action_ttl(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action ttl"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "stats") == 0)) { + uint32_t n; + + n = parse_table_action_stats(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action stats"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "time") == 0)) { + uint32_t n; + + n = parse_table_action_time(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action time"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "tag") == 0)) { + uint32_t n; + + n = parse_table_action_tag(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action tag"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "decap") == 0)) { + uint32_t n; + + n = parse_table_action_decap(tokens, n_tokens, a); + if (n == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, + "action decap"); + return 0; + } + + tokens += n; + n_tokens -= n; + } + + if (n_tokens && (strcmp(tokens[0], "sym_crypto") == 0)) { + uint32_t n; + + n = parse_table_action_sym_crypto(tokens, n_tokens, 
a);
+		if (n == 0) {
+			snprintf(out, out_size, MSG_ARG_INVALID,
+				"action sym_crypto");
+			return 0;
+		}
+
+		tokens += n;
+		n_tokens -= n;
+	}
+
+	if (n_tokens0 - n_tokens == 1) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "action");
+		return 0;
+	}
+
+	return n_tokens0 - n_tokens;
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add
+ *    match <match>
+ *    action <table_action>
+ */
+static void
+cmd_softnic_pipeline_table_rule_add(struct pmd_internals *softnic,
+	char **tokens,
+	uint32_t n_tokens,
+	char *out,
+	size_t out_size)
+{
+	struct softnic_table_rule_match m;
+	struct softnic_table_rule_action a;
+	char *pipeline_name;
+	void *data;
+	uint32_t table_id, t0, n_tokens_parsed;
+	int status;
+
+	if (n_tokens < 8) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	pipeline_name = tokens[1];
+
+	if (strcmp(tokens[2], "table") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+		return;
+	}
+
+	if (strcmp(tokens[4], "rule") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+		return;
+	}
+
+	if (strcmp(tokens[5], "add") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+		return;
+	}
+
+	t0 = 6;
+
+	/* match */
+	n_tokens_parsed = parse_match(tokens + t0,
+		n_tokens - t0,
+		out,
+		out_size,
+		&m);
+	if (n_tokens_parsed == 0)
+		return;
+	t0 += n_tokens_parsed;
+
+	/* action */
+	n_tokens_parsed = parse_table_action(tokens + t0,
+		n_tokens - t0,
+		out,
+		out_size,
+		&a);
+	if (n_tokens_parsed == 0)
+		return;
+	t0 += n_tokens_parsed;
+
+	if (t0 != n_tokens) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	status = softnic_pipeline_table_rule_add(softnic,
+		pipeline_name,
+		table_id,
+		&m,
+		&a,
+		&data);
+	if (status) {
+		snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+		return;
+	}
+}
+
+/**
+ * pipeline <pipeline_name> table <table_id> rule add
+ *    match
+ *       default
+ *    action
+ *       fwd
+ *          drop
+ *          | port <port_id>
+ *          | meta
+ *          | table <table_id>
+ */
+static void
+cmd_softnic_pipeline_table_rule_add_default(struct pmd_internals *softnic,
+	char **tokens,
+	uint32_t n_tokens,
+	char *out,
+	size_t out_size)
+{
+	struct softnic_table_rule_action action;
+	void *data;
+	char *pipeline_name;
+	uint32_t table_id;
+	int status;
+
+	if (n_tokens != 11 &&
+		n_tokens != 12) {
+		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+		return;
+	}
+
+	pipeline_name = tokens[1];
+
+	if (strcmp(tokens[2], "table") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
+		return;
+	}
+
+	if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
+		return;
+	}
+
+	if (strcmp(tokens[4], "rule") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule");
+		return;
+	}
+
+	if (strcmp(tokens[5], "add") != 0) {
+		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add");
+		return;
+	}
+
+	if (strcmp(tokens[6], "match") != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "match");
+		return;
+	}
+
+	if (strcmp(tokens[7], "default") != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "default");
+		return;
+	}
+
+	if (strcmp(tokens[8], "action") != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "action");
+		return;
+	}
+
+	if (strcmp(tokens[9], "fwd") != 0) {
+		snprintf(out, out_size, MSG_ARG_INVALID, "fwd");
+		return;
+	}
+
+	action.action_mask = 1 << RTE_TABLE_ACTION_FWD;
+
+	if (strcmp(tokens[10], "drop") == 0) {
+		if (n_tokens != 11)
{ + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + action.fwd.action = RTE_PIPELINE_ACTION_DROP; + } else if (strcmp(tokens[10], "port") == 0) { + uint32_t id; + + if (n_tokens != 12) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (softnic_parser_read_uint32(&id, tokens[11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "port_id"); + return; + } + + action.fwd.action = RTE_PIPELINE_ACTION_PORT; + action.fwd.id = id; + } else if (strcmp(tokens[10], "meta") == 0) { + if (n_tokens != 11) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + action.fwd.action = RTE_PIPELINE_ACTION_PORT_META; + } else if (strcmp(tokens[10], "table") == 0) { + uint32_t id; + + if (n_tokens != 12) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (softnic_parser_read_uint32(&id, tokens[11]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + action.fwd.action = RTE_PIPELINE_ACTION_TABLE; + action.fwd.id = id; + } else { + snprintf(out, out_size, MSG_ARG_INVALID, + "drop or port or meta or table"); + return; + } + + status = softnic_pipeline_table_rule_add_default(softnic, + pipeline_name, + table_id, + &action, + &data); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> table <table_id> rule add bulk <file_name> <n_rules> + * + * File <file_name>: + * - line format: match <match> action <action> + */ +static int +cli_rule_file_process(const char *file_name, + size_t line_len_max, + struct softnic_table_rule_match *m, + struct softnic_table_rule_action *a, + uint32_t *n_rules, + uint32_t *line_number, + char *out, + size_t out_size); + +static void +cmd_softnic_pipeline_table_rule_add_bulk(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_table_rule_match *match; + struct softnic_table_rule_action *action; + void **data; + char *pipeline_name, *file_name; + uint32_t table_id, n_rules, n_rules_parsed, line_number; + int status; + + if (n_tokens != 9) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + if (strcmp(tokens[4], "rule") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule"); + return; + } + + if (strcmp(tokens[5], "add") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add"); + return; + } + + if (strcmp(tokens[6], "bulk") != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "bulk"); + return; + } + + file_name = tokens[7]; + + if ((softnic_parser_read_uint32(&n_rules, tokens[8]) != 0) || + n_rules == 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "n_rules"); + return; + } + + /* Memory allocation. 
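+ * Three parallel arrays, one slot per rule: for rule i, match[i] and
+ * action[i] are filled in from line i of the rule file by
+ * cli_rule_file_process(), and data[i] receives the rule handle from the
+ * bulk add. Illustrative file content (hypothetical: the exact <match>
+ * grammar is whatever parse_match() accepts, and "fwd port" / "fwd drop"
+ * are just two of the possible actions):
+ *
+ *    match <match> action fwd port 0
+ *    match <match> action fwd drop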
*/ + match = calloc(n_rules, sizeof(struct softnic_table_rule_match)); + action = calloc(n_rules, sizeof(struct softnic_table_rule_action)); + data = calloc(n_rules, sizeof(void *)); + if (match == NULL || + action == NULL || + data == NULL) { + snprintf(out, out_size, MSG_OUT_OF_MEMORY); + free(data); + free(action); + free(match); + return; + } + + /* Load rule file */ + n_rules_parsed = n_rules; + status = cli_rule_file_process(file_name, + 1024, + match, + action, + &n_rules_parsed, + &line_number, + out, + out_size); + if (status) { + snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number); + free(data); + free(action); + free(match); + return; + } + if (n_rules_parsed != n_rules) { + snprintf(out, out_size, MSG_FILE_NOT_ENOUGH, file_name); + free(data); + free(action); + free(match); + return; + } + + /* Rule bulk add */ + status = softnic_pipeline_table_rule_add_bulk(softnic, + pipeline_name, + table_id, + match, + action, + data, + &n_rules); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + free(data); + free(action); + free(match); + return; + } + + /* Memory free */ + free(data); + free(action); + free(match); +} + +/** + * pipeline <pipeline_name> table <table_id> rule delete + * match <match> + */ +static void +cmd_softnic_pipeline_table_rule_delete(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct softnic_table_rule_match m; + char *pipeline_name; + uint32_t table_id, n_tokens_parsed, t0; + int status; + + if (n_tokens < 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + if (strcmp(tokens[4], "rule") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule"); + return; + } + + if (strcmp(tokens[5], "delete") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete"); + return; + } + + t0 = 6; + + /* match */ + n_tokens_parsed = parse_match(tokens + t0, + n_tokens - t0, + out, + out_size, + &m); + if (n_tokens_parsed == 0) + return; + t0 += n_tokens_parsed; + + if (n_tokens != t0) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = softnic_pipeline_table_rule_delete(softnic, + pipeline_name, + table_id, + &m); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> table <table_id> rule delete + * match + * default + */ +static void +cmd_softnic_pipeline_table_rule_delete_default(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t table_id; + int status; + + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + if (strcmp(tokens[4], "rule") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rule"); + return; + } + + if (strcmp(tokens[5], "delete") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete"); + return; + } + + if (strcmp(tokens[6], "match") != 0) { + snprintf(out, 
out_size, MSG_ARG_INVALID, "match"); + return; + } + + if (strcmp(tokens[7], "default") != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "default"); + return; + } + + status = softnic_pipeline_table_rule_delete_default(softnic, + pipeline_name, + table_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> table <table_id> rule read stats [clear] + */ +static void +cmd_softnic_pipeline_table_rule_stats_read(struct pmd_internals *softnic __rte_unused, + char **tokens, + uint32_t n_tokens __rte_unused, + char *out, + size_t out_size) +{ + snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]); +} + +/** + * pipeline <pipeline_name> table <table_id> meter profile <meter_profile_id> + * add srtcm cir <cir> cbs <cbs> ebs <ebs> + * | trtcm cir <cir> pir <pir> cbs <cbs> pbs <pbs> + */ +static void +cmd_pipeline_table_meter_profile_add(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_table_action_meter_profile p; + char *pipeline_name; + uint32_t table_id, meter_profile_id; + int status; + + if (n_tokens < 9) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + if (strcmp(tokens[4], "meter") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter"); + return; + } + + if (strcmp(tokens[5], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (softnic_parser_read_uint32(&meter_profile_id, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id"); + return; + } + + if (strcmp(tokens[7], "add") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "add"); + return; + } + + if (strcmp(tokens[8], "srtcm") == 0) { + if (n_tokens != 15) { + snprintf(out, out_size, MSG_ARG_MISMATCH, + tokens[0]); + return; + } + + p.alg = RTE_TABLE_ACTION_METER_SRTCM; + + if (strcmp(tokens[9], "cir") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir"); + return; + } + + if (softnic_parser_read_uint64(&p.srtcm.cir, tokens[10]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "cir"); + return; + } + + if (strcmp(tokens[11], "cbs") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs"); + return; + } + + if (softnic_parser_read_uint64(&p.srtcm.cbs, tokens[12]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "cbs"); + return; + } + + if (strcmp(tokens[13], "ebs") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "ebs"); + return; + } + + if (softnic_parser_read_uint64(&p.srtcm.ebs, tokens[14]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "ebs"); + return; + } + } else if (strcmp(tokens[8], "trtcm") == 0) { + if (n_tokens != 17) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + p.alg = RTE_TABLE_ACTION_METER_TRTCM; + + if (strcmp(tokens[9], "cir") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cir"); + return; + } + + if (softnic_parser_read_uint64(&p.trtcm.cir, tokens[10]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "cir"); + return; + } + + if (strcmp(tokens[11], "pir") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pir"); + return; + } + + if (softnic_parser_read_uint64(&p.trtcm.pir, tokens[12]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, 
"pir"); + return; + } + if (strcmp(tokens[13], "cbs") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "cbs"); + return; + } + + if (softnic_parser_read_uint64(&p.trtcm.cbs, tokens[14]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "cbs"); + return; + } + + if (strcmp(tokens[15], "pbs") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pbs"); + return; + } + + if (softnic_parser_read_uint64(&p.trtcm.pbs, tokens[16]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "pbs"); + return; + } + } else { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + status = softnic_pipeline_table_mtr_profile_add(softnic, + pipeline_name, + table_id, + meter_profile_id, + &p); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> table <table_id> + * meter profile <meter_profile_id> delete + */ +static void +cmd_pipeline_table_meter_profile_delete(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t table_id, meter_profile_id; + int status; + + if (n_tokens != 8) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + if (strcmp(tokens[4], "meter") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "meter"); + return; + } + + if (strcmp(tokens[5], "profile") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "profile"); + return; + } + + if (softnic_parser_read_uint32(&meter_profile_id, tokens[6]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "meter_profile_id"); + return; + } + + if (strcmp(tokens[7], "delete") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "delete"); + return; + } + + status = softnic_pipeline_table_mtr_profile_delete(softnic, + pipeline_name, + table_id, + meter_profile_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> table <table_id> rule read meter [clear] + */ +static void +cmd_pipeline_table_rule_meter_read(struct pmd_internals *softnic __rte_unused, + char **tokens, + uint32_t n_tokens __rte_unused, + char *out, + size_t out_size) +{ + snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]); +} + +/** + * pipeline <pipeline_name> table <table_id> dscp <file_name> + * + * File <file_name>: + * - exactly 64 lines + * - line format: <tc_id> <tc_queue_id> <color>, with <color> as: g | y | r + */ +static int +load_dscp_table(struct rte_table_action_dscp_table *dscp_table, + const char *file_name, + uint32_t *line_number) +{ + FILE *f = NULL; + uint32_t dscp, l; + + /* Check input arguments */ + if (dscp_table == NULL || + file_name == NULL || + line_number == NULL) { + if (line_number) + *line_number = 0; + return -EINVAL; + } + + /* Open input file */ + f = fopen(file_name, "r"); + if (f == NULL) { + *line_number = 0; + return -EINVAL; + } + + /* Read file */ + for (dscp = 0, l = 1; ; l++) { + char line[64]; + char *tokens[3]; + enum rte_color color; + uint32_t tc_id, tc_queue_id, n_tokens = RTE_DIM(tokens); + + if (fgets(line, sizeof(line), f) == NULL) + break; + + if (is_comment(line)) + continue; + + if (softnic_parse_tokenize_string(line, tokens, &n_tokens)) { + *line_number = l; + fclose(f); + return -EINVAL; + } + + 
if (n_tokens == 0) + continue; + + if (dscp >= RTE_DIM(dscp_table->entry) || + n_tokens != RTE_DIM(tokens) || + softnic_parser_read_uint32(&tc_id, tokens[0]) || + tc_id >= RTE_TABLE_ACTION_TC_MAX || + softnic_parser_read_uint32(&tc_queue_id, tokens[1]) || + tc_queue_id >= RTE_TABLE_ACTION_TC_QUEUE_MAX || + (strlen(tokens[2]) != 1)) { + *line_number = l; + fclose(f); + return -EINVAL; + } + + switch (tokens[2][0]) { + case 'g': + case 'G': + color = RTE_COLOR_GREEN; + break; + + case 'y': + case 'Y': + color = RTE_COLOR_YELLOW; + break; + + case 'r': + case 'R': + color = RTE_COLOR_RED; + break; + + default: + *line_number = l; + fclose(f); + return -EINVAL; + } + + dscp_table->entry[dscp].tc_id = tc_id; + dscp_table->entry[dscp].tc_queue_id = tc_queue_id; + dscp_table->entry[dscp].color = color; + dscp++; + } + + /* Close file */ + fclose(f); + return 0; +} + +static void +cmd_pipeline_table_dscp(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + struct rte_table_action_dscp_table dscp_table; + char *pipeline_name, *file_name; + uint32_t table_id, line_number; + int status; + + if (n_tokens != 6) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + pipeline_name = tokens[1]; + + if (strcmp(tokens[2], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "port"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + if (strcmp(tokens[4], "dscp") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "dscp"); + return; + } + + file_name = tokens[5]; + + status = load_dscp_table(&dscp_table, file_name, &line_number); + if (status) { + snprintf(out, out_size, MSG_FILE_ERR, file_name, line_number); + return; + } + + status = softnic_pipeline_table_dscp_table_update(softnic, + pipeline_name, + table_id, + UINT64_MAX, + &dscp_table); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +/** + * pipeline <pipeline_name> table <table_id> rule read ttl [clear] + */ +static void +cmd_softnic_pipeline_table_rule_ttl_read(struct pmd_internals *softnic __rte_unused, + char **tokens, + uint32_t n_tokens __rte_unused, + char *out, + size_t out_size) +{ + snprintf(out, out_size, MSG_CMD_UNIMPLEM, tokens[0]); +} + +/** + * thread <thread_id> pipeline <pipeline_name> enable + */ +static void +cmd_softnic_thread_pipeline_enable(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t thread_id; + int status; + + if (n_tokens != 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "thread_id"); + return; + } + + if (strcmp(tokens[2], "pipeline") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline"); + return; + } + + pipeline_name = tokens[3]; + + if (strcmp(tokens[4], "enable") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "enable"); + return; + } + + status = softnic_thread_pipeline_enable(softnic, thread_id, pipeline_name); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, "thread pipeline enable"); + return; + } +} + +/** + * thread <thread_id> pipeline <pipeline_name> disable + */ +static void +cmd_softnic_thread_pipeline_disable(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + 
uint32_t thread_id; + int status; + + if (n_tokens != 5) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (softnic_parser_read_uint32(&thread_id, tokens[1]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "thread_id"); + return; + } + + if (strcmp(tokens[2], "pipeline") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline"); + return; + } + + pipeline_name = tokens[3]; + + if (strcmp(tokens[4], "disable") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "disable"); + return; + } + + status = softnic_thread_pipeline_disable(softnic, thread_id, pipeline_name); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, + "thread pipeline disable"); + return; + } +} + +/** + * flowapi map + * group <group_id> + * ingress | egress + * pipeline <pipeline_name> + * table <table_id> + */ +static void +cmd_softnic_flowapi_map(struct pmd_internals *softnic, + char **tokens, + uint32_t n_tokens, + char *out, + size_t out_size) +{ + char *pipeline_name; + uint32_t group_id, table_id; + int ingress, status; + + if (n_tokens != 9) { + snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]); + return; + } + + if (strcmp(tokens[1], "map") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "map"); + return; + } + + if (strcmp(tokens[2], "group") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "group"); + return; + } + + if (softnic_parser_read_uint32(&group_id, tokens[3]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "group_id"); + return; + } + + if (strcmp(tokens[4], "ingress") == 0) { + ingress = 1; + } else if (strcmp(tokens[4], "egress") == 0) { + ingress = 0; + } else { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "ingress | egress"); + return; + } + + if (strcmp(tokens[5], "pipeline") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline"); + return; + } + + pipeline_name = tokens[6]; + + if (strcmp(tokens[7], "table") != 0) { + snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table"); + return; + } + + if (softnic_parser_read_uint32(&table_id, tokens[8]) != 0) { + snprintf(out, out_size, MSG_ARG_INVALID, "table_id"); + return; + } + + status = flow_attr_map_set(softnic, + group_id, + ingress, + pipeline_name, + table_id); + if (status) { + snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]); + return; + } +} + +void +softnic_cli_process(char *in, char *out, size_t out_size, void *arg) +{ + char *tokens[CMD_MAX_TOKENS]; + uint32_t n_tokens = RTE_DIM(tokens); + struct pmd_internals *softnic = arg; + int status; + + if (is_comment(in)) + return; + + status = softnic_parse_tokenize_string(in, tokens, &n_tokens); + if (status) { + snprintf(out, out_size, MSG_ARG_TOO_MANY, ""); + return; + } + + if (n_tokens == 0) + return; + + if (strcmp(tokens[0], "mempool") == 0) { + cmd_mempool(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (strcmp(tokens[0], "link") == 0) { + cmd_link(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (strcmp(tokens[0], "swq") == 0) { + cmd_swq(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (strcmp(tokens[0], "tmgr") == 0) { + if (n_tokens == 2) { + cmd_tmgr(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (n_tokens >= 3 && + (strcmp(tokens[1], "shaper") == 0) && + (strcmp(tokens[2], "profile") == 0)) { + cmd_tmgr_shaper_profile(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (n_tokens >= 3 && + (strcmp(tokens[1], "shared") == 0) && + (strcmp(tokens[2], "shaper") == 0)) { + cmd_tmgr_shared_shaper(softnic, tokens, n_tokens, out, out_size); + return; + } 
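+		/* The remaining "tmgr" forms are dispatched on tokens[1]
+		 * (plus tokens[2] for "hierarchy commit"); each cmd_tmgr_*()
+		 * handler re-validates the full token count and syntax
+		 * itself, so only loose n_tokens checks are needed here.
+		 */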
+ + if (n_tokens >= 2 && + (strcmp(tokens[1], "node") == 0)) { + cmd_tmgr_node(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (n_tokens >= 2 && + (strcmp(tokens[1], "hierarchy-default") == 0)) { + cmd_tmgr_hierarchy_default(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (n_tokens >= 3 && + (strcmp(tokens[1], "hierarchy") == 0) && + (strcmp(tokens[2], "commit") == 0)) { + cmd_tmgr_hierarchy_commit(softnic, tokens, n_tokens, out, out_size); + return; + } + } + + if (strcmp(tokens[0], "tap") == 0) { + cmd_tap(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (strcmp(tokens[0], "cryptodev") == 0) { + cmd_cryptodev(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (strcmp(tokens[0], "port") == 0) { + cmd_port_in_action_profile(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (strcmp(tokens[0], "table") == 0) { + cmd_table_action_profile(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (strcmp(tokens[0], "pipeline") == 0) { + if (n_tokens >= 3 && + (strcmp(tokens[2], "period") == 0)) { + cmd_pipeline(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (n_tokens >= 5 && + (strcmp(tokens[2], "port") == 0) && + (strcmp(tokens[3], "in") == 0) && + (strcmp(tokens[4], "bsz") == 0)) { + cmd_pipeline_port_in(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (n_tokens >= 5 && + (strcmp(tokens[2], "port") == 0) && + (strcmp(tokens[3], "out") == 0) && + (strcmp(tokens[4], "bsz") == 0)) { + cmd_pipeline_port_out(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (n_tokens >= 4 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[3], "match") == 0)) { + cmd_pipeline_table(softnic, tokens, n_tokens, out, out_size); + return; + } + + if (n_tokens >= 6 && + (strcmp(tokens[2], "port") == 0) && + (strcmp(tokens[3], "in") == 0) && + (strcmp(tokens[5], "table") == 0)) { + cmd_pipeline_port_in_table(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 6 && + (strcmp(tokens[2], "port") == 0) && + (strcmp(tokens[3], "in") == 0) && + (strcmp(tokens[5], "stats") == 0)) { + cmd_pipeline_port_in_stats(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 6 && + (strcmp(tokens[2], "port") == 0) && + (strcmp(tokens[3], "in") == 0) && + (strcmp(tokens[5], "enable") == 0)) { + cmd_softnic_pipeline_port_in_enable(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 6 && + (strcmp(tokens[2], "port") == 0) && + (strcmp(tokens[3], "in") == 0) && + (strcmp(tokens[5], "disable") == 0)) { + cmd_softnic_pipeline_port_in_disable(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 6 && + (strcmp(tokens[2], "port") == 0) && + (strcmp(tokens[3], "out") == 0) && + (strcmp(tokens[5], "stats") == 0)) { + cmd_pipeline_port_out_stats(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 5 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "stats") == 0)) { + cmd_pipeline_table_stats(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 7 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "rule") == 0) && + (strcmp(tokens[5], "add") == 0) && + (strcmp(tokens[6], "match") == 0)) { + if (n_tokens >= 8 && + (strcmp(tokens[7], "default") == 0)) { + cmd_softnic_pipeline_table_rule_add_default(softnic, tokens, + n_tokens, out, out_size); + return; + } + + cmd_softnic_pipeline_table_rule_add(softnic, tokens, n_tokens, + out, out_size); + 
return; + } + + if (n_tokens >= 7 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "rule") == 0) && + (strcmp(tokens[5], "add") == 0) && + (strcmp(tokens[6], "bulk") == 0)) { + cmd_softnic_pipeline_table_rule_add_bulk(softnic, tokens, + n_tokens, out, out_size); + return; + } + + if (n_tokens >= 7 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "rule") == 0) && + (strcmp(tokens[5], "delete") == 0) && + (strcmp(tokens[6], "match") == 0)) { + if (n_tokens >= 8 && + (strcmp(tokens[7], "default") == 0)) { + cmd_softnic_pipeline_table_rule_delete_default(softnic, tokens, + n_tokens, out, out_size); + return; + } + + cmd_softnic_pipeline_table_rule_delete(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 7 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "rule") == 0) && + (strcmp(tokens[5], "read") == 0) && + (strcmp(tokens[6], "stats") == 0)) { + cmd_softnic_pipeline_table_rule_stats_read(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 8 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "meter") == 0) && + (strcmp(tokens[5], "profile") == 0) && + (strcmp(tokens[7], "add") == 0)) { + cmd_pipeline_table_meter_profile_add(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 8 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "meter") == 0) && + (strcmp(tokens[5], "profile") == 0) && + (strcmp(tokens[7], "delete") == 0)) { + cmd_pipeline_table_meter_profile_delete(softnic, tokens, + n_tokens, out, out_size); + return; + } + + if (n_tokens >= 7 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "rule") == 0) && + (strcmp(tokens[5], "read") == 0) && + (strcmp(tokens[6], "meter") == 0)) { + cmd_pipeline_table_rule_meter_read(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 5 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "dscp") == 0)) { + cmd_pipeline_table_dscp(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 7 && + (strcmp(tokens[2], "table") == 0) && + (strcmp(tokens[4], "rule") == 0) && + (strcmp(tokens[5], "read") == 0) && + (strcmp(tokens[6], "ttl") == 0)) { + cmd_softnic_pipeline_table_rule_ttl_read(softnic, tokens, n_tokens, + out, out_size); + return; + } + } + + if (strcmp(tokens[0], "thread") == 0) { + if (n_tokens >= 5 && + (strcmp(tokens[4], "enable") == 0)) { + cmd_softnic_thread_pipeline_enable(softnic, tokens, n_tokens, + out, out_size); + return; + } + + if (n_tokens >= 5 && + (strcmp(tokens[4], "disable") == 0)) { + cmd_softnic_thread_pipeline_disable(softnic, tokens, n_tokens, + out, out_size); + return; + } + } + + if (strcmp(tokens[0], "flowapi") == 0) { + cmd_softnic_flowapi_map(softnic, tokens, n_tokens, out, + out_size); + return; + } + + snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]); +} + +int +softnic_cli_script_process(struct pmd_internals *softnic, + const char *file_name, + size_t msg_in_len_max, + size_t msg_out_len_max) +{ + char *msg_in = NULL, *msg_out = NULL; + FILE *f = NULL; + + /* Check input arguments */ + if (file_name == NULL || + (strlen(file_name) == 0) || + msg_in_len_max == 0 || + msg_out_len_max == 0) + return -EINVAL; + + msg_in = malloc(msg_in_len_max + 1); + msg_out = malloc(msg_out_len_max + 1); + if (msg_in == NULL || + msg_out == NULL) { + free(msg_out); + free(msg_in); + return -ENOMEM; + } + + /* Open input file */ + f = fopen(file_name, "r"); + if (f == NULL) { + free(msg_out); + free(msg_in); + return 
-EIO; + } + + /* Read file */ + for ( ; ; ) { + if (fgets(msg_in, msg_in_len_max + 1, f) == NULL) + break; + + printf("%s", msg_in); + msg_out[0] = 0; + + softnic_cli_process(msg_in, + msg_out, + msg_out_len_max, + softnic); + + if (strlen(msg_out)) + printf("%s", msg_out); + } + + /* Close file */ + fclose(f); + free(msg_out); + free(msg_in); + return 0; +} + +static int +cli_rule_file_process(const char *file_name, + size_t line_len_max, + struct softnic_table_rule_match *m, + struct softnic_table_rule_action *a, + uint32_t *n_rules, + uint32_t *line_number, + char *out, + size_t out_size) +{ + FILE *f = NULL; + char *line = NULL; + uint32_t rule_id, line_id; + int status = 0; + + /* Check input arguments */ + if (file_name == NULL || + (strlen(file_name) == 0) || + line_len_max == 0) { + *line_number = 0; + return -EINVAL; + } + + /* Memory allocation */ + line = malloc(line_len_max + 1); + if (line == NULL) { + *line_number = 0; + return -ENOMEM; + } + + /* Open file */ + f = fopen(file_name, "r"); + if (f == NULL) { + *line_number = 0; + free(line); + return -EIO; + } + + /* Read file */ + for (line_id = 1, rule_id = 0; rule_id < *n_rules; line_id++) { + char *tokens[CMD_MAX_TOKENS]; + uint32_t n_tokens, n_tokens_parsed, t0; + + /* Read next line from file. */ + if (fgets(line, line_len_max + 1, f) == NULL) + break; + + /* Comment. */ + if (is_comment(line)) + continue; + + /* Parse line. */ + n_tokens = RTE_DIM(tokens); + status = softnic_parse_tokenize_string(line, tokens, &n_tokens); + if (status) { + status = -EINVAL; + break; + } + + /* Empty line. */ + if (n_tokens == 0) + continue; + t0 = 0; + + /* Rule match. */ + n_tokens_parsed = parse_match(tokens + t0, + n_tokens - t0, + out, + out_size, + &m[rule_id]); + if (n_tokens_parsed == 0) { + status = -EINVAL; + break; + } + t0 += n_tokens_parsed; + + /* Rule action. */ + n_tokens_parsed = parse_table_action(tokens + t0, + n_tokens - t0, + out, + out_size, + &a[rule_id]); + if (n_tokens_parsed == 0) { + status = -EINVAL; + break; + } + t0 += n_tokens_parsed; + + /* Line completed. 
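+	 * A line is accepted only when parse_match() and
+	 * parse_table_action() together consumed every token on it; any
+	 * trailing token aborts the whole file load. E.g., with <match>
+	 * standing for a valid match clause:
+	 *
+	 *    match <match> action fwd port 0         - accepted
+	 *    match <match> action fwd port 0 extra   - rejected (-EINVAL)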
*/ + if (t0 < n_tokens) { + status = -EINVAL; + break; + } + + /* Increment rule count */ + rule_id++; + } + + /* Close file */ + fclose(f); + + /* Memory free */ + free(line); + + *n_rules = rule_id; + *line_number = line_id; + return status; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cryptodev.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cryptodev.c new file mode 100644 index 000000000..a1a4ca565 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_cryptodev.c @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include <stdlib.h> +#include <stdio.h> + +#include <rte_cryptodev.h> +#include <rte_cryptodev_pmd.h> +#include <rte_string_fns.h> + +#include "rte_eth_softnic_internals.h" + +#define SOFTNIC_CRYPTO_SESSION_CACHE_SIZE 128 + +int +softnic_cryptodev_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->cryptodev_list); + + return 0; +} + +void +softnic_cryptodev_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_cryptodev *cryptodev; + + cryptodev = TAILQ_FIRST(&p->cryptodev_list); + if (cryptodev == NULL) + break; + + TAILQ_REMOVE(&p->cryptodev_list, cryptodev, node); + free(cryptodev); + } +} + +struct softnic_cryptodev * +softnic_cryptodev_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_cryptodev *cryptodev; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(cryptodev, &p->cryptodev_list, node) + if (strcmp(cryptodev->name, name) == 0) + return cryptodev; + + return NULL; +} + +struct softnic_cryptodev * +softnic_cryptodev_create(struct pmd_internals *p, + const char *name, + struct softnic_cryptodev_params *params) +{ + struct rte_cryptodev_info dev_info; + struct rte_cryptodev_config dev_conf; + struct rte_cryptodev_qp_conf queue_conf; + struct softnic_cryptodev *cryptodev; + uint32_t dev_id, i; + uint32_t socket_id; + uint32_t cache_size; + char mp_name[NAME_SIZE]; + int status; + + /* Check input params */ + if ((name == NULL) || + softnic_cryptodev_find(p, name) || + (params->n_queues == 0) || + (params->queue_size == 0) || + (params->session_pool_size == 0)) + return NULL; + + if (params->dev_name) { + status = rte_cryptodev_get_dev_id(params->dev_name); + if (status == -1) + return NULL; + + dev_id = (uint32_t)status; + } else { + if (rte_cryptodev_pmd_is_valid_dev(params->dev_id) == 0) + return NULL; + + dev_id = params->dev_id; + } + + cache_size = (params->session_pool_size / 2 < + SOFTNIC_CRYPTO_SESSION_CACHE_SIZE) ? 
+ (params->session_pool_size / 2) : + SOFTNIC_CRYPTO_SESSION_CACHE_SIZE; + + socket_id = rte_cryptodev_socket_id(dev_id); + rte_cryptodev_info_get(dev_id, &dev_info); + + if (dev_info.max_nb_queue_pairs < params->n_queues) + return NULL; + if (dev_info.feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED) + return NULL; + + dev_conf.socket_id = socket_id; + dev_conf.nb_queue_pairs = params->n_queues; + + status = rte_cryptodev_configure(dev_id, &dev_conf); + if (status < 0) + return NULL; + + queue_conf.nb_descriptors = params->queue_size; + for (i = 0; i < params->n_queues; i++) { + status = rte_cryptodev_queue_pair_setup(dev_id, i, + &queue_conf, socket_id); + if (status < 0) + return NULL; + } + + if (rte_cryptodev_start(dev_id) < 0) + return NULL; + + cryptodev = calloc(1, sizeof(struct softnic_cryptodev)); + if (cryptodev == NULL) { + rte_cryptodev_stop(dev_id); + return NULL; + } + + strlcpy(cryptodev->name, name, sizeof(cryptodev->name)); + cryptodev->dev_id = dev_id; + cryptodev->n_queues = params->n_queues; + + snprintf(mp_name, NAME_SIZE, "%s_mp%u", name, dev_id); + cryptodev->mp_create = rte_cryptodev_sym_session_pool_create(mp_name, + params->session_pool_size, + 0, + cache_size, + 0, + socket_id); + if (!cryptodev->mp_create) + goto error_exit; + + snprintf(mp_name, NAME_SIZE, "%s_priv_mp%u", name, dev_id); + cryptodev->mp_init = rte_mempool_create(mp_name, + params->session_pool_size, + rte_cryptodev_sym_get_private_session_size(dev_id), + cache_size, + 0, + NULL, + NULL, + NULL, + NULL, + socket_id, + 0); + if (!cryptodev->mp_init) + goto error_exit; + + TAILQ_INSERT_TAIL(&p->cryptodev_list, cryptodev, node); + + return cryptodev; + +error_exit: + if (cryptodev->mp_create) + rte_mempool_free(cryptodev->mp_create); + if (cryptodev->mp_init) + rte_mempool_free(cryptodev->mp_init); + + free(cryptodev); + + return NULL; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c new file mode 100644 index 000000000..f05ff092f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_flow.c @@ -0,0 +1,2288 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_malloc.h> +#include <rte_string_fns.h> +#include <rte_flow.h> +#include <rte_flow_driver.h> +#include <rte_tailq.h> + +#include "rte_eth_softnic_internals.h" +#include "rte_eth_softnic.h" + +#define rte_htons rte_cpu_to_be_16 +#define rte_htonl rte_cpu_to_be_32 + +#define rte_ntohs rte_be_to_cpu_16 +#define rte_ntohl rte_be_to_cpu_32 + +static struct rte_flow * +softnic_flow_find(struct softnic_table *table, + struct softnic_table_rule_match *rule_match) +{ + struct rte_flow *flow; + + TAILQ_FOREACH(flow, &table->flows, node) + if (memcmp(&flow->match, rule_match, sizeof(*rule_match)) == 0) + return flow; + + return NULL; +} + +int +flow_attr_map_set(struct pmd_internals *softnic, + uint32_t group_id, + int ingress, + const char *pipeline_name, + uint32_t table_id) +{ + struct pipeline *pipeline; + struct flow_attr_map *map; + + if (group_id >= SOFTNIC_FLOW_MAX_GROUPS || + pipeline_name == NULL) + return -1; + + pipeline = softnic_pipeline_find(softnic, pipeline_name); + if (pipeline == NULL || + table_id >= pipeline->n_tables) + return -1; + + map = (ingress) ? 
&softnic->flow.ingress_map[group_id] : + &softnic->flow.egress_map[group_id]; + strlcpy(map->pipeline_name, pipeline_name, sizeof(map->pipeline_name)); + map->table_id = table_id; + map->valid = 1; + + return 0; +} + +struct flow_attr_map * +flow_attr_map_get(struct pmd_internals *softnic, + uint32_t group_id, + int ingress) +{ + if (group_id >= SOFTNIC_FLOW_MAX_GROUPS) + return NULL; + + return (ingress) ? &softnic->flow.ingress_map[group_id] : + &softnic->flow.egress_map[group_id]; +} + +static int +flow_pipeline_table_get(struct pmd_internals *softnic, + const struct rte_flow_attr *attr, + const char **pipeline_name, + uint32_t *table_id, + struct rte_flow_error *error) +{ + struct flow_attr_map *map; + + if (attr == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, + "Null attr"); + + if (!attr->ingress && !attr->egress) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, + "Ingress/egress not specified"); + + if (attr->ingress && attr->egress) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, + attr, + "Setting both ingress and egress is not allowed"); + + map = flow_attr_map_get(softnic, + attr->group, + attr->ingress); + if (map == NULL || + map->valid == 0) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, + attr, + "Invalid group ID"); + + if (pipeline_name) + *pipeline_name = map->pipeline_name; + + if (table_id) + *table_id = map->table_id; + + return 0; +} + +union flow_item { + uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX]; + struct rte_flow_item_eth eth; + struct rte_flow_item_vlan vlan; + struct rte_flow_item_ipv4 ipv4; + struct rte_flow_item_ipv6 ipv6; + struct rte_flow_item_icmp icmp; + struct rte_flow_item_udp udp; + struct rte_flow_item_tcp tcp; + struct rte_flow_item_sctp sctp; + struct rte_flow_item_vxlan vxlan; + struct rte_flow_item_e_tag e_tag; + struct rte_flow_item_nvgre nvgre; + struct rte_flow_item_mpls mpls; + struct rte_flow_item_gre gre; + struct rte_flow_item_gtp gtp; + struct rte_flow_item_esp esp; + struct rte_flow_item_geneve geneve; + struct rte_flow_item_vxlan_gpe vxlan_gpe; + struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4; + struct rte_flow_item_ipv6_ext ipv6_ext; + struct rte_flow_item_icmp6 icmp6; + struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns; + struct rte_flow_item_icmp6_nd_na icmp6_nd_na; + struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt; + struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth; + struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth; +}; + +static const union flow_item flow_item_raw_mask; + +static int +flow_item_is_proto(enum rte_flow_item_type type, + const void **mask, + size_t *size) +{ + switch (type) { + case RTE_FLOW_ITEM_TYPE_RAW: + *mask = &flow_item_raw_mask; + *size = sizeof(flow_item_raw_mask); + return 1; /* TRUE */ + + case RTE_FLOW_ITEM_TYPE_ETH: + *mask = &rte_flow_item_eth_mask; + *size = sizeof(struct rte_flow_item_eth); + return 1; /* TRUE */ + + case RTE_FLOW_ITEM_TYPE_VLAN: + *mask = &rte_flow_item_vlan_mask; + *size = sizeof(struct rte_flow_item_vlan); + return 1; + + case RTE_FLOW_ITEM_TYPE_IPV4: + *mask = &rte_flow_item_ipv4_mask; + *size = sizeof(struct rte_flow_item_ipv4); + return 1; + + case RTE_FLOW_ITEM_TYPE_IPV6: + *mask = &rte_flow_item_ipv6_mask; + *size = sizeof(struct rte_flow_item_ipv6); + return 1; + + case RTE_FLOW_ITEM_TYPE_ICMP: + *mask = &rte_flow_item_icmp_mask; + *size = sizeof(struct rte_flow_item_icmp); + return 1; + + case 
RTE_FLOW_ITEM_TYPE_UDP: + *mask = &rte_flow_item_udp_mask; + *size = sizeof(struct rte_flow_item_udp); + return 1; + + case RTE_FLOW_ITEM_TYPE_TCP: + *mask = &rte_flow_item_tcp_mask; + *size = sizeof(struct rte_flow_item_tcp); + return 1; + + case RTE_FLOW_ITEM_TYPE_SCTP: + *mask = &rte_flow_item_sctp_mask; + *size = sizeof(struct rte_flow_item_sctp); + return 1; + + case RTE_FLOW_ITEM_TYPE_VXLAN: + *mask = &rte_flow_item_vxlan_mask; + *size = sizeof(struct rte_flow_item_vxlan); + return 1; + + case RTE_FLOW_ITEM_TYPE_E_TAG: + *mask = &rte_flow_item_e_tag_mask; + *size = sizeof(struct rte_flow_item_e_tag); + return 1; + + case RTE_FLOW_ITEM_TYPE_NVGRE: + *mask = &rte_flow_item_nvgre_mask; + *size = sizeof(struct rte_flow_item_nvgre); + return 1; + + case RTE_FLOW_ITEM_TYPE_MPLS: + *mask = &rte_flow_item_mpls_mask; + *size = sizeof(struct rte_flow_item_mpls); + return 1; + + case RTE_FLOW_ITEM_TYPE_GRE: + *mask = &rte_flow_item_gre_mask; + *size = sizeof(struct rte_flow_item_gre); + return 1; + + case RTE_FLOW_ITEM_TYPE_GTP: + case RTE_FLOW_ITEM_TYPE_GTPC: + case RTE_FLOW_ITEM_TYPE_GTPU: + *mask = &rte_flow_item_gtp_mask; + *size = sizeof(struct rte_flow_item_gtp); + return 1; + + case RTE_FLOW_ITEM_TYPE_ESP: + *mask = &rte_flow_item_esp_mask; + *size = sizeof(struct rte_flow_item_esp); + return 1; + + case RTE_FLOW_ITEM_TYPE_GENEVE: + *mask = &rte_flow_item_geneve_mask; + *size = sizeof(struct rte_flow_item_geneve); + return 1; + + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + *mask = &rte_flow_item_vxlan_gpe_mask; + *size = sizeof(struct rte_flow_item_vxlan_gpe); + return 1; + + case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4: + *mask = &rte_flow_item_arp_eth_ipv4_mask; + *size = sizeof(struct rte_flow_item_arp_eth_ipv4); + return 1; + + case RTE_FLOW_ITEM_TYPE_IPV6_EXT: + *mask = &rte_flow_item_ipv6_ext_mask; + *size = sizeof(struct rte_flow_item_ipv6_ext); + return 1; + + case RTE_FLOW_ITEM_TYPE_ICMP6: + *mask = &rte_flow_item_icmp6_mask; + *size = sizeof(struct rte_flow_item_icmp6); + return 1; + + case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS: + *mask = &rte_flow_item_icmp6_nd_ns_mask; + *size = sizeof(struct rte_flow_item_icmp6_nd_ns); + return 1; + + case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA: + *mask = &rte_flow_item_icmp6_nd_na_mask; + *size = sizeof(struct rte_flow_item_icmp6_nd_na); + return 1; + + case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT: + *mask = &rte_flow_item_icmp6_nd_opt_mask; + *size = sizeof(struct rte_flow_item_icmp6_nd_opt); + return 1; + + case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH: + *mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask; + *size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth); + return 1; + + case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH: + *mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask; + *size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth); + return 1; + + default: return 0; /* FALSE */ + } +} + +static int +flow_item_raw_preprocess(const struct rte_flow_item *item, + union flow_item *item_spec, + union flow_item *item_mask, + size_t *item_size, + int *item_disabled, + struct rte_flow_error *error) +{ + const struct rte_flow_item_raw *item_raw_spec = item->spec; + const struct rte_flow_item_raw *item_raw_mask = item->mask; + const uint8_t *pattern; + const uint8_t *pattern_mask; + uint8_t *spec = (uint8_t *)item_spec; + uint8_t *mask = (uint8_t *)item_mask; + size_t pattern_length, pattern_offset, i; + int disabled; + + if (!item->spec) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "RAW: Null specification"); + + if (item->last) + return 
rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "RAW: Range not allowed (last must be NULL)"); + + if (item_raw_spec->relative == 0) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "RAW: Absolute offset not supported"); + + if (item_raw_spec->search) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "RAW: Search not supported"); + + if (item_raw_spec->offset < 0) + return rte_flow_error_set(error, + ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, + item, + "RAW: Negative offset not supported"); + + if (item_raw_spec->length == 0) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "RAW: Zero pattern length"); + + if (item_raw_spec->offset + item_raw_spec->length > + TABLE_RULE_MATCH_SIZE_MAX) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "RAW: Item too big"); + + if (!item_raw_spec->pattern && item_raw_mask && item_raw_mask->pattern) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "RAW: Non-NULL pattern mask not allowed with NULL pattern"); + + pattern = item_raw_spec->pattern; + pattern_mask = (item_raw_mask) ? item_raw_mask->pattern : NULL; + pattern_length = (size_t)item_raw_spec->length; + pattern_offset = (size_t)item_raw_spec->offset; + + disabled = 0; + if (pattern_mask == NULL) + disabled = 1; + else + for (i = 0; i < pattern_length; i++) + if ((pattern)[i]) + disabled = 1; + + memset(spec, 0, TABLE_RULE_MATCH_SIZE_MAX); + if (pattern) + memcpy(&spec[pattern_offset], pattern, pattern_length); + + memset(mask, 0, TABLE_RULE_MATCH_SIZE_MAX); + if (pattern_mask) + memcpy(&mask[pattern_offset], pattern_mask, pattern_length); + + *item_size = pattern_offset + pattern_length; + *item_disabled = disabled; + + return 0; +} + +static int +flow_item_proto_preprocess(const struct rte_flow_item *item, + union flow_item *item_spec, + union flow_item *item_mask, + size_t *item_size, + int *item_disabled, + struct rte_flow_error *error) +{ + const void *mask_default; + uint8_t *spec = (uint8_t *)item_spec; + uint8_t *mask = (uint8_t *)item_mask; + size_t size, i; + + if (!flow_item_is_proto(item->type, &mask_default, &size)) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Item type not supported"); + + if (item->type == RTE_FLOW_ITEM_TYPE_RAW) + return flow_item_raw_preprocess(item, + item_spec, + item_mask, + item_size, + item_disabled, + error); + + /* spec */ + if (!item->spec) { + /* If spec is NULL, then last and mask also have to be NULL. */ + if (item->last || item->mask) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid item (NULL spec with non-NULL last or mask)"); + + memset(item_spec, 0, size); + memset(item_mask, 0, size); + *item_size = size; + *item_disabled = 1; /* TRUE */ + return 0; + } + + memcpy(spec, item->spec, size); + *item_size = size; + + /* mask */ + if (item->mask) + memcpy(mask, item->mask, size); + else + memcpy(mask, mask_default, size); + + /* disabled */ + for (i = 0; i < size; i++) + if (mask[i]) + break; + *item_disabled = (i == size) ? 1 : 0; + + /* Apply mask over spec. 
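+	 * Worked single-byte example: spec 0xab with mask 0xf0 becomes
+	 * 0xa0. The "last" check below then requires
+	 * (last & mask) == (spec & mask) for every byte, so only
+	 * degenerate ranges pass: last 0xaf (0xaf & 0xf0 == 0xa0) is
+	 * accepted, while last 0xbf (0xb0 != 0xa0) fails with
+	 * "Range not supported".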
*/ + for (i = 0; i < size; i++) + spec[i] &= mask[i]; + + /* last */ + if (item->last) { + uint8_t last[size]; + + /* init last */ + memcpy(last, item->last, size); + for (i = 0; i < size; i++) + last[i] &= mask[i]; + + /* check for range */ + for (i = 0; i < size; i++) + if (last[i] != spec[i]) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Range not supported"); + } + + return 0; +} + +/*** + * Skip disabled protocol items and VOID items + * until any of the mutually exclusive conditions + * from the list below takes place: + * (A) A protocol present in the proto_mask + * is met (either ENABLED or DISABLED); + * (B) A protocol NOT present in the proto_mask is met in ENABLED state; + * (C) The END item is met. + */ +static int +flow_item_skip_disabled_protos(const struct rte_flow_item **item, + uint64_t proto_mask, + size_t *length, + struct rte_flow_error *error) +{ + size_t len = 0; + + for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) { + union flow_item spec, mask; + size_t size; + int disabled = 0, status; + + if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID) + continue; + + status = flow_item_proto_preprocess(*item, + &spec, + &mask, + &size, + &disabled, + error); + if (status) + return status; + + if ((proto_mask & (1LLU << (*item)->type)) || + !disabled) + break; + + len += size; + } + + if (length) + *length = len; + + return 0; +} + +#define FLOW_ITEM_PROTO_IP \ + ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \ + (1LLU << RTE_FLOW_ITEM_TYPE_IPV6)) + +static void +flow_item_skip_void(const struct rte_flow_item **item) +{ + for ( ; ; (*item)++) + if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID) + return; +} + +#define IP_PROTOCOL_TCP 0x06 +#define IP_PROTOCOL_UDP 0x11 +#define IP_PROTOCOL_SCTP 0x84 + +static int +mask_to_depth(uint64_t mask, + uint32_t *depth) +{ + uint64_t n; + + if (mask == UINT64_MAX) { + if (depth) + *depth = 64; + + return 0; + } + + mask = ~mask; + + if (mask & (mask + 1)) + return -1; + + n = __builtin_popcountll(mask); + if (depth) + *depth = (uint32_t)(64 - n); + + return 0; +} + +static int +ipv4_mask_to_depth(uint32_t mask, + uint32_t *depth) +{ + uint32_t d; + int status; + + status = mask_to_depth(mask | (UINT64_MAX << 32), &d); + if (status) + return status; + + d -= 32; + if (depth) + *depth = d; + + return 0; +} + +static int +ipv6_mask_to_depth(uint8_t *mask, + uint32_t *depth) +{ + uint64_t *m = (uint64_t *)mask; + uint64_t m0 = rte_be_to_cpu_64(m[0]); + uint64_t m1 = rte_be_to_cpu_64(m[1]); + uint32_t d0, d1; + int status; + + status = mask_to_depth(m0, &d0); + if (status) + return status; + + status = mask_to_depth(m1, &d1); + if (status) + return status; + + if (d0 < 64 && d1) + return -1; + + if (depth) + *depth = d0 + d1; + + return 0; +} + +static int +port_mask_to_range(uint16_t port, + uint16_t port_mask, + uint16_t *port0, + uint16_t *port1) +{ + int status; + uint16_t p0, p1; + + status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL); + if (status) + return -1; + + p0 = port & port_mask; + p1 = p0 | ~port_mask; + + if (port0) + *port0 = p0; + + if (port1) + *port1 = p1; + + return 0; +} + +static int +flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused, + struct pipeline *pipeline __rte_unused, + struct softnic_table *table __rte_unused, + const struct rte_flow_attr *attr, + const struct rte_flow_item *item, + struct softnic_table_rule_match *rule_match, + struct rte_flow_error *error) +{ + union flow_item spec, mask; + size_t size, length = 0; + int disabled = 0, status; + uint8_t 
ip_proto, ip_proto_mask; + + memset(rule_match, 0, sizeof(*rule_match)); + rule_match->match_type = TABLE_ACL; + rule_match->match.acl.priority = attr->priority; + + /* VOID or disabled protos only, if any. */ + status = flow_item_skip_disabled_protos(&item, + FLOW_ITEM_PROTO_IP, &length, error); + if (status) + return status; + + /* IP only. */ + status = flow_item_proto_preprocess(item, &spec, &mask, + &size, &disabled, error); + if (status) + return status; + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + { + uint32_t sa_depth, da_depth; + + status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr), + &sa_depth); + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal IPv4 header source address mask"); + + status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr), + &da_depth); + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal IPv4 header destination address mask"); + + ip_proto = spec.ipv4.hdr.next_proto_id; + ip_proto_mask = mask.ipv4.hdr.next_proto_id; + + rule_match->match.acl.ip_version = 1; + rule_match->match.acl.ipv4.sa = + rte_ntohl(spec.ipv4.hdr.src_addr); + rule_match->match.acl.ipv4.da = + rte_ntohl(spec.ipv4.hdr.dst_addr); + rule_match->match.acl.sa_depth = sa_depth; + rule_match->match.acl.da_depth = da_depth; + rule_match->match.acl.proto = ip_proto; + rule_match->match.acl.proto_mask = ip_proto_mask; + break; + } /* RTE_FLOW_ITEM_TYPE_IPV4 */ + + case RTE_FLOW_ITEM_TYPE_IPV6: + { + uint32_t sa_depth, da_depth; + + status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth); + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal IPv6 header source address mask"); + + status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth); + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal IPv6 header destination address mask"); + + ip_proto = spec.ipv6.hdr.proto; + ip_proto_mask = mask.ipv6.hdr.proto; + + rule_match->match.acl.ip_version = 0; + memcpy(rule_match->match.acl.ipv6.sa, + spec.ipv6.hdr.src_addr, + sizeof(spec.ipv6.hdr.src_addr)); + memcpy(rule_match->match.acl.ipv6.da, + spec.ipv6.hdr.dst_addr, + sizeof(spec.ipv6.hdr.dst_addr)); + rule_match->match.acl.sa_depth = sa_depth; + rule_match->match.acl.da_depth = da_depth; + rule_match->match.acl.proto = ip_proto; + rule_match->match.acl.proto_mask = ip_proto_mask; + break; + } /* RTE_FLOW_ITEM_TYPE_IPV6 */ + + default: + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: IP protocol required"); + } /* switch */ + + if (ip_proto_mask != UINT8_MAX) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal IP protocol mask"); + + item++; + + /* VOID only, if any. */ + flow_item_skip_void(&item); + + /* TCP/UDP/SCTP only. 
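+	 * The L4 ports below are converted to ranges by
+	 * port_mask_to_range(): the mask must be a contiguous prefix,
+	 * with p0 = port & mask and p1 = p0 | ~mask. E.g. port 4096
+	 * (0x1000) with mask 0xfff0 gives the range [4096, 4111],
+	 * mask 0xffff gives the exact match [4096, 4096], and a
+	 * non-contiguous mask such as 0xff0f is rejected.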
*/ + status = flow_item_proto_preprocess(item, &spec, &mask, + &size, &disabled, error); + if (status) + return status; + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_TCP: + { + uint16_t sp0, sp1, dp0, dp1; + + if (ip_proto != IP_PROTOCOL_TCP) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Item type is TCP, but IP protocol is not"); + + status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port), + rte_ntohs(mask.tcp.hdr.src_port), + &sp0, + &sp1); + + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal TCP source port mask"); + + status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port), + rte_ntohs(mask.tcp.hdr.dst_port), + &dp0, + &dp1); + + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal TCP destination port mask"); + + rule_match->match.acl.sp0 = sp0; + rule_match->match.acl.sp1 = sp1; + rule_match->match.acl.dp0 = dp0; + rule_match->match.acl.dp1 = dp1; + + break; + } /* RTE_FLOW_ITEM_TYPE_TCP */ + + case RTE_FLOW_ITEM_TYPE_UDP: + { + uint16_t sp0, sp1, dp0, dp1; + + if (ip_proto != IP_PROTOCOL_UDP) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Item type is UDP, but IP protocol is not"); + + status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port), + rte_ntohs(mask.udp.hdr.src_port), + &sp0, + &sp1); + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal UDP source port mask"); + + status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port), + rte_ntohs(mask.udp.hdr.dst_port), + &dp0, + &dp1); + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal UDP destination port mask"); + + rule_match->match.acl.sp0 = sp0; + rule_match->match.acl.sp1 = sp1; + rule_match->match.acl.dp0 = dp0; + rule_match->match.acl.dp1 = dp1; + + break; + } /* RTE_FLOW_ITEM_TYPE_UDP */ + + case RTE_FLOW_ITEM_TYPE_SCTP: + { + uint16_t sp0, sp1, dp0, dp1; + + if (ip_proto != IP_PROTOCOL_SCTP) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Item type is SCTP, but IP protocol is not"); + + status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port), + rte_ntohs(mask.sctp.hdr.src_port), + &sp0, + &sp1); + + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal SCTP source port mask"); + + status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port), + rte_ntohs(mask.sctp.hdr.dst_port), + &dp0, + &dp1); + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Illegal SCTP destination port mask"); + + rule_match->match.acl.sp0 = sp0; + rule_match->match.acl.sp1 = sp1; + rule_match->match.acl.dp0 = dp0; + rule_match->match.acl.dp1 = dp1; + + break; + } /* RTE_FLOW_ITEM_TYPE_SCTP */ + + default: + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: TCP/UDP/SCTP required"); + } /* switch */ + + item++; + + /* VOID or disabled protos only, if any. */ + status = flow_item_skip_disabled_protos(&item, 0, NULL, error); + if (status) + return status; + + /* END only. 
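+	 * An accepted ACL pattern therefore has the overall shape
+	 *
+	 *    [disabled or VOID items] ipv4|ipv6 [VOID items]
+	 *        tcp|udp|sctp [disabled or VOID items] end
+	 *
+	 * e.g. "eth (all-zero mask) / ipv4 / tcp / end". Any still-enabled
+	 * item between the L4 header and END makes the check below fail.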
*/ + if (item->type != RTE_FLOW_ITEM_TYPE_END) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "ACL: Expecting END item"); + + return 0; +} + +/** + * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize* + * respectively. + * They are located within a larger buffer at offsets *toffset* and *foffset* + * respectively. Both *tmask* and *fmask* represent bitmasks for the larger + * buffer. + * Question: are the two masks equivalent? + * + * Notes: + * 1. Offset basically indicates that the first offset bytes in the buffer + * are "don't care", so offset is equivalent to pre-pending an "all-zeros" + * array of *offset* bytes to the *mask*. + * 2. Each *mask* might contain a number of zero bytes at the beginning or + * at the end. + * 3. Bytes in the larger buffer after the end of the *mask* are also considered + * "don't care", so they are equivalent to appending an "all-zeros" array of + * bytes to the *mask*. + * + * Example: + * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes + * tmask = [00 22 00 33 00], toffset = 2, tsize = 5 + * => buffer mask = [00 00 00 22 00 33 00 00] + * fmask = [22 00 33], foffset = 3, fsize = 3 + * => buffer mask = [00 00 00 22 00 33 00 00] + * Therefore, the tmask and fmask from this example are equivalent. + */ +static int +hash_key_mask_is_same(uint8_t *tmask, + size_t toffset, + size_t tsize, + uint8_t *fmask, + size_t foffset, + size_t fsize, + size_t *toffset_plus, + size_t *foffset_plus) +{ + size_t tpos; /* Position of first non-zero byte in the tmask buffer. */ + size_t fpos; /* Position of first non-zero byte in the fmask buffer. */ + + /* Compute tpos and fpos. */ + for (tpos = 0; tmask[tpos] == 0; tpos++) + ; + for (fpos = 0; fmask[fpos] == 0; fpos++) + ; + + if (toffset + tpos != foffset + fpos) + return 0; /* FALSE */ + + tsize -= tpos; + fsize -= fpos; + + if (tsize < fsize) { + size_t i; + + for (i = 0; i < tsize; i++) + if (tmask[tpos + i] != fmask[fpos + i]) + return 0; /* FALSE */ + + for ( ; i < fsize; i++) + if (fmask[fpos + i]) + return 0; /* FALSE */ + } else { + size_t i; + + for (i = 0; i < fsize; i++) + if (tmask[tpos + i] != fmask[fpos + i]) + return 0; /* FALSE */ + + for ( ; i < tsize; i++) + if (tmask[tpos + i]) + return 0; /* FALSE */ + } + + if (toffset_plus) + *toffset_plus = tpos; + + if (foffset_plus) + *foffset_plus = fpos; + + return 1; /* TRUE */ +} + +static int +flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused, + struct pipeline *pipeline __rte_unused, + struct softnic_table *table, + const struct rte_flow_attr *attr __rte_unused, + const struct rte_flow_item *item, + struct softnic_table_rule_match *rule_match, + struct rte_flow_error *error) +{ + struct softnic_table_rule_match_hash key, key_mask; + struct softnic_table_hash_params *params = &table->params.match.hash; + size_t offset = 0, length = 0, tpos, fpos; + int status; + + memset(&key, 0, sizeof(key)); + memset(&key_mask, 0, sizeof(key_mask)); + + /* VOID or disabled protos only, if any. */ + status = flow_item_skip_disabled_protos(&item, 0, &offset, error); + if (status) + return status; + + if (item->type == RTE_FLOW_ITEM_TYPE_END) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "HASH: END detected too early"); + + /* VOID or any protocols (enabled or disabled).
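The example in the block comment above hash_key_mask_is_same() can be checked directly against the code; the call below returns 1 (equivalent) and reports where each mask first becomes significant:

#include <stddef.h>
#include <stdint.h>

static void
hash_key_mask_example(void)
{
    uint8_t tmask[5] = {0x00, 0x22, 0x00, 0x33, 0x00};
    uint8_t fmask[3] = {0x22, 0x00, 0x33};
    size_t tplus = 0, fplus = 0;
    int same;

    same = hash_key_mask_is_same(tmask, 2, sizeof(tmask),
        fmask, 3, sizeof(fmask), &tplus, &fplus);
    /* same == 1; tplus == 1 and fplus == 0, i.e. both masks first
     * become non-zero at absolute buffer offset 3 (2 + 1 == 3 + 0). */
    (void)same;
}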
*/ + for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { + union flow_item spec, mask; + size_t size; + int disabled, status; + + if (item->type == RTE_FLOW_ITEM_TYPE_VOID) + continue; + + status = flow_item_proto_preprocess(item, + &spec, + &mask, + &size, + &disabled, + error); + if (status) + return status; + + if (length + size > sizeof(key)) { + if (disabled) + break; + + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "HASH: Item too big"); + } + + memcpy(&key.key[length], &spec, size); + memcpy(&key_mask.key[length], &mask, size); + length += size; + } + + if (item->type != RTE_FLOW_ITEM_TYPE_END) { + /* VOID or disabled protos only, if any. */ + status = flow_item_skip_disabled_protos(&item, 0, NULL, error); + if (status) + return status; + + /* END only. */ + if (item->type != RTE_FLOW_ITEM_TYPE_END) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "HASH: Expecting END item"); + } + + /* Compare flow key mask against table key mask. */ + offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM; + + if (!hash_key_mask_is_same(params->key_mask, + params->key_offset, + params->key_size, + key_mask.key, + offset, + length, + &tpos, + &fpos)) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "HASH: Item list is not observing the match format"); + + /* Rule match. */ + memset(rule_match, 0, sizeof(*rule_match)); + rule_match->match_type = TABLE_HASH; + memcpy(&rule_match->match.hash.key[tpos], + &key.key[fpos], + RTE_MIN(sizeof(rule_match->match.hash.key) - tpos, + length - fpos)); + + return 0; +} + +static int +flow_rule_match_get(struct pmd_internals *softnic, + struct pipeline *pipeline, + struct softnic_table *table, + const struct rte_flow_attr *attr, + const struct rte_flow_item *item, + struct softnic_table_rule_match *rule_match, + struct rte_flow_error *error) +{ + switch (table->params.match_type) { + case TABLE_ACL: + return flow_rule_match_acl_get(softnic, + pipeline, + table, + attr, + item, + rule_match, + error); + + /* FALLTHROUGH */ + + case TABLE_HASH: + return flow_rule_match_hash_get(softnic, + pipeline, + table, + attr, + item, + rule_match, + error); + + /* FALLTHROUGH */ + + default: + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Unsupported pipeline table match type"); + } +} + +static int +flow_rule_action_get(struct pmd_internals *softnic, + struct pipeline *pipeline, + struct softnic_table *table, + const struct rte_flow_attr *attr, + const struct rte_flow_action *action, + struct softnic_table_rule_action *rule_action, + struct rte_flow_error *error) +{ + struct softnic_table_action_profile *profile; + struct softnic_table_action_profile_params *params; + int n_jump_queue_rss_drop = 0; + int n_count = 0; + int n_mark = 0; + int n_vxlan_decap = 0; + + profile = softnic_table_action_profile_find(softnic, + table->params.action_profile_name); + if (profile == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + action, + "JUMP: Table action profile"); + + params = &profile->params; + + for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) { + if (action->type == RTE_FLOW_ACTION_TYPE_VOID) + continue; + + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_JUMP: + { + const struct rte_flow_action_jump *conf = action->conf; + struct flow_attr_map *map; + + if (conf == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "JUMP: 
Null configuration"); + + if (n_jump_queue_rss_drop) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "Only one termination action is" + " allowed per flow"); + + if ((params->action_mask & + (1LLU << RTE_TABLE_ACTION_FWD)) == 0) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "JUMP action not enabled for this table"); + + n_jump_queue_rss_drop = 1; + + map = flow_attr_map_get(softnic, + conf->group, + attr->ingress); + if (map == NULL || map->valid == 0) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "JUMP: Invalid group mapping"); + + if (strcmp(pipeline->name, map->pipeline_name) != 0) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "JUMP: Jump to table in different pipeline"); + + /* RTE_TABLE_ACTION_FWD */ + rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE; + rule_action->fwd.id = map->table_id; + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD; + break; + } /* RTE_FLOW_ACTION_TYPE_JUMP */ + + case RTE_FLOW_ACTION_TYPE_QUEUE: + { + char name[NAME_SIZE]; + struct rte_eth_dev *dev; + const struct rte_flow_action_queue *conf = action->conf; + uint32_t port_id; + int status; + + if (conf == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "QUEUE: Null configuration"); + + if (n_jump_queue_rss_drop) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "Only one termination action is allowed" + " per flow"); + + if ((params->action_mask & + (1LLU << RTE_TABLE_ACTION_FWD)) == 0) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "QUEUE action not enabled for this table"); + + n_jump_queue_rss_drop = 1; + + dev = ETHDEV(softnic); + if (dev == NULL || + conf->index >= dev->data->nb_rx_queues) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "QUEUE: Invalid RX queue ID"); + + snprintf(name, sizeof(name), "RXQ%u", + (uint32_t)conf->index); + + status = softnic_pipeline_port_out_find(softnic, + pipeline->name, + name, + &port_id); + if (status) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "QUEUE: RX queue not accessible from this pipeline"); + + /* RTE_TABLE_ACTION_FWD */ + rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT; + rule_action->fwd.id = port_id; + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD; + break; + } /*RTE_FLOW_ACTION_TYPE_QUEUE */ + + case RTE_FLOW_ACTION_TYPE_RSS: + { + const struct rte_flow_action_rss *conf = action->conf; + uint32_t i; + + if (conf == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "RSS: Null configuration"); + + if (!rte_is_power_of_2(conf->queue_num)) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + conf, + "RSS: Number of queues must be a power of 2"); + + if (conf->queue_num > RTE_DIM(rule_action->lb.out)) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + conf, + "RSS: Number of queues too big"); + + if (n_jump_queue_rss_drop) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "Only one termination action is allowed per flow"); + + if (((params->action_mask & + (1LLU << RTE_TABLE_ACTION_FWD)) == 0) || + ((params->action_mask & + (1LLU << RTE_TABLE_ACTION_LB)) == 0)) + return rte_flow_error_set(error, + ENOTSUP, + 
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "RSS action not supported by this table"); + + if (params->lb.out_offset != + pipeline->params.offset_port_id) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "RSS action not supported by this pipeline"); + + n_jump_queue_rss_drop = 1; + + /* RTE_TABLE_ACTION_LB */ + for (i = 0; i < conf->queue_num; i++) { + char name[NAME_SIZE]; + struct rte_eth_dev *dev; + uint32_t port_id; + int status; + + dev = ETHDEV(softnic); + if (dev == NULL || + conf->queue[i] >= + dev->data->nb_rx_queues) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "RSS: Invalid RX queue ID"); + + snprintf(name, sizeof(name), "RXQ%u", + (uint32_t)conf->queue[i]); + + status = softnic_pipeline_port_out_find(softnic, + pipeline->name, + name, + &port_id); + if (status) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "RSS: RX queue not accessible from this pipeline"); + + rule_action->lb.out[i] = port_id; + } + + for ( ; i < RTE_DIM(rule_action->lb.out); i++) + rule_action->lb.out[i] = + rule_action->lb.out[i % conf->queue_num]; + + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB; + + /* RTE_TABLE_ACTION_FWD */ + rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META; + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD; + break; + } /* RTE_FLOW_ACTION_TYPE_RSS */ + + case RTE_FLOW_ACTION_TYPE_DROP: + { + const void *conf = action->conf; + + if (conf != NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "DROP: No configuration required"); + + if (n_jump_queue_rss_drop) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "Only one termination action is allowed per flow"); + if ((params->action_mask & + (1LLU << RTE_TABLE_ACTION_FWD)) == 0) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "DROP action not supported by this table"); + + n_jump_queue_rss_drop = 1; + + /* RTE_TABLE_ACTION_FWD */ + rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP; + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD; + break; + } /* RTE_FLOW_ACTION_TYPE_DROP */ + + case RTE_FLOW_ACTION_TYPE_COUNT: + { + const struct rte_flow_action_count *conf = action->conf; + + if (conf == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "COUNT: Null configuration"); + + if (conf->shared) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + conf, + "COUNT: Shared counters not supported"); + + if (n_count) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "Only one COUNT action per flow"); + + if ((params->action_mask & + (1LLU << RTE_TABLE_ACTION_STATS)) == 0) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "COUNT action not supported by this table"); + + n_count = 1; + + /* RTE_TABLE_ACTION_STATS */ + rule_action->stats.n_packets = 0; + rule_action->stats.n_bytes = 0; + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS; + break; + } /* RTE_FLOW_ACTION_TYPE_COUNT */ + + case RTE_FLOW_ACTION_TYPE_MARK: + { + const struct rte_flow_action_mark *conf = action->conf; + + if (conf == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "MARK: Null configuration"); + + if (n_mark) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "Only one MARK 
action per flow"); + + if ((params->action_mask & + (1LLU << RTE_TABLE_ACTION_TAG)) == 0) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "MARK action not supported by this table"); + + n_mark = 1; + + /* RTE_TABLE_ACTION_TAG */ + rule_action->tag.tag = conf->id; + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_TAG; + break; + } /* RTE_FLOW_ACTION_TYPE_MARK */ + + case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP: + { + const struct rte_flow_action_mark *conf = action->conf; + + if (conf) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "VXLAN DECAP: Non-null configuration"); + + if (n_vxlan_decap) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "Only one VXLAN DECAP action per flow"); + + if ((params->action_mask & + (1LLU << RTE_TABLE_ACTION_DECAP)) == 0) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "VXLAN DECAP action not supported by this table"); + + n_vxlan_decap = 1; + + /* RTE_TABLE_ACTION_DECAP */ + rule_action->decap.n = 50; /* Ether/IPv4/UDP/VXLAN */ + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_DECAP; + break; + } /* RTE_FLOW_ACTION_TYPE_VXLAN_DECAP */ + + case RTE_FLOW_ACTION_TYPE_METER: + { + const struct rte_flow_action_meter *conf = action->conf; + struct softnic_mtr_meter_profile *mp; + struct softnic_mtr *m; + uint32_t table_id = table - pipeline->table; + uint32_t meter_profile_id; + int status; + + if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "METER: Table action not supported"); + + if (params->mtr.n_tc != 1) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "METER: Multiple TCs not supported"); + + if (conf == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "METER: Null configuration"); + + m = softnic_mtr_find(softnic, conf->mtr_id); + + if (m == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, + "METER: Invalid meter ID"); + + if (m->flow) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_CONF, + NULL, + "METER: Meter already attached to a flow"); + + meter_profile_id = m->params.meter_profile_id; + mp = softnic_mtr_meter_profile_find(softnic, meter_profile_id); + + /* Add meter profile to pipeline table */ + if (!softnic_pipeline_table_meter_profile_find(table, + meter_profile_id)) { + struct rte_table_action_meter_profile profile; + + memset(&profile, 0, sizeof(profile)); + profile.alg = RTE_TABLE_ACTION_METER_TRTCM; + profile.trtcm.cir = mp->params.trtcm_rfc2698.cir; + profile.trtcm.pir = mp->params.trtcm_rfc2698.pir; + profile.trtcm.cbs = mp->params.trtcm_rfc2698.cbs; + profile.trtcm.pbs = mp->params.trtcm_rfc2698.pbs; + + status = softnic_pipeline_table_mtr_profile_add(softnic, + pipeline->name, + table_id, + meter_profile_id, + &profile); + if (status) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "METER: Table meter profile add failed"); + return -1; + } + } + + /* RTE_TABLE_ACTION_METER */ + rule_action->mtr.mtr[0].meter_profile_id = meter_profile_id; + rule_action->mtr.mtr[0].policer[RTE_COLOR_GREEN] = + softnic_table_action_policer(m->params.action[RTE_COLOR_GREEN]); + rule_action->mtr.mtr[0].policer[RTE_COLOR_YELLOW] = + softnic_table_action_policer(m->params.action[RTE_COLOR_YELLOW]); + 
rule_action->mtr.mtr[0].policer[RTE_COLOR_RED] = + softnic_table_action_policer(m->params.action[RTE_COLOR_RED]); + rule_action->mtr.tc_mask = 1; + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_MTR; + break; + } /* RTE_FLOW_ACTION_TYPE_METER */ + + case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP: + { + const struct rte_flow_action_vxlan_encap *conf = + action->conf; + const struct rte_flow_item *item; + union flow_item spec, mask; + int disabled = 0, status; + size_t size; + + if (conf == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "VXLAN ENCAP: Null configuration"); + + item = conf->definition; + if (item == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "VXLAN ENCAP: Null configuration definition"); + + if (!(params->action_mask & + (1LLU << RTE_TABLE_ACTION_ENCAP))) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "VXLAN ENCAP: Encap action not enabled for this table"); + + /* Check for Ether. */ + flow_item_skip_void(&item); + status = flow_item_proto_preprocess(item, &spec, &mask, + &size, &disabled, error); + if (status) + return status; + + if (item->type != RTE_FLOW_ITEM_TYPE_ETH) { + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN ENCAP: first encap item should be ether"); + } + rte_ether_addr_copy(&spec.eth.dst, + &rule_action->encap.vxlan.ether.da); + rte_ether_addr_copy(&spec.eth.src, + &rule_action->encap.vxlan.ether.sa); + + item++; + + /* Check for VLAN. */ + flow_item_skip_void(&item); + status = flow_item_proto_preprocess(item, &spec, &mask, + &size, &disabled, error); + if (status) + return status; + + if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) { + if (!params->encap.vxlan.vlan) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN ENCAP: vlan encap not supported by table"); + + uint16_t tci = rte_ntohs(spec.vlan.tci); + rule_action->encap.vxlan.vlan.pcp = + tci >> 13; + rule_action->encap.vxlan.vlan.dei = + (tci >> 12) & 0x1; + rule_action->encap.vxlan.vlan.vid = + tci & 0xfff; + + item++; + + flow_item_skip_void(&item); + status = flow_item_proto_preprocess(item, &spec, + &mask, &size, &disabled, error); + if (status) + return status; + } else { + if (params->encap.vxlan.vlan) + return rte_flow_error_set(error, + ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN ENCAP: expecting vlan encap item"); + } + + /* Check for IPV4/IPV6. 
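The VLAN branch above splits the 16-bit TCI into its three standard fields: PCP (3 bits), DEI (1 bit) and VID (12 bits), most significant first. The same split, restated as a self-contained helper:

#include <stdint.h>

struct vlan_tci_fields {
    uint8_t pcp;   /* Priority Code Point, bits 15..13. */
    uint8_t dei;   /* Drop Eligible Indicator, bit 12. */
    uint16_t vid;  /* VLAN Identifier, bits 11..0. */
};

static struct vlan_tci_fields
vlan_tci_split(uint16_t tci) /* Host byte order. */
{
    struct vlan_tci_fields f = {
        .pcp = (uint8_t)(tci >> 13),
        .dei = (uint8_t)((tci >> 12) & 0x1),
        .vid = (uint16_t)(tci & 0xfff),
    };

    return f; /* Example: 0x6064 -> pcp 3, dei 0, vid 100. */
}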
*/ + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + { + rule_action->encap.vxlan.ipv4.sa = + rte_ntohl(spec.ipv4.hdr.src_addr); + rule_action->encap.vxlan.ipv4.da = + rte_ntohl(spec.ipv4.hdr.dst_addr); + rule_action->encap.vxlan.ipv4.dscp = + spec.ipv4.hdr.type_of_service >> 2; + rule_action->encap.vxlan.ipv4.ttl = + spec.ipv4.hdr.time_to_live; + break; + } + case RTE_FLOW_ITEM_TYPE_IPV6: + { + uint32_t vtc_flow; + + memcpy(&rule_action->encap.vxlan.ipv6.sa, + &spec.ipv6.hdr.src_addr, + sizeof(spec.ipv6.hdr.src_addr)); + memcpy(&rule_action->encap.vxlan.ipv6.da, + &spec.ipv6.hdr.dst_addr, + sizeof(spec.ipv6.hdr.dst_addr)); + vtc_flow = rte_ntohl(spec.ipv6.hdr.vtc_flow); + rule_action->encap.vxlan.ipv6.flow_label = + vtc_flow & 0xfffff; + rule_action->encap.vxlan.ipv6.dscp = + (vtc_flow >> 22) & 0x3f; + rule_action->encap.vxlan.ipv6.hop_limit = + spec.ipv6.hdr.hop_limits; + break; + } + default: + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN ENCAP: encap item after ether should be ipv4/ipv6"); + } + + item++; + + /* Check for UDP. */ + flow_item_skip_void(&item); + status = flow_item_proto_preprocess(item, &spec, &mask, + &size, &disabled, error); + if (status) + return status; + + if (item->type != RTE_FLOW_ITEM_TYPE_UDP) { + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN ENCAP: encap item after ipv4/ipv6 should be udp"); + } + rule_action->encap.vxlan.udp.sp = + rte_ntohs(spec.udp.hdr.src_port); + rule_action->encap.vxlan.udp.dp = + rte_ntohs(spec.udp.hdr.dst_port); + + item++; + + /* Check for VXLAN. */ + flow_item_skip_void(&item); + status = flow_item_proto_preprocess(item, &spec, &mask, + &size, &disabled, error); + if (status) + return status; + + if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) { + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN ENCAP: encap item after udp should be vxlan"); + } + rule_action->encap.vxlan.vxlan.vni = + (spec.vxlan.vni[0] << 16U | + spec.vxlan.vni[1] << 8U + | spec.vxlan.vni[2]); + + item++; + + /* Check for END. */ + flow_item_skip_void(&item); + + if (item->type != RTE_FLOW_ITEM_TYPE_END) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "VXLAN ENCAP: expecting END item"); + + rule_action->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN; + rule_action->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP; + break; + } /* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP */ + + default: + return -ENOTSUP; + } + } + + if (n_jump_queue_rss_drop == 0) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + action, + "Flow does not have any terminating action"); + + return 0; +} + +static int +pmd_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item item[], + const struct rte_flow_action action[], + struct rte_flow_error *error) +{ + struct softnic_table_rule_match rule_match; + struct softnic_table_rule_action rule_action; + + struct pmd_internals *softnic = dev->data->dev_private; + struct pipeline *pipeline; + struct softnic_table *table; + const char *pipeline_name = NULL; + uint32_t table_id = 0; + int status; + + /* Check input parameters. 
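Further down the same branch, the 24-bit VXLAN Network Identifier is assembled from the three wire-order bytes of the item, with vni[0] the most significant. For reference, the assembly and its inverse:

#include <stdint.h>

static uint32_t
vxlan_vni_get(const uint8_t vni[3])
{
    return ((uint32_t)vni[0] << 16) |
        ((uint32_t)vni[1] << 8) |
        (uint32_t)vni[2];
}

static void
vxlan_vni_set(uint8_t vni[3], uint32_t val) /* val < 2^24 */
{
    vni[0] = (uint8_t)(val >> 16);
    vni[1] = (uint8_t)(val >> 8);
    vni[2] = (uint8_t)val;
}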
*/ + if (attr == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, "Null attr"); + + if (item == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "Null item"); + + if (action == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Null action"); + + /* Identify the pipeline table to add this flow to. */ + status = flow_pipeline_table_get(softnic, attr, &pipeline_name, + &table_id, error); + if (status) + return status; + + pipeline = softnic_pipeline_find(softnic, pipeline_name); + if (pipeline == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Invalid pipeline name"); + + if (table_id >= pipeline->n_tables) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Invalid pipeline table ID"); + + table = &pipeline->table[table_id]; + + /* Rule match. */ + memset(&rule_match, 0, sizeof(rule_match)); + status = flow_rule_match_get(softnic, + pipeline, + table, + attr, + item, + &rule_match, + error); + if (status) + return status; + + /* Rule action. */ + memset(&rule_action, 0, sizeof(rule_action)); + status = flow_rule_action_get(softnic, + pipeline, + table, + attr, + action, + &rule_action, + error); + if (status) + return status; + + return 0; +} + +static struct softnic_mtr * +flow_action_meter_get(struct pmd_internals *softnic, + const struct rte_flow_action *action) +{ + for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) + if (action->type == RTE_FLOW_ACTION_TYPE_METER) { + const struct rte_flow_action_meter *conf = action->conf; + + if (conf == NULL) + return NULL; + + return softnic_mtr_find(softnic, conf->mtr_id); + } + + return NULL; +} + +static void +flow_meter_owner_reset(struct pmd_internals *softnic, + struct rte_flow *flow) +{ + struct softnic_mtr_list *ml = &softnic->mtr.mtrs; + struct softnic_mtr *m; + + TAILQ_FOREACH(m, ml, node) + if (m->flow == flow) { + m->flow = NULL; + break; + } +} + +static void +flow_meter_owner_set(struct pmd_internals *softnic, + struct rte_flow *flow, + struct softnic_mtr *mtr) +{ + /* Reset current flow meter */ + flow_meter_owner_reset(softnic, flow); + + /* Set new flow meter */ + mtr->flow = flow; +} + +static int +is_meter_action_enable(struct pmd_internals *softnic, + struct softnic_table *table) +{ + struct softnic_table_action_profile *profile = + softnic_table_action_profile_find(softnic, + table->params.action_profile_name); + struct softnic_table_action_profile_params *params = &profile->params; + + return (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) ? 1 : 0; +} + +static struct rte_flow * +pmd_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item item[], + const struct rte_flow_action action[], + struct rte_flow_error *error) +{ + struct softnic_table_rule_match rule_match; + struct softnic_table_rule_action rule_action; + void *rule_data; + + struct pmd_internals *softnic = dev->data->dev_private; + struct pipeline *pipeline; + struct softnic_table *table; + struct rte_flow *flow; + struct softnic_mtr *mtr; + const char *pipeline_name = NULL; + uint32_t table_id = 0; + int new_flow, status; + + /* Check input parameters. 
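pmd_flow_validate() runs the complete match/action resolution and then discards the result, so an application can probe a rule cheaply before committing to it. A hypothetical application-side sketch using the generic ethdev flow API (port_id, attr and the item/action arrays are placeholders supplied by the caller):

#include <rte_flow.h>

static struct rte_flow *
try_create_flow(uint16_t port_id,
    const struct rte_flow_attr *attr,
    const struct rte_flow_item pattern[],
    const struct rte_flow_action actions[])
{
    struct rte_flow_error err;

    /* On failure the PMD fills err.message, as seen above. */
    if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
        return NULL;

    return rte_flow_create(port_id, attr, pattern, actions, &err);
}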
*/ + if (attr == NULL) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, + NULL, + "Null attr"); + return NULL; + } + + if (item == NULL) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, + "Null item"); + return NULL; + } + + if (action == NULL) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + NULL, + "Null action"); + return NULL; + } + + /* Identify the pipeline table to add this flow to. */ + status = flow_pipeline_table_get(softnic, attr, &pipeline_name, + &table_id, error); + if (status) + return NULL; + + pipeline = softnic_pipeline_find(softnic, pipeline_name); + if (pipeline == NULL) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Invalid pipeline name"); + return NULL; + } + + if (table_id >= pipeline->n_tables) { + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Invalid pipeline table ID"); + return NULL; + } + + table = &pipeline->table[table_id]; + + /* Rule match. */ + memset(&rule_match, 0, sizeof(rule_match)); + status = flow_rule_match_get(softnic, + pipeline, + table, + attr, + item, + &rule_match, + error); + if (status) + return NULL; + + /* Rule action. */ + memset(&rule_action, 0, sizeof(rule_action)); + status = flow_rule_action_get(softnic, + pipeline, + table, + attr, + action, + &rule_action, + error); + if (status) + return NULL; + + /* Flow find/allocate. */ + new_flow = 0; + flow = softnic_flow_find(table, &rule_match); + if (flow == NULL) { + new_flow = 1; + flow = calloc(1, sizeof(struct rte_flow)); + if (flow == NULL) { + rte_flow_error_set(error, + ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Not enough memory for new flow"); + return NULL; + } + } + + /* Rule add. */ + status = softnic_pipeline_table_rule_add(softnic, + pipeline_name, + table_id, + &rule_match, + &rule_action, + &rule_data); + if (status) { + if (new_flow) + free(flow); + + rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Pipeline table rule add failed"); + return NULL; + } + + /* Flow fill in. */ + memcpy(&flow->match, &rule_match, sizeof(rule_match)); + memcpy(&flow->action, &rule_action, sizeof(rule_action)); + flow->data = rule_data; + flow->pipeline = pipeline; + flow->table_id = table_id; + + mtr = flow_action_meter_get(softnic, action); + if (mtr) + flow_meter_owner_set(softnic, flow, mtr); + + /* Flow add to list. */ + if (new_flow) + TAILQ_INSERT_TAIL(&table->flows, flow, node); + + return flow; +} + +static int +pmd_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + struct pmd_internals *softnic = dev->data->dev_private; + struct softnic_table *table; + int status; + + /* Check input parameters. */ + if (flow == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Null flow"); + + table = &flow->pipeline->table[flow->table_id]; + + /* Rule delete. */ + status = softnic_pipeline_table_rule_delete(softnic, + flow->pipeline->name, + flow->table_id, + &flow->match); + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Pipeline table rule delete failed"); + + /* Update dependencies */ + if (is_meter_action_enable(softnic, table)) + flow_meter_owner_reset(softnic, flow); + + /* Flow delete. 
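Destroying a flow releases the pipeline table rule and detaches any meter before the flow object itself is freed; pmd_flow_flush() below repeats the same steps for every flow of every pipeline. A matching application-side cleanup sketch (hypothetical helper):

#include <stdio.h>
#include <rte_flow.h>

static void
flow_cleanup(uint16_t port_id, struct rte_flow *flow)
{
    struct rte_flow_error err;

    if (flow != NULL && rte_flow_destroy(port_id, flow, &err) != 0)
        printf("destroy failed: %s\n", err.message ? err.message : "-");

    if (rte_flow_flush(port_id, &err) != 0)
        printf("flush failed: %s\n", err.message ? err.message : "-");
}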
*/ + TAILQ_REMOVE(&table->flows, flow, node); + free(flow); + + return 0; +} + +static int +pmd_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct pmd_internals *softnic = dev->data->dev_private; + struct pipeline *pipeline; + int fail_to_del_rule = 0; + uint32_t i; + + TAILQ_FOREACH(pipeline, &softnic->pipeline_list, node) { + /* Remove all the flows added to the tables. */ + for (i = 0; i < pipeline->n_tables; i++) { + struct softnic_table *table = &pipeline->table[i]; + struct rte_flow *flow; + void *temp; + int status; + + TAILQ_FOREACH_SAFE(flow, &table->flows, node, temp) { + /* Rule delete. */ + status = softnic_pipeline_table_rule_delete + (softnic, + pipeline->name, + i, + &flow->match); + if (status) + fail_to_del_rule = 1; + /* Update dependencies */ + if (is_meter_action_enable(softnic, table)) + flow_meter_owner_reset(softnic, flow); + + /* Flow delete. */ + TAILQ_REMOVE(&table->flows, flow, node); + free(flow); + } + } + } + + if (fail_to_del_rule) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Some of the rules could not be deleted"); + + return 0; +} + +static int +pmd_flow_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow, + const struct rte_flow_action *action __rte_unused, + void *data, + struct rte_flow_error *error) +{ + struct rte_table_action_stats_counters stats; + struct softnic_table *table; + struct rte_flow_query_count *flow_stats = data; + int status; + + /* Check input parameters. */ + if (flow == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, + "Null flow"); + + if (data == NULL) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Null data"); + + table = &flow->pipeline->table[flow->table_id]; + + /* Rule stats read. */ + status = rte_table_action_stats_read(table->a, + flow->data, + &stats, + flow_stats->reset); + if (status) + return rte_flow_error_set(error, + EINVAL, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, + "Pipeline table rule stats read failed"); + + /* Fill in flow stats. */ + flow_stats->hits_set = + (table->ap->params.stats.n_packets_enabled) ? 1 : 0; + flow_stats->bytes_set = + (table->ap->params.stats.n_bytes_enabled) ? 
1 : 0; + flow_stats->hits = stats.n_packets; + flow_stats->bytes = stats.n_bytes; + + return 0; +} + +const struct rte_flow_ops pmd_flow_ops = { + .validate = pmd_flow_validate, + .create = pmd_flow_create, + .destroy = pmd_flow_destroy, + .flush = pmd_flow_flush, + .query = pmd_flow_query, + .isolate = NULL, +}; diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_internals.h b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_internals.h new file mode 100644 index 000000000..6eec43b22 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_internals.h @@ -0,0 +1,1127 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ +#define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ + +#include <stddef.h> +#include <stdint.h> +#include <sys/queue.h> + +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_ring.h> +#include <rte_ethdev.h> +#include <rte_sched.h> +#include <rte_port_in_action.h> +#include <rte_table_action.h> +#include <rte_pipeline.h> + +#include <rte_ethdev_core.h> +#include <rte_ethdev_driver.h> +#include <rte_tm_driver.h> +#include <rte_flow_driver.h> +#include <rte_mtr_driver.h> + +#include "rte_eth_softnic.h" +#include "conn.h" + +#define NAME_SIZE 64 + +/** + * PMD Parameters + */ + +struct pmd_params { + const char *name; + const char *firmware; + uint16_t conn_port; + uint32_t cpu_id; + int sc; /**< Service cores. */ + + /** Traffic Management (TM) */ + struct { + uint32_t n_queues; /**< Number of queues */ + uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + } tm; +}; + +/** + * Ethdev Flow API + */ +struct rte_flow; + +TAILQ_HEAD(flow_list, rte_flow); + +struct flow_attr_map { + char pipeline_name[NAME_SIZE]; + uint32_t table_id; + int valid; +}; + +#ifndef SOFTNIC_FLOW_MAX_GROUPS +#define SOFTNIC_FLOW_MAX_GROUPS 64 +#endif + +struct flow_internals { + struct flow_attr_map ingress_map[SOFTNIC_FLOW_MAX_GROUPS]; + struct flow_attr_map egress_map[SOFTNIC_FLOW_MAX_GROUPS]; +}; + +/** + * Meter + */ + +/* MTR meter profile */ +struct softnic_mtr_meter_profile { + TAILQ_ENTRY(softnic_mtr_meter_profile) node; + uint32_t meter_profile_id; + struct rte_mtr_meter_profile params; + uint32_t n_users; +}; + +TAILQ_HEAD(softnic_mtr_meter_profile_list, softnic_mtr_meter_profile); + +/* MTR meter object */ +struct softnic_mtr { + TAILQ_ENTRY(softnic_mtr) node; + uint32_t mtr_id; + struct rte_mtr_params params; + struct rte_flow *flow; +}; + +TAILQ_HEAD(softnic_mtr_list, softnic_mtr); + +struct mtr_internals { + struct softnic_mtr_meter_profile_list meter_profiles; + struct softnic_mtr_list mtrs; +}; + +/** + * MEMPOOL + */ +struct softnic_mempool_params { + uint32_t buffer_size; + uint32_t pool_size; + uint32_t cache_size; +}; + +struct softnic_mempool { + TAILQ_ENTRY(softnic_mempool) node; + char name[NAME_SIZE]; + struct rte_mempool *m; + uint32_t buffer_size; +}; + +TAILQ_HEAD(softnic_mempool_list, softnic_mempool); + +/** + * SWQ + */ +struct softnic_swq_params { + uint32_t size; +}; + +struct softnic_swq { + TAILQ_ENTRY(softnic_swq) node; + char name[NAME_SIZE]; + struct rte_ring *r; +}; + +TAILQ_HEAD(softnic_swq_list, softnic_swq); + +/** + * LINK + */ +struct softnic_link_params { + const char *dev_name; + uint16_t port_id; /**< Valid only when *dev_name* is NULL. 
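The MEMPOOL, SWQ and LINK declarations above, and the resource types that follow, all share one shape: a named node kept in a tail queue, plus a linear find-by-name over that list. The common idiom, written out once for a generic node type (illustrative only):

#include <string.h>
#include <sys/queue.h>

struct named_node {
    TAILQ_ENTRY(named_node) node;
    char name[64];
};

TAILQ_HEAD(named_list, named_node);

static struct named_node *
named_find(struct named_list *list, const char *name)
{
    struct named_node *n;

    if (name == NULL)
        return NULL;

    TAILQ_FOREACH(n, list, node)
        if (strcmp(n->name, name) == 0)
            return n;

    return NULL;
}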
*/ +}; + +struct softnic_link { + TAILQ_ENTRY(softnic_link) node; + char name[NAME_SIZE]; + uint16_t port_id; + uint32_t n_rxq; + uint32_t n_txq; +}; + +TAILQ_HEAD(softnic_link_list, softnic_link); + +/** + * TMGR + */ + +#ifndef TM_MAX_SUBPORTS +#define TM_MAX_SUBPORTS 8 +#endif + +#ifndef TM_MAX_PIPES_PER_SUBPORT +#define TM_MAX_PIPES_PER_SUBPORT 4096 +#endif + +#ifndef TM_MAX_PIPE_PROFILE +#define TM_MAX_PIPE_PROFILE 256 +#endif +struct tm_params { + struct rte_sched_port_params port_params; + + struct rte_sched_subport_params subport_params[TM_MAX_SUBPORTS]; + + struct rte_sched_pipe_params pipe_profiles[TM_MAX_PIPE_PROFILE]; + uint32_t n_pipe_profiles; + uint32_t pipe_to_profile[TM_MAX_SUBPORTS * TM_MAX_PIPES_PER_SUBPORT]; +}; + +/* TM Levels */ +enum tm_node_level { + TM_NODE_LEVEL_PORT = 0, + TM_NODE_LEVEL_SUBPORT, + TM_NODE_LEVEL_PIPE, + TM_NODE_LEVEL_TC, + TM_NODE_LEVEL_QUEUE, + TM_NODE_LEVEL_MAX, +}; + +/* TM Shaper Profile */ +struct tm_shaper_profile { + TAILQ_ENTRY(tm_shaper_profile) node; + uint32_t shaper_profile_id; + uint32_t n_users; + struct rte_tm_shaper_params params; +}; + +TAILQ_HEAD(tm_shaper_profile_list, tm_shaper_profile); + +/* TM Shared Shaper */ +struct tm_shared_shaper { + TAILQ_ENTRY(tm_shared_shaper) node; + uint32_t shared_shaper_id; + uint32_t n_users; + uint32_t shaper_profile_id; +}; + +TAILQ_HEAD(tm_shared_shaper_list, tm_shared_shaper); + +/* TM WRED Profile */ +struct tm_wred_profile { + TAILQ_ENTRY(tm_wred_profile) node; + uint32_t wred_profile_id; + uint32_t n_users; + struct rte_tm_wred_params params; +}; + +TAILQ_HEAD(tm_wred_profile_list, tm_wred_profile); + +/* TM Node */ +struct tm_node { + TAILQ_ENTRY(tm_node) node; + uint32_t node_id; + uint32_t parent_node_id; + uint32_t priority; + uint32_t weight; + uint32_t level; + struct tm_node *parent_node; + struct tm_shaper_profile *shaper_profile; + struct tm_wred_profile *wred_profile; + struct rte_tm_node_params params; + struct rte_tm_node_stats stats; + uint32_t n_children; +}; + +TAILQ_HEAD(tm_node_list, tm_node); + +/* TM Hierarchy Specification */ +struct tm_hierarchy { + struct tm_shaper_profile_list shaper_profiles; + struct tm_shared_shaper_list shared_shapers; + struct tm_wred_profile_list wred_profiles; + struct tm_node_list nodes; + + uint32_t n_shaper_profiles; + uint32_t n_shared_shapers; + uint32_t n_wred_profiles; + uint32_t n_nodes; + + uint32_t n_tm_nodes[TM_NODE_LEVEL_MAX]; +}; + +struct tm_internals { + /** Hierarchy specification + * + * -Hierarchy is unfrozen at init and when port is stopped. + * -Hierarchy is frozen on successful hierarchy commit. + * -Run-time hierarchy changes are not allowed, therefore it makes + * sense to keep the hierarchy frozen after the port is started. + */ + struct tm_hierarchy h; + int hierarchy_frozen; + + /** Blueprints */ + struct tm_params params; +}; + +struct softnic_tmgr_port { + TAILQ_ENTRY(softnic_tmgr_port) node; + char name[NAME_SIZE]; + struct rte_sched_port *s; +}; + +TAILQ_HEAD(softnic_tmgr_port_list, softnic_tmgr_port); + +/** + * TAP + */ +struct softnic_tap { + TAILQ_ENTRY(softnic_tap) node; + char name[NAME_SIZE]; + int fd; +}; + +TAILQ_HEAD(softnic_tap_list, softnic_tap); + +/** + * Cryptodev + */ +struct softnic_cryptodev_params { + const char *dev_name; + uint32_t dev_id; /**< Valid only when *dev_name* is NULL. 
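In tm_params above, pipe_to_profile[] flattens the (subport, pipe) pair into a single dimension. Judging by the array bounds alone, the indexing is presumably the conventional row-major one (an assumption, not code shown in this patch):

/* Assumed layout: all pipes of subport 0, then subport 1, and so on. */
static inline uint32_t
pipe_to_profile_index(uint32_t subport_id, uint32_t pipe_id)
{
    return subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id;
}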
*/ + uint32_t n_queues; + uint32_t queue_size; + uint32_t session_pool_size; +}; + +struct softnic_cryptodev { + TAILQ_ENTRY(softnic_cryptodev) node; + char name[NAME_SIZE]; + uint16_t dev_id; + uint32_t n_queues; + struct rte_mempool *mp_create; + struct rte_mempool *mp_init; +}; + +TAILQ_HEAD(softnic_cryptodev_list, softnic_cryptodev); + +/** + * Input port action + */ +struct softnic_port_in_action_profile_params { + uint64_t action_mask; + struct rte_port_in_action_fltr_config fltr; + struct rte_port_in_action_lb_config lb; +}; + +struct softnic_port_in_action_profile { + TAILQ_ENTRY(softnic_port_in_action_profile) node; + char name[NAME_SIZE]; + struct softnic_port_in_action_profile_params params; + struct rte_port_in_action_profile *ap; +}; + +TAILQ_HEAD(softnic_port_in_action_profile_list, softnic_port_in_action_profile); + +/** + * Table action + */ +struct softnic_table_action_profile_params { + uint64_t action_mask; + struct rte_table_action_common_config common; + struct rte_table_action_lb_config lb; + struct rte_table_action_mtr_config mtr; + struct rte_table_action_tm_config tm; + struct rte_table_action_encap_config encap; + struct rte_table_action_nat_config nat; + struct rte_table_action_ttl_config ttl; + struct rte_table_action_stats_config stats; + struct rte_table_action_sym_crypto_config sym_crypto; +}; + +struct softnic_table_action_profile { + TAILQ_ENTRY(softnic_table_action_profile) node; + char name[NAME_SIZE]; + struct softnic_table_action_profile_params params; + struct rte_table_action_profile *ap; +}; + +TAILQ_HEAD(softnic_table_action_profile_list, softnic_table_action_profile); + +struct softnic_table_meter_profile { + TAILQ_ENTRY(softnic_table_meter_profile) node; + uint32_t meter_profile_id; + struct rte_table_action_meter_profile profile; +}; + +TAILQ_HEAD(softnic_table_meter_profile_list, + softnic_table_meter_profile); + +/** + * Pipeline + */ +struct pipeline_params { + uint32_t timer_period_ms; + uint32_t offset_port_id; +}; + +enum softnic_port_in_type { + PORT_IN_RXQ, + PORT_IN_SWQ, + PORT_IN_TMGR, + PORT_IN_TAP, + PORT_IN_SOURCE, + PORT_IN_CRYPTODEV, +}; + +struct softnic_port_in_params { + /* Read */ + enum softnic_port_in_type type; + char dev_name[NAME_SIZE]; + union { + struct { + uint16_t queue_id; + } rxq; + + struct { + const char *mempool_name; + uint32_t mtu; + } tap; + + struct { + const char *mempool_name; + const char *file_name; + uint32_t n_bytes_per_pkt; + } source; + + struct { + uint16_t queue_id; + void *f_callback; + void *arg_callback; + } cryptodev; + }; + uint32_t burst_size; + + /* Action */ + char action_profile_name[NAME_SIZE]; +}; + +enum softnic_port_out_type { + PORT_OUT_TXQ, + PORT_OUT_SWQ, + PORT_OUT_TMGR, + PORT_OUT_TAP, + PORT_OUT_SINK, + PORT_OUT_CRYPTODEV, +}; + +struct softnic_port_out_params { + enum softnic_port_out_type type; + char dev_name[NAME_SIZE]; + union { + struct { + uint16_t queue_id; + } txq; + + struct { + const char *file_name; + uint32_t max_n_pkts; + } sink; + + struct { + uint16_t queue_id; + uint32_t op_offset; + } cryptodev; + }; + uint32_t burst_size; + int retry; + uint32_t n_retries; +}; + +enum softnic_table_type { + TABLE_ACL, + TABLE_ARRAY, + TABLE_HASH, + TABLE_LPM, + TABLE_STUB, +}; + +struct softnic_table_acl_params { + uint32_t n_rules; + uint32_t ip_header_offset; + int ip_version; +}; + +struct softnic_table_array_params { + uint32_t n_keys; + uint32_t key_offset; +}; + +#ifndef TABLE_RULE_MATCH_SIZE_MAX +#define TABLE_RULE_MATCH_SIZE_MAX 256 +#endif + +struct 
softnic_table_hash_params { + uint32_t n_keys; + uint32_t key_offset; + uint32_t key_size; + uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX]; + uint32_t n_buckets; + int extendable_bucket; +}; + +struct softnic_table_lpm_params { + uint32_t n_rules; + uint32_t key_offset; + uint32_t key_size; +}; + +struct softnic_table_params { + /* Match */ + enum softnic_table_type match_type; + union { + struct softnic_table_acl_params acl; + struct softnic_table_array_params array; + struct softnic_table_hash_params hash; + struct softnic_table_lpm_params lpm; + } match; + + /* Action */ + char action_profile_name[NAME_SIZE]; +}; + +struct softnic_port_in { + struct softnic_port_in_params params; + struct softnic_port_in_action_profile *ap; + struct rte_port_in_action *a; +}; + +struct softnic_port_out { + struct softnic_port_out_params params; +}; + +struct softnic_table { + struct softnic_table_params params; + struct softnic_table_action_profile *ap; + struct rte_table_action *a; + struct flow_list flows; + struct rte_table_action_dscp_table dscp_table; + struct softnic_table_meter_profile_list meter_profiles; +}; + +struct pipeline { + TAILQ_ENTRY(pipeline) node; + char name[NAME_SIZE]; + + struct rte_pipeline *p; + struct pipeline_params params; + struct softnic_port_in port_in[RTE_PIPELINE_PORT_IN_MAX]; + struct softnic_port_out port_out[RTE_PIPELINE_PORT_OUT_MAX]; + struct softnic_table table[RTE_PIPELINE_TABLE_MAX]; + uint32_t n_ports_in; + uint32_t n_ports_out; + uint32_t n_tables; + + struct rte_ring *msgq_req; + struct rte_ring *msgq_rsp; + uint32_t timer_period_ms; + + int enabled; + uint32_t thread_id; + uint32_t cpu_id; +}; + +TAILQ_HEAD(pipeline_list, pipeline); + +/** + * Thread + */ +#ifndef THREAD_PIPELINES_MAX +#define THREAD_PIPELINES_MAX 256 +#endif + +#ifndef THREAD_MSGQ_SIZE +#define THREAD_MSGQ_SIZE 64 +#endif + +#ifndef THREAD_TIMER_PERIOD_MS +#define THREAD_TIMER_PERIOD_MS 100 +#endif + +/** + * Master thread: data plane thread context + */ +struct softnic_thread { + struct rte_ring *msgq_req; + struct rte_ring *msgq_rsp; + + uint32_t service_id; +}; + +/** + * Data plane threads: context + */ +#ifndef TABLE_RULE_ACTION_SIZE_MAX +#define TABLE_RULE_ACTION_SIZE_MAX 2048 +#endif + +struct softnic_table_data { + struct rte_table_action *a; +}; + +struct pipeline_data { + struct rte_pipeline *p; + struct softnic_table_data table_data[RTE_PIPELINE_TABLE_MAX]; + uint32_t n_tables; + + struct rte_ring *msgq_req; + struct rte_ring *msgq_rsp; + uint64_t timer_period; /* Measured in CPU cycles. */ + uint64_t time_next; + + uint8_t buffer[TABLE_RULE_ACTION_SIZE_MAX]; +}; + +struct softnic_thread_data { + struct rte_pipeline *p[THREAD_PIPELINES_MAX]; + uint32_t n_pipelines; + + struct pipeline_data pipeline_data[THREAD_PIPELINES_MAX]; + struct rte_ring *msgq_req; + struct rte_ring *msgq_rsp; + uint64_t timer_period; /* Measured in CPU cycles.
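Both timer_period fields are kept in TSC cycles so that the data plane loop can compare them directly against rte_rdtsc(). The conversion from the millisecond configuration values presumably looks like this (sketch):

#include <rte_cycles.h>

static uint64_t
timer_period_cycles(uint32_t timer_period_ms)
{
    /* E.g. 100 ms at a 2.0 GHz TSC -> 200,000,000 cycles. */
    return (rte_get_tsc_hz() * timer_period_ms) / 1000;
}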
*/ + uint64_t time_next; + uint64_t time_next_min; + uint64_t iter; +} __rte_cache_aligned; + +/** + * PMD Internals + */ +struct pmd_internals { + /** Params */ + struct pmd_params params; + + struct { + struct tm_internals tm; /**< Traffic Management */ + } soft; + + struct flow_internals flow; + struct mtr_internals mtr; + + struct softnic_conn *conn; + struct softnic_mempool_list mempool_list; + struct softnic_swq_list swq_list; + struct softnic_link_list link_list; + struct softnic_tmgr_port_list tmgr_port_list; + struct softnic_tap_list tap_list; + struct softnic_cryptodev_list cryptodev_list; + struct softnic_port_in_action_profile_list port_in_action_profile_list; + struct softnic_table_action_profile_list table_action_profile_list; + struct pipeline_list pipeline_list; + struct softnic_thread thread[RTE_MAX_LCORE]; + struct softnic_thread_data thread_data[RTE_MAX_LCORE]; +}; + +static inline struct rte_eth_dev * +ETHDEV(struct pmd_internals *softnic) +{ + uint16_t port_id; + int status; + + if (softnic == NULL) + return NULL; + + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return NULL; + + return &rte_eth_devices[port_id]; +} + +/** + * Ethdev Flow API + */ +int +flow_attr_map_set(struct pmd_internals *softnic, + uint32_t group_id, + int ingress, + const char *pipeline_name, + uint32_t table_id); + +struct flow_attr_map * +flow_attr_map_get(struct pmd_internals *softnic, + uint32_t group_id, + int ingress); + +extern const struct rte_flow_ops pmd_flow_ops; + +/** + * Meter + */ +int +softnic_mtr_init(struct pmd_internals *p); + +void +softnic_mtr_free(struct pmd_internals *p); + +struct softnic_mtr * +softnic_mtr_find(struct pmd_internals *p, + uint32_t mtr_id); + +struct softnic_mtr_meter_profile * +softnic_mtr_meter_profile_find(struct pmd_internals *p, + uint32_t meter_profile_id); + +extern const struct rte_mtr_ops pmd_mtr_ops; + +/** + * MEMPOOL + */ +int +softnic_mempool_init(struct pmd_internals *p); + +void +softnic_mempool_free(struct pmd_internals *p); + +struct softnic_mempool * +softnic_mempool_find(struct pmd_internals *p, + const char *name); + +struct softnic_mempool * +softnic_mempool_create(struct pmd_internals *p, + const char *name, + struct softnic_mempool_params *params); + +/** + * SWQ + */ +int +softnic_swq_init(struct pmd_internals *p); + +void +softnic_swq_free(struct pmd_internals *p); + +void +softnic_softnic_swq_free_keep_rxq_txq(struct pmd_internals *p); + +struct softnic_swq * +softnic_swq_find(struct pmd_internals *p, + const char *name); + +struct softnic_swq * +softnic_swq_create(struct pmd_internals *p, + const char *name, + struct softnic_swq_params *params); + +/** + * LINK + */ +int +softnic_link_init(struct pmd_internals *p); + +void +softnic_link_free(struct pmd_internals *p); + +struct softnic_link * +softnic_link_find(struct pmd_internals *p, + const char *name); + +struct softnic_link * +softnic_link_create(struct pmd_internals *p, + const char *name, + struct softnic_link_params *params); + +/** + * TMGR + */ +int +softnic_tmgr_init(struct pmd_internals *p); + +void +softnic_tmgr_free(struct pmd_internals *p); + +struct softnic_tmgr_port * +softnic_tmgr_port_find(struct pmd_internals *p, + const char *name); + +struct softnic_tmgr_port * +softnic_tmgr_port_create(struct pmd_internals *p, + const char *name); + +void +tm_hierarchy_init(struct pmd_internals *p); + +void +tm_hierarchy_free(struct pmd_internals *p); + +static inline int +tm_used(struct rte_eth_dev *dev) +{ + struct pmd_internals *p = 
dev->data->dev_private; + + return p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT]; +} + +extern const struct rte_tm_ops pmd_tm_ops; + +/** + * TAP + */ +int +softnic_tap_init(struct pmd_internals *p); + +void +softnic_tap_free(struct pmd_internals *p); + +struct softnic_tap * +softnic_tap_find(struct pmd_internals *p, + const char *name); + +struct softnic_tap * +softnic_tap_create(struct pmd_internals *p, + const char *name); + +/** + * Sym Crypto + */ +int +softnic_cryptodev_init(struct pmd_internals *p); + +void +softnic_cryptodev_free(struct pmd_internals *p); + +struct softnic_cryptodev * +softnic_cryptodev_find(struct pmd_internals *p, + const char *name); + +struct softnic_cryptodev * +softnic_cryptodev_create(struct pmd_internals *p, + const char *name, + struct softnic_cryptodev_params *params); + +/** + * Input port action + */ +int +softnic_port_in_action_profile_init(struct pmd_internals *p); + +void +softnic_port_in_action_profile_free(struct pmd_internals *p); + +struct softnic_port_in_action_profile * +softnic_port_in_action_profile_find(struct pmd_internals *p, + const char *name); + +struct softnic_port_in_action_profile * +softnic_port_in_action_profile_create(struct pmd_internals *p, + const char *name, + struct softnic_port_in_action_profile_params *params); + +/** + * Table action + */ +int +softnic_table_action_profile_init(struct pmd_internals *p); + +void +softnic_table_action_profile_free(struct pmd_internals *p); + +struct softnic_table_action_profile * +softnic_table_action_profile_find(struct pmd_internals *p, + const char *name); + +struct softnic_table_action_profile * +softnic_table_action_profile_create(struct pmd_internals *p, + const char *name, + struct softnic_table_action_profile_params *params); + +enum rte_table_action_policer +softnic_table_action_policer(enum rte_mtr_policer_action action); + +/** + * Pipeline + */ +int +softnic_pipeline_init(struct pmd_internals *p); + +void +softnic_pipeline_free(struct pmd_internals *p); + +void +softnic_pipeline_disable_all(struct pmd_internals *p); + +uint32_t +softnic_pipeline_thread_count(struct pmd_internals *p, uint32_t thread_id); + +struct pipeline * +softnic_pipeline_find(struct pmd_internals *p, const char *name); + +struct pipeline * +softnic_pipeline_create(struct pmd_internals *p, + const char *name, + struct pipeline_params *params); + +int +softnic_pipeline_port_in_create(struct pmd_internals *p, + const char *pipeline_name, + struct softnic_port_in_params *params, + int enabled); + +int +softnic_pipeline_port_in_connect_to_table(struct pmd_internals *p, + const char *pipeline_name, + uint32_t port_id, + uint32_t table_id); + +int +softnic_pipeline_port_out_create(struct pmd_internals *p, + const char *pipeline_name, + struct softnic_port_out_params *params); + +int +softnic_pipeline_port_out_find(struct pmd_internals *softnic, + const char *pipeline_name, + const char *name, + uint32_t *port_id); + +int +softnic_pipeline_table_create(struct pmd_internals *p, + const char *pipeline_name, + struct softnic_table_params *params); + +struct softnic_table_meter_profile * +softnic_pipeline_table_meter_profile_find(struct softnic_table *table, + uint32_t meter_profile_id); + +struct softnic_table_rule_match_acl { + int ip_version; + + RTE_STD_C11 + union { + struct { + uint32_t sa; + uint32_t da; + } ipv4; + + struct { + uint8_t sa[16]; + uint8_t da[16]; + } ipv6; + }; + + uint32_t sa_depth; + uint32_t da_depth; + uint16_t sp0; + uint16_t sp1; + uint16_t dp0; + uint16_t dp1; + uint8_t proto; + uint8_t 
proto_mask; + uint32_t priority; +}; + +struct softnic_table_rule_match_array { + uint32_t pos; +}; + +struct softnic_table_rule_match_hash { + uint8_t key[TABLE_RULE_MATCH_SIZE_MAX]; +}; + +struct softnic_table_rule_match_lpm { + int ip_version; + + RTE_STD_C11 + union { + uint32_t ipv4; + uint8_t ipv6[16]; + }; + + uint8_t depth; +}; + +struct softnic_table_rule_match { + enum softnic_table_type match_type; + + union { + struct softnic_table_rule_match_acl acl; + struct softnic_table_rule_match_array array; + struct softnic_table_rule_match_hash hash; + struct softnic_table_rule_match_lpm lpm; + } match; +}; + +#ifndef SYM_CRYPTO_MAX_KEY_SIZE +#define SYM_CRYPTO_MAX_KEY_SIZE (256) +#endif +struct softnic_table_rule_action { + uint64_t action_mask; + struct rte_table_action_fwd_params fwd; + struct rte_table_action_lb_params lb; + struct rte_table_action_mtr_params mtr; + struct rte_table_action_tm_params tm; + struct rte_table_action_encap_params encap; + struct rte_table_action_nat_params nat; + struct rte_table_action_ttl_params ttl; + struct rte_table_action_stats_params stats; + struct rte_table_action_time_params time; + struct rte_table_action_tag_params tag; + struct rte_table_action_decap_params decap; + struct rte_table_action_sym_crypto_params sym_crypto; + uint8_t sym_crypto_key[SYM_CRYPTO_MAX_KEY_SIZE]; +}; + +struct rte_flow { + TAILQ_ENTRY(rte_flow) node; + struct softnic_table_rule_match match; + struct softnic_table_rule_action action; + void *data; + struct pipeline *pipeline; + uint32_t table_id; +}; + +int +softnic_pipeline_port_in_stats_read(struct pmd_internals *p, + const char *pipeline_name, + uint32_t port_id, + struct rte_pipeline_port_in_stats *stats, + int clear); + +int +softnic_pipeline_port_in_enable(struct pmd_internals *p, + const char *pipeline_name, + uint32_t port_id); + +int +softnic_pipeline_port_in_disable(struct pmd_internals *p, + const char *pipeline_name, + uint32_t port_id); + +int +softnic_pipeline_port_out_stats_read(struct pmd_internals *p, + const char *pipeline_name, + uint32_t port_id, + struct rte_pipeline_port_out_stats *stats, + int clear); + +int +softnic_pipeline_table_stats_read(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + struct rte_pipeline_table_stats *stats, + int clear); + +int +softnic_pipeline_table_rule_add(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + struct softnic_table_rule_match *match, + struct softnic_table_rule_action *action, + void **data); + +int +softnic_pipeline_table_rule_add_bulk(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + struct softnic_table_rule_match *match, + struct softnic_table_rule_action *action, + void **data, + uint32_t *n_rules); + +int +softnic_pipeline_table_rule_add_default(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + struct softnic_table_rule_action *action, + void **data); + +int +softnic_pipeline_table_rule_delete(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + struct softnic_table_rule_match *match); + +int +softnic_pipeline_table_rule_delete_default(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id); + +int +softnic_pipeline_table_rule_stats_read(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + void *data, + struct rte_table_action_stats_counters *stats, + int clear); + +int +softnic_pipeline_table_mtr_profile_add(struct pmd_internals *p, + const char *pipeline_name, + uint32_t 
table_id, + uint32_t meter_profile_id, + struct rte_table_action_meter_profile *profile); + +int +softnic_pipeline_table_mtr_profile_delete(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + uint32_t meter_profile_id); + +int +softnic_pipeline_table_rule_mtr_read(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + void *data, + uint32_t tc_mask, + struct rte_table_action_mtr_counters *stats, + int clear); + +int +softnic_pipeline_table_dscp_table_update(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + uint64_t dscp_mask, + struct rte_table_action_dscp_table *dscp_table); + +int +softnic_pipeline_table_rule_ttl_read(struct pmd_internals *p, + const char *pipeline_name, + uint32_t table_id, + void *data, + struct rte_table_action_ttl_counters *stats, + int clear); + +/** + * Thread + */ +int +softnic_thread_init(struct pmd_internals *p); + +void +softnic_thread_free(struct pmd_internals *p); + +int +softnic_thread_pipeline_enable(struct pmd_internals *p, + uint32_t thread_id, + const char *pipeline_name); + +int +softnic_thread_pipeline_disable(struct pmd_internals *p, + uint32_t thread_id, + const char *pipeline_name); + +/** + * CLI + */ +void +softnic_cli_process(char *in, + char *out, + size_t out_size, + void *arg); + +int +softnic_cli_script_process(struct pmd_internals *softnic, + const char *file_name, + size_t msg_in_len_max, + size_t msg_out_len_max); + +#endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */ diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_link.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_link.c new file mode 100644 index 000000000..21a64069f --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_link.c @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <stdlib.h> +#include <string.h> + +#include <rte_ethdev.h> +#include <rte_string_fns.h> + +#include "rte_eth_softnic_internals.h" + +int +softnic_link_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->link_list); + + return 0; +} + +void +softnic_link_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_link *link; + + link = TAILQ_FIRST(&p->link_list); + if (link == NULL) + break; + + TAILQ_REMOVE(&p->link_list, link, node); + free(link); + } +} + +struct softnic_link * +softnic_link_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_link *link; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(link, &p->link_list, node) + if (strcmp(link->name, name) == 0) + return link; + + return NULL; +} + +struct softnic_link * +softnic_link_create(struct pmd_internals *p, + const char *name, + struct softnic_link_params *params) +{ + struct rte_eth_dev_info port_info; + struct softnic_link *link; + uint16_t port_id; + int ret; + + /* Check input params */ + if (name == NULL || + softnic_link_find(p, name) || + params == NULL) + return NULL; + + port_id = params->port_id; + if (params->dev_name) { + int status; + + status = rte_eth_dev_get_port_by_name(params->dev_name, + &port_id); + + if (status) + return NULL; + } else { + if (!rte_eth_dev_is_valid_port(port_id)) + return NULL; + } + + ret = rte_eth_dev_info_get(port_id, &port_info); + if (ret != 0) + return NULL; + + /* Node allocation */ + link = calloc(1, sizeof(struct softnic_link)); + if (link == NULL) + return NULL; + + /* Node fill in */ + strlcpy(link->name, name, sizeof(link->name)); + link->port_id = port_id; + link->n_rxq = 
port_info.nb_rx_queues; + link->n_txq = port_info.nb_tx_queues; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->link_list, link, node); + + return link; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_mempool.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_mempool.c new file mode 100644 index 000000000..d5c569f94 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_mempool.c @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <stdlib.h> +#include <string.h> + +#include <rte_mbuf.h> +#include <rte_string_fns.h> + +#include "rte_eth_softnic_internals.h" + +#define BUFFER_SIZE_MIN (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) + +int +softnic_mempool_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->mempool_list); + + return 0; +} + +void +softnic_mempool_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_mempool *mempool; + + mempool = TAILQ_FIRST(&p->mempool_list); + if (mempool == NULL) + break; + + TAILQ_REMOVE(&p->mempool_list, mempool, node); + rte_mempool_free(mempool->m); + free(mempool); + } +} + +struct softnic_mempool * +softnic_mempool_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_mempool *mempool; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(mempool, &p->mempool_list, node) + if (strcmp(mempool->name, name) == 0) + return mempool; + + return NULL; +} + +struct softnic_mempool * +softnic_mempool_create(struct pmd_internals *p, + const char *name, + struct softnic_mempool_params *params) +{ + char mempool_name[NAME_SIZE]; + struct softnic_mempool *mempool; + struct rte_mempool *m; + + /* Check input params */ + if (name == NULL || + softnic_mempool_find(p, name) || + params == NULL || + params->buffer_size < BUFFER_SIZE_MIN || + params->pool_size == 0) + return NULL; + + /* Resource create */ + snprintf(mempool_name, sizeof(mempool_name), "%s_%s", + p->params.name, + name); + + m = rte_pktmbuf_pool_create(mempool_name, + params->pool_size, + params->cache_size, + 0, + params->buffer_size - sizeof(struct rte_mbuf), + p->params.cpu_id); + + if (m == NULL) + return NULL; + + /* Node allocation */ + mempool = calloc(1, sizeof(struct softnic_mempool)); + if (mempool == NULL) { + rte_mempool_free(m); + return NULL; + } + + /* Node fill in */ + strlcpy(mempool->name, name, sizeof(mempool->name)); + mempool->m = m; + mempool->buffer_size = params->buffer_size; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->mempool_list, mempool, node); + + return mempool; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_meter.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_meter.c new file mode 100644 index 000000000..31a2a0e6d --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_meter.c @@ -0,0 +1,749 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +#include <rte_mtr.h> +#include <rte_mtr_driver.h> + +#include "rte_eth_softnic_internals.h" + +int +softnic_mtr_init(struct pmd_internals *p) +{ + /* Initialize meter profiles list */ + TAILQ_INIT(&p->mtr.meter_profiles); + + /* Initialize MTR objects list */ + TAILQ_INIT(&p->mtr.mtrs); + + return 0; +} + +void +softnic_mtr_free(struct pmd_internals *p) +{ + /* Remove MTR objects */ + for ( ; ; ) { + struct softnic_mtr *m; + + m = TAILQ_FIRST(&p->mtr.mtrs); + if (m == NULL) + break; + + TAILQ_REMOVE(&p->mtr.mtrs, m, node); + free(m); + } + + /* 
Remove meter profiles */ + for ( ; ; ) { + struct softnic_mtr_meter_profile *mp; + + mp = TAILQ_FIRST(&p->mtr.meter_profiles); + if (mp == NULL) + break; + + TAILQ_REMOVE(&p->mtr.meter_profiles, mp, node); + free(mp); + } +} + +struct softnic_mtr_meter_profile * +softnic_mtr_meter_profile_find(struct pmd_internals *p, + uint32_t meter_profile_id) +{ + struct softnic_mtr_meter_profile_list *mpl = &p->mtr.meter_profiles; + struct softnic_mtr_meter_profile *mp; + + TAILQ_FOREACH(mp, mpl, node) + if (meter_profile_id == mp->meter_profile_id) + return mp; + + return NULL; +} + +enum rte_table_action_policer +softnic_table_action_policer(enum rte_mtr_policer_action action) +{ + switch (action) { + case MTR_POLICER_ACTION_COLOR_GREEN: + return RTE_TABLE_ACTION_POLICER_COLOR_GREEN; + + case MTR_POLICER_ACTION_COLOR_YELLOW: + return RTE_TABLE_ACTION_POLICER_COLOR_YELLOW; + + case MTR_POLICER_ACTION_COLOR_RED: + return RTE_TABLE_ACTION_POLICER_COLOR_RED; + + default: + return RTE_TABLE_ACTION_POLICER_DROP; + } +} + +static int +meter_profile_check(struct rte_eth_dev *dev, + uint32_t meter_profile_id, + struct rte_mtr_meter_profile *profile, + struct rte_mtr_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct softnic_mtr_meter_profile *mp; + + /* Meter profile ID must be valid. */ + if (meter_profile_id == UINT32_MAX) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, + "Meter profile id not valid"); + + /* Meter profile must not exist. */ + mp = softnic_mtr_meter_profile_find(p, meter_profile_id); + if (mp) + return -rte_mtr_error_set(error, + EEXIST, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, + "Meter profile already exists"); + + /* Profile must not be NULL. 
*/ + if (profile == NULL) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE, + NULL, + "profile null"); + + /* Traffic metering algorithm : TRTCM_RFC2698 */ + if (profile->alg != RTE_MTR_TRTCM_RFC2698) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE, + NULL, + "Metering alg not supported"); + + return 0; +} + +/* MTR meter profile add */ +static int +pmd_mtr_meter_profile_add(struct rte_eth_dev *dev, + uint32_t meter_profile_id, + struct rte_mtr_meter_profile *profile, + struct rte_mtr_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct softnic_mtr_meter_profile_list *mpl = &p->mtr.meter_profiles; + struct softnic_mtr_meter_profile *mp; + int status; + + /* Check input params */ + status = meter_profile_check(dev, meter_profile_id, profile, error); + if (status) + return status; + + /* Memory allocation */ + mp = calloc(1, sizeof(struct softnic_mtr_meter_profile)); + if (mp == NULL) + return -rte_mtr_error_set(error, + ENOMEM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Memory alloc failed"); + + /* Fill in */ + mp->meter_profile_id = meter_profile_id; + memcpy(&mp->params, profile, sizeof(mp->params)); + + /* Add to list */ + TAILQ_INSERT_TAIL(mpl, mp, node); + + return 0; +} + +/* MTR meter profile delete */ +static int +pmd_mtr_meter_profile_delete(struct rte_eth_dev *dev, + uint32_t meter_profile_id, + struct rte_mtr_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct softnic_mtr_meter_profile *mp; + + /* Meter profile must exist */ + mp = softnic_mtr_meter_profile_find(p, meter_profile_id); + if (mp == NULL) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, + "Meter profile id invalid"); + + /* Check unused */ + if (mp->n_users) + return -rte_mtr_error_set(error, + EBUSY, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, + "Meter profile in use"); + + /* Remove from list */ + TAILQ_REMOVE(&p->mtr.meter_profiles, mp, node); + free(mp); + + return 0; +} + +struct softnic_mtr * +softnic_mtr_find(struct pmd_internals *p, uint32_t mtr_id) +{ + struct softnic_mtr_list *ml = &p->mtr.mtrs; + struct softnic_mtr *m; + + TAILQ_FOREACH(m, ml, node) + if (m->mtr_id == mtr_id) + return m; + + return NULL; +} + + +static int +mtr_check(struct pmd_internals *p, + uint32_t mtr_id, + struct rte_mtr_params *params, + int shared, + struct rte_mtr_error *error) +{ + /* MTR id valid */ + if (softnic_mtr_find(p, mtr_id)) + return -rte_mtr_error_set(error, + EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, + "MTR object already exists"); + + /* MTR params must not be NULL */ + if (params == NULL) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, + NULL, + "MTR object params null"); + + /* Previous meter color not supported */ + if (params->use_prev_mtr_color) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_MTR_PARAMS, + NULL, + "Previous meter color not supported"); + + /* Shared MTR object not supported */ + if (shared) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_SHARED, + NULL, + "Shared MTR object not supported"); + + return 0; +} + +/* MTR object create */ +static int +pmd_mtr_create(struct rte_eth_dev *dev, + uint32_t mtr_id, + struct rte_mtr_params *params, + int shared, + struct rte_mtr_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct softnic_mtr_list *ml = &p->mtr.mtrs; + struct softnic_mtr_meter_profile *mp; + struct softnic_mtr *m; + int status; + + /* 
Check parameters */ + status = mtr_check(p, mtr_id, params, shared, error); + if (status) + return status; + + /* Meter profile must exist */ + mp = softnic_mtr_meter_profile_find(p, params->meter_profile_id); + if (mp == NULL) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, + "Meter profile id not valid"); + + /* Memory allocation */ + m = calloc(1, sizeof(struct softnic_mtr)); + if (m == NULL) + return -rte_mtr_error_set(error, + ENOMEM, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Memory alloc failed"); + + /* Fill in */ + m->mtr_id = mtr_id; + memcpy(&m->params, params, sizeof(m->params)); + + /* Add to list */ + TAILQ_INSERT_TAIL(ml, m, node); + + /* Update dependencies */ + mp->n_users++; + + return 0; +} + +/* MTR object destroy */ +static int +pmd_mtr_destroy(struct rte_eth_dev *dev, + uint32_t mtr_id, + struct rte_mtr_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct softnic_mtr_list *ml = &p->mtr.mtrs; + struct softnic_mtr_meter_profile *mp; + struct softnic_mtr *m; + + /* MTR object must exist */ + m = softnic_mtr_find(p, mtr_id); + if (m == NULL) + return -rte_mtr_error_set(error, + EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, + "MTR object id not valid"); + + /* MTR object must not have any owner */ + if (m->flow != NULL) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "MTR object is being used"); + + /* Get meter profile */ + mp = softnic_mtr_meter_profile_find(p, m->params.meter_profile_id); + if (mp == NULL) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, + "MTR object meter profile invalid"); + + /* Update dependencies */ + mp->n_users--; + + /* Remove from list */ + TAILQ_REMOVE(ml, m, node); + free(m); + + return 0; +} + +/* MTR object meter profile update */ +static int +pmd_mtr_meter_profile_update(struct rte_eth_dev *dev, + uint32_t mtr_id, + uint32_t meter_profile_id, + struct rte_mtr_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct softnic_mtr_meter_profile *mp_new, *mp_old; + struct softnic_mtr *m; + int status; + + /* MTR object id must be valid */ + m = softnic_mtr_find(p, mtr_id); + if (m == NULL) + return -rte_mtr_error_set(error, + EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, + "MTR object id not valid"); + + /* Meter profile id must be valid */ + mp_new = softnic_mtr_meter_profile_find(p, meter_profile_id); + if (mp_new == NULL) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_METER_PROFILE_ID, + NULL, + "Meter profile not valid"); + + /* MTR object already set to meter profile id */ + if (m->params.meter_profile_id == meter_profile_id) + return 0; + + /* MTR object owner table update */ + if (m->flow) { + uint32_t table_id = m->flow->table_id; + struct softnic_table *table = &m->flow->pipeline->table[table_id]; + struct softnic_table_rule_action action; + + if (!softnic_pipeline_table_meter_profile_find(table, + meter_profile_id)) { + struct rte_table_action_meter_profile profile; + + memset(&profile, 0, sizeof(profile)); + + profile.alg = RTE_TABLE_ACTION_METER_TRTCM; + profile.trtcm.cir = mp_new->params.trtcm_rfc2698.cir; + profile.trtcm.pir = mp_new->params.trtcm_rfc2698.pir; + profile.trtcm.cbs = mp_new->params.trtcm_rfc2698.cbs; + profile.trtcm.pbs = mp_new->params.trtcm_rfc2698.pbs; + + /* Add meter profile to pipeline table */ + status = softnic_pipeline_table_mtr_profile_add(p, + m->flow->pipeline->name, + table_id, + meter_profile_id, + &profile); + 
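/* On success, the profile stays registered with the table even if the rule re-add below fails; no rollback is attempted. */ +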
if (status) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Table meter profile add failed"); + } + + /* Set meter action */ + memcpy(&action, &m->flow->action, sizeof(action)); + + action.mtr.mtr[0].meter_profile_id = meter_profile_id; + + /* Re-add rule */ + status = softnic_pipeline_table_rule_add(p, + m->flow->pipeline->name, + table_id, + &m->flow->match, + &action, + &m->flow->data); + if (status) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Pipeline table rule add failed"); + + /* Flow: update meter action */ + memcpy(&m->flow->action, &action, sizeof(m->flow->action)); + } + + mp_old = softnic_mtr_meter_profile_find(p, m->params.meter_profile_id); + + /* Meter: Set meter profile */ + m->params.meter_profile_id = meter_profile_id; + + /* Update dependencies*/ + mp_old->n_users--; + mp_new->n_users++; + + return 0; +} + +/* MTR object meter DSCP table update */ +static int +pmd_mtr_meter_dscp_table_update(struct rte_eth_dev *dev, + uint32_t mtr_id, + enum rte_color *dscp_table, + struct rte_mtr_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct rte_table_action_dscp_table dt; + struct pipeline *pipeline; + struct softnic_table *table; + struct softnic_mtr *m; + uint32_t table_id, i; + int status; + + /* MTR object id must be valid */ + m = softnic_mtr_find(p, mtr_id); + if (m == NULL) + return -rte_mtr_error_set(error, + EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, + "MTR object id not valid"); + + /* MTR object owner valid? */ + if (m->flow == NULL) + return 0; + + pipeline = m->flow->pipeline; + table_id = m->flow->table_id; + table = &pipeline->table[table_id]; + + memcpy(&dt, &table->dscp_table, sizeof(dt)); + for (i = 0; i < RTE_DIM(dt.entry); i++) + dt.entry[i].color = (enum rte_color)dscp_table[i]; + + /* Update table */ + status = softnic_pipeline_table_dscp_table_update(p, + pipeline->name, + table_id, + UINT64_MAX, + &dt); + if (status) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Table action dscp table update failed"); + + return 0; +} + +/* MTR object policer action update */ +static int +pmd_mtr_policer_actions_update(struct rte_eth_dev *dev, + uint32_t mtr_id, + uint32_t action_mask, + enum rte_mtr_policer_action *actions, + struct rte_mtr_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct softnic_mtr *m; + uint32_t i; + int status; + + /* MTR object id must be valid */ + m = softnic_mtr_find(p, mtr_id); + if (m == NULL) + return -rte_mtr_error_set(error, + EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, + "MTR object id not valid"); + + /* Valid policer actions */ + if (actions == NULL) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Invalid actions"); + + for (i = 0; i < RTE_COLORS; i++) { + if (action_mask & (1 << i)) { + if (actions[i] != MTR_POLICER_ACTION_COLOR_GREEN && + actions[i] != MTR_POLICER_ACTION_COLOR_YELLOW && + actions[i] != MTR_POLICER_ACTION_COLOR_RED && + actions[i] != MTR_POLICER_ACTION_DROP) { + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + " Invalid action value"); + } + } + } + + /* MTR object owner valid? 
*/ + if (m->flow) { + struct pipeline *pipeline = m->flow->pipeline; + struct softnic_table *table = &pipeline->table[m->flow->table_id]; + struct softnic_table_rule_action action; + + memcpy(&action, &m->flow->action, sizeof(action)); + + /* Set action */ + for (i = 0; i < RTE_COLORS; i++) + if (action_mask & (1 << i)) + action.mtr.mtr[0].policer[i] = + softnic_table_action_policer(actions[i]); + + /* Re-add the rule */ + status = softnic_pipeline_table_rule_add(p, + pipeline->name, + m->flow->table_id, + &m->flow->match, + &action, + &m->flow->data); + if (status) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Pipeline table rule re-add failed"); + + /* Flow: Update meter action */ + memcpy(&m->flow->action, &action, sizeof(m->flow->action)); + + /* Reset the meter stats */ + rte_table_action_meter_read(table->a, m->flow->data, + 1, NULL, 1); + } + + /* Meter: Update policer actions */ + for (i = 0; i < RTE_COLORS; i++) + if (action_mask & (1 << i)) + m->params.action[i] = actions[i]; + + return 0; +} + +#define MTR_STATS_PKTS_DEFAULT (RTE_MTR_STATS_N_PKTS_GREEN | \ + RTE_MTR_STATS_N_PKTS_YELLOW | \ + RTE_MTR_STATS_N_PKTS_RED | \ + RTE_MTR_STATS_N_PKTS_DROPPED) + +#define MTR_STATS_BYTES_DEFAULT (RTE_MTR_STATS_N_BYTES_GREEN | \ + RTE_MTR_STATS_N_BYTES_YELLOW | \ + RTE_MTR_STATS_N_BYTES_RED | \ + RTE_MTR_STATS_N_BYTES_DROPPED) + +/* MTR object stats conversion */ +static void +mtr_stats_convert(struct softnic_mtr *m, + struct rte_table_action_mtr_counters_tc *in, + struct rte_mtr_stats *out, + uint64_t *out_mask) +{ + memset(out, 0, sizeof(*out)); /* Clear the stats themselves, not the pointer */ + *out_mask = 0; + + if (in->n_packets_valid) { + uint32_t i; + + for (i = 0; i < RTE_COLORS; i++) { + if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_GREEN) + out->n_pkts[RTE_COLOR_GREEN] += in->n_packets[i]; + + if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_YELLOW) + out->n_pkts[RTE_COLOR_YELLOW] += in->n_packets[i]; + + if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_RED) + out->n_pkts[RTE_COLOR_RED] += in->n_packets[i]; + + if (m->params.action[i] == MTR_POLICER_ACTION_DROP) + out->n_pkts_dropped += in->n_packets[i]; + } + + *out_mask |= MTR_STATS_PKTS_DEFAULT; + } + + if (in->n_bytes_valid) { + uint32_t i; + + for (i = 0; i < RTE_COLORS; i++) { + if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_GREEN) + out->n_bytes[RTE_COLOR_GREEN] += in->n_bytes[i]; + + if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_YELLOW) + out->n_bytes[RTE_COLOR_YELLOW] += in->n_bytes[i]; + + if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_RED) + out->n_bytes[RTE_COLOR_RED] += in->n_bytes[i]; + + if (m->params.action[i] == MTR_POLICER_ACTION_DROP) + out->n_bytes_dropped += in->n_bytes[i]; + } + + *out_mask |= MTR_STATS_BYTES_DEFAULT; + } +} + +/* MTR object stats read */ +static int +pmd_mtr_stats_read(struct rte_eth_dev *dev, + uint32_t mtr_id, + struct rte_mtr_stats *stats, + uint64_t *stats_mask, + int clear, + struct rte_mtr_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct rte_table_action_mtr_counters counters; + struct pipeline *pipeline; + struct softnic_table *table; + struct softnic_mtr *m; + int status; + + /* MTR object id must be valid */ + m = softnic_mtr_find(p, mtr_id); + if (m == NULL) + return -rte_mtr_error_set(error, + EEXIST, + RTE_MTR_ERROR_TYPE_MTR_ID, + NULL, + "MTR object id not valid"); + + /* MTR meter object owner valid? 
*/ + if (m->flow == NULL) { + if (stats != NULL) + memset(stats, 0, sizeof(*stats)); + + if (stats_mask) + *stats_mask = MTR_STATS_PKTS_DEFAULT | + MTR_STATS_BYTES_DEFAULT; + + return 0; + } + + pipeline = m->flow->pipeline; + table = &pipeline->table[m->flow->table_id]; + + /* Meter stats read. */ + status = rte_table_action_meter_read(table->a, + m->flow->data, + 1, + &counters, + clear); + if (status) + return -rte_mtr_error_set(error, + EINVAL, + RTE_MTR_ERROR_TYPE_UNSPECIFIED, + NULL, + "Meter stats read failed"); + + /* Stats format conversion. */ + if (stats || stats_mask) { + struct rte_mtr_stats s; + uint64_t s_mask = 0; + + mtr_stats_convert(m, + &counters.stats[0], + &s, + &s_mask); + + if (stats) + memcpy(stats, &s, sizeof(*stats)); + + if (stats_mask) + *stats_mask = s_mask; + } + + return 0; +} + +const struct rte_mtr_ops pmd_mtr_ops = { + .capabilities_get = NULL, + + .meter_profile_add = pmd_mtr_meter_profile_add, + .meter_profile_delete = pmd_mtr_meter_profile_delete, + + .create = pmd_mtr_create, + .destroy = pmd_mtr_destroy, + .meter_enable = NULL, + .meter_disable = NULL, + + .meter_profile_update = pmd_mtr_meter_profile_update, + .meter_dscp_table_update = pmd_mtr_meter_dscp_table_update, + .policer_actions_update = pmd_mtr_policer_actions_update, + .stats_update = NULL, + + .stats_read = pmd_mtr_stats_read, +}; diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_pipeline.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_pipeline.c new file mode 100644 index 000000000..337aa32e5 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_pipeline.c @@ -0,0 +1,1116 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <stdlib.h> +#include <string.h> + +#include <rte_common.h> +#include <rte_ip.h> +#include <rte_tcp.h> + +#include <rte_string_fns.h> +#include <rte_port_ethdev.h> +#include <rte_port_ring.h> +#include <rte_port_source_sink.h> +#include <rte_port_fd.h> +#include <rte_port_sched.h> +#include <rte_port_sym_crypto.h> + +#include <rte_table_acl.h> +#include <rte_table_array.h> +#include <rte_table_hash.h> +#include <rte_table_hash_func.h> +#include <rte_table_lpm.h> +#include <rte_table_lpm_ipv6.h> +#include <rte_table_stub.h> + +#include "rte_eth_softnic_internals.h" + +#ifndef PIPELINE_MSGQ_SIZE +#define PIPELINE_MSGQ_SIZE 64 +#endif + +#ifndef TABLE_LPM_NUMBER_TBL8 +#define TABLE_LPM_NUMBER_TBL8 256 +#endif + +int +softnic_pipeline_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->pipeline_list); + + return 0; +} + +static void +softnic_pipeline_table_free(struct softnic_table *table) +{ + for ( ; ; ) { + struct rte_flow *flow; + + flow = TAILQ_FIRST(&table->flows); + if (flow == NULL) + break; + + TAILQ_REMOVE(&table->flows, flow, node); + free(flow); + } + + for ( ; ; ) { + struct softnic_table_meter_profile *mp; + + mp = TAILQ_FIRST(&table->meter_profiles); + if (mp == NULL) + break; + + TAILQ_REMOVE(&table->meter_profiles, mp, node); + free(mp); + } +} + +void +softnic_pipeline_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct pipeline *pipeline; + uint32_t table_id; + + pipeline = TAILQ_FIRST(&p->pipeline_list); + if (pipeline == NULL) + break; + + TAILQ_REMOVE(&p->pipeline_list, pipeline, node); + + for (table_id = 0; table_id < pipeline->n_tables; table_id++) { + struct softnic_table *table = + &pipeline->table[table_id]; + + softnic_pipeline_table_free(table); + } + + rte_ring_free(pipeline->msgq_req); + rte_ring_free(pipeline->msgq_rsp); + 
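/* Per-table flows and meter profiles were released above, so the underlying rte_pipeline and the pipeline node itself can now go. */ +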
rte_pipeline_free(pipeline->p); + free(pipeline); + } +} + +void +softnic_pipeline_disable_all(struct pmd_internals *p) +{ + struct pipeline *pipeline; + + TAILQ_FOREACH(pipeline, &p->pipeline_list, node) + if (pipeline->enabled) + softnic_thread_pipeline_disable(p, + pipeline->thread_id, + pipeline->name); +} + +uint32_t +softnic_pipeline_thread_count(struct pmd_internals *p, uint32_t thread_id) +{ + struct pipeline *pipeline; + uint32_t count = 0; + + TAILQ_FOREACH(pipeline, &p->pipeline_list, node) + if ((pipeline->enabled) && (pipeline->thread_id == thread_id)) + count++; + + return count; +} + +struct pipeline * +softnic_pipeline_find(struct pmd_internals *p, + const char *name) +{ + struct pipeline *pipeline; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(pipeline, &p->pipeline_list, node) + if (strcmp(name, pipeline->name) == 0) + return pipeline; + + return NULL; +} + +struct pipeline * +softnic_pipeline_create(struct pmd_internals *softnic, + const char *name, + struct pipeline_params *params) +{ + char resource_name[NAME_MAX]; + struct rte_pipeline_params pp; + struct pipeline *pipeline; + struct rte_pipeline *p; + struct rte_ring *msgq_req; + struct rte_ring *msgq_rsp; + + /* Check input params */ + if (name == NULL || + softnic_pipeline_find(softnic, name) || + params == NULL || + params->timer_period_ms == 0) + return NULL; + + /* Resource create */ + snprintf(resource_name, sizeof(resource_name), "%s-%s-REQ", + softnic->params.name, + name); + + msgq_req = rte_ring_create(resource_name, + PIPELINE_MSGQ_SIZE, + softnic->params.cpu_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); + if (msgq_req == NULL) + return NULL; + + snprintf(resource_name, sizeof(resource_name), "%s-%s-RSP", + softnic->params.name, + name); + + msgq_rsp = rte_ring_create(resource_name, + PIPELINE_MSGQ_SIZE, + softnic->params.cpu_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); + if (msgq_rsp == NULL) { + rte_ring_free(msgq_req); + return NULL; + } + + snprintf(resource_name, sizeof(resource_name), "%s_%s", + softnic->params.name, + name); + + pp.name = resource_name; + pp.socket_id = (int)softnic->params.cpu_id; + pp.offset_port_id = params->offset_port_id; + + p = rte_pipeline_create(&pp); + if (p == NULL) { + rte_ring_free(msgq_rsp); + rte_ring_free(msgq_req); + return NULL; + } + + /* Node allocation */ + pipeline = calloc(1, sizeof(struct pipeline)); + if (pipeline == NULL) { + rte_pipeline_free(p); + rte_ring_free(msgq_rsp); + rte_ring_free(msgq_req); + return NULL; + } + + /* Node fill in */ + strlcpy(pipeline->name, name, sizeof(pipeline->name)); + pipeline->p = p; + memcpy(&pipeline->params, params, sizeof(*params)); + pipeline->n_ports_in = 0; + pipeline->n_ports_out = 0; + pipeline->n_tables = 0; + pipeline->msgq_req = msgq_req; + pipeline->msgq_rsp = msgq_rsp; + pipeline->timer_period_ms = params->timer_period_ms; + pipeline->enabled = 0; + pipeline->cpu_id = softnic->params.cpu_id; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&softnic->pipeline_list, pipeline, node); + + return pipeline; +} + +int +softnic_pipeline_port_in_create(struct pmd_internals *softnic, + const char *pipeline_name, + struct softnic_port_in_params *params, + int enabled) +{ + struct rte_pipeline_port_in_params p; + + union { + struct rte_port_ethdev_reader_params ethdev; + struct rte_port_ring_reader_params ring; + struct rte_port_sched_reader_params sched; + struct rte_port_fd_reader_params fd; + struct rte_port_source_params source; + struct rte_port_sym_crypto_reader_params cryptodev; + } pp; + + struct pipeline *pipeline; + 
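/* Only the pp union member that matches params->type is filled in by the switch below. */ +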
struct softnic_port_in *port_in; + struct softnic_port_in_action_profile *ap; + struct rte_port_in_action *action; + uint32_t port_id; + int status; + + memset(&p, 0, sizeof(p)); + memset(&pp, 0, sizeof(pp)); + + /* Check input params */ + if (pipeline_name == NULL || + params == NULL || + params->burst_size == 0 || + params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX) + return -1; + + pipeline = softnic_pipeline_find(softnic, pipeline_name); + if (pipeline == NULL) + return -1; + + ap = NULL; + if (strlen(params->action_profile_name)) { + ap = softnic_port_in_action_profile_find(softnic, + params->action_profile_name); + if (ap == NULL) + return -1; + } + + switch (params->type) { + case PORT_IN_RXQ: + { + struct softnic_link *link; + + link = softnic_link_find(softnic, params->dev_name); + if (link == NULL) + return -1; + + if (params->rxq.queue_id >= link->n_rxq) + return -1; + + pp.ethdev.port_id = link->port_id; + pp.ethdev.queue_id = params->rxq.queue_id; + + p.ops = &rte_port_ethdev_reader_ops; + p.arg_create = &pp.ethdev; + break; + } + + case PORT_IN_SWQ: + { + struct softnic_swq *swq; + + swq = softnic_swq_find(softnic, params->dev_name); + if (swq == NULL) + return -1; + + pp.ring.ring = swq->r; + + p.ops = &rte_port_ring_reader_ops; + p.arg_create = &pp.ring; + break; + } + + case PORT_IN_TMGR: + { + struct softnic_tmgr_port *tmgr_port; + + tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name); + if (tmgr_port == NULL) + return -1; + + pp.sched.sched = tmgr_port->s; + + p.ops = &rte_port_sched_reader_ops; + p.arg_create = &pp.sched; + break; + } + + case PORT_IN_TAP: + { + struct softnic_tap *tap; + struct softnic_mempool *mempool; + + tap = softnic_tap_find(softnic, params->dev_name); + mempool = softnic_mempool_find(softnic, params->tap.mempool_name); + if (tap == NULL || mempool == NULL) + return -1; + + pp.fd.fd = tap->fd; + pp.fd.mempool = mempool->m; + pp.fd.mtu = params->tap.mtu; + + p.ops = &rte_port_fd_reader_ops; + p.arg_create = &pp.fd; + break; + } + + case PORT_IN_SOURCE: + { + struct softnic_mempool *mempool; + + mempool = softnic_mempool_find(softnic, params->source.mempool_name); + if (mempool == NULL) + return -1; + + pp.source.mempool = mempool->m; + pp.source.file_name = params->source.file_name; + pp.source.n_bytes_per_pkt = params->source.n_bytes_per_pkt; + + p.ops = &rte_port_source_ops; + p.arg_create = &pp.source; + break; + } + + case PORT_IN_CRYPTODEV: + { + struct softnic_cryptodev *cryptodev; + + cryptodev = softnic_cryptodev_find(softnic, params->dev_name); + if (cryptodev == NULL) + return -1; + + pp.cryptodev.cryptodev_id = cryptodev->dev_id; + pp.cryptodev.queue_id = params->cryptodev.queue_id; + pp.cryptodev.f_callback = params->cryptodev.f_callback; + pp.cryptodev.arg_callback = params->cryptodev.arg_callback; + p.ops = &rte_port_sym_crypto_reader_ops; + p.arg_create = &pp.cryptodev; + break; + } + + default: + return -1; + } + + p.burst_size = params->burst_size; + + /* Resource create */ + action = NULL; + p.f_action = NULL; + p.arg_ah = NULL; + + if (ap) { + action = rte_port_in_action_create(ap->ap, + softnic->params.cpu_id); + if (action == NULL) + return -1; + + status = rte_port_in_action_params_get(action, + &p); + if (status) { + rte_port_in_action_free(action); + return -1; + } + } + + status = rte_pipeline_port_in_create(pipeline->p, + &p, + &port_id); + if (status) { + rte_port_in_action_free(action); + return -1; + } + + if (enabled) + rte_pipeline_port_in_enable(pipeline->p, port_id); + + /* Pipeline */ + port_in = 
&pipeline->port_in[pipeline->n_ports_in]; + memcpy(&port_in->params, params, sizeof(*params)); + port_in->ap = ap; + port_in->a = action; + pipeline->n_ports_in++; + + return 0; +} + +int +softnic_pipeline_port_in_connect_to_table(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t port_id, + uint32_t table_id) +{ + struct pipeline *pipeline; + int status; + + /* Check input params */ + if (pipeline_name == NULL) + return -1; + + pipeline = softnic_pipeline_find(softnic, pipeline_name); + if (pipeline == NULL || + port_id >= pipeline->n_ports_in || + table_id >= pipeline->n_tables) + return -1; + + /* Resource */ + status = rte_pipeline_port_in_connect_to_table(pipeline->p, + port_id, + table_id); + + return status; +} + +int +softnic_pipeline_port_out_create(struct pmd_internals *softnic, + const char *pipeline_name, + struct softnic_port_out_params *params) +{ + struct rte_pipeline_port_out_params p; + + union { + struct rte_port_ethdev_writer_params ethdev; + struct rte_port_ring_writer_params ring; + struct rte_port_sched_writer_params sched; + struct rte_port_fd_writer_params fd; + struct rte_port_sink_params sink; + struct rte_port_sym_crypto_writer_params cryptodev; + } pp; + + union { + struct rte_port_ethdev_writer_nodrop_params ethdev; + struct rte_port_ring_writer_nodrop_params ring; + struct rte_port_fd_writer_nodrop_params fd; + struct rte_port_sym_crypto_writer_nodrop_params cryptodev; + } pp_nodrop; + + struct pipeline *pipeline; + struct softnic_port_out *port_out; + uint32_t port_id; + int status; + + memset(&p, 0, sizeof(p)); + memset(&pp, 0, sizeof(pp)); + memset(&pp_nodrop, 0, sizeof(pp_nodrop)); + + /* Check input params */ + if (pipeline_name == NULL || + params == NULL || + params->burst_size == 0 || + params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX) + return -1; + + pipeline = softnic_pipeline_find(softnic, pipeline_name); + if (pipeline == NULL) + return -1; + + switch (params->type) { + case PORT_OUT_TXQ: + { + struct softnic_link *link; + + link = softnic_link_find(softnic, params->dev_name); + if (link == NULL) + return -1; + + if (params->txq.queue_id >= link->n_txq) + return -1; + + pp.ethdev.port_id = link->port_id; + pp.ethdev.queue_id = params->txq.queue_id; + pp.ethdev.tx_burst_sz = params->burst_size; + + pp_nodrop.ethdev.port_id = link->port_id; + pp_nodrop.ethdev.queue_id = params->txq.queue_id; + pp_nodrop.ethdev.tx_burst_sz = params->burst_size; + pp_nodrop.ethdev.n_retries = params->n_retries; + + if (params->retry == 0) { + p.ops = &rte_port_ethdev_writer_ops; + p.arg_create = &pp.ethdev; + } else { + p.ops = &rte_port_ethdev_writer_nodrop_ops; + p.arg_create = &pp_nodrop.ethdev; + } + break; + } + + case PORT_OUT_SWQ: + { + struct softnic_swq *swq; + + swq = softnic_swq_find(softnic, params->dev_name); + if (swq == NULL) + return -1; + + pp.ring.ring = swq->r; + pp.ring.tx_burst_sz = params->burst_size; + + pp_nodrop.ring.ring = swq->r; + pp_nodrop.ring.tx_burst_sz = params->burst_size; + pp_nodrop.ring.n_retries = params->n_retries; + + if (params->retry == 0) { + p.ops = &rte_port_ring_writer_ops; + p.arg_create = &pp.ring; + } else { + p.ops = &rte_port_ring_writer_nodrop_ops; + p.arg_create = &pp_nodrop.ring; + } + break; + } + + case PORT_OUT_TMGR: + { + struct softnic_tmgr_port *tmgr_port; + + tmgr_port = softnic_tmgr_port_find(softnic, params->dev_name); + if (tmgr_port == NULL) + return -1; + + pp.sched.sched = tmgr_port->s; + pp.sched.tx_burst_sz = params->burst_size; + + p.ops = &rte_port_sched_writer_ops; + 
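/* No nodrop writer is available for the sched port, so params->retry is not consulted for TMGR output. */ +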
p.arg_create = &pp.sched; + break; + } + + case PORT_OUT_TAP: + { + struct softnic_tap *tap; + + tap = softnic_tap_find(softnic, params->dev_name); + if (tap == NULL) + return -1; + + pp.fd.fd = tap->fd; + pp.fd.tx_burst_sz = params->burst_size; + + pp_nodrop.fd.fd = tap->fd; + pp_nodrop.fd.tx_burst_sz = params->burst_size; + pp_nodrop.fd.n_retries = params->n_retries; + + if (params->retry == 0) { + p.ops = &rte_port_fd_writer_ops; + p.arg_create = &pp.fd; + } else { + p.ops = &rte_port_fd_writer_nodrop_ops; + p.arg_create = &pp_nodrop.fd; + } + break; + } + + case PORT_OUT_SINK: + { + pp.sink.file_name = params->sink.file_name; + pp.sink.max_n_pkts = params->sink.max_n_pkts; + + p.ops = &rte_port_sink_ops; + p.arg_create = &pp.sink; + break; + } + + case PORT_OUT_CRYPTODEV: + { + struct softnic_cryptodev *cryptodev; + + cryptodev = softnic_cryptodev_find(softnic, params->dev_name); + if (cryptodev == NULL) + return -1; + + if (params->cryptodev.queue_id >= cryptodev->n_queues) + return -1; + + pp.cryptodev.cryptodev_id = cryptodev->dev_id; + pp.cryptodev.queue_id = params->cryptodev.queue_id; + pp.cryptodev.tx_burst_sz = params->burst_size; + pp.cryptodev.crypto_op_offset = params->cryptodev.op_offset; + + pp_nodrop.cryptodev.cryptodev_id = cryptodev->dev_id; + pp_nodrop.cryptodev.queue_id = params->cryptodev.queue_id; + pp_nodrop.cryptodev.tx_burst_sz = params->burst_size; + pp_nodrop.cryptodev.n_retries = params->retry; + pp_nodrop.cryptodev.crypto_op_offset = + params->cryptodev.op_offset; + + if (params->retry == 0) { + p.ops = &rte_port_sym_crypto_writer_ops; + p.arg_create = &pp.cryptodev; + } else { + p.ops = &rte_port_sym_crypto_writer_nodrop_ops; + p.arg_create = &pp_nodrop.cryptodev; + } + + break; + } + + default: + return -1; + } + + p.f_action = NULL; + p.arg_ah = NULL; + + /* Resource create */ + status = rte_pipeline_port_out_create(pipeline->p, + &p, + &port_id); + + if (status) + return -1; + + /* Pipeline */ + port_out = &pipeline->port_out[pipeline->n_ports_out]; + memcpy(&port_out->params, params, sizeof(*params)); + pipeline->n_ports_out++; + + return 0; +} + +static const struct rte_acl_field_def table_acl_field_format_ipv4[] = { + /* Protocol */ + [0] = { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint8_t), + .field_index = 0, + .input_index = 0, + .offset = offsetof(struct rte_ipv4_hdr, next_proto_id), + }, + + /* Source IP address (IPv4) */ + [1] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 1, + .input_index = 1, + .offset = offsetof(struct rte_ipv4_hdr, src_addr), + }, + + /* Destination IP address (IPv4) */ + [2] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 2, + .input_index = 2, + .offset = offsetof(struct rte_ipv4_hdr, dst_addr), + }, + + /* Source Port */ + [3] = { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = 3, + .input_index = 3, + .offset = sizeof(struct rte_ipv4_hdr) + + offsetof(struct rte_tcp_hdr, src_port), + }, + + /* Destination Port */ + [4] = { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = 4, + .input_index = 3, + .offset = sizeof(struct rte_ipv4_hdr) + + offsetof(struct rte_tcp_hdr, dst_port), + }, +}; + +static const struct rte_acl_field_def table_acl_field_format_ipv6[] = { + /* Protocol */ + [0] = { + .type = RTE_ACL_FIELD_TYPE_BITMASK, + .size = sizeof(uint8_t), + .field_index = 0, + .input_index = 0, + .offset = offsetof(struct rte_ipv6_hdr, proto), + }, + + /* Source IP address 
(IPv6) */ + [1] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 1, + .input_index = 1, + .offset = offsetof(struct rte_ipv6_hdr, src_addr[0]), + }, + + [2] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 2, + .input_index = 2, + .offset = offsetof(struct rte_ipv6_hdr, src_addr[4]), + }, + + [3] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 3, + .input_index = 3, + .offset = offsetof(struct rte_ipv6_hdr, src_addr[8]), + }, + + [4] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 4, + .input_index = 4, + .offset = offsetof(struct rte_ipv6_hdr, src_addr[12]), + }, + + /* Destination IP address (IPv6) */ + [5] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 5, + .input_index = 5, + .offset = offsetof(struct rte_ipv6_hdr, dst_addr[0]), + }, + + [6] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 6, + .input_index = 6, + .offset = offsetof(struct rte_ipv6_hdr, dst_addr[4]), + }, + + [7] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 7, + .input_index = 7, + .offset = offsetof(struct rte_ipv6_hdr, dst_addr[8]), + }, + + [8] = { + .type = RTE_ACL_FIELD_TYPE_MASK, + .size = sizeof(uint32_t), + .field_index = 8, + .input_index = 8, + .offset = offsetof(struct rte_ipv6_hdr, dst_addr[12]), + }, + + /* Source Port */ + [9] = { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = 9, + .input_index = 9, + .offset = sizeof(struct rte_ipv6_hdr) + + offsetof(struct rte_tcp_hdr, src_port), + }, + + /* Destination Port */ + [10] = { + .type = RTE_ACL_FIELD_TYPE_RANGE, + .size = sizeof(uint16_t), + .field_index = 10, + .input_index = 9, + .offset = sizeof(struct rte_ipv6_hdr) + + offsetof(struct rte_tcp_hdr, dst_port), + }, +}; + +int +softnic_pipeline_table_create(struct pmd_internals *softnic, + const char *pipeline_name, + struct softnic_table_params *params) +{ + char name[NAME_MAX]; + struct rte_pipeline_table_params p; + + union { + struct rte_table_acl_params acl; + struct rte_table_array_params array; + struct rte_table_hash_params hash; + struct rte_table_lpm_params lpm; + struct rte_table_lpm_ipv6_params lpm_ipv6; + } pp; + + struct pipeline *pipeline; + struct softnic_table *table; + struct softnic_table_action_profile *ap; + struct rte_table_action *action; + uint32_t table_id; + int status; + + memset(&p, 0, sizeof(p)); + memset(&pp, 0, sizeof(pp)); + + /* Check input params */ + if (pipeline_name == NULL || + params == NULL) + return -1; + + pipeline = softnic_pipeline_find(softnic, pipeline_name); + if (pipeline == NULL || + pipeline->n_tables >= RTE_PIPELINE_TABLE_MAX) + return -1; + + ap = NULL; + if (strlen(params->action_profile_name)) { + ap = softnic_table_action_profile_find(softnic, + params->action_profile_name); + if (ap == NULL) + return -1; + } + + snprintf(name, NAME_MAX, "%s_%s_table%u", + softnic->params.name, pipeline_name, pipeline->n_tables); + + switch (params->match_type) { + case TABLE_ACL: + { + uint32_t ip_header_offset = params->match.acl.ip_header_offset - + (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM); + uint32_t i; + + if (params->match.acl.n_rules == 0) + return -1; + + pp.acl.name = name; + pp.acl.n_rules = params->match.acl.n_rules; + if (params->match.acl.ip_version) { + memcpy(&pp.acl.field_format, + &table_acl_field_format_ipv4, + sizeof(table_acl_field_format_ipv4)); + 
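/* Template field offsets are relative to the IP header; the loop below rebases them by the packet-relative ip_header_offset. */ +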
pp.acl.n_rule_fields = + RTE_DIM(table_acl_field_format_ipv4); + } else { + memcpy(&pp.acl.field_format, + &table_acl_field_format_ipv6, + sizeof(table_acl_field_format_ipv6)); + pp.acl.n_rule_fields = + RTE_DIM(table_acl_field_format_ipv6); + } + + for (i = 0; i < pp.acl.n_rule_fields; i++) + pp.acl.field_format[i].offset += ip_header_offset; + + p.ops = &rte_table_acl_ops; + p.arg_create = &pp.acl; + break; + } + + case TABLE_ARRAY: + { + if (params->match.array.n_keys == 0) + return -1; + + pp.array.n_entries = params->match.array.n_keys; + pp.array.offset = params->match.array.key_offset; + + p.ops = &rte_table_array_ops; + p.arg_create = &pp.array; + break; + } + + case TABLE_HASH: + { + struct rte_table_ops *ops; + rte_table_hash_op_hash f_hash; + + if (params->match.hash.n_keys == 0) + return -1; + + switch (params->match.hash.key_size) { + case 8: + f_hash = rte_table_hash_crc_key8; + break; + case 16: + f_hash = rte_table_hash_crc_key16; + break; + case 24: + f_hash = rte_table_hash_crc_key24; + break; + case 32: + f_hash = rte_table_hash_crc_key32; + break; + case 40: + f_hash = rte_table_hash_crc_key40; + break; + case 48: + f_hash = rte_table_hash_crc_key48; + break; + case 56: + f_hash = rte_table_hash_crc_key56; + break; + case 64: + f_hash = rte_table_hash_crc_key64; + break; + default: + return -1; + } + + pp.hash.name = name; + pp.hash.key_size = params->match.hash.key_size; + pp.hash.key_offset = params->match.hash.key_offset; + pp.hash.key_mask = params->match.hash.key_mask; + pp.hash.n_keys = params->match.hash.n_keys; + pp.hash.n_buckets = params->match.hash.n_buckets; + pp.hash.f_hash = f_hash; + pp.hash.seed = 0; + + if (params->match.hash.extendable_bucket) + switch (params->match.hash.key_size) { + case 8: + ops = &rte_table_hash_key8_ext_ops; + break; + case 16: + ops = &rte_table_hash_key16_ext_ops; + break; + default: + ops = &rte_table_hash_ext_ops; + } + else + switch (params->match.hash.key_size) { + case 8: + ops = &rte_table_hash_key8_lru_ops; + break; + case 16: + ops = &rte_table_hash_key16_lru_ops; + break; + default: + ops = &rte_table_hash_lru_ops; + } + + p.ops = ops; + p.arg_create = &pp.hash; + break; + } + + case TABLE_LPM: + { + if (params->match.lpm.n_rules == 0) + return -1; + + switch (params->match.lpm.key_size) { + case 4: + { + pp.lpm.name = name; + pp.lpm.n_rules = params->match.lpm.n_rules; + pp.lpm.number_tbl8s = TABLE_LPM_NUMBER_TBL8; + pp.lpm.flags = 0; + pp.lpm.entry_unique_size = p.action_data_size + + sizeof(struct rte_pipeline_table_entry); + pp.lpm.offset = params->match.lpm.key_offset; + + p.ops = &rte_table_lpm_ops; + p.arg_create = &pp.lpm; + break; + } + + case 16: + { + pp.lpm_ipv6.name = name; + pp.lpm_ipv6.n_rules = params->match.lpm.n_rules; + pp.lpm_ipv6.number_tbl8s = TABLE_LPM_NUMBER_TBL8; + pp.lpm_ipv6.entry_unique_size = p.action_data_size + + sizeof(struct rte_pipeline_table_entry); + pp.lpm_ipv6.offset = params->match.lpm.key_offset; + + p.ops = &rte_table_lpm_ipv6_ops; + p.arg_create = &pp.lpm_ipv6; + break; + } + + default: + return -1; + } + + break; + } + + case TABLE_STUB: + { + p.ops = &rte_table_stub_ops; + p.arg_create = NULL; + break; + } + + default: + return -1; + } + + /* Resource create */ + action = NULL; + p.f_action_hit = NULL; + p.f_action_miss = NULL; + p.arg_ah = NULL; + + if (ap) { + action = rte_table_action_create(ap->ap, + softnic->params.cpu_id); + if (action == NULL) + return -1; + + status = rte_table_action_table_params_get(action, + &p); + if (status || + ((p.action_data_size + + 
sizeof(struct rte_pipeline_table_entry)) > + TABLE_RULE_ACTION_SIZE_MAX)) { + rte_table_action_free(action); + return -1; + } + } + + if (params->match_type == TABLE_LPM) { + if (params->match.lpm.key_size == 4) + pp.lpm.entry_unique_size = p.action_data_size + + sizeof(struct rte_pipeline_table_entry); + + if (params->match.lpm.key_size == 16) + pp.lpm_ipv6.entry_unique_size = p.action_data_size + + sizeof(struct rte_pipeline_table_entry); + } + + status = rte_pipeline_table_create(pipeline->p, + &p, + &table_id); + if (status) { + rte_table_action_free(action); + return -1; + } + + /* Pipeline */ + table = &pipeline->table[pipeline->n_tables]; + memcpy(&table->params, params, sizeof(*params)); + table->ap = ap; + table->a = action; + TAILQ_INIT(&table->flows); + TAILQ_INIT(&table->meter_profiles); + memset(&table->dscp_table, 0, sizeof(table->dscp_table)); + pipeline->n_tables++; + + return 0; +} + +int +softnic_pipeline_port_out_find(struct pmd_internals *softnic, + const char *pipeline_name, + const char *name, + uint32_t *port_id) +{ + struct pipeline *pipeline; + uint32_t i; + + if (softnic == NULL || + pipeline_name == NULL || + name == NULL || + port_id == NULL) + return -1; + + pipeline = softnic_pipeline_find(softnic, pipeline_name); + if (pipeline == NULL) + return -1; + + for (i = 0; i < pipeline->n_ports_out; i++) + if (strcmp(pipeline->port_out[i].params.dev_name, name) == 0) { + *port_id = i; + return 0; + } + + return -1; +} + +struct softnic_table_meter_profile * +softnic_pipeline_table_meter_profile_find(struct softnic_table *table, + uint32_t meter_profile_id) +{ + struct softnic_table_meter_profile *mp; + + TAILQ_FOREACH(mp, &table->meter_profiles, node) + if (mp->meter_profile_id == meter_profile_id) + return mp; + + return NULL; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_swq.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_swq.c new file mode 100644 index 000000000..2083d0a97 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_swq.c @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <stdlib.h> +#include <string.h> + +#include <rte_string_fns.h> +#include <rte_tailq.h> + +#include "rte_eth_softnic_internals.h" + +int +softnic_swq_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->swq_list); + + return 0; +} + +void +softnic_swq_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_swq *swq; + + swq = TAILQ_FIRST(&p->swq_list); + if (swq == NULL) + break; + + TAILQ_REMOVE(&p->swq_list, swq, node); + rte_ring_free(swq->r); + free(swq); + } +} + +void +softnic_softnic_swq_free_keep_rxq_txq(struct pmd_internals *p) +{ + struct softnic_swq *swq, *tswq; + + TAILQ_FOREACH_SAFE(swq, &p->swq_list, node, tswq) { + if ((strncmp(swq->name, "RXQ", strlen("RXQ")) == 0) || + (strncmp(swq->name, "TXQ", strlen("TXQ")) == 0)) + continue; + + TAILQ_REMOVE(&p->swq_list, swq, node); + rte_ring_free(swq->r); + free(swq); + } +} + +struct softnic_swq * +softnic_swq_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_swq *swq; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(swq, &p->swq_list, node) + if (strcmp(swq->name, name) == 0) + return swq; + + return NULL; +} + +struct softnic_swq * +softnic_swq_create(struct pmd_internals *p, + const char *name, + struct softnic_swq_params *params) +{ + char ring_name[NAME_SIZE]; + struct softnic_swq *swq; + struct rte_ring *r; + unsigned int flags = RING_F_SP_ENQ | RING_F_SC_DEQ; + + /* Check input 
params */ + if (name == NULL || + softnic_swq_find(p, name) || + params == NULL || + params->size == 0) + return NULL; + + /* Resource create */ + snprintf(ring_name, sizeof(ring_name), "%s_%s", + p->params.name, + name); + + r = rte_ring_create(ring_name, + params->size, + p->params.cpu_id, + flags); + + if (r == NULL) + return NULL; + + /* Node allocation */ + swq = calloc(1, sizeof(struct softnic_swq)); + if (swq == NULL) { + rte_ring_free(r); + return NULL; + } + + /* Node fill in */ + strlcpy(swq->name, name, sizeof(swq->name)); + swq->r = r; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->swq_list, swq, node); + + return swq; +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tap.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tap.c new file mode 100644 index 000000000..36fe9f028 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tap.c @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <netinet/in.h> +#ifdef RTE_EXEC_ENV_LINUX +#include <linux/if.h> +#include <linux/if_tun.h> +#endif +#include <sys/ioctl.h> + +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> + +#include <rte_string_fns.h> + +#include "rte_eth_softnic_internals.h" + +#define TAP_DEV "/dev/net/tun" + +int +softnic_tap_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->tap_list); + + return 0; +} + +void +softnic_tap_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_tap *tap; + + tap = TAILQ_FIRST(&p->tap_list); + if (tap == NULL) + break; + + TAILQ_REMOVE(&p->tap_list, tap, node); + free(tap); + } +} + +struct softnic_tap * +softnic_tap_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_tap *tap; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(tap, &p->tap_list, node) + if (strcmp(tap->name, name) == 0) + return tap; + + return NULL; +} + +#ifndef RTE_EXEC_ENV_LINUX + +struct softnic_tap * +softnic_tap_create(struct pmd_internals *p __rte_unused, + const char *name __rte_unused) +{ + return NULL; +} + +#else + +struct softnic_tap * +softnic_tap_create(struct pmd_internals *p, + const char *name) +{ + struct softnic_tap *tap; + struct ifreq ifr; + int fd, status; + + /* Check input params */ + if (name == NULL || + softnic_tap_find(p, name)) + return NULL; + + /* Resource create */ + fd = open(TAP_DEV, O_RDWR | O_NONBLOCK); + if (fd < 0) + return NULL; + + memset(&ifr, 0, sizeof(ifr)); + ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */ + strlcpy(ifr.ifr_name, name, IFNAMSIZ); + + status = ioctl(fd, TUNSETIFF, (void *)&ifr); + if (status < 0) { + close(fd); + return NULL; + } + + /* Node allocation */ + tap = calloc(1, sizeof(struct softnic_tap)); + if (tap == NULL) { + close(fd); + return NULL; + } + /* Node fill in */ + strlcpy(tap->name, name, sizeof(tap->name)); + tap->fd = fd; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->tap_list, tap, node); + + return tap; +} + +#endif diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c new file mode 100644 index 000000000..dcfb5eb82 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_thread.c @@ -0,0 +1,3063 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2018 Intel Corporation + */ + +#include <stdlib.h> + +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_lcore.h> +#include <rte_service_component.h> +#include <rte_ring.h> 
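+ /* The master thread drives each data plane thread through a per-thread pair of SP/SC message rings (msgq_req/msgq_rsp) created in softnic_thread_init(). */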
+ +#include <rte_table_acl.h> +#include <rte_table_array.h> +#include <rte_table_hash.h> +#include <rte_table_lpm.h> +#include <rte_table_lpm_ipv6.h> +#include "rte_eth_softnic_internals.h" + +/** + * Master thread: data plane thread init + */ +void +softnic_thread_free(struct pmd_internals *softnic) +{ + uint32_t i; + + RTE_LCORE_FOREACH_SLAVE(i) { + struct softnic_thread *t = &softnic->thread[i]; + + /* MSGQs */ + if (t->msgq_req) + rte_ring_free(t->msgq_req); + + if (t->msgq_rsp) + rte_ring_free(t->msgq_rsp); + } +} + +int +softnic_thread_init(struct pmd_internals *softnic) +{ + uint32_t i; + + for (i = 0; i < RTE_MAX_LCORE; i++) { + char ring_name[NAME_MAX]; + struct rte_ring *msgq_req, *msgq_rsp; + struct softnic_thread *t = &softnic->thread[i]; + struct softnic_thread_data *t_data = &softnic->thread_data[i]; + uint32_t cpu_id = rte_lcore_to_socket_id(i); + + /* MSGQs */ + snprintf(ring_name, sizeof(ring_name), "%s-TH%u-REQ", + softnic->params.name, + i); + + msgq_req = rte_ring_create(ring_name, + THREAD_MSGQ_SIZE, + cpu_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); + + if (msgq_req == NULL) { + softnic_thread_free(softnic); + return -1; + } + + snprintf(ring_name, sizeof(ring_name), "%s-TH%u-RSP", + softnic->params.name, + i); + + msgq_rsp = rte_ring_create(ring_name, + THREAD_MSGQ_SIZE, + cpu_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); + + if (msgq_rsp == NULL) { + softnic_thread_free(softnic); + return -1; + } + + /* Master thread records */ + t->msgq_req = msgq_req; + t->msgq_rsp = msgq_rsp; + t->service_id = UINT32_MAX; + + /* Data plane thread records */ + t_data->n_pipelines = 0; + t_data->msgq_req = msgq_req; + t_data->msgq_rsp = msgq_rsp; + t_data->timer_period = + (rte_get_tsc_hz() * THREAD_TIMER_PERIOD_MS) / 1000; + t_data->time_next = rte_get_tsc_cycles() + t_data->timer_period; + t_data->time_next_min = t_data->time_next; + } + + return 0; +} + +static inline int +thread_is_valid(struct pmd_internals *softnic, uint32_t thread_id) +{ + if (thread_id == rte_get_master_lcore()) + return 0; /* FALSE */ + + if (softnic->params.sc && rte_lcore_has_role(thread_id, ROLE_SERVICE)) + return 1; /* TRUE */ + if (!softnic->params.sc && rte_lcore_has_role(thread_id, ROLE_RTE)) + return 1; /* TRUE */ + + return 0; /* FALSE */ +} + +static inline int +thread_is_running(uint32_t thread_id) +{ + enum rte_lcore_state_t thread_state; + + thread_state = rte_eal_get_lcore_state(thread_id); + return (thread_state == RUNNING)? 
1 : 0; +} + +static int32_t +rte_pmd_softnic_run_internal(void *arg); + +static inline int +thread_sc_service_up(struct pmd_internals *softnic, uint32_t thread_id) +{ + struct rte_service_spec service_params; + struct softnic_thread *t = &softnic->thread[thread_id]; + struct rte_eth_dev *dev; + int status; + uint16_t port_id; + + /* service params */ + status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id); + if (status) + return status; + + dev = &rte_eth_devices[port_id]; + snprintf(service_params.name, sizeof(service_params.name), "%s_%u", + softnic->params.name, + thread_id); + service_params.callback = rte_pmd_softnic_run_internal; + service_params.callback_userdata = dev; + service_params.capabilities = 0; + service_params.socket_id = (int)softnic->params.cpu_id; + + /* service register */ + status = rte_service_component_register(&service_params, &t->service_id); + if (status) + return status; + + status = rte_service_component_runstate_set(t->service_id, 1); + if (status) { + rte_service_component_unregister(t->service_id); + t->service_id = UINT32_MAX; + return status; + } + + status = rte_service_runstate_set(t->service_id, 1); + if (status) { + rte_service_component_runstate_set(t->service_id, 0); + rte_service_component_unregister(t->service_id); + t->service_id = UINT32_MAX; + return status; + } + + /* service map to thread */ + status = rte_service_map_lcore_set(t->service_id, thread_id, 1); + if (status) { + rte_service_runstate_set(t->service_id, 0); + rte_service_component_runstate_set(t->service_id, 0); + rte_service_component_unregister(t->service_id); + t->service_id = UINT32_MAX; + return status; + } + + return 0; +} + +static inline void +thread_sc_service_down(struct pmd_internals *softnic, uint32_t thread_id) +{ + struct softnic_thread *t = &softnic->thread[thread_id]; + + /* service unmap from thread */ + rte_service_map_lcore_set(t->service_id, thread_id, 0); + + /* service unregister */ + rte_service_runstate_set(t->service_id, 0); + rte_service_component_runstate_set(t->service_id, 0); + rte_service_component_unregister(t->service_id); + + t->service_id = UINT32_MAX; +} + +/** + * Pipeline is running when: + * (A) Pipeline is mapped to a data plane thread AND + * (B) Its data plane thread is in RUNNING state. 
+ */ +static inline int +pipeline_is_running(struct pipeline *p) +{ + if (p->enabled == 0) + return 0; + + return thread_is_running(p->thread_id); +} + +/** + * Master thread & data plane threads: message passing + */ +enum thread_req_type { + THREAD_REQ_PIPELINE_ENABLE = 0, + THREAD_REQ_PIPELINE_DISABLE, + THREAD_REQ_MAX +}; + +struct thread_msg_req { + enum thread_req_type type; + + union { + struct { + struct rte_pipeline *p; + struct { + struct rte_table_action *a; + } table[RTE_PIPELINE_TABLE_MAX]; + struct rte_ring *msgq_req; + struct rte_ring *msgq_rsp; + uint32_t timer_period_ms; + uint32_t n_tables; + } pipeline_enable; + + struct { + struct rte_pipeline *p; + } pipeline_disable; + }; +}; + +struct thread_msg_rsp { + int status; +}; + +/** + * Master thread + */ +static struct thread_msg_req * +thread_msg_alloc(void) +{ + size_t size = RTE_MAX(sizeof(struct thread_msg_req), + sizeof(struct thread_msg_rsp)); + + return calloc(1, size); +} + +static void +thread_msg_free(struct thread_msg_rsp *rsp) +{ + free(rsp); +} + +static struct thread_msg_rsp * +thread_msg_send_recv(struct pmd_internals *softnic, + uint32_t thread_id, + struct thread_msg_req *req) +{ + struct softnic_thread *t = &softnic->thread[thread_id]; + struct rte_ring *msgq_req = t->msgq_req; + struct rte_ring *msgq_rsp = t->msgq_rsp; + struct thread_msg_rsp *rsp; + int status; + + /* send */ + do { + status = rte_ring_sp_enqueue(msgq_req, req); + } while (status == -ENOBUFS); + + /* recv */ + do { + status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp); + } while (status != 0); + + return rsp; +} + +int +softnic_thread_pipeline_enable(struct pmd_internals *softnic, + uint32_t thread_id, + const char *pipeline_name) +{ + struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name); + struct thread_msg_req *req; + struct thread_msg_rsp *rsp; + uint32_t n_pipelines, i; + int status; + + /* Check input params */ + if (!thread_is_valid(softnic, thread_id) || + (p == NULL) || + (p->n_ports_in == 0) || + (p->n_ports_out == 0) || + (p->n_tables == 0) || + p->enabled) + return -1; + + n_pipelines = softnic_pipeline_thread_count(softnic, thread_id); + if (n_pipelines >= THREAD_PIPELINES_MAX) + return -1; + + if (softnic->params.sc && (n_pipelines == 0)) { + status = thread_sc_service_up(softnic, thread_id); + if (status) + return status; + } + + if (!thread_is_running(thread_id)) { + struct softnic_thread_data *td = &softnic->thread_data[thread_id]; + struct pipeline_data *tdp = &td->pipeline_data[td->n_pipelines]; + + /* Data plane thread */ + td->p[td->n_pipelines] = p->p; + + tdp->p = p->p; + for (i = 0; i < p->n_tables; i++) + tdp->table_data[i].a = + p->table[i].a; + tdp->n_tables = p->n_tables; + + tdp->msgq_req = p->msgq_req; + tdp->msgq_rsp = p->msgq_rsp; + tdp->timer_period = (rte_get_tsc_hz() * p->timer_period_ms) / 1000; + tdp->time_next = rte_get_tsc_cycles() + tdp->timer_period; + + td->n_pipelines++; + + /* Pipeline */ + p->thread_id = thread_id; + p->enabled = 1; + + return 0; + } + + /* Allocate request */ + req = thread_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = THREAD_REQ_PIPELINE_ENABLE; + req->pipeline_enable.p = p->p; + for (i = 0; i < p->n_tables; i++) + req->pipeline_enable.table[i].a = + p->table[i].a; + req->pipeline_enable.msgq_req = p->msgq_req; + req->pipeline_enable.msgq_rsp = p->msgq_rsp; + req->pipeline_enable.timer_period_ms = p->timer_period_ms; + req->pipeline_enable.n_tables = p->n_tables; + + /* Send request and wait for response */ + rsp = 
thread_msg_send_recv(softnic, thread_id, req); + + /* Read response */ + status = rsp->status; + + /* Free response */ + thread_msg_free(rsp); + + /* Request completion */ + if (status) + return status; + + p->thread_id = thread_id; + p->enabled = 1; + + return 0; +} + +int +softnic_thread_pipeline_disable(struct pmd_internals *softnic, + uint32_t thread_id, + const char *pipeline_name) +{ + struct pipeline *p = softnic_pipeline_find(softnic, pipeline_name); + struct thread_msg_req *req; + struct thread_msg_rsp *rsp; + uint32_t n_pipelines; + int status; + + /* Check input params */ + if (!thread_is_valid(softnic, thread_id) || + (p == NULL) || + (p->enabled && (p->thread_id != thread_id))) + return -1; + + if (p->enabled == 0) + return 0; + + if (!thread_is_running(thread_id)) { + struct softnic_thread_data *td = &softnic->thread_data[thread_id]; + uint32_t i; + + for (i = 0; i < td->n_pipelines; i++) { + struct pipeline_data *tdp = &td->pipeline_data[i]; + + if (tdp->p != p->p) + continue; + + /* Data plane thread */ + if (i < td->n_pipelines - 1) { + struct rte_pipeline *pipeline_last = + td->p[td->n_pipelines - 1]; + struct pipeline_data *tdp_last = + &td->pipeline_data[td->n_pipelines - 1]; + + td->p[i] = pipeline_last; + memcpy(tdp, tdp_last, sizeof(*tdp)); + } + + td->n_pipelines--; + + /* Pipeline */ + p->enabled = 0; + + break; + } + + if (softnic->params.sc && (td->n_pipelines == 0)) + thread_sc_service_down(softnic, thread_id); + + return 0; + } + + /* Allocate request */ + req = thread_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = THREAD_REQ_PIPELINE_DISABLE; + req->pipeline_disable.p = p->p; + + /* Send request and wait for response */ + rsp = thread_msg_send_recv(softnic, thread_id, req); + + /* Read response */ + status = rsp->status; + + /* Free response */ + thread_msg_free(rsp); + + /* Request completion */ + if (status) + return status; + + p->enabled = 0; + + n_pipelines = softnic_pipeline_thread_count(softnic, thread_id); + if (softnic->params.sc && (n_pipelines == 0)) + thread_sc_service_down(softnic, thread_id); + + return 0; +} + +/** + * Data plane threads: message handling + */ +static inline struct thread_msg_req * +thread_msg_recv(struct rte_ring *msgq_req) +{ + struct thread_msg_req *req; + + int status = rte_ring_sc_dequeue(msgq_req, (void **)&req); + + if (status != 0) + return NULL; + + return req; +} + +static inline void +thread_msg_send(struct rte_ring *msgq_rsp, + struct thread_msg_rsp *rsp) +{ + int status; + + do { + status = rte_ring_sp_enqueue(msgq_rsp, rsp); + } while (status == -ENOBUFS); +} + +static struct thread_msg_rsp * +thread_msg_handle_pipeline_enable(struct softnic_thread_data *t, + struct thread_msg_req *req) +{ + struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req; + struct pipeline_data *p = &t->pipeline_data[t->n_pipelines]; + uint32_t i; + + /* Request */ + t->p[t->n_pipelines] = req->pipeline_enable.p; + + p->p = req->pipeline_enable.p; + for (i = 0; i < req->pipeline_enable.n_tables; i++) + p->table_data[i].a = + req->pipeline_enable.table[i].a; + + p->n_tables = req->pipeline_enable.n_tables; + + p->msgq_req = req->pipeline_enable.msgq_req; + p->msgq_rsp = req->pipeline_enable.msgq_rsp; + p->timer_period = + (rte_get_tsc_hz() * req->pipeline_enable.timer_period_ms) / 1000; + p->time_next = rte_get_tsc_cycles() + p->timer_period; + + t->n_pipelines++; + + /* Response */ + rsp->status = 0; + return rsp; +} + +static struct thread_msg_rsp * +thread_msg_handle_pipeline_disable(struct 
softnic_thread_data *t, + struct thread_msg_req *req) +{ + struct thread_msg_rsp *rsp = (struct thread_msg_rsp *)req; + uint32_t n_pipelines = t->n_pipelines; + struct rte_pipeline *pipeline = req->pipeline_disable.p; + uint32_t i; + + /* find pipeline */ + for (i = 0; i < n_pipelines; i++) { + struct pipeline_data *p = &t->pipeline_data[i]; + + if (p->p != pipeline) + continue; + + if (i < n_pipelines - 1) { + struct rte_pipeline *pipeline_last = + t->p[n_pipelines - 1]; + struct pipeline_data *p_last = + &t->pipeline_data[n_pipelines - 1]; + + t->p[i] = pipeline_last; + memcpy(p, p_last, sizeof(*p)); + } + + t->n_pipelines--; + + rsp->status = 0; + return rsp; + } + + /* should not get here */ + rsp->status = 0; + return rsp; +} + +static void +thread_msg_handle(struct softnic_thread_data *t) +{ + for ( ; ; ) { + struct thread_msg_req *req; + struct thread_msg_rsp *rsp; + + req = thread_msg_recv(t->msgq_req); + if (req == NULL) + break; + + switch (req->type) { + case THREAD_REQ_PIPELINE_ENABLE: + rsp = thread_msg_handle_pipeline_enable(t, req); + break; + + case THREAD_REQ_PIPELINE_DISABLE: + rsp = thread_msg_handle_pipeline_disable(t, req); + break; + + default: + rsp = (struct thread_msg_rsp *)req; + rsp->status = -1; + } + + thread_msg_send(t->msgq_rsp, rsp); + } +} + +/** + * Master thread & data plane threads: message passing + */ +enum pipeline_req_type { + /* Port IN */ + PIPELINE_REQ_PORT_IN_STATS_READ, + PIPELINE_REQ_PORT_IN_ENABLE, + PIPELINE_REQ_PORT_IN_DISABLE, + + /* Port OUT */ + PIPELINE_REQ_PORT_OUT_STATS_READ, + + /* Table */ + PIPELINE_REQ_TABLE_STATS_READ, + PIPELINE_REQ_TABLE_RULE_ADD, + PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT, + PIPELINE_REQ_TABLE_RULE_ADD_BULK, + PIPELINE_REQ_TABLE_RULE_DELETE, + PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT, + PIPELINE_REQ_TABLE_RULE_STATS_READ, + PIPELINE_REQ_TABLE_MTR_PROFILE_ADD, + PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE, + PIPELINE_REQ_TABLE_RULE_MTR_READ, + PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE, + PIPELINE_REQ_TABLE_RULE_TTL_READ, + PIPELINE_REQ_MAX +}; + +struct pipeline_msg_req_port_in_stats_read { + int clear; +}; + +struct pipeline_msg_req_port_out_stats_read { + int clear; +}; + +struct pipeline_msg_req_table_stats_read { + int clear; +}; + +struct pipeline_msg_req_table_rule_add { + struct softnic_table_rule_match match; + struct softnic_table_rule_action action; +}; + +struct pipeline_msg_req_table_rule_add_default { + struct softnic_table_rule_action action; +}; + +struct pipeline_msg_req_table_rule_add_bulk { + struct softnic_table_rule_match *match; + struct softnic_table_rule_action *action; + void **data; + uint32_t n_rules; + int bulk; +}; + +struct pipeline_msg_req_table_rule_delete { + struct softnic_table_rule_match match; +}; + +struct pipeline_msg_req_table_rule_stats_read { + void *data; + int clear; +}; + +struct pipeline_msg_req_table_mtr_profile_add { + uint32_t meter_profile_id; + struct rte_table_action_meter_profile profile; +}; + +struct pipeline_msg_req_table_mtr_profile_delete { + uint32_t meter_profile_id; +}; + +struct pipeline_msg_req_table_rule_mtr_read { + void *data; + uint32_t tc_mask; + int clear; +}; + +struct pipeline_msg_req_table_dscp_table_update { + uint64_t dscp_mask; + struct rte_table_action_dscp_table dscp_table; +}; + +struct pipeline_msg_req_table_rule_ttl_read { + void *data; + int clear; +}; + +struct pipeline_msg_req { + enum pipeline_req_type type; + uint32_t id; /* Port IN, port OUT or table ID */ + + RTE_STD_C11 + union { + struct pipeline_msg_req_port_in_stats_read port_in_stats_read; 
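+ /* The union sizes each request to its largest member, and
+ * pipeline_msg_alloc() reserves RTE_MAX() of the request and response
+ * sizes, so the data plane handlers can recycle the request buffer in
+ * place as the response without allocating on the fast path.
+ */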
+ struct pipeline_msg_req_port_out_stats_read port_out_stats_read; + struct pipeline_msg_req_table_stats_read table_stats_read; + struct pipeline_msg_req_table_rule_add table_rule_add; + struct pipeline_msg_req_table_rule_add_default table_rule_add_default; + struct pipeline_msg_req_table_rule_add_bulk table_rule_add_bulk; + struct pipeline_msg_req_table_rule_delete table_rule_delete; + struct pipeline_msg_req_table_rule_stats_read table_rule_stats_read; + struct pipeline_msg_req_table_mtr_profile_add table_mtr_profile_add; + struct pipeline_msg_req_table_mtr_profile_delete table_mtr_profile_delete; + struct pipeline_msg_req_table_rule_mtr_read table_rule_mtr_read; + struct pipeline_msg_req_table_dscp_table_update table_dscp_table_update; + struct pipeline_msg_req_table_rule_ttl_read table_rule_ttl_read; + }; +}; + +struct pipeline_msg_rsp_port_in_stats_read { + struct rte_pipeline_port_in_stats stats; +}; + +struct pipeline_msg_rsp_port_out_stats_read { + struct rte_pipeline_port_out_stats stats; +}; + +struct pipeline_msg_rsp_table_stats_read { + struct rte_pipeline_table_stats stats; +}; + +struct pipeline_msg_rsp_table_rule_add { + void *data; +}; + +struct pipeline_msg_rsp_table_rule_add_default { + void *data; +}; + +struct pipeline_msg_rsp_table_rule_add_bulk { + uint32_t n_rules; +}; + +struct pipeline_msg_rsp_table_rule_stats_read { + struct rte_table_action_stats_counters stats; +}; + +struct pipeline_msg_rsp_table_rule_mtr_read { + struct rte_table_action_mtr_counters stats; +}; + +struct pipeline_msg_rsp_table_rule_ttl_read { + struct rte_table_action_ttl_counters stats; +}; + +struct pipeline_msg_rsp { + int status; + + RTE_STD_C11 + union { + struct pipeline_msg_rsp_port_in_stats_read port_in_stats_read; + struct pipeline_msg_rsp_port_out_stats_read port_out_stats_read; + struct pipeline_msg_rsp_table_stats_read table_stats_read; + struct pipeline_msg_rsp_table_rule_add table_rule_add; + struct pipeline_msg_rsp_table_rule_add_default table_rule_add_default; + struct pipeline_msg_rsp_table_rule_add_bulk table_rule_add_bulk; + struct pipeline_msg_rsp_table_rule_stats_read table_rule_stats_read; + struct pipeline_msg_rsp_table_rule_mtr_read table_rule_mtr_read; + struct pipeline_msg_rsp_table_rule_ttl_read table_rule_ttl_read; + }; +}; + +/** + * Master thread + */ +static struct pipeline_msg_req * +pipeline_msg_alloc(void) +{ + size_t size = RTE_MAX(sizeof(struct pipeline_msg_req), + sizeof(struct pipeline_msg_rsp)); + + return calloc(1, size); +} + +static void +pipeline_msg_free(struct pipeline_msg_rsp *rsp) +{ + free(rsp); +} + +static struct pipeline_msg_rsp * +pipeline_msg_send_recv(struct pipeline *p, + struct pipeline_msg_req *req) +{ + struct rte_ring *msgq_req = p->msgq_req; + struct rte_ring *msgq_rsp = p->msgq_rsp; + struct pipeline_msg_rsp *rsp; + int status; + + /* send */ + do { + status = rte_ring_sp_enqueue(msgq_req, req); + } while (status == -ENOBUFS); + + /* recv */ + do { + status = rte_ring_sc_dequeue(msgq_rsp, (void **)&rsp); + } while (status != 0); + + return rsp; +} + +int +softnic_pipeline_port_in_stats_read(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t port_id, + struct rte_pipeline_port_in_stats *stats, + int clear) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + stats == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + port_id >= p->n_ports_in) + return -1; + + if 
(!pipeline_is_running(p)) { + status = rte_pipeline_port_in_stats_read(p->p, + port_id, + stats, + clear); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_PORT_IN_STATS_READ; + req->id = port_id; + req->port_in_stats_read.clear = clear; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + if (status) + memcpy(stats, &rsp->port_in_stats_read.stats, sizeof(*stats)); + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_port_in_enable(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t port_id) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + port_id >= p->n_ports_in) + return -1; + + if (!pipeline_is_running(p)) { + status = rte_pipeline_port_in_enable(p->p, port_id); + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_PORT_IN_ENABLE; + req->id = port_id; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_port_in_disable(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t port_id) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + port_id >= p->n_ports_in) + return -1; + + if (!pipeline_is_running(p)) { + status = rte_pipeline_port_in_disable(p->p, port_id); + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_PORT_IN_DISABLE; + req->id = port_id; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_port_out_stats_read(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t port_id, + struct rte_pipeline_port_out_stats *stats, + int clear) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + stats == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + port_id >= p->n_ports_out) + return -1; + + if (!pipeline_is_running(p)) { + status = rte_pipeline_port_out_stats_read(p->p, + port_id, + stats, + clear); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_PORT_OUT_STATS_READ; + req->id = port_id; + req->port_out_stats_read.clear = clear; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + if (status) + memcpy(stats, &rsp->port_out_stats_read.stats, sizeof(*stats)); + + /* Free response */ + 
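+ /* Note that rsp aliases the req allocation: the data plane handler
+ * rewrites the request in place, so this single free releases both.
+ */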
pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_stats_read(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + struct rte_pipeline_table_stats *stats, + int clear) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + stats == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables) + return -1; + + if (!pipeline_is_running(p)) { + status = rte_pipeline_table_stats_read(p->p, + table_id, + stats, + clear); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_STATS_READ; + req->id = table_id; + req->table_stats_read.clear = clear; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + if (status) + memcpy(stats, &rsp->table_stats_read.stats, sizeof(*stats)); + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +static int +match_check(struct softnic_table_rule_match *match, + struct pipeline *p, + uint32_t table_id) +{ + struct softnic_table *table; + + if (match == NULL || + p == NULL || + table_id >= p->n_tables) + return -1; + + table = &p->table[table_id]; + if (match->match_type != table->params.match_type) + return -1; + + switch (match->match_type) { + case TABLE_ACL: + { + struct softnic_table_acl_params *t = &table->params.match.acl; + struct softnic_table_rule_match_acl *r = &match->match.acl; + + if ((r->ip_version && (t->ip_version == 0)) || + ((r->ip_version == 0) && t->ip_version)) + return -1; + + if (r->ip_version) { + if (r->sa_depth > 32 || + r->da_depth > 32) + return -1; + } else { + if (r->sa_depth > 128 || + r->da_depth > 128) + return -1; + } + return 0; + } + + case TABLE_ARRAY: + return 0; + + case TABLE_HASH: + return 0; + + case TABLE_LPM: + { + struct softnic_table_lpm_params *t = &table->params.match.lpm; + struct softnic_table_rule_match_lpm *r = &match->match.lpm; + + if ((r->ip_version && (t->key_size != 4)) || + ((r->ip_version == 0) && (t->key_size != 16))) + return -1; + + if (r->ip_version) { + if (r->depth > 32) + return -1; + } else { + if (r->depth > 128) + return -1; + } + return 0; + } + + case TABLE_STUB: + return -1; + + default: + return -1; + } +} + +static int +action_check(struct softnic_table_rule_action *action, + struct pipeline *p, + uint32_t table_id) +{ + struct softnic_table_action_profile *ap; + + if (action == NULL || + p == NULL || + table_id >= p->n_tables) + return -1; + + ap = p->table[table_id].ap; + if (action->action_mask != ap->params.action_mask) + return -1; + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) { + if (action->fwd.action == RTE_PIPELINE_ACTION_PORT && + action->fwd.id >= p->n_ports_out) + return -1; + + if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE && + action->fwd.id >= p->n_tables) + return -1; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) { + uint32_t tc_mask0 = (1 << ap->params.mtr.n_tc) - 1; + uint32_t tc_mask1 = action->mtr.tc_mask; + + if (tc_mask1 != tc_mask0) + return -1; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) { + uint32_t n_subports_per_port = + ap->params.tm.n_subports_per_port; + uint32_t n_pipes_per_subport = + ap->params.tm.n_pipes_per_subport; + uint32_t subport_id = action->tm.subport_id; + 
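+ /* Illustrative bounds (hypothetical profile values): with
+ * n_subports_per_port = 1 and n_pipes_per_subport = 4096, the only
+ * valid ids are subport_id = 0 and pipe_id in 0..4095.
+ */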
uint32_t pipe_id = action->tm.pipe_id; + + if (subport_id >= n_subports_per_port || + pipe_id >= n_pipes_per_subport) + return -1; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) { + uint64_t encap_mask = ap->params.encap.encap_mask; + enum rte_table_action_encap_type type = action->encap.type; + + if ((encap_mask & (1LLU << type)) == 0) + return -1; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) { + int ip_version0 = ap->params.common.ip_version; + int ip_version1 = action->nat.ip_version; + + if ((ip_version1 && (ip_version0 == 0)) || + ((ip_version1 == 0) && ip_version0)) + return -1; + } + + return 0; +} + +static int +action_default_check(struct softnic_table_rule_action *action, + struct pipeline *p, + uint32_t table_id) +{ + if (action == NULL || + action->action_mask != (1LLU << RTE_TABLE_ACTION_FWD) || + p == NULL || + table_id >= p->n_tables) + return -1; + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) { + if (action->fwd.action == RTE_PIPELINE_ACTION_PORT && + action->fwd.id >= p->n_ports_out) + return -1; + + if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE && + action->fwd.id >= p->n_tables) + return -1; + } + + return 0; +} + +union table_rule_match_low_level { + struct rte_table_acl_rule_add_params acl_add; + struct rte_table_acl_rule_delete_params acl_delete; + struct rte_table_array_key array; + uint8_t hash[TABLE_RULE_MATCH_SIZE_MAX]; + struct rte_table_lpm_key lpm_ipv4; + struct rte_table_lpm_ipv6_key lpm_ipv6; +}; + +static int +match_convert(struct softnic_table_rule_match *mh, + union table_rule_match_low_level *ml, + int add); + +static int +action_convert(struct rte_table_action *a, + struct softnic_table_rule_action *action, + struct rte_pipeline_table_entry *data); + +int +softnic_pipeline_table_rule_add(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + struct softnic_table_rule_match *match, + struct softnic_table_rule_action *action, + void **data) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + match == NULL || + action == NULL || + data == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables || + match_check(match, p, table_id) || + action_check(action, p, table_id)) + return -1; + + if (!pipeline_is_running(p)) { + struct rte_table_action *a = p->table[table_id].a; + union table_rule_match_low_level match_ll; + struct rte_pipeline_table_entry *data_in, *data_out; + int key_found; + uint8_t *buffer; + + buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t)); + if (buffer == NULL) + return -1; + + /* Table match-action rule conversion */ + data_in = (struct rte_pipeline_table_entry *)buffer; + + status = match_convert(match, &match_ll, 1); + if (status) { + free(buffer); + return -1; + } + + status = action_convert(a, action, data_in); + if (status) { + free(buffer); + return -1; + } + + /* Add rule (match, action) to table */ + status = rte_pipeline_table_entry_add(p->p, + table_id, + &match_ll, + data_in, + &key_found, + &data_out); + if (status) { + free(buffer); + return -1; + } + + /* Write Response */ + *data = data_out; + + free(buffer); + return 0; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_RULE_ADD; + req->id = table_id; + memcpy(&req->table_rule_add.match, match, 
sizeof(*match)); + memcpy(&req->table_rule_add.action, action, sizeof(*action)); + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + if (status == 0) + *data = rsp->table_rule_add.data; + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_rule_add_default(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + struct softnic_table_rule_action *action, + void **data) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + action == NULL || + data == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables || + action_default_check(action, p, table_id)) + return -1; + + if (!pipeline_is_running(p)) { + struct rte_pipeline_table_entry *data_in, *data_out; + uint8_t *buffer; + + buffer = calloc(TABLE_RULE_ACTION_SIZE_MAX, sizeof(uint8_t)); + if (buffer == NULL) + return -1; + + /* Apply actions */ + data_in = (struct rte_pipeline_table_entry *)buffer; + + data_in->action = action->fwd.action; + if (action->fwd.action == RTE_PIPELINE_ACTION_PORT) + data_in->port_id = action->fwd.id; + if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE) + data_in->table_id = action->fwd.id; + + /* Add default rule to table */ + status = rte_pipeline_table_default_entry_add(p->p, + table_id, + data_in, + &data_out); + if (status) { + free(buffer); + return -1; + } + + /* Write Response */ + *data = data_out; + + free(buffer); + return 0; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT; + req->id = table_id; + memcpy(&req->table_rule_add_default.action, action, sizeof(*action)); + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + if (status == 0) + *data = rsp->table_rule_add_default.data; + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_rule_add_bulk(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + struct softnic_table_rule_match *match, + struct softnic_table_rule_action *action, + void **data, + uint32_t *n_rules) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + uint32_t i; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + match == NULL || + action == NULL || + data == NULL || + n_rules == NULL || + (*n_rules == 0)) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables) + return -1; + + for (i = 0; i < *n_rules; i++) + if (match_check(match, p, table_id) || + action_check(action, p, table_id)) + return -1; + + if (!pipeline_is_running(p)) { + struct rte_table_action *a = p->table[table_id].a; + union table_rule_match_low_level *match_ll; + uint8_t *action_ll; + void **match_ll_ptr; + struct rte_pipeline_table_entry **action_ll_ptr; + struct rte_pipeline_table_entry **entries_ptr = + (struct rte_pipeline_table_entry **)data; + uint32_t bulk = + (p->table[table_id].params.match_type == TABLE_ACL) ? 
1 : 0; + int *found; + + /* Memory allocation */ + match_ll = calloc(*n_rules, sizeof(union table_rule_match_low_level)); + action_ll = calloc(*n_rules, TABLE_RULE_ACTION_SIZE_MAX); + match_ll_ptr = calloc(*n_rules, sizeof(void *)); + action_ll_ptr = + calloc(*n_rules, sizeof(struct rte_pipeline_table_entry *)); + found = calloc(*n_rules, sizeof(int)); + + if (match_ll == NULL || + action_ll == NULL || + match_ll_ptr == NULL || + action_ll_ptr == NULL || + found == NULL) + goto fail; + + for (i = 0; i < *n_rules; i++) { + match_ll_ptr[i] = (void *)&match_ll[i]; + action_ll_ptr[i] = + (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX]; + } + + /* Rule match conversion */ + for (i = 0; i < *n_rules; i++) { + status = match_convert(&match[i], match_ll_ptr[i], 1); + if (status) + goto fail; + } + + /* Rule action conversion */ + for (i = 0; i < *n_rules; i++) { + status = action_convert(a, &action[i], action_ll_ptr[i]); + if (status) + goto fail; + } + + /* Add rule (match, action) to table */ + if (bulk) { + status = rte_pipeline_table_entry_add_bulk(p->p, + table_id, + match_ll_ptr, + action_ll_ptr, + *n_rules, + found, + entries_ptr); + if (status) + *n_rules = 0; + } else { + for (i = 0; i < *n_rules; i++) { + status = rte_pipeline_table_entry_add(p->p, + table_id, + match_ll_ptr[i], + action_ll_ptr[i], + &found[i], + &entries_ptr[i]); + if (status) { + *n_rules = i; + break; + } + } + } + + /* Free */ + free(found); + free(action_ll_ptr); + free(match_ll_ptr); + free(action_ll); + free(match_ll); + + return status; + +fail: + free(found); + free(action_ll_ptr); + free(match_ll_ptr); + free(action_ll); + free(match_ll); + + *n_rules = 0; + return -1; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_RULE_ADD_BULK; + req->id = table_id; + req->table_rule_add_bulk.match = match; + req->table_rule_add_bulk.action = action; + req->table_rule_add_bulk.data = data; + req->table_rule_add_bulk.n_rules = *n_rules; + req->table_rule_add_bulk.bulk = + (p->table[table_id].params.match_type == TABLE_ACL) ? 
1 : 0; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + if (status == 0) + *n_rules = rsp->table_rule_add_bulk.n_rules; + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_rule_delete(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + struct softnic_table_rule_match *match) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + match == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables || + match_check(match, p, table_id)) + return -1; + + if (!pipeline_is_running(p)) { + union table_rule_match_low_level match_ll; + int key_found; + + status = match_convert(match, &match_ll, 0); + if (status) + return -1; + + status = rte_pipeline_table_entry_delete(p->p, + table_id, + &match_ll, + &key_found, + NULL); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_RULE_DELETE; + req->id = table_id; + memcpy(&req->table_rule_delete.match, match, sizeof(*match)); + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_rule_delete_default(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables) + return -1; + + if (!pipeline_is_running(p)) { + status = rte_pipeline_table_default_entry_delete(p->p, + table_id, + NULL); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT; + req->id = table_id; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_rule_stats_read(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + void *data, + struct rte_table_action_stats_counters *stats, + int clear) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + data == NULL || + stats == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables) + return -1; + + if (!pipeline_is_running(p)) { + struct rte_table_action *a = p->table[table_id].a; + + status = rte_table_action_stats_read(a, + data, + stats, + clear); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_RULE_STATS_READ; + req->id = table_id; + req->table_rule_stats_read.data = data; + req->table_rule_stats_read.clear = clear; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + 
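+ /* Here "data" is the rule handle returned by an earlier
+ * softnic_pipeline_table_rule_add(); the data plane handler passes it
+ * to rte_table_action_stats_read() to locate the per-rule counters.
+ */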
/* Read response */ + status = rsp->status; + if (status) + memcpy(stats, &rsp->table_rule_stats_read.stats, sizeof(*stats)); + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + uint32_t meter_profile_id, + struct rte_table_action_meter_profile *profile) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + struct softnic_table *table; + struct softnic_table_meter_profile *mp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + profile == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables) + return -1; + + table = &p->table[table_id]; + mp = softnic_pipeline_table_meter_profile_find(table, meter_profile_id); + if (mp) + return -1; + + /* Resource Allocation */ + mp = calloc(1, sizeof(struct softnic_table_meter_profile)); + if (mp == NULL) + return -1; + + mp->meter_profile_id = meter_profile_id; + memcpy(&mp->profile, profile, sizeof(mp->profile)); + + if (!pipeline_is_running(p)) { + status = rte_table_action_meter_profile_add(table->a, + meter_profile_id, + profile); + if (status) { + free(mp); + return status; + } + + /* Add profile to the table. */ + TAILQ_INSERT_TAIL(&table->meter_profiles, mp, node); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) { + free(mp); + return -1; + } + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD; + req->id = table_id; + req->table_mtr_profile_add.meter_profile_id = meter_profile_id; + memcpy(&req->table_mtr_profile_add.profile, profile, sizeof(*profile)); + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + if (status == 0) + TAILQ_INSERT_TAIL(&table->meter_profiles, mp, node); + else + free(mp); + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_mtr_profile_delete(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + uint32_t meter_profile_id) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables) + return -1; + + if (!pipeline_is_running(p)) { + struct rte_table_action *a = p->table[table_id].a; + + status = rte_table_action_meter_profile_delete(a, + meter_profile_id); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE; + req->id = table_id; + req->table_mtr_profile_delete.meter_profile_id = meter_profile_id; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_rule_mtr_read(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + void *data, + uint32_t tc_mask, + struct rte_table_action_mtr_counters *stats, + int clear) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + data 
== NULL || + stats == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables) + return -1; + + if (!pipeline_is_running(p)) { + struct rte_table_action *a = p->table[table_id].a; + + status = rte_table_action_meter_read(a, + data, + tc_mask, + stats, + clear); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_RULE_MTR_READ; + req->id = table_id; + req->table_rule_mtr_read.data = data; + req->table_rule_mtr_read.tc_mask = tc_mask; + req->table_rule_mtr_read.clear = clear; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + if (status) + memcpy(stats, &rsp->table_rule_mtr_read.stats, sizeof(*stats)); + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_dscp_table_update(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + uint64_t dscp_mask, + struct rte_table_action_dscp_table *dscp_table) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + dscp_table == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables) + return -1; + + if (!pipeline_is_running(p)) { + struct rte_table_action *a = p->table[table_id].a; + + status = rte_table_action_dscp_table_update(a, + dscp_mask, + dscp_table); + + /* Update table dscp table */ + if (!status) + memcpy(&p->table[table_id].dscp_table, dscp_table, + sizeof(p->table[table_id].dscp_table)); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE; + req->id = table_id; + req->table_dscp_table_update.dscp_mask = dscp_mask; + memcpy(&req->table_dscp_table_update.dscp_table, + dscp_table, sizeof(*dscp_table)); + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + status = rsp->status; + + /* Update table dscp table */ + if (!status) + memcpy(&p->table[table_id].dscp_table, dscp_table, + sizeof(p->table[table_id].dscp_table)); + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +int +softnic_pipeline_table_rule_ttl_read(struct pmd_internals *softnic, + const char *pipeline_name, + uint32_t table_id, + void *data, + struct rte_table_action_ttl_counters *stats, + int clear) +{ + struct pipeline *p; + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + int status; + + /* Check input params */ + if (pipeline_name == NULL || + data == NULL || + stats == NULL) + return -1; + + p = softnic_pipeline_find(softnic, pipeline_name); + if (p == NULL || + table_id >= p->n_tables) + return -1; + + if (!pipeline_is_running(p)) { + struct rte_table_action *a = p->table[table_id].a; + + status = rte_table_action_ttl_read(a, + data, + stats, + clear); + + return status; + } + + /* Allocate request */ + req = pipeline_msg_alloc(); + if (req == NULL) + return -1; + + /* Write request */ + req->type = PIPELINE_REQ_TABLE_RULE_TTL_READ; + req->id = table_id; + req->table_rule_ttl_read.data = data; + req->table_rule_ttl_read.clear = clear; + + /* Send request and wait for response */ + rsp = pipeline_msg_send_recv(p, req); + + /* Read response */ + 
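+ /* The handler fills rsp->table_rule_ttl_read.stats through
+ * rte_table_action_ttl_read() before posting the response.
+ */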
status = rsp->status; + if (status) + memcpy(stats, &rsp->table_rule_ttl_read.stats, sizeof(*stats)); + + /* Free response */ + pipeline_msg_free(rsp); + + return status; +} + +/** + * Data plane threads: message handling + */ +static inline struct pipeline_msg_req * +pipeline_msg_recv(struct rte_ring *msgq_req) +{ + struct pipeline_msg_req *req; + + int status = rte_ring_sc_dequeue(msgq_req, (void **)&req); + + if (status != 0) + return NULL; + + return req; +} + +static inline void +pipeline_msg_send(struct rte_ring *msgq_rsp, + struct pipeline_msg_rsp *rsp) +{ + int status; + + do { + status = rte_ring_sp_enqueue(msgq_rsp, rsp); + } while (status == -ENOBUFS); +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_port_in_stats_read(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t port_id = req->id; + int clear = req->port_in_stats_read.clear; + + rsp->status = rte_pipeline_port_in_stats_read(p->p, + port_id, + &rsp->port_in_stats_read.stats, + clear); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_port_in_enable(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t port_id = req->id; + + rsp->status = rte_pipeline_port_in_enable(p->p, + port_id); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_port_in_disable(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t port_id = req->id; + + rsp->status = rte_pipeline_port_in_disable(p->p, + port_id); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_port_out_stats_read(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t port_id = req->id; + int clear = req->port_out_stats_read.clear; + + rsp->status = rte_pipeline_port_out_stats_read(p->p, + port_id, + &rsp->port_out_stats_read.stats, + clear); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_stats_read(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t port_id = req->id; + int clear = req->table_stats_read.clear; + + rsp->status = rte_pipeline_table_stats_read(p->p, + port_id, + &rsp->table_stats_read.stats, + clear); + + return rsp; +} + +static int +match_convert_ipv6_depth(uint32_t depth, uint32_t *depth32) +{ + if (depth > 128) + return -1; + + switch (depth / 32) { + case 0: + depth32[0] = depth; + depth32[1] = 0; + depth32[2] = 0; + depth32[3] = 0; + return 0; + + case 1: + depth32[0] = 32; + depth32[1] = depth - 32; + depth32[2] = 0; + depth32[3] = 0; + return 0; + + case 2: + depth32[0] = 32; + depth32[1] = 32; + depth32[2] = depth - 64; + depth32[3] = 0; + return 0; + + case 3: + depth32[0] = 32; + depth32[1] = 32; + depth32[2] = 32; + depth32[3] = depth - 96; + return 0; + + case 4: + depth32[0] = 32; + depth32[1] = 32; + depth32[2] = 32; + depth32[3] = 32; + return 0; + + default: + return -1; + } +} + +static int +match_convert(struct softnic_table_rule_match *mh, + union table_rule_match_low_level *ml, + int add) +{ + memset(ml, 0, sizeof(*ml)); + + switch (mh->match_type) { + case TABLE_ACL: + if (mh->match.acl.ip_version) + if (add) { + ml->acl_add.field_value[0].value.u8 = + mh->match.acl.proto; + ml->acl_add.field_value[0].mask_range.u8 = + 
mh->match.acl.proto_mask; + + ml->acl_add.field_value[1].value.u32 = + mh->match.acl.ipv4.sa; + ml->acl_add.field_value[1].mask_range.u32 = + mh->match.acl.sa_depth; + + ml->acl_add.field_value[2].value.u32 = + mh->match.acl.ipv4.da; + ml->acl_add.field_value[2].mask_range.u32 = + mh->match.acl.da_depth; + + ml->acl_add.field_value[3].value.u16 = + mh->match.acl.sp0; + ml->acl_add.field_value[3].mask_range.u16 = + mh->match.acl.sp1; + + ml->acl_add.field_value[4].value.u16 = + mh->match.acl.dp0; + ml->acl_add.field_value[4].mask_range.u16 = + mh->match.acl.dp1; + + ml->acl_add.priority = + (int32_t)mh->match.acl.priority; + } else { + ml->acl_delete.field_value[0].value.u8 = + mh->match.acl.proto; + ml->acl_delete.field_value[0].mask_range.u8 = + mh->match.acl.proto_mask; + + ml->acl_delete.field_value[1].value.u32 = + mh->match.acl.ipv4.sa; + ml->acl_delete.field_value[1].mask_range.u32 = + mh->match.acl.sa_depth; + + ml->acl_delete.field_value[2].value.u32 = + mh->match.acl.ipv4.da; + ml->acl_delete.field_value[2].mask_range.u32 = + mh->match.acl.da_depth; + + ml->acl_delete.field_value[3].value.u16 = + mh->match.acl.sp0; + ml->acl_delete.field_value[3].mask_range.u16 = + mh->match.acl.sp1; + + ml->acl_delete.field_value[4].value.u16 = + mh->match.acl.dp0; + ml->acl_delete.field_value[4].mask_range.u16 = + mh->match.acl.dp1; + } + else + if (add) { + uint32_t *sa32 = + (uint32_t *)mh->match.acl.ipv6.sa; + uint32_t *da32 = + (uint32_t *)mh->match.acl.ipv6.da; + uint32_t sa32_depth[4], da32_depth[4]; + int status; + + status = match_convert_ipv6_depth(mh->match.acl.sa_depth, + sa32_depth); + if (status) + return status; + + status = match_convert_ipv6_depth( + mh->match.acl.da_depth, + da32_depth); + if (status) + return status; + + ml->acl_add.field_value[0].value.u8 = + mh->match.acl.proto; + ml->acl_add.field_value[0].mask_range.u8 = + mh->match.acl.proto_mask; + + ml->acl_add.field_value[1].value.u32 = + rte_be_to_cpu_32(sa32[0]); + ml->acl_add.field_value[1].mask_range.u32 = + sa32_depth[0]; + ml->acl_add.field_value[2].value.u32 = + rte_be_to_cpu_32(sa32[1]); + ml->acl_add.field_value[2].mask_range.u32 = + sa32_depth[1]; + ml->acl_add.field_value[3].value.u32 = + rte_be_to_cpu_32(sa32[2]); + ml->acl_add.field_value[3].mask_range.u32 = + sa32_depth[2]; + ml->acl_add.field_value[4].value.u32 = + rte_be_to_cpu_32(sa32[3]); + ml->acl_add.field_value[4].mask_range.u32 = + sa32_depth[3]; + + ml->acl_add.field_value[5].value.u32 = + rte_be_to_cpu_32(da32[0]); + ml->acl_add.field_value[5].mask_range.u32 = + da32_depth[0]; + ml->acl_add.field_value[6].value.u32 = + rte_be_to_cpu_32(da32[1]); + ml->acl_add.field_value[6].mask_range.u32 = + da32_depth[1]; + ml->acl_add.field_value[7].value.u32 = + rte_be_to_cpu_32(da32[2]); + ml->acl_add.field_value[7].mask_range.u32 = + da32_depth[2]; + ml->acl_add.field_value[8].value.u32 = + rte_be_to_cpu_32(da32[3]); + ml->acl_add.field_value[8].mask_range.u32 = + da32_depth[3]; + + ml->acl_add.field_value[9].value.u16 = + mh->match.acl.sp0; + ml->acl_add.field_value[9].mask_range.u16 = + mh->match.acl.sp1; + + ml->acl_add.field_value[10].value.u16 = + mh->match.acl.dp0; + ml->acl_add.field_value[10].mask_range.u16 = + mh->match.acl.dp1; + + ml->acl_add.priority = + (int32_t)mh->match.acl.priority; + } else { + uint32_t *sa32 = + (uint32_t *)mh->match.acl.ipv6.sa; + uint32_t *da32 = + (uint32_t *)mh->match.acl.ipv6.da; + uint32_t sa32_depth[4], da32_depth[4]; + int status; + + status = match_convert_ipv6_depth(mh->match.acl.sa_depth, + sa32_depth); + if 
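+ /* Worked example: a /70 IPv6 source prefix splits into the per-field
+ * 32-bit depths {32, 32, 6, 0} consumed by the four source fields.
+ */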
(status) + return status; + + status = match_convert_ipv6_depth(mh->match.acl.da_depth, + da32_depth); + if (status) + return status; + + ml->acl_delete.field_value[0].value.u8 = + mh->match.acl.proto; + ml->acl_delete.field_value[0].mask_range.u8 = + mh->match.acl.proto_mask; + + ml->acl_delete.field_value[1].value.u32 = + rte_be_to_cpu_32(sa32[0]); + ml->acl_delete.field_value[1].mask_range.u32 = + sa32_depth[0]; + ml->acl_delete.field_value[2].value.u32 = + rte_be_to_cpu_32(sa32[1]); + ml->acl_delete.field_value[2].mask_range.u32 = + sa32_depth[1]; + ml->acl_delete.field_value[3].value.u32 = + rte_be_to_cpu_32(sa32[2]); + ml->acl_delete.field_value[3].mask_range.u32 = + sa32_depth[2]; + ml->acl_delete.field_value[4].value.u32 = + rte_be_to_cpu_32(sa32[3]); + ml->acl_delete.field_value[4].mask_range.u32 = + sa32_depth[3]; + + ml->acl_delete.field_value[5].value.u32 = + rte_be_to_cpu_32(da32[0]); + ml->acl_delete.field_value[5].mask_range.u32 = + da32_depth[0]; + ml->acl_delete.field_value[6].value.u32 = + rte_be_to_cpu_32(da32[1]); + ml->acl_delete.field_value[6].mask_range.u32 = + da32_depth[1]; + ml->acl_delete.field_value[7].value.u32 = + rte_be_to_cpu_32(da32[2]); + ml->acl_delete.field_value[7].mask_range.u32 = + da32_depth[2]; + ml->acl_delete.field_value[8].value.u32 = + rte_be_to_cpu_32(da32[3]); + ml->acl_delete.field_value[8].mask_range.u32 = + da32_depth[3]; + + ml->acl_delete.field_value[9].value.u16 = + mh->match.acl.sp0; + ml->acl_delete.field_value[9].mask_range.u16 = + mh->match.acl.sp1; + + ml->acl_delete.field_value[10].value.u16 = + mh->match.acl.dp0; + ml->acl_delete.field_value[10].mask_range.u16 = + mh->match.acl.dp1; + } + return 0; + + case TABLE_ARRAY: + ml->array.pos = mh->match.array.pos; + return 0; + + case TABLE_HASH: + memcpy(ml->hash, mh->match.hash.key, sizeof(ml->hash)); + return 0; + + case TABLE_LPM: + if (mh->match.lpm.ip_version) { + ml->lpm_ipv4.ip = mh->match.lpm.ipv4; + ml->lpm_ipv4.depth = mh->match.lpm.depth; + } else { + memcpy(ml->lpm_ipv6.ip, + mh->match.lpm.ipv6, sizeof(ml->lpm_ipv6.ip)); + ml->lpm_ipv6.depth = mh->match.lpm.depth; + } + + return 0; + + default: + return -1; + } +} + +static int +action_convert(struct rte_table_action *a, + struct softnic_table_rule_action *action, + struct rte_pipeline_table_entry *data) +{ + int status; + + /* Apply actions */ + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_FWD)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_FWD, + &action->fwd); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_LB)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_LB, + &action->lb); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_MTR, + &action->mtr); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TM)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_TM, + &action->tm); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_ENCAP, + &action->encap); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_NAT)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_NAT, + &action->nat); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TTL)) { + status = 
rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_TTL, + &action->ttl); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_STATS)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_STATS, + &action->stats); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TIME)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_TIME, + &action->time); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_TAG, + &action->tag); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_DECAP, + &action->decap); + + if (status) + return status; + } + + if (action->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) { + status = rte_table_action_apply(a, + data, + RTE_TABLE_ACTION_SYM_CRYPTO, + &action->sym_crypto); + + if (status) + return status; + } + + return 0; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_rule_add(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + union table_rule_match_low_level match_ll; + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + struct softnic_table_rule_match *match = &req->table_rule_add.match; + struct softnic_table_rule_action *action = &req->table_rule_add.action; + struct rte_pipeline_table_entry *data_in, *data_out; + uint32_t table_id = req->id; + int key_found, status; + struct rte_table_action *a = p->table_data[table_id].a; + + /* Apply actions */ + memset(p->buffer, 0, sizeof(p->buffer)); + data_in = (struct rte_pipeline_table_entry *)p->buffer; + + status = match_convert(match, &match_ll, 1); + if (status) { + rsp->status = -1; + return rsp; + } + + status = action_convert(a, action, data_in); + if (status) { + rsp->status = -1; + return rsp; + } + + status = rte_pipeline_table_entry_add(p->p, + table_id, + &match_ll, + data_in, + &key_found, + &data_out); + if (status) { + rsp->status = -1; + return rsp; + } + + /* Write response */ + rsp->status = 0; + rsp->table_rule_add.data = data_out; + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_rule_add_default(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + struct softnic_table_rule_action *action = &req->table_rule_add_default.action; + struct rte_pipeline_table_entry *data_in, *data_out; + uint32_t table_id = req->id; + int status; + + /* Apply actions */ + memset(p->buffer, 0, sizeof(p->buffer)); + data_in = (struct rte_pipeline_table_entry *)p->buffer; + + data_in->action = action->fwd.action; + if (action->fwd.action == RTE_PIPELINE_ACTION_PORT) + data_in->port_id = action->fwd.id; + if (action->fwd.action == RTE_PIPELINE_ACTION_TABLE) + data_in->table_id = action->fwd.id; + + /* Add default rule to table */ + status = rte_pipeline_table_default_entry_add(p->p, + table_id, + data_in, + &data_out); + if (status) { + rsp->status = -1; + return rsp; + } + + /* Write response */ + rsp->status = 0; + rsp->table_rule_add_default.data = data_out; + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_rule_add_bulk(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + + uint32_t table_id = req->id; + struct softnic_table_rule_match 
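+ /* This handler mirrors the inline bulk-add path taken by the master
+ * thread when the pipeline is not running: convert the high-level
+ * match/action arrays to the low-level table format, then insert in
+ * bulk for ACL tables or one rule at a time otherwise.
+ */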
*match = req->table_rule_add_bulk.match; + struct softnic_table_rule_action *action = req->table_rule_add_bulk.action; + struct rte_pipeline_table_entry **data = + (struct rte_pipeline_table_entry **)req->table_rule_add_bulk.data; + uint32_t n_rules = req->table_rule_add_bulk.n_rules; + uint32_t bulk = req->table_rule_add_bulk.bulk; + + struct rte_table_action *a = p->table_data[table_id].a; + union table_rule_match_low_level *match_ll; + uint8_t *action_ll; + void **match_ll_ptr; + struct rte_pipeline_table_entry **action_ll_ptr; + int *found, status; + uint32_t i; + + /* Memory allocation */ + match_ll = calloc(n_rules, sizeof(union table_rule_match_low_level)); + action_ll = calloc(n_rules, TABLE_RULE_ACTION_SIZE_MAX); + match_ll_ptr = calloc(n_rules, sizeof(void *)); + action_ll_ptr = + calloc(n_rules, sizeof(struct rte_pipeline_table_entry *)); + found = calloc(n_rules, sizeof(int)); + + if (match_ll == NULL || + action_ll == NULL || + match_ll_ptr == NULL || + action_ll_ptr == NULL || + found == NULL) + goto fail; + + for (i = 0; i < n_rules; i++) { + match_ll_ptr[i] = (void *)&match_ll[i]; + action_ll_ptr[i] = + (struct rte_pipeline_table_entry *)&action_ll[i * TABLE_RULE_ACTION_SIZE_MAX]; + } + + /* Rule match conversion */ + for (i = 0; i < n_rules; i++) { + status = match_convert(&match[i], match_ll_ptr[i], 1); + if (status) + goto fail; + } + + /* Rule action conversion */ + for (i = 0; i < n_rules; i++) { + status = action_convert(a, &action[i], action_ll_ptr[i]); + if (status) + goto fail; + } + + /* Add rule (match, action) to table */ + if (bulk) { + status = rte_pipeline_table_entry_add_bulk(p->p, + table_id, + match_ll_ptr, + action_ll_ptr, + n_rules, + found, + data); + if (status) + n_rules = 0; + } else { + for (i = 0; i < n_rules; i++) { + status = rte_pipeline_table_entry_add(p->p, + table_id, + match_ll_ptr[i], + action_ll_ptr[i], + &found[i], + &data[i]); + if (status) { + n_rules = i; + break; + } + } + } + + /* Write response */ + rsp->status = 0; + rsp->table_rule_add_bulk.n_rules = n_rules; + + /* Free */ + free(found); + free(action_ll_ptr); + free(match_ll_ptr); + free(action_ll); + free(match_ll); + + return rsp; + +fail: + free(found); + free(action_ll_ptr); + free(match_ll_ptr); + free(action_ll); + free(match_ll); + + rsp->status = -1; + rsp->table_rule_add_bulk.n_rules = 0; + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_rule_delete(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + union table_rule_match_low_level match_ll; + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + struct softnic_table_rule_match *match = &req->table_rule_delete.match; + uint32_t table_id = req->id; + int key_found, status; + + status = match_convert(match, &match_ll, 0); + if (status) { + rsp->status = -1; + return rsp; + } + + rsp->status = rte_pipeline_table_entry_delete(p->p, + table_id, + &match_ll, + &key_found, + NULL); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_rule_delete_default(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t table_id = req->id; + + rsp->status = rte_pipeline_table_default_entry_delete(p->p, + table_id, + NULL); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_rule_stats_read(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t table_id = req->id; + void 
*data = req->table_rule_stats_read.data; + int clear = req->table_rule_stats_read.clear; + struct rte_table_action *a = p->table_data[table_id].a; + + rsp->status = rte_table_action_stats_read(a, + data, + &rsp->table_rule_stats_read.stats, + clear); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_mtr_profile_add(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t table_id = req->id; + uint32_t meter_profile_id = req->table_mtr_profile_add.meter_profile_id; + struct rte_table_action_meter_profile *profile = + &req->table_mtr_profile_add.profile; + struct rte_table_action *a = p->table_data[table_id].a; + + rsp->status = rte_table_action_meter_profile_add(a, + meter_profile_id, + profile); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_mtr_profile_delete(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t table_id = req->id; + uint32_t meter_profile_id = + req->table_mtr_profile_delete.meter_profile_id; + struct rte_table_action *a = p->table_data[table_id].a; + + rsp->status = rte_table_action_meter_profile_delete(a, + meter_profile_id); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_rule_mtr_read(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t table_id = req->id; + void *data = req->table_rule_mtr_read.data; + uint32_t tc_mask = req->table_rule_mtr_read.tc_mask; + int clear = req->table_rule_mtr_read.clear; + struct rte_table_action *a = p->table_data[table_id].a; + + rsp->status = rte_table_action_meter_read(a, + data, + tc_mask, + &rsp->table_rule_mtr_read.stats, + clear); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_dscp_table_update(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t table_id = req->id; + uint64_t dscp_mask = req->table_dscp_table_update.dscp_mask; + struct rte_table_action_dscp_table *dscp_table = + &req->table_dscp_table_update.dscp_table; + struct rte_table_action *a = p->table_data[table_id].a; + + rsp->status = rte_table_action_dscp_table_update(a, + dscp_mask, + dscp_table); + + return rsp; +} + +static struct pipeline_msg_rsp * +pipeline_msg_handle_table_rule_ttl_read(struct pipeline_data *p, + struct pipeline_msg_req *req) +{ + struct pipeline_msg_rsp *rsp = (struct pipeline_msg_rsp *)req; + uint32_t table_id = req->id; + void *data = req->table_rule_ttl_read.data; + int clear = req->table_rule_ttl_read.clear; + struct rte_table_action *a = p->table_data[table_id].a; + + rsp->status = rte_table_action_ttl_read(a, + data, + &rsp->table_rule_ttl_read.stats, + clear); + + return rsp; +} + +static void +pipeline_msg_handle(struct pipeline_data *p) +{ + for ( ; ; ) { + struct pipeline_msg_req *req; + struct pipeline_msg_rsp *rsp; + + req = pipeline_msg_recv(p->msgq_req); + if (req == NULL) + break; + + switch (req->type) { + case PIPELINE_REQ_PORT_IN_STATS_READ: + rsp = pipeline_msg_handle_port_in_stats_read(p, req); + break; + + case PIPELINE_REQ_PORT_IN_ENABLE: + rsp = pipeline_msg_handle_port_in_enable(p, req); + break; + + case PIPELINE_REQ_PORT_IN_DISABLE: + rsp = pipeline_msg_handle_port_in_disable(p, req); + break; + + case PIPELINE_REQ_PORT_OUT_STATS_READ: + rsp = 
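+ /* This dispatch runs on the data plane lcore from
+ * rte_pmd_softnic_run_internal(), which drains the request ring once
+ * per pipeline timer period (see the time_next bookkeeping below).
+ */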
pipeline_msg_handle_port_out_stats_read(p, req); + break; + + case PIPELINE_REQ_TABLE_STATS_READ: + rsp = pipeline_msg_handle_table_stats_read(p, req); + break; + + case PIPELINE_REQ_TABLE_RULE_ADD: + rsp = pipeline_msg_handle_table_rule_add(p, req); + break; + + case PIPELINE_REQ_TABLE_RULE_ADD_DEFAULT: + rsp = pipeline_msg_handle_table_rule_add_default(p, req); + break; + + case PIPELINE_REQ_TABLE_RULE_ADD_BULK: + rsp = pipeline_msg_handle_table_rule_add_bulk(p, req); + break; + + case PIPELINE_REQ_TABLE_RULE_DELETE: + rsp = pipeline_msg_handle_table_rule_delete(p, req); + break; + + case PIPELINE_REQ_TABLE_RULE_DELETE_DEFAULT: + rsp = pipeline_msg_handle_table_rule_delete_default(p, req); + break; + + case PIPELINE_REQ_TABLE_RULE_STATS_READ: + rsp = pipeline_msg_handle_table_rule_stats_read(p, req); + break; + + case PIPELINE_REQ_TABLE_MTR_PROFILE_ADD: + rsp = pipeline_msg_handle_table_mtr_profile_add(p, req); + break; + + case PIPELINE_REQ_TABLE_MTR_PROFILE_DELETE: + rsp = pipeline_msg_handle_table_mtr_profile_delete(p, req); + break; + + case PIPELINE_REQ_TABLE_RULE_MTR_READ: + rsp = pipeline_msg_handle_table_rule_mtr_read(p, req); + break; + + case PIPELINE_REQ_TABLE_DSCP_TABLE_UPDATE: + rsp = pipeline_msg_handle_table_dscp_table_update(p, req); + break; + + case PIPELINE_REQ_TABLE_RULE_TTL_READ: + rsp = pipeline_msg_handle_table_rule_ttl_read(p, req); + break; + + default: + rsp = (struct pipeline_msg_rsp *)req; + rsp->status = -1; + } + + pipeline_msg_send(p->msgq_rsp, rsp); + } +} + +/** + * Data plane threads: main + */ +static int32_t +rte_pmd_softnic_run_internal(void *arg) +{ + struct rte_eth_dev *dev = arg; + struct pmd_internals *softnic; + struct softnic_thread_data *t; + uint32_t thread_id, j; + + softnic = dev->data->dev_private; + thread_id = rte_lcore_id(); + t = &softnic->thread_data[thread_id]; + t->iter++; + + /* Data Plane */ + for (j = 0; j < t->n_pipelines; j++) + rte_pipeline_run(t->p[j]); + + /* Control Plane */ + if ((t->iter & 0xFLLU) == 0) { + uint64_t time = rte_get_tsc_cycles(); + uint64_t time_next_min = UINT64_MAX; + + if (time < t->time_next_min) + return 0; + + /* Pipeline message queues */ + for (j = 0; j < t->n_pipelines; j++) { + struct pipeline_data *p = + &t->pipeline_data[j]; + uint64_t time_next = p->time_next; + + if (time_next <= time) { + pipeline_msg_handle(p); + rte_pipeline_flush(p->p); + time_next = time + p->timer_period; + p->time_next = time_next; + } + + if (time_next < time_next_min) + time_next_min = time_next; + } + + /* Thread message queues */ + { + uint64_t time_next = t->time_next; + + if (time_next <= time) { + thread_msg_handle(t); + time_next = time + t->timer_period; + t->time_next = time_next; + } + + if (time_next < time_next_min) + time_next_min = time_next; + } + + t->time_next_min = time_next_min; + } + + return 0; +} + +int +rte_pmd_softnic_run(uint16_t port_id) +{ + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0); +#endif + + return (int)rte_pmd_softnic_run_internal(dev); +} diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tm.c b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tm.c new file mode 100644 index 000000000..80a470c9e --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_eth_softnic_tm.c @@ -0,0 +1,3463 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +#include <rte_malloc.h> +#include 
<rte_string_fns.h> + +#include "rte_eth_softnic_internals.h" +#include "rte_eth_softnic.h" + +#define SUBPORT_TC_PERIOD 10 +#define PIPE_TC_PERIOD 40 + +int +softnic_tmgr_init(struct pmd_internals *p) +{ + TAILQ_INIT(&p->tmgr_port_list); + + return 0; +} + +void +softnic_tmgr_free(struct pmd_internals *p) +{ + for ( ; ; ) { + struct softnic_tmgr_port *tmgr_port; + + tmgr_port = TAILQ_FIRST(&p->tmgr_port_list); + if (tmgr_port == NULL) + break; + + TAILQ_REMOVE(&p->tmgr_port_list, tmgr_port, node); + rte_sched_port_free(tmgr_port->s); + free(tmgr_port); + } +} + +struct softnic_tmgr_port * +softnic_tmgr_port_find(struct pmd_internals *p, + const char *name) +{ + struct softnic_tmgr_port *tmgr_port; + + if (name == NULL) + return NULL; + + TAILQ_FOREACH(tmgr_port, &p->tmgr_port_list, node) + if (strcmp(tmgr_port->name, name) == 0) + return tmgr_port; + + return NULL; +} + +struct softnic_tmgr_port * +softnic_tmgr_port_create(struct pmd_internals *p, + const char *name) +{ + struct softnic_tmgr_port *tmgr_port; + struct tm_params *t = &p->soft.tm.params; + struct rte_sched_port *sched; + uint32_t n_subports, subport_id; + + /* Check input params */ + if (name == NULL || + softnic_tmgr_port_find(p, name)) + return NULL; + + /* + * Resource + */ + + /* Is hierarchy frozen? */ + if (p->soft.tm.hierarchy_frozen == 0) + return NULL; + + /* Port */ + sched = rte_sched_port_config(&t->port_params); + if (sched == NULL) + return NULL; + + /* Subport */ + n_subports = t->port_params.n_subports_per_port; + for (subport_id = 0; subport_id < n_subports; subport_id++) { + uint32_t n_pipes_per_subport = + t->subport_params[subport_id].n_pipes_per_subport_enabled; + uint32_t pipe_id; + int status; + + status = rte_sched_subport_config(sched, + subport_id, + &t->subport_params[subport_id]); + if (status) { + rte_sched_port_free(sched); + return NULL; + } + + /* Pipe */ + for (pipe_id = 0; pipe_id < n_pipes_per_subport; pipe_id++) { + int pos = subport_id * TM_MAX_PIPES_PER_SUBPORT + pipe_id; + int profile_id = t->pipe_to_profile[pos]; + + if (profile_id < 0) + continue; + + status = rte_sched_pipe_config(sched, + subport_id, + pipe_id, + profile_id); + if (status) { + rte_sched_port_free(sched); + return NULL; + } + } + } + + /* Node allocation */ + tmgr_port = calloc(1, sizeof(struct softnic_tmgr_port)); + if (tmgr_port == NULL) { + rte_sched_port_free(sched); + return NULL; + } + + /* Node fill in */ + strlcpy(tmgr_port->name, name, sizeof(tmgr_port->name)); + tmgr_port->s = sched; + + /* Node add to list */ + TAILQ_INSERT_TAIL(&p->tmgr_port_list, tmgr_port, node); + + return tmgr_port; +} + +static struct rte_sched_port * +SCHED(struct pmd_internals *p) +{ + struct softnic_tmgr_port *tmgr_port; + + tmgr_port = softnic_tmgr_port_find(p, "TMGR"); + if (tmgr_port == NULL) + return NULL; + + return tmgr_port->s; +} + +void +tm_hierarchy_init(struct pmd_internals *p) +{ + memset(&p->soft.tm, 0, sizeof(p->soft.tm)); + + /* Initialize shaper profile list */ + TAILQ_INIT(&p->soft.tm.h.shaper_profiles); + + /* Initialize shared shaper list */ + TAILQ_INIT(&p->soft.tm.h.shared_shapers); + + /* Initialize wred profile list */ + TAILQ_INIT(&p->soft.tm.h.wred_profiles); + + /* Initialize TM node list */ + TAILQ_INIT(&p->soft.tm.h.nodes); +} + +void +tm_hierarchy_free(struct pmd_internals *p) +{ + /* Remove all nodes*/ + for ( ; ; ) { + struct tm_node *tm_node; + + tm_node = TAILQ_FIRST(&p->soft.tm.h.nodes); + if (tm_node == NULL) + break; + + TAILQ_REMOVE(&p->soft.tm.h.nodes, tm_node, node); + free(tm_node); + } + + 
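+ /* Drain the node list first, then the profile lists the nodes
+  * reference; tm_hierarchy_init() below leaves the hierarchy ready
+  * for reuse.
+  */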
/* Remove all WRED profiles */ + for ( ; ; ) { + struct tm_wred_profile *wred_profile; + + wred_profile = TAILQ_FIRST(&p->soft.tm.h.wred_profiles); + if (wred_profile == NULL) + break; + + TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wred_profile, node); + free(wred_profile); + } + + /* Remove all shared shapers */ + for ( ; ; ) { + struct tm_shared_shaper *shared_shaper; + + shared_shaper = TAILQ_FIRST(&p->soft.tm.h.shared_shapers); + if (shared_shaper == NULL) + break; + + TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, shared_shaper, node); + free(shared_shaper); + } + + /* Remove all shaper profiles */ + for ( ; ; ) { + struct tm_shaper_profile *shaper_profile; + + shaper_profile = TAILQ_FIRST(&p->soft.tm.h.shaper_profiles); + if (shaper_profile == NULL) + break; + + TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, + shaper_profile, node); + free(shaper_profile); + } + + tm_hierarchy_init(p); +} + +static struct tm_shaper_profile * +tm_shaper_profile_search(struct rte_eth_dev *dev, uint32_t shaper_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles; + struct tm_shaper_profile *sp; + + TAILQ_FOREACH(sp, spl, node) + if (shaper_profile_id == sp->shaper_profile_id) + return sp; + + return NULL; +} + +static struct tm_shared_shaper * +tm_shared_shaper_search(struct rte_eth_dev *dev, uint32_t shared_shaper_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_shared_shaper_list *ssl = &p->soft.tm.h.shared_shapers; + struct tm_shared_shaper *ss; + + TAILQ_FOREACH(ss, ssl, node) + if (shared_shaper_id == ss->shared_shaper_id) + return ss; + + return NULL; +} + +static struct tm_wred_profile * +tm_wred_profile_search(struct rte_eth_dev *dev, uint32_t wred_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles; + struct tm_wred_profile *wp; + + TAILQ_FOREACH(wp, wpl, node) + if (wred_profile_id == wp->wred_profile_id) + return wp; + + return NULL; +} + +static struct tm_node * +tm_node_search(struct rte_eth_dev *dev, uint32_t node_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_node_list *nl = &p->soft.tm.h.nodes; + struct tm_node *n; + + TAILQ_FOREACH(n, nl, node) + if (n->node_id == node_id) + return n; + + return NULL; +} + +static struct tm_node * +tm_root_node_present(struct rte_eth_dev *dev) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_node_list *nl = &p->soft.tm.h.nodes; + struct tm_node *n; + + TAILQ_FOREACH(n, nl, node) + if (n->parent_node_id == RTE_TM_NODE_ID_NULL) + return n; + + return NULL; +} + +static uint32_t +tm_node_subport_id(struct rte_eth_dev *dev, struct tm_node *subport_node) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_node_list *nl = &p->soft.tm.h.nodes; + struct tm_node *ns; + uint32_t subport_id; + + subport_id = 0; + TAILQ_FOREACH(ns, nl, node) { + if (ns->level != TM_NODE_LEVEL_SUBPORT) + continue; + + if (ns->node_id == subport_node->node_id) + return subport_id; + + subport_id++; + } + + return UINT32_MAX; +} + +static uint32_t +tm_node_pipe_id(struct rte_eth_dev *dev, struct tm_node *pipe_node) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_node_list *nl = &p->soft.tm.h.nodes; + struct tm_node *np; + uint32_t pipe_id; + + pipe_id = 0; + TAILQ_FOREACH(np, nl, node) { + if (np->level != TM_NODE_LEVEL_PIPE || + np->parent_node_id != pipe_node->parent_node_id) + continue; + + if (np->node_id == pipe_node->node_id) + 
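+ /* Match: a pipe's ID is its rank among the pipes of its subport. */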
return pipe_id; + + pipe_id++; + } + + return UINT32_MAX; +} + +static uint32_t +tm_node_tc_id(struct rte_eth_dev *dev __rte_unused, struct tm_node *tc_node) +{ + return tc_node->priority; +} + +static uint32_t +tm_node_queue_id(struct rte_eth_dev *dev, struct tm_node *queue_node) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_node_list *nl = &p->soft.tm.h.nodes; + struct tm_node *nq; + uint32_t queue_id; + + queue_id = 0; + TAILQ_FOREACH(nq, nl, node) { + if (nq->level != TM_NODE_LEVEL_QUEUE || + nq->parent_node_id != queue_node->parent_node_id) + continue; + + if (nq->node_id == queue_node->node_id) + return queue_id; + + queue_id++; + } + + return UINT32_MAX; +} + +static uint32_t +tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level) +{ + struct pmd_internals *p = dev->data->dev_private; + uint32_t n_queues_max = p->params.tm.n_queues; + uint32_t n_tc_max = + (n_queues_max * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + / RTE_SCHED_QUEUES_PER_PIPE; + uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; + uint32_t n_subports_max = n_pipes_max; + uint32_t n_root_max = 1; + + switch (level) { + case TM_NODE_LEVEL_PORT: + return n_root_max; + case TM_NODE_LEVEL_SUBPORT: + return n_subports_max; + case TM_NODE_LEVEL_PIPE: + return n_pipes_max; + case TM_NODE_LEVEL_TC: + return n_tc_max; + case TM_NODE_LEVEL_QUEUE: + default: + return n_queues_max; + } +} + +/* Traffic manager node type get */ +static int +pmd_tm_node_type_get(struct rte_eth_dev *dev, + uint32_t node_id, + int *is_leaf, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + + if (is_leaf == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + + if (node_id == RTE_TM_NODE_ID_NULL || + (tm_node_search(dev, node_id) == NULL)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + *is_leaf = node_id < p->params.tm.n_queues; + + return 0; +} + +#ifdef RTE_SCHED_RED +#define WRED_SUPPORTED 1 +#else +#define WRED_SUPPORTED 0 +#endif + +#define STATS_MASK_DEFAULT \ + (RTE_TM_STATS_N_PKTS | \ + RTE_TM_STATS_N_BYTES | \ + RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \ + RTE_TM_STATS_N_BYTES_GREEN_DROPPED) + +#define STATS_MASK_QUEUE \ + (STATS_MASK_DEFAULT | \ + RTE_TM_STATS_N_PKTS_QUEUED) + +static const struct rte_tm_capabilities tm_cap = { + .n_nodes_max = UINT32_MAX, + .n_levels_max = TM_NODE_LEVEL_MAX, + + .non_leaf_nodes_identical = 0, + .leaf_nodes_identical = 1, + + .shaper_n_max = UINT32_MAX, + .shaper_private_n_max = UINT32_MAX, + .shaper_private_dual_rate_n_max = 0, + .shaper_private_rate_min = 1, + .shaper_private_rate_max = UINT32_MAX, + + .shaper_shared_n_max = UINT32_MAX, + .shaper_shared_n_nodes_per_shaper_max = UINT32_MAX, + .shaper_shared_n_shapers_per_node_max = 1, + .shaper_shared_dual_rate_n_max = 0, + .shaper_shared_rate_min = 1, + .shaper_shared_rate_max = UINT32_MAX, + + .shaper_pkt_length_adjust_min = RTE_TM_ETH_FRAMING_OVERHEAD_FCS, + .shaper_pkt_length_adjust_max = RTE_TM_ETH_FRAMING_OVERHEAD_FCS, + + .sched_n_children_max = UINT32_MAX, + .sched_sp_n_priorities_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + .sched_wfq_n_children_per_group_max = UINT32_MAX, + .sched_wfq_n_groups_max = 1, + .sched_wfq_weight_max = UINT32_MAX, + + .cman_wred_packet_mode_supported = WRED_SUPPORTED, + .cman_wred_byte_mode_supported = 0, + .cman_head_drop_supported = 0, + .cman_wred_context_n_max = 0, + .cman_wred_context_private_n_max = 0, + 
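+ /* The two WRED context counts above are recomputed per device in
+  * pmd_tm_capabilities_get(); they are nonzero only when RTE_SCHED_RED
+  * (WRED_SUPPORTED) is compiled in.
+  */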
.cman_wred_context_shared_n_max = 0, + .cman_wred_context_shared_n_nodes_per_context_max = 0, + .cman_wred_context_shared_n_contexts_per_node_max = 0, + + .mark_vlan_dei_supported = {0, 0, 0}, + .mark_ip_ecn_tcp_supported = {0, 0, 0}, + .mark_ip_ecn_sctp_supported = {0, 0, 0}, + .mark_ip_dscp_supported = {0, 0, 0}, + + .dynamic_update_mask = 0, + + .stats_mask = STATS_MASK_QUEUE, +}; + +/* Traffic manager capabilities get */ +static int +pmd_tm_capabilities_get(struct rte_eth_dev *dev __rte_unused, + struct rte_tm_capabilities *cap, + struct rte_tm_error *error) +{ + if (cap == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_CAPABILITIES, + NULL, + rte_strerror(EINVAL)); + + memcpy(cap, &tm_cap, sizeof(*cap)); + + cap->n_nodes_max = tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) + + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) + + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) + + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC) + + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE); + + cap->shaper_private_n_max = + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PORT) + + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT) + + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE) + + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_TC); + + cap->shaper_shared_n_max = RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE * + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_SUBPORT); + + cap->shaper_n_max = cap->shaper_private_n_max + + cap->shaper_shared_n_max; + + cap->shaper_shared_n_nodes_per_shaper_max = + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE); + + cap->sched_n_children_max = RTE_MAX( + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_PIPE), + (uint32_t)RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE); + + cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max; + + if (WRED_SUPPORTED) + cap->cman_wred_context_private_n_max = + tm_level_get_max_nodes(dev, TM_NODE_LEVEL_QUEUE); + + cap->cman_wred_context_n_max = cap->cman_wred_context_private_n_max + + cap->cman_wred_context_shared_n_max; + + return 0; +} + +static const struct rte_tm_level_capabilities tm_level_cap[] = { + [TM_NODE_LEVEL_PORT] = { + .n_nodes_max = 1, + .n_nodes_nonleaf_max = 1, + .n_nodes_leaf_max = 0, + .non_leaf_nodes_identical = 1, + .leaf_nodes_identical = 0, + + {.nonleaf = { + .shaper_private_supported = 1, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 1, + .shaper_private_rate_max = UINT32_MAX, + .shaper_shared_n_max = 0, + + .sched_n_children_max = UINT32_MAX, + .sched_sp_n_priorities_max = 1, + .sched_wfq_n_children_per_group_max = UINT32_MAX, + .sched_wfq_n_groups_max = 1, + .sched_wfq_weight_max = 1, + + .stats_mask = STATS_MASK_DEFAULT, + } }, + }, + + [TM_NODE_LEVEL_SUBPORT] = { + .n_nodes_max = UINT32_MAX, + .n_nodes_nonleaf_max = UINT32_MAX, + .n_nodes_leaf_max = 0, + .non_leaf_nodes_identical = 1, + .leaf_nodes_identical = 0, + + {.nonleaf = { + .shaper_private_supported = 1, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 1, + .shaper_private_rate_max = UINT32_MAX, + .shaper_shared_n_max = 0, + + .sched_n_children_max = UINT32_MAX, + .sched_sp_n_priorities_max = 1, + .sched_wfq_n_children_per_group_max = UINT32_MAX, + .sched_wfq_n_groups_max = 1, +#ifdef RTE_SCHED_SUBPORT_TC_OV + .sched_wfq_weight_max = UINT32_MAX, +#else + .sched_wfq_weight_max = 1, +#endif + .stats_mask = STATS_MASK_DEFAULT, + } }, + }, + + [TM_NODE_LEVEL_PIPE] = { + .n_nodes_max = UINT32_MAX, + .n_nodes_nonleaf_max = UINT32_MAX, + .n_nodes_leaf_max = 0, + .non_leaf_nodes_identical = 1, + 
.leaf_nodes_identical = 0, + + {.nonleaf = { + .shaper_private_supported = 1, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 1, + .shaper_private_rate_max = UINT32_MAX, + .shaper_shared_n_max = 0, + + .sched_n_children_max = + RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + .sched_sp_n_priorities_max = + RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + .sched_wfq_n_children_per_group_max = 1, + .sched_wfq_n_groups_max = 0, + .sched_wfq_weight_max = 1, + + .stats_mask = STATS_MASK_DEFAULT, + } }, + }, + + [TM_NODE_LEVEL_TC] = { + .n_nodes_max = UINT32_MAX, + .n_nodes_nonleaf_max = UINT32_MAX, + .n_nodes_leaf_max = 0, + .non_leaf_nodes_identical = 1, + .leaf_nodes_identical = 0, + + {.nonleaf = { + .shaper_private_supported = 1, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 1, + .shaper_private_rate_max = UINT32_MAX, + .shaper_shared_n_max = 1, + + .sched_n_children_max = + RTE_SCHED_BE_QUEUES_PER_PIPE, + .sched_sp_n_priorities_max = 1, + .sched_wfq_n_children_per_group_max = + RTE_SCHED_BE_QUEUES_PER_PIPE, + .sched_wfq_n_groups_max = 1, + .sched_wfq_weight_max = UINT32_MAX, + + .stats_mask = STATS_MASK_DEFAULT, + } }, + }, + + [TM_NODE_LEVEL_QUEUE] = { + .n_nodes_max = UINT32_MAX, + .n_nodes_nonleaf_max = 0, + .n_nodes_leaf_max = UINT32_MAX, + .non_leaf_nodes_identical = 0, + .leaf_nodes_identical = 1, + + {.leaf = { + .shaper_private_supported = 0, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 0, + .shaper_private_rate_max = 0, + .shaper_shared_n_max = 0, + + .cman_head_drop_supported = 0, + .cman_wred_packet_mode_supported = WRED_SUPPORTED, + .cman_wred_byte_mode_supported = 0, + .cman_wred_context_private_supported = WRED_SUPPORTED, + .cman_wred_context_shared_n_max = 0, + + .stats_mask = STATS_MASK_QUEUE, + } }, + }, +}; + +/* Traffic manager level capabilities get */ +static int +pmd_tm_level_capabilities_get(struct rte_eth_dev *dev __rte_unused, + uint32_t level_id, + struct rte_tm_level_capabilities *cap, + struct rte_tm_error *error) +{ + if (cap == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_CAPABILITIES, + NULL, + rte_strerror(EINVAL)); + + if (level_id >= TM_NODE_LEVEL_MAX) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + + memcpy(cap, &tm_level_cap[level_id], sizeof(*cap)); + + switch (level_id) { + case TM_NODE_LEVEL_PORT: + cap->nonleaf.sched_n_children_max = + tm_level_get_max_nodes(dev, + TM_NODE_LEVEL_SUBPORT); + cap->nonleaf.sched_wfq_n_children_per_group_max = + cap->nonleaf.sched_n_children_max; + break; + + case TM_NODE_LEVEL_SUBPORT: + cap->n_nodes_max = tm_level_get_max_nodes(dev, + TM_NODE_LEVEL_SUBPORT); + cap->n_nodes_nonleaf_max = cap->n_nodes_max; + cap->nonleaf.sched_n_children_max = + tm_level_get_max_nodes(dev, + TM_NODE_LEVEL_PIPE); + cap->nonleaf.sched_wfq_n_children_per_group_max = + cap->nonleaf.sched_n_children_max; + break; + + case TM_NODE_LEVEL_PIPE: + cap->n_nodes_max = tm_level_get_max_nodes(dev, + TM_NODE_LEVEL_PIPE); + cap->n_nodes_nonleaf_max = cap->n_nodes_max; + break; + + case TM_NODE_LEVEL_TC: + cap->n_nodes_max = tm_level_get_max_nodes(dev, + TM_NODE_LEVEL_TC); + cap->n_nodes_nonleaf_max = cap->n_nodes_max; + break; + + case TM_NODE_LEVEL_QUEUE: + default: + cap->n_nodes_max = tm_level_get_max_nodes(dev, + TM_NODE_LEVEL_QUEUE); + cap->n_nodes_leaf_max = cap->n_nodes_max; + break; + } + + return 0; +} + +static const struct rte_tm_node_capabilities tm_node_cap[] = { + [TM_NODE_LEVEL_PORT] = { + 
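+ /* Static per-level defaults; pmd_tm_node_capabilities_get() adjusts
+  * the child counts per device for the port and subport levels.
+  */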
.shaper_private_supported = 1, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 1, + .shaper_private_rate_max = UINT32_MAX, + .shaper_shared_n_max = 0, + + {.nonleaf = { + .sched_n_children_max = UINT32_MAX, + .sched_sp_n_priorities_max = 1, + .sched_wfq_n_children_per_group_max = UINT32_MAX, + .sched_wfq_n_groups_max = 1, + .sched_wfq_weight_max = 1, + } }, + + .stats_mask = STATS_MASK_DEFAULT, + }, + + [TM_NODE_LEVEL_SUBPORT] = { + .shaper_private_supported = 1, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 1, + .shaper_private_rate_max = UINT32_MAX, + .shaper_shared_n_max = 0, + + {.nonleaf = { + .sched_n_children_max = UINT32_MAX, + .sched_sp_n_priorities_max = 1, + .sched_wfq_n_children_per_group_max = UINT32_MAX, + .sched_wfq_n_groups_max = 1, + .sched_wfq_weight_max = UINT32_MAX, + } }, + + .stats_mask = STATS_MASK_DEFAULT, + }, + + [TM_NODE_LEVEL_PIPE] = { + .shaper_private_supported = 1, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 1, + .shaper_private_rate_max = UINT32_MAX, + .shaper_shared_n_max = 0, + + {.nonleaf = { + .sched_n_children_max = + RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + .sched_sp_n_priorities_max = + RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + .sched_wfq_n_children_per_group_max = 1, + .sched_wfq_n_groups_max = 0, + .sched_wfq_weight_max = 1, + } }, + + .stats_mask = STATS_MASK_DEFAULT, + }, + + [TM_NODE_LEVEL_TC] = { + .shaper_private_supported = 1, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 1, + .shaper_private_rate_max = UINT32_MAX, + .shaper_shared_n_max = 1, + + {.nonleaf = { + .sched_n_children_max = + RTE_SCHED_BE_QUEUES_PER_PIPE, + .sched_sp_n_priorities_max = 1, + .sched_wfq_n_children_per_group_max = + RTE_SCHED_BE_QUEUES_PER_PIPE, + .sched_wfq_n_groups_max = 1, + .sched_wfq_weight_max = UINT32_MAX, + } }, + + .stats_mask = STATS_MASK_DEFAULT, + }, + + [TM_NODE_LEVEL_QUEUE] = { + .shaper_private_supported = 0, + .shaper_private_dual_rate_supported = 0, + .shaper_private_rate_min = 0, + .shaper_private_rate_max = 0, + .shaper_shared_n_max = 0, + + + {.leaf = { + .cman_head_drop_supported = 0, + .cman_wred_packet_mode_supported = WRED_SUPPORTED, + .cman_wred_byte_mode_supported = 0, + .cman_wred_context_private_supported = WRED_SUPPORTED, + .cman_wred_context_shared_n_max = 0, + } }, + + .stats_mask = STATS_MASK_QUEUE, + }, +}; + +/* Traffic manager node capabilities get */ +static int +pmd_tm_node_capabilities_get(struct rte_eth_dev *dev __rte_unused, + uint32_t node_id, + struct rte_tm_node_capabilities *cap, + struct rte_tm_error *error) +{ + struct tm_node *tm_node; + + if (cap == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_CAPABILITIES, + NULL, + rte_strerror(EINVAL)); + + tm_node = tm_node_search(dev, node_id); + if (tm_node == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + memcpy(cap, &tm_node_cap[tm_node->level], sizeof(*cap)); + + switch (tm_node->level) { + case TM_NODE_LEVEL_PORT: + cap->nonleaf.sched_n_children_max = + tm_level_get_max_nodes(dev, + TM_NODE_LEVEL_SUBPORT); + cap->nonleaf.sched_wfq_n_children_per_group_max = + cap->nonleaf.sched_n_children_max; + break; + + case TM_NODE_LEVEL_SUBPORT: + cap->nonleaf.sched_n_children_max = + tm_level_get_max_nodes(dev, + TM_NODE_LEVEL_PIPE); + cap->nonleaf.sched_wfq_n_children_per_group_max = + cap->nonleaf.sched_n_children_max; + break; + + case TM_NODE_LEVEL_PIPE: + case TM_NODE_LEVEL_TC: + case 
TM_NODE_LEVEL_QUEUE: + default: + break; + } + + return 0; +} + +static int +shaper_profile_check(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + struct tm_shaper_profile *sp; + + /* Shaper profile ID must not be NONE. */ + if (shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Shaper profile must not exist. */ + sp = tm_shaper_profile_search(dev, shaper_profile_id); + if (sp) + return -rte_tm_error_set(error, + EEXIST, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EEXIST)); + + /* Profile must not be NULL. */ + if (profile == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE, + NULL, + rte_strerror(EINVAL)); + + /* Peak rate: non-zero, 32-bit */ + if (profile->peak.rate == 0 || + profile->peak.rate >= UINT32_MAX) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE, + NULL, + rte_strerror(EINVAL)); + + /* Peak size: non-zero, 32-bit */ + if (profile->peak.size == 0 || + profile->peak.size >= UINT32_MAX) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE, + NULL, + rte_strerror(EINVAL)); + + /* Dual-rate profiles are not supported. */ + if (profile->committed.rate != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE, + NULL, + rte_strerror(EINVAL)); + + /* Packet length adjust: 24 bytes */ + if (profile->pkt_length_adjust != RTE_TM_ETH_FRAMING_OVERHEAD_FCS) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN, + NULL, + rte_strerror(EINVAL)); + + return 0; +} + +/* Traffic manager shaper profile add */ +static int +pmd_tm_shaper_profile_add(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_shaper_params *profile, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_shaper_profile_list *spl = &p->soft.tm.h.shaper_profiles; + struct tm_shaper_profile *sp; + int status; + + /* Check input params */ + status = shaper_profile_check(dev, shaper_profile_id, profile, error); + if (status) + return status; + + /* Memory allocation */ + sp = calloc(1, sizeof(struct tm_shaper_profile)); + if (sp == NULL) + return -rte_tm_error_set(error, + ENOMEM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(ENOMEM)); + + /* Fill in */ + sp->shaper_profile_id = shaper_profile_id; + memcpy(&sp->params, profile, sizeof(sp->params)); + + /* Add to list */ + TAILQ_INSERT_TAIL(spl, sp, node); + p->soft.tm.h.n_shaper_profiles++; + + return 0; +} + +/* Traffic manager shaper profile delete */ +static int +pmd_tm_shaper_profile_delete(struct rte_eth_dev *dev, + uint32_t shaper_profile_id, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_shaper_profile *sp; + + /* Check existing */ + sp = tm_shaper_profile_search(dev, shaper_profile_id); + if (sp == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Check unused */ + if (sp->n_users) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EBUSY)); + + /* Remove from list */ + TAILQ_REMOVE(&p->soft.tm.h.shaper_profiles, sp, node); + p->soft.tm.h.n_shaper_profiles--; + free(sp); + + return 0; +} + +static struct tm_node 
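+/* A shared shaper is realized as the rate limit of one traffic class at
+ * subport level; this lookup returns the first TC node referencing the
+ * given shared shaper.
+ */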
* +tm_shared_shaper_get_tc(struct rte_eth_dev *dev, + struct tm_shared_shaper *ss) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_node_list *nl = &p->soft.tm.h.nodes; + struct tm_node *n; + + /* Subport: each TC uses shared shaper */ + TAILQ_FOREACH(n, nl, node) { + if (n->level != TM_NODE_LEVEL_TC || + n->params.n_shared_shapers == 0 || + n->params.shared_shaper_id[0] != ss->shared_shaper_id) + continue; + + return n; + } + + return NULL; +} + +static int +update_subport_tc_rate(struct rte_eth_dev *dev, + struct tm_node *nt, + struct tm_shared_shaper *ss, + struct tm_shaper_profile *sp_new) +{ + struct pmd_internals *p = dev->data->dev_private; + uint32_t tc_id = tm_node_tc_id(dev, nt); + + struct tm_node *np = nt->parent_node; + + struct tm_node *ns = np->parent_node; + uint32_t subport_id = tm_node_subport_id(dev, ns); + + struct rte_sched_subport_params subport_params; + + struct tm_shaper_profile *sp_old = tm_shaper_profile_search(dev, + ss->shaper_profile_id); + + /* Derive new subport configuration. */ + memcpy(&subport_params, + &p->soft.tm.params.subport_params[subport_id], + sizeof(subport_params)); + subport_params.tc_rate[tc_id] = sp_new->params.peak.rate; + + /* Update the subport configuration. */ + if (rte_sched_subport_config(SCHED(p), + subport_id, &subport_params)) + return -1; + + /* Commit changes. */ + sp_old->n_users--; + + ss->shaper_profile_id = sp_new->shaper_profile_id; + sp_new->n_users++; + + memcpy(&p->soft.tm.params.subport_params[subport_id], + &subport_params, + sizeof(subport_params)); + + return 0; +} + +/* Traffic manager shared shaper add/update */ +static int +pmd_tm_shared_shaper_add_update(struct rte_eth_dev *dev, + uint32_t shared_shaper_id, + uint32_t shaper_profile_id, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_shared_shaper *ss; + struct tm_shaper_profile *sp; + struct tm_node *nt; + + /* Shaper profile must be valid. */ + sp = tm_shaper_profile_search(dev, shaper_profile_id); + if (sp == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /** + * Add new shared shaper + */ + ss = tm_shared_shaper_search(dev, shared_shaper_id); + if (ss == NULL) { + struct tm_shared_shaper_list *ssl = + &p->soft.tm.h.shared_shapers; + + /* Hierarchy must not be frozen */ + if (p->soft.tm.hierarchy_frozen) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + /* Memory allocation */ + ss = calloc(1, sizeof(struct tm_shared_shaper)); + if (ss == NULL) + return -rte_tm_error_set(error, + ENOMEM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(ENOMEM)); + + /* Fill in */ + ss->shared_shaper_id = shared_shaper_id; + ss->shaper_profile_id = shaper_profile_id; + + /* Add to list */ + TAILQ_INSERT_TAIL(ssl, ss, node); + p->soft.tm.h.n_shared_shapers++; + + return 0; + } + + /** + * Update existing shared shaper + */ + /* Hierarchy must be frozen (run-time update) */ + if (p->soft.tm.hierarchy_frozen == 0) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + + /* Propagate change. 
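+ * The new profile's peak rate becomes the subport-level rate limit of
+ * the traffic class served by this shared shaper: update_subport_tc_rate()
+ * rewrites tc_rate[tc_id] in the subport params and reconfigures the
+ * subport in the scheduler.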
*/ + nt = tm_shared_shaper_get_tc(dev, ss); + if (update_subport_tc_rate(dev, nt, ss, sp)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + + return 0; +} + +/* Traffic manager shared shaper delete */ +static int +pmd_tm_shared_shaper_delete(struct rte_eth_dev *dev, + uint32_t shared_shaper_id, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_shared_shaper *ss; + + /* Check existing */ + ss = tm_shared_shaper_search(dev, shared_shaper_id); + if (ss == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID, + NULL, + rte_strerror(EINVAL)); + + /* Check unused */ + if (ss->n_users) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID, + NULL, + rte_strerror(EBUSY)); + + /* Remove from list */ + TAILQ_REMOVE(&p->soft.tm.h.shared_shapers, ss, node); + p->soft.tm.h.n_shared_shapers--; + free(ss); + + return 0; +} + +static int +wred_profile_check(struct rte_eth_dev *dev, + uint32_t wred_profile_id, + struct rte_tm_wred_params *profile, + struct rte_tm_error *error) +{ + struct tm_wred_profile *wp; + enum rte_color color; + + /* WRED profile ID must not be NONE. */ + if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* WRED profile must not exist. */ + wp = tm_wred_profile_search(dev, wred_profile_id); + if (wp) + return -rte_tm_error_set(error, + EEXIST, + RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, + NULL, + rte_strerror(EEXIST)); + + /* Profile must not be NULL. */ + if (profile == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_WRED_PROFILE, + NULL, + rte_strerror(EINVAL)); + + /* WRED profile should be in packet mode */ + if (profile->packet_mode == 0) + return -rte_tm_error_set(error, + ENOTSUP, + RTE_TM_ERROR_TYPE_WRED_PROFILE, + NULL, + rte_strerror(ENOTSUP)); + + /* min_th <= max_th, max_th > 0 */ + for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) { + uint32_t min_th = profile->red_params[color].min_th; + uint32_t max_th = profile->red_params[color].max_th; + + if (min_th > max_th || + max_th == 0 || + min_th > UINT16_MAX || + max_th > UINT16_MAX) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_WRED_PROFILE, + NULL, + rte_strerror(EINVAL)); + } + + return 0; +} + +/* Traffic manager WRED profile add */ +static int +pmd_tm_wred_profile_add(struct rte_eth_dev *dev, + uint32_t wred_profile_id, + struct rte_tm_wred_params *profile, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_wred_profile_list *wpl = &p->soft.tm.h.wred_profiles; + struct tm_wred_profile *wp; + int status; + + /* Check input params */ + status = wred_profile_check(dev, wred_profile_id, profile, error); + if (status) + return status; + + /* Memory allocation */ + wp = calloc(1, sizeof(struct tm_wred_profile)); + if (wp == NULL) + return -rte_tm_error_set(error, + ENOMEM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(ENOMEM)); + + /* Fill in */ + wp->wred_profile_id = wred_profile_id; + memcpy(&wp->params, profile, sizeof(wp->params)); + + /* Add to list */ + TAILQ_INSERT_TAIL(wpl, wp, node); + p->soft.tm.h.n_wred_profiles++; + + return 0; +} + +/* Traffic manager WRED profile delete */ +static int +pmd_tm_wred_profile_delete(struct rte_eth_dev *dev, + uint32_t wred_profile_id, + struct rte_tm_error *error) +{ + struct pmd_internals *p = 
dev->data->dev_private; + struct tm_wred_profile *wp; + + /* Check existing */ + wp = tm_wred_profile_search(dev, wred_profile_id); + if (wp == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Check unused */ + if (wp->n_users) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_WRED_PROFILE_ID, + NULL, + rte_strerror(EBUSY)); + + /* Remove from list */ + TAILQ_REMOVE(&p->soft.tm.h.wred_profiles, wp, node); + p->soft.tm.h.n_wred_profiles--; + free(wp); + + return 0; +} + +static int +node_add_check_port(struct rte_eth_dev *dev, + uint32_t node_id, + uint32_t parent_node_id __rte_unused, + uint32_t priority, + uint32_t weight, + uint32_t level_id __rte_unused, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_shaper_profile *sp = tm_shaper_profile_search(dev, + params->shaper_profile_id); + + /* node type: non-leaf */ + if (node_id < p->params.tm.n_queues) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Priority must be 0 */ + if (priority != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PRIORITY, + NULL, + rte_strerror(EINVAL)); + + /* Weight must be 1 */ + if (weight != 1) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, + rte_strerror(EINVAL)); + + /* Shaper must be valid */ + if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE || + sp == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* No shared shapers */ + if (params->n_shared_shapers != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, + NULL, + rte_strerror(EINVAL)); + + /* Number of SP priorities must be 1 */ + if (params->nonleaf.n_sp_priorities != 1) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, + NULL, + rte_strerror(EINVAL)); + + /* Stats */ + if (params->stats_mask & ~STATS_MASK_DEFAULT) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, + NULL, + rte_strerror(EINVAL)); + + return 0; +} + +static int +node_add_check_subport(struct rte_eth_dev *dev, + uint32_t node_id, + uint32_t parent_node_id __rte_unused, + uint32_t priority, + uint32_t weight, + uint32_t level_id __rte_unused, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + + /* node type: non-leaf */ + if (node_id < p->params.tm.n_queues) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Priority must be 0 */ + if (priority != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PRIORITY, + NULL, + rte_strerror(EINVAL)); + + /* Weight must be 1 */ + if (weight != 1) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, + rte_strerror(EINVAL)); + + /* Shaper must be valid */ + if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE || + (!tm_shaper_profile_search(dev, params->shaper_profile_id))) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* No shared shapers */ + if (params->n_shared_shapers != 0) + return -rte_tm_error_set(error, + EINVAL, + 
RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must be 1 */
+ if (params->nonleaf.n_sp_priorities != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_pipe(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority,
+ uint32_t weight __rte_unused,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.tm.n_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Priority must be 0 */
+ if (priority != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* No shared shapers */
+ if (params->n_shared_shapers != 0)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Number of SP priorities must match the number of traffic classes */
+ if (params->nonleaf.n_sp_priorities !=
+ RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* WFQ mode must be byte mode: reject the node if any weight mode
+  * entry selects packet mode
+  */
+ if (params->nonleaf.wfq_weight_mode != NULL &&
+ (params->nonleaf.wfq_weight_mode[0] != 0 ||
+ params->nonleaf.wfq_weight_mode[1] != 0 ||
+ params->nonleaf.wfq_weight_mode[2] != 0 ||
+ params->nonleaf.wfq_weight_mode[3] != 0))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Stats */
+ if (params->stats_mask & ~STATS_MASK_DEFAULT)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ return 0;
+}
+
+static int
+node_add_check_tc(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id __rte_unused,
+ uint32_t priority __rte_unused,
+ uint32_t weight,
+ uint32_t level_id __rte_unused,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+
+ /* node type: non-leaf */
+ if (node_id < p->params.tm.n_queues)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Weight must be 1 */
+ if (weight != 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Shaper must be valid */
+ if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
+ (!tm_shaper_profile_search(dev, params->shaper_profile_id)))
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* Single valid shared shaper */
+ if (params->n_shared_shapers > 1)
+ return -rte_tm_error_set(error,
+ EINVAL,
RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, + NULL, + rte_strerror(EINVAL)); + + if (params->n_shared_shapers == 1 && + (params->shared_shaper_id == NULL || + (!tm_shared_shaper_search(dev, params->shared_shaper_id[0])))) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID, + NULL, + rte_strerror(EINVAL)); + + /* Number of priorities must be 1 */ + if (params->nonleaf.n_sp_priorities != 1) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES, + NULL, + rte_strerror(EINVAL)); + + /* Stats */ + if (params->stats_mask & ~STATS_MASK_DEFAULT) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, + NULL, + rte_strerror(EINVAL)); + + return 0; +} + +static int +node_add_check_queue(struct rte_eth_dev *dev, + uint32_t node_id, + uint32_t parent_node_id __rte_unused, + uint32_t priority, + uint32_t weight __rte_unused, + uint32_t level_id __rte_unused, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + + /* node type: leaf */ + if (node_id >= p->params.tm.n_queues) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Priority must be 0 */ + if (priority != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PRIORITY, + NULL, + rte_strerror(EINVAL)); + + /* No shaper */ + if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* No shared shapers */ + if (params->n_shared_shapers != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS, + NULL, + rte_strerror(EINVAL)); + + /* Congestion management must not be head drop */ + if (params->leaf.cman == RTE_TM_CMAN_HEAD_DROP) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN, + NULL, + rte_strerror(EINVAL)); + + /* Congestion management set to WRED */ + if (params->leaf.cman == RTE_TM_CMAN_WRED) { + uint32_t wred_profile_id = params->leaf.wred.wred_profile_id; + struct tm_wred_profile *wp = tm_wred_profile_search(dev, + wred_profile_id); + + /* WRED profile (for private WRED context) must be valid */ + if (wred_profile_id == RTE_TM_WRED_PROFILE_ID_NONE || + wp == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID, + NULL, + rte_strerror(EINVAL)); + + /* No shared WRED contexts */ + if (params->leaf.wred.n_shared_wred_contexts != 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS, + NULL, + rte_strerror(EINVAL)); + } + + /* Stats */ + if (params->stats_mask & ~STATS_MASK_QUEUE) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS, + NULL, + rte_strerror(EINVAL)); + + return 0; +} + +static int +node_add_check(struct rte_eth_dev *dev, + uint32_t node_id, + uint32_t parent_node_id, + uint32_t priority, + uint32_t weight, + uint32_t level_id, + struct rte_tm_node_params *params, + struct rte_tm_error *error) +{ + struct tm_node *pn; + uint32_t level; + int status; + + /* node_id, parent_node_id: + * -node_id must not be RTE_TM_NODE_ID_NULL + * -node_id must not be in use + * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL): + * -root node must not exist + * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL): + * -parent_node_id must be valid + 
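+ * -the node level is derived from the parent (root is level 0, any other
+ *  node is one level below its parent) and must agree with level_id
+ *  unless level_id is RTE_TM_NODE_LEVEL_ID_ANY
+ *
+ * Illustrative top-down call sequence (sketch only: node IDs are
+ * arbitrary and the params structs are assumed to satisfy the per-level
+ * checks below):
+ *   rte_tm_node_add(port_id, 100, RTE_TM_NODE_ID_NULL, 0, 1,
+ *       TM_NODE_LEVEL_PORT, &port_params, &error);
+ *   rte_tm_node_add(port_id, 200, 100, 0, 1,
+ *       TM_NODE_LEVEL_SUBPORT, &subport_params, &error);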
*/
+ if (node_id == RTE_TM_NODE_ID_NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ if (tm_node_search(dev, node_id))
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_NODE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ pn = NULL;
+ if (tm_root_node_present(dev))
+ return -rte_tm_error_set(error,
+ EEXIST,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EEXIST));
+ } else {
+ pn = tm_node_search(dev, parent_node_id);
+ if (pn == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ /* priority: must be less than the number of traffic classes */
+ if (priority >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PRIORITY,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* weight: must be 1 .. 254 (non-zero and below UINT8_MAX) */
+ if (weight == 0 || weight >= UINT8_MAX)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_WEIGHT,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* level_id: if valid, then
+ * -root node add (parent_node_id is RTE_TM_NODE_ID_NULL):
+ * -level_id must be zero
+ * -non-root node add (parent_node_id is not RTE_TM_NODE_ID_NULL):
+ * -level_id must be parent level ID plus one
+ */
+ level = (pn == NULL) ? 0 : pn->level + 1;
+ if (level_id != RTE_TM_NODE_LEVEL_ID_ANY && level_id != level)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* params: must not be NULL */
+ if (params == NULL)
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_NODE_PARAMS,
+ NULL,
+ rte_strerror(EINVAL));
+
+ /* params: per level checks */
+ switch (level) {
+ case TM_NODE_LEVEL_PORT:
+ status = node_add_check_port(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_SUBPORT:
+ status = node_add_check_subport(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_PIPE:
+ status = node_add_check_pipe(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_TC:
+ status = node_add_check_tc(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ case TM_NODE_LEVEL_QUEUE:
+ status = node_add_check_queue(dev, node_id,
+ parent_node_id, priority, weight, level_id,
+ params, error);
+ if (status)
+ return status;
+ break;
+
+ default:
+ return -rte_tm_error_set(error,
+ EINVAL,
+ RTE_TM_ERROR_TYPE_LEVEL_ID,
+ NULL,
+ rte_strerror(EINVAL));
+ }
+
+ return 0;
+}
+
+/* Traffic manager node add */
+static int
+pmd_tm_node_add(struct rte_eth_dev *dev,
+ uint32_t node_id,
+ uint32_t parent_node_id,
+ uint32_t priority,
+ uint32_t weight,
+ uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct pmd_internals *p = dev->data->dev_private;
+ struct tm_node_list *nl = &p->soft.tm.h.nodes;
+ struct tm_node *n;
+ uint32_t i;
+ int status;
+
+ /* Checks */
+ if (p->soft.tm.hierarchy_frozen)
+ return -rte_tm_error_set(error,
+ EBUSY,
+ RTE_TM_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ rte_strerror(EBUSY));
+
+ status = node_add_check(dev, node_id, parent_node_id, priority, weight,
+ level_id, params, error);
+ if (status)
+ return status;
+
+ /* Memory allocation */
+ n = calloc(1,
sizeof(struct tm_node)); + if (n == NULL) + return -rte_tm_error_set(error, + ENOMEM, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(ENOMEM)); + + /* Fill in */ + n->node_id = node_id; + n->parent_node_id = parent_node_id; + n->priority = priority; + n->weight = weight; + + if (parent_node_id != RTE_TM_NODE_ID_NULL) { + n->parent_node = tm_node_search(dev, parent_node_id); + n->level = n->parent_node->level + 1; + } + + if (params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) + n->shaper_profile = tm_shaper_profile_search(dev, + params->shaper_profile_id); + + if (n->level == TM_NODE_LEVEL_QUEUE && + params->leaf.cman == RTE_TM_CMAN_WRED) + n->wred_profile = tm_wred_profile_search(dev, + params->leaf.wred.wred_profile_id); + + memcpy(&n->params, params, sizeof(n->params)); + + /* Add to list */ + TAILQ_INSERT_TAIL(nl, n, node); + p->soft.tm.h.n_nodes++; + + /* Update dependencies */ + if (n->parent_node) + n->parent_node->n_children++; + + if (n->shaper_profile) + n->shaper_profile->n_users++; + + for (i = 0; i < params->n_shared_shapers; i++) { + struct tm_shared_shaper *ss; + + ss = tm_shared_shaper_search(dev, params->shared_shaper_id[i]); + ss->n_users++; + } + + if (n->wred_profile) + n->wred_profile->n_users++; + + p->soft.tm.h.n_tm_nodes[n->level]++; + + return 0; +} + +/* Traffic manager node delete */ +static int +pmd_tm_node_delete(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_node *n; + uint32_t i; + + /* Check hierarchy changes are currently allowed */ + if (p->soft.tm.hierarchy_frozen) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + /* Check existing */ + n = tm_node_search(dev, node_id); + if (n == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Check unused */ + if (n->n_children) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EBUSY)); + + /* Update dependencies */ + p->soft.tm.h.n_tm_nodes[n->level]--; + + if (n->wred_profile) + n->wred_profile->n_users--; + + for (i = 0; i < n->params.n_shared_shapers; i++) { + struct tm_shared_shaper *ss; + + ss = tm_shared_shaper_search(dev, + n->params.shared_shaper_id[i]); + ss->n_users--; + } + + if (n->shaper_profile) + n->shaper_profile->n_users--; + + if (n->parent_node) + n->parent_node->n_children--; + + /* Remove from list */ + TAILQ_REMOVE(&p->soft.tm.h.nodes, n, node); + p->soft.tm.h.n_nodes--; + free(n); + + return 0; +} + + +static void +pipe_profile_build(struct rte_eth_dev *dev, + struct tm_node *np, + struct rte_sched_pipe_params *pp) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_hierarchy *h = &p->soft.tm.h; + struct tm_node_list *nl = &h->nodes; + struct tm_node *nt, *nq; + + memset(pp, 0, sizeof(*pp)); + + /* Pipe */ + pp->tb_rate = np->shaper_profile->params.peak.rate; + pp->tb_size = np->shaper_profile->params.peak.size; + + /* Traffic Class (TC) */ + pp->tc_period = PIPE_TC_PERIOD; + + pp->tc_ov_weight = np->weight; + + TAILQ_FOREACH(nt, nl, node) { + uint32_t queue_id = 0; + + if (nt->level != TM_NODE_LEVEL_TC || + nt->parent_node_id != np->node_id) + continue; + + pp->tc_rate[nt->priority] = + nt->shaper_profile->params.peak.rate; + + /* Queue */ + TAILQ_FOREACH(nq, nl, node) { + + if (nq->level != TM_NODE_LEVEL_QUEUE || + nq->parent_node_id != nt->node_id) + continue; + + if (nt->priority == 
RTE_SCHED_TRAFFIC_CLASS_BE) + pp->wrr_weights[queue_id] = nq->weight; + + queue_id++; + } + } +} + +static int +pipe_profile_free_exists(struct rte_eth_dev *dev, + uint32_t *pipe_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_params *t = &p->soft.tm.params; + + if (t->n_pipe_profiles < TM_MAX_PIPE_PROFILE) { + *pipe_profile_id = t->n_pipe_profiles; + return 1; + } + + return 0; +} + +static int +pipe_profile_exists(struct rte_eth_dev *dev, + struct rte_sched_pipe_params *pp, + uint32_t *pipe_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_params *t = &p->soft.tm.params; + uint32_t i; + + for (i = 0; i < t->n_pipe_profiles; i++) + if (memcmp(&t->pipe_profiles[i], pp, sizeof(*pp)) == 0) { + if (pipe_profile_id) + *pipe_profile_id = i; + return 1; + } + + return 0; +} + +static void +pipe_profile_install(struct rte_eth_dev *dev, + struct rte_sched_pipe_params *pp, + uint32_t pipe_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_params *t = &p->soft.tm.params; + + memcpy(&t->pipe_profiles[pipe_profile_id], pp, sizeof(*pp)); + t->n_pipe_profiles++; +} + +static void +pipe_profile_mark(struct rte_eth_dev *dev, + uint32_t subport_id, + uint32_t pipe_id, + uint32_t pipe_profile_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_hierarchy *h = &p->soft.tm.h; + struct tm_params *t = &p->soft.tm.params; + uint32_t n_pipes_per_subport, pos; + + n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / + h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; + pos = subport_id * n_pipes_per_subport + pipe_id; + + t->pipe_to_profile[pos] = pipe_profile_id; +} + +static struct rte_sched_pipe_params * +pipe_profile_get(struct rte_eth_dev *dev, struct tm_node *np) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_hierarchy *h = &p->soft.tm.h; + struct tm_params *t = &p->soft.tm.params; + uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / + h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; + + uint32_t subport_id = tm_node_subport_id(dev, np->parent_node); + uint32_t pipe_id = tm_node_pipe_id(dev, np); + + uint32_t pos = subport_id * n_pipes_per_subport + pipe_id; + uint32_t pipe_profile_id = t->pipe_to_profile[pos]; + + return &t->pipe_profiles[pipe_profile_id]; +} + +static int +pipe_profiles_generate(struct rte_eth_dev *dev) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_hierarchy *h = &p->soft.tm.h; + struct tm_node_list *nl = &h->nodes; + struct tm_node *ns, *np; + uint32_t subport_id; + + /* Objective: Fill in the following fields in struct tm_params: + * - pipe_profiles + * - n_pipe_profiles + * - pipe_to_profile + */ + + subport_id = 0; + TAILQ_FOREACH(ns, nl, node) { + uint32_t pipe_id; + + if (ns->level != TM_NODE_LEVEL_SUBPORT) + continue; + + pipe_id = 0; + TAILQ_FOREACH(np, nl, node) { + struct rte_sched_pipe_params pp; + uint32_t pos; + + if (np->level != TM_NODE_LEVEL_PIPE || + np->parent_node_id != ns->node_id) + continue; + + pipe_profile_build(dev, np, &pp); + + if (!pipe_profile_exists(dev, &pp, &pos)) { + if (!pipe_profile_free_exists(dev, &pos)) + return -1; + + pipe_profile_install(dev, &pp, pos); + } + + pipe_profile_mark(dev, subport_id, pipe_id, pos); + + pipe_id++; + } + + subport_id++; + } + + return 0; +} + +static struct tm_wred_profile * +tm_tc_wred_profile_get(struct rte_eth_dev *dev, uint32_t tc_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_hierarchy *h = &p->soft.tm.h; + struct tm_node_list *nl = &h->nodes; + struct 
tm_node *nq; + + TAILQ_FOREACH(nq, nl, node) { + if (nq->level != TM_NODE_LEVEL_QUEUE || + nq->parent_node->priority != tc_id) + continue; + + return nq->wred_profile; + } + + return NULL; +} + +#ifdef RTE_SCHED_RED + +static void +wred_profiles_set(struct rte_eth_dev *dev, uint32_t subport_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct rte_sched_subport_params *pp = + &p->soft.tm.params.subport_params[subport_id]; + + uint32_t tc_id; + enum rte_color color; + + for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc_id++) + for (color = RTE_COLOR_GREEN; color < RTE_COLORS; color++) { + struct rte_red_params *dst = + &pp->red_params[tc_id][color]; + struct tm_wred_profile *src_wp = + tm_tc_wred_profile_get(dev, tc_id); + struct rte_tm_red_params *src = + &src_wp->params.red_params[color]; + + memcpy(dst, src, sizeof(*dst)); + } +} + +#else + +#define wred_profiles_set(dev, subport_id) + +#endif + +static struct tm_shared_shaper * +tm_tc_shared_shaper_get(struct rte_eth_dev *dev, struct tm_node *tc_node) +{ + return (tc_node->params.n_shared_shapers) ? + tm_shared_shaper_search(dev, + tc_node->params.shared_shaper_id[0]) : + NULL; +} + +static struct tm_shared_shaper * +tm_subport_tc_shared_shaper_get(struct rte_eth_dev *dev, + struct tm_node *subport_node, + uint32_t tc_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_node_list *nl = &p->soft.tm.h.nodes; + struct tm_node *n; + + TAILQ_FOREACH(n, nl, node) { + if (n->level != TM_NODE_LEVEL_TC || + n->parent_node->parent_node_id != + subport_node->node_id || + n->priority != tc_id) + continue; + + return tm_tc_shared_shaper_get(dev, n); + } + + return NULL; +} + +static int +hierarchy_commit_check(struct rte_eth_dev *dev, struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_hierarchy *h = &p->soft.tm.h; + struct tm_node_list *nl = &h->nodes; + struct tm_shared_shaper_list *ssl = &h->shared_shapers; + struct tm_wred_profile_list *wpl = &h->wred_profiles; + struct tm_node *nr = tm_root_node_present(dev), *ns, *np, *nt, *nq; + struct tm_shared_shaper *ss; + + uint32_t n_pipes_per_subport; + + /* Root node exists. */ + if (nr == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + + /* There is at least one subport, max is not exceeded. */ + if (nr->n_children == 0 || nr->n_children > TM_MAX_SUBPORTS) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + + /* There is at least one pipe. */ + if (h->n_tm_nodes[TM_NODE_LEVEL_PIPE] == 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_LEVEL_ID, + NULL, + rte_strerror(EINVAL)); + + /* Number of pipes is the same for all subports. Maximum number of pipes + * per subport is not exceeded. 
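+ * The per-subport pipe count is inferred as total pipes divided by total
+ * subports; each subport is then required to have exactly that many
+ * children.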
+ */ + n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / + h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; + + if (n_pipes_per_subport > TM_MAX_PIPES_PER_SUBPORT) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + + TAILQ_FOREACH(ns, nl, node) { + if (ns->level != TM_NODE_LEVEL_SUBPORT) + continue; + + if (ns->n_children != n_pipes_per_subport) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + + /* Each pipe has exactly 13 TCs, with exactly one TC for each priority */ + TAILQ_FOREACH(np, nl, node) { + uint32_t mask = 0, mask_expected = + RTE_LEN2MASK(RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE, + uint32_t); + + if (np->level != TM_NODE_LEVEL_PIPE) + continue; + + if (np->n_children != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + + TAILQ_FOREACH(nt, nl, node) { + if (nt->level != TM_NODE_LEVEL_TC || + nt->parent_node_id != np->node_id) + continue; + + mask |= 1 << nt->priority; + } + + if (mask != mask_expected) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + + /** Each strict priority TC has exactly one packet queue, while the + * lowest priority TC (Best-effort) has 4 queues. + */ + TAILQ_FOREACH(nt, nl, node) { + if (nt->level != TM_NODE_LEVEL_TC) + continue; + + if (nt->n_children != 1 && nt->n_children != RTE_SCHED_BE_QUEUES_PER_PIPE) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + + /** + * Shared shapers: + * - For each TC #i, all pipes in the same subport use the same + * shared shaper (or no shared shaper) for their TC #i. + * - Each shared shaper needs to have at least one user. All its + * users have to be TC nodes with the same priority and the same + * subport. + */ + TAILQ_FOREACH(ns, nl, node) { + struct tm_shared_shaper *s[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + uint32_t id; + + if (ns->level != TM_NODE_LEVEL_SUBPORT) + continue; + + for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) + s[id] = tm_subport_tc_shared_shaper_get(dev, ns, id); + + TAILQ_FOREACH(nt, nl, node) { + struct tm_shared_shaper *subport_ss, *tc_ss; + + if (nt->level != TM_NODE_LEVEL_TC || + nt->parent_node->parent_node_id != + ns->node_id) + continue; + + subport_ss = s[nt->priority]; + tc_ss = tm_tc_shared_shaper_get(dev, nt); + + if (subport_ss == NULL && tc_ss == NULL) + continue; + + if ((subport_ss == NULL && tc_ss != NULL) || + (subport_ss != NULL && tc_ss == NULL) || + subport_ss->shared_shaper_id != + tc_ss->shared_shaper_id) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + } + + TAILQ_FOREACH(ss, ssl, node) { + struct tm_node *nt_any = tm_shared_shaper_get_tc(dev, ss); + uint32_t n_users = 0; + + if (nt_any != NULL) + TAILQ_FOREACH(nt, nl, node) { + if (nt->level != TM_NODE_LEVEL_TC || + nt->priority != nt_any->priority || + nt->parent_node->parent_node_id != + nt_any->parent_node->parent_node_id) + continue; + + n_users++; + } + + if (ss->n_users == 0 || ss->n_users != n_users) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + + /* Not too many pipe profiles. 
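*/

pipe_profiles_generate() above deduplicates pipe configurations through a search-or-install pattern: a candidate profile is matched byte-for-byte against the installed set and only claims a new slot in the fixed-size table when no identical entry exists. A standalone sketch of that pattern, with a hypothetical simplified profile type in place of struct rte_sched_pipe_params and TM_MAX_PIPE_PROFILE:

#include <string.h>

#define MAX_PROFILES 64 /* stand-in for TM_MAX_PIPE_PROFILE */

struct profile { unsigned int data[8]; };

static struct profile table[MAX_PROFILES];
static unsigned int n_profiles;

/* Reuse an identical installed profile when possible; otherwise install
 * the candidate into the next free slot. Returns 0 and sets *id on
 * success, -1 when the table is full.
 */
static int
profile_find_or_install(const struct profile *p, unsigned int *id)
{
	unsigned int i;

	for (i = 0; i < n_profiles; i++)
		if (memcmp(&table[i], p, sizeof(*p)) == 0) {
			*id = i;
			return 0;
		}

	if (n_profiles >= MAX_PROFILES)
		return -1; /* mirrors pipe_profile_free_exists() failing */

	table[n_profiles] = *p;
	*id = n_profiles;
	n_profiles++;
	return 0;
}

/* Not too many pipe profiles: generation must fit the fixed-size table.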
*/ + if (pipe_profiles_generate(dev)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + + /** + * WRED (when used, i.e. at least one WRED profile defined): + * -Each WRED profile must have at least one user. + * -All leaf nodes must have their private WRED context enabled. + * -For each TC #i, all leaf nodes must use the same WRED profile + * for their private WRED context. + */ + if (h->n_wred_profiles) { + struct tm_wred_profile *wp; + struct tm_wred_profile *w[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + uint32_t id; + + TAILQ_FOREACH(wp, wpl, node) + if (wp->n_users == 0) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + + for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) { + w[id] = tm_tc_wred_profile_get(dev, id); + + if (w[id] == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + + TAILQ_FOREACH(nq, nl, node) { + uint32_t id; + + if (nq->level != TM_NODE_LEVEL_QUEUE) + continue; + + id = nq->parent_node->priority; + + if (nq->wred_profile == NULL || + nq->wred_profile->wred_profile_id != + w[id]->wred_profile_id) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } + } + + return 0; +} + +static void +hierarchy_blueprints_create(struct rte_eth_dev *dev) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_params *t = &p->soft.tm.params; + struct tm_hierarchy *h = &p->soft.tm.h; + + struct tm_node_list *nl = &h->nodes; + struct tm_node *root = tm_root_node_present(dev), *n; + + uint32_t subport_id; + + t->port_params = (struct rte_sched_port_params) { + .name = dev->data->name, + .socket = dev->data->numa_node, + .rate = root->shaper_profile->params.peak.rate, + .mtu = dev->data->mtu, + .frame_overhead = + root->shaper_profile->params.pkt_length_adjust, + .n_subports_per_port = root->n_children, + .n_pipes_per_subport = TM_MAX_PIPES_PER_SUBPORT, + }; + + subport_id = 0; + TAILQ_FOREACH(n, nl, node) { + uint64_t tc_rate[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]; + uint32_t i; + + if (n->level != TM_NODE_LEVEL_SUBPORT) + continue; + + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) { + struct tm_shared_shaper *ss; + struct tm_shaper_profile *sp; + + ss = tm_subport_tc_shared_shaper_get(dev, n, i); + sp = (ss) ? 
tm_shaper_profile_search(dev, + ss->shaper_profile_id) : + n->shaper_profile; + tc_rate[i] = sp->params.peak.rate; + } + + t->subport_params[subport_id] = + (struct rte_sched_subport_params) { + .tb_rate = n->shaper_profile->params.peak.rate, + .tb_size = n->shaper_profile->params.peak.size, + + .tc_rate = {tc_rate[0], + tc_rate[1], + tc_rate[2], + tc_rate[3], + tc_rate[4], + tc_rate[5], + tc_rate[6], + tc_rate[7], + tc_rate[8], + tc_rate[9], + tc_rate[10], + tc_rate[11], + tc_rate[12], + }, + .tc_period = SUBPORT_TC_PERIOD, + .n_pipes_per_subport_enabled = + h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / + h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT], + .qsize = {p->params.tm.qsize[0], + p->params.tm.qsize[1], + p->params.tm.qsize[2], + p->params.tm.qsize[3], + p->params.tm.qsize[4], + p->params.tm.qsize[5], + p->params.tm.qsize[6], + p->params.tm.qsize[7], + p->params.tm.qsize[8], + p->params.tm.qsize[9], + p->params.tm.qsize[10], + p->params.tm.qsize[11], + p->params.tm.qsize[12], + }, + .pipe_profiles = t->pipe_profiles, + .n_pipe_profiles = t->n_pipe_profiles, + .n_max_pipe_profiles = TM_MAX_PIPE_PROFILE, + }; + wred_profiles_set(dev, subport_id); + subport_id++; + } +} + +/* Traffic manager hierarchy commit */ +static int +pmd_tm_hierarchy_commit(struct rte_eth_dev *dev, + int clear_on_fail, + struct rte_tm_error *error) +{ + struct pmd_internals *p = dev->data->dev_private; + int status; + + /* Checks */ + if (p->soft.tm.hierarchy_frozen) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + status = hierarchy_commit_check(dev, error); + if (status) { + if (clear_on_fail) + tm_hierarchy_free(p); + + return status; + } + + /* Create blueprints */ + hierarchy_blueprints_create(dev); + + /* Freeze hierarchy */ + p->soft.tm.hierarchy_frozen = 1; + + return 0; +} + +#ifdef RTE_SCHED_SUBPORT_TC_OV + +static int +update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight) +{ + struct pmd_internals *p = dev->data->dev_private; + uint32_t pipe_id = tm_node_pipe_id(dev, np); + + struct tm_node *ns = np->parent_node; + uint32_t subport_id = tm_node_subport_id(dev, ns); + + struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np); + struct rte_sched_pipe_params profile1; + uint32_t pipe_profile_id; + + /* Derive new pipe profile. */ + memcpy(&profile1, profile0, sizeof(profile1)); + profile1.tc_ov_weight = (uint8_t)weight; + + /* Since implementation does not allow adding more pipe profiles after + * port configuration, the pipe configuration can be successfully + * updated only if the new profile is also part of the existing set of + * pipe profiles. + */ + if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0) + return -1; + + /* Update the pipe profile used by the current pipe. */ + if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id, + (int32_t)pipe_profile_id)) + return -1; + + /* Commit changes. 
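*/

pmd_tm_hierarchy_commit() above validates the staged hierarchy, builds the rte_sched blueprints, and then freezes the hierarchy, so later changes are only possible through the dedicated update entry points. A hedged sketch of the application-side view of that step, using the public rte_tm API that dispatches into the ops table at the end of this file:

#include <rte_tm.h>

static int
commit_hierarchy(uint16_t port_id)
{
	struct rte_tm_error error;

	/* clear_on_fail = 1: drop the staged hierarchy if validation
	 * (hierarchy_commit_check) rejects it.
	 */
	return rte_tm_hierarchy_commit(port_id, 1, &error);
}

/* Record the new profile id for this pipe and commit the weight.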
*/ + pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id); + np->weight = weight; + + return 0; +} + +#endif + +static int +update_queue_weight(struct rte_eth_dev *dev, + struct tm_node *nq, uint32_t weight) +{ + struct pmd_internals *p = dev->data->dev_private; + uint32_t queue_id = tm_node_queue_id(dev, nq); + + struct tm_node *nt = nq->parent_node; + + struct tm_node *np = nt->parent_node; + uint32_t pipe_id = tm_node_pipe_id(dev, np); + + struct tm_node *ns = np->parent_node; + uint32_t subport_id = tm_node_subport_id(dev, ns); + + uint32_t pipe_be_queue_id = + queue_id - RTE_SCHED_TRAFFIC_CLASS_BE; + + struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np); + struct rte_sched_pipe_params profile1; + uint32_t pipe_profile_id; + + /* Derive new pipe profile. */ + memcpy(&profile1, profile0, sizeof(profile1)); + profile1.wrr_weights[pipe_be_queue_id] = (uint8_t)weight; + + /* Since implementation does not allow adding more pipe profiles after + * port configuration, the pipe configuration can be successfully + * updated only if the new profile is also part of the existing set + * of pipe profiles. + */ + if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0) + return -1; + + /* Update the pipe profile used by the current pipe. */ + if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id, + (int32_t)pipe_profile_id)) + return -1; + + /* Commit changes. */ + pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id); + nq->weight = weight; + + return 0; +} + +/* Traffic manager node parent update */ +static int +pmd_tm_node_parent_update(struct rte_eth_dev *dev, + uint32_t node_id, + uint32_t parent_node_id, + uint32_t priority, + uint32_t weight, + struct rte_tm_error *error) +{ + struct tm_node *n; + + /* Port must be started and TM used. */ + if (dev->data->dev_started == 0 && (tm_used(dev) == 0)) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + /* Node must be valid */ + n = tm_node_search(dev, node_id); + if (n == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Parent node must be the same */ + if (n->parent_node_id != parent_node_id) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Priority must be the same */ + if (n->priority != priority) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_PRIORITY, + NULL, + rte_strerror(EINVAL)); + + /* weight: must be 1 .. 
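254, since the check below rejects UINT8_MAX. */

update_queue_weight() above relies on the fixed queue layout of a pipe: with the scheduler constants RTE_SCHED_TRAFFIC_CLASS_BE == 12 and RTE_SCHED_BE_QUEUES_PER_PIPE == 4, queue ids 12..15 inside a pipe are the best-effort WRR queues, so subtracting the best-effort traffic class index yields the wrr_weights[] slot 0..3. A small self-checking example of that arithmetic, with the constants restated locally:

#include <assert.h>

static void
be_queue_slot_example(void)
{
	const unsigned int tc_be = 12; /* RTE_SCHED_TRAFFIC_CLASS_BE */
	unsigned int queue_id;

	for (queue_id = 12; queue_id <= 15; queue_id++) {
		unsigned int wrr_slot = queue_id - tc_be;

		assert(wrr_slot <= 3); /* indexes wrr_weights[0..3] */
	}
}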
+ if (weight == 0 || weight >= UINT8_MAX) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, + rte_strerror(EINVAL)); + + switch (n->level) { + case TM_NODE_LEVEL_PORT: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, + rte_strerror(EINVAL)); + /* fall-through */ + case TM_NODE_LEVEL_SUBPORT: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, + rte_strerror(EINVAL)); + /* fall-through */ + case TM_NODE_LEVEL_PIPE: +#ifdef RTE_SCHED_SUBPORT_TC_OV + if (update_pipe_weight(dev, n, weight)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; +#else + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, + rte_strerror(EINVAL)); +#endif + /* fall-through */ + case TM_NODE_LEVEL_TC: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, + rte_strerror(EINVAL)); + /* fall-through */ + case TM_NODE_LEVEL_QUEUE: + /* fall-through */ + default: + if (update_queue_weight(dev, n, weight)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; + } +} + +static int +update_subport_rate(struct rte_eth_dev *dev, + struct tm_node *ns, + struct tm_shaper_profile *sp) +{ + struct pmd_internals *p = dev->data->dev_private; + uint32_t subport_id = tm_node_subport_id(dev, ns); + + struct rte_sched_subport_params subport_params; + + /* Derive new subport configuration. */ + memcpy(&subport_params, + &p->soft.tm.params.subport_params[subport_id], + sizeof(subport_params)); + subport_params.tb_rate = sp->params.peak.rate; + subport_params.tb_size = sp->params.peak.size; + + /* Update the subport configuration. */ + if (rte_sched_subport_config(SCHED(p), subport_id, + &subport_params)) + return -1; + + /* Commit changes. */ + ns->shaper_profile->n_users--; + + ns->shaper_profile = sp; + ns->params.shaper_profile_id = sp->shaper_profile_id; + sp->n_users++; + + memcpy(&p->soft.tm.params.subport_params[subport_id], + &subport_params, + sizeof(subport_params)); + + return 0; +} + +static int +update_pipe_rate(struct rte_eth_dev *dev, + struct tm_node *np, + struct tm_shaper_profile *sp) +{ + struct pmd_internals *p = dev->data->dev_private; + uint32_t pipe_id = tm_node_pipe_id(dev, np); + + struct tm_node *ns = np->parent_node; + uint32_t subport_id = tm_node_subport_id(dev, ns); + + struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np); + struct rte_sched_pipe_params profile1; + uint32_t pipe_profile_id; + + /* Derive new pipe profile. */ + memcpy(&profile1, profile0, sizeof(profile1)); + profile1.tb_rate = sp->params.peak.rate; + profile1.tb_size = sp->params.peak.size; + + /* Since implementation does not allow adding more pipe profiles after + * port configuration, the pipe configuration can be successfully + * updated only if the new profile is also part of the existing set of + * pipe profiles. + */ + if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0) + return -1; + + /* Update the pipe profile used by the current pipe. */ + if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id, + (int32_t)pipe_profile_id)) + return -1; + + /* Commit changes. 
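*/

The parent-update handler above only accepts weight changes: the parent node id and priority must be passed back unchanged, and the new weight must land in the accepted 1..254 range. A hypothetical caller updating a node's weight through the public API (node and parent ids are placeholders):

#include <rte_tm.h>

static int
set_node_weight(uint16_t port_id, uint32_t node_id,
	uint32_t parent_node_id, uint32_t priority, uint32_t new_weight)
{
	struct rte_tm_error error;

	/* In this driver, only pipe-level (TC oversubscription weight)
	 * and queue-level (WRR weight) nodes accept this update.
	 */
	return rte_tm_node_parent_update(port_id, node_id,
		parent_node_id, priority, new_weight, &error);
}

/* Record the new profile id and move the shaper profile references.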
*/ + pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id); + np->shaper_profile->n_users--; + np->shaper_profile = sp; + np->params.shaper_profile_id = sp->shaper_profile_id; + sp->n_users++; + + return 0; +} + +static int +update_tc_rate(struct rte_eth_dev *dev, + struct tm_node *nt, + struct tm_shaper_profile *sp) +{ + struct pmd_internals *p = dev->data->dev_private; + uint32_t tc_id = tm_node_tc_id(dev, nt); + + struct tm_node *np = nt->parent_node; + uint32_t pipe_id = tm_node_pipe_id(dev, np); + + struct tm_node *ns = np->parent_node; + uint32_t subport_id = tm_node_subport_id(dev, ns); + + struct rte_sched_pipe_params *profile0 = pipe_profile_get(dev, np); + struct rte_sched_pipe_params profile1; + uint32_t pipe_profile_id; + + /* Derive new pipe profile. */ + memcpy(&profile1, profile0, sizeof(profile1)); + profile1.tc_rate[tc_id] = sp->params.peak.rate; + + /* Since implementation does not allow adding more pipe profiles after + * port configuration, the pipe configuration can be successfully + * updated only if the new profile is also part of the existing set of + * pipe profiles. + */ + if (pipe_profile_exists(dev, &profile1, &pipe_profile_id) == 0) + return -1; + + /* Update the pipe profile used by the current pipe. */ + if (rte_sched_pipe_config(SCHED(p), subport_id, pipe_id, + (int32_t)pipe_profile_id)) + return -1; + + /* Commit changes. */ + pipe_profile_mark(dev, subport_id, pipe_id, pipe_profile_id); + nt->shaper_profile->n_users--; + nt->shaper_profile = sp; + nt->params.shaper_profile_id = sp->shaper_profile_id; + sp->n_users++; + + return 0; +} + +/* Traffic manager node shaper update */ +static int +pmd_tm_node_shaper_update(struct rte_eth_dev *dev, + uint32_t node_id, + uint32_t shaper_profile_id, + struct rte_tm_error *error) +{ + struct tm_node *n; + struct tm_shaper_profile *sp; + + /* Port must be started and TM used. */ + if (dev->data->dev_started == 0 && (tm_used(dev) == 0)) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + /* Node must be valid */ + n = tm_node_search(dev, node_id); + if (n == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + /* Shaper profile must be valid. 
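*/

update_subport_rate(), update_pipe_rate() and update_tc_rate() above all follow the same shape: derive a new configuration from the target shaper profile, push it into the running scheduler, then move the n_users reference counts. A hedged application-side sketch of driving that path, registering a profile and then pointing a node at it (ids, rate and bucket size are placeholders):

#include <rte_tm.h>

static int
retune_node_rate(uint16_t port_id, uint32_t node_id,
	uint32_t profile_id, uint64_t rate /* bytes per second */)
{
	struct rte_tm_shaper_params sp = {
		.peak = { .rate = rate, .size = 1000000 },
	};
	struct rte_tm_error error;
	int status;

	status = rte_tm_shaper_profile_add(port_id, profile_id, &sp, &error);
	if (status)
		return status;

	return rte_tm_node_shaper_update(port_id, node_id, profile_id,
		&error);
}

/* The target shaper profile must already be registered.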
*/ + sp = tm_shaper_profile_search(dev, shaper_profile_id); + if (sp == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_SHAPER_PROFILE, + NULL, + rte_strerror(EINVAL)); + + switch (n->level) { + case TM_NODE_LEVEL_PORT: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + /* fall-through */ + case TM_NODE_LEVEL_SUBPORT: + if (update_subport_rate(dev, n, sp)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; + /* fall-through */ + case TM_NODE_LEVEL_PIPE: + if (update_pipe_rate(dev, n, sp)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; + /* fall-through */ + case TM_NODE_LEVEL_TC: + if (update_tc_rate(dev, n, sp)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; + /* fall-through */ + case TM_NODE_LEVEL_QUEUE: + /* fall-through */ + default: + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + } +} + +static inline uint32_t +tm_port_queue_id(struct rte_eth_dev *dev, + uint32_t port_subport_id, + uint32_t subport_pipe_id, + uint32_t pipe_tc_id, + uint32_t tc_queue_id) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_hierarchy *h = &p->soft.tm.h; + uint32_t n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] / + h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; + + uint32_t port_pipe_id = + port_subport_id * n_pipes_per_subport + subport_pipe_id; + + uint32_t port_queue_id = + port_pipe_id * RTE_SCHED_QUEUES_PER_PIPE + pipe_tc_id + tc_queue_id; + + return port_queue_id; +} + +static int +read_port_stats(struct rte_eth_dev *dev, + struct tm_node *nr, + struct rte_tm_node_stats *stats, + uint64_t *stats_mask, + int clear) +{ + struct pmd_internals *p = dev->data->dev_private; + struct tm_hierarchy *h = &p->soft.tm.h; + uint32_t n_subports_per_port = h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT]; + uint32_t subport_id; + + for (subport_id = 0; subport_id < n_subports_per_port; subport_id++) { + struct rte_sched_subport_stats s; + uint32_t tc_ov, id; + + /* Stats read */ + int status = rte_sched_subport_read_stats(SCHED(p), + subport_id, + &s, + &tc_ov); + if (status) + return status; + + /* Stats accumulate */ + for (id = 0; id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; id++) { + nr->stats.n_pkts += + s.n_pkts_tc[id] - s.n_pkts_tc_dropped[id]; + nr->stats.n_bytes += + s.n_bytes_tc[id] - s.n_bytes_tc_dropped[id]; + nr->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += + s.n_pkts_tc_dropped[id]; + nr->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += + s.n_bytes_tc_dropped[id]; + } + } + + /* Stats copy */ + if (stats) + memcpy(stats, &nr->stats, sizeof(*stats)); + + if (stats_mask) + *stats_mask = STATS_MASK_DEFAULT; + + /* Stats clear */ + if (clear) + memset(&nr->stats, 0, sizeof(nr->stats)); + + return 0; +} + +static int +read_subport_stats(struct rte_eth_dev *dev, + struct tm_node *ns, + struct rte_tm_node_stats *stats, + uint64_t *stats_mask, + int clear) +{ + struct pmd_internals *p = dev->data->dev_private; + uint32_t subport_id = tm_node_subport_id(dev, ns); + struct rte_sched_subport_stats s; + uint32_t tc_ov, tc_id; + + /* Stats read */ + int status = rte_sched_subport_read_stats(SCHED(p), + subport_id, + &s, + &tc_ov); + if (status) + return status; + + /* Stats accumulate */ + for (tc_id = 0; tc_id < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; 
tc_id++) { + ns->stats.n_pkts += + s.n_pkts_tc[tc_id] - s.n_pkts_tc_dropped[tc_id]; + ns->stats.n_bytes += + s.n_bytes_tc[tc_id] - s.n_bytes_tc_dropped[tc_id]; + ns->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += + s.n_pkts_tc_dropped[tc_id]; + ns->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += + s.n_bytes_tc_dropped[tc_id]; + } + + /* Stats copy */ + if (stats) + memcpy(stats, &ns->stats, sizeof(*stats)); + + if (stats_mask) + *stats_mask = STATS_MASK_DEFAULT; + + /* Stats clear */ + if (clear) + memset(&ns->stats, 0, sizeof(ns->stats)); + + return 0; +} + +static int +read_pipe_stats(struct rte_eth_dev *dev, + struct tm_node *np, + struct rte_tm_node_stats *stats, + uint64_t *stats_mask, + int clear) +{ + struct pmd_internals *p = dev->data->dev_private; + + uint32_t pipe_id = tm_node_pipe_id(dev, np); + + struct tm_node *ns = np->parent_node; + uint32_t subport_id = tm_node_subport_id(dev, ns); + uint32_t tc_id, queue_id; + uint32_t i; + + /* Stats read */ + for (i = 0; i < RTE_SCHED_QUEUES_PER_PIPE; i++) { + struct rte_sched_queue_stats s; + uint16_t qlen; + + if (i < RTE_SCHED_TRAFFIC_CLASS_BE) { + /* Strict priority TC: one queue per TC, so the + * queue index within the TC is always 0. + */ + tc_id = i; + queue_id = 0; + } else { + tc_id = RTE_SCHED_TRAFFIC_CLASS_BE; + queue_id = i - tc_id; + } + + uint32_t qid = tm_port_queue_id(dev, + subport_id, + pipe_id, + tc_id, + queue_id); + + int status = rte_sched_queue_read_stats(SCHED(p), + qid, + &s, + &qlen); + if (status) + return status; + + /* Stats accumulate */ + np->stats.n_pkts += s.n_pkts - s.n_pkts_dropped; + np->stats.n_bytes += s.n_bytes - s.n_bytes_dropped; + np->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped; + np->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += + s.n_bytes_dropped; + np->stats.leaf.n_pkts_queued = qlen; + } + + /* Stats copy */ + if (stats) + memcpy(stats, &np->stats, sizeof(*stats)); + + if (stats_mask) + *stats_mask = STATS_MASK_DEFAULT; + + /* Stats clear */ + if (clear) + memset(&np->stats, 0, sizeof(np->stats)); + + return 0; +} + +static int +read_tc_stats(struct rte_eth_dev *dev, + struct tm_node *nt, + struct rte_tm_node_stats *stats, + uint64_t *stats_mask, + int clear) +{ + struct pmd_internals *p = dev->data->dev_private; + + uint32_t tc_id = tm_node_tc_id(dev, nt); + + struct tm_node *np = nt->parent_node; + uint32_t pipe_id = tm_node_pipe_id(dev, np); + + struct tm_node *ns = np->parent_node; + uint32_t subport_id = tm_node_subport_id(dev, ns); + struct rte_sched_queue_stats s; + uint32_t qid, i; + uint16_t qlen; + int status; + + /* Stats read */ + if (tc_id < RTE_SCHED_TRAFFIC_CLASS_BE) { + qid = tm_port_queue_id(dev, + subport_id, + pipe_id, + tc_id, + 0); + + status = rte_sched_queue_read_stats(SCHED(p), + qid, + &s, + &qlen); + if (status) + return status; + + /* Stats accumulate */ + nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped; + nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped; + nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped; + nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += + s.n_bytes_dropped; + nt->stats.leaf.n_pkts_queued = qlen; + } else { + for (i = 0; i < RTE_SCHED_BE_QUEUES_PER_PIPE; i++) { + qid = tm_port_queue_id(dev, + subport_id, + pipe_id, + tc_id, + i); + + status = rte_sched_queue_read_stats(SCHED(p), + qid, + &s, + &qlen); + if (status) + return status; + + /* Stats accumulate */ + nt->stats.n_pkts += s.n_pkts - s.n_pkts_dropped; + nt->stats.n_bytes += s.n_bytes - s.n_bytes_dropped; + nt->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += + s.n_pkts_dropped; + nt->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += + 
s.n_bytes_dropped; + nt->stats.leaf.n_pkts_queued = qlen; + } + } + + /* Stats copy */ + if (stats) + memcpy(stats, &nt->stats, sizeof(*stats)); + + if (stats_mask) + *stats_mask = STATS_MASK_DEFAULT; + + /* Stats clear */ + if (clear) + memset(&nt->stats, 0, sizeof(nt->stats)); + + return 0; +} + +static int +read_queue_stats(struct rte_eth_dev *dev, + struct tm_node *nq, + struct rte_tm_node_stats *stats, + uint64_t *stats_mask, + int clear) +{ + struct pmd_internals *p = dev->data->dev_private; + struct rte_sched_queue_stats s; + uint16_t qlen; + + uint32_t queue_id = tm_node_queue_id(dev, nq); + + struct tm_node *nt = nq->parent_node; + uint32_t tc_id = tm_node_tc_id(dev, nt); + + struct tm_node *np = nt->parent_node; + uint32_t pipe_id = tm_node_pipe_id(dev, np); + + struct tm_node *ns = np->parent_node; + uint32_t subport_id = tm_node_subport_id(dev, ns); + + /* Stats read */ + uint32_t qid = tm_port_queue_id(dev, + subport_id, + pipe_id, + tc_id, + queue_id); + + int status = rte_sched_queue_read_stats(SCHED(p), + qid, + &s, + &qlen); + if (status) + return status; + + /* Stats accumulate */ + nq->stats.n_pkts += s.n_pkts - s.n_pkts_dropped; + nq->stats.n_bytes += s.n_bytes - s.n_bytes_dropped; + nq->stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN] += s.n_pkts_dropped; + nq->stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN] += + s.n_bytes_dropped; + nq->stats.leaf.n_pkts_queued = qlen; + + /* Stats copy */ + if (stats) + memcpy(stats, &nq->stats, sizeof(*stats)); + + if (stats_mask) + *stats_mask = STATS_MASK_QUEUE; + + /* Stats clear */ + if (clear) + memset(&nq->stats, 0, sizeof(nq->stats)); + + return 0; +} + +/* Traffic manager read stats counters for specific node */ +static int +pmd_tm_node_stats_read(struct rte_eth_dev *dev, + uint32_t node_id, + struct rte_tm_node_stats *stats, + uint64_t *stats_mask, + int clear, + struct rte_tm_error *error) +{ + struct tm_node *n; + + /* Port must be started and TM used. 
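*/

read_queue_stats() above resolves a leaf node to a flat scheduler queue id via tm_port_queue_id(), which lays queues out 16 per pipe: one queue for each of the 12 strict priority TCs followed by 4 best-effort queues. A worked, self-checking example of that arithmetic under a hypothetical layout of 8 pipes per subport:

#include <assert.h>

static void
flat_queue_id_example(void)
{
	const unsigned int queues_per_pipe = 16; /* RTE_SCHED_QUEUES_PER_PIPE */
	const unsigned int pipes_per_subport = 8; /* hypothetical layout */

	/* Subport 2, pipe 3, best-effort TC (index 12), BE queue 2. */
	unsigned int port_pipe_id = 2 * pipes_per_subport + 3; /* 19 */
	unsigned int qid = port_pipe_id * queues_per_pipe + 12 + 2;

	assert(qid == 318);
}

/* Reject the stats read only when the port is stopped and TM is unused.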
*/ + if (dev->data->dev_started == 0 && (tm_used(dev) == 0)) + return -rte_tm_error_set(error, + EBUSY, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EBUSY)); + + /* Node must be valid */ + n = tm_node_search(dev, node_id); + if (n == NULL) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_ID, + NULL, + rte_strerror(EINVAL)); + + switch (n->level) { + case TM_NODE_LEVEL_PORT: + if (read_port_stats(dev, n, stats, stats_mask, clear)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; + + case TM_NODE_LEVEL_SUBPORT: + if (read_subport_stats(dev, n, stats, stats_mask, clear)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; + + case TM_NODE_LEVEL_PIPE: + if (read_pipe_stats(dev, n, stats, stats_mask, clear)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; + + case TM_NODE_LEVEL_TC: + if (read_tc_stats(dev, n, stats, stats_mask, clear)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; + + case TM_NODE_LEVEL_QUEUE: + default: + if (read_queue_stats(dev, n, stats, stats_mask, clear)) + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_UNSPECIFIED, + NULL, + rte_strerror(EINVAL)); + return 0; + } +} + +const struct rte_tm_ops pmd_tm_ops = { + .node_type_get = pmd_tm_node_type_get, + .capabilities_get = pmd_tm_capabilities_get, + .level_capabilities_get = pmd_tm_level_capabilities_get, + .node_capabilities_get = pmd_tm_node_capabilities_get, + + .wred_profile_add = pmd_tm_wred_profile_add, + .wred_profile_delete = pmd_tm_wred_profile_delete, + .shared_wred_context_add_update = NULL, + .shared_wred_context_delete = NULL, + + .shaper_profile_add = pmd_tm_shaper_profile_add, + .shaper_profile_delete = pmd_tm_shaper_profile_delete, + .shared_shaper_add_update = pmd_tm_shared_shaper_add_update, + .shared_shaper_delete = pmd_tm_shared_shaper_delete, + + .node_add = pmd_tm_node_add, + .node_delete = pmd_tm_node_delete, + .node_suspend = NULL, + .node_resume = NULL, + .hierarchy_commit = pmd_tm_hierarchy_commit, + + .node_parent_update = pmd_tm_node_parent_update, + .node_shaper_update = pmd_tm_node_shaper_update, + .node_shared_shaper_update = NULL, + .node_stats_update = NULL, + .node_wfq_weight_mode_update = NULL, + .node_cman_update = NULL, + .node_wred_context_update = NULL, + .node_shared_wred_context_update = NULL, + + .node_stats_read = pmd_tm_node_stats_read, +}; diff --git a/src/spdk/dpdk/drivers/net/softnic/rte_pmd_softnic_version.map b/src/spdk/dpdk/drivers/net/softnic/rte_pmd_softnic_version.map new file mode 100644 index 000000000..50f113d5a --- /dev/null +++ b/src/spdk/dpdk/drivers/net/softnic/rte_pmd_softnic_version.map @@ -0,0 +1,13 @@ +DPDK_20.0 { + global: + + rte_pmd_softnic_run; + + local: *; +}; + +EXPERIMENTAL { + global: + + rte_pmd_softnic_manage; +};
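The node stats entry point wired into pmd_tm_ops above is reached through the generic rte_tm API. A hypothetical caller polling one node's counters and clearing them after the read (the node id is a placeholder):

#include <rte_tm.h>

static int
poll_node_stats(uint16_t port_id, uint32_t node_id)
{
	struct rte_tm_node_stats stats;
	uint64_t stats_mask;
	struct rte_tm_error error;

	/* clear = 1: counters are reset after they are copied out. */
	return rte_tm_node_stats_read(port_id, node_id, &stats,
		&stats_mask, 1, &error);
}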