| field | value | date |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000 |
| commit | e6918187568dbd01842d8d1d2c808ce16a894239 (patch) | |
| tree | 64f88b554b444a49f656b6c656111a145cbbaa28 /src/spdk/dpdk/app/test-pmd | |
| parent | Initial commit. (diff) | |
| download | ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz, ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip | |
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/app/test-pmd')
30 files changed, 46031 insertions, 0 deletions
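Among the files added by this diff, app/test-pmd/bpf_cmd.c registers the testpmd `bpf-load` and `bpf-unload` commands, which hand the `.text` section of an ELF object to rte_bpf_eth_rx_elf_load()/rte_bpf_eth_tx_elf_load() as a per-queue callback. As a hypothetical illustration (not part of the commit below), the sketch shows the kind of callback such a command could install, assuming the librte_bpf convention that a zero return value drops the packet; the function name and filter logic are invented for the example.

```c
/*
 * Hypothetical illustration (not part of this commit): a filter of the
 * kind testpmd's "bpf-load" command could install on an RX queue.
 * bpf_cmd.c loads the ".text" section of the given ELF file; with the
 * default argument type (RTE_BPF_ARG_PTR) the program receives a pointer
 * to the packet data, and a zero return value is assumed to drop the
 * packet.  Built for the BPF target, e.g. "clang -O2 -target bpf -c".
 */
#include <stdint.h>

uint64_t
keep_ipv4_only(void *pkt)
{
	const uint8_t *data = pkt;

	/* Ethertype occupies bytes 12-13 of the Ethernet header; 0x0800 is IPv4. */
	return data[12] == 0x08 && data[13] == 0x00;
}
```

From the testpmd prompt it would then be loaded with something like `bpf-load rx 0 0 J ./keep_ipv4.o` (the object file name here is illustrative).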
diff --git a/src/spdk/dpdk/app/test-pmd/Makefile b/src/spdk/dpdk/app/test-pmd/Makefile new file mode 100644 index 000000000..ea818de22 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/Makefile @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2010-2015 Intel Corporation + +include $(RTE_SDK)/mk/rte.vars.mk + +ifeq ($(CONFIG_RTE_TEST_PMD),y) + +# +# library name +# +APP = testpmd + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -Wno-deprecated-declarations + +# +# all source are stored in SRCS-y +# +SRCS-y := testpmd.c +SRCS-y += parameters.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_mtr.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_tm.c +SRCS-y += config.c +SRCS-y += iofwd.c +SRCS-y += macfwd.c +SRCS-y += macswap.c +SRCS-y += flowgen.c +SRCS-y += rxonly.c +SRCS-y += txonly.c +SRCS-y += csumonly.c +SRCS-y += icmpecho.c +SRCS-y += noisy_vnf.c +SRCS-$(CONFIG_RTE_LIBRTE_IEEE1588) += ieee1588fwd.c +SRCS-$(CONFIG_RTE_LIBRTE_BPF) += bpf_cmd.c +SRCS-y += util.c + +ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC), y) +SRCS-y += softnicfwd.c +endif + +ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y) + +ifeq ($(CONFIG_RTE_LIBRTE_PMD_BOND),y) +LDLIBS += -lrte_pmd_bond +endif + +ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS)$(CONFIG_RTE_LIBRTE_DPAA_PMD),yy) +LDLIBS += -lrte_pmd_dpaa +LDLIBS += -lrte_bus_dpaa +LDLIBS += -lrte_mempool_dpaa +endif + +ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y) +LDLIBS += -lrte_pmd_ixgbe +endif + +ifeq ($(CONFIG_RTE_LIBRTE_I40E_PMD),y) +LDLIBS += -lrte_pmd_i40e +endif + +ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD),y) +LDLIBS += -lrte_pmd_bnxt +endif + +ifeq ($(CONFIG_RTE_LIBRTE_PMD_SOFTNIC),y) +LDLIBS += -lrte_pmd_softnic +endif + +endif + +include $(RTE_SDK)/mk/rte.app.mk + +endif diff --git a/src/spdk/dpdk/app/test-pmd/bpf_cmd.c b/src/spdk/dpdk/app/test-pmd/bpf_cmd.c new file mode 100644 index 000000000..0f984ccf4 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/bpf_cmd.c @@ -0,0 +1,198 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include <stdio.h> +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_flow.h> +#include <rte_bpf_ethdev.h> + +#include <cmdline.h> +#include <cmdline_parse.h> +#include <cmdline_parse_num.h> +#include <cmdline_parse_string.h> + +#include "testpmd.h" + +static const struct rte_bpf_xsym bpf_xsym[] = { + { + .name = RTE_STR(stdout), + .type = RTE_BPF_XTYPE_VAR, + .var = { + .val = &stdout, + .desc = { + .type = RTE_BPF_ARG_PTR, + .size = sizeof(stdout), + }, + }, + }, + { + .name = RTE_STR(rte_pktmbuf_dump), + .type = RTE_BPF_XTYPE_FUNC, + .func = { + .val = (void *)rte_pktmbuf_dump, + .nb_args = 3, + .args = { + [0] = { + .type = RTE_BPF_ARG_RAW, + .size = sizeof(uintptr_t), + }, + [1] = { + .type = RTE_BPF_ARG_PTR_MBUF, + .size = sizeof(struct rte_mbuf), + }, + [2] = { + .type = RTE_BPF_ARG_RAW, + .size = sizeof(uint32_t), + }, + }, + }, + }, +}; + +/* *** load BPF program *** */ +struct cmd_bpf_ld_result { + cmdline_fixed_string_t bpf; + cmdline_fixed_string_t dir; + uint8_t port; + uint16_t queue; + cmdline_fixed_string_t op; + cmdline_fixed_string_t flags; + cmdline_fixed_string_t prm; +}; + +static void +bpf_parse_flags(const char *str, struct rte_bpf_arg *arg, uint32_t *flags) +{ + uint32_t i, v; + + *flags = RTE_BPF_ETH_F_NONE; + arg->type = RTE_BPF_ARG_PTR; + arg->size = mbuf_data_size; + + for (i = 0; str[i] != 0; i++) { + v = toupper(str[i]); + if (v == 'J') + *flags |= RTE_BPF_ETH_F_JIT; 
+ else if (v == 'M') { + arg->type = RTE_BPF_ARG_PTR_MBUF; + arg->size = sizeof(struct rte_mbuf); + arg->buf_size = mbuf_data_size; + } else if (v == '-') + continue; + else + printf("unknown flag: \'%c\'", v); + } +} + +static void cmd_operate_bpf_ld_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int32_t rc; + uint32_t flags; + struct cmd_bpf_ld_result *res; + struct rte_bpf_prm prm; + const char *fname, *sname; + + res = parsed_result; + memset(&prm, 0, sizeof(prm)); + prm.xsym = bpf_xsym; + prm.nb_xsym = RTE_DIM(bpf_xsym); + + bpf_parse_flags(res->flags, &prm.prog_arg, &flags); + fname = res->prm; + sname = ".text"; + + if (strcmp(res->dir, "rx") == 0) { + rc = rte_bpf_eth_rx_elf_load(res->port, res->queue, &prm, + fname, sname, flags); + printf("%d:%s\n", rc, strerror(-rc)); + } else if (strcmp(res->dir, "tx") == 0) { + rc = rte_bpf_eth_tx_elf_load(res->port, res->queue, &prm, + fname, sname, flags); + printf("%d:%s\n", rc, strerror(-rc)); + } else + printf("invalid value: %s\n", res->dir); +} + +cmdline_parse_token_string_t cmd_load_bpf_start = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result, + bpf, "bpf-load"); +cmdline_parse_token_string_t cmd_load_bpf_dir = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result, + dir, "rx#tx"); +cmdline_parse_token_num_t cmd_load_bpf_port = + TOKEN_NUM_INITIALIZER(struct cmd_bpf_ld_result, port, UINT8); +cmdline_parse_token_num_t cmd_load_bpf_queue = + TOKEN_NUM_INITIALIZER(struct cmd_bpf_ld_result, queue, UINT16); +cmdline_parse_token_string_t cmd_load_bpf_flags = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result, + flags, NULL); +cmdline_parse_token_string_t cmd_load_bpf_prm = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_ld_result, + prm, NULL); + +cmdline_parse_inst_t cmd_operate_bpf_ld_parse = { + .f = cmd_operate_bpf_ld_parsed, + .data = NULL, + .help_str = "bpf-load rx|tx <port> <queue> <J|M|B> <file_name>", + .tokens = { + (void *)&cmd_load_bpf_start, + (void *)&cmd_load_bpf_dir, + (void *)&cmd_load_bpf_port, + (void *)&cmd_load_bpf_queue, + (void *)&cmd_load_bpf_flags, + (void *)&cmd_load_bpf_prm, + NULL, + }, +}; + +/* *** unload BPF program *** */ +struct cmd_bpf_unld_result { + cmdline_fixed_string_t bpf; + cmdline_fixed_string_t dir; + uint8_t port; + uint16_t queue; +}; + +static void cmd_operate_bpf_unld_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_bpf_unld_result *res; + + res = parsed_result; + + if (strcmp(res->dir, "rx") == 0) + rte_bpf_eth_rx_unload(res->port, res->queue); + else if (strcmp(res->dir, "tx") == 0) + rte_bpf_eth_tx_unload(res->port, res->queue); + else + printf("invalid value: %s\n", res->dir); +} + +cmdline_parse_token_string_t cmd_unload_bpf_start = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_unld_result, + bpf, "bpf-unload"); +cmdline_parse_token_string_t cmd_unload_bpf_dir = + TOKEN_STRING_INITIALIZER(struct cmd_bpf_unld_result, + dir, "rx#tx"); +cmdline_parse_token_num_t cmd_unload_bpf_port = + TOKEN_NUM_INITIALIZER(struct cmd_bpf_unld_result, port, UINT8); +cmdline_parse_token_num_t cmd_unload_bpf_queue = + TOKEN_NUM_INITIALIZER(struct cmd_bpf_unld_result, queue, UINT16); + +cmdline_parse_inst_t cmd_operate_bpf_unld_parse = { + .f = cmd_operate_bpf_unld_parsed, + .data = NULL, + .help_str = "bpf-unload rx|tx <port> <queue>", + .tokens = { + (void *)&cmd_unload_bpf_start, + (void *)&cmd_unload_bpf_dir, + (void *)&cmd_unload_bpf_port, + (void *)&cmd_unload_bpf_queue, + NULL, + }, +}; diff --git 
a/src/spdk/dpdk/app/test-pmd/bpf_cmd.h b/src/spdk/dpdk/app/test-pmd/bpf_cmd.h new file mode 100644 index 000000000..5ee4c9f79 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/bpf_cmd.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _BPF_CMD_H_ +#define _BPF_CMD_H_ + +#ifdef RTE_LIBRTE_BPF + + /* BPF CLI */ +extern cmdline_parse_inst_t cmd_operate_bpf_ld_parse; +extern cmdline_parse_inst_t cmd_operate_bpf_unld_parse; + +#endif /* RTE_LIBRTE_BPF */ + +#endif /* _BPF_CMD_H_ */ diff --git a/src/spdk/dpdk/app/test-pmd/cmdline.c b/src/spdk/dpdk/app/test-pmd/cmdline.c new file mode 100644 index 000000000..996a49876 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/cmdline.c @@ -0,0 +1,19763 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation. + * Copyright(c) 2014 6WIND S.A. + */ + +#include <stdarg.h> +#include <errno.h> +#include <stdio.h> +#include <stdint.h> +#include <string.h> +#include <termios.h> +#include <unistd.h> +#include <inttypes.h> +#include <sys/socket.h> +#include <netinet/in.h> + +#include <sys/queue.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_malloc.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_ring.h> +#include <rte_mempool.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_string_fns.h> +#include <rte_devargs.h> +#include <rte_flow.h> +#include <rte_gro.h> +#include <rte_mbuf_dyn.h> + +#include <cmdline_rdline.h> +#include <cmdline_parse.h> +#include <cmdline_parse_num.h> +#include <cmdline_parse_string.h> +#include <cmdline_parse_ipaddr.h> +#include <cmdline_parse_etheraddr.h> +#include <cmdline_socket.h> +#include <cmdline.h> +#ifdef RTE_LIBRTE_PMD_BOND +#include <rte_eth_bond.h> +#include <rte_eth_bond_8023ad.h> +#endif +#if defined RTE_LIBRTE_DPAA_BUS && defined RTE_LIBRTE_DPAA_PMD +#include <rte_pmd_dpaa.h> +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD +#include <rte_pmd_ixgbe.h> +#endif +#ifdef RTE_LIBRTE_I40E_PMD +#include <rte_pmd_i40e.h> +#endif +#ifdef RTE_LIBRTE_BNXT_PMD +#include <rte_pmd_bnxt.h> +#endif +#include "testpmd.h" +#include "cmdline_mtr.h" +#include "cmdline_tm.h" +#include "bpf_cmd.h" + +static struct cmdline *testpmd_cl; + +static void cmd_reconfig_device_queue(portid_t id, uint8_t dev, uint8_t queue); + +/* *** Help command with introduction. 
*** */ +struct cmd_help_brief_result { + cmdline_fixed_string_t help; +}; + +static void cmd_help_brief_parsed(__rte_unused void *parsed_result, + struct cmdline *cl, + __rte_unused void *data) +{ + cmdline_printf( + cl, + "\n" + "Help is available for the following sections:\n\n" + " help control : Start and stop forwarding.\n" + " help display : Displaying port, stats and config " + "information.\n" + " help config : Configuration information.\n" + " help ports : Configuring ports.\n" + " help registers : Reading and setting port registers.\n" + " help filters : Filters configuration help.\n" + " help traffic_management : Traffic Management commands.\n" + " help devices : Device related cmds.\n" + " help all : All of the above sections.\n\n" + ); + +} + +cmdline_parse_token_string_t cmd_help_brief_help = + TOKEN_STRING_INITIALIZER(struct cmd_help_brief_result, help, "help"); + +cmdline_parse_inst_t cmd_help_brief = { + .f = cmd_help_brief_parsed, + .data = NULL, + .help_str = "help: Show help", + .tokens = { + (void *)&cmd_help_brief_help, + NULL, + }, +}; + +/* *** Help command with help sections. *** */ +struct cmd_help_long_result { + cmdline_fixed_string_t help; + cmdline_fixed_string_t section; +}; + +static void cmd_help_long_parsed(void *parsed_result, + struct cmdline *cl, + __rte_unused void *data) +{ + int show_all = 0; + struct cmd_help_long_result *res = parsed_result; + + if (!strcmp(res->section, "all")) + show_all = 1; + + if (show_all || !strcmp(res->section, "control")) { + + cmdline_printf( + cl, + "\n" + "Control forwarding:\n" + "-------------------\n\n" + + "start\n" + " Start packet forwarding with current configuration.\n\n" + + "start tx_first\n" + " Start packet forwarding with current config" + " after sending one burst of packets.\n\n" + + "stop\n" + " Stop packet forwarding, and display accumulated" + " statistics.\n\n" + + "quit\n" + " Quit to prompt.\n\n" + ); + } + + if (show_all || !strcmp(res->section, "display")) { + + cmdline_printf( + cl, + "\n" + "Display:\n" + "--------\n\n" + + "show port (info|stats|summary|xstats|fdir|stat_qmap|dcb_tc|cap) (port_id|all)\n" + " Display information for port_id, or all.\n\n" + + "show port X rss reta (size) (mask0,mask1,...)\n" + " Display the rss redirection table entry indicated" + " by masks on port X. 
size is used to indicate the" + " hardware supported reta size\n\n" + + "show port (port_id) rss-hash [key]\n" + " Display the RSS hash functions and RSS hash key of port\n\n" + + "clear port (info|stats|xstats|fdir|stat_qmap) (port_id|all)\n" + " Clear information for port_id, or all.\n\n" + + "show (rxq|txq) info (port_id) (queue_id)\n" + " Display information for configured RX/TX queue.\n\n" + + "show config (rxtx|cores|fwd|txpkts)\n" + " Display the given configuration.\n\n" + + "read rxd (port_id) (queue_id) (rxd_id)\n" + " Display an RX descriptor of a port RX queue.\n\n" + + "read txd (port_id) (queue_id) (txd_id)\n" + " Display a TX descriptor of a port TX queue.\n\n" + + "ddp get list (port_id)\n" + " Get ddp profile info list\n\n" + + "ddp get info (profile_path)\n" + " Get ddp profile information.\n\n" + + "show vf stats (port_id) (vf_id)\n" + " Display a VF's statistics.\n\n" + + "clear vf stats (port_id) (vf_id)\n" + " Reset a VF's statistics.\n\n" + + "show port (port_id) pctype mapping\n" + " Get flow ptype to pctype mapping on a port\n\n" + + "show port meter stats (port_id) (meter_id) (clear)\n" + " Get meter stats on a port\n\n" + + "show fwd stats all\n" + " Display statistics for all fwd engines.\n\n" + + "clear fwd stats all\n" + " Clear statistics for all fwd engines.\n\n" + + "show port (port_id) rx_offload capabilities\n" + " List all per queue and per port Rx offloading" + " capabilities of a port\n\n" + + "show port (port_id) rx_offload configuration\n" + " List port level and all queue level" + " Rx offloading configuration\n\n" + + "show port (port_id) tx_offload capabilities\n" + " List all per queue and per port" + " Tx offloading capabilities of a port\n\n" + + "show port (port_id) tx_offload configuration\n" + " List port level and all queue level" + " Tx offloading configuration\n\n" + + "show port (port_id) tx_metadata\n" + " Show Tx metadata value set" + " for a specific port\n\n" + + "show port (port_id) ptypes\n" + " Show port supported ptypes" + " for a specific port\n\n" + + "show device info (<identifier>|all)" + " Show general information about devices probed.\n\n" + + "show port (port_id) rxq|txq (queue_id) desc (desc_id) status" + " Show status of rx|tx descriptor.\n\n" + + "show port (port_id) macs|mcast_macs" + " Display list of mac addresses added to port.\n\n" + ); + } + + if (show_all || !strcmp(res->section, "config")) { + cmdline_printf( + cl, + "\n" + "Configuration:\n" + "--------------\n" + "Configuration changes only become active when" + " forwarding is started/restarted.\n\n" + + "set default\n" + " Reset forwarding to the default configuration.\n\n" + + "set verbose (level)\n" + " Set the debug verbosity level X.\n\n" + + "set log global|(type) (level)\n" + " Set the log level.\n\n" + + "set nbport (num)\n" + " Set number of ports.\n\n" + + "set nbcore (num)\n" + " Set number of cores.\n\n" + + "set coremask (mask)\n" + " Set the forwarding cores hexadecimal mask.\n\n" + + "set portmask (mask)\n" + " Set the forwarding ports hexadecimal mask.\n\n" + + "set burst (num)\n" + " Set number of packets per burst.\n\n" + + "set burst tx delay (microseconds) retry (num)\n" + " Set the transmit delay time and number of retries," + " effective when retry is enabled.\n\n" + + "set txpkts (x[,y]*)\n" + " Set the length of each segment of TXONLY" + " and optionally CSUM packets.\n\n" + + "set txsplit (off|on|rand)\n" + " Set the split policy for the TX packets." 
+ " Right now only applicable for CSUM and TXONLY" + " modes\n\n" + + "set corelist (x[,y]*)\n" + " Set the list of forwarding cores.\n\n" + + "set portlist (x[,y]*)\n" + " Set the list of forwarding ports.\n\n" + + "set port setup on (iterator|event)\n" + " Select how attached port is retrieved for setup.\n\n" + + "set tx loopback (port_id) (on|off)\n" + " Enable or disable tx loopback.\n\n" + + "set all queues drop (port_id) (on|off)\n" + " Set drop enable bit for all queues.\n\n" + + "set vf split drop (port_id) (vf_id) (on|off)\n" + " Set split drop enable bit for a VF from the PF.\n\n" + + "set vf mac antispoof (port_id) (vf_id) (on|off).\n" + " Set MAC antispoof for a VF from the PF.\n\n" + + "set macsec offload (port_id) on encrypt (on|off) replay-protect (on|off)\n" + " Enable MACsec offload.\n\n" + + "set macsec offload (port_id) off\n" + " Disable MACsec offload.\n\n" + + "set macsec sc (tx|rx) (port_id) (mac) (pi)\n" + " Configure MACsec secure connection (SC).\n\n" + + "set macsec sa (tx|rx) (port_id) (idx) (an) (pn) (key)\n" + " Configure MACsec secure association (SA).\n\n" + + "set vf broadcast (port_id) (vf_id) (on|off)\n" + " Set VF broadcast for a VF from the PF.\n\n" + + "vlan set stripq (on|off) (port_id,queue_id)\n" + " Set the VLAN strip for a queue on a port.\n\n" + + "set vf vlan stripq (port_id) (vf_id) (on|off)\n" + " Set the VLAN strip for all queues in a pool for a VF from the PF.\n\n" + + "set vf vlan insert (port_id) (vf_id) (vlan_id)\n" + " Set VLAN insert for a VF from the PF.\n\n" + + "set vf vlan antispoof (port_id) (vf_id) (on|off)\n" + " Set VLAN antispoof for a VF from the PF.\n\n" + + "set vf vlan tag (port_id) (vf_id) (on|off)\n" + " Set VLAN tag for a VF from the PF.\n\n" + + "set vf tx max-bandwidth (port_id) (vf_id) (bandwidth)\n" + " Set a VF's max bandwidth(Mbps).\n\n" + + "set vf tc tx min-bandwidth (port_id) (vf_id) (bw1, bw2, ...)\n" + " Set all TCs' min bandwidth(%%) on a VF.\n\n" + + "set vf tc tx max-bandwidth (port_id) (vf_id) (tc_no) (bandwidth)\n" + " Set a TC's max bandwidth(Mbps) on a VF.\n\n" + + "set tx strict-link-priority (port_id) (tc_bitmap)\n" + " Set some TCs' strict link priority mode on a physical port.\n\n" + + "set tc tx min-bandwidth (port_id) (bw1, bw2, ...)\n" + " Set all TCs' min bandwidth(%%) for all PF and VFs.\n\n" + + "vlan set (strip|filter|qinq_strip|extend) (on|off) (port_id)\n" + " Set the VLAN strip or filter or qinq strip or extend\n\n" + + "vlan set (inner|outer) tpid (value) (port_id)\n" + " Set the VLAN TPID for Packet Filtering on" + " a port\n\n" + + "rx_vlan add (vlan_id|all) (port_id)\n" + " Add a vlan_id, or all identifiers, to the set" + " of VLAN identifiers filtered by port_id.\n\n" + + "rx_vlan rm (vlan_id|all) (port_id)\n" + " Remove a vlan_id, or all identifiers, from the set" + " of VLAN identifiers filtered by port_id.\n\n" + + "rx_vlan add (vlan_id) port (port_id) vf (vf_mask)\n" + " Add a vlan_id, to the set of VLAN identifiers" + "filtered for VF(s) from port_id.\n\n" + + "rx_vlan rm (vlan_id) port (port_id) vf (vf_mask)\n" + " Remove a vlan_id, to the set of VLAN identifiers" + "filtered for VF(s) from port_id.\n\n" + + "tunnel_filter add (port_id) (outer_mac) (inner_mac) (ip_addr) " + "(inner_vlan) (vxlan|nvgre|ipingre|vxlan-gpe) (imac-ivlan|imac-ivlan-tenid|" + "imac-tenid|imac|omac-imac-tenid|oip|iip) (tenant_id) (queue_id)\n" + " add a tunnel filter of a port.\n\n" + + "tunnel_filter rm (port_id) (outer_mac) (inner_mac) (ip_addr) " + "(inner_vlan) (vxlan|nvgre|ipingre|vxlan-gpe) 
(imac-ivlan|imac-ivlan-tenid|" + "imac-tenid|imac|omac-imac-tenid|oip|iip) (tenant_id) (queue_id)\n" + " remove a tunnel filter of a port.\n\n" + + "rx_vxlan_port add (udp_port) (port_id)\n" + " Add an UDP port for VXLAN packet filter on a port\n\n" + + "rx_vxlan_port rm (udp_port) (port_id)\n" + " Remove an UDP port for VXLAN packet filter on a port\n\n" + + "tx_vlan set (port_id) vlan_id[, vlan_id_outer]\n" + " Set hardware insertion of VLAN IDs (single or double VLAN " + "depends on the number of VLAN IDs) in packets sent on a port.\n\n" + + "tx_vlan set pvid port_id vlan_id (on|off)\n" + " Set port based TX VLAN insertion.\n\n" + + "tx_vlan reset (port_id)\n" + " Disable hardware insertion of a VLAN header in" + " packets sent on a port.\n\n" + + "csum set (ip|udp|tcp|sctp|outer-ip|outer-udp) (hw|sw) (port_id)\n" + " Select hardware or software calculation of the" + " checksum when transmitting a packet using the" + " csum forward engine.\n" + " ip|udp|tcp|sctp always concern the inner layer.\n" + " outer-ip concerns the outer IP layer in" + " outer-udp concerns the outer UDP layer in" + " case the packet is recognized as a tunnel packet by" + " the forward engine (vxlan, gre and ipip are supported)\n" + " Please check the NIC datasheet for HW limits.\n\n" + + "csum parse-tunnel (on|off) (tx_port_id)\n" + " If disabled, treat tunnel packets as non-tunneled" + " packets (treat inner headers as payload). The port\n" + " argument is the port used for TX in csum forward" + " engine.\n\n" + + "csum show (port_id)\n" + " Display tx checksum offload configuration\n\n" + + "tso set (segsize) (portid)\n" + " Enable TCP Segmentation Offload in csum forward" + " engine.\n" + " Please check the NIC datasheet for HW limits.\n\n" + + "tso show (portid)" + " Display the status of TCP Segmentation Offload.\n\n" + + "set port (port_id) gro on|off\n" + " Enable or disable Generic Receive Offload in" + " csum forwarding engine.\n\n" + + "show port (port_id) gro\n" + " Display GRO configuration.\n\n" + + "set gro flush (cycles)\n" + " Set the cycle to flush GROed packets from" + " reassembly tables.\n\n" + + "set port (port_id) gso (on|off)" + " Enable or disable Generic Segmentation Offload in" + " csum forwarding engine.\n\n" + + "set gso segsz (length)\n" + " Set max packet length for output GSO segments," + " including packet header and payload.\n\n" + + "show port (port_id) gso\n" + " Show GSO configuration.\n\n" + + "set fwd (%s)\n" + " Set packet forwarding mode.\n\n" + + "mac_addr add (port_id) (XX:XX:XX:XX:XX:XX)\n" + " Add a MAC address on port_id.\n\n" + + "mac_addr remove (port_id) (XX:XX:XX:XX:XX:XX)\n" + " Remove a MAC address from port_id.\n\n" + + "mac_addr set (port_id) (XX:XX:XX:XX:XX:XX)\n" + " Set the default MAC address for port_id.\n\n" + + "mac_addr add port (port_id) vf (vf_id) (mac_address)\n" + " Add a MAC address for a VF on the port.\n\n" + + "set vf mac addr (port_id) (vf_id) (XX:XX:XX:XX:XX:XX)\n" + " Set the MAC address for a VF from the PF.\n\n" + + "set eth-peer (port_id) (peer_addr)\n" + " set the peer address for certain port.\n\n" + + "set port (port_id) uta (mac_address|all) (on|off)\n" + " Add/Remove a or all unicast hash filter(s)" + "from port X.\n\n" + + "set promisc (port_id|all) (on|off)\n" + " Set the promiscuous mode on port_id, or all.\n\n" + + "set allmulti (port_id|all) (on|off)\n" + " Set the allmulti mode on port_id, or all.\n\n" + + "set vf promisc (port_id) (vf_id) (on|off)\n" + " Set unicast promiscuous mode for a VF from the PF.\n\n" + + "set vf 
allmulti (port_id) (vf_id) (on|off)\n" + " Set multicast promiscuous mode for a VF from the PF.\n\n" + + "set flow_ctrl rx (on|off) tx (on|off) (high_water)" + " (low_water) (pause_time) (send_xon) mac_ctrl_frame_fwd" + " (on|off) autoneg (on|off) (port_id)\n" + "set flow_ctrl rx (on|off) (portid)\n" + "set flow_ctrl tx (on|off) (portid)\n" + "set flow_ctrl high_water (high_water) (portid)\n" + "set flow_ctrl low_water (low_water) (portid)\n" + "set flow_ctrl pause_time (pause_time) (portid)\n" + "set flow_ctrl send_xon (send_xon) (portid)\n" + "set flow_ctrl mac_ctrl_frame_fwd (on|off) (portid)\n" + "set flow_ctrl autoneg (on|off) (port_id)\n" + " Set the link flow control parameter on a port.\n\n" + + "set pfc_ctrl rx (on|off) tx (on|off) (high_water)" + " (low_water) (pause_time) (priority) (port_id)\n" + " Set the priority flow control parameter on a" + " port.\n\n" + + "set stat_qmap (tx|rx) (port_id) (queue_id) (qmapping)\n" + " Set statistics mapping (qmapping 0..15) for RX/TX" + " queue on port.\n" + " e.g., 'set stat_qmap rx 0 2 5' sets rx queue 2" + " on port 0 to mapping 5.\n\n" + + "set xstats-hide-zero on|off\n" + " Set the option to hide the zero values" + " for xstats display.\n" + + "set port (port_id) vf (vf_id) rx|tx on|off\n" + " Enable/Disable a VF receive/tranmit from a port\n\n" + + "set port (port_id) vf (vf_id) (mac_addr)" + " (exact-mac#exact-mac-vlan#hashmac|hashmac-vlan) on|off\n" + " Add/Remove unicast or multicast MAC addr filter" + " for a VF.\n\n" + + "set port (port_id) vf (vf_id) rxmode (AUPE|ROPE|BAM" + "|MPE) (on|off)\n" + " AUPE:accepts untagged VLAN;" + "ROPE:accept unicast hash\n\n" + " BAM:accepts broadcast packets;" + "MPE:accepts all multicast packets\n\n" + " Enable/Disable a VF receive mode of a port\n\n" + + "set port (port_id) queue (queue_id) rate (rate_num)\n" + " Set rate limit for a queue of a port\n\n" + + "set port (port_id) vf (vf_id) rate (rate_num) " + "queue_mask (queue_mask_value)\n" + " Set rate limit for queues in VF of a port\n\n" + + "set port (port_id) mirror-rule (rule_id)" + " (pool-mirror-up|pool-mirror-down|vlan-mirror)" + " (poolmask|vlanid[,vlanid]*) dst-pool (pool_id) (on|off)\n" + " Set pool or vlan type mirror rule on a port.\n" + " e.g., 'set port 0 mirror-rule 0 vlan-mirror 0,1" + " dst-pool 0 on' enable mirror traffic with vlan 0,1" + " to pool 0.\n\n" + + "set port (port_id) mirror-rule (rule_id)" + " (uplink-mirror|downlink-mirror) dst-pool" + " (pool_id) (on|off)\n" + " Set uplink or downlink type mirror rule on a port.\n" + " e.g., 'set port 0 mirror-rule 0 uplink-mirror dst-pool" + " 0 on' enable mirror income traffic to pool 0.\n\n" + + "reset port (port_id) mirror-rule (rule_id)\n" + " Reset a mirror rule.\n\n" + + "set flush_rx (on|off)\n" + " Flush (default) or don't flush RX streams before" + " forwarding. Mainly used with PCAP drivers.\n\n" + + "set bypass mode (normal|bypass|isolate) (port_id)\n" + " Set the bypass mode for the lowest port on bypass enabled" + " NIC.\n\n" + + "set bypass event (timeout|os_on|os_off|power_on|power_off) " + "mode (normal|bypass|isolate) (port_id)\n" + " Set the event required to initiate specified bypass mode for" + " the lowest port on a bypass enabled NIC where:\n" + " timeout = enable bypass after watchdog timeout.\n" + " os_on = enable bypass when OS/board is powered on.\n" + " os_off = enable bypass when OS/board is powered off.\n" + " power_on = enable bypass when power supply is turned on.\n" + " power_off = enable bypass when power supply is turned off." 
+ "\n\n" + + "set bypass timeout (0|1.5|2|3|4|8|16|32)\n" + " Set the bypass watchdog timeout to 'n' seconds" + " where 0 = instant.\n\n" + + "show bypass config (port_id)\n" + " Show the bypass configuration for a bypass enabled NIC" + " using the lowest port on the NIC.\n\n" + +#ifdef RTE_LIBRTE_PMD_BOND + "create bonded device (mode) (socket)\n" + " Create a new bonded device with specific bonding mode and socket.\n\n" + + "add bonding slave (slave_id) (port_id)\n" + " Add a slave device to a bonded device.\n\n" + + "remove bonding slave (slave_id) (port_id)\n" + " Remove a slave device from a bonded device.\n\n" + + "set bonding mode (value) (port_id)\n" + " Set the bonding mode on a bonded device.\n\n" + + "set bonding primary (slave_id) (port_id)\n" + " Set the primary slave for a bonded device.\n\n" + + "show bonding config (port_id)\n" + " Show the bonding config for port_id.\n\n" + + "set bonding mac_addr (port_id) (address)\n" + " Set the MAC address of a bonded device.\n\n" + + "set bonding mode IEEE802.3AD aggregator policy (port_id) (agg_name)" + " Set Aggregation mode for IEEE802.3AD (mode 4)" + + "set bonding xmit_balance_policy (port_id) (l2|l23|l34)\n" + " Set the transmit balance policy for bonded device running in balance mode.\n\n" + + "set bonding mon_period (port_id) (value)\n" + " Set the bonding link status monitoring polling period in ms.\n\n" + + "set bonding lacp dedicated_queues <port_id> (enable|disable)\n" + " Enable/disable dedicated queues for LACP control traffic.\n\n" + +#endif + "set link-up port (port_id)\n" + " Set link up for a port.\n\n" + + "set link-down port (port_id)\n" + " Set link down for a port.\n\n" + + "E-tag set insertion on port-tag-id (value)" + " port (port_id) vf (vf_id)\n" + " Enable E-tag insertion for a VF on a port\n\n" + + "E-tag set insertion off port (port_id) vf (vf_id)\n" + " Disable E-tag insertion for a VF on a port\n\n" + + "E-tag set stripping (on|off) port (port_id)\n" + " Enable/disable E-tag stripping on a port\n\n" + + "E-tag set forwarding (on|off) port (port_id)\n" + " Enable/disable E-tag based forwarding" + " on a port\n\n" + + "E-tag set filter add e-tag-id (value) dst-pool" + " (pool_id) port (port_id)\n" + " Add an E-tag forwarding filter on a port\n\n" + + "E-tag set filter del e-tag-id (value) port (port_id)\n" + " Delete an E-tag forwarding filter on a port\n\n" + + "ddp add (port_id) (profile_path[,backup_profile_path])\n" + " Load a profile package on a port\n\n" + + "ddp del (port_id) (backup_profile_path)\n" + " Delete a profile package from a port\n\n" + + "ptype mapping get (port_id) (valid_only)\n" + " Get ptype mapping on a port\n\n" + + "ptype mapping replace (port_id) (target) (mask) (pky_type)\n" + " Replace target with the pkt_type in ptype mapping\n\n" + + "ptype mapping reset (port_id)\n" + " Reset ptype mapping on a port\n\n" + + "ptype mapping update (port_id) (hw_ptype) (sw_ptype)\n" + " Update a ptype mapping item on a port\n\n" + + "set port (port_id) ptype_mask (ptype_mask)\n" + " set packet types classification for a specific port\n\n" + + "set port (port_id) queue-region region_id (value) " + "queue_start_index (value) queue_num (value)\n" + " Set a queue region on a port\n\n" + + "set port (port_id) queue-region region_id (value) " + "flowtype (value)\n" + " Set a flowtype region index on a port\n\n" + + "set port (port_id) queue-region UP (value) region_id (value)\n" + " Set the mapping of User Priority to " + "queue region on a port\n\n" + + "set port (port_id) queue-region flush 
(on|off)\n" + " flush all queue region related configuration\n\n" + + "show port meter cap (port_id)\n" + " Show port meter capability information\n\n" + + "add port meter profile srtcm_rfc2697 (port_id) (profile_id) (cir) (cbs) (ebs)\n" + " meter profile add - srtcm rfc 2697\n\n" + + "add port meter profile trtcm_rfc2698 (port_id) (profile_id) (cir) (pir) (cbs) (pbs)\n" + " meter profile add - trtcm rfc 2698\n\n" + + "add port meter profile trtcm_rfc4115 (port_id) (profile_id) (cir) (eir) (cbs) (ebs)\n" + " meter profile add - trtcm rfc 4115\n\n" + + "del port meter profile (port_id) (profile_id)\n" + " meter profile delete\n\n" + + "create port meter (port_id) (mtr_id) (profile_id) (meter_enable)\n" + "(g_action) (y_action) (r_action) (stats_mask) (shared)\n" + "(use_pre_meter_color) [(dscp_tbl_entry0) (dscp_tbl_entry1)...\n" + "(dscp_tbl_entry63)]\n" + " meter create\n\n" + + "enable port meter (port_id) (mtr_id)\n" + " meter enable\n\n" + + "disable port meter (port_id) (mtr_id)\n" + " meter disable\n\n" + + "del port meter (port_id) (mtr_id)\n" + " meter delete\n\n" + + "set port meter profile (port_id) (mtr_id) (profile_id)\n" + " meter update meter profile\n\n" + + "set port meter dscp table (port_id) (mtr_id) [(dscp_tbl_entry0)\n" + "(dscp_tbl_entry1)...(dscp_tbl_entry63)]\n" + " update meter dscp table entries\n\n" + + "set port meter policer action (port_id) (mtr_id) (action_mask)\n" + "(action0) [(action1) (action2)]\n" + " meter update policer action\n\n" + + "set port meter stats mask (port_id) (mtr_id) (stats_mask)\n" + " meter update stats\n\n" + + "show port (port_id) queue-region\n" + " show all queue region related configuration info\n\n" + + , list_pkt_forwarding_modes() + ); + } + + if (show_all || !strcmp(res->section, "ports")) { + + cmdline_printf( + cl, + "\n" + "Port Operations:\n" + "----------------\n\n" + + "port start (port_id|all)\n" + " Start all ports or port_id.\n\n" + + "port stop (port_id|all)\n" + " Stop all ports or port_id.\n\n" + + "port close (port_id|all)\n" + " Close all ports or port_id.\n\n" + + "port reset (port_id|all)\n" + " Reset all ports or port_id.\n\n" + + "port attach (ident)\n" + " Attach physical or virtual dev by pci address or virtual device name\n\n" + + "port detach (port_id)\n" + " Detach physical or virtual dev by port_id\n\n" + + "port config (port_id|all)" + " speed (10|100|1000|10000|25000|40000|50000|100000|200000|auto)" + " duplex (half|full|auto)\n" + " Set speed and duplex for all ports or port_id\n\n" + + "port config (port_id|all) loopback (mode)\n" + " Set loopback mode for all ports or port_id\n\n" + + "port config all (rxq|txq|rxd|txd) (value)\n" + " Set number for rxq/txq/rxd/txd.\n\n" + + "port config all max-pkt-len (value)\n" + " Set the max packet length.\n\n" + + "port config all max-lro-pkt-size (value)\n" + " Set the max LRO aggregated packet size.\n\n" + + "port config all drop-en (on|off)\n" + " Enable or disable packet drop on all RX queues of all ports when no " + "receive buffers available.\n\n" + + "port config all rss (all|default|ip|tcp|udp|sctp|" + "ether|port|vxlan|geneve|nvgre|vxlan-gpe|none|<flowtype_id>)\n" + " Set the RSS mode.\n\n" + + "port config port-id rss reta (hash,queue)[,(hash,queue)]\n" + " Set the RSS redirection table.\n\n" + + "port config (port_id) dcb vt (on|off) (traffic_class)" + " pfc (on|off)\n" + " Set the DCB mode.\n\n" + + "port config all burst (value)\n" + " Set the number of packets per burst.\n\n" + + "port config all (txpt|txht|txwt|rxpt|rxht|rxwt)" + " (value)\n" + " 
Set the ring prefetch/host/writeback threshold" + " for tx/rx queue.\n\n" + + "port config all (txfreet|txrst|rxfreet) (value)\n" + " Set free threshold for rx/tx, or set" + " tx rs bit threshold.\n\n" + "port config mtu X value\n" + " Set the MTU of port X to a given value\n\n" + + "port config (port_id) (rxq|txq) (queue_id) ring_size (value)\n" + " Set a rx/tx queue's ring size configuration, the new" + " value will take effect after command that (re-)start the port" + " or command that setup the specific queue\n\n" + + "port (port_id) (rxq|txq) (queue_id) (start|stop)\n" + " Start/stop a rx/tx queue of port X. Only take effect" + " when port X is started\n\n" + + "port (port_id) (rxq|txq) (queue_id) deferred_start (on|off)\n" + " Switch on/off a deferred start of port X rx/tx queue. Only" + " take effect when port X is stopped.\n\n" + + "port (port_id) (rxq|txq) (queue_id) setup\n" + " Setup a rx/tx queue of port X.\n\n" + + "port config (port_id|all) l2-tunnel E-tag ether-type" + " (value)\n" + " Set the value of E-tag ether-type.\n\n" + + "port config (port_id|all) l2-tunnel E-tag" + " (enable|disable)\n" + " Enable/disable the E-tag support.\n\n" + + "port config (port_id) pctype mapping reset\n" + " Reset flow type to pctype mapping on a port\n\n" + + "port config (port_id) pctype mapping update" + " (pctype_id_0[,pctype_id_1]*) (flow_type_id)\n" + " Update a flow type to pctype mapping item on a port\n\n" + + "port config (port_id) pctype (pctype_id) hash_inset|" + "fdir_inset|fdir_flx_inset get|set|clear field\n" + " (field_idx)\n" + " Configure RSS|FDIR|FDIR_FLX input set for some pctype\n\n" + + "port config (port_id) pctype (pctype_id) hash_inset|" + "fdir_inset|fdir_flx_inset clear all" + " Clear RSS|FDIR|FDIR_FLX input set completely for some pctype\n\n" + + "port config (port_id) udp_tunnel_port add|rm vxlan|geneve (udp_port)\n\n" + " Add/remove UDP tunnel port for tunneling offload\n\n" + + "port config <port_id> rx_offload vlan_strip|" + "ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|" + "outer_ipv4_cksum|macsec_strip|header_split|" + "vlan_filter|vlan_extend|jumbo_frame|" + "scatter|timestamp|security|keep_crc on|off\n" + " Enable or disable a per port Rx offloading" + " on all Rx queues of a port\n\n" + + "port (port_id) rxq (queue_id) rx_offload vlan_strip|" + "ipv4_cksum|udp_cksum|tcp_cksum|tcp_lro|qinq_strip|" + "outer_ipv4_cksum|macsec_strip|header_split|" + "vlan_filter|vlan_extend|jumbo_frame|" + "scatter|timestamp|security|keep_crc on|off\n" + " Enable or disable a per queue Rx offloading" + " only on a specific Rx queue\n\n" + + "port config (port_id) tx_offload vlan_insert|" + "ipv4_cksum|udp_cksum|tcp_cksum|sctp_cksum|tcp_tso|" + "udp_tso|outer_ipv4_cksum|qinq_insert|vxlan_tnl_tso|" + "gre_tnl_tso|ipip_tnl_tso|geneve_tnl_tso|" + "macsec_insert|mt_lockfree|multi_segs|mbuf_fast_free|" + "security on|off\n" + " Enable or disable a per port Tx offloading" + " on all Tx queues of a port\n\n" + + "port (port_id) txq (queue_id) tx_offload vlan_insert|" + "ipv4_cksum|udp_cksum|tcp_cksum|sctp_cksum|tcp_tso|" + "udp_tso|outer_ipv4_cksum|qinq_insert|vxlan_tnl_tso|" + "gre_tnl_tso|ipip_tnl_tso|geneve_tnl_tso|macsec_insert" + "|mt_lockfree|multi_segs|mbuf_fast_free|security" + " on|off\n" + " Enable or disable a per queue Tx offloading" + " only on a specific Tx queue\n\n" + + "bpf-load rx|tx (port) (queue) (J|M|B) (file_name)\n" + " Load an eBPF program as a callback" + " for particular RX/TX queue\n\n" + + "bpf-unload rx|tx (port) (queue)\n" + " Unload previously 
loaded eBPF program" + " for particular RX/TX queue\n\n" + + "port config (port_id) tx_metadata (value)\n" + " Set Tx metadata value per port. Testpmd will add this value" + " to any Tx packet sent from this port\n\n" + + "port config (port_id) dynf (name) set|clear\n" + " Register a dynf and Set/clear this flag on Tx. " + "Testpmd will set this value to any Tx packet " + "sent from this port\n\n" + ); + } + + if (show_all || !strcmp(res->section, "registers")) { + + cmdline_printf( + cl, + "\n" + "Registers:\n" + "----------\n\n" + + "read reg (port_id) (address)\n" + " Display value of a port register.\n\n" + + "read regfield (port_id) (address) (bit_x) (bit_y)\n" + " Display a port register bit field.\n\n" + + "read regbit (port_id) (address) (bit_x)\n" + " Display a single port register bit.\n\n" + + "write reg (port_id) (address) (value)\n" + " Set value of a port register.\n\n" + + "write regfield (port_id) (address) (bit_x) (bit_y)" + " (value)\n" + " Set bit field of a port register.\n\n" + + "write regbit (port_id) (address) (bit_x) (value)\n" + " Set single bit value of a port register.\n\n" + ); + } + if (show_all || !strcmp(res->section, "filters")) { + + cmdline_printf( + cl, + "\n" + "filters:\n" + "--------\n\n" + + "ethertype_filter (port_id) (add|del)" + " (mac_addr|mac_ignr) (mac_address) ethertype" + " (ether_type) (drop|fwd) queue (queue_id)\n" + " Add/Del an ethertype filter.\n\n" + + "2tuple_filter (port_id) (add|del)" + " dst_port (dst_port_value) protocol (protocol_value)" + " mask (mask_value) tcp_flags (tcp_flags_value)" + " priority (prio_value) queue (queue_id)\n" + " Add/Del a 2tuple filter.\n\n" + + "5tuple_filter (port_id) (add|del)" + " dst_ip (dst_address) src_ip (src_address)" + " dst_port (dst_port_value) src_port (src_port_value)" + " protocol (protocol_value)" + " mask (mask_value) tcp_flags (tcp_flags_value)" + " priority (prio_value) queue (queue_id)\n" + " Add/Del a 5tuple filter.\n\n" + + "syn_filter (port_id) (add|del) priority (high|low) queue (queue_id)" + " Add/Del syn filter.\n\n" + + "flex_filter (port_id) (add|del) len (len_value)" + " bytes (bytes_value) mask (mask_value)" + " priority (prio_value) queue (queue_id)\n" + " Add/Del a flex filter.\n\n" + + "flow_director_filter (port_id) mode IP (add|del|update)" + " flow (ipv4-other|ipv4-frag|ipv6-other|ipv6-frag)" + " src (src_ip_address) dst (dst_ip_address)" + " tos (tos_value) proto (proto_value) ttl (ttl_value)" + " vlan (vlan_value) flexbytes (flexbytes_value)" + " (drop|fwd) pf|vf(vf_id) queue (queue_id)" + " fd_id (fd_id_value)\n" + " Add/Del an IP type flow director filter.\n\n" + + "flow_director_filter (port_id) mode IP (add|del|update)" + " flow (ipv4-tcp|ipv4-udp|ipv6-tcp|ipv6-udp)" + " src (src_ip_address) (src_port)" + " dst (dst_ip_address) (dst_port)" + " tos (tos_value) ttl (ttl_value)" + " vlan (vlan_value) flexbytes (flexbytes_value)" + " (drop|fwd) pf|vf(vf_id) queue (queue_id)" + " fd_id (fd_id_value)\n" + " Add/Del an UDP/TCP type flow director filter.\n\n" + + "flow_director_filter (port_id) mode IP (add|del|update)" + " flow (ipv4-sctp|ipv6-sctp)" + " src (src_ip_address) (src_port)" + " dst (dst_ip_address) (dst_port)" + " tag (verification_tag) " + " tos (tos_value) ttl (ttl_value)" + " vlan (vlan_value)" + " flexbytes (flexbytes_value) (drop|fwd)" + " pf|vf(vf_id) queue (queue_id) fd_id (fd_id_value)\n" + " Add/Del a SCTP type flow director filter.\n\n" + + "flow_director_filter (port_id) mode IP (add|del|update)" + " flow l2_payload ether (ethertype)" + " flexbytes 
(flexbytes_value) (drop|fwd)" + " pf|vf(vf_id) queue (queue_id) fd_id (fd_id_value)\n" + " Add/Del a l2 payload type flow director filter.\n\n" + + "flow_director_filter (port_id) mode MAC-VLAN (add|del|update)" + " mac (mac_address) vlan (vlan_value)" + " flexbytes (flexbytes_value) (drop|fwd)" + " queue (queue_id) fd_id (fd_id_value)\n" + " Add/Del a MAC-VLAN flow director filter.\n\n" + + "flow_director_filter (port_id) mode Tunnel (add|del|update)" + " mac (mac_address) vlan (vlan_value)" + " tunnel (NVGRE|VxLAN) tunnel-id (tunnel_id_value)" + " flexbytes (flexbytes_value) (drop|fwd)" + " queue (queue_id) fd_id (fd_id_value)\n" + " Add/Del a Tunnel flow director filter.\n\n" + + "flow_director_filter (port_id) mode raw (add|del|update)" + " flow (flow_id) (drop|fwd) queue (queue_id)" + " fd_id (fd_id_value) packet (packet file name)\n" + " Add/Del a raw type flow director filter.\n\n" + + "flush_flow_director (port_id)\n" + " Flush all flow director entries of a device.\n\n" + + "flow_director_mask (port_id) mode IP vlan (vlan_value)" + " src_mask (ipv4_src) (ipv6_src) (src_port)" + " dst_mask (ipv4_dst) (ipv6_dst) (dst_port)\n" + " Set flow director IP mask.\n\n" + + "flow_director_mask (port_id) mode MAC-VLAN" + " vlan (vlan_value)\n" + " Set flow director MAC-VLAN mask.\n\n" + + "flow_director_mask (port_id) mode Tunnel" + " vlan (vlan_value) mac (mac_value)" + " tunnel-type (tunnel_type_value)" + " tunnel-id (tunnel_id_value)\n" + " Set flow director Tunnel mask.\n\n" + + "flow_director_flex_mask (port_id)" + " flow (none|ipv4-other|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|" + "ipv6-other|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|l2_payload|all)" + " (mask)\n" + " Configure mask of flex payload.\n\n" + + "flow_director_flex_payload (port_id)" + " (raw|l2|l3|l4) (config)\n" + " Configure flex payload selection.\n\n" + + "get_sym_hash_ena_per_port (port_id)\n" + " get symmetric hash enable configuration per port.\n\n" + + "set_sym_hash_ena_per_port (port_id) (enable|disable)\n" + " set symmetric hash enable configuration per port" + " to enable or disable.\n\n" + + "get_hash_global_config (port_id)\n" + " Get the global configurations of hash filters.\n\n" + + "set_hash_global_config (port_id) (toeplitz|simple_xor|symmetric_toeplitz|default)" + " (ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|" + "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload)" + " (enable|disable)\n" + " Set the global configurations of hash filters.\n\n" + + "set_hash_input_set (port_id) (ipv4|ipv4-frag|" + "ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|" + "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|" + "l2_payload|<flowtype_id>) (ovlan|ivlan|src-ipv4|dst-ipv4|" + "src-ipv6|dst-ipv6|ipv4-tos|ipv4-proto|ipv6-tc|" + "ipv6-next-header|udp-src-port|udp-dst-port|" + "tcp-src-port|tcp-dst-port|sctp-src-port|" + "sctp-dst-port|sctp-veri-tag|udp-key|gre-key|fld-1st|" + "fld-2nd|fld-3rd|fld-4th|fld-5th|fld-6th|fld-7th|" + "fld-8th|none) (select|add)\n" + " Set the input set for hash.\n\n" + + "set_fdir_input_set (port_id) " + "(ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|" + "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|" + "l2_payload) (ivlan|ethertype|src-ipv4|dst-ipv4|src-ipv6|" + "dst-ipv6|ipv4-tos|ipv4-proto|ipv4-ttl|ipv6-tc|" + "ipv6-next-header|ipv6-hop-limits|udp-src-port|" + "udp-dst-port|tcp-src-port|tcp-dst-port|" + "sctp-src-port|sctp-dst-port|sctp-veri-tag|none)" + " (select|add)\n" + " Set the input set for FDir.\n\n" + + "flow validate {port_id}" + " [group {group_id}] 
[priority {level}]" + " [ingress] [egress]" + " pattern {item} [/ {item} [...]] / end" + " actions {action} [/ {action} [...]] / end\n" + " Check whether a flow rule can be created.\n\n" + + "flow create {port_id}" + " [group {group_id}] [priority {level}]" + " [ingress] [egress]" + " pattern {item} [/ {item} [...]] / end" + " actions {action} [/ {action} [...]] / end\n" + " Create a flow rule.\n\n" + + "flow destroy {port_id} rule {rule_id} [...]\n" + " Destroy specific flow rules.\n\n" + + "flow flush {port_id}\n" + " Destroy all flow rules.\n\n" + + "flow query {port_id} {rule_id} {action}\n" + " Query an existing flow rule.\n\n" + + "flow list {port_id} [group {group_id}] [...]\n" + " List existing flow rules sorted by priority," + " filtered by group identifiers.\n\n" + + "flow isolate {port_id} {boolean}\n" + " Restrict ingress traffic to the defined" + " flow rules\n\n" + + "flow aged {port_id} [destroy]\n" + " List and destroy aged flows" + " flow rules\n\n" + + "set vxlan ip-version (ipv4|ipv6) vni (vni) udp-src" + " (udp-src) udp-dst (udp-dst) ip-src (ip-src) ip-dst" + " (ip-dst) eth-src (eth-src) eth-dst (eth-dst)\n" + " Configure the VXLAN encapsulation for flows.\n\n" + + "set vxlan-with-vlan ip-version (ipv4|ipv6) vni (vni)" + " udp-src (udp-src) udp-dst (udp-dst) ip-src (ip-src)" + " ip-dst (ip-dst) vlan-tci (vlan-tci) eth-src (eth-src)" + " eth-dst (eth-dst)\n" + " Configure the VXLAN encapsulation for flows.\n\n" + + "set vxlan-tos-ttl ip-version (ipv4|ipv6) vni (vni) udp-src" + " (udp-src) udp-dst (udp-dst) ip-tos (ip-tos) ip-ttl (ip-ttl)" + " ip-src (ip-src) ip-dst (ip-dst) eth-src (eth-src)" + " eth-dst (eth-dst)\n" + " Configure the VXLAN encapsulation for flows.\n\n" + + "set nvgre ip-version (ipv4|ipv6) tni (tni) ip-src" + " (ip-src) ip-dst (ip-dst) eth-src (eth-src) eth-dst" + " (eth-dst)\n" + " Configure the NVGRE encapsulation for flows.\n\n" + + "set nvgre-with-vlan ip-version (ipv4|ipv6) tni (tni)" + " ip-src (ip-src) ip-dst (ip-dst) vlan-tci (vlan-tci)" + " eth-src (eth-src) eth-dst (eth-dst)\n" + " Configure the NVGRE encapsulation for flows.\n\n" + + "set raw_encap {flow items}\n" + " Configure the encapsulation with raw data.\n\n" + + "set raw_decap {flow items}\n" + " Configure the decapsulation with raw data.\n\n" + + ); + } + + if (show_all || !strcmp(res->section, "traffic_management")) { + cmdline_printf( + cl, + "\n" + "Traffic Management:\n" + "--------------\n" + "show port tm cap (port_id)\n" + " Display the port TM capability.\n\n" + + "show port tm level cap (port_id) (level_id)\n" + " Display the port TM hierarchical level capability.\n\n" + + "show port tm node cap (port_id) (node_id)\n" + " Display the port TM node capability.\n\n" + + "show port tm node type (port_id) (node_id)\n" + " Display the port TM node type.\n\n" + + "show port tm node stats (port_id) (node_id) (clear)\n" + " Display the port TM node stats.\n\n" + +#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED + "set port tm hierarchy default (port_id)\n" + " Set default traffic Management hierarchy on a port\n\n" +#endif + + "add port tm node shaper profile (port_id) (shaper_profile_id)" + " (cmit_tb_rate) (cmit_tb_size) (peak_tb_rate) (peak_tb_size)" + " (packet_length_adjust)\n" + " Add port tm node private shaper profile.\n\n" + + "del port tm node shaper profile (port_id) (shaper_profile_id)\n" + " Delete port tm node private shaper profile.\n\n" + + "add port tm node shared shaper (port_id) (shared_shaper_id)" + " (shaper_profile_id)\n" + " Add/update port tm node 
shared shaper.\n\n" + + "del port tm node shared shaper (port_id) (shared_shaper_id)\n" + " Delete port tm node shared shaper.\n\n" + + "set port tm node shaper profile (port_id) (node_id)" + " (shaper_profile_id)\n" + " Set port tm node shaper profile.\n\n" + + "add port tm node wred profile (port_id) (wred_profile_id)" + " (color_g) (min_th_g) (max_th_g) (maxp_inv_g) (wq_log2_g)" + " (color_y) (min_th_y) (max_th_y) (maxp_inv_y) (wq_log2_y)" + " (color_r) (min_th_r) (max_th_r) (maxp_inv_r) (wq_log2_r)\n" + " Add port tm node wred profile.\n\n" + + "del port tm node wred profile (port_id) (wred_profile_id)\n" + " Delete port tm node wred profile.\n\n" + + "add port tm nonleaf node (port_id) (node_id) (parent_node_id)" + " (priority) (weight) (level_id) (shaper_profile_id)" + " (n_sp_priorities) (stats_mask) (n_shared_shapers)" + " [(shared_shaper_id_0) (shared_shaper_id_1)...]\n" + " Add port tm nonleaf node.\n\n" + + "add port tm leaf node (port_id) (node_id) (parent_node_id)" + " (priority) (weight) (level_id) (shaper_profile_id)" + " (cman_mode) (wred_profile_id) (stats_mask) (n_shared_shapers)" + " [(shared_shaper_id_0) (shared_shaper_id_1)...]\n" + " Add port tm leaf node.\n\n" + + "del port tm node (port_id) (node_id)\n" + " Delete port tm node.\n\n" + + "set port tm node parent (port_id) (node_id) (parent_node_id)" + " (priority) (weight)\n" + " Set port tm node parent.\n\n" + + "suspend port tm node (port_id) (node_id)" + " Suspend tm node.\n\n" + + "resume port tm node (port_id) (node_id)" + " Resume tm node.\n\n" + + "port tm hierarchy commit (port_id) (clean_on_fail)\n" + " Commit tm hierarchy.\n\n" + + "set port tm mark ip_ecn (port) (green) (yellow)" + " (red)\n" + " Enables/Disables the traffic management marking" + " for IP ECN (Explicit Congestion Notification)" + " packets on a given port\n\n" + + "set port tm mark ip_dscp (port) (green) (yellow)" + " (red)\n" + " Enables/Disables the traffic management marking" + " on the port for IP dscp packets\n\n" + + "set port tm mark vlan_dei (port) (green) (yellow)" + " (red)\n" + " Enables/Disables the traffic management marking" + " on the port for VLAN packets with DEI enabled\n\n" + ); + } + + if (show_all || !strcmp(res->section, "devices")) { + cmdline_printf( + cl, + "\n" + "Device Operations:\n" + "--------------\n" + "device detach (identifier)\n" + " Detach device by identifier.\n\n" + ); + } + +} + +cmdline_parse_token_string_t cmd_help_long_help = + TOKEN_STRING_INITIALIZER(struct cmd_help_long_result, help, "help"); + +cmdline_parse_token_string_t cmd_help_long_section = + TOKEN_STRING_INITIALIZER(struct cmd_help_long_result, section, + "all#control#display#config#" + "ports#registers#filters#traffic_management#devices"); + +cmdline_parse_inst_t cmd_help_long = { + .f = cmd_help_long_parsed, + .data = NULL, + .help_str = "help all|control|display|config|ports|register|" + "filters|traffic_management|devices: " + "Show help", + .tokens = { + (void *)&cmd_help_long_help, + (void *)&cmd_help_long_section, + NULL, + }, +}; + + +/* *** start/stop/close all ports *** */ +struct cmd_operate_port_result { + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t name; + cmdline_fixed_string_t value; +}; + +static void cmd_operate_port_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_operate_port_result *res = parsed_result; + + if (!strcmp(res->name, "start")) + start_port(RTE_PORT_ALL); + else if (!strcmp(res->name, "stop")) + stop_port(RTE_PORT_ALL); + else if 
(!strcmp(res->name, "close")) + close_port(RTE_PORT_ALL); + else if (!strcmp(res->name, "reset")) + reset_port(RTE_PORT_ALL); + else + printf("Unknown parameter\n"); +} + +cmdline_parse_token_string_t cmd_operate_port_all_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_operate_port_result, keyword, + "port"); +cmdline_parse_token_string_t cmd_operate_port_all_port = + TOKEN_STRING_INITIALIZER(struct cmd_operate_port_result, name, + "start#stop#close#reset"); +cmdline_parse_token_string_t cmd_operate_port_all_all = + TOKEN_STRING_INITIALIZER(struct cmd_operate_port_result, value, "all"); + +cmdline_parse_inst_t cmd_operate_port = { + .f = cmd_operate_port_parsed, + .data = NULL, + .help_str = "port start|stop|close all: Start/Stop/Close/Reset all ports", + .tokens = { + (void *)&cmd_operate_port_all_cmd, + (void *)&cmd_operate_port_all_port, + (void *)&cmd_operate_port_all_all, + NULL, + }, +}; + +/* *** start/stop/close specific port *** */ +struct cmd_operate_specific_port_result { + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t name; + uint8_t value; +}; + +static void cmd_operate_specific_port_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_operate_specific_port_result *res = parsed_result; + + if (!strcmp(res->name, "start")) + start_port(res->value); + else if (!strcmp(res->name, "stop")) + stop_port(res->value); + else if (!strcmp(res->name, "close")) + close_port(res->value); + else if (!strcmp(res->name, "reset")) + reset_port(res->value); + else + printf("Unknown parameter\n"); +} + +cmdline_parse_token_string_t cmd_operate_specific_port_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_operate_specific_port_result, + keyword, "port"); +cmdline_parse_token_string_t cmd_operate_specific_port_port = + TOKEN_STRING_INITIALIZER(struct cmd_operate_specific_port_result, + name, "start#stop#close#reset"); +cmdline_parse_token_num_t cmd_operate_specific_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_operate_specific_port_result, + value, UINT8); + +cmdline_parse_inst_t cmd_operate_specific_port = { + .f = cmd_operate_specific_port_parsed, + .data = NULL, + .help_str = "port start|stop|close <port_id>: Start/Stop/Close/Reset port_id", + .tokens = { + (void *)&cmd_operate_specific_port_cmd, + (void *)&cmd_operate_specific_port_port, + (void *)&cmd_operate_specific_port_id, + NULL, + }, +}; + +/* *** enable port setup (after attach) via iterator or event *** */ +struct cmd_set_port_setup_on_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t setup; + cmdline_fixed_string_t on; + cmdline_fixed_string_t mode; +}; + +static void cmd_set_port_setup_on_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_port_setup_on_result *res = parsed_result; + + if (strcmp(res->mode, "event") == 0) + setup_on_probe_event = true; + else if (strcmp(res->mode, "iterator") == 0) + setup_on_probe_event = false; + else + printf("Unknown mode\n"); +} + +cmdline_parse_token_string_t cmd_set_port_setup_on_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_port_setup_on_result, + set, "set"); +cmdline_parse_token_string_t cmd_set_port_setup_on_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_port_setup_on_result, + port, "port"); +cmdline_parse_token_string_t cmd_set_port_setup_on_setup = + TOKEN_STRING_INITIALIZER(struct cmd_set_port_setup_on_result, + setup, "setup"); +cmdline_parse_token_string_t cmd_set_port_setup_on_on = + TOKEN_STRING_INITIALIZER(struct 
cmd_set_port_setup_on_result, + on, "on"); +cmdline_parse_token_string_t cmd_set_port_setup_on_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_port_setup_on_result, + mode, "iterator#event"); + +cmdline_parse_inst_t cmd_set_port_setup_on = { + .f = cmd_set_port_setup_on_parsed, + .data = NULL, + .help_str = "set port setup on iterator|event", + .tokens = { + (void *)&cmd_set_port_setup_on_set, + (void *)&cmd_set_port_setup_on_port, + (void *)&cmd_set_port_setup_on_setup, + (void *)&cmd_set_port_setup_on_on, + (void *)&cmd_set_port_setup_on_mode, + NULL, + }, +}; + +/* *** attach a specified port *** */ +struct cmd_operate_attach_port_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_multi_string_t identifier; +}; + +static void cmd_operate_attach_port_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_operate_attach_port_result *res = parsed_result; + + if (!strcmp(res->keyword, "attach")) + attach_port(res->identifier); + else + printf("Unknown parameter\n"); +} + +cmdline_parse_token_string_t cmd_operate_attach_port_port = + TOKEN_STRING_INITIALIZER(struct cmd_operate_attach_port_result, + port, "port"); +cmdline_parse_token_string_t cmd_operate_attach_port_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_operate_attach_port_result, + keyword, "attach"); +cmdline_parse_token_string_t cmd_operate_attach_port_identifier = + TOKEN_STRING_INITIALIZER(struct cmd_operate_attach_port_result, + identifier, TOKEN_STRING_MULTI); + +cmdline_parse_inst_t cmd_operate_attach_port = { + .f = cmd_operate_attach_port_parsed, + .data = NULL, + .help_str = "port attach <identifier>: " + "(identifier: pci address or virtual dev name)", + .tokens = { + (void *)&cmd_operate_attach_port_port, + (void *)&cmd_operate_attach_port_keyword, + (void *)&cmd_operate_attach_port_identifier, + NULL, + }, +}; + +/* *** detach a specified port *** */ +struct cmd_operate_detach_port_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + portid_t port_id; +}; + +static void cmd_operate_detach_port_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_operate_detach_port_result *res = parsed_result; + + if (!strcmp(res->keyword, "detach")) { + RTE_ETH_VALID_PORTID_OR_RET(res->port_id); + detach_port_device(res->port_id); + } else { + printf("Unknown parameter\n"); + } +} + +cmdline_parse_token_string_t cmd_operate_detach_port_port = + TOKEN_STRING_INITIALIZER(struct cmd_operate_detach_port_result, + port, "port"); +cmdline_parse_token_string_t cmd_operate_detach_port_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_operate_detach_port_result, + keyword, "detach"); +cmdline_parse_token_num_t cmd_operate_detach_port_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_operate_detach_port_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_operate_detach_port = { + .f = cmd_operate_detach_port_parsed, + .data = NULL, + .help_str = "port detach <port_id>", + .tokens = { + (void *)&cmd_operate_detach_port_port, + (void *)&cmd_operate_detach_port_keyword, + (void *)&cmd_operate_detach_port_port_id, + NULL, + }, +}; + +/* *** detach device by identifier *** */ +struct cmd_operate_detach_device_result { + cmdline_fixed_string_t device; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t identifier; +}; + +static void cmd_operate_detach_device_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct 
cmd_operate_detach_device_result *res = parsed_result; + + if (!strcmp(res->keyword, "detach")) + detach_devargs(res->identifier); + else + printf("Unknown parameter\n"); +} + +cmdline_parse_token_string_t cmd_operate_detach_device_device = + TOKEN_STRING_INITIALIZER(struct cmd_operate_detach_device_result, + device, "device"); +cmdline_parse_token_string_t cmd_operate_detach_device_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_operate_detach_device_result, + keyword, "detach"); +cmdline_parse_token_string_t cmd_operate_detach_device_identifier = + TOKEN_STRING_INITIALIZER(struct cmd_operate_detach_device_result, + identifier, NULL); + +cmdline_parse_inst_t cmd_operate_detach_device = { + .f = cmd_operate_detach_device_parsed, + .data = NULL, + .help_str = "device detach <identifier>:" + "(identifier: pci address or virtual dev name)", + .tokens = { + (void *)&cmd_operate_detach_device_device, + (void *)&cmd_operate_detach_device_keyword, + (void *)&cmd_operate_detach_device_identifier, + NULL, + }, +}; +/* *** configure speed for all ports *** */ +struct cmd_config_speed_all { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t item1; + cmdline_fixed_string_t item2; + cmdline_fixed_string_t value1; + cmdline_fixed_string_t value2; +}; + +static int +parse_and_check_speed_duplex(char *speedstr, char *duplexstr, uint32_t *speed) +{ + + int duplex; + + if (!strcmp(duplexstr, "half")) { + duplex = ETH_LINK_HALF_DUPLEX; + } else if (!strcmp(duplexstr, "full")) { + duplex = ETH_LINK_FULL_DUPLEX; + } else if (!strcmp(duplexstr, "auto")) { + duplex = ETH_LINK_FULL_DUPLEX; + } else { + printf("Unknown duplex parameter\n"); + return -1; + } + + if (!strcmp(speedstr, "10")) { + *speed = (duplex == ETH_LINK_HALF_DUPLEX) ? + ETH_LINK_SPEED_10M_HD : ETH_LINK_SPEED_10M; + } else if (!strcmp(speedstr, "100")) { + *speed = (duplex == ETH_LINK_HALF_DUPLEX) ? 
+ ETH_LINK_SPEED_100M_HD : ETH_LINK_SPEED_100M; + } else { + if (duplex != ETH_LINK_FULL_DUPLEX) { + printf("Invalid speed/duplex parameters\n"); + return -1; + } + if (!strcmp(speedstr, "1000")) { + *speed = ETH_LINK_SPEED_1G; + } else if (!strcmp(speedstr, "10000")) { + *speed = ETH_LINK_SPEED_10G; + } else if (!strcmp(speedstr, "25000")) { + *speed = ETH_LINK_SPEED_25G; + } else if (!strcmp(speedstr, "40000")) { + *speed = ETH_LINK_SPEED_40G; + } else if (!strcmp(speedstr, "50000")) { + *speed = ETH_LINK_SPEED_50G; + } else if (!strcmp(speedstr, "100000")) { + *speed = ETH_LINK_SPEED_100G; + } else if (!strcmp(speedstr, "200000")) { + *speed = ETH_LINK_SPEED_200G; + } else if (!strcmp(speedstr, "auto")) { + *speed = ETH_LINK_SPEED_AUTONEG; + } else { + printf("Unknown speed parameter\n"); + return -1; + } + } + + return 0; +} + +static void +cmd_config_speed_all_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_speed_all *res = parsed_result; + uint32_t link_speed; + portid_t pid; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + if (parse_and_check_speed_duplex(res->value1, res->value2, + &link_speed) < 0) + return; + + RTE_ETH_FOREACH_DEV(pid) { + ports[pid].dev_conf.link_speeds = link_speed; + } + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_speed_all_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, port, "port"); +cmdline_parse_token_string_t cmd_config_speed_all_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, keyword, + "config"); +cmdline_parse_token_string_t cmd_config_speed_all_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, all, "all"); +cmdline_parse_token_string_t cmd_config_speed_all_item1 = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, item1, "speed"); +cmdline_parse_token_string_t cmd_config_speed_all_value1 = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, value1, + "10#100#1000#10000#25000#40000#50000#100000#200000#auto"); +cmdline_parse_token_string_t cmd_config_speed_all_item2 = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, item2, "duplex"); +cmdline_parse_token_string_t cmd_config_speed_all_value2 = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_all, value2, + "half#full#auto"); + +cmdline_parse_inst_t cmd_config_speed_all = { + .f = cmd_config_speed_all_parsed, + .data = NULL, + .help_str = "port config all speed " + "10|100|1000|10000|25000|40000|50000|100000|200000|auto duplex " + "half|full|auto", + .tokens = { + (void *)&cmd_config_speed_all_port, + (void *)&cmd_config_speed_all_keyword, + (void *)&cmd_config_speed_all_all, + (void *)&cmd_config_speed_all_item1, + (void *)&cmd_config_speed_all_value1, + (void *)&cmd_config_speed_all_item2, + (void *)&cmd_config_speed_all_value2, + NULL, + }, +}; + +/* *** configure speed for specific port *** */ +struct cmd_config_speed_specific { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + portid_t id; + cmdline_fixed_string_t item1; + cmdline_fixed_string_t item2; + cmdline_fixed_string_t value1; + cmdline_fixed_string_t value2; +}; + +static void +cmd_config_speed_specific_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_speed_specific *res = parsed_result; + uint32_t link_speed; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + if (port_id_is_invalid(res->id, 
ENABLED_WARN)) + return; + + if (parse_and_check_speed_duplex(res->value1, res->value2, + &link_speed) < 0) + return; + + ports[res->id].dev_conf.link_speeds = link_speed; + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + + +cmdline_parse_token_string_t cmd_config_speed_specific_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, port, + "port"); +cmdline_parse_token_string_t cmd_config_speed_specific_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, keyword, + "config"); +cmdline_parse_token_num_t cmd_config_speed_specific_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_speed_specific, id, UINT16); +cmdline_parse_token_string_t cmd_config_speed_specific_item1 = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, item1, + "speed"); +cmdline_parse_token_string_t cmd_config_speed_specific_value1 = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, value1, + "10#100#1000#10000#25000#40000#50000#100000#200000#auto"); +cmdline_parse_token_string_t cmd_config_speed_specific_item2 = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, item2, + "duplex"); +cmdline_parse_token_string_t cmd_config_speed_specific_value2 = + TOKEN_STRING_INITIALIZER(struct cmd_config_speed_specific, value2, + "half#full#auto"); + +cmdline_parse_inst_t cmd_config_speed_specific = { + .f = cmd_config_speed_specific_parsed, + .data = NULL, + .help_str = "port config <port_id> speed " + "10|100|1000|10000|25000|40000|50000|100000|200000|auto duplex " + "half|full|auto", + .tokens = { + (void *)&cmd_config_speed_specific_port, + (void *)&cmd_config_speed_specific_keyword, + (void *)&cmd_config_speed_specific_id, + (void *)&cmd_config_speed_specific_item1, + (void *)&cmd_config_speed_specific_value1, + (void *)&cmd_config_speed_specific_item2, + (void *)&cmd_config_speed_specific_value2, + NULL, + }, +}; + +/* *** configure loopback for all ports *** */ +struct cmd_config_loopback_all { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t item; + uint32_t mode; +}; + +static void +cmd_config_loopback_all_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_loopback_all *res = parsed_result; + portid_t pid; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + RTE_ETH_FOREACH_DEV(pid) { + ports[pid].dev_conf.lpbk_mode = res->mode; + } + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_loopback_all_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, port, "port"); +cmdline_parse_token_string_t cmd_config_loopback_all_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, keyword, + "config"); +cmdline_parse_token_string_t cmd_config_loopback_all_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, all, "all"); +cmdline_parse_token_string_t cmd_config_loopback_all_item = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_all, item, + "loopback"); +cmdline_parse_token_num_t cmd_config_loopback_all_mode = + TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_all, mode, UINT32); + +cmdline_parse_inst_t cmd_config_loopback_all = { + .f = cmd_config_loopback_all_parsed, + .data = NULL, + .help_str = "port config all loopback <mode>", + .tokens = { + (void *)&cmd_config_loopback_all_port, + (void *)&cmd_config_loopback_all_keyword, + (void *)&cmd_config_loopback_all_all, + (void *)&cmd_config_loopback_all_item, + 
(void *)&cmd_config_loopback_all_mode, + NULL, + }, +}; + +/* *** configure loopback for specific port *** */ +struct cmd_config_loopback_specific { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + uint16_t port_id; + cmdline_fixed_string_t item; + uint32_t mode; +}; + +static void +cmd_config_loopback_specific_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_loopback_specific *res = parsed_result; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %u first\n", res->port_id); + return; + } + + ports[res->port_id].dev_conf.lpbk_mode = res->mode; + + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + + +cmdline_parse_token_string_t cmd_config_loopback_specific_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, port, + "port"); +cmdline_parse_token_string_t cmd_config_loopback_specific_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, keyword, + "config"); +cmdline_parse_token_num_t cmd_config_loopback_specific_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_specific, port_id, + UINT16); +cmdline_parse_token_string_t cmd_config_loopback_specific_item = + TOKEN_STRING_INITIALIZER(struct cmd_config_loopback_specific, item, + "loopback"); +cmdline_parse_token_num_t cmd_config_loopback_specific_mode = + TOKEN_NUM_INITIALIZER(struct cmd_config_loopback_specific, mode, + UINT32); + +cmdline_parse_inst_t cmd_config_loopback_specific = { + .f = cmd_config_loopback_specific_parsed, + .data = NULL, + .help_str = "port config <port_id> loopback <mode>", + .tokens = { + (void *)&cmd_config_loopback_specific_port, + (void *)&cmd_config_loopback_specific_keyword, + (void *)&cmd_config_loopback_specific_id, + (void *)&cmd_config_loopback_specific_item, + (void *)&cmd_config_loopback_specific_mode, + NULL, + }, +}; + +/* *** configure txq/rxq, txd/rxd *** */ +struct cmd_config_rx_tx { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t name; + uint16_t value; +}; + +static void +cmd_config_rx_tx_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_rx_tx *res = parsed_result; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + if (!strcmp(res->name, "rxq")) { + if (!res->value && !nb_txq) { + printf("Warning: Either rx or tx queues should be non zero\n"); + return; + } + if (check_nb_rxq(res->value) != 0) + return; + nb_rxq = res->value; + } + else if (!strcmp(res->name, "txq")) { + if (!res->value && !nb_rxq) { + printf("Warning: Either rx or tx queues should be non zero\n"); + return; + } + if (check_nb_txq(res->value) != 0) + return; + nb_txq = res->value; + } + else if (!strcmp(res->name, "rxd")) { + if (check_nb_rxd(res->value) != 0) + return; + nb_rxd = res->value; + } else if (!strcmp(res->name, "txd")) { + if (check_nb_txd(res->value) != 0) + return; + + nb_txd = res->value; + } else { + printf("Unknown parameter\n"); + return; + } + + fwd_config_setup(); + + init_port_config(); + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_rx_tx_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_rx_tx, port, "port"); +cmdline_parse_token_string_t cmd_config_rx_tx_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_rx_tx, keyword, "config"); +cmdline_parse_token_string_t 
cmd_config_rx_tx_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_rx_tx, all, "all"); +cmdline_parse_token_string_t cmd_config_rx_tx_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_rx_tx, name, + "rxq#txq#rxd#txd"); +cmdline_parse_token_num_t cmd_config_rx_tx_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_rx_tx, value, UINT16); + +cmdline_parse_inst_t cmd_config_rx_tx = { + .f = cmd_config_rx_tx_parsed, + .data = NULL, + .help_str = "port config all rxq|txq|rxd|txd <value>", + .tokens = { + (void *)&cmd_config_rx_tx_port, + (void *)&cmd_config_rx_tx_keyword, + (void *)&cmd_config_rx_tx_all, + (void *)&cmd_config_rx_tx_name, + (void *)&cmd_config_rx_tx_value, + NULL, + }, +}; + +/* *** config max packet length *** */ +struct cmd_config_max_pkt_len_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t name; + uint32_t value; +}; + +static void +cmd_config_max_pkt_len_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_max_pkt_len_result *res = parsed_result; + portid_t pid; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + RTE_ETH_FOREACH_DEV(pid) { + struct rte_port *port = &ports[pid]; + uint64_t rx_offloads = port->dev_conf.rxmode.offloads; + + if (!strcmp(res->name, "max-pkt-len")) { + if (res->value < RTE_ETHER_MIN_LEN) { + printf("max-pkt-len can not be less than %d\n", + RTE_ETHER_MIN_LEN); + return; + } + if (res->value == port->dev_conf.rxmode.max_rx_pkt_len) + return; + + port->dev_conf.rxmode.max_rx_pkt_len = res->value; + if (res->value > RTE_ETHER_MAX_LEN) + rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; + port->dev_conf.rxmode.offloads = rx_offloads; + } else { + printf("Unknown parameter\n"); + return; + } + } + + init_port_config(); + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_max_pkt_len_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_max_pkt_len_result, port, + "port"); +cmdline_parse_token_string_t cmd_config_max_pkt_len_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_max_pkt_len_result, keyword, + "config"); +cmdline_parse_token_string_t cmd_config_max_pkt_len_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_max_pkt_len_result, all, + "all"); +cmdline_parse_token_string_t cmd_config_max_pkt_len_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_max_pkt_len_result, name, + "max-pkt-len"); +cmdline_parse_token_num_t cmd_config_max_pkt_len_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_max_pkt_len_result, value, + UINT32); + +cmdline_parse_inst_t cmd_config_max_pkt_len = { + .f = cmd_config_max_pkt_len_parsed, + .data = NULL, + .help_str = "port config all max-pkt-len <value>", + .tokens = { + (void *)&cmd_config_max_pkt_len_port, + (void *)&cmd_config_max_pkt_len_keyword, + (void *)&cmd_config_max_pkt_len_all, + (void *)&cmd_config_max_pkt_len_name, + (void *)&cmd_config_max_pkt_len_value, + NULL, + }, +}; + +/* *** config max LRO aggregated packet size *** */ +struct cmd_config_max_lro_pkt_size_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t name; + uint32_t value; +}; + +static void +cmd_config_max_lro_pkt_size_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_max_lro_pkt_size_result *res = parsed_result; + portid_t pid; + + if 
(!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + RTE_ETH_FOREACH_DEV(pid) { + struct rte_port *port = &ports[pid]; + + if (!strcmp(res->name, "max-lro-pkt-size")) { + if (res->value == + port->dev_conf.rxmode.max_lro_pkt_size) + return; + + port->dev_conf.rxmode.max_lro_pkt_size = res->value; + } else { + printf("Unknown parameter\n"); + return; + } + } + + init_port_config(); + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_max_lro_pkt_size_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_max_lro_pkt_size_result, + port, "port"); +cmdline_parse_token_string_t cmd_config_max_lro_pkt_size_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_max_lro_pkt_size_result, + keyword, "config"); +cmdline_parse_token_string_t cmd_config_max_lro_pkt_size_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_max_lro_pkt_size_result, + all, "all"); +cmdline_parse_token_string_t cmd_config_max_lro_pkt_size_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_max_lro_pkt_size_result, + name, "max-lro-pkt-size"); +cmdline_parse_token_num_t cmd_config_max_lro_pkt_size_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_max_lro_pkt_size_result, + value, UINT32); + +cmdline_parse_inst_t cmd_config_max_lro_pkt_size = { + .f = cmd_config_max_lro_pkt_size_parsed, + .data = NULL, + .help_str = "port config all max-lro-pkt-size <value>", + .tokens = { + (void *)&cmd_config_max_lro_pkt_size_port, + (void *)&cmd_config_max_lro_pkt_size_keyword, + (void *)&cmd_config_max_lro_pkt_size_all, + (void *)&cmd_config_max_lro_pkt_size_name, + (void *)&cmd_config_max_lro_pkt_size_value, + NULL, + }, +}; + +/* *** configure port MTU *** */ +struct cmd_config_mtu_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t mtu; + portid_t port_id; + uint16_t value; +}; + +static void +cmd_config_mtu_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_mtu_result *res = parsed_result; + + if (res->value < RTE_ETHER_MIN_LEN) { + printf("mtu cannot be less than %d\n", RTE_ETHER_MIN_LEN); + return; + } + port_mtu_set(res->port_id, res->value); +} + +cmdline_parse_token_string_t cmd_config_mtu_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_mtu_result, port, + "port"); +cmdline_parse_token_string_t cmd_config_mtu_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_mtu_result, keyword, + "config"); +cmdline_parse_token_string_t cmd_config_mtu_mtu = + TOKEN_STRING_INITIALIZER(struct cmd_config_mtu_result, keyword, + "mtu"); +cmdline_parse_token_num_t cmd_config_mtu_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_mtu_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_config_mtu_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_mtu_result, value, UINT16); + +cmdline_parse_inst_t cmd_config_mtu = { + .f = cmd_config_mtu_parsed, + .data = NULL, + .help_str = "port config mtu <port_id> <value>", + .tokens = { + (void *)&cmd_config_mtu_port, + (void *)&cmd_config_mtu_keyword, + (void *)&cmd_config_mtu_mtu, + (void *)&cmd_config_mtu_port_id, + (void *)&cmd_config_mtu_value, + NULL, + }, +}; + +/* *** configure rx mode *** */ +struct cmd_config_rx_mode_flag { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t name; + cmdline_fixed_string_t value; +}; + +static void +cmd_config_rx_mode_flag_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) 
+{ + struct cmd_config_rx_mode_flag *res = parsed_result; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + if (!strcmp(res->name, "drop-en")) { + if (!strcmp(res->value, "on")) + rx_drop_en = 1; + else if (!strcmp(res->value, "off")) + rx_drop_en = 0; + else { + printf("Unknown parameter\n"); + return; + } + } else { + printf("Unknown parameter\n"); + return; + } + + init_port_config(); + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_rx_mode_flag_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, port, "port"); +cmdline_parse_token_string_t cmd_config_rx_mode_flag_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, keyword, + "config"); +cmdline_parse_token_string_t cmd_config_rx_mode_flag_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, all, "all"); +cmdline_parse_token_string_t cmd_config_rx_mode_flag_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, name, + "drop-en"); +cmdline_parse_token_string_t cmd_config_rx_mode_flag_value = + TOKEN_STRING_INITIALIZER(struct cmd_config_rx_mode_flag, value, + "on#off"); + +cmdline_parse_inst_t cmd_config_rx_mode_flag = { + .f = cmd_config_rx_mode_flag_parsed, + .data = NULL, + .help_str = "port config all drop-en on|off", + .tokens = { + (void *)&cmd_config_rx_mode_flag_port, + (void *)&cmd_config_rx_mode_flag_keyword, + (void *)&cmd_config_rx_mode_flag_all, + (void *)&cmd_config_rx_mode_flag_name, + (void *)&cmd_config_rx_mode_flag_value, + NULL, + }, +}; + +/* *** configure rss *** */ +struct cmd_config_rss { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t name; + cmdline_fixed_string_t value; +}; + +static void +cmd_config_rss_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_rss *res = parsed_result; + struct rte_eth_rss_conf rss_conf = { .rss_key_len = 0, }; + struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; + int use_default = 0; + int all_updated = 1; + int diag; + uint16_t i; + int ret; + + if (!strcmp(res->value, "all")) + rss_conf.rss_hf = ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | + ETH_RSS_TCP | ETH_RSS_UDP | ETH_RSS_SCTP | + ETH_RSS_L2_PAYLOAD | ETH_RSS_L2TPV3 | ETH_RSS_ESP | + ETH_RSS_AH | ETH_RSS_PFCP; + else if (!strcmp(res->value, "eth")) + rss_conf.rss_hf = ETH_RSS_ETH; + else if (!strcmp(res->value, "vlan")) + rss_conf.rss_hf = ETH_RSS_VLAN; + else if (!strcmp(res->value, "ip")) + rss_conf.rss_hf = ETH_RSS_IP; + else if (!strcmp(res->value, "udp")) + rss_conf.rss_hf = ETH_RSS_UDP; + else if (!strcmp(res->value, "tcp")) + rss_conf.rss_hf = ETH_RSS_TCP; + else if (!strcmp(res->value, "sctp")) + rss_conf.rss_hf = ETH_RSS_SCTP; + else if (!strcmp(res->value, "ether")) + rss_conf.rss_hf = ETH_RSS_L2_PAYLOAD; + else if (!strcmp(res->value, "port")) + rss_conf.rss_hf = ETH_RSS_PORT; + else if (!strcmp(res->value, "vxlan")) + rss_conf.rss_hf = ETH_RSS_VXLAN; + else if (!strcmp(res->value, "geneve")) + rss_conf.rss_hf = ETH_RSS_GENEVE; + else if (!strcmp(res->value, "nvgre")) + rss_conf.rss_hf = ETH_RSS_NVGRE; + else if (!strcmp(res->value, "l3-src-only")) + rss_conf.rss_hf = ETH_RSS_L3_SRC_ONLY; + else if (!strcmp(res->value, "l3-dst-only")) + rss_conf.rss_hf = ETH_RSS_L3_DST_ONLY; + else if (!strcmp(res->value, "l4-src-only")) + rss_conf.rss_hf = ETH_RSS_L4_SRC_ONLY; + else if (!strcmp(res->value, "l4-dst-only")) + rss_conf.rss_hf = 
ETH_RSS_L4_DST_ONLY; + else if (!strcmp(res->value, "l2-src-only")) + rss_conf.rss_hf = ETH_RSS_L2_SRC_ONLY; + else if (!strcmp(res->value, "l2-dst-only")) + rss_conf.rss_hf = ETH_RSS_L2_DST_ONLY; + else if (!strcmp(res->value, "l2tpv3")) + rss_conf.rss_hf = ETH_RSS_L2TPV3; + else if (!strcmp(res->value, "esp")) + rss_conf.rss_hf = ETH_RSS_ESP; + else if (!strcmp(res->value, "ah")) + rss_conf.rss_hf = ETH_RSS_AH; + else if (!strcmp(res->value, "pfcp")) + rss_conf.rss_hf = ETH_RSS_PFCP; + else if (!strcmp(res->value, "none")) + rss_conf.rss_hf = 0; + else if (!strcmp(res->value, "default")) + use_default = 1; + else if (isdigit(res->value[0]) && atoi(res->value) > 0 && + atoi(res->value) < 64) + rss_conf.rss_hf = 1ULL << atoi(res->value); + else { + printf("Unknown parameter\n"); + return; + } + rss_conf.rss_key = NULL; + /* Update global configuration for RSS types. */ + RTE_ETH_FOREACH_DEV(i) { + struct rte_eth_rss_conf local_rss_conf; + + ret = eth_dev_info_get_print_err(i, &dev_info); + if (ret != 0) + return; + + if (use_default) + rss_conf.rss_hf = dev_info.flow_type_rss_offloads; + + local_rss_conf = rss_conf; + local_rss_conf.rss_hf = rss_conf.rss_hf & + dev_info.flow_type_rss_offloads; + if (local_rss_conf.rss_hf != rss_conf.rss_hf) { + printf("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"\n", + i, rss_conf.rss_hf, local_rss_conf.rss_hf); + } + diag = rte_eth_dev_rss_hash_update(i, &local_rss_conf); + if (diag < 0) { + all_updated = 0; + printf("Configuration of RSS hash at ethernet port %d " + "failed with error (%d): %s.\n", + i, -diag, strerror(-diag)); + } + } + if (all_updated && !use_default) { + rss_hf = rss_conf.rss_hf; + printf("rss_hf %#"PRIx64"\n", rss_hf); + } +} + +cmdline_parse_token_string_t cmd_config_rss_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss, port, "port"); +cmdline_parse_token_string_t cmd_config_rss_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss, keyword, "config"); +cmdline_parse_token_string_t cmd_config_rss_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss, all, "all"); +cmdline_parse_token_string_t cmd_config_rss_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss, name, "rss"); +cmdline_parse_token_string_t cmd_config_rss_value = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss, value, NULL); + +cmdline_parse_inst_t cmd_config_rss = { + .f = cmd_config_rss_parsed, + .data = NULL, + .help_str = "port config all rss " + "all|default|eth|vlan|ip|tcp|udp|sctp|ether|port|vxlan|geneve|" + "nvgre|vxlan-gpe|l2tpv3|esp|ah|pfcp|none|<flowtype_id>", + .tokens = { + (void *)&cmd_config_rss_port, + (void *)&cmd_config_rss_keyword, + (void *)&cmd_config_rss_all, + (void *)&cmd_config_rss_name, + (void *)&cmd_config_rss_value, + NULL, + }, +}; + +/* *** configure rss hash key *** */ +struct cmd_config_rss_hash_key { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t rss_hash_key; + cmdline_fixed_string_t rss_type; + cmdline_fixed_string_t key; +}; + +static uint8_t +hexa_digit_to_value(char hexa_digit) +{ + if ((hexa_digit >= '0') && (hexa_digit <= '9')) + return (uint8_t) (hexa_digit - '0'); + if ((hexa_digit >= 'a') && (hexa_digit <= 'f')) + return (uint8_t) ((hexa_digit - 'a') + 10); + if ((hexa_digit >= 'A') && (hexa_digit <= 'F')) + return (uint8_t) ((hexa_digit - 'A') + 10); + /* Invalid hexa digit */ + return 0xFF; +} + +static uint8_t +parse_and_check_key_hexa_digit(char *key, int idx) +{ + uint8_t hexa_v; + 
+ hexa_v = hexa_digit_to_value(key[idx]); + if (hexa_v == 0xFF) + printf("invalid key: character %c at position %d is not a " + "valid hexa digit\n", key[idx], idx); + return hexa_v; +} + +static void +cmd_config_rss_hash_key_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_rss_hash_key *res = parsed_result; + uint8_t hash_key[RSS_HASH_KEY_LENGTH]; + uint8_t xdgt0; + uint8_t xdgt1; + int i; + struct rte_eth_dev_info dev_info; + uint8_t hash_key_size; + uint32_t key_len; + int ret; + + ret = eth_dev_info_get_print_err(res->port_id, &dev_info); + if (ret != 0) + return; + + if (dev_info.hash_key_size > 0 && + dev_info.hash_key_size <= sizeof(hash_key)) + hash_key_size = dev_info.hash_key_size; + else { + printf("dev_info did not provide a valid hash key size\n"); + return; + } + /* Check the length of the RSS hash key */ + key_len = strlen(res->key); + if (key_len != (hash_key_size * 2)) { + printf("key length: %d invalid - key must be a string of %d" + " hexa-decimal numbers\n", + (int) key_len, hash_key_size * 2); + return; + } + /* Translate RSS hash key into binary representation */ + for (i = 0; i < hash_key_size; i++) { + xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2)); + if (xdgt0 == 0xFF) + return; + xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1); + if (xdgt1 == 0xFF) + return; + hash_key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1); + } + port_rss_hash_key_update(res->port_id, res->rss_type, hash_key, + hash_key_size); +} + +cmdline_parse_token_string_t cmd_config_rss_hash_key_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, port, "port"); +cmdline_parse_token_string_t cmd_config_rss_hash_key_config = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, config, + "config"); +cmdline_parse_token_num_t cmd_config_rss_hash_key_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_rss_hash_key, port_id, UINT16); +cmdline_parse_token_string_t cmd_config_rss_hash_key_rss_hash_key = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, + rss_hash_key, "rss-hash-key"); +cmdline_parse_token_string_t cmd_config_rss_hash_key_rss_type = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, rss_type, + "ipv4#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#" + "ipv4-other#ipv6#ipv6-frag#ipv6-tcp#ipv6-udp#" + "ipv6-sctp#ipv6-other#l2-payload#ipv6-ex#" + "ipv6-tcp-ex#ipv6-udp-ex#" + "l3-src-only#l3-dst-only#l4-src-only#l4-dst-only#" + "l2-src-only#l2-dst-only#s-vlan#c-vlan#" + "l2tpv3#esp#ah#pfcp"); +cmdline_parse_token_string_t cmd_config_rss_hash_key_value = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_hash_key, key, NULL); + +cmdline_parse_inst_t cmd_config_rss_hash_key = { + .f = cmd_config_rss_hash_key_parsed, + .data = NULL, + .help_str = "port config <port_id> rss-hash-key " + "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|" + "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|" + "l2-payload|ipv6-ex|ipv6-tcp-ex|ipv6-udp-ex|" + "l3-src-only|l3-dst-only|l4-src-only|l4-dst-only|" + "l2-src-only|l2-dst-only|s-vlan|c-vlan|" + "l2tpv3|esp|ah|pfcp " + "<string of hex digits (variable length, NIC dependent)>", + .tokens = { + (void *)&cmd_config_rss_hash_key_port, + (void *)&cmd_config_rss_hash_key_config, + (void *)&cmd_config_rss_hash_key_port_id, + (void *)&cmd_config_rss_hash_key_rss_hash_key, + (void *)&cmd_config_rss_hash_key_rss_type, + (void *)&cmd_config_rss_hash_key_value, + NULL, + }, +}; + +/* *** configure port rxq/txq ring size *** */ +struct cmd_config_rxtx_ring_size 
{ + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t portid; + cmdline_fixed_string_t rxtxq; + uint16_t qid; + cmdline_fixed_string_t rsize; + uint16_t size; +}; + +static void +cmd_config_rxtx_ring_size_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_rxtx_ring_size *res = parsed_result; + struct rte_port *port; + uint8_t isrx; + + if (port_id_is_invalid(res->portid, ENABLED_WARN)) + return; + + if (res->portid == (portid_t)RTE_PORT_ALL) { + printf("Invalid port id\n"); + return; + } + + port = &ports[res->portid]; + + if (!strcmp(res->rxtxq, "rxq")) + isrx = 1; + else if (!strcmp(res->rxtxq, "txq")) + isrx = 0; + else { + printf("Unknown parameter\n"); + return; + } + + if (isrx && rx_queue_id_is_invalid(res->qid)) + return; + else if (!isrx && tx_queue_id_is_invalid(res->qid)) + return; + + if (isrx && res->size != 0 && res->size <= rx_free_thresh) { + printf("Invalid rx ring_size, must > rx_free_thresh: %d\n", + rx_free_thresh); + return; + } + + if (isrx) + port->nb_rx_desc[res->qid] = res->size; + else + port->nb_tx_desc[res->qid] = res->size; + + cmd_reconfig_device_queue(res->portid, 0, 1); +} + +cmdline_parse_token_string_t cmd_config_rxtx_ring_size_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size, + port, "port"); +cmdline_parse_token_string_t cmd_config_rxtx_ring_size_config = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size, + config, "config"); +cmdline_parse_token_num_t cmd_config_rxtx_ring_size_portid = + TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size, + portid, UINT16); +cmdline_parse_token_string_t cmd_config_rxtx_ring_size_rxtxq = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size, + rxtxq, "rxq#txq"); +cmdline_parse_token_num_t cmd_config_rxtx_ring_size_qid = + TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size, + qid, UINT16); +cmdline_parse_token_string_t cmd_config_rxtx_ring_size_rsize = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_ring_size, + rsize, "ring_size"); +cmdline_parse_token_num_t cmd_config_rxtx_ring_size_size = + TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_ring_size, + size, UINT16); + +cmdline_parse_inst_t cmd_config_rxtx_ring_size = { + .f = cmd_config_rxtx_ring_size_parsed, + .data = NULL, + .help_str = "port config <port_id> rxq|txq <queue_id> ring_size <value>", + .tokens = { + (void *)&cmd_config_rxtx_ring_size_port, + (void *)&cmd_config_rxtx_ring_size_config, + (void *)&cmd_config_rxtx_ring_size_portid, + (void *)&cmd_config_rxtx_ring_size_rxtxq, + (void *)&cmd_config_rxtx_ring_size_qid, + (void *)&cmd_config_rxtx_ring_size_rsize, + (void *)&cmd_config_rxtx_ring_size_size, + NULL, + }, +}; + +/* *** configure port rxq/txq start/stop *** */ +struct cmd_config_rxtx_queue { + cmdline_fixed_string_t port; + portid_t portid; + cmdline_fixed_string_t rxtxq; + uint16_t qid; + cmdline_fixed_string_t opname; +}; + +static void +cmd_config_rxtx_queue_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_rxtx_queue *res = parsed_result; + uint8_t isrx; + uint8_t isstart; + int ret = 0; + + if (test_done == 0) { + printf("Please stop forwarding first\n"); + return; + } + + if (port_id_is_invalid(res->portid, ENABLED_WARN)) + return; + + if (port_is_started(res->portid) != 1) { + printf("Please start port %u first\n", res->portid); + return; + } + + if (!strcmp(res->rxtxq, "rxq")) + isrx = 1; + else if (!strcmp(res->rxtxq, "txq")) + isrx = 0; + else { + 
printf("Unknown parameter\n"); + return; + } + + if (isrx && rx_queue_id_is_invalid(res->qid)) + return; + else if (!isrx && tx_queue_id_is_invalid(res->qid)) + return; + + if (!strcmp(res->opname, "start")) + isstart = 1; + else if (!strcmp(res->opname, "stop")) + isstart = 0; + else { + printf("Unknown parameter\n"); + return; + } + + if (isstart && isrx) + ret = rte_eth_dev_rx_queue_start(res->portid, res->qid); + else if (!isstart && isrx) + ret = rte_eth_dev_rx_queue_stop(res->portid, res->qid); + else if (isstart && !isrx) + ret = rte_eth_dev_tx_queue_start(res->portid, res->qid); + else + ret = rte_eth_dev_tx_queue_stop(res->portid, res->qid); + + if (ret == -ENOTSUP) + printf("Function not supported in PMD driver\n"); +} + +cmdline_parse_token_string_t cmd_config_rxtx_queue_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, port, "port"); +cmdline_parse_token_num_t cmd_config_rxtx_queue_portid = + TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_queue, portid, UINT16); +cmdline_parse_token_string_t cmd_config_rxtx_queue_rxtxq = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, rxtxq, "rxq#txq"); +cmdline_parse_token_num_t cmd_config_rxtx_queue_qid = + TOKEN_NUM_INITIALIZER(struct cmd_config_rxtx_queue, qid, UINT16); +cmdline_parse_token_string_t cmd_config_rxtx_queue_opname = + TOKEN_STRING_INITIALIZER(struct cmd_config_rxtx_queue, opname, + "start#stop"); + +cmdline_parse_inst_t cmd_config_rxtx_queue = { + .f = cmd_config_rxtx_queue_parsed, + .data = NULL, + .help_str = "port <port_id> rxq|txq <queue_id> start|stop", + .tokens = { + (void *)&cmd_config_rxtx_queue_port, + (void *)&cmd_config_rxtx_queue_portid, + (void *)&cmd_config_rxtx_queue_rxtxq, + (void *)&cmd_config_rxtx_queue_qid, + (void *)&cmd_config_rxtx_queue_opname, + NULL, + }, +}; + +/* *** configure port rxq/txq deferred start on/off *** */ +struct cmd_config_deferred_start_rxtx_queue { + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t rxtxq; + uint16_t qid; + cmdline_fixed_string_t opname; + cmdline_fixed_string_t state; +}; + +static void +cmd_config_deferred_start_rxtx_queue_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_deferred_start_rxtx_queue *res = parsed_result; + struct rte_port *port; + uint8_t isrx; + uint8_t ison; + uint8_t needreconfig = 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + if (port_is_started(res->port_id) != 0) { + printf("Please stop port %u first\n", res->port_id); + return; + } + + port = &ports[res->port_id]; + + isrx = !strcmp(res->rxtxq, "rxq"); + + if (isrx && rx_queue_id_is_invalid(res->qid)) + return; + else if (!isrx && tx_queue_id_is_invalid(res->qid)) + return; + + ison = !strcmp(res->state, "on"); + + if (isrx && port->rx_conf[res->qid].rx_deferred_start != ison) { + port->rx_conf[res->qid].rx_deferred_start = ison; + needreconfig = 1; + } else if (!isrx && port->tx_conf[res->qid].tx_deferred_start != ison) { + port->tx_conf[res->qid].tx_deferred_start = ison; + needreconfig = 1; + } + + if (needreconfig) + cmd_reconfig_device_queue(res->port_id, 0, 1); +} + +cmdline_parse_token_string_t cmd_config_deferred_start_rxtx_queue_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue, + port, "port"); +cmdline_parse_token_num_t cmd_config_deferred_start_rxtx_queue_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue, + port_id, UINT16); +cmdline_parse_token_string_t 
cmd_config_deferred_start_rxtx_queue_rxtxq = + TOKEN_STRING_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue, + rxtxq, "rxq#txq"); +cmdline_parse_token_num_t cmd_config_deferred_start_rxtx_queue_qid = + TOKEN_NUM_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue, + qid, UINT16); +cmdline_parse_token_string_t cmd_config_deferred_start_rxtx_queue_opname = + TOKEN_STRING_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue, + opname, "deferred_start"); +cmdline_parse_token_string_t cmd_config_deferred_start_rxtx_queue_state = + TOKEN_STRING_INITIALIZER(struct cmd_config_deferred_start_rxtx_queue, + state, "on#off"); + +cmdline_parse_inst_t cmd_config_deferred_start_rxtx_queue = { + .f = cmd_config_deferred_start_rxtx_queue_parsed, + .data = NULL, + .help_str = "port <port_id> rxq|txq <queue_id> deferred_start on|off", + .tokens = { + (void *)&cmd_config_deferred_start_rxtx_queue_port, + (void *)&cmd_config_deferred_start_rxtx_queue_port_id, + (void *)&cmd_config_deferred_start_rxtx_queue_rxtxq, + (void *)&cmd_config_deferred_start_rxtx_queue_qid, + (void *)&cmd_config_deferred_start_rxtx_queue_opname, + (void *)&cmd_config_deferred_start_rxtx_queue_state, + NULL, + }, +}; + +/* *** configure port rxq/txq setup *** */ +struct cmd_setup_rxtx_queue { + cmdline_fixed_string_t port; + portid_t portid; + cmdline_fixed_string_t rxtxq; + uint16_t qid; + cmdline_fixed_string_t setup; +}; + +/* Common CLI fields for queue setup */ +cmdline_parse_token_string_t cmd_setup_rxtx_queue_port = + TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, port, "port"); +cmdline_parse_token_num_t cmd_setup_rxtx_queue_portid = + TOKEN_NUM_INITIALIZER(struct cmd_setup_rxtx_queue, portid, UINT16); +cmdline_parse_token_string_t cmd_setup_rxtx_queue_rxtxq = + TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, rxtxq, "rxq#txq"); +cmdline_parse_token_num_t cmd_setup_rxtx_queue_qid = + TOKEN_NUM_INITIALIZER(struct cmd_setup_rxtx_queue, qid, UINT16); +cmdline_parse_token_string_t cmd_setup_rxtx_queue_setup = + TOKEN_STRING_INITIALIZER(struct cmd_setup_rxtx_queue, setup, "setup"); + +static void +cmd_setup_rxtx_queue_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_setup_rxtx_queue *res = parsed_result; + struct rte_port *port; + struct rte_mempool *mp; + unsigned int socket_id; + uint8_t isrx = 0; + int ret; + + if (port_id_is_invalid(res->portid, ENABLED_WARN)) + return; + + if (res->portid == (portid_t)RTE_PORT_ALL) { + printf("Invalid port id\n"); + return; + } + + if (!strcmp(res->rxtxq, "rxq")) + isrx = 1; + else if (!strcmp(res->rxtxq, "txq")) + isrx = 0; + else { + printf("Unknown parameter\n"); + return; + } + + if (isrx && rx_queue_id_is_invalid(res->qid)) { + printf("Invalid rx queue\n"); + return; + } else if (!isrx && tx_queue_id_is_invalid(res->qid)) { + printf("Invalid tx queue\n"); + return; + } + + port = &ports[res->portid]; + if (isrx) { + socket_id = rxring_numa[res->portid]; + if (!numa_support || socket_id == NUMA_NO_CONFIG) + socket_id = port->socket_id; + + mp = mbuf_pool_find(socket_id); + if (mp == NULL) { + printf("Failed to setup RX queue: " + "No mempool allocation" + " on the socket %d\n", + rxring_numa[res->portid]); + return; + } + ret = rte_eth_rx_queue_setup(res->portid, + res->qid, + port->nb_rx_desc[res->qid], + socket_id, + &port->rx_conf[res->qid], + mp); + if (ret) + printf("Failed to setup RX queue\n"); + } else { + socket_id = txring_numa[res->portid]; + if (!numa_support || socket_id == NUMA_NO_CONFIG) + 
socket_id = port->socket_id; + + ret = rte_eth_tx_queue_setup(res->portid, + res->qid, + port->nb_tx_desc[res->qid], + socket_id, + &port->tx_conf[res->qid]); + if (ret) + printf("Failed to setup TX queue\n"); + } +} + +cmdline_parse_inst_t cmd_setup_rxtx_queue = { + .f = cmd_setup_rxtx_queue_parsed, + .data = NULL, + .help_str = "port <port_id> rxq|txq <queue_idx> setup", + .tokens = { + (void *)&cmd_setup_rxtx_queue_port, + (void *)&cmd_setup_rxtx_queue_portid, + (void *)&cmd_setup_rxtx_queue_rxtxq, + (void *)&cmd_setup_rxtx_queue_qid, + (void *)&cmd_setup_rxtx_queue_setup, + NULL, + }, +}; + + +/* *** Configure RSS RETA *** */ +struct cmd_config_rss_reta { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + portid_t port_id; + cmdline_fixed_string_t name; + cmdline_fixed_string_t list_name; + cmdline_fixed_string_t list_of_items; +}; + +static int +parse_reta_config(const char *str, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t nb_entries) +{ + int i; + unsigned size; + uint16_t hash_index, idx, shift; + uint16_t nb_queue; + char s[256]; + const char *p, *p0 = str; + char *end; + enum fieldnames { + FLD_HASH_INDEX = 0, + FLD_QUEUE, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + + while ((p = strchr(p0,'(')) != NULL) { + ++p; + if((p0 = strchr(p,')')) == NULL) + return -1; + + size = p0 - p; + if(size >= sizeof(s)) + return -1; + + snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++) { + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || + int_fld[i] > 65535) + return -1; + } + + hash_index = (uint16_t)int_fld[FLD_HASH_INDEX]; + nb_queue = (uint16_t)int_fld[FLD_QUEUE]; + + if (hash_index >= nb_entries) { + printf("Invalid RETA hash index=%d\n", hash_index); + return -1; + } + + idx = hash_index / RTE_RETA_GROUP_SIZE; + shift = hash_index % RTE_RETA_GROUP_SIZE; + reta_conf[idx].mask |= (1ULL << shift); + reta_conf[idx].reta[shift] = nb_queue; + } + + return 0; +} + +static void +cmd_set_rss_reta_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int ret; + struct rte_eth_dev_info dev_info; + struct rte_eth_rss_reta_entry64 reta_conf[8]; + struct cmd_config_rss_reta *res = parsed_result; + + ret = eth_dev_info_get_print_err(res->port_id, &dev_info); + if (ret != 0) + return; + + if (dev_info.reta_size == 0) { + printf("Redirection table size is 0 which is " + "invalid for RSS\n"); + return; + } else + printf("The reta size of port %d is %u\n", + res->port_id, dev_info.reta_size); + if (dev_info.reta_size > ETH_RSS_RETA_SIZE_512) { + printf("Currently do not support more than %u entries of " + "redirection table\n", ETH_RSS_RETA_SIZE_512); + return; + } + + memset(reta_conf, 0, sizeof(reta_conf)); + if (!strcmp(res->list_name, "reta")) { + if (parse_reta_config(res->list_of_items, reta_conf, + dev_info.reta_size)) { + printf("Invalid RSS Redirection Table " + "config entered\n"); + return; + } + ret = rte_eth_dev_rss_reta_update(res->port_id, + reta_conf, dev_info.reta_size); + if (ret != 0) + printf("Bad redirection table parameter, " + "return code = %d \n", ret); + } +} + +cmdline_parse_token_string_t cmd_config_rss_reta_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, port, "port"); +cmdline_parse_token_string_t cmd_config_rss_reta_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, keyword, "config"); 
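+/*
+ * Usage example (illustrative values, following parse_reta_config() above):
+ * "port config 0 rss reta (0,0)(1,1)" sets two redirection-table entries,
+ * mapping RSS hash indexes 0 and 1 of port 0 to queues 0 and 1; each
+ * "(hash_index,queue)" pair fills one RETA slot.
+ */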
+cmdline_parse_token_num_t cmd_config_rss_reta_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_rss_reta, port_id, UINT16); +cmdline_parse_token_string_t cmd_config_rss_reta_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, name, "rss"); +cmdline_parse_token_string_t cmd_config_rss_reta_list_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, list_name, "reta"); +cmdline_parse_token_string_t cmd_config_rss_reta_list_of_items = + TOKEN_STRING_INITIALIZER(struct cmd_config_rss_reta, list_of_items, + NULL); +cmdline_parse_inst_t cmd_config_rss_reta = { + .f = cmd_set_rss_reta_parsed, + .data = NULL, + .help_str = "port config <port_id> rss reta <hash,queue[,hash,queue]*>", + .tokens = { + (void *)&cmd_config_rss_reta_port, + (void *)&cmd_config_rss_reta_keyword, + (void *)&cmd_config_rss_reta_port_id, + (void *)&cmd_config_rss_reta_name, + (void *)&cmd_config_rss_reta_list_name, + (void *)&cmd_config_rss_reta_list_of_items, + NULL, + }, +}; + +/* *** SHOW PORT RETA INFO *** */ +struct cmd_showport_reta { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t rss; + cmdline_fixed_string_t reta; + uint16_t size; + cmdline_fixed_string_t list_of_items; +}; + +static int +showport_parse_reta_config(struct rte_eth_rss_reta_entry64 *conf, + uint16_t nb_entries, + char *str) +{ + uint32_t size; + const char *p, *p0 = str; + char s[256]; + char *end; + char *str_fld[8]; + uint16_t i; + uint16_t num = (nb_entries + RTE_RETA_GROUP_SIZE - 1) / + RTE_RETA_GROUP_SIZE; + int ret; + + p = strchr(p0, '('); + if (p == NULL) + return -1; + p++; + p0 = strchr(p, ')'); + if (p0 == NULL) + return -1; + size = p0 - p; + if (size >= sizeof(s)) { + printf("The string size exceeds the internal buffer size\n"); + return -1; + } + snprintf(s, sizeof(s), "%.*s", size, p); + ret = rte_strsplit(s, sizeof(s), str_fld, num, ','); + if (ret <= 0 || ret != num) { + printf("The bits of masks do not match the number of " + "reta entries: %u\n", num); + return -1; + } + for (i = 0; i < ret; i++) + conf[i].mask = (uint64_t)strtoul(str_fld[i], &end, 0); + + return 0; +} + +static void +cmd_showport_reta_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_showport_reta *res = parsed_result; + struct rte_eth_rss_reta_entry64 reta_conf[8]; + struct rte_eth_dev_info dev_info; + uint16_t max_reta_size; + int ret; + + ret = eth_dev_info_get_print_err(res->port_id, &dev_info); + if (ret != 0) + return; + + max_reta_size = RTE_MIN(dev_info.reta_size, ETH_RSS_RETA_SIZE_512); + if (res->size == 0 || res->size > max_reta_size) { + printf("Invalid redirection table size: %u (1-%u)\n", + res->size, max_reta_size); + return; + } + + memset(reta_conf, 0, sizeof(reta_conf)); + if (showport_parse_reta_config(reta_conf, res->size, + res->list_of_items) < 0) { + printf("Invalid string: %s for reta masks\n", + res->list_of_items); + return; + } + port_rss_reta_info(res->port_id, reta_conf, res->size); +} + +cmdline_parse_token_string_t cmd_showport_reta_show = + TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, show, "show"); +cmdline_parse_token_string_t cmd_showport_reta_port = + TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, port, "port"); +cmdline_parse_token_num_t cmd_showport_reta_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_showport_reta, port_id, UINT16); +cmdline_parse_token_string_t cmd_showport_reta_rss = + TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, rss, "rss"); +cmdline_parse_token_string_t 
cmd_showport_reta_reta = + TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, reta, "reta"); +cmdline_parse_token_num_t cmd_showport_reta_size = + TOKEN_NUM_INITIALIZER(struct cmd_showport_reta, size, UINT16); +cmdline_parse_token_string_t cmd_showport_reta_list_of_items = + TOKEN_STRING_INITIALIZER(struct cmd_showport_reta, + list_of_items, NULL); + +cmdline_parse_inst_t cmd_showport_reta = { + .f = cmd_showport_reta_parsed, + .data = NULL, + .help_str = "show port <port_id> rss reta <size> <mask0[,mask1]*>", + .tokens = { + (void *)&cmd_showport_reta_show, + (void *)&cmd_showport_reta_port, + (void *)&cmd_showport_reta_port_id, + (void *)&cmd_showport_reta_rss, + (void *)&cmd_showport_reta_reta, + (void *)&cmd_showport_reta_size, + (void *)&cmd_showport_reta_list_of_items, + NULL, + }, +}; + +/* *** Show RSS hash configuration *** */ +struct cmd_showport_rss_hash { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t rss_hash; + cmdline_fixed_string_t rss_type; + cmdline_fixed_string_t key; /* optional argument */ +}; + +static void cmd_showport_rss_hash_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + void *show_rss_key) +{ + struct cmd_showport_rss_hash *res = parsed_result; + + port_rss_hash_conf_show(res->port_id, show_rss_key != NULL); +} + +cmdline_parse_token_string_t cmd_showport_rss_hash_show = + TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, show, "show"); +cmdline_parse_token_string_t cmd_showport_rss_hash_port = + TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, port, "port"); +cmdline_parse_token_num_t cmd_showport_rss_hash_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_showport_rss_hash, port_id, UINT16); +cmdline_parse_token_string_t cmd_showport_rss_hash_rss_hash = + TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, rss_hash, + "rss-hash"); +cmdline_parse_token_string_t cmd_showport_rss_hash_rss_key = + TOKEN_STRING_INITIALIZER(struct cmd_showport_rss_hash, key, "key"); + +cmdline_parse_inst_t cmd_showport_rss_hash = { + .f = cmd_showport_rss_hash_parsed, + .data = NULL, + .help_str = "show port <port_id> rss-hash", + .tokens = { + (void *)&cmd_showport_rss_hash_show, + (void *)&cmd_showport_rss_hash_port, + (void *)&cmd_showport_rss_hash_port_id, + (void *)&cmd_showport_rss_hash_rss_hash, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_showport_rss_hash_key = { + .f = cmd_showport_rss_hash_parsed, + .data = (void *)1, + .help_str = "show port <port_id> rss-hash key", + .tokens = { + (void *)&cmd_showport_rss_hash_show, + (void *)&cmd_showport_rss_hash_port, + (void *)&cmd_showport_rss_hash_port_id, + (void *)&cmd_showport_rss_hash_rss_hash, + (void *)&cmd_showport_rss_hash_rss_key, + NULL, + }, +}; + +/* *** Configure DCB *** */ +struct cmd_config_dcb { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t dcb; + cmdline_fixed_string_t vt; + cmdline_fixed_string_t vt_en; + uint8_t num_tcs; + cmdline_fixed_string_t pfc; + cmdline_fixed_string_t pfc_en; +}; + +static void +cmd_config_dcb_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_dcb *res = parsed_result; + portid_t port_id = res->port_id; + struct rte_port *port; + uint8_t pfc_en; + int ret; + + port = &ports[port_id]; + /** Check if the port is not started **/ + if (port->port_status != RTE_PORT_STOPPED) { + printf("Please stop port %d first\n", port_id); + return; + } + + if ((res->num_tcs != ETH_4_TCS) && 
(res->num_tcs != ETH_8_TCS)) { + printf("The invalid number of traffic class," + " only 4 or 8 allowed.\n"); + return; + } + + if (nb_fwd_lcores < res->num_tcs) { + printf("nb_cores shouldn't be less than number of TCs.\n"); + return; + } + if (!strncmp(res->pfc_en, "on", 2)) + pfc_en = 1; + else + pfc_en = 0; + + /* DCB in VT mode */ + if (!strncmp(res->vt_en, "on", 2)) + ret = init_port_dcb_config(port_id, DCB_VT_ENABLED, + (enum rte_eth_nb_tcs)res->num_tcs, + pfc_en); + else + ret = init_port_dcb_config(port_id, DCB_ENABLED, + (enum rte_eth_nb_tcs)res->num_tcs, + pfc_en); + + + if (ret != 0) { + printf("Cannot initialize network ports.\n"); + return; + } + + cmd_reconfig_device_queue(port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_dcb_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, port, "port"); +cmdline_parse_token_string_t cmd_config_dcb_config = + TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, config, "config"); +cmdline_parse_token_num_t cmd_config_dcb_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_dcb, port_id, UINT16); +cmdline_parse_token_string_t cmd_config_dcb_dcb = + TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, dcb, "dcb"); +cmdline_parse_token_string_t cmd_config_dcb_vt = + TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, vt, "vt"); +cmdline_parse_token_string_t cmd_config_dcb_vt_en = + TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, vt_en, "on#off"); +cmdline_parse_token_num_t cmd_config_dcb_num_tcs = + TOKEN_NUM_INITIALIZER(struct cmd_config_dcb, num_tcs, UINT8); +cmdline_parse_token_string_t cmd_config_dcb_pfc= + TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, pfc, "pfc"); +cmdline_parse_token_string_t cmd_config_dcb_pfc_en = + TOKEN_STRING_INITIALIZER(struct cmd_config_dcb, pfc_en, "on#off"); + +cmdline_parse_inst_t cmd_config_dcb = { + .f = cmd_config_dcb_parsed, + .data = NULL, + .help_str = "port config <port-id> dcb vt on|off <num_tcs> pfc on|off", + .tokens = { + (void *)&cmd_config_dcb_port, + (void *)&cmd_config_dcb_config, + (void *)&cmd_config_dcb_port_id, + (void *)&cmd_config_dcb_dcb, + (void *)&cmd_config_dcb_vt, + (void *)&cmd_config_dcb_vt_en, + (void *)&cmd_config_dcb_num_tcs, + (void *)&cmd_config_dcb_pfc, + (void *)&cmd_config_dcb_pfc_en, + NULL, + }, +}; + +/* *** configure number of packets per burst *** */ +struct cmd_config_burst { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t name; + uint16_t value; +}; + +static void +cmd_config_burst_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_burst *res = parsed_result; + struct rte_eth_dev_info dev_info; + uint16_t rec_nb_pkts; + int ret; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + if (!strcmp(res->name, "burst")) { + if (res->value == 0) { + /* If user gives a value of zero, query the PMD for + * its recommended Rx burst size. Testpmd uses a single + * size for all ports, so assume all ports are the same + * NIC model and use the values from Port 0. 
+ */ + ret = eth_dev_info_get_print_err(0, &dev_info); + if (ret != 0) + return; + + rec_nb_pkts = dev_info.default_rxportconf.burst_size; + + if (rec_nb_pkts == 0) { + printf("PMD does not recommend a burst size.\n" + "User provided value must be between" + " 1 and %d\n", MAX_PKT_BURST); + return; + } else if (rec_nb_pkts > MAX_PKT_BURST) { + printf("PMD recommended burst size of %d" + " exceeds maximum value of %d\n", + rec_nb_pkts, MAX_PKT_BURST); + return; + } + printf("Using PMD-provided burst value of %d\n", + rec_nb_pkts); + nb_pkt_per_burst = rec_nb_pkts; + } else if (res->value > MAX_PKT_BURST) { + printf("burst must be >= 1 && <= %d\n", MAX_PKT_BURST); + return; + } else + nb_pkt_per_burst = res->value; + } else { + printf("Unknown parameter\n"); + return; + } + + init_port_config(); + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_burst_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_burst, port, "port"); +cmdline_parse_token_string_t cmd_config_burst_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_burst, keyword, "config"); +cmdline_parse_token_string_t cmd_config_burst_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_burst, all, "all"); +cmdline_parse_token_string_t cmd_config_burst_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_burst, name, "burst"); +cmdline_parse_token_num_t cmd_config_burst_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_burst, value, UINT16); + +cmdline_parse_inst_t cmd_config_burst = { + .f = cmd_config_burst_parsed, + .data = NULL, + .help_str = "port config all burst <value>", + .tokens = { + (void *)&cmd_config_burst_port, + (void *)&cmd_config_burst_keyword, + (void *)&cmd_config_burst_all, + (void *)&cmd_config_burst_name, + (void *)&cmd_config_burst_value, + NULL, + }, +}; + +/* *** configure rx/tx queues *** */ +struct cmd_config_thresh { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t name; + uint8_t value; +}; + +static void +cmd_config_thresh_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_thresh *res = parsed_result; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + if (!strcmp(res->name, "txpt")) + tx_pthresh = res->value; + else if(!strcmp(res->name, "txht")) + tx_hthresh = res->value; + else if(!strcmp(res->name, "txwt")) + tx_wthresh = res->value; + else if(!strcmp(res->name, "rxpt")) + rx_pthresh = res->value; + else if(!strcmp(res->name, "rxht")) + rx_hthresh = res->value; + else if(!strcmp(res->name, "rxwt")) + rx_wthresh = res->value; + else { + printf("Unknown parameter\n"); + return; + } + + init_port_config(); + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_thresh_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_thresh, port, "port"); +cmdline_parse_token_string_t cmd_config_thresh_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_thresh, keyword, "config"); +cmdline_parse_token_string_t cmd_config_thresh_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_thresh, all, "all"); +cmdline_parse_token_string_t cmd_config_thresh_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_thresh, name, + "txpt#txht#txwt#rxpt#rxht#rxwt"); +cmdline_parse_token_num_t cmd_config_thresh_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_thresh, value, UINT8); + +cmdline_parse_inst_t cmd_config_thresh = { + .f = cmd_config_thresh_parsed, + .data = NULL, + 
.help_str = "port config all txpt|txht|txwt|rxpt|rxht|rxwt <value>", + .tokens = { + (void *)&cmd_config_thresh_port, + (void *)&cmd_config_thresh_keyword, + (void *)&cmd_config_thresh_all, + (void *)&cmd_config_thresh_name, + (void *)&cmd_config_thresh_value, + NULL, + }, +}; + +/* *** configure free/rs threshold *** */ +struct cmd_config_threshold { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t all; + cmdline_fixed_string_t name; + uint16_t value; +}; + +static void +cmd_config_threshold_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_threshold *res = parsed_result; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + if (!strcmp(res->name, "txfreet")) + tx_free_thresh = res->value; + else if (!strcmp(res->name, "txrst")) + tx_rs_thresh = res->value; + else if (!strcmp(res->name, "rxfreet")) + rx_free_thresh = res->value; + else { + printf("Unknown parameter\n"); + return; + } + + init_port_config(); + + cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); +} + +cmdline_parse_token_string_t cmd_config_threshold_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_threshold, port, "port"); +cmdline_parse_token_string_t cmd_config_threshold_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_threshold, keyword, + "config"); +cmdline_parse_token_string_t cmd_config_threshold_all = + TOKEN_STRING_INITIALIZER(struct cmd_config_threshold, all, "all"); +cmdline_parse_token_string_t cmd_config_threshold_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_threshold, name, + "txfreet#txrst#rxfreet"); +cmdline_parse_token_num_t cmd_config_threshold_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_threshold, value, UINT16); + +cmdline_parse_inst_t cmd_config_threshold = { + .f = cmd_config_threshold_parsed, + .data = NULL, + .help_str = "port config all txfreet|txrst|rxfreet <value>", + .tokens = { + (void *)&cmd_config_threshold_port, + (void *)&cmd_config_threshold_keyword, + (void *)&cmd_config_threshold_all, + (void *)&cmd_config_threshold_name, + (void *)&cmd_config_threshold_value, + NULL, + }, +}; + +/* *** stop *** */ +struct cmd_stop_result { + cmdline_fixed_string_t stop; +}; + +static void cmd_stop_parsed(__rte_unused void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + stop_packet_forwarding(); +} + +cmdline_parse_token_string_t cmd_stop_stop = + TOKEN_STRING_INITIALIZER(struct cmd_stop_result, stop, "stop"); + +cmdline_parse_inst_t cmd_stop = { + .f = cmd_stop_parsed, + .data = NULL, + .help_str = "stop: Stop packet forwarding", + .tokens = { + (void *)&cmd_stop_stop, + NULL, + }, +}; + +/* *** SET CORELIST and PORTLIST CONFIGURATION *** */ + +unsigned int +parse_item_list(char* str, const char* item_name, unsigned int max_items, + unsigned int *parsed_items, int check_unique_values) +{ + unsigned int nb_item; + unsigned int value; + unsigned int i; + unsigned int j; + int value_ok; + char c; + + /* + * First parse all items in the list and store their value. + */ + value = 0; + nb_item = 0; + value_ok = 0; + for (i = 0; i < strnlen(str, STR_TOKEN_SIZE); i++) { + c = str[i]; + if ((c >= '0') && (c <= '9')) { + value = (unsigned int) (value * 10 + (c - '0')); + value_ok = 1; + continue; + } + if (c != ',') { + printf("character %c is not a decimal digit\n", c); + return 0; + } + if (! 
value_ok) { + printf("No valid value before comma\n"); + return 0; + } + if (nb_item < max_items) { + parsed_items[nb_item] = value; + value_ok = 0; + value = 0; + } + nb_item++; + } + if (nb_item >= max_items) { + printf("Number of %s = %u > %u (maximum items)\n", + item_name, nb_item + 1, max_items); + return 0; + } + parsed_items[nb_item++] = value; + if (! check_unique_values) + return nb_item; + + /* + * Then, check that all values in the list are differents. + * No optimization here... + */ + for (i = 0; i < nb_item; i++) { + for (j = i + 1; j < nb_item; j++) { + if (parsed_items[j] == parsed_items[i]) { + printf("duplicated %s %u at index %u and %u\n", + item_name, parsed_items[i], i, j); + return 0; + } + } + } + return nb_item; +} + +struct cmd_set_list_result { + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t list_name; + cmdline_fixed_string_t list_of_items; +}; + +static void cmd_set_list_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_list_result *res; + union { + unsigned int lcorelist[RTE_MAX_LCORE]; + unsigned int portlist[RTE_MAX_ETHPORTS]; + } parsed_items; + unsigned int nb_item; + + if (test_done == 0) { + printf("Please stop forwarding first\n"); + return; + } + + res = parsed_result; + if (!strcmp(res->list_name, "corelist")) { + nb_item = parse_item_list(res->list_of_items, "core", + RTE_MAX_LCORE, + parsed_items.lcorelist, 1); + if (nb_item > 0) { + set_fwd_lcores_list(parsed_items.lcorelist, nb_item); + fwd_config_setup(); + } + return; + } + if (!strcmp(res->list_name, "portlist")) { + nb_item = parse_item_list(res->list_of_items, "port", + RTE_MAX_ETHPORTS, + parsed_items.portlist, 1); + if (nb_item > 0) { + set_fwd_ports_list(parsed_items.portlist, nb_item); + fwd_config_setup(); + } + } +} + +cmdline_parse_token_string_t cmd_set_list_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, cmd_keyword, + "set"); +cmdline_parse_token_string_t cmd_set_list_name = + TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, list_name, + "corelist#portlist"); +cmdline_parse_token_string_t cmd_set_list_of_items = + TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, list_of_items, + NULL); + +cmdline_parse_inst_t cmd_set_fwd_list = { + .f = cmd_set_list_parsed, + .data = NULL, + .help_str = "set corelist|portlist <list0[,list1]*>", + .tokens = { + (void *)&cmd_set_list_keyword, + (void *)&cmd_set_list_name, + (void *)&cmd_set_list_of_items, + NULL, + }, +}; + +/* *** SET COREMASK and PORTMASK CONFIGURATION *** */ + +struct cmd_setmask_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t mask; + uint64_t hexavalue; +}; + +static void cmd_set_mask_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_setmask_result *res = parsed_result; + + if (test_done == 0) { + printf("Please stop forwarding first\n"); + return; + } + if (!strcmp(res->mask, "coremask")) { + set_fwd_lcores_mask(res->hexavalue); + fwd_config_setup(); + } else if (!strcmp(res->mask, "portmask")) { + set_fwd_ports_mask(res->hexavalue); + fwd_config_setup(); + } +} + +cmdline_parse_token_string_t cmd_setmask_set = + TOKEN_STRING_INITIALIZER(struct cmd_setmask_result, set, "set"); +cmdline_parse_token_string_t cmd_setmask_mask = + TOKEN_STRING_INITIALIZER(struct cmd_setmask_result, mask, + "coremask#portmask"); +cmdline_parse_token_num_t cmd_setmask_value = + TOKEN_NUM_INITIALIZER(struct cmd_setmask_result, hexavalue, UINT64); + +cmdline_parse_inst_t 
cmd_set_fwd_mask = { + .f = cmd_set_mask_parsed, + .data = NULL, + .help_str = "set coremask|portmask <hexadecimal value>", + .tokens = { + (void *)&cmd_setmask_set, + (void *)&cmd_setmask_mask, + (void *)&cmd_setmask_value, + NULL, + }, +}; + +/* + * SET NBPORT, NBCORE, PACKET BURST, and VERBOSE LEVEL CONFIGURATION + */ +struct cmd_set_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t what; + uint16_t value; +}; + +static void cmd_set_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_result *res = parsed_result; + if (!strcmp(res->what, "nbport")) { + set_fwd_ports_number(res->value); + fwd_config_setup(); + } else if (!strcmp(res->what, "nbcore")) { + set_fwd_lcores_number(res->value); + fwd_config_setup(); + } else if (!strcmp(res->what, "burst")) + set_nb_pkt_per_burst(res->value); + else if (!strcmp(res->what, "verbose")) + set_verbose_level(res->value); +} + +cmdline_parse_token_string_t cmd_set_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_result, set, "set"); +cmdline_parse_token_string_t cmd_set_what = + TOKEN_STRING_INITIALIZER(struct cmd_set_result, what, + "nbport#nbcore#burst#verbose"); +cmdline_parse_token_num_t cmd_set_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_result, value, UINT16); + +cmdline_parse_inst_t cmd_set_numbers = { + .f = cmd_set_parsed, + .data = NULL, + .help_str = "set nbport|nbcore|burst|verbose <value>", + .tokens = { + (void *)&cmd_set_set, + (void *)&cmd_set_what, + (void *)&cmd_set_value, + NULL, + }, +}; + +/* *** SET LOG LEVEL CONFIGURATION *** */ + +struct cmd_set_log_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t log; + cmdline_fixed_string_t type; + uint32_t level; +}; + +static void +cmd_set_log_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_log_result *res; + int ret; + + res = parsed_result; + if (!strcmp(res->type, "global")) + rte_log_set_global_level(res->level); + else { + ret = rte_log_set_level_regexp(res->type, res->level); + if (ret < 0) + printf("Unable to set log level\n"); + } +} + +cmdline_parse_token_string_t cmd_set_log_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_log_result, set, "set"); +cmdline_parse_token_string_t cmd_set_log_log = + TOKEN_STRING_INITIALIZER(struct cmd_set_log_result, log, "log"); +cmdline_parse_token_string_t cmd_set_log_type = + TOKEN_STRING_INITIALIZER(struct cmd_set_log_result, type, NULL); +cmdline_parse_token_num_t cmd_set_log_level = + TOKEN_NUM_INITIALIZER(struct cmd_set_log_result, level, UINT32); + +cmdline_parse_inst_t cmd_set_log = { + .f = cmd_set_log_parsed, + .data = NULL, + .help_str = "set log global|<type> <level>", + .tokens = { + (void *)&cmd_set_log_set, + (void *)&cmd_set_log_log, + (void *)&cmd_set_log_type, + (void *)&cmd_set_log_level, + NULL, + }, +}; + +/* *** SET SEGMENT LENGTHS OF TXONLY PACKETS *** */ + +struct cmd_set_txpkts_result { + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t txpkts; + cmdline_fixed_string_t seg_lengths; +}; + +static void +cmd_set_txpkts_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_txpkts_result *res; + unsigned seg_lengths[RTE_MAX_SEGS_PER_PKT]; + unsigned int nb_segs; + + res = parsed_result; + nb_segs = parse_item_list(res->seg_lengths, "segment lengths", + RTE_MAX_SEGS_PER_PKT, seg_lengths, 0); + if (nb_segs > 0) + set_tx_pkt_segments(seg_lengths, nb_segs); +} + +cmdline_parse_token_string_t cmd_set_txpkts_keyword = 
+ TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result, + cmd_keyword, "set"); +cmdline_parse_token_string_t cmd_set_txpkts_name = + TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result, + txpkts, "txpkts"); +cmdline_parse_token_string_t cmd_set_txpkts_lengths = + TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result, + seg_lengths, NULL); + +cmdline_parse_inst_t cmd_set_txpkts = { + .f = cmd_set_txpkts_parsed, + .data = NULL, + .help_str = "set txpkts <len0[,len1]*>", + .tokens = { + (void *)&cmd_set_txpkts_keyword, + (void *)&cmd_set_txpkts_name, + (void *)&cmd_set_txpkts_lengths, + NULL, + }, +}; + +/* *** SET COPY AND SPLIT POLICY ON TX PACKETS *** */ + +struct cmd_set_txsplit_result { + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t txsplit; + cmdline_fixed_string_t mode; +}; + +static void +cmd_set_txsplit_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_txsplit_result *res; + + res = parsed_result; + set_tx_pkt_split(res->mode); +} + +cmdline_parse_token_string_t cmd_set_txsplit_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result, + cmd_keyword, "set"); +cmdline_parse_token_string_t cmd_set_txsplit_name = + TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result, + txsplit, "txsplit"); +cmdline_parse_token_string_t cmd_set_txsplit_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_txsplit_result, + mode, NULL); + +cmdline_parse_inst_t cmd_set_txsplit = { + .f = cmd_set_txsplit_parsed, + .data = NULL, + .help_str = "set txsplit on|off|rand", + .tokens = { + (void *)&cmd_set_txsplit_keyword, + (void *)&cmd_set_txsplit_name, + (void *)&cmd_set_txsplit_mode, + NULL, + }, +}; + +/* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */ +struct cmd_rx_vlan_filter_all_result { + cmdline_fixed_string_t rx_vlan; + cmdline_fixed_string_t what; + cmdline_fixed_string_t all; + portid_t port_id; +}; + +static void +cmd_rx_vlan_filter_all_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_rx_vlan_filter_all_result *res = parsed_result; + + if (!strcmp(res->what, "add")) + rx_vlan_all_filter_set(res->port_id, 1); + else + rx_vlan_all_filter_set(res->port_id, 0); +} + +cmdline_parse_token_string_t cmd_rx_vlan_filter_all_rx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result, + rx_vlan, "rx_vlan"); +cmdline_parse_token_string_t cmd_rx_vlan_filter_all_what = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result, + what, "add#rm"); +cmdline_parse_token_string_t cmd_rx_vlan_filter_all_all = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result, + all, "all"); +cmdline_parse_token_num_t cmd_rx_vlan_filter_all_portid = + TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_all_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_rx_vlan_filter_all = { + .f = cmd_rx_vlan_filter_all_parsed, + .data = NULL, + .help_str = "rx_vlan add|rm all <port_id>: " + "Add/Remove all identifiers to/from the set of VLAN " + "identifiers filtered by a port", + .tokens = { + (void *)&cmd_rx_vlan_filter_all_rx_vlan, + (void *)&cmd_rx_vlan_filter_all_what, + (void *)&cmd_rx_vlan_filter_all_all, + (void *)&cmd_rx_vlan_filter_all_portid, + NULL, + }, +}; + +/* *** VLAN OFFLOAD SET ON A PORT *** */ +struct cmd_vlan_offload_result { + cmdline_fixed_string_t vlan; + cmdline_fixed_string_t set; + cmdline_fixed_string_t vlan_type; + cmdline_fixed_string_t what; + cmdline_fixed_string_t on; + cmdline_fixed_string_t port_id; +}; + +static 
void +cmd_vlan_offload_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int on; + struct cmd_vlan_offload_result *res = parsed_result; + char *str; + int i, len = 0; + portid_t port_id = 0; + unsigned int tmp; + + str = res->port_id; + len = strnlen(str, STR_TOKEN_SIZE); + i = 0; + /* Get port_id first */ + while(i < len){ + if(str[i] == ',') + break; + + i++; + } + str[i]='\0'; + tmp = strtoul(str, NULL, 0); + /* If port_id greater that what portid_t can represent, return */ + if(tmp >= RTE_MAX_ETHPORTS) + return; + port_id = (portid_t)tmp; + + if (!strcmp(res->on, "on")) + on = 1; + else + on = 0; + + if (!strcmp(res->what, "strip")) + rx_vlan_strip_set(port_id, on); + else if(!strcmp(res->what, "stripq")){ + uint16_t queue_id = 0; + + /* No queue_id, return */ + if(i + 1 >= len) { + printf("must specify (port,queue_id)\n"); + return; + } + tmp = strtoul(str + i + 1, NULL, 0); + /* If queue_id greater that what 16-bits can represent, return */ + if(tmp > 0xffff) + return; + + queue_id = (uint16_t)tmp; + rx_vlan_strip_set_on_queue(port_id, queue_id, on); + } + else if (!strcmp(res->what, "filter")) + rx_vlan_filter_set(port_id, on); + else if (!strcmp(res->what, "qinq_strip")) + rx_vlan_qinq_strip_set(port_id, on); + else + vlan_extend_set(port_id, on); + + return; +} + +cmdline_parse_token_string_t cmd_vlan_offload_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result, + vlan, "vlan"); +cmdline_parse_token_string_t cmd_vlan_offload_set = + TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result, + set, "set"); +cmdline_parse_token_string_t cmd_vlan_offload_what = + TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result, + what, "strip#filter#qinq_strip#extend#stripq"); +cmdline_parse_token_string_t cmd_vlan_offload_on = + TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result, + on, "on#off"); +cmdline_parse_token_string_t cmd_vlan_offload_portid = + TOKEN_STRING_INITIALIZER(struct cmd_vlan_offload_result, + port_id, NULL); + +cmdline_parse_inst_t cmd_vlan_offload = { + .f = cmd_vlan_offload_parsed, + .data = NULL, + .help_str = "vlan set strip|filter|qinq_strip|extend|stripq on|off " + "<port_id[,queue_id]>: " + "Strip/Filter/QinQ for rx side Extend for both rx/tx sides", + .tokens = { + (void *)&cmd_vlan_offload_vlan, + (void *)&cmd_vlan_offload_set, + (void *)&cmd_vlan_offload_what, + (void *)&cmd_vlan_offload_on, + (void *)&cmd_vlan_offload_portid, + NULL, + }, +}; + +/* *** VLAN TPID SET ON A PORT *** */ +struct cmd_vlan_tpid_result { + cmdline_fixed_string_t vlan; + cmdline_fixed_string_t set; + cmdline_fixed_string_t vlan_type; + cmdline_fixed_string_t what; + uint16_t tp_id; + portid_t port_id; +}; + +static void +cmd_vlan_tpid_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vlan_tpid_result *res = parsed_result; + enum rte_vlan_type vlan_type; + + if (!strcmp(res->vlan_type, "inner")) + vlan_type = ETH_VLAN_TYPE_INNER; + else if (!strcmp(res->vlan_type, "outer")) + vlan_type = ETH_VLAN_TYPE_OUTER; + else { + printf("Unknown vlan type\n"); + return; + } + vlan_tpid_set(res->port_id, vlan_type, res->tp_id); +} + +cmdline_parse_token_string_t cmd_vlan_tpid_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_vlan_tpid_result, + vlan, "vlan"); +cmdline_parse_token_string_t cmd_vlan_tpid_set = + TOKEN_STRING_INITIALIZER(struct cmd_vlan_tpid_result, + set, "set"); +cmdline_parse_token_string_t cmd_vlan_type = + TOKEN_STRING_INITIALIZER(struct cmd_vlan_tpid_result, + vlan_type, 
"inner#outer"); +cmdline_parse_token_string_t cmd_vlan_tpid_what = + TOKEN_STRING_INITIALIZER(struct cmd_vlan_tpid_result, + what, "tpid"); +cmdline_parse_token_num_t cmd_vlan_tpid_tpid = + TOKEN_NUM_INITIALIZER(struct cmd_vlan_tpid_result, + tp_id, UINT16); +cmdline_parse_token_num_t cmd_vlan_tpid_portid = + TOKEN_NUM_INITIALIZER(struct cmd_vlan_tpid_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_vlan_tpid = { + .f = cmd_vlan_tpid_parsed, + .data = NULL, + .help_str = "vlan set inner|outer tpid <tp_id> <port_id>: " + "Set the VLAN Ether type", + .tokens = { + (void *)&cmd_vlan_tpid_vlan, + (void *)&cmd_vlan_tpid_set, + (void *)&cmd_vlan_type, + (void *)&cmd_vlan_tpid_what, + (void *)&cmd_vlan_tpid_tpid, + (void *)&cmd_vlan_tpid_portid, + NULL, + }, +}; + +/* *** ADD/REMOVE A VLAN IDENTIFIER TO/FROM A PORT VLAN RX FILTER *** */ +struct cmd_rx_vlan_filter_result { + cmdline_fixed_string_t rx_vlan; + cmdline_fixed_string_t what; + uint16_t vlan_id; + portid_t port_id; +}; + +static void +cmd_rx_vlan_filter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_rx_vlan_filter_result *res = parsed_result; + + if (!strcmp(res->what, "add")) + rx_vft_set(res->port_id, res->vlan_id, 1); + else + rx_vft_set(res->port_id, res->vlan_id, 0); +} + +cmdline_parse_token_string_t cmd_rx_vlan_filter_rx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_result, + rx_vlan, "rx_vlan"); +cmdline_parse_token_string_t cmd_rx_vlan_filter_what = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_result, + what, "add#rm"); +cmdline_parse_token_num_t cmd_rx_vlan_filter_vlanid = + TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_result, + vlan_id, UINT16); +cmdline_parse_token_num_t cmd_rx_vlan_filter_portid = + TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_rx_vlan_filter = { + .f = cmd_rx_vlan_filter_parsed, + .data = NULL, + .help_str = "rx_vlan add|rm <vlan_id> <port_id>: " + "Add/Remove a VLAN identifier to/from the set of VLAN " + "identifiers filtered by a port", + .tokens = { + (void *)&cmd_rx_vlan_filter_rx_vlan, + (void *)&cmd_rx_vlan_filter_what, + (void *)&cmd_rx_vlan_filter_vlanid, + (void *)&cmd_rx_vlan_filter_portid, + NULL, + }, +}; + +/* *** ENABLE HARDWARE INSERTION OF VLAN HEADER IN TX PACKETS *** */ +struct cmd_tx_vlan_set_result { + cmdline_fixed_string_t tx_vlan; + cmdline_fixed_string_t set; + portid_t port_id; + uint16_t vlan_id; +}; + +static void +cmd_tx_vlan_set_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tx_vlan_set_result *res = parsed_result; + + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + tx_vlan_set(res->port_id, res->vlan_id); + + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_tx_vlan_set_tx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_result, + tx_vlan, "tx_vlan"); +cmdline_parse_token_string_t cmd_tx_vlan_set_set = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_result, + set, "set"); +cmdline_parse_token_num_t cmd_tx_vlan_set_portid = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_tx_vlan_set_vlanid = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result, + vlan_id, UINT16); + +cmdline_parse_inst_t cmd_tx_vlan_set = { + .f = cmd_tx_vlan_set_parsed, + .data = NULL, + .help_str = "tx_vlan set <port_id> <vlan_id>: " + 
"Enable hardware insertion of a single VLAN header " + "with a given TAG Identifier in packets sent on a port", + .tokens = { + (void *)&cmd_tx_vlan_set_tx_vlan, + (void *)&cmd_tx_vlan_set_set, + (void *)&cmd_tx_vlan_set_portid, + (void *)&cmd_tx_vlan_set_vlanid, + NULL, + }, +}; + +/* *** ENABLE HARDWARE INSERTION OF Double VLAN HEADER IN TX PACKETS *** */ +struct cmd_tx_vlan_set_qinq_result { + cmdline_fixed_string_t tx_vlan; + cmdline_fixed_string_t set; + portid_t port_id; + uint16_t vlan_id; + uint16_t vlan_id_outer; +}; + +static void +cmd_tx_vlan_set_qinq_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tx_vlan_set_qinq_result *res = parsed_result; + + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + tx_qinq_set(res->port_id, res->vlan_id, res->vlan_id_outer); + + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_tx_vlan_set_qinq_tx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_qinq_result, + tx_vlan, "tx_vlan"); +cmdline_parse_token_string_t cmd_tx_vlan_set_qinq_set = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_qinq_result, + set, "set"); +cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_portid = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_vlanid = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result, + vlan_id, UINT16); +cmdline_parse_token_num_t cmd_tx_vlan_set_qinq_vlanid_outer = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_qinq_result, + vlan_id_outer, UINT16); + +cmdline_parse_inst_t cmd_tx_vlan_set_qinq = { + .f = cmd_tx_vlan_set_qinq_parsed, + .data = NULL, + .help_str = "tx_vlan set <port_id> <vlan_id> <outer_vlan_id>: " + "Enable hardware insertion of double VLAN header " + "with given TAG Identifiers in packets sent on a port", + .tokens = { + (void *)&cmd_tx_vlan_set_qinq_tx_vlan, + (void *)&cmd_tx_vlan_set_qinq_set, + (void *)&cmd_tx_vlan_set_qinq_portid, + (void *)&cmd_tx_vlan_set_qinq_vlanid, + (void *)&cmd_tx_vlan_set_qinq_vlanid_outer, + NULL, + }, +}; + +/* *** ENABLE/DISABLE PORT BASED TX VLAN INSERTION *** */ +struct cmd_tx_vlan_set_pvid_result { + cmdline_fixed_string_t tx_vlan; + cmdline_fixed_string_t set; + cmdline_fixed_string_t pvid; + portid_t port_id; + uint16_t vlan_id; + cmdline_fixed_string_t mode; +}; + +static void +cmd_tx_vlan_set_pvid_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tx_vlan_set_pvid_result *res = parsed_result; + + if (strcmp(res->mode, "on") == 0) + tx_vlan_pvid_set(res->port_id, res->vlan_id, 1); + else + tx_vlan_pvid_set(res->port_id, res->vlan_id, 0); +} + +cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_tx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_pvid_result, + tx_vlan, "tx_vlan"); +cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_set = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_pvid_result, + set, "set"); +cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_pvid = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_pvid_result, + pvid, "pvid"); +cmdline_parse_token_num_t cmd_tx_vlan_set_pvid_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_pvid_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_tx_vlan_set_pvid_vlan_id = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_pvid_result, + vlan_id, UINT16); +cmdline_parse_token_string_t cmd_tx_vlan_set_pvid_mode = + 
TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_pvid_result, + mode, "on#off"); + +cmdline_parse_inst_t cmd_tx_vlan_set_pvid = { + .f = cmd_tx_vlan_set_pvid_parsed, + .data = NULL, + .help_str = "tx_vlan set pvid <port_id> <vlan_id> on|off", + .tokens = { + (void *)&cmd_tx_vlan_set_pvid_tx_vlan, + (void *)&cmd_tx_vlan_set_pvid_set, + (void *)&cmd_tx_vlan_set_pvid_pvid, + (void *)&cmd_tx_vlan_set_pvid_port_id, + (void *)&cmd_tx_vlan_set_pvid_vlan_id, + (void *)&cmd_tx_vlan_set_pvid_mode, + NULL, + }, +}; + +/* *** DISABLE HARDWARE INSERTION OF VLAN HEADER IN TX PACKETS *** */ +struct cmd_tx_vlan_reset_result { + cmdline_fixed_string_t tx_vlan; + cmdline_fixed_string_t reset; + portid_t port_id; +}; + +static void +cmd_tx_vlan_reset_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tx_vlan_reset_result *res = parsed_result; + + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + tx_vlan_reset(res->port_id); + + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_tx_vlan_reset_tx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_reset_result, + tx_vlan, "tx_vlan"); +cmdline_parse_token_string_t cmd_tx_vlan_reset_reset = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_reset_result, + reset, "reset"); +cmdline_parse_token_num_t cmd_tx_vlan_reset_portid = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_reset_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_tx_vlan_reset = { + .f = cmd_tx_vlan_reset_parsed, + .data = NULL, + .help_str = "tx_vlan reset <port_id>: Disable hardware insertion of a " + "VLAN header in packets sent on a port", + .tokens = { + (void *)&cmd_tx_vlan_reset_tx_vlan, + (void *)&cmd_tx_vlan_reset_reset, + (void *)&cmd_tx_vlan_reset_portid, + NULL, + }, +}; + + +/* *** ENABLE HARDWARE INSERTION OF CHECKSUM IN TX PACKETS *** */ +struct cmd_csum_result { + cmdline_fixed_string_t csum; + cmdline_fixed_string_t mode; + cmdline_fixed_string_t proto; + cmdline_fixed_string_t hwsw; + portid_t port_id; +}; + +static void +csum_show(int port_id) +{ + struct rte_eth_dev_info dev_info; + uint64_t tx_offloads; + int ret; + + tx_offloads = ports[port_id].dev_conf.txmode.offloads; + printf("Parse tunnel is %s\n", + (ports[port_id].parse_tunnel) ? "on" : "off"); + printf("IP checksum offload is %s\n", + (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ? "hw" : "sw"); + printf("UDP checksum offload is %s\n", + (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ? "hw" : "sw"); + printf("TCP checksum offload is %s\n", + (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ? "hw" : "sw"); + printf("SCTP checksum offload is %s\n", + (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) ? "hw" : "sw"); + printf("Outer-Ip checksum offload is %s\n", + (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ? "hw" : "sw"); + printf("Outer-Udp checksum offload is %s\n", + (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ? 
"hw" : "sw"); + + /* display warnings if configuration is not supported by the NIC */ + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if ((tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) && + (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) { + printf("Warning: hardware IP checksum enabled but not " + "supported by port %d\n", port_id); + } + if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) && + (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) { + printf("Warning: hardware UDP checksum enabled but not " + "supported by port %d\n", port_id); + } + if ((tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) && + (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) { + printf("Warning: hardware TCP checksum enabled but not " + "supported by port %d\n", port_id); + } + if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) && + (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) == 0) { + printf("Warning: hardware SCTP checksum enabled but not " + "supported by port %d\n", port_id); + } + if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) && + (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) == 0) { + printf("Warning: hardware outer IP checksum enabled but not " + "supported by port %d\n", port_id); + } + if ((tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) && + (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) + == 0) { + printf("Warning: hardware outer UDP checksum enabled but not " + "supported by port %d\n", port_id); + } +} + +static void +cmd_config_queue_tx_offloads(struct rte_port *port) +{ + int k; + + /* Apply queue tx offloads configuration */ + for (k = 0; k < port->dev_info.max_rx_queues; k++) + port->tx_conf[k].offloads = + port->dev_conf.txmode.offloads; +} + +static void +cmd_csum_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_csum_result *res = parsed_result; + int hw = 0; + uint64_t csum_offloads = 0; + struct rte_eth_dev_info dev_info; + int ret; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) { + printf("invalid port %d\n", res->port_id); + return; + } + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + ret = eth_dev_info_get_print_err(res->port_id, &dev_info); + if (ret != 0) + return; + + if (!strcmp(res->mode, "set")) { + + if (!strcmp(res->hwsw, "hw")) + hw = 1; + + if (!strcmp(res->proto, "ip")) { + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_IPV4_CKSUM)) { + csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; + } else { + printf("IP checksum offload is not supported " + "by port %u\n", res->port_id); + } + } else if (!strcmp(res->proto, "udp")) { + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_UDP_CKSUM)) { + csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + } else { + printf("UDP checksum offload is not supported " + "by port %u\n", res->port_id); + } + } else if (!strcmp(res->proto, "tcp")) { + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_TCP_CKSUM)) { + csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + } else { + printf("TCP checksum offload is not supported " + "by port %u\n", res->port_id); + } + } else if (!strcmp(res->proto, "sctp")) { + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_SCTP_CKSUM)) { + csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM; + } else { + printf("SCTP checksum offload is not supported " + "by port %u\n", res->port_id); + } + } else if (!strcmp(res->proto, "outer-ip")) { + if (hw == 0 || (dev_info.tx_offload_capa & + 
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) { + csum_offloads |= + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + } else { + printf("Outer IP checksum offload is not " + "supported by port %u\n", res->port_id); + } + } else if (!strcmp(res->proto, "outer-udp")) { + if (hw == 0 || (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)) { + csum_offloads |= + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM; + } else { + printf("Outer UDP checksum offload is not " + "supported by port %u\n", res->port_id); + } + } + + if (hw) { + ports[res->port_id].dev_conf.txmode.offloads |= + csum_offloads; + } else { + ports[res->port_id].dev_conf.txmode.offloads &= + (~csum_offloads); + } + cmd_config_queue_tx_offloads(&ports[res->port_id]); + } + csum_show(res->port_id); + + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_csum_csum = + TOKEN_STRING_INITIALIZER(struct cmd_csum_result, + csum, "csum"); +cmdline_parse_token_string_t cmd_csum_mode = + TOKEN_STRING_INITIALIZER(struct cmd_csum_result, + mode, "set"); +cmdline_parse_token_string_t cmd_csum_proto = + TOKEN_STRING_INITIALIZER(struct cmd_csum_result, + proto, "ip#tcp#udp#sctp#outer-ip#outer-udp"); +cmdline_parse_token_string_t cmd_csum_hwsw = + TOKEN_STRING_INITIALIZER(struct cmd_csum_result, + hwsw, "hw#sw"); +cmdline_parse_token_num_t cmd_csum_portid = + TOKEN_NUM_INITIALIZER(struct cmd_csum_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_csum_set = { + .f = cmd_csum_parsed, + .data = NULL, + .help_str = "csum set ip|tcp|udp|sctp|outer-ip|outer-udp hw|sw <port_id>: " + "Enable/Disable hardware calculation of L3/L4 checksum when " + "using csum forward engine", + .tokens = { + (void *)&cmd_csum_csum, + (void *)&cmd_csum_mode, + (void *)&cmd_csum_proto, + (void *)&cmd_csum_hwsw, + (void *)&cmd_csum_portid, + NULL, + }, +}; + +cmdline_parse_token_string_t cmd_csum_mode_show = + TOKEN_STRING_INITIALIZER(struct cmd_csum_result, + mode, "show"); + +cmdline_parse_inst_t cmd_csum_show = { + .f = cmd_csum_parsed, + .data = NULL, + .help_str = "csum show <port_id>: Show checksum offload configuration", + .tokens = { + (void *)&cmd_csum_csum, + (void *)&cmd_csum_mode_show, + (void *)&cmd_csum_portid, + NULL, + }, +}; + +/* Enable/disable tunnel parsing */ +struct cmd_csum_tunnel_result { + cmdline_fixed_string_t csum; + cmdline_fixed_string_t parse; + cmdline_fixed_string_t onoff; + portid_t port_id; +}; + +static void +cmd_csum_tunnel_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_csum_tunnel_result *res = parsed_result; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + if (!strcmp(res->onoff, "on")) + ports[res->port_id].parse_tunnel = 1; + else + ports[res->port_id].parse_tunnel = 0; + + csum_show(res->port_id); +} + +cmdline_parse_token_string_t cmd_csum_tunnel_csum = + TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result, + csum, "csum"); +cmdline_parse_token_string_t cmd_csum_tunnel_parse = + TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result, + parse, "parse-tunnel"); +cmdline_parse_token_string_t cmd_csum_tunnel_onoff = + TOKEN_STRING_INITIALIZER(struct cmd_csum_tunnel_result, + onoff, "on#off"); +cmdline_parse_token_num_t cmd_csum_tunnel_portid = + TOKEN_NUM_INITIALIZER(struct cmd_csum_tunnel_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_csum_tunnel = { + .f = cmd_csum_tunnel_parsed, + .data = NULL, + .help_str = "csum parse-tunnel on|off <port_id>: " + "Enable/Disable parsing of tunnels for csum engine", + .tokens = { + (void 
*)&cmd_csum_tunnel_csum, + (void *)&cmd_csum_tunnel_parse, + (void *)&cmd_csum_tunnel_onoff, + (void *)&cmd_csum_tunnel_portid, + NULL, + }, +}; + +/* *** ENABLE HARDWARE SEGMENTATION IN TX NON-TUNNELED PACKETS *** */ +struct cmd_tso_set_result { + cmdline_fixed_string_t tso; + cmdline_fixed_string_t mode; + uint16_t tso_segsz; + portid_t port_id; +}; + +static void +cmd_tso_set_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tso_set_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + int ret; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + if (!strcmp(res->mode, "set")) + ports[res->port_id].tso_segsz = res->tso_segsz; + + ret = eth_dev_info_get_print_err(res->port_id, &dev_info); + if (ret != 0) + return; + + if ((ports[res->port_id].tso_segsz != 0) && + (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) { + printf("Error: TSO is not supported by port %d\n", + res->port_id); + return; + } + + if (ports[res->port_id].tso_segsz == 0) { + ports[res->port_id].dev_conf.txmode.offloads &= + ~DEV_TX_OFFLOAD_TCP_TSO; + printf("TSO for non-tunneled packets is disabled\n"); + } else { + ports[res->port_id].dev_conf.txmode.offloads |= + DEV_TX_OFFLOAD_TCP_TSO; + printf("TSO segment size for non-tunneled packets is %d\n", + ports[res->port_id].tso_segsz); + } + cmd_config_queue_tx_offloads(&ports[res->port_id]); + + /* display warnings if configuration is not supported by the NIC */ + ret = eth_dev_info_get_print_err(res->port_id, &dev_info); + if (ret != 0) + return; + + if ((ports[res->port_id].tso_segsz != 0) && + (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0) { + printf("Warning: TSO enabled but not " + "supported by port %d\n", res->port_id); + } + + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_tso_set_tso = + TOKEN_STRING_INITIALIZER(struct cmd_tso_set_result, + tso, "tso"); +cmdline_parse_token_string_t cmd_tso_set_mode = + TOKEN_STRING_INITIALIZER(struct cmd_tso_set_result, + mode, "set"); +cmdline_parse_token_num_t cmd_tso_set_tso_segsz = + TOKEN_NUM_INITIALIZER(struct cmd_tso_set_result, + tso_segsz, UINT16); +cmdline_parse_token_num_t cmd_tso_set_portid = + TOKEN_NUM_INITIALIZER(struct cmd_tso_set_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_tso_set = { + .f = cmd_tso_set_parsed, + .data = NULL, + .help_str = "tso set <tso_segsz> <port_id>: " + "Set TSO segment size of non-tunneled packets for csum engine " + "(0 to disable)", + .tokens = { + (void *)&cmd_tso_set_tso, + (void *)&cmd_tso_set_mode, + (void *)&cmd_tso_set_tso_segsz, + (void *)&cmd_tso_set_portid, + NULL, + }, +}; + +cmdline_parse_token_string_t cmd_tso_show_mode = + TOKEN_STRING_INITIALIZER(struct cmd_tso_set_result, + mode, "show"); + + +cmdline_parse_inst_t cmd_tso_show = { + .f = cmd_tso_set_parsed, + .data = NULL, + .help_str = "tso show <port_id>: " + "Show TSO segment size of non-tunneled packets for csum engine", + .tokens = { + (void *)&cmd_tso_set_tso, + (void *)&cmd_tso_show_mode, + (void *)&cmd_tso_set_portid, + NULL, + }, +}; + +/* *** ENABLE HARDWARE SEGMENTATION IN TX TUNNELED PACKETS *** */ +struct cmd_tunnel_tso_set_result { + cmdline_fixed_string_t tso; + cmdline_fixed_string_t mode; + uint16_t tso_segsz; + portid_t port_id; +}; + +static struct rte_eth_dev_info +check_tunnel_tso_nic_support(portid_t port_id) +{ + struct rte_eth_dev_info 
dev_info; + + if (eth_dev_info_get_print_err(port_id, &dev_info) != 0) + return dev_info; + + if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) + printf("Warning: VXLAN TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); + if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO)) + printf("Warning: GRE TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); + if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO)) + printf("Warning: IPIP TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); + if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) + printf("Warning: GENEVE TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); + if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO)) + printf("Warning: IP TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); + if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO)) + printf("Warning: UDP TUNNEL TSO not supported therefore " + "not enabled for port %d\n", port_id); + return dev_info; +} + +static void +cmd_tunnel_tso_set_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tunnel_tso_set_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + if (!port_is_stopped(res->port_id)) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + if (!strcmp(res->mode, "set")) + ports[res->port_id].tunnel_tso_segsz = res->tso_segsz; + + dev_info = check_tunnel_tso_nic_support(res->port_id); + if (ports[res->port_id].tunnel_tso_segsz == 0) { + ports[res->port_id].dev_conf.txmode.offloads &= + ~(DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO); + printf("TSO for tunneled packets is disabled\n"); + } else { + uint64_t tso_offloads = (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | + DEV_TX_OFFLOAD_IPIP_TNL_TSO | + DEV_TX_OFFLOAD_GENEVE_TNL_TSO | + DEV_TX_OFFLOAD_IP_TNL_TSO | + DEV_TX_OFFLOAD_UDP_TNL_TSO); + + ports[res->port_id].dev_conf.txmode.offloads |= + (tso_offloads & dev_info.tx_offload_capa); + printf("TSO segment size for tunneled packets is %d\n", + ports[res->port_id].tunnel_tso_segsz); + + /* Below conditions are needed to make it work: + * (1) tunnel TSO is supported by the NIC; + * (2) "csum parse_tunnel" must be set so that tunneled pkts + * are recognized; + * (3) for tunneled pkts with outer L3 of IPv4, + * "csum set outer-ip" must be set to hw, because after tso, + * total_len of outer IP header is changed, and the checksum + * of outer IP header calculated by sw should be wrong; that + * is not necessary for IPv6 tunneled pkts because there's no + * checksum in IP header anymore. 
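+ * Editorial note (derived from the help strings defined in this file,
+ * not from upstream documentation): conditions (2) and (3) correspond
+ * to running
+ *   "csum parse-tunnel on <port_id>" and
+ *   "csum set outer-ip hw <port_id>"
+ * from the testpmd prompt before "tunnel_tso set <tso_segsz> <port_id>".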
+ */ + + if (!ports[res->port_id].parse_tunnel) + printf("Warning: csum parse_tunnel must be set " + "so that tunneled packets are recognized\n"); + if (!(ports[res->port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)) + printf("Warning: csum set outer-ip must be set to hw " + "if outer L3 is IPv4; not necessary for IPv6\n"); + } + + cmd_config_queue_tx_offloads(&ports[res->port_id]); + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_tunnel_tso_set_tso = + TOKEN_STRING_INITIALIZER(struct cmd_tunnel_tso_set_result, + tso, "tunnel_tso"); +cmdline_parse_token_string_t cmd_tunnel_tso_set_mode = + TOKEN_STRING_INITIALIZER(struct cmd_tunnel_tso_set_result, + mode, "set"); +cmdline_parse_token_num_t cmd_tunnel_tso_set_tso_segsz = + TOKEN_NUM_INITIALIZER(struct cmd_tunnel_tso_set_result, + tso_segsz, UINT16); +cmdline_parse_token_num_t cmd_tunnel_tso_set_portid = + TOKEN_NUM_INITIALIZER(struct cmd_tunnel_tso_set_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_tunnel_tso_set = { + .f = cmd_tunnel_tso_set_parsed, + .data = NULL, + .help_str = "tunnel_tso set <tso_segsz> <port_id>: " + "Set TSO segment size of tunneled packets for csum engine " + "(0 to disable)", + .tokens = { + (void *)&cmd_tunnel_tso_set_tso, + (void *)&cmd_tunnel_tso_set_mode, + (void *)&cmd_tunnel_tso_set_tso_segsz, + (void *)&cmd_tunnel_tso_set_portid, + NULL, + }, +}; + +cmdline_parse_token_string_t cmd_tunnel_tso_show_mode = + TOKEN_STRING_INITIALIZER(struct cmd_tunnel_tso_set_result, + mode, "show"); + + +cmdline_parse_inst_t cmd_tunnel_tso_show = { + .f = cmd_tunnel_tso_set_parsed, + .data = NULL, + .help_str = "tunnel_tso show <port_id> " + "Show TSO segment size of tunneled packets for csum engine", + .tokens = { + (void *)&cmd_tunnel_tso_set_tso, + (void *)&cmd_tunnel_tso_show_mode, + (void *)&cmd_tunnel_tso_set_portid, + NULL, + }, +}; + +/* *** SET GRO FOR A PORT *** */ +struct cmd_gro_enable_result { + cmdline_fixed_string_t cmd_set; + cmdline_fixed_string_t cmd_port; + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t cmd_onoff; + portid_t cmd_pid; +}; + +static void +cmd_gro_enable_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_gro_enable_result *res; + + res = parsed_result; + if (!strcmp(res->cmd_keyword, "gro")) + setup_gro(res->cmd_onoff, res->cmd_pid); +} + +cmdline_parse_token_string_t cmd_gro_enable_set = + TOKEN_STRING_INITIALIZER(struct cmd_gro_enable_result, + cmd_set, "set"); +cmdline_parse_token_string_t cmd_gro_enable_port = + TOKEN_STRING_INITIALIZER(struct cmd_gro_enable_result, + cmd_keyword, "port"); +cmdline_parse_token_num_t cmd_gro_enable_pid = + TOKEN_NUM_INITIALIZER(struct cmd_gro_enable_result, + cmd_pid, UINT16); +cmdline_parse_token_string_t cmd_gro_enable_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_gro_enable_result, + cmd_keyword, "gro"); +cmdline_parse_token_string_t cmd_gro_enable_onoff = + TOKEN_STRING_INITIALIZER(struct cmd_gro_enable_result, + cmd_onoff, "on#off"); + +cmdline_parse_inst_t cmd_gro_enable = { + .f = cmd_gro_enable_parsed, + .data = NULL, + .help_str = "set port <port_id> gro on|off", + .tokens = { + (void *)&cmd_gro_enable_set, + (void *)&cmd_gro_enable_port, + (void *)&cmd_gro_enable_pid, + (void *)&cmd_gro_enable_keyword, + (void *)&cmd_gro_enable_onoff, + NULL, + }, +}; + +/* *** DISPLAY GRO CONFIGURATION *** */ +struct cmd_gro_show_result { + cmdline_fixed_string_t cmd_show; + cmdline_fixed_string_t cmd_port; + 
cmdline_fixed_string_t cmd_keyword; + portid_t cmd_pid; +}; + +static void +cmd_gro_show_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_gro_show_result *res; + + res = parsed_result; + if (!strcmp(res->cmd_keyword, "gro")) + show_gro(res->cmd_pid); +} + +cmdline_parse_token_string_t cmd_gro_show_show = + TOKEN_STRING_INITIALIZER(struct cmd_gro_show_result, + cmd_show, "show"); +cmdline_parse_token_string_t cmd_gro_show_port = + TOKEN_STRING_INITIALIZER(struct cmd_gro_show_result, + cmd_port, "port"); +cmdline_parse_token_num_t cmd_gro_show_pid = + TOKEN_NUM_INITIALIZER(struct cmd_gro_show_result, + cmd_pid, UINT16); +cmdline_parse_token_string_t cmd_gro_show_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_gro_show_result, + cmd_keyword, "gro"); + +cmdline_parse_inst_t cmd_gro_show = { + .f = cmd_gro_show_parsed, + .data = NULL, + .help_str = "show port <port_id> gro", + .tokens = { + (void *)&cmd_gro_show_show, + (void *)&cmd_gro_show_port, + (void *)&cmd_gro_show_pid, + (void *)&cmd_gro_show_keyword, + NULL, + }, +}; + +/* *** SET FLUSH CYCLES FOR GRO *** */ +struct cmd_gro_flush_result { + cmdline_fixed_string_t cmd_set; + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t cmd_flush; + uint8_t cmd_cycles; +}; + +static void +cmd_gro_flush_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_gro_flush_result *res; + + res = parsed_result; + if ((!strcmp(res->cmd_keyword, "gro")) && + (!strcmp(res->cmd_flush, "flush"))) + setup_gro_flush_cycles(res->cmd_cycles); +} + +cmdline_parse_token_string_t cmd_gro_flush_set = + TOKEN_STRING_INITIALIZER(struct cmd_gro_flush_result, + cmd_set, "set"); +cmdline_parse_token_string_t cmd_gro_flush_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_gro_flush_result, + cmd_keyword, "gro"); +cmdline_parse_token_string_t cmd_gro_flush_flush = + TOKEN_STRING_INITIALIZER(struct cmd_gro_flush_result, + cmd_flush, "flush"); +cmdline_parse_token_num_t cmd_gro_flush_cycles = + TOKEN_NUM_INITIALIZER(struct cmd_gro_flush_result, + cmd_cycles, UINT8); + +cmdline_parse_inst_t cmd_gro_flush = { + .f = cmd_gro_flush_parsed, + .data = NULL, + .help_str = "set gro flush <cycles>", + .tokens = { + (void *)&cmd_gro_flush_set, + (void *)&cmd_gro_flush_keyword, + (void *)&cmd_gro_flush_flush, + (void *)&cmd_gro_flush_cycles, + NULL, + }, +}; + +/* *** ENABLE/DISABLE GSO *** */ +struct cmd_gso_enable_result { + cmdline_fixed_string_t cmd_set; + cmdline_fixed_string_t cmd_port; + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t cmd_mode; + portid_t cmd_pid; +}; + +static void +cmd_gso_enable_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_gso_enable_result *res; + + res = parsed_result; + if (!strcmp(res->cmd_keyword, "gso")) + setup_gso(res->cmd_mode, res->cmd_pid); +} + +cmdline_parse_token_string_t cmd_gso_enable_set = + TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result, + cmd_set, "set"); +cmdline_parse_token_string_t cmd_gso_enable_port = + TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result, + cmd_port, "port"); +cmdline_parse_token_string_t cmd_gso_enable_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result, + cmd_keyword, "gso"); +cmdline_parse_token_string_t cmd_gso_enable_mode = + TOKEN_STRING_INITIALIZER(struct cmd_gso_enable_result, + cmd_mode, "on#off"); +cmdline_parse_token_num_t cmd_gso_enable_pid = + TOKEN_NUM_INITIALIZER(struct cmd_gso_enable_result, + 
cmd_pid, UINT16); + +cmdline_parse_inst_t cmd_gso_enable = { + .f = cmd_gso_enable_parsed, + .data = NULL, + .help_str = "set port <port_id> gso on|off", + .tokens = { + (void *)&cmd_gso_enable_set, + (void *)&cmd_gso_enable_port, + (void *)&cmd_gso_enable_pid, + (void *)&cmd_gso_enable_keyword, + (void *)&cmd_gso_enable_mode, + NULL, + }, +}; + +/* *** SET MAX PACKET LENGTH FOR GSO SEGMENTS *** */ +struct cmd_gso_size_result { + cmdline_fixed_string_t cmd_set; + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t cmd_segsz; + uint16_t cmd_size; +}; + +static void +cmd_gso_size_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_gso_size_result *res = parsed_result; + + if (test_done == 0) { + printf("Before setting GSO segsz, please first" + " stop forwarding\n"); + return; + } + + if (!strcmp(res->cmd_keyword, "gso") && + !strcmp(res->cmd_segsz, "segsz")) { + if (res->cmd_size < RTE_GSO_SEG_SIZE_MIN) + printf("gso_size should be larger than %zu." + " Please input a legal value\n", + RTE_GSO_SEG_SIZE_MIN); + else + gso_max_segment_size = res->cmd_size; + } +} + +cmdline_parse_token_string_t cmd_gso_size_set = + TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result, + cmd_set, "set"); +cmdline_parse_token_string_t cmd_gso_size_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result, + cmd_keyword, "gso"); +cmdline_parse_token_string_t cmd_gso_size_segsz = + TOKEN_STRING_INITIALIZER(struct cmd_gso_size_result, + cmd_segsz, "segsz"); +cmdline_parse_token_num_t cmd_gso_size_size = + TOKEN_NUM_INITIALIZER(struct cmd_gso_size_result, + cmd_size, UINT16); + +cmdline_parse_inst_t cmd_gso_size = { + .f = cmd_gso_size_parsed, + .data = NULL, + .help_str = "set gso segsz <length>", + .tokens = { + (void *)&cmd_gso_size_set, + (void *)&cmd_gso_size_keyword, + (void *)&cmd_gso_size_segsz, + (void *)&cmd_gso_size_size, + NULL, + }, +}; + +/* *** SHOW GSO CONFIGURATION *** */ +struct cmd_gso_show_result { + cmdline_fixed_string_t cmd_show; + cmdline_fixed_string_t cmd_port; + cmdline_fixed_string_t cmd_keyword; + portid_t cmd_pid; +}; + +static void +cmd_gso_show_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_gso_show_result *res = parsed_result; + + if (!rte_eth_dev_is_valid_port(res->cmd_pid)) { + printf("invalid port id %u\n", res->cmd_pid); + return; + } + if (!strcmp(res->cmd_keyword, "gso")) { + if (gso_ports[res->cmd_pid].enable) { + printf("Max GSO'd packet size: %uB\n" + "Supported GSO types: TCP/IPv4, " + "UDP/IPv4, VxLAN with inner " + "TCP/IPv4 packet, GRE with inner " + "TCP/IPv4 packet\n", + gso_max_segment_size); + } else + printf("GSO is not enabled on Port %u\n", res->cmd_pid); + } +} + +cmdline_parse_token_string_t cmd_gso_show_show = +TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result, + cmd_show, "show"); +cmdline_parse_token_string_t cmd_gso_show_port = +TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result, + cmd_port, "port"); +cmdline_parse_token_string_t cmd_gso_show_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_gso_show_result, + cmd_keyword, "gso"); +cmdline_parse_token_num_t cmd_gso_show_pid = + TOKEN_NUM_INITIALIZER(struct cmd_gso_show_result, + cmd_pid, UINT16); + +cmdline_parse_inst_t cmd_gso_show = { + .f = cmd_gso_show_parsed, + .data = NULL, + .help_str = "show port <port_id> gso", + .tokens = { + (void *)&cmd_gso_show_show, + (void *)&cmd_gso_show_port, + (void *)&cmd_gso_show_pid, + (void *)&cmd_gso_show_keyword, + NULL, + }, +}; + +/* *** 
ENABLE/DISABLE FLUSH ON RX STREAMS *** */ +struct cmd_set_flush_rx { + cmdline_fixed_string_t set; + cmdline_fixed_string_t flush_rx; + cmdline_fixed_string_t mode; +}; + +static void +cmd_set_flush_rx_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_flush_rx *res = parsed_result; + no_flush_rx = (uint8_t)((strcmp(res->mode, "on") == 0) ? 0 : 1); +} + +cmdline_parse_token_string_t cmd_setflushrx_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_flush_rx, + set, "set"); +cmdline_parse_token_string_t cmd_setflushrx_flush_rx = + TOKEN_STRING_INITIALIZER(struct cmd_set_flush_rx, + flush_rx, "flush_rx"); +cmdline_parse_token_string_t cmd_setflushrx_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_flush_rx, + mode, "on#off"); + + +cmdline_parse_inst_t cmd_set_flush_rx = { + .f = cmd_set_flush_rx_parsed, + .help_str = "set flush_rx on|off: Enable/Disable flush on rx streams", + .data = NULL, + .tokens = { + (void *)&cmd_setflushrx_set, + (void *)&cmd_setflushrx_flush_rx, + (void *)&cmd_setflushrx_mode, + NULL, + }, +}; + +/* *** ENABLE/DISABLE LINK STATUS CHECK *** */ +struct cmd_set_link_check { + cmdline_fixed_string_t set; + cmdline_fixed_string_t link_check; + cmdline_fixed_string_t mode; +}; + +static void +cmd_set_link_check_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_link_check *res = parsed_result; + no_link_check = (uint8_t)((strcmp(res->mode, "on") == 0) ? 0 : 1); +} + +cmdline_parse_token_string_t cmd_setlinkcheck_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_link_check, + set, "set"); +cmdline_parse_token_string_t cmd_setlinkcheck_link_check = + TOKEN_STRING_INITIALIZER(struct cmd_set_link_check, + link_check, "link_check"); +cmdline_parse_token_string_t cmd_setlinkcheck_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_link_check, + mode, "on#off"); + + +cmdline_parse_inst_t cmd_set_link_check = { + .f = cmd_set_link_check_parsed, + .help_str = "set link_check on|off: Enable/Disable link status check " + "when starting/stopping a port", + .data = NULL, + .tokens = { + (void *)&cmd_setlinkcheck_set, + (void *)&cmd_setlinkcheck_link_check, + (void *)&cmd_setlinkcheck_mode, + NULL, + }, +}; + +/* *** SET NIC BYPASS MODE *** */ +struct cmd_set_bypass_mode_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bypass; + cmdline_fixed_string_t mode; + cmdline_fixed_string_t value; + portid_t port_id; +}; + +static void +cmd_set_bypass_mode_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_bypass_mode_result *res = parsed_result; + portid_t port_id = res->port_id; + int32_t rc = -EINVAL; + +#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS + uint32_t bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL; + + if (!strcmp(res->value, "bypass")) + bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_BYPASS; + else if (!strcmp(res->value, "isolate")) + bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE; + else + bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL; + + /* Set the bypass mode for the relevant port. 
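+ * Note: this call is only compiled in when both RTE_LIBRTE_IXGBE_PMD and
+ * RTE_LIBRTE_IXGBE_BYPASS are defined; otherwise rc keeps its initial
+ * -EINVAL value and the failure message below is printed.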
*/ + rc = rte_pmd_ixgbe_bypass_state_set(port_id, &bypass_mode); +#endif + if (rc != 0) + printf("\t Failed to set bypass mode for port = %d.\n", port_id); +} + +cmdline_parse_token_string_t cmd_setbypass_mode_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_mode_result, + set, "set"); +cmdline_parse_token_string_t cmd_setbypass_mode_bypass = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_mode_result, + bypass, "bypass"); +cmdline_parse_token_string_t cmd_setbypass_mode_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_mode_result, + mode, "mode"); +cmdline_parse_token_string_t cmd_setbypass_mode_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_mode_result, + value, "normal#bypass#isolate"); +cmdline_parse_token_num_t cmd_setbypass_mode_port = + TOKEN_NUM_INITIALIZER(struct cmd_set_bypass_mode_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_set_bypass_mode = { + .f = cmd_set_bypass_mode_parsed, + .help_str = "set bypass mode normal|bypass|isolate <port_id>: " + "Set the NIC bypass mode for port_id", + .data = NULL, + .tokens = { + (void *)&cmd_setbypass_mode_set, + (void *)&cmd_setbypass_mode_bypass, + (void *)&cmd_setbypass_mode_mode, + (void *)&cmd_setbypass_mode_value, + (void *)&cmd_setbypass_mode_port, + NULL, + }, +}; + +/* *** SET NIC BYPASS EVENT *** */ +struct cmd_set_bypass_event_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bypass; + cmdline_fixed_string_t event; + cmdline_fixed_string_t event_value; + cmdline_fixed_string_t mode; + cmdline_fixed_string_t mode_value; + portid_t port_id; +}; + +static void +cmd_set_bypass_event_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int32_t rc = -EINVAL; + struct cmd_set_bypass_event_result *res = parsed_result; + portid_t port_id = res->port_id; + +#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS + uint32_t bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_NONE; + uint32_t bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL; + + if (!strcmp(res->event_value, "timeout")) + bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT; + else if (!strcmp(res->event_value, "os_on")) + bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_OS_ON; + else if (!strcmp(res->event_value, "os_off")) + bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_OS_OFF; + else if (!strcmp(res->event_value, "power_on")) + bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_POWER_ON; + else if (!strcmp(res->event_value, "power_off")) + bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_POWER_OFF; + else + bypass_event = RTE_PMD_IXGBE_BYPASS_EVENT_NONE; + + if (!strcmp(res->mode_value, "bypass")) + bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_BYPASS; + else if (!strcmp(res->mode_value, "isolate")) + bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_ISOLATE; + else + bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NORMAL; + + /* Set the watchdog timeout. */ + if (bypass_event == RTE_PMD_IXGBE_BYPASS_EVENT_TIMEOUT) { + + rc = -EINVAL; + if (RTE_PMD_IXGBE_BYPASS_TMT_VALID(bypass_timeout)) { + rc = rte_pmd_ixgbe_bypass_wd_timeout_store(port_id, + bypass_timeout); + } + if (rc != 0) { + printf("Failed to set timeout value %u " + "for port %d, errto code: %d.\n", + bypass_timeout, port_id, rc); + } + } + + /* Set the bypass event to transition to bypass mode. 
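+ * When the "timeout" event is selected, the watchdog period chosen with
+ * "set bypass timeout" has already been validated and programmed just
+ * above, before the event itself is stored here.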
*/ + rc = rte_pmd_ixgbe_bypass_event_store(port_id, bypass_event, + bypass_mode); +#endif + + if (rc != 0) + printf("\t Failed to set bypass event for port = %d.\n", + port_id); +} + +cmdline_parse_token_string_t cmd_setbypass_event_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result, + set, "set"); +cmdline_parse_token_string_t cmd_setbypass_event_bypass = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result, + bypass, "bypass"); +cmdline_parse_token_string_t cmd_setbypass_event_event = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result, + event, "event"); +cmdline_parse_token_string_t cmd_setbypass_event_event_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result, + event_value, "none#timeout#os_off#os_on#power_on#power_off"); +cmdline_parse_token_string_t cmd_setbypass_event_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result, + mode, "mode"); +cmdline_parse_token_string_t cmd_setbypass_event_mode_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_event_result, + mode_value, "normal#bypass#isolate"); +cmdline_parse_token_num_t cmd_setbypass_event_port = + TOKEN_NUM_INITIALIZER(struct cmd_set_bypass_event_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_set_bypass_event = { + .f = cmd_set_bypass_event_parsed, + .help_str = "set bypass event none|timeout|os_on|os_off|power_on|" + "power_off mode normal|bypass|isolate <port_id>: " + "Set the NIC bypass event mode for port_id", + .data = NULL, + .tokens = { + (void *)&cmd_setbypass_event_set, + (void *)&cmd_setbypass_event_bypass, + (void *)&cmd_setbypass_event_event, + (void *)&cmd_setbypass_event_event_value, + (void *)&cmd_setbypass_event_mode, + (void *)&cmd_setbypass_event_mode_value, + (void *)&cmd_setbypass_event_port, + NULL, + }, +}; + + +/* *** SET NIC BYPASS TIMEOUT *** */ +struct cmd_set_bypass_timeout_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bypass; + cmdline_fixed_string_t timeout; + cmdline_fixed_string_t value; +}; + +static void +cmd_set_bypass_timeout_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + __rte_unused struct cmd_set_bypass_timeout_result *res = parsed_result; + +#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS + if (!strcmp(res->value, "1.5")) + bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_1_5_SEC; + else if (!strcmp(res->value, "2")) + bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_2_SEC; + else if (!strcmp(res->value, "3")) + bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_3_SEC; + else if (!strcmp(res->value, "4")) + bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_4_SEC; + else if (!strcmp(res->value, "8")) + bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_8_SEC; + else if (!strcmp(res->value, "16")) + bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_16_SEC; + else if (!strcmp(res->value, "32")) + bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_32_SEC; + else + bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; +#endif +} + +cmdline_parse_token_string_t cmd_setbypass_timeout_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_timeout_result, + set, "set"); +cmdline_parse_token_string_t cmd_setbypass_timeout_bypass = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_timeout_result, + bypass, "bypass"); +cmdline_parse_token_string_t cmd_setbypass_timeout_timeout = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_timeout_result, + timeout, "timeout"); +cmdline_parse_token_string_t cmd_setbypass_timeout_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_bypass_timeout_result, + value, 
"0#1.5#2#3#4#8#16#32"); + +cmdline_parse_inst_t cmd_set_bypass_timeout = { + .f = cmd_set_bypass_timeout_parsed, + .help_str = "set bypass timeout 0|1.5|2|3|4|8|16|32: " + "Set the NIC bypass watchdog timeout in seconds", + .data = NULL, + .tokens = { + (void *)&cmd_setbypass_timeout_set, + (void *)&cmd_setbypass_timeout_bypass, + (void *)&cmd_setbypass_timeout_timeout, + (void *)&cmd_setbypass_timeout_value, + NULL, + }, +}; + +/* *** SHOW NIC BYPASS MODE *** */ +struct cmd_show_bypass_config_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t bypass; + cmdline_fixed_string_t config; + portid_t port_id; +}; + +static void +cmd_show_bypass_config_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_bypass_config_result *res = parsed_result; + portid_t port_id = res->port_id; + int rc = -EINVAL; +#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS + uint32_t event_mode; + uint32_t bypass_mode; + uint32_t timeout = bypass_timeout; + unsigned int i; + + static const char * const timeouts[RTE_PMD_IXGBE_BYPASS_TMT_NUM] = + {"off", "1.5", "2", "3", "4", "8", "16", "32"}; + static const char * const modes[RTE_PMD_IXGBE_BYPASS_MODE_NUM] = + {"UNKNOWN", "normal", "bypass", "isolate"}; + static const char * const events[RTE_PMD_IXGBE_BYPASS_EVENT_NUM] = { + "NONE", + "OS/board on", + "power supply on", + "OS/board off", + "power supply off", + "timeout"}; + + /* Display the bypass mode.*/ + if (rte_pmd_ixgbe_bypass_state_show(port_id, &bypass_mode) != 0) { + printf("\tFailed to get bypass mode for port = %d\n", port_id); + return; + } + else { + if (!RTE_PMD_IXGBE_BYPASS_MODE_VALID(bypass_mode)) + bypass_mode = RTE_PMD_IXGBE_BYPASS_MODE_NONE; + + printf("\tbypass mode = %s\n", modes[bypass_mode]); + } + + /* Display the bypass timeout.*/ + if (!RTE_PMD_IXGBE_BYPASS_TMT_VALID(timeout)) + timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; + + printf("\tbypass timeout = %s\n", timeouts[timeout]); + + /* Display the bypass events and associated modes. 
*/ + for (i = RTE_PMD_IXGBE_BYPASS_EVENT_START; i < RTE_DIM(events); i++) { + + if (rte_pmd_ixgbe_bypass_event_show(port_id, i, &event_mode)) { + printf("\tFailed to get bypass mode for event = %s\n", + events[i]); + } else { + if (!RTE_PMD_IXGBE_BYPASS_MODE_VALID(event_mode)) + event_mode = RTE_PMD_IXGBE_BYPASS_MODE_NONE; + + printf("\tbypass event: %-16s = %s\n", events[i], + modes[event_mode]); + } + } +#endif + if (rc != 0) + printf("\tFailed to get bypass configuration for port = %d\n", + port_id); +} + +cmdline_parse_token_string_t cmd_showbypass_config_show = + TOKEN_STRING_INITIALIZER(struct cmd_show_bypass_config_result, + show, "show"); +cmdline_parse_token_string_t cmd_showbypass_config_bypass = + TOKEN_STRING_INITIALIZER(struct cmd_show_bypass_config_result, + bypass, "bypass"); +cmdline_parse_token_string_t cmd_showbypass_config_config = + TOKEN_STRING_INITIALIZER(struct cmd_show_bypass_config_result, + config, "config"); +cmdline_parse_token_num_t cmd_showbypass_config_port = + TOKEN_NUM_INITIALIZER(struct cmd_show_bypass_config_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_show_bypass_config = { + .f = cmd_show_bypass_config_parsed, + .help_str = "show bypass config <port_id>: " + "Show the NIC bypass config for port_id", + .data = NULL, + .tokens = { + (void *)&cmd_showbypass_config_show, + (void *)&cmd_showbypass_config_bypass, + (void *)&cmd_showbypass_config_config, + (void *)&cmd_showbypass_config_port, + NULL, + }, +}; + +#ifdef RTE_LIBRTE_PMD_BOND +/* *** SET BONDING MODE *** */ +struct cmd_set_bonding_mode_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t mode; + uint8_t value; + portid_t port_id; +}; + +static void cmd_set_bonding_mode_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_bonding_mode_result *res = parsed_result; + portid_t port_id = res->port_id; + + /* Set the bonding mode for the relevant port. 
*/ + if (0 != rte_eth_bond_mode_set(port_id, res->value)) + printf("\t Failed to set bonding mode for port = %d.\n", port_id); +} + +cmdline_parse_token_string_t cmd_setbonding_mode_set = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_mode_result, + set, "set"); +cmdline_parse_token_string_t cmd_setbonding_mode_bonding = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_mode_result, + bonding, "bonding"); +cmdline_parse_token_string_t cmd_setbonding_mode_mode = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_mode_result, + mode, "mode"); +cmdline_parse_token_num_t cmd_setbonding_mode_value = +TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_mode_result, + value, UINT8); +cmdline_parse_token_num_t cmd_setbonding_mode_port = +TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_mode_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_set_bonding_mode = { + .f = cmd_set_bonding_mode_parsed, + .help_str = "set bonding mode <mode_value> <port_id>: " + "Set the bonding mode for port_id", + .data = NULL, + .tokens = { + (void *) &cmd_setbonding_mode_set, + (void *) &cmd_setbonding_mode_bonding, + (void *) &cmd_setbonding_mode_mode, + (void *) &cmd_setbonding_mode_value, + (void *) &cmd_setbonding_mode_port, + NULL + } +}; + +/* *** SET BONDING SLOW_QUEUE SW/HW *** */ +struct cmd_set_bonding_lacp_dedicated_queues_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t lacp; + cmdline_fixed_string_t dedicated_queues; + portid_t port_id; + cmdline_fixed_string_t mode; +}; + +static void cmd_set_bonding_lacp_dedicated_queues_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_bonding_lacp_dedicated_queues_result *res = parsed_result; + portid_t port_id = res->port_id; + struct rte_port *port; + + port = &ports[port_id]; + + /** Check if the port is not started **/ + if (port->port_status != RTE_PORT_STOPPED) { + printf("Please stop port %d first\n", port_id); + return; + } + + if (!strcmp(res->mode, "enable")) { + if (rte_eth_bond_8023ad_dedicated_queues_enable(port_id) == 0) + printf("Dedicated queues for LACP control packets" + " enabled\n"); + else + printf("Enabling dedicated queues for LACP control " + "packets on port %d failed\n", port_id); + } else if (!strcmp(res->mode, "disable")) { + if (rte_eth_bond_8023ad_dedicated_queues_disable(port_id) == 0) + printf("Dedicated queues for LACP control packets " + "disabled\n"); + else + printf("Disabling dedicated queues for LACP control " + "traffic on port %d failed\n", port_id); + } +} + +cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_set = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result, + set, "set"); +cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_bonding = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result, + bonding, "bonding"); +cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_lacp = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result, + lacp, "lacp"); +cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_dedicated_queues = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result, + dedicated_queues, "dedicated_queues"); +cmdline_parse_token_num_t cmd_setbonding_lacp_dedicated_queues_port_id = +TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_setbonding_lacp_dedicated_queues_mode = 
+TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_lacp_dedicated_queues_result, + mode, "enable#disable"); + +cmdline_parse_inst_t cmd_set_lacp_dedicated_queues = { + .f = cmd_set_bonding_lacp_dedicated_queues_parsed, + .help_str = "set bonding lacp dedicated_queues <port_id> " + "enable|disable: " + "Enable/disable dedicated queues for LACP control traffic for port_id", + .data = NULL, + .tokens = { + (void *)&cmd_setbonding_lacp_dedicated_queues_set, + (void *)&cmd_setbonding_lacp_dedicated_queues_bonding, + (void *)&cmd_setbonding_lacp_dedicated_queues_lacp, + (void *)&cmd_setbonding_lacp_dedicated_queues_dedicated_queues, + (void *)&cmd_setbonding_lacp_dedicated_queues_port_id, + (void *)&cmd_setbonding_lacp_dedicated_queues_mode, + NULL + } +}; + +/* *** SET BALANCE XMIT POLICY *** */ +struct cmd_set_bonding_balance_xmit_policy_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t balance_xmit_policy; + portid_t port_id; + cmdline_fixed_string_t policy; +}; + +static void cmd_set_bonding_balance_xmit_policy_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_bonding_balance_xmit_policy_result *res = parsed_result; + portid_t port_id = res->port_id; + uint8_t policy; + + if (!strcmp(res->policy, "l2")) { + policy = BALANCE_XMIT_POLICY_LAYER2; + } else if (!strcmp(res->policy, "l23")) { + policy = BALANCE_XMIT_POLICY_LAYER23; + } else if (!strcmp(res->policy, "l34")) { + policy = BALANCE_XMIT_POLICY_LAYER34; + } else { + printf("\t Invalid xmit policy selection"); + return; + } + + /* Set the bonding mode for the relevant port. */ + if (0 != rte_eth_bond_xmit_policy_set(port_id, policy)) { + printf("\t Failed to set bonding balance xmit policy for port = %d.\n", + port_id); + } +} + +cmdline_parse_token_string_t cmd_setbonding_balance_xmit_policy_set = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result, + set, "set"); +cmdline_parse_token_string_t cmd_setbonding_balance_xmit_policy_bonding = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result, + bonding, "bonding"); +cmdline_parse_token_string_t cmd_setbonding_balance_xmit_policy_balance_xmit_policy = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result, + balance_xmit_policy, "balance_xmit_policy"); +cmdline_parse_token_num_t cmd_setbonding_balance_xmit_policy_port = +TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_setbonding_balance_xmit_policy_policy = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_balance_xmit_policy_result, + policy, "l2#l23#l34"); + +cmdline_parse_inst_t cmd_set_balance_xmit_policy = { + .f = cmd_set_bonding_balance_xmit_policy_parsed, + .help_str = "set bonding balance_xmit_policy <port_id> " + "l2|l23|l34: " + "Set the bonding balance_xmit_policy for port_id", + .data = NULL, + .tokens = { + (void *)&cmd_setbonding_balance_xmit_policy_set, + (void *)&cmd_setbonding_balance_xmit_policy_bonding, + (void *)&cmd_setbonding_balance_xmit_policy_balance_xmit_policy, + (void *)&cmd_setbonding_balance_xmit_policy_port, + (void *)&cmd_setbonding_balance_xmit_policy_policy, + NULL + } +}; + +/* *** SHOW NIC BONDING CONFIGURATION *** */ +struct cmd_show_bonding_config_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t config; + portid_t port_id; +}; + +static void cmd_show_bonding_config_parsed(void *parsed_result, + 
__rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_bonding_config_result *res = parsed_result; + int bonding_mode, agg_mode; + portid_t slaves[RTE_MAX_ETHPORTS]; + int num_slaves, num_active_slaves; + int primary_id; + int i; + portid_t port_id = res->port_id; + + /* Display the bonding mode.*/ + bonding_mode = rte_eth_bond_mode_get(port_id); + if (bonding_mode < 0) { + printf("\tFailed to get bonding mode for port = %d\n", port_id); + return; + } else + printf("\tBonding mode: %d\n", bonding_mode); + + if (bonding_mode == BONDING_MODE_BALANCE) { + int balance_xmit_policy; + + balance_xmit_policy = rte_eth_bond_xmit_policy_get(port_id); + if (balance_xmit_policy < 0) { + printf("\tFailed to get balance xmit policy for port = %d\n", + port_id); + return; + } else { + printf("\tBalance Xmit Policy: "); + + switch (balance_xmit_policy) { + case BALANCE_XMIT_POLICY_LAYER2: + printf("BALANCE_XMIT_POLICY_LAYER2"); + break; + case BALANCE_XMIT_POLICY_LAYER23: + printf("BALANCE_XMIT_POLICY_LAYER23"); + break; + case BALANCE_XMIT_POLICY_LAYER34: + printf("BALANCE_XMIT_POLICY_LAYER34"); + break; + } + printf("\n"); + } + } + + if (bonding_mode == BONDING_MODE_8023AD) { + agg_mode = rte_eth_bond_8023ad_agg_selection_get(port_id); + printf("\tIEEE802.3AD Aggregator Mode: "); + switch (agg_mode) { + case AGG_BANDWIDTH: + printf("bandwidth"); + break; + case AGG_STABLE: + printf("stable"); + break; + case AGG_COUNT: + printf("count"); + break; + } + printf("\n"); + } + + num_slaves = rte_eth_bond_slaves_get(port_id, slaves, RTE_MAX_ETHPORTS); + + if (num_slaves < 0) { + printf("\tFailed to get slave list for port = %d\n", port_id); + return; + } + if (num_slaves > 0) { + printf("\tSlaves (%d): [", num_slaves); + for (i = 0; i < num_slaves - 1; i++) + printf("%d ", slaves[i]); + + printf("%d]\n", slaves[num_slaves - 1]); + } else { + printf("\tSlaves: []\n"); + + } + + num_active_slaves = rte_eth_bond_active_slaves_get(port_id, slaves, + RTE_MAX_ETHPORTS); + + if (num_active_slaves < 0) { + printf("\tFailed to get active slave list for port = %d\n", port_id); + return; + } + if (num_active_slaves > 0) { + printf("\tActive Slaves (%d): [", num_active_slaves); + for (i = 0; i < num_active_slaves - 1; i++) + printf("%d ", slaves[i]); + + printf("%d]\n", slaves[num_active_slaves - 1]); + + } else { + printf("\tActive Slaves: []\n"); + + } + + primary_id = rte_eth_bond_primary_get(port_id); + if (primary_id < 0) { + printf("\tFailed to get primary slave for port = %d\n", port_id); + return; + } else + printf("\tPrimary: [%d]\n", primary_id); + +} + +cmdline_parse_token_string_t cmd_showbonding_config_show = +TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_config_result, + show, "show"); +cmdline_parse_token_string_t cmd_showbonding_config_bonding = +TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_config_result, + bonding, "bonding"); +cmdline_parse_token_string_t cmd_showbonding_config_config = +TOKEN_STRING_INITIALIZER(struct cmd_show_bonding_config_result, + config, "config"); +cmdline_parse_token_num_t cmd_showbonding_config_port = +TOKEN_NUM_INITIALIZER(struct cmd_show_bonding_config_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_show_bonding_config = { + .f = cmd_show_bonding_config_parsed, + .help_str = "show bonding config <port_id>: " + "Show the bonding config for port_id", + .data = NULL, + .tokens = { + (void *)&cmd_showbonding_config_show, + (void *)&cmd_showbonding_config_bonding, + (void *)&cmd_showbonding_config_config, + (void 
*)&cmd_showbonding_config_port, + NULL + } +}; + +/* *** SET BONDING PRIMARY *** */ +struct cmd_set_bonding_primary_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t primary; + portid_t slave_id; + portid_t port_id; +}; + +static void cmd_set_bonding_primary_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_bonding_primary_result *res = parsed_result; + portid_t master_port_id = res->port_id; + portid_t slave_port_id = res->slave_id; + + /* Set the primary slave for a bonded device. */ + if (0 != rte_eth_bond_primary_set(master_port_id, slave_port_id)) { + printf("\t Failed to set primary slave for port = %d.\n", + master_port_id); + return; + } + init_port_config(); +} + +cmdline_parse_token_string_t cmd_setbonding_primary_set = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_primary_result, + set, "set"); +cmdline_parse_token_string_t cmd_setbonding_primary_bonding = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_primary_result, + bonding, "bonding"); +cmdline_parse_token_string_t cmd_setbonding_primary_primary = +TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_primary_result, + primary, "primary"); +cmdline_parse_token_num_t cmd_setbonding_primary_slave = +TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_primary_result, + slave_id, UINT16); +cmdline_parse_token_num_t cmd_setbonding_primary_port = +TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_primary_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_set_bonding_primary = { + .f = cmd_set_bonding_primary_parsed, + .help_str = "set bonding primary <slave_id> <port_id>: " + "Set the primary slave for port_id", + .data = NULL, + .tokens = { + (void *)&cmd_setbonding_primary_set, + (void *)&cmd_setbonding_primary_bonding, + (void *)&cmd_setbonding_primary_primary, + (void *)&cmd_setbonding_primary_slave, + (void *)&cmd_setbonding_primary_port, + NULL + } +}; + +/* *** ADD SLAVE *** */ +struct cmd_add_bonding_slave_result { + cmdline_fixed_string_t add; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t slave; + portid_t slave_id; + portid_t port_id; +}; + +static void cmd_add_bonding_slave_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_add_bonding_slave_result *res = parsed_result; + portid_t master_port_id = res->port_id; + portid_t slave_port_id = res->slave_id; + + /* add the slave for a bonded device. 
*/ + if (0 != rte_eth_bond_slave_add(master_port_id, slave_port_id)) { + printf("\t Failed to add slave %d to master port = %d.\n", + slave_port_id, master_port_id); + return; + } + init_port_config(); + set_port_slave_flag(slave_port_id); +} + +cmdline_parse_token_string_t cmd_addbonding_slave_add = +TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result, + add, "add"); +cmdline_parse_token_string_t cmd_addbonding_slave_bonding = +TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result, + bonding, "bonding"); +cmdline_parse_token_string_t cmd_addbonding_slave_slave = +TOKEN_STRING_INITIALIZER(struct cmd_add_bonding_slave_result, + slave, "slave"); +cmdline_parse_token_num_t cmd_addbonding_slave_slaveid = +TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_slave_result, + slave_id, UINT16); +cmdline_parse_token_num_t cmd_addbonding_slave_port = +TOKEN_NUM_INITIALIZER(struct cmd_add_bonding_slave_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_add_bonding_slave = { + .f = cmd_add_bonding_slave_parsed, + .help_str = "add bonding slave <slave_id> <port_id>: " + "Add a slave device to a bonded device", + .data = NULL, + .tokens = { + (void *)&cmd_addbonding_slave_add, + (void *)&cmd_addbonding_slave_bonding, + (void *)&cmd_addbonding_slave_slave, + (void *)&cmd_addbonding_slave_slaveid, + (void *)&cmd_addbonding_slave_port, + NULL + } +}; + +/* *** REMOVE SLAVE *** */ +struct cmd_remove_bonding_slave_result { + cmdline_fixed_string_t remove; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t slave; + portid_t slave_id; + portid_t port_id; +}; + +static void cmd_remove_bonding_slave_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_remove_bonding_slave_result *res = parsed_result; + portid_t master_port_id = res->port_id; + portid_t slave_port_id = res->slave_id; + + /* remove the slave from a bonded device. 
*/ + if (0 != rte_eth_bond_slave_remove(master_port_id, slave_port_id)) { + printf("\t Failed to remove slave %d from master port = %d.\n", + slave_port_id, master_port_id); + return; + } + init_port_config(); + clear_port_slave_flag(slave_port_id); +} + +cmdline_parse_token_string_t cmd_removebonding_slave_remove = + TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_slave_result, + remove, "remove"); +cmdline_parse_token_string_t cmd_removebonding_slave_bonding = + TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_slave_result, + bonding, "bonding"); +cmdline_parse_token_string_t cmd_removebonding_slave_slave = + TOKEN_STRING_INITIALIZER(struct cmd_remove_bonding_slave_result, + slave, "slave"); +cmdline_parse_token_num_t cmd_removebonding_slave_slaveid = + TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_slave_result, + slave_id, UINT16); +cmdline_parse_token_num_t cmd_removebonding_slave_port = + TOKEN_NUM_INITIALIZER(struct cmd_remove_bonding_slave_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_remove_bonding_slave = { + .f = cmd_remove_bonding_slave_parsed, + .help_str = "remove bonding slave <slave_id> <port_id>: " + "Remove a slave device from a bonded device", + .data = NULL, + .tokens = { + (void *)&cmd_removebonding_slave_remove, + (void *)&cmd_removebonding_slave_bonding, + (void *)&cmd_removebonding_slave_slave, + (void *)&cmd_removebonding_slave_slaveid, + (void *)&cmd_removebonding_slave_port, + NULL + } +}; + +/* *** CREATE BONDED DEVICE *** */ +struct cmd_create_bonded_device_result { + cmdline_fixed_string_t create; + cmdline_fixed_string_t bonded; + cmdline_fixed_string_t device; + uint8_t mode; + uint8_t socket; +}; + +static int bond_dev_num = 0; + +static void cmd_create_bonded_device_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_create_bonded_device_result *res = parsed_result; + char ethdev_name[RTE_ETH_NAME_MAX_LEN]; + int port_id; + int ret; + + if (test_done == 0) { + printf("Please stop forwarding first\n"); + return; + } + + snprintf(ethdev_name, RTE_ETH_NAME_MAX_LEN, "net_bonding_testpmd_%d", + bond_dev_num++); + + /* Create a new bonded device. 
*/ + port_id = rte_eth_bond_create(ethdev_name, res->mode, res->socket); + if (port_id < 0) { + printf("\t Failed to create bonded device.\n"); + return; + } else { + printf("Created new bonded device %s on (port %d).\n", ethdev_name, + port_id); + + /* Update number of ports */ + nb_ports = rte_eth_dev_count_avail(); + reconfig(port_id, res->socket); + ret = rte_eth_promiscuous_enable(port_id); + if (ret != 0) + printf("Failed to enable promiscuous mode for port %u: %s - ignore\n", + port_id, rte_strerror(-ret)); + + ports[port_id].need_setup = 0; + ports[port_id].port_status = RTE_PORT_STOPPED; + } + +} + +cmdline_parse_token_string_t cmd_createbonded_device_create = + TOKEN_STRING_INITIALIZER(struct cmd_create_bonded_device_result, + create, "create"); +cmdline_parse_token_string_t cmd_createbonded_device_bonded = + TOKEN_STRING_INITIALIZER(struct cmd_create_bonded_device_result, + bonded, "bonded"); +cmdline_parse_token_string_t cmd_createbonded_device_device = + TOKEN_STRING_INITIALIZER(struct cmd_create_bonded_device_result, + device, "device"); +cmdline_parse_token_num_t cmd_createbonded_device_mode = + TOKEN_NUM_INITIALIZER(struct cmd_create_bonded_device_result, + mode, UINT8); +cmdline_parse_token_num_t cmd_createbonded_device_socket = + TOKEN_NUM_INITIALIZER(struct cmd_create_bonded_device_result, + socket, UINT8); + +cmdline_parse_inst_t cmd_create_bonded_device = { + .f = cmd_create_bonded_device_parsed, + .help_str = "create bonded device <mode> <socket>: " + "Create a new bonded device with specific bonding mode and socket", + .data = NULL, + .tokens = { + (void *)&cmd_createbonded_device_create, + (void *)&cmd_createbonded_device_bonded, + (void *)&cmd_createbonded_device_device, + (void *)&cmd_createbonded_device_mode, + (void *)&cmd_createbonded_device_socket, + NULL + } +}; + +/* *** SET MAC ADDRESS IN BONDED DEVICE *** */ +struct cmd_set_bond_mac_addr_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t mac_addr; + uint16_t port_num; + struct rte_ether_addr address; +}; + +static void cmd_set_bond_mac_addr_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_bond_mac_addr_result *res = parsed_result; + int ret; + + if (port_id_is_invalid(res->port_num, ENABLED_WARN)) + return; + + ret = rte_eth_bond_mac_address_set(res->port_num, &res->address); + + /* check the return value and print it if is < 0 */ + if (ret < 0) + printf("set_bond_mac_addr error: (%s)\n", strerror(-ret)); +} + +cmdline_parse_token_string_t cmd_set_bond_mac_addr_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mac_addr_result, set, "set"); +cmdline_parse_token_string_t cmd_set_bond_mac_addr_bonding = + TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mac_addr_result, bonding, + "bonding"); +cmdline_parse_token_string_t cmd_set_bond_mac_addr_mac = + TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mac_addr_result, mac_addr, + "mac_addr"); +cmdline_parse_token_num_t cmd_set_bond_mac_addr_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mac_addr_result, + port_num, UINT16); +cmdline_parse_token_etheraddr_t cmd_set_bond_mac_addr_addr = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_bond_mac_addr_result, address); + +cmdline_parse_inst_t cmd_set_bond_mac_addr = { + .f = cmd_set_bond_mac_addr_parsed, + .data = (void *) 0, + .help_str = "set bonding mac_addr <port_id> <mac_addr>", + .tokens = { + (void *)&cmd_set_bond_mac_addr_set, + (void *)&cmd_set_bond_mac_addr_bonding, + (void 
*)&cmd_set_bond_mac_addr_mac, + (void *)&cmd_set_bond_mac_addr_portnum, + (void *)&cmd_set_bond_mac_addr_addr, + NULL + } +}; + + +/* *** SET LINK STATUS MONITORING POLLING PERIOD ON BONDED DEVICE *** */ +struct cmd_set_bond_mon_period_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t mon_period; + uint16_t port_num; + uint32_t period_ms; +}; + +static void cmd_set_bond_mon_period_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_bond_mon_period_result *res = parsed_result; + int ret; + + ret = rte_eth_bond_link_monitoring_set(res->port_num, res->period_ms); + + /* check the return value and print it if is < 0 */ + if (ret < 0) + printf("set_bond_mon_period error: (%s)\n", strerror(-ret)); +} + +cmdline_parse_token_string_t cmd_set_bond_mon_period_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mon_period_result, + set, "set"); +cmdline_parse_token_string_t cmd_set_bond_mon_period_bonding = + TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mon_period_result, + bonding, "bonding"); +cmdline_parse_token_string_t cmd_set_bond_mon_period_mon_period = + TOKEN_STRING_INITIALIZER(struct cmd_set_bond_mon_period_result, + mon_period, "mon_period"); +cmdline_parse_token_num_t cmd_set_bond_mon_period_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mon_period_result, + port_num, UINT16); +cmdline_parse_token_num_t cmd_set_bond_mon_period_period_ms = + TOKEN_NUM_INITIALIZER(struct cmd_set_bond_mon_period_result, + period_ms, UINT32); + +cmdline_parse_inst_t cmd_set_bond_mon_period = { + .f = cmd_set_bond_mon_period_parsed, + .data = (void *) 0, + .help_str = "set bonding mon_period <port_id> <period_ms>", + .tokens = { + (void *)&cmd_set_bond_mon_period_set, + (void *)&cmd_set_bond_mon_period_bonding, + (void *)&cmd_set_bond_mon_period_mon_period, + (void *)&cmd_set_bond_mon_period_portnum, + (void *)&cmd_set_bond_mon_period_period_ms, + NULL + } +}; + + + +struct cmd_set_bonding_agg_mode_policy_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t agg_mode; + uint16_t port_num; + cmdline_fixed_string_t policy; +}; + + +static void +cmd_set_bonding_agg_mode(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_bonding_agg_mode_policy_result *res = parsed_result; + uint8_t policy = AGG_BANDWIDTH; + + if (!strcmp(res->policy, "bandwidth")) + policy = AGG_BANDWIDTH; + else if (!strcmp(res->policy, "stable")) + policy = AGG_STABLE; + else if (!strcmp(res->policy, "count")) + policy = AGG_COUNT; + + rte_eth_bond_8023ad_agg_selection_set(res->port_num, policy); +} + + +cmdline_parse_token_string_t cmd_set_bonding_agg_mode_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result, + set, "set"); +cmdline_parse_token_string_t cmd_set_bonding_agg_mode_bonding = + TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result, + bonding, "bonding"); + +cmdline_parse_token_string_t cmd_set_bonding_agg_mode_agg_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result, + agg_mode, "agg_mode"); + +cmdline_parse_token_num_t cmd_set_bonding_agg_mode_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_set_bonding_agg_mode_policy_result, + port_num, UINT16); + +cmdline_parse_token_string_t cmd_set_bonding_agg_mode_policy_string = + TOKEN_STRING_INITIALIZER( + struct cmd_set_bonding_agg_mode_policy_result, + policy, "stable#bandwidth#count"); + +cmdline_parse_inst_t 
cmd_set_bonding_agg_mode_policy = { + .f = cmd_set_bonding_agg_mode, + .data = (void *) 0, + .help_str = "set bonding mode IEEE802.3AD aggregator policy <port_id> <agg_name>", + .tokens = { + (void *)&cmd_set_bonding_agg_mode_set, + (void *)&cmd_set_bonding_agg_mode_bonding, + (void *)&cmd_set_bonding_agg_mode_agg_mode, + (void *)&cmd_set_bonding_agg_mode_portnum, + (void *)&cmd_set_bonding_agg_mode_policy_string, + NULL + } +}; + + +#endif /* RTE_LIBRTE_PMD_BOND */ + +/* *** SET FORWARDING MODE *** */ +struct cmd_set_fwd_mode_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t fwd; + cmdline_fixed_string_t mode; +}; + +static void cmd_set_fwd_mode_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_fwd_mode_result *res = parsed_result; + + retry_enabled = 0; + set_pkt_forwarding_mode(res->mode); +} + +cmdline_parse_token_string_t cmd_setfwd_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, set, "set"); +cmdline_parse_token_string_t cmd_setfwd_fwd = + TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, fwd, "fwd"); +cmdline_parse_token_string_t cmd_setfwd_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, mode, + "" /* defined at init */); + +cmdline_parse_inst_t cmd_set_fwd_mode = { + .f = cmd_set_fwd_mode_parsed, + .data = NULL, + .help_str = NULL, /* defined at init */ + .tokens = { + (void *)&cmd_setfwd_set, + (void *)&cmd_setfwd_fwd, + (void *)&cmd_setfwd_mode, + NULL, + }, +}; + +static void cmd_set_fwd_mode_init(void) +{ + char *modes, *c; + static char token[128]; + static char help[256]; + cmdline_parse_token_string_t *token_struct; + + modes = list_pkt_forwarding_modes(); + snprintf(help, sizeof(help), "set fwd %s: " + "Set packet forwarding mode", modes); + cmd_set_fwd_mode.help_str = help; + + /* string token separator is # */ + for (c = token; *modes != '\0'; modes++) + if (*modes == '|') + *c++ = '#'; + else + *c++ = *modes; + token_struct = (cmdline_parse_token_string_t*)cmd_set_fwd_mode.tokens[2]; + token_struct->string_data.str = token; +} + +/* *** SET RETRY FORWARDING MODE *** */ +struct cmd_set_fwd_retry_mode_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t fwd; + cmdline_fixed_string_t mode; + cmdline_fixed_string_t retry; +}; + +static void cmd_set_fwd_retry_mode_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_fwd_retry_mode_result *res = parsed_result; + + retry_enabled = 1; + set_pkt_forwarding_mode(res->mode); +} + +cmdline_parse_token_string_t cmd_setfwd_retry_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_retry_mode_result, + set, "set"); +cmdline_parse_token_string_t cmd_setfwd_retry_fwd = + TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_retry_mode_result, + fwd, "fwd"); +cmdline_parse_token_string_t cmd_setfwd_retry_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_retry_mode_result, + mode, + "" /* defined at init */); +cmdline_parse_token_string_t cmd_setfwd_retry_retry = + TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_retry_mode_result, + retry, "retry"); + +cmdline_parse_inst_t cmd_set_fwd_retry_mode = { + .f = cmd_set_fwd_retry_mode_parsed, + .data = NULL, + .help_str = NULL, /* defined at init */ + .tokens = { + (void *)&cmd_setfwd_retry_set, + (void *)&cmd_setfwd_retry_fwd, + (void *)&cmd_setfwd_retry_mode, + (void *)&cmd_setfwd_retry_retry, + NULL, + }, +}; + +static void cmd_set_fwd_retry_mode_init(void) +{ + char *modes, *c; + static char token[128]; + static char 
help[256]; + cmdline_parse_token_string_t *token_struct; + + modes = list_pkt_forwarding_retry_modes(); + snprintf(help, sizeof(help), "set fwd %s retry: " + "Set packet forwarding mode with retry", modes); + cmd_set_fwd_retry_mode.help_str = help; + + /* string token separator is # */ + for (c = token; *modes != '\0'; modes++) + if (*modes == '|') + *c++ = '#'; + else + *c++ = *modes; + token_struct = (cmdline_parse_token_string_t *) + cmd_set_fwd_retry_mode.tokens[2]; + token_struct->string_data.str = token; +} + +/* *** SET BURST TX DELAY TIME RETRY NUMBER *** */ +struct cmd_set_burst_tx_retry_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t burst; + cmdline_fixed_string_t tx; + cmdline_fixed_string_t delay; + uint32_t time; + cmdline_fixed_string_t retry; + uint32_t retry_num; +}; + +static void cmd_set_burst_tx_retry_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_burst_tx_retry_result *res = parsed_result; + + if (!strcmp(res->set, "set") && !strcmp(res->burst, "burst") + && !strcmp(res->tx, "tx")) { + if (!strcmp(res->delay, "delay")) + burst_tx_delay_time = res->time; + if (!strcmp(res->retry, "retry")) + burst_tx_retry_num = res->retry_num; + } + +} + +cmdline_parse_token_string_t cmd_set_burst_tx_retry_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, set, "set"); +cmdline_parse_token_string_t cmd_set_burst_tx_retry_burst = + TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, burst, + "burst"); +cmdline_parse_token_string_t cmd_set_burst_tx_retry_tx = + TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, tx, "tx"); +cmdline_parse_token_string_t cmd_set_burst_tx_retry_delay = + TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, delay, "delay"); +cmdline_parse_token_num_t cmd_set_burst_tx_retry_time = + TOKEN_NUM_INITIALIZER(struct cmd_set_burst_tx_retry_result, time, UINT32); +cmdline_parse_token_string_t cmd_set_burst_tx_retry_retry = + TOKEN_STRING_INITIALIZER(struct cmd_set_burst_tx_retry_result, retry, "retry"); +cmdline_parse_token_num_t cmd_set_burst_tx_retry_retry_num = + TOKEN_NUM_INITIALIZER(struct cmd_set_burst_tx_retry_result, retry_num, UINT32); + +cmdline_parse_inst_t cmd_set_burst_tx_retry = { + .f = cmd_set_burst_tx_retry_parsed, + .help_str = "set burst tx delay <delay_usec> retry <num_retry>", + .tokens = { + (void *)&cmd_set_burst_tx_retry_set, + (void *)&cmd_set_burst_tx_retry_burst, + (void *)&cmd_set_burst_tx_retry_tx, + (void *)&cmd_set_burst_tx_retry_delay, + (void *)&cmd_set_burst_tx_retry_time, + (void *)&cmd_set_burst_tx_retry_retry, + (void *)&cmd_set_burst_tx_retry_retry_num, + NULL, + }, +}; + +/* *** SET PROMISC MODE *** */ +struct cmd_set_promisc_mode_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t promisc; + cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */ + uint16_t port_num; /* valid if "allports" argument == 0 */ + cmdline_fixed_string_t mode; +}; + +static void cmd_set_promisc_mode_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + void *allports) +{ + struct cmd_set_promisc_mode_result *res = parsed_result; + int enable; + portid_t i; + + if (!strcmp(res->mode, "on")) + enable = 1; + else + enable = 0; + + /* all ports */ + if (allports) { + RTE_ETH_FOREACH_DEV(i) + eth_set_promisc_mode(i, enable); + } else { + eth_set_promisc_mode(res->port_num, enable); + } +} + +cmdline_parse_token_string_t cmd_setpromisc_set = + TOKEN_STRING_INITIALIZER(struct 
cmd_set_promisc_mode_result, set, "set"); +cmdline_parse_token_string_t cmd_setpromisc_promisc = + TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, promisc, + "promisc"); +cmdline_parse_token_string_t cmd_setpromisc_portall = + TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, port_all, + "all"); +cmdline_parse_token_num_t cmd_setpromisc_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_set_promisc_mode_result, port_num, + UINT16); +cmdline_parse_token_string_t cmd_setpromisc_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, mode, + "on#off"); + +cmdline_parse_inst_t cmd_set_promisc_mode_all = { + .f = cmd_set_promisc_mode_parsed, + .data = (void *)1, + .help_str = "set promisc all on|off: Set promisc mode for all ports", + .tokens = { + (void *)&cmd_setpromisc_set, + (void *)&cmd_setpromisc_promisc, + (void *)&cmd_setpromisc_portall, + (void *)&cmd_setpromisc_mode, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_promisc_mode_one = { + .f = cmd_set_promisc_mode_parsed, + .data = (void *)0, + .help_str = "set promisc <port_id> on|off: Set promisc mode on port_id", + .tokens = { + (void *)&cmd_setpromisc_set, + (void *)&cmd_setpromisc_promisc, + (void *)&cmd_setpromisc_portnum, + (void *)&cmd_setpromisc_mode, + NULL, + }, +}; + +/* *** SET ALLMULTI MODE *** */ +struct cmd_set_allmulti_mode_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t allmulti; + cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */ + uint16_t port_num; /* valid if "allports" argument == 0 */ + cmdline_fixed_string_t mode; +}; + +static void cmd_set_allmulti_mode_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + void *allports) +{ + struct cmd_set_allmulti_mode_result *res = parsed_result; + int enable; + portid_t i; + + if (!strcmp(res->mode, "on")) + enable = 1; + else + enable = 0; + + /* all ports */ + if (allports) { + RTE_ETH_FOREACH_DEV(i) { + eth_set_allmulticast_mode(i, enable); + } + } + else { + eth_set_allmulticast_mode(res->port_num, enable); + } +} + +cmdline_parse_token_string_t cmd_setallmulti_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, set, "set"); +cmdline_parse_token_string_t cmd_setallmulti_allmulti = + TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, allmulti, + "allmulti"); +cmdline_parse_token_string_t cmd_setallmulti_portall = + TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, port_all, + "all"); +cmdline_parse_token_num_t cmd_setallmulti_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_set_allmulti_mode_result, port_num, + UINT16); +cmdline_parse_token_string_t cmd_setallmulti_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, mode, + "on#off"); + +cmdline_parse_inst_t cmd_set_allmulti_mode_all = { + .f = cmd_set_allmulti_mode_parsed, + .data = (void *)1, + .help_str = "set allmulti all on|off: Set allmulti mode for all ports", + .tokens = { + (void *)&cmd_setallmulti_set, + (void *)&cmd_setallmulti_allmulti, + (void *)&cmd_setallmulti_portall, + (void *)&cmd_setallmulti_mode, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_allmulti_mode_one = { + .f = cmd_set_allmulti_mode_parsed, + .data = (void *)0, + .help_str = "set allmulti <port_id> on|off: " + "Set allmulti mode on port_id", + .tokens = { + (void *)&cmd_setallmulti_set, + (void *)&cmd_setallmulti_allmulti, + (void *)&cmd_setallmulti_portnum, + (void *)&cmd_setallmulti_mode, + NULL, + }, +}; + +/* *** SETUP ETHERNET LINK FLOW CONTROL *** */ +struct cmd_link_flow_ctrl_set_result { + 
cmdline_fixed_string_t set; + cmdline_fixed_string_t flow_ctrl; + cmdline_fixed_string_t rx; + cmdline_fixed_string_t rx_lfc_mode; + cmdline_fixed_string_t tx; + cmdline_fixed_string_t tx_lfc_mode; + cmdline_fixed_string_t mac_ctrl_frame_fwd; + cmdline_fixed_string_t mac_ctrl_frame_fwd_mode; + cmdline_fixed_string_t autoneg_str; + cmdline_fixed_string_t autoneg; + cmdline_fixed_string_t hw_str; + uint32_t high_water; + cmdline_fixed_string_t lw_str; + uint32_t low_water; + cmdline_fixed_string_t pt_str; + uint16_t pause_time; + cmdline_fixed_string_t xon_str; + uint16_t send_xon; + portid_t port_id; +}; + +cmdline_parse_token_string_t cmd_lfc_set_set = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + set, "set"); +cmdline_parse_token_string_t cmd_lfc_set_flow_ctrl = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + flow_ctrl, "flow_ctrl"); +cmdline_parse_token_string_t cmd_lfc_set_rx = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + rx, "rx"); +cmdline_parse_token_string_t cmd_lfc_set_rx_mode = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + rx_lfc_mode, "on#off"); +cmdline_parse_token_string_t cmd_lfc_set_tx = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + tx, "tx"); +cmdline_parse_token_string_t cmd_lfc_set_tx_mode = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + tx_lfc_mode, "on#off"); +cmdline_parse_token_string_t cmd_lfc_set_high_water_str = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + hw_str, "high_water"); +cmdline_parse_token_num_t cmd_lfc_set_high_water = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + high_water, UINT32); +cmdline_parse_token_string_t cmd_lfc_set_low_water_str = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + lw_str, "low_water"); +cmdline_parse_token_num_t cmd_lfc_set_low_water = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + low_water, UINT32); +cmdline_parse_token_string_t cmd_lfc_set_pause_time_str = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + pt_str, "pause_time"); +cmdline_parse_token_num_t cmd_lfc_set_pause_time = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + pause_time, UINT16); +cmdline_parse_token_string_t cmd_lfc_set_send_xon_str = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + xon_str, "send_xon"); +cmdline_parse_token_num_t cmd_lfc_set_send_xon = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + send_xon, UINT16); +cmdline_parse_token_string_t cmd_lfc_set_mac_ctrl_frame_fwd_mode = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + mac_ctrl_frame_fwd, "mac_ctrl_frame_fwd"); +cmdline_parse_token_string_t cmd_lfc_set_mac_ctrl_frame_fwd = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + mac_ctrl_frame_fwd_mode, "on#off"); +cmdline_parse_token_string_t cmd_lfc_set_autoneg_str = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + autoneg_str, "autoneg"); +cmdline_parse_token_string_t cmd_lfc_set_autoneg = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + autoneg, "on#off"); +cmdline_parse_token_num_t cmd_lfc_set_portid = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + port_id, UINT16); + +/* forward declaration */ +static void +cmd_link_flow_ctrl_set_parsed(void *parsed_result, struct cmdline *cl, + void *data); + +cmdline_parse_inst_t cmd_link_flow_control_set = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = NULL, 
+ .help_str = "set flow_ctrl rx on|off tx on|off <high_water> " + "<low_water> <pause_time> <send_xon> mac_ctrl_frame_fwd on|off " + "autoneg on|off <port_id>: Configure the Ethernet flow control", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_rx, + (void *)&cmd_lfc_set_rx_mode, + (void *)&cmd_lfc_set_tx, + (void *)&cmd_lfc_set_tx_mode, + (void *)&cmd_lfc_set_high_water, + (void *)&cmd_lfc_set_low_water, + (void *)&cmd_lfc_set_pause_time, + (void *)&cmd_lfc_set_send_xon, + (void *)&cmd_lfc_set_mac_ctrl_frame_fwd_mode, + (void *)&cmd_lfc_set_mac_ctrl_frame_fwd, + (void *)&cmd_lfc_set_autoneg_str, + (void *)&cmd_lfc_set_autoneg, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_link_flow_control_set_rx = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = (void *)&cmd_link_flow_control_set_rx, + .help_str = "set flow_ctrl rx on|off <port_id>: " + "Change rx flow control parameter", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_rx, + (void *)&cmd_lfc_set_rx_mode, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_link_flow_control_set_tx = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = (void *)&cmd_link_flow_control_set_tx, + .help_str = "set flow_ctrl tx on|off <port_id>: " + "Change tx flow control parameter", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_tx, + (void *)&cmd_lfc_set_tx_mode, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_link_flow_control_set_hw = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = (void *)&cmd_link_flow_control_set_hw, + .help_str = "set flow_ctrl high_water <value> <port_id>: " + "Change high water flow control parameter", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_high_water_str, + (void *)&cmd_lfc_set_high_water, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_link_flow_control_set_lw = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = (void *)&cmd_link_flow_control_set_lw, + .help_str = "set flow_ctrl low_water <value> <port_id>: " + "Change low water flow control parameter", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_low_water_str, + (void *)&cmd_lfc_set_low_water, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_link_flow_control_set_pt = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = (void *)&cmd_link_flow_control_set_pt, + .help_str = "set flow_ctrl pause_time <value> <port_id>: " + "Change pause time flow control parameter", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_pause_time_str, + (void *)&cmd_lfc_set_pause_time, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_link_flow_control_set_xon = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = (void *)&cmd_link_flow_control_set_xon, + .help_str = "set flow_ctrl send_xon <value> <port_id>: " + "Change send_xon flow control parameter", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_send_xon_str, + (void *)&cmd_lfc_set_send_xon, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_link_flow_control_set_macfwd = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = (void *)&cmd_link_flow_control_set_macfwd, + .help_str = "set flow_ctrl 
mac_ctrl_frame_fwd on|off <port_id>: " + "Change mac ctrl fwd flow control parameter", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_mac_ctrl_frame_fwd_mode, + (void *)&cmd_lfc_set_mac_ctrl_frame_fwd, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_link_flow_control_set_autoneg = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = (void *)&cmd_link_flow_control_set_autoneg, + .help_str = "set flow_ctrl autoneg on|off <port_id>: " + "Change autoneg flow control parameter", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_autoneg_str, + (void *)&cmd_lfc_set_autoneg, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +static void +cmd_link_flow_ctrl_set_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + void *data) +{ + struct cmd_link_flow_ctrl_set_result *res = parsed_result; + cmdline_parse_inst_t *cmd = data; + struct rte_eth_fc_conf fc_conf; + int rx_fc_en = 0; + int tx_fc_en = 0; + int ret; + + /* + * Rx on/off, flow control is enabled/disabled on RX side. This can indicate + * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side. + * Tx on/off, flow control is enabled/disabled on TX side. This can indicate + * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side. + */ + static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = { + {RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL} + }; + + /* Partial command line, retrieve current configuration */ + if (cmd) { + ret = rte_eth_dev_flow_ctrl_get(res->port_id, &fc_conf); + if (ret != 0) { + printf("cannot get current flow ctrl parameters, return" + "code = %d\n", ret); + return; + } + + if ((fc_conf.mode == RTE_FC_RX_PAUSE) || + (fc_conf.mode == RTE_FC_FULL)) + rx_fc_en = 1; + if ((fc_conf.mode == RTE_FC_TX_PAUSE) || + (fc_conf.mode == RTE_FC_FULL)) + tx_fc_en = 1; + } + + if (!cmd || cmd == &cmd_link_flow_control_set_rx) + rx_fc_en = (!strcmp(res->rx_lfc_mode, "on")) ? 1 : 0; + + if (!cmd || cmd == &cmd_link_flow_control_set_tx) + tx_fc_en = (!strcmp(res->tx_lfc_mode, "on")) ? 1 : 0; + + fc_conf.mode = rx_tx_onoff_2_lfc_mode[rx_fc_en][tx_fc_en]; + + if (!cmd || cmd == &cmd_link_flow_control_set_hw) + fc_conf.high_water = res->high_water; + + if (!cmd || cmd == &cmd_link_flow_control_set_lw) + fc_conf.low_water = res->low_water; + + if (!cmd || cmd == &cmd_link_flow_control_set_pt) + fc_conf.pause_time = res->pause_time; + + if (!cmd || cmd == &cmd_link_flow_control_set_xon) + fc_conf.send_xon = res->send_xon; + + if (!cmd || cmd == &cmd_link_flow_control_set_macfwd) { + if (!strcmp(res->mac_ctrl_frame_fwd_mode, "on")) + fc_conf.mac_ctrl_frame_fwd = 1; + else + fc_conf.mac_ctrl_frame_fwd = 0; + } + + if (!cmd || cmd == &cmd_link_flow_control_set_autoneg) + fc_conf.autoneg = (!strcmp(res->autoneg, "on")) ? 
1 : 0; + + ret = rte_eth_dev_flow_ctrl_set(res->port_id, &fc_conf); + if (ret != 0) + printf("bad flow control parameter, return code = %d \n", ret); +} + +/* *** SETUP ETHERNET PRIORITY FLOW CONTROL *** */ +struct cmd_priority_flow_ctrl_set_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t pfc_ctrl; + cmdline_fixed_string_t rx; + cmdline_fixed_string_t rx_pfc_mode; + cmdline_fixed_string_t tx; + cmdline_fixed_string_t tx_pfc_mode; + uint32_t high_water; + uint32_t low_water; + uint16_t pause_time; + uint8_t priority; + portid_t port_id; +}; + +static void +cmd_priority_flow_ctrl_set_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_priority_flow_ctrl_set_result *res = parsed_result; + struct rte_eth_pfc_conf pfc_conf; + int rx_fc_enable, tx_fc_enable; + int ret; + + /* + * Rx on/off, flow control is enabled/disabled on RX side. This can indicate + * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side. + * Tx on/off, flow control is enabled/disabled on TX side. This can indicate + * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side. + */ + static enum rte_eth_fc_mode rx_tx_onoff_2_pfc_mode[2][2] = { + {RTE_FC_NONE, RTE_FC_TX_PAUSE}, {RTE_FC_RX_PAUSE, RTE_FC_FULL} + }; + + memset(&pfc_conf, 0, sizeof(struct rte_eth_pfc_conf)); + rx_fc_enable = (!strncmp(res->rx_pfc_mode, "on",2)) ? 1 : 0; + tx_fc_enable = (!strncmp(res->tx_pfc_mode, "on",2)) ? 1 : 0; + pfc_conf.fc.mode = rx_tx_onoff_2_pfc_mode[rx_fc_enable][tx_fc_enable]; + pfc_conf.fc.high_water = res->high_water; + pfc_conf.fc.low_water = res->low_water; + pfc_conf.fc.pause_time = res->pause_time; + pfc_conf.priority = res->priority; + + ret = rte_eth_dev_priority_flow_ctrl_set(res->port_id, &pfc_conf); + if (ret != 0) + printf("bad priority flow control parameter, return code = %d \n", ret); +} + +cmdline_parse_token_string_t cmd_pfc_set_set = + TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + set, "set"); +cmdline_parse_token_string_t cmd_pfc_set_flow_ctrl = + TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + pfc_ctrl, "pfc_ctrl"); +cmdline_parse_token_string_t cmd_pfc_set_rx = + TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + rx, "rx"); +cmdline_parse_token_string_t cmd_pfc_set_rx_mode = + TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + rx_pfc_mode, "on#off"); +cmdline_parse_token_string_t cmd_pfc_set_tx = + TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + tx, "tx"); +cmdline_parse_token_string_t cmd_pfc_set_tx_mode = + TOKEN_STRING_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + tx_pfc_mode, "on#off"); +cmdline_parse_token_num_t cmd_pfc_set_high_water = + TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + high_water, UINT32); +cmdline_parse_token_num_t cmd_pfc_set_low_water = + TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + low_water, UINT32); +cmdline_parse_token_num_t cmd_pfc_set_pause_time = + TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + pause_time, UINT16); +cmdline_parse_token_num_t cmd_pfc_set_priority = + TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + priority, UINT8); +cmdline_parse_token_num_t cmd_pfc_set_portid = + TOKEN_NUM_INITIALIZER(struct cmd_priority_flow_ctrl_set_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_priority_flow_control_set = { + .f = cmd_priority_flow_ctrl_set_parsed, + .data = NULL, + .help_str = "set pfc_ctrl rx on|off tx on|off 
<high_water> <low_water> " + "<pause_time> <priority> <port_id>: " + "Configure the Ethernet priority flow control", + .tokens = { + (void *)&cmd_pfc_set_set, + (void *)&cmd_pfc_set_flow_ctrl, + (void *)&cmd_pfc_set_rx, + (void *)&cmd_pfc_set_rx_mode, + (void *)&cmd_pfc_set_tx, + (void *)&cmd_pfc_set_tx_mode, + (void *)&cmd_pfc_set_high_water, + (void *)&cmd_pfc_set_low_water, + (void *)&cmd_pfc_set_pause_time, + (void *)&cmd_pfc_set_priority, + (void *)&cmd_pfc_set_portid, + NULL, + }, +}; + +/* *** RESET CONFIGURATION *** */ +struct cmd_reset_result { + cmdline_fixed_string_t reset; + cmdline_fixed_string_t def; +}; + +static void cmd_reset_parsed(__rte_unused void *parsed_result, + struct cmdline *cl, + __rte_unused void *data) +{ + cmdline_printf(cl, "Reset to default forwarding configuration...\n"); + set_def_fwd_config(); +} + +cmdline_parse_token_string_t cmd_reset_set = + TOKEN_STRING_INITIALIZER(struct cmd_reset_result, reset, "set"); +cmdline_parse_token_string_t cmd_reset_def = + TOKEN_STRING_INITIALIZER(struct cmd_reset_result, def, + "default"); + +cmdline_parse_inst_t cmd_reset = { + .f = cmd_reset_parsed, + .data = NULL, + .help_str = "set default: Reset default forwarding configuration", + .tokens = { + (void *)&cmd_reset_set, + (void *)&cmd_reset_def, + NULL, + }, +}; + +/* *** START FORWARDING *** */ +struct cmd_start_result { + cmdline_fixed_string_t start; +}; + +cmdline_parse_token_string_t cmd_start_start = + TOKEN_STRING_INITIALIZER(struct cmd_start_result, start, "start"); + +static void cmd_start_parsed(__rte_unused void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + start_packet_forwarding(0); +} + +cmdline_parse_inst_t cmd_start = { + .f = cmd_start_parsed, + .data = NULL, + .help_str = "start: Start packet forwarding", + .tokens = { + (void *)&cmd_start_start, + NULL, + }, +}; + +/* *** START FORWARDING WITH ONE TX BURST FIRST *** */ +struct cmd_start_tx_first_result { + cmdline_fixed_string_t start; + cmdline_fixed_string_t tx_first; +}; + +static void +cmd_start_tx_first_parsed(__rte_unused void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + start_packet_forwarding(1); +} + +cmdline_parse_token_string_t cmd_start_tx_first_start = + TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_result, start, + "start"); +cmdline_parse_token_string_t cmd_start_tx_first_tx_first = + TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_result, + tx_first, "tx_first"); + +cmdline_parse_inst_t cmd_start_tx_first = { + .f = cmd_start_tx_first_parsed, + .data = NULL, + .help_str = "start tx_first: Start packet forwarding, " + "after sending 1 burst of packets", + .tokens = { + (void *)&cmd_start_tx_first_start, + (void *)&cmd_start_tx_first_tx_first, + NULL, + }, +}; + +/* *** START FORWARDING WITH N TX BURST FIRST *** */ +struct cmd_start_tx_first_n_result { + cmdline_fixed_string_t start; + cmdline_fixed_string_t tx_first; + uint32_t tx_num; +}; + +static void +cmd_start_tx_first_n_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_start_tx_first_n_result *res = parsed_result; + + start_packet_forwarding(res->tx_num); +} + +cmdline_parse_token_string_t cmd_start_tx_first_n_start = + TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_n_result, + start, "start"); +cmdline_parse_token_string_t cmd_start_tx_first_n_tx_first = + TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_n_result, + tx_first, "tx_first"); +cmdline_parse_token_num_t 
cmd_start_tx_first_n_tx_num = + TOKEN_NUM_INITIALIZER(struct cmd_start_tx_first_n_result, + tx_num, UINT32); + +cmdline_parse_inst_t cmd_start_tx_first_n = { + .f = cmd_start_tx_first_n_parsed, + .data = NULL, + .help_str = "start tx_first <num>: " + "packet forwarding, after sending <num> bursts of packets", + .tokens = { + (void *)&cmd_start_tx_first_n_start, + (void *)&cmd_start_tx_first_n_tx_first, + (void *)&cmd_start_tx_first_n_tx_num, + NULL, + }, +}; + +/* *** SET LINK UP *** */ +struct cmd_set_link_up_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t link_up; + cmdline_fixed_string_t port; + portid_t port_id; +}; + +cmdline_parse_token_string_t cmd_set_link_up_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_link_up_result, set, "set"); +cmdline_parse_token_string_t cmd_set_link_up_link_up = + TOKEN_STRING_INITIALIZER(struct cmd_set_link_up_result, link_up, + "link-up"); +cmdline_parse_token_string_t cmd_set_link_up_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_link_up_result, port, "port"); +cmdline_parse_token_num_t cmd_set_link_up_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_link_up_result, port_id, UINT16); + +static void cmd_set_link_up_parsed(__rte_unused void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_link_up_result *res = parsed_result; + dev_set_link_up(res->port_id); +} + +cmdline_parse_inst_t cmd_set_link_up = { + .f = cmd_set_link_up_parsed, + .data = NULL, + .help_str = "set link-up port <port id>", + .tokens = { + (void *)&cmd_set_link_up_set, + (void *)&cmd_set_link_up_link_up, + (void *)&cmd_set_link_up_port, + (void *)&cmd_set_link_up_port_id, + NULL, + }, +}; + +/* *** SET LINK DOWN *** */ +struct cmd_set_link_down_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t link_down; + cmdline_fixed_string_t port; + portid_t port_id; +}; + +cmdline_parse_token_string_t cmd_set_link_down_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_link_down_result, set, "set"); +cmdline_parse_token_string_t cmd_set_link_down_link_down = + TOKEN_STRING_INITIALIZER(struct cmd_set_link_down_result, link_down, + "link-down"); +cmdline_parse_token_string_t cmd_set_link_down_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_link_down_result, port, "port"); +cmdline_parse_token_num_t cmd_set_link_down_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_link_down_result, port_id, UINT16); + +static void cmd_set_link_down_parsed( + __rte_unused void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_link_down_result *res = parsed_result; + dev_set_link_down(res->port_id); +} + +cmdline_parse_inst_t cmd_set_link_down = { + .f = cmd_set_link_down_parsed, + .data = NULL, + .help_str = "set link-down port <port id>", + .tokens = { + (void *)&cmd_set_link_down_set, + (void *)&cmd_set_link_down_link_down, + (void *)&cmd_set_link_down_port, + (void *)&cmd_set_link_down_port_id, + NULL, + }, +}; + +/* *** SHOW CFG *** */ +struct cmd_showcfg_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t cfg; + cmdline_fixed_string_t what; +}; + +static void cmd_showcfg_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_showcfg_result *res = parsed_result; + if (!strcmp(res->what, "rxtx")) + rxtx_config_display(); + else if (!strcmp(res->what, "cores")) + fwd_lcores_config_display(); + else if (!strcmp(res->what, "fwd")) + pkt_fwd_config_display(&cur_fwd_config); + else if (!strcmp(res->what, "txpkts")) + 
show_tx_pkt_segments(); +} + +cmdline_parse_token_string_t cmd_showcfg_show = + TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, show, "show"); +cmdline_parse_token_string_t cmd_showcfg_port = + TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, cfg, "config"); +cmdline_parse_token_string_t cmd_showcfg_what = + TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, what, + "rxtx#cores#fwd#txpkts"); + +cmdline_parse_inst_t cmd_showcfg = { + .f = cmd_showcfg_parsed, + .data = NULL, + .help_str = "show config rxtx|cores|fwd|txpkts", + .tokens = { + (void *)&cmd_showcfg_show, + (void *)&cmd_showcfg_port, + (void *)&cmd_showcfg_what, + NULL, + }, +}; + +/* *** SHOW ALL PORT INFO *** */ +struct cmd_showportall_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t what; + cmdline_fixed_string_t all; +}; + +static void cmd_showportall_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + portid_t i; + + struct cmd_showportall_result *res = parsed_result; + if (!strcmp(res->show, "clear")) { + if (!strcmp(res->what, "stats")) + RTE_ETH_FOREACH_DEV(i) + nic_stats_clear(i); + else if (!strcmp(res->what, "xstats")) + RTE_ETH_FOREACH_DEV(i) + nic_xstats_clear(i); + } else if (!strcmp(res->what, "info")) + RTE_ETH_FOREACH_DEV(i) + port_infos_display(i); + else if (!strcmp(res->what, "summary")) { + port_summary_header_display(); + RTE_ETH_FOREACH_DEV(i) + port_summary_display(i); + } + else if (!strcmp(res->what, "stats")) + RTE_ETH_FOREACH_DEV(i) + nic_stats_display(i); + else if (!strcmp(res->what, "xstats")) + RTE_ETH_FOREACH_DEV(i) + nic_xstats_display(i); + else if (!strcmp(res->what, "fdir")) + RTE_ETH_FOREACH_DEV(i) + fdir_get_infos(i); + else if (!strcmp(res->what, "stat_qmap")) + RTE_ETH_FOREACH_DEV(i) + nic_stats_mapping_display(i); + else if (!strcmp(res->what, "dcb_tc")) + RTE_ETH_FOREACH_DEV(i) + port_dcb_info_display(i); + else if (!strcmp(res->what, "cap")) + RTE_ETH_FOREACH_DEV(i) + port_offload_cap_display(i); +} + +cmdline_parse_token_string_t cmd_showportall_show = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, show, + "show#clear"); +cmdline_parse_token_string_t cmd_showportall_port = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, port, "port"); +cmdline_parse_token_string_t cmd_showportall_what = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, what, + "info#summary#stats#xstats#fdir#stat_qmap#dcb_tc#cap"); +cmdline_parse_token_string_t cmd_showportall_all = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, all, "all"); +cmdline_parse_inst_t cmd_showportall = { + .f = cmd_showportall_parsed, + .data = NULL, + .help_str = "show|clear port " + "info|summary|stats|xstats|fdir|stat_qmap|dcb_tc|cap all", + .tokens = { + (void *)&cmd_showportall_show, + (void *)&cmd_showportall_port, + (void *)&cmd_showportall_what, + (void *)&cmd_showportall_all, + NULL, + }, +}; + +/* *** SHOW PORT INFO *** */ +struct cmd_showport_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t what; + uint16_t portnum; +}; + +static void cmd_showport_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_showport_result *res = parsed_result; + if (!strcmp(res->show, "clear")) { + if (!strcmp(res->what, "stats")) + nic_stats_clear(res->portnum); + else if (!strcmp(res->what, "xstats")) + nic_xstats_clear(res->portnum); + } else if (!strcmp(res->what, "info")) + port_infos_display(res->portnum); + else 
if (!strcmp(res->what, "summary")) { + port_summary_header_display(); + port_summary_display(res->portnum); + } + else if (!strcmp(res->what, "stats")) + nic_stats_display(res->portnum); + else if (!strcmp(res->what, "xstats")) + nic_xstats_display(res->portnum); + else if (!strcmp(res->what, "fdir")) + fdir_get_infos(res->portnum); + else if (!strcmp(res->what, "stat_qmap")) + nic_stats_mapping_display(res->portnum); + else if (!strcmp(res->what, "dcb_tc")) + port_dcb_info_display(res->portnum); + else if (!strcmp(res->what, "cap")) + port_offload_cap_display(res->portnum); +} + +cmdline_parse_token_string_t cmd_showport_show = + TOKEN_STRING_INITIALIZER(struct cmd_showport_result, show, + "show#clear"); +cmdline_parse_token_string_t cmd_showport_port = + TOKEN_STRING_INITIALIZER(struct cmd_showport_result, port, "port"); +cmdline_parse_token_string_t cmd_showport_what = + TOKEN_STRING_INITIALIZER(struct cmd_showport_result, what, + "info#summary#stats#xstats#fdir#stat_qmap#dcb_tc#cap"); +cmdline_parse_token_num_t cmd_showport_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_showport_result, portnum, UINT16); + +cmdline_parse_inst_t cmd_showport = { + .f = cmd_showport_parsed, + .data = NULL, + .help_str = "show|clear port " + "info|summary|stats|xstats|fdir|stat_qmap|dcb_tc|cap " + "<port_id>", + .tokens = { + (void *)&cmd_showport_show, + (void *)&cmd_showport_port, + (void *)&cmd_showport_what, + (void *)&cmd_showport_portnum, + NULL, + }, +}; + +/* *** SHOW DEVICE INFO *** */ +struct cmd_showdevice_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t device; + cmdline_fixed_string_t what; + cmdline_fixed_string_t identifier; +}; + +static void cmd_showdevice_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_showdevice_result *res = parsed_result; + if (!strcmp(res->what, "info")) { + if (!strcmp(res->identifier, "all")) + device_infos_display(NULL); + else + device_infos_display(res->identifier); + } +} + +cmdline_parse_token_string_t cmd_showdevice_show = + TOKEN_STRING_INITIALIZER(struct cmd_showdevice_result, show, + "show"); +cmdline_parse_token_string_t cmd_showdevice_device = + TOKEN_STRING_INITIALIZER(struct cmd_showdevice_result, device, "device"); +cmdline_parse_token_string_t cmd_showdevice_what = + TOKEN_STRING_INITIALIZER(struct cmd_showdevice_result, what, + "info"); +cmdline_parse_token_string_t cmd_showdevice_identifier = + TOKEN_STRING_INITIALIZER(struct cmd_showdevice_result, + identifier, NULL); + +cmdline_parse_inst_t cmd_showdevice = { + .f = cmd_showdevice_parsed, + .data = NULL, + .help_str = "show device info <identifier>|all", + .tokens = { + (void *)&cmd_showdevice_show, + (void *)&cmd_showdevice_device, + (void *)&cmd_showdevice_what, + (void *)&cmd_showdevice_identifier, + NULL, + }, +}; +/* *** SHOW QUEUE INFO *** */ +struct cmd_showqueue_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t type; + cmdline_fixed_string_t what; + uint16_t portnum; + uint16_t queuenum; +}; + +static void +cmd_showqueue_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_showqueue_result *res = parsed_result; + + if (!strcmp(res->type, "rxq")) + rx_queue_infos_display(res->portnum, res->queuenum); + else if (!strcmp(res->type, "txq")) + tx_queue_infos_display(res->portnum, res->queuenum); +} + +cmdline_parse_token_string_t cmd_showqueue_show = + TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, show, "show"); +cmdline_parse_token_string_t 
cmd_showqueue_type = + TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, type, "rxq#txq"); +cmdline_parse_token_string_t cmd_showqueue_what = + TOKEN_STRING_INITIALIZER(struct cmd_showqueue_result, what, "info"); +cmdline_parse_token_num_t cmd_showqueue_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, portnum, UINT16); +cmdline_parse_token_num_t cmd_showqueue_queuenum = + TOKEN_NUM_INITIALIZER(struct cmd_showqueue_result, queuenum, UINT16); + +cmdline_parse_inst_t cmd_showqueue = { + .f = cmd_showqueue_parsed, + .data = NULL, + .help_str = "show rxq|txq info <port_id> <queue_id>", + .tokens = { + (void *)&cmd_showqueue_show, + (void *)&cmd_showqueue_type, + (void *)&cmd_showqueue_what, + (void *)&cmd_showqueue_portnum, + (void *)&cmd_showqueue_queuenum, + NULL, + }, +}; + +/* show/clear fwd engine statistics */ +struct fwd_result { + cmdline_fixed_string_t action; + cmdline_fixed_string_t fwd; + cmdline_fixed_string_t stats; + cmdline_fixed_string_t all; +}; + +cmdline_parse_token_string_t cmd_fwd_action = + TOKEN_STRING_INITIALIZER(struct fwd_result, action, "show#clear"); +cmdline_parse_token_string_t cmd_fwd_fwd = + TOKEN_STRING_INITIALIZER(struct fwd_result, fwd, "fwd"); +cmdline_parse_token_string_t cmd_fwd_stats = + TOKEN_STRING_INITIALIZER(struct fwd_result, stats, "stats"); +cmdline_parse_token_string_t cmd_fwd_all = + TOKEN_STRING_INITIALIZER(struct fwd_result, all, "all"); + +static void +cmd_showfwdall_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct fwd_result *res = parsed_result; + + if (!strcmp(res->action, "show")) + fwd_stats_display(); + else + fwd_stats_reset(); +} + +static cmdline_parse_inst_t cmd_showfwdall = { + .f = cmd_showfwdall_parsed, + .data = NULL, + .help_str = "show|clear fwd stats all", + .tokens = { + (void *)&cmd_fwd_action, + (void *)&cmd_fwd_fwd, + (void *)&cmd_fwd_stats, + (void *)&cmd_fwd_all, + NULL, + }, +}; + +/* *** READ PORT REGISTER *** */ +struct cmd_read_reg_result { + cmdline_fixed_string_t read; + cmdline_fixed_string_t reg; + portid_t port_id; + uint32_t reg_off; +}; + +static void +cmd_read_reg_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_read_reg_result *res = parsed_result; + port_reg_display(res->port_id, res->reg_off); +} + +cmdline_parse_token_string_t cmd_read_reg_read = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_result, read, "read"); +cmdline_parse_token_string_t cmd_read_reg_reg = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_result, reg, "reg"); +cmdline_parse_token_num_t cmd_read_reg_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_read_reg_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, reg_off, UINT32); + +cmdline_parse_inst_t cmd_read_reg = { + .f = cmd_read_reg_parsed, + .data = NULL, + .help_str = "read reg <port_id> <reg_off>", + .tokens = { + (void *)&cmd_read_reg_read, + (void *)&cmd_read_reg_reg, + (void *)&cmd_read_reg_port_id, + (void *)&cmd_read_reg_reg_off, + NULL, + }, +}; + +/* *** READ PORT REGISTER BIT FIELD *** */ +struct cmd_read_reg_bit_field_result { + cmdline_fixed_string_t read; + cmdline_fixed_string_t regfield; + portid_t port_id; + uint32_t reg_off; + uint8_t bit1_pos; + uint8_t bit2_pos; +}; + +static void +cmd_read_reg_bit_field_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_read_reg_bit_field_result *res = parsed_result; + 
port_reg_bit_field_display(res->port_id, res->reg_off, + res->bit1_pos, res->bit2_pos); +} + +cmdline_parse_token_string_t cmd_read_reg_bit_field_read = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_field_result, read, + "read"); +cmdline_parse_token_string_t cmd_read_reg_bit_field_regfield = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_field_result, + regfield, "regfield"); +cmdline_parse_token_num_t cmd_read_reg_bit_field_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, port_id, + UINT16); +cmdline_parse_token_num_t cmd_read_reg_bit_field_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, reg_off, + UINT32); +cmdline_parse_token_num_t cmd_read_reg_bit_field_bit1_pos = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, bit1_pos, + UINT8); +cmdline_parse_token_num_t cmd_read_reg_bit_field_bit2_pos = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, bit2_pos, + UINT8); + +cmdline_parse_inst_t cmd_read_reg_bit_field = { + .f = cmd_read_reg_bit_field_parsed, + .data = NULL, + .help_str = "read regfield <port_id> <reg_off> <bit_x> <bit_y>: " + "Read register bit field between bit_x and bit_y included", + .tokens = { + (void *)&cmd_read_reg_bit_field_read, + (void *)&cmd_read_reg_bit_field_regfield, + (void *)&cmd_read_reg_bit_field_port_id, + (void *)&cmd_read_reg_bit_field_reg_off, + (void *)&cmd_read_reg_bit_field_bit1_pos, + (void *)&cmd_read_reg_bit_field_bit2_pos, + NULL, + }, +}; + +/* *** READ PORT REGISTER BIT *** */ +struct cmd_read_reg_bit_result { + cmdline_fixed_string_t read; + cmdline_fixed_string_t regbit; + portid_t port_id; + uint32_t reg_off; + uint8_t bit_pos; +}; + +static void +cmd_read_reg_bit_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_read_reg_bit_result *res = parsed_result; + port_reg_bit_display(res->port_id, res->reg_off, res->bit_pos); +} + +cmdline_parse_token_string_t cmd_read_reg_bit_read = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_result, read, "read"); +cmdline_parse_token_string_t cmd_read_reg_bit_regbit = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_result, + regbit, "regbit"); +cmdline_parse_token_num_t cmd_read_reg_bit_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_read_reg_bit_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, reg_off, UINT32); +cmdline_parse_token_num_t cmd_read_reg_bit_bit_pos = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, bit_pos, UINT8); + +cmdline_parse_inst_t cmd_read_reg_bit = { + .f = cmd_read_reg_bit_parsed, + .data = NULL, + .help_str = "read regbit <port_id> <reg_off> <bit_x>: 0 <= bit_x <= 31", + .tokens = { + (void *)&cmd_read_reg_bit_read, + (void *)&cmd_read_reg_bit_regbit, + (void *)&cmd_read_reg_bit_port_id, + (void *)&cmd_read_reg_bit_reg_off, + (void *)&cmd_read_reg_bit_bit_pos, + NULL, + }, +}; + +/* *** WRITE PORT REGISTER *** */ +struct cmd_write_reg_result { + cmdline_fixed_string_t write; + cmdline_fixed_string_t reg; + portid_t port_id; + uint32_t reg_off; + uint32_t value; +}; + +static void +cmd_write_reg_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_write_reg_result *res = parsed_result; + port_reg_set(res->port_id, res->reg_off, res->value); +} + +cmdline_parse_token_string_t cmd_write_reg_write = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_result, write, "write"); +cmdline_parse_token_string_t 
cmd_write_reg_reg = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_result, reg, "reg"); +cmdline_parse_token_num_t cmd_write_reg_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_write_reg_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, reg_off, UINT32); +cmdline_parse_token_num_t cmd_write_reg_value = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, value, UINT32); + +cmdline_parse_inst_t cmd_write_reg = { + .f = cmd_write_reg_parsed, + .data = NULL, + .help_str = "write reg <port_id> <reg_off> <reg_value>", + .tokens = { + (void *)&cmd_write_reg_write, + (void *)&cmd_write_reg_reg, + (void *)&cmd_write_reg_port_id, + (void *)&cmd_write_reg_reg_off, + (void *)&cmd_write_reg_value, + NULL, + }, +}; + +/* *** WRITE PORT REGISTER BIT FIELD *** */ +struct cmd_write_reg_bit_field_result { + cmdline_fixed_string_t write; + cmdline_fixed_string_t regfield; + portid_t port_id; + uint32_t reg_off; + uint8_t bit1_pos; + uint8_t bit2_pos; + uint32_t value; +}; + +static void +cmd_write_reg_bit_field_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_write_reg_bit_field_result *res = parsed_result; + port_reg_bit_field_set(res->port_id, res->reg_off, + res->bit1_pos, res->bit2_pos, res->value); +} + +cmdline_parse_token_string_t cmd_write_reg_bit_field_write = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_field_result, write, + "write"); +cmdline_parse_token_string_t cmd_write_reg_bit_field_regfield = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_field_result, + regfield, "regfield"); +cmdline_parse_token_num_t cmd_write_reg_bit_field_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, port_id, + UINT16); +cmdline_parse_token_num_t cmd_write_reg_bit_field_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, reg_off, + UINT32); +cmdline_parse_token_num_t cmd_write_reg_bit_field_bit1_pos = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, bit1_pos, + UINT8); +cmdline_parse_token_num_t cmd_write_reg_bit_field_bit2_pos = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, bit2_pos, + UINT8); +cmdline_parse_token_num_t cmd_write_reg_bit_field_value = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, value, + UINT32); + +cmdline_parse_inst_t cmd_write_reg_bit_field = { + .f = cmd_write_reg_bit_field_parsed, + .data = NULL, + .help_str = "write regfield <port_id> <reg_off> <bit_x> <bit_y> " + "<reg_value>: " + "Set register bit field between bit_x and bit_y included", + .tokens = { + (void *)&cmd_write_reg_bit_field_write, + (void *)&cmd_write_reg_bit_field_regfield, + (void *)&cmd_write_reg_bit_field_port_id, + (void *)&cmd_write_reg_bit_field_reg_off, + (void *)&cmd_write_reg_bit_field_bit1_pos, + (void *)&cmd_write_reg_bit_field_bit2_pos, + (void *)&cmd_write_reg_bit_field_value, + NULL, + }, +}; + +/* *** WRITE PORT REGISTER BIT *** */ +struct cmd_write_reg_bit_result { + cmdline_fixed_string_t write; + cmdline_fixed_string_t regbit; + portid_t port_id; + uint32_t reg_off; + uint8_t bit_pos; + uint8_t value; +}; + +static void +cmd_write_reg_bit_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_write_reg_bit_result *res = parsed_result; + port_reg_bit_set(res->port_id, res->reg_off, res->bit_pos, res->value); +} + +cmdline_parse_token_string_t cmd_write_reg_bit_write = + TOKEN_STRING_INITIALIZER(struct 
cmd_write_reg_bit_result, write, + "write"); +cmdline_parse_token_string_t cmd_write_reg_bit_regbit = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_result, + regbit, "regbit"); +cmdline_parse_token_num_t cmd_write_reg_bit_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_write_reg_bit_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, reg_off, UINT32); +cmdline_parse_token_num_t cmd_write_reg_bit_bit_pos = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, bit_pos, UINT8); +cmdline_parse_token_num_t cmd_write_reg_bit_value = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, value, UINT8); + +cmdline_parse_inst_t cmd_write_reg_bit = { + .f = cmd_write_reg_bit_parsed, + .data = NULL, + .help_str = "write regbit <port_id> <reg_off> <bit_x> 0|1: " + "0 <= bit_x <= 31", + .tokens = { + (void *)&cmd_write_reg_bit_write, + (void *)&cmd_write_reg_bit_regbit, + (void *)&cmd_write_reg_bit_port_id, + (void *)&cmd_write_reg_bit_reg_off, + (void *)&cmd_write_reg_bit_bit_pos, + (void *)&cmd_write_reg_bit_value, + NULL, + }, +}; + +/* *** READ A RING DESCRIPTOR OF A PORT RX/TX QUEUE *** */ +struct cmd_read_rxd_txd_result { + cmdline_fixed_string_t read; + cmdline_fixed_string_t rxd_txd; + portid_t port_id; + uint16_t queue_id; + uint16_t desc_id; +}; + +static void +cmd_read_rxd_txd_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_read_rxd_txd_result *res = parsed_result; + + if (!strcmp(res->rxd_txd, "rxd")) + rx_ring_desc_display(res->port_id, res->queue_id, res->desc_id); + else if (!strcmp(res->rxd_txd, "txd")) + tx_ring_desc_display(res->port_id, res->queue_id, res->desc_id); +} + +cmdline_parse_token_string_t cmd_read_rxd_txd_read = + TOKEN_STRING_INITIALIZER(struct cmd_read_rxd_txd_result, read, "read"); +cmdline_parse_token_string_t cmd_read_rxd_txd_rxd_txd = + TOKEN_STRING_INITIALIZER(struct cmd_read_rxd_txd_result, rxd_txd, + "rxd#txd"); +cmdline_parse_token_num_t cmd_read_rxd_txd_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_read_rxd_txd_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, queue_id, UINT16); +cmdline_parse_token_num_t cmd_read_rxd_txd_desc_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, desc_id, UINT16); + +cmdline_parse_inst_t cmd_read_rxd_txd = { + .f = cmd_read_rxd_txd_parsed, + .data = NULL, + .help_str = "read rxd|txd <port_id> <queue_id> <desc_id>", + .tokens = { + (void *)&cmd_read_rxd_txd_read, + (void *)&cmd_read_rxd_txd_rxd_txd, + (void *)&cmd_read_rxd_txd_port_id, + (void *)&cmd_read_rxd_txd_queue_id, + (void *)&cmd_read_rxd_txd_desc_id, + NULL, + }, +}; + +/* *** QUIT *** */ +struct cmd_quit_result { + cmdline_fixed_string_t quit; +}; + +static void cmd_quit_parsed(__rte_unused void *parsed_result, + struct cmdline *cl, + __rte_unused void *data) +{ + cmdline_quit(cl); +} + +cmdline_parse_token_string_t cmd_quit_quit = + TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit"); + +cmdline_parse_inst_t cmd_quit = { + .f = cmd_quit_parsed, + .data = NULL, + .help_str = "quit: Exit application", + .tokens = { + (void *)&cmd_quit_quit, + NULL, + }, +}; + +/* *** ADD/REMOVE MAC ADDRESS FROM A PORT *** */ +struct cmd_mac_addr_result { + cmdline_fixed_string_t mac_addr_cmd; + cmdline_fixed_string_t what; + uint16_t port_num; + struct rte_ether_addr address; +}; + +static void cmd_mac_addr_parsed(void *parsed_result, 
+ __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_mac_addr_result *res = parsed_result; + int ret; + + if (strcmp(res->what, "add") == 0) + ret = rte_eth_dev_mac_addr_add(res->port_num, &res->address, 0); + else if (strcmp(res->what, "set") == 0) + ret = rte_eth_dev_default_mac_addr_set(res->port_num, + &res->address); + else + ret = rte_eth_dev_mac_addr_remove(res->port_num, &res->address); + + /* check the return value and print it if is < 0 */ + if(ret < 0) + printf("mac_addr_cmd error: (%s)\n", strerror(-ret)); + +} + +cmdline_parse_token_string_t cmd_mac_addr_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, mac_addr_cmd, + "mac_addr"); +cmdline_parse_token_string_t cmd_mac_addr_what = + TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, what, + "add#remove#set"); +cmdline_parse_token_num_t cmd_mac_addr_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_mac_addr_result, port_num, + UINT16); +cmdline_parse_token_etheraddr_t cmd_mac_addr_addr = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_mac_addr_result, address); + +cmdline_parse_inst_t cmd_mac_addr = { + .f = cmd_mac_addr_parsed, + .data = (void *)0, + .help_str = "mac_addr add|remove|set <port_id> <mac_addr>: " + "Add/Remove/Set MAC address on port_id", + .tokens = { + (void *)&cmd_mac_addr_cmd, + (void *)&cmd_mac_addr_what, + (void *)&cmd_mac_addr_portnum, + (void *)&cmd_mac_addr_addr, + NULL, + }, +}; + +/* *** SET THE PEER ADDRESS FOR CERTAIN PORT *** */ +struct cmd_eth_peer_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t eth_peer; + portid_t port_id; + cmdline_fixed_string_t peer_addr; +}; + +static void cmd_set_eth_peer_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_eth_peer_result *res = parsed_result; + + if (test_done == 0) { + printf("Please stop forwarding first\n"); + return; + } + if (!strcmp(res->eth_peer, "eth-peer")) { + set_fwd_eth_peer(res->port_id, res->peer_addr); + fwd_config_setup(); + } +} +cmdline_parse_token_string_t cmd_eth_peer_set = + TOKEN_STRING_INITIALIZER(struct cmd_eth_peer_result, set, "set"); +cmdline_parse_token_string_t cmd_eth_peer = + TOKEN_STRING_INITIALIZER(struct cmd_eth_peer_result, eth_peer, "eth-peer"); +cmdline_parse_token_num_t cmd_eth_peer_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_eth_peer_result, port_id, UINT16); +cmdline_parse_token_string_t cmd_eth_peer_addr = + TOKEN_STRING_INITIALIZER(struct cmd_eth_peer_result, peer_addr, NULL); + +cmdline_parse_inst_t cmd_set_fwd_eth_peer = { + .f = cmd_set_eth_peer_parsed, + .data = NULL, + .help_str = "set eth-peer <port_id> <peer_mac>", + .tokens = { + (void *)&cmd_eth_peer_set, + (void *)&cmd_eth_peer, + (void *)&cmd_eth_peer_port_id, + (void *)&cmd_eth_peer_addr, + NULL, + }, +}; + +/* *** CONFIGURE QUEUE STATS COUNTER MAPPINGS *** */ +struct cmd_set_qmap_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t qmap; + cmdline_fixed_string_t what; + portid_t port_id; + uint16_t queue_id; + uint8_t map_value; +}; + +static void +cmd_set_qmap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_qmap_result *res = parsed_result; + int is_rx = (strcmp(res->what, "tx") == 0) ? 
0 : 1; + + set_qmap(res->port_id, (uint8_t)is_rx, res->queue_id, res->map_value); +} + +cmdline_parse_token_string_t cmd_setqmap_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_qmap_result, + set, "set"); +cmdline_parse_token_string_t cmd_setqmap_qmap = + TOKEN_STRING_INITIALIZER(struct cmd_set_qmap_result, + qmap, "stat_qmap"); +cmdline_parse_token_string_t cmd_setqmap_what = + TOKEN_STRING_INITIALIZER(struct cmd_set_qmap_result, + what, "tx#rx"); +cmdline_parse_token_num_t cmd_setqmap_portid = + TOKEN_NUM_INITIALIZER(struct cmd_set_qmap_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_setqmap_queueid = + TOKEN_NUM_INITIALIZER(struct cmd_set_qmap_result, + queue_id, UINT16); +cmdline_parse_token_num_t cmd_setqmap_mapvalue = + TOKEN_NUM_INITIALIZER(struct cmd_set_qmap_result, + map_value, UINT8); + +cmdline_parse_inst_t cmd_set_qmap = { + .f = cmd_set_qmap_parsed, + .data = NULL, + .help_str = "set stat_qmap rx|tx <port_id> <queue_id> <map_value>: " + "Set statistics mapping value on tx|rx queue_id of port_id", + .tokens = { + (void *)&cmd_setqmap_set, + (void *)&cmd_setqmap_qmap, + (void *)&cmd_setqmap_what, + (void *)&cmd_setqmap_portid, + (void *)&cmd_setqmap_queueid, + (void *)&cmd_setqmap_mapvalue, + NULL, + }, +}; + +/* *** SET OPTION TO HIDE ZERO VALUES FOR XSTATS DISPLAY *** */ +struct cmd_set_xstats_hide_zero_result { + cmdline_fixed_string_t keyword; + cmdline_fixed_string_t name; + cmdline_fixed_string_t on_off; +}; + +static void +cmd_set_xstats_hide_zero_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_xstats_hide_zero_result *res; + uint16_t on_off = 0; + + res = parsed_result; + on_off = !strcmp(res->on_off, "on") ? 1 : 0; + set_xstats_hide_zero(on_off); +} + +cmdline_parse_token_string_t cmd_set_xstats_hide_zero_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_set_xstats_hide_zero_result, + keyword, "set"); +cmdline_parse_token_string_t cmd_set_xstats_hide_zero_name = + TOKEN_STRING_INITIALIZER(struct cmd_set_xstats_hide_zero_result, + name, "xstats-hide-zero"); +cmdline_parse_token_string_t cmd_set_xstats_hide_zero_on_off = + TOKEN_STRING_INITIALIZER(struct cmd_set_xstats_hide_zero_result, + on_off, "on#off"); + +cmdline_parse_inst_t cmd_set_xstats_hide_zero = { + .f = cmd_set_xstats_hide_zero_parsed, + .data = NULL, + .help_str = "set xstats-hide-zero on|off", + .tokens = { + (void *)&cmd_set_xstats_hide_zero_keyword, + (void *)&cmd_set_xstats_hide_zero_name, + (void *)&cmd_set_xstats_hide_zero_on_off, + NULL, + }, +}; + +/* *** CONFIGURE UNICAST HASH TABLE *** */ +struct cmd_set_uc_hash_table { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t what; + struct rte_ether_addr address; + cmdline_fixed_string_t mode; +}; + +static void +cmd_set_uc_hash_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int ret=0; + struct cmd_set_uc_hash_table *res = parsed_result; + + int is_on = (strcmp(res->mode, "on") == 0) ? 
1 : 0; + + if (strcmp(res->what, "uta") == 0) + ret = rte_eth_dev_uc_hash_table_set(res->port_id, + &res->address,(uint8_t)is_on); + if (ret < 0) + printf("bad unicast hash table parameter, return code = %d \n", ret); + +} + +cmdline_parse_token_string_t cmd_set_uc_hash_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_uc_hash_table, + set, "set"); +cmdline_parse_token_string_t cmd_set_uc_hash_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_uc_hash_table, + port, "port"); +cmdline_parse_token_num_t cmd_set_uc_hash_portid = + TOKEN_NUM_INITIALIZER(struct cmd_set_uc_hash_table, + port_id, UINT16); +cmdline_parse_token_string_t cmd_set_uc_hash_what = + TOKEN_STRING_INITIALIZER(struct cmd_set_uc_hash_table, + what, "uta"); +cmdline_parse_token_etheraddr_t cmd_set_uc_hash_mac = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_uc_hash_table, + address); +cmdline_parse_token_string_t cmd_set_uc_hash_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_uc_hash_table, + mode, "on#off"); + +cmdline_parse_inst_t cmd_set_uc_hash_filter = { + .f = cmd_set_uc_hash_parsed, + .data = NULL, + .help_str = "set port <port_id> uta <mac_addr> on|off)", + .tokens = { + (void *)&cmd_set_uc_hash_set, + (void *)&cmd_set_uc_hash_port, + (void *)&cmd_set_uc_hash_portid, + (void *)&cmd_set_uc_hash_what, + (void *)&cmd_set_uc_hash_mac, + (void *)&cmd_set_uc_hash_mode, + NULL, + }, +}; + +struct cmd_set_uc_all_hash_table { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t what; + cmdline_fixed_string_t value; + cmdline_fixed_string_t mode; +}; + +static void +cmd_set_uc_all_hash_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int ret=0; + struct cmd_set_uc_all_hash_table *res = parsed_result; + + int is_on = (strcmp(res->mode, "on") == 0) ? 
1 : 0; + + if ((strcmp(res->what, "uta") == 0) && + (strcmp(res->value, "all") == 0)) + ret = rte_eth_dev_uc_all_hash_table_set(res->port_id,(uint8_t) is_on); + if (ret < 0) + printf("bad unicast hash table parameter," + "return code = %d \n", ret); +} + +cmdline_parse_token_string_t cmd_set_uc_all_hash_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table, + set, "set"); +cmdline_parse_token_string_t cmd_set_uc_all_hash_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table, + port, "port"); +cmdline_parse_token_num_t cmd_set_uc_all_hash_portid = + TOKEN_NUM_INITIALIZER(struct cmd_set_uc_all_hash_table, + port_id, UINT16); +cmdline_parse_token_string_t cmd_set_uc_all_hash_what = + TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table, + what, "uta"); +cmdline_parse_token_string_t cmd_set_uc_all_hash_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table, + value,"all"); +cmdline_parse_token_string_t cmd_set_uc_all_hash_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_uc_all_hash_table, + mode, "on#off"); + +cmdline_parse_inst_t cmd_set_uc_all_hash_filter = { + .f = cmd_set_uc_all_hash_parsed, + .data = NULL, + .help_str = "set port <port_id> uta all on|off", + .tokens = { + (void *)&cmd_set_uc_all_hash_set, + (void *)&cmd_set_uc_all_hash_port, + (void *)&cmd_set_uc_all_hash_portid, + (void *)&cmd_set_uc_all_hash_what, + (void *)&cmd_set_uc_all_hash_value, + (void *)&cmd_set_uc_all_hash_mode, + NULL, + }, +}; + +/* *** CONFIGURE MACVLAN FILTER FOR VF(s) *** */ +struct cmd_set_vf_macvlan_filter { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t vf; + uint8_t vf_id; + struct rte_ether_addr address; + cmdline_fixed_string_t filter_type; + cmdline_fixed_string_t mode; +}; + +static void +cmd_set_vf_macvlan_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int is_on, ret = 0; + struct cmd_set_vf_macvlan_filter *res = parsed_result; + struct rte_eth_mac_filter filter; + + memset(&filter, 0, sizeof(struct rte_eth_mac_filter)); + + rte_memcpy(&filter.mac_addr, &res->address, RTE_ETHER_ADDR_LEN); + + /* set VF MAC filter */ + filter.is_vf = 1; + + /* set VF ID */ + filter.dst_id = res->vf_id; + + if (!strcmp(res->filter_type, "exact-mac")) + filter.filter_type = RTE_MAC_PERFECT_MATCH; + else if (!strcmp(res->filter_type, "exact-mac-vlan")) + filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + else if (!strcmp(res->filter_type, "hashmac")) + filter.filter_type = RTE_MAC_HASH_MATCH; + else if (!strcmp(res->filter_type, "hashmac-vlan")) + filter.filter_type = RTE_MACVLAN_HASH_MATCH; + + is_on = (strcmp(res->mode, "on") == 0) ? 
1 : 0; + + if (is_on) + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_MACVLAN, + RTE_ETH_FILTER_ADD, + &filter); + else + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_MACVLAN, + RTE_ETH_FILTER_DELETE, + &filter); + + if (ret < 0) + printf("bad set MAC hash parameter, return code = %d\n", ret); + +} + +cmdline_parse_token_string_t cmd_set_vf_macvlan_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter, + set, "set"); +cmdline_parse_token_string_t cmd_set_vf_macvlan_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter, + port, "port"); +cmdline_parse_token_num_t cmd_set_vf_macvlan_portid = + TOKEN_NUM_INITIALIZER(struct cmd_set_vf_macvlan_filter, + port_id, UINT16); +cmdline_parse_token_string_t cmd_set_vf_macvlan_vf = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter, + vf, "vf"); +cmdline_parse_token_num_t cmd_set_vf_macvlan_vf_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_vf_macvlan_filter, + vf_id, UINT8); +cmdline_parse_token_etheraddr_t cmd_set_vf_macvlan_mac = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vf_macvlan_filter, + address); +cmdline_parse_token_string_t cmd_set_vf_macvlan_filter_type = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter, + filter_type, "exact-mac#exact-mac-vlan" + "#hashmac#hashmac-vlan"); +cmdline_parse_token_string_t cmd_set_vf_macvlan_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_macvlan_filter, + mode, "on#off"); + +cmdline_parse_inst_t cmd_set_vf_macvlan_filter = { + .f = cmd_set_vf_macvlan_parsed, + .data = NULL, + .help_str = "set port <port_id> vf <vf_id> <mac_addr> " + "exact-mac|exact-mac-vlan|hashmac|hashmac-vlan on|off: " + "Exact match rule: exact match of MAC or MAC and VLAN; " + "hash match rule: hash match of MAC and exact match of VLAN", + .tokens = { + (void *)&cmd_set_vf_macvlan_set, + (void *)&cmd_set_vf_macvlan_port, + (void *)&cmd_set_vf_macvlan_portid, + (void *)&cmd_set_vf_macvlan_vf, + (void *)&cmd_set_vf_macvlan_vf_id, + (void *)&cmd_set_vf_macvlan_mac, + (void *)&cmd_set_vf_macvlan_filter_type, + (void *)&cmd_set_vf_macvlan_mode, + NULL, + }, +}; + +/* *** CONFIGURE VF TRAFFIC CONTROL *** */ +struct cmd_set_vf_traffic { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t vf; + uint8_t vf_id; + cmdline_fixed_string_t what; + cmdline_fixed_string_t mode; +}; + +static void +cmd_set_vf_traffic_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_vf_traffic *res = parsed_result; + int is_rx = (strcmp(res->what, "rx") == 0) ? 1 : 0; + int is_on = (strcmp(res->mode, "on") == 0) ? 
1 : 0; + + set_vf_traffic(res->port_id, (uint8_t)is_rx, res->vf_id,(uint8_t) is_on); +} + +cmdline_parse_token_string_t cmd_setvf_traffic_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic, + set, "set"); +cmdline_parse_token_string_t cmd_setvf_traffic_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic, + port, "port"); +cmdline_parse_token_num_t cmd_setvf_traffic_portid = + TOKEN_NUM_INITIALIZER(struct cmd_set_vf_traffic, + port_id, UINT16); +cmdline_parse_token_string_t cmd_setvf_traffic_vf = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic, + vf, "vf"); +cmdline_parse_token_num_t cmd_setvf_traffic_vfid = + TOKEN_NUM_INITIALIZER(struct cmd_set_vf_traffic, + vf_id, UINT8); +cmdline_parse_token_string_t cmd_setvf_traffic_what = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic, + what, "tx#rx"); +cmdline_parse_token_string_t cmd_setvf_traffic_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_traffic, + mode, "on#off"); + +cmdline_parse_inst_t cmd_set_vf_traffic = { + .f = cmd_set_vf_traffic_parsed, + .data = NULL, + .help_str = "set port <port_id> vf <vf_id> rx|tx on|off", + .tokens = { + (void *)&cmd_setvf_traffic_set, + (void *)&cmd_setvf_traffic_port, + (void *)&cmd_setvf_traffic_portid, + (void *)&cmd_setvf_traffic_vf, + (void *)&cmd_setvf_traffic_vfid, + (void *)&cmd_setvf_traffic_what, + (void *)&cmd_setvf_traffic_mode, + NULL, + }, +}; + +/* *** CONFIGURE VF RECEIVE MODE *** */ +struct cmd_set_vf_rxmode { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t vf; + uint8_t vf_id; + cmdline_fixed_string_t what; + cmdline_fixed_string_t mode; + cmdline_fixed_string_t on; +}; + +static void +cmd_set_vf_rxmode_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int ret = -ENOTSUP; + uint16_t vf_rxmode = 0; + struct cmd_set_vf_rxmode *res = parsed_result; + + int is_on = (strcmp(res->on, "on") == 0) ? 
1 : 0; + if (!strcmp(res->what,"rxmode")) { + if (!strcmp(res->mode, "AUPE")) + vf_rxmode |= ETH_VMDQ_ACCEPT_UNTAG; + else if (!strcmp(res->mode, "ROPE")) + vf_rxmode |= ETH_VMDQ_ACCEPT_HASH_UC; + else if (!strcmp(res->mode, "BAM")) + vf_rxmode |= ETH_VMDQ_ACCEPT_BROADCAST; + else if (!strncmp(res->mode, "MPE",3)) + vf_rxmode |= ETH_VMDQ_ACCEPT_MULTICAST; + } + + RTE_SET_USED(is_on); + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_ixgbe_set_vf_rxmode(res->port_id, res->vf_id, + vf_rxmode, (uint8_t)is_on); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_set_vf_rxmode(res->port_id, res->vf_id, + vf_rxmode, (uint8_t)is_on); +#endif + if (ret < 0) + printf("bad VF receive mode parameter, return code = %d \n", + ret); +} + +cmdline_parse_token_string_t cmd_set_vf_rxmode_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode, + set, "set"); +cmdline_parse_token_string_t cmd_set_vf_rxmode_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode, + port, "port"); +cmdline_parse_token_num_t cmd_set_vf_rxmode_portid = + TOKEN_NUM_INITIALIZER(struct cmd_set_vf_rxmode, + port_id, UINT16); +cmdline_parse_token_string_t cmd_set_vf_rxmode_vf = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode, + vf, "vf"); +cmdline_parse_token_num_t cmd_set_vf_rxmode_vfid = + TOKEN_NUM_INITIALIZER(struct cmd_set_vf_rxmode, + vf_id, UINT8); +cmdline_parse_token_string_t cmd_set_vf_rxmode_what = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode, + what, "rxmode"); +cmdline_parse_token_string_t cmd_set_vf_rxmode_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode, + mode, "AUPE#ROPE#BAM#MPE"); +cmdline_parse_token_string_t cmd_set_vf_rxmode_on = + TOKEN_STRING_INITIALIZER(struct cmd_set_vf_rxmode, + on, "on#off"); + +cmdline_parse_inst_t cmd_set_vf_rxmode = { + .f = cmd_set_vf_rxmode_parsed, + .data = NULL, + .help_str = "set port <port_id> vf <vf_id> rxmode " + "AUPE|ROPE|BAM|MPE on|off", + .tokens = { + (void *)&cmd_set_vf_rxmode_set, + (void *)&cmd_set_vf_rxmode_port, + (void *)&cmd_set_vf_rxmode_portid, + (void *)&cmd_set_vf_rxmode_vf, + (void *)&cmd_set_vf_rxmode_vfid, + (void *)&cmd_set_vf_rxmode_what, + (void *)&cmd_set_vf_rxmode_mode, + (void *)&cmd_set_vf_rxmode_on, + NULL, + }, +}; + +/* *** ADD MAC ADDRESS FILTER FOR A VF OF A PORT *** */ +struct cmd_vf_mac_addr_result { + cmdline_fixed_string_t mac_addr_cmd; + cmdline_fixed_string_t what; + cmdline_fixed_string_t port; + uint16_t port_num; + cmdline_fixed_string_t vf; + uint8_t vf_num; + struct rte_ether_addr address; +}; + +static void cmd_vf_mac_addr_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_mac_addr_result *res = parsed_result; + int ret = -ENOTSUP; + + if (strcmp(res->what, "add") != 0) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_add_vf_mac_addr(res->port_num, res->vf_num, + &res->address); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_mac_addr_add(res->port_num, &res->address, + res->vf_num); +#endif + + if(ret < 0) + printf("vf_mac_addr_cmd error: (%s)\n", strerror(-ret)); + +} + +cmdline_parse_token_string_t cmd_vf_mac_addr_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_vf_mac_addr_result, + mac_addr_cmd,"mac_addr"); +cmdline_parse_token_string_t cmd_vf_mac_addr_what = + TOKEN_STRING_INITIALIZER(struct cmd_vf_mac_addr_result, + what,"add"); +cmdline_parse_token_string_t cmd_vf_mac_addr_port = + TOKEN_STRING_INITIALIZER(struct cmd_vf_mac_addr_result, + 
port,"port"); +cmdline_parse_token_num_t cmd_vf_mac_addr_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_vf_mac_addr_result, + port_num, UINT16); +cmdline_parse_token_string_t cmd_vf_mac_addr_vf = + TOKEN_STRING_INITIALIZER(struct cmd_vf_mac_addr_result, + vf,"vf"); +cmdline_parse_token_num_t cmd_vf_mac_addr_vfnum = + TOKEN_NUM_INITIALIZER(struct cmd_vf_mac_addr_result, + vf_num, UINT8); +cmdline_parse_token_etheraddr_t cmd_vf_mac_addr_addr = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_vf_mac_addr_result, + address); + +cmdline_parse_inst_t cmd_vf_mac_addr_filter = { + .f = cmd_vf_mac_addr_parsed, + .data = (void *)0, + .help_str = "mac_addr add port <port_id> vf <vf_id> <mac_addr>: " + "Add MAC address filtering for a VF on port_id", + .tokens = { + (void *)&cmd_vf_mac_addr_cmd, + (void *)&cmd_vf_mac_addr_what, + (void *)&cmd_vf_mac_addr_port, + (void *)&cmd_vf_mac_addr_portnum, + (void *)&cmd_vf_mac_addr_vf, + (void *)&cmd_vf_mac_addr_vfnum, + (void *)&cmd_vf_mac_addr_addr, + NULL, + }, +}; + +/* *** ADD/REMOVE A VLAN IDENTIFIER TO/FROM A PORT VLAN RX FILTER *** */ +struct cmd_vf_rx_vlan_filter { + cmdline_fixed_string_t rx_vlan; + cmdline_fixed_string_t what; + uint16_t vlan_id; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t vf; + uint64_t vf_mask; +}; + +static void +cmd_vf_rx_vlan_filter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_rx_vlan_filter *res = parsed_result; + int ret = -ENOTSUP; + + __rte_unused int is_add = (strcmp(res->what, "add") == 0) ? 1 : 0; + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_ixgbe_set_vf_vlan_filter(res->port_id, + res->vlan_id, res->vf_mask, is_add); +#endif +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_set_vf_vlan_filter(res->port_id, + res->vlan_id, res->vf_mask, is_add); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_set_vf_vlan_filter(res->port_id, + res->vlan_id, res->vf_mask, is_add); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vlan_id %d or vf_mask %"PRIu64"\n", + res->vlan_id, res->vf_mask); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented or supported\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_rx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_vf_rx_vlan_filter, + rx_vlan, "rx_vlan"); +cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_what = + TOKEN_STRING_INITIALIZER(struct cmd_vf_rx_vlan_filter, + what, "add#rm"); +cmdline_parse_token_num_t cmd_vf_rx_vlan_filter_vlanid = + TOKEN_NUM_INITIALIZER(struct cmd_vf_rx_vlan_filter, + vlan_id, UINT16); +cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_port = + TOKEN_STRING_INITIALIZER(struct cmd_vf_rx_vlan_filter, + port, "port"); +cmdline_parse_token_num_t cmd_vf_rx_vlan_filter_portid = + TOKEN_NUM_INITIALIZER(struct cmd_vf_rx_vlan_filter, + port_id, UINT16); +cmdline_parse_token_string_t cmd_vf_rx_vlan_filter_vf = + TOKEN_STRING_INITIALIZER(struct cmd_vf_rx_vlan_filter, + vf, "vf"); +cmdline_parse_token_num_t cmd_vf_rx_vlan_filter_vf_mask = + TOKEN_NUM_INITIALIZER(struct cmd_vf_rx_vlan_filter, + vf_mask, UINT64); + +cmdline_parse_inst_t cmd_vf_rxvlan_filter = { + .f = cmd_vf_rx_vlan_filter_parsed, + .data = NULL, + .help_str = "rx_vlan add|rm <vlan_id> port <port_id> vf <vf_mask>: " + "(vf_mask = hexadecimal VF mask)", 
+ .tokens = { + (void *)&cmd_vf_rx_vlan_filter_rx_vlan, + (void *)&cmd_vf_rx_vlan_filter_what, + (void *)&cmd_vf_rx_vlan_filter_vlanid, + (void *)&cmd_vf_rx_vlan_filter_port, + (void *)&cmd_vf_rx_vlan_filter_portid, + (void *)&cmd_vf_rx_vlan_filter_vf, + (void *)&cmd_vf_rx_vlan_filter_vf_mask, + NULL, + }, +}; + +/* *** SET RATE LIMIT FOR A QUEUE OF A PORT *** */ +struct cmd_queue_rate_limit_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + uint16_t port_num; + cmdline_fixed_string_t queue; + uint8_t queue_num; + cmdline_fixed_string_t rate; + uint16_t rate_num; +}; + +static void cmd_queue_rate_limit_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_queue_rate_limit_result *res = parsed_result; + int ret = 0; + + if ((strcmp(res->set, "set") == 0) && (strcmp(res->port, "port") == 0) + && (strcmp(res->queue, "queue") == 0) + && (strcmp(res->rate, "rate") == 0)) + ret = set_queue_rate_limit(res->port_num, res->queue_num, + res->rate_num); + if (ret < 0) + printf("queue_rate_limit_cmd error: (%s)\n", strerror(-ret)); + +} + +cmdline_parse_token_string_t cmd_queue_rate_limit_set = + TOKEN_STRING_INITIALIZER(struct cmd_queue_rate_limit_result, + set, "set"); +cmdline_parse_token_string_t cmd_queue_rate_limit_port = + TOKEN_STRING_INITIALIZER(struct cmd_queue_rate_limit_result, + port, "port"); +cmdline_parse_token_num_t cmd_queue_rate_limit_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_queue_rate_limit_result, + port_num, UINT16); +cmdline_parse_token_string_t cmd_queue_rate_limit_queue = + TOKEN_STRING_INITIALIZER(struct cmd_queue_rate_limit_result, + queue, "queue"); +cmdline_parse_token_num_t cmd_queue_rate_limit_queuenum = + TOKEN_NUM_INITIALIZER(struct cmd_queue_rate_limit_result, + queue_num, UINT8); +cmdline_parse_token_string_t cmd_queue_rate_limit_rate = + TOKEN_STRING_INITIALIZER(struct cmd_queue_rate_limit_result, + rate, "rate"); +cmdline_parse_token_num_t cmd_queue_rate_limit_ratenum = + TOKEN_NUM_INITIALIZER(struct cmd_queue_rate_limit_result, + rate_num, UINT16); + +cmdline_parse_inst_t cmd_queue_rate_limit = { + .f = cmd_queue_rate_limit_parsed, + .data = (void *)0, + .help_str = "set port <port_id> queue <queue_id> rate <rate_value>: " + "Set rate limit for a queue on port_id", + .tokens = { + (void *)&cmd_queue_rate_limit_set, + (void *)&cmd_queue_rate_limit_port, + (void *)&cmd_queue_rate_limit_portnum, + (void *)&cmd_queue_rate_limit_queue, + (void *)&cmd_queue_rate_limit_queuenum, + (void *)&cmd_queue_rate_limit_rate, + (void *)&cmd_queue_rate_limit_ratenum, + NULL, + }, +}; + +/* *** SET RATE LIMIT FOR A VF OF A PORT *** */ +struct cmd_vf_rate_limit_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + uint16_t port_num; + cmdline_fixed_string_t vf; + uint8_t vf_num; + cmdline_fixed_string_t rate; + uint16_t rate_num; + cmdline_fixed_string_t q_msk; + uint64_t q_msk_val; +}; + +static void cmd_vf_rate_limit_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_rate_limit_result *res = parsed_result; + int ret = 0; + + if ((strcmp(res->set, "set") == 0) && (strcmp(res->port, "port") == 0) + && (strcmp(res->vf, "vf") == 0) + && (strcmp(res->rate, "rate") == 0) + && (strcmp(res->q_msk, "queue_mask") == 0)) + ret = set_vf_rate_limit(res->port_num, res->vf_num, + res->rate_num, res->q_msk_val); + if (ret < 0) + printf("vf_rate_limit_cmd error: (%s)\n", strerror(-ret)); + +} + +cmdline_parse_token_string_t cmd_vf_rate_limit_set = 
+ TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result, + set, "set"); +cmdline_parse_token_string_t cmd_vf_rate_limit_port = + TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result, + port, "port"); +cmdline_parse_token_num_t cmd_vf_rate_limit_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result, + port_num, UINT16); +cmdline_parse_token_string_t cmd_vf_rate_limit_vf = + TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result, + vf, "vf"); +cmdline_parse_token_num_t cmd_vf_rate_limit_vfnum = + TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result, + vf_num, UINT8); +cmdline_parse_token_string_t cmd_vf_rate_limit_rate = + TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result, + rate, "rate"); +cmdline_parse_token_num_t cmd_vf_rate_limit_ratenum = + TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result, + rate_num, UINT16); +cmdline_parse_token_string_t cmd_vf_rate_limit_q_msk = + TOKEN_STRING_INITIALIZER(struct cmd_vf_rate_limit_result, + q_msk, "queue_mask"); +cmdline_parse_token_num_t cmd_vf_rate_limit_q_msk_val = + TOKEN_NUM_INITIALIZER(struct cmd_vf_rate_limit_result, + q_msk_val, UINT64); + +cmdline_parse_inst_t cmd_vf_rate_limit = { + .f = cmd_vf_rate_limit_parsed, + .data = (void *)0, + .help_str = "set port <port_id> vf <vf_id> rate <rate_value> " + "queue_mask <queue_mask_value>: " + "Set rate limit for queues of VF on port_id", + .tokens = { + (void *)&cmd_vf_rate_limit_set, + (void *)&cmd_vf_rate_limit_port, + (void *)&cmd_vf_rate_limit_portnum, + (void *)&cmd_vf_rate_limit_vf, + (void *)&cmd_vf_rate_limit_vfnum, + (void *)&cmd_vf_rate_limit_rate, + (void *)&cmd_vf_rate_limit_ratenum, + (void *)&cmd_vf_rate_limit_q_msk, + (void *)&cmd_vf_rate_limit_q_msk_val, + NULL, + }, +}; + +/* *** ADD TUNNEL FILTER OF A PORT *** */ +struct cmd_tunnel_filter_result { + cmdline_fixed_string_t cmd; + cmdline_fixed_string_t what; + portid_t port_id; + struct rte_ether_addr outer_mac; + struct rte_ether_addr inner_mac; + cmdline_ipaddr_t ip_value; + uint16_t inner_vlan; + cmdline_fixed_string_t tunnel_type; + cmdline_fixed_string_t filter_type; + uint32_t tenant_id; + uint16_t queue_num; +}; + +static void +cmd_tunnel_filter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tunnel_filter_result *res = parsed_result; + struct rte_eth_tunnel_filter_conf tunnel_filter_conf; + int ret = 0; + + memset(&tunnel_filter_conf, 0, sizeof(tunnel_filter_conf)); + + rte_ether_addr_copy(&res->outer_mac, &tunnel_filter_conf.outer_mac); + rte_ether_addr_copy(&res->inner_mac, &tunnel_filter_conf.inner_mac); + tunnel_filter_conf.inner_vlan = res->inner_vlan; + + if (res->ip_value.family == AF_INET) { + tunnel_filter_conf.ip_addr.ipv4_addr = + res->ip_value.addr.ipv4.s_addr; + tunnel_filter_conf.ip_type = RTE_TUNNEL_IPTYPE_IPV4; + } else { + memcpy(&(tunnel_filter_conf.ip_addr.ipv6_addr), + &(res->ip_value.addr.ipv6), + sizeof(struct in6_addr)); + tunnel_filter_conf.ip_type = RTE_TUNNEL_IPTYPE_IPV6; + } + + if (!strcmp(res->filter_type, "imac-ivlan")) + tunnel_filter_conf.filter_type = RTE_TUNNEL_FILTER_IMAC_IVLAN; + else if (!strcmp(res->filter_type, "imac-ivlan-tenid")) + tunnel_filter_conf.filter_type = + RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID; + else if (!strcmp(res->filter_type, "imac-tenid")) + tunnel_filter_conf.filter_type = RTE_TUNNEL_FILTER_IMAC_TENID; + else if (!strcmp(res->filter_type, "imac")) + tunnel_filter_conf.filter_type = ETH_TUNNEL_FILTER_IMAC; + else if (!strcmp(res->filter_type, "omac-imac-tenid")) + 
tunnel_filter_conf.filter_type = + RTE_TUNNEL_FILTER_OMAC_TENID_IMAC; + else if (!strcmp(res->filter_type, "oip")) + tunnel_filter_conf.filter_type = ETH_TUNNEL_FILTER_OIP; + else if (!strcmp(res->filter_type, "iip")) + tunnel_filter_conf.filter_type = ETH_TUNNEL_FILTER_IIP; + else { + printf("The filter type is not supported"); + return; + } + + if (!strcmp(res->tunnel_type, "vxlan")) + tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN; + else if (!strcmp(res->tunnel_type, "vxlan-gpe")) + tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_VXLAN_GPE; + else if (!strcmp(res->tunnel_type, "nvgre")) + tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_NVGRE; + else if (!strcmp(res->tunnel_type, "ipingre")) + tunnel_filter_conf.tunnel_type = RTE_TUNNEL_TYPE_IP_IN_GRE; + else { + printf("The tunnel type %s not supported.\n", res->tunnel_type); + return; + } + + tunnel_filter_conf.tenant_id = res->tenant_id; + tunnel_filter_conf.queue_id = res->queue_num; + if (!strcmp(res->what, "add")) + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_TUNNEL, + RTE_ETH_FILTER_ADD, + &tunnel_filter_conf); + else + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_TUNNEL, + RTE_ETH_FILTER_DELETE, + &tunnel_filter_conf); + if (ret < 0) + printf("cmd_tunnel_filter_parsed error: (%s)\n", + strerror(-ret)); + +} +cmdline_parse_token_string_t cmd_tunnel_filter_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result, + cmd, "tunnel_filter"); +cmdline_parse_token_string_t cmd_tunnel_filter_what = + TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result, + what, "add#rm"); +cmdline_parse_token_num_t cmd_tunnel_filter_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_tunnel_filter_result, + port_id, UINT16); +cmdline_parse_token_etheraddr_t cmd_tunnel_filter_outer_mac = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_tunnel_filter_result, + outer_mac); +cmdline_parse_token_etheraddr_t cmd_tunnel_filter_inner_mac = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_tunnel_filter_result, + inner_mac); +cmdline_parse_token_num_t cmd_tunnel_filter_innner_vlan = + TOKEN_NUM_INITIALIZER(struct cmd_tunnel_filter_result, + inner_vlan, UINT16); +cmdline_parse_token_ipaddr_t cmd_tunnel_filter_ip_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_tunnel_filter_result, + ip_value); +cmdline_parse_token_string_t cmd_tunnel_filter_tunnel_type = + TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result, + tunnel_type, "vxlan#nvgre#ipingre#vxlan-gpe"); + +cmdline_parse_token_string_t cmd_tunnel_filter_filter_type = + TOKEN_STRING_INITIALIZER(struct cmd_tunnel_filter_result, + filter_type, "oip#iip#imac-ivlan#imac-ivlan-tenid#imac-tenid#" + "imac#omac-imac-tenid"); +cmdline_parse_token_num_t cmd_tunnel_filter_tenant_id = + TOKEN_NUM_INITIALIZER(struct cmd_tunnel_filter_result, + tenant_id, UINT32); +cmdline_parse_token_num_t cmd_tunnel_filter_queue_num = + TOKEN_NUM_INITIALIZER(struct cmd_tunnel_filter_result, + queue_num, UINT16); + +cmdline_parse_inst_t cmd_tunnel_filter = { + .f = cmd_tunnel_filter_parsed, + .data = (void *)0, + .help_str = "tunnel_filter add|rm <port_id> <outer_mac> <inner_mac> " + "<ip> <inner_vlan> vxlan|nvgre|ipingre oip|iip|imac-ivlan|" + "imac-ivlan-tenid|imac-tenid|imac|omac-imac-tenid <tenant_id> " + "<queue_id>: Add/Rm tunnel filter of a port", + .tokens = { + (void *)&cmd_tunnel_filter_cmd, + (void *)&cmd_tunnel_filter_what, + (void *)&cmd_tunnel_filter_port_id, + (void *)&cmd_tunnel_filter_outer_mac, + (void *)&cmd_tunnel_filter_inner_mac, + (void *)&cmd_tunnel_filter_ip_value, + (void 
*)&cmd_tunnel_filter_innner_vlan, + (void *)&cmd_tunnel_filter_tunnel_type, + (void *)&cmd_tunnel_filter_filter_type, + (void *)&cmd_tunnel_filter_tenant_id, + (void *)&cmd_tunnel_filter_queue_num, + NULL, + }, +}; + +/* *** CONFIGURE TUNNEL UDP PORT *** */ +struct cmd_tunnel_udp_config { + cmdline_fixed_string_t cmd; + cmdline_fixed_string_t what; + uint16_t udp_port; + portid_t port_id; +}; + +static void +cmd_tunnel_udp_config_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tunnel_udp_config *res = parsed_result; + struct rte_eth_udp_tunnel tunnel_udp; + int ret; + + tunnel_udp.udp_port = res->udp_port; + + if (!strcmp(res->cmd, "rx_vxlan_port")) + tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN; + + if (!strcmp(res->what, "add")) + ret = rte_eth_dev_udp_tunnel_port_add(res->port_id, + &tunnel_udp); + else + ret = rte_eth_dev_udp_tunnel_port_delete(res->port_id, + &tunnel_udp); + + if (ret < 0) + printf("udp tunneling add error: (%s)\n", strerror(-ret)); +} + +cmdline_parse_token_string_t cmd_tunnel_udp_config_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_tunnel_udp_config, + cmd, "rx_vxlan_port"); +cmdline_parse_token_string_t cmd_tunnel_udp_config_what = + TOKEN_STRING_INITIALIZER(struct cmd_tunnel_udp_config, + what, "add#rm"); +cmdline_parse_token_num_t cmd_tunnel_udp_config_udp_port = + TOKEN_NUM_INITIALIZER(struct cmd_tunnel_udp_config, + udp_port, UINT16); +cmdline_parse_token_num_t cmd_tunnel_udp_config_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_tunnel_udp_config, + port_id, UINT16); + +cmdline_parse_inst_t cmd_tunnel_udp_config = { + .f = cmd_tunnel_udp_config_parsed, + .data = (void *)0, + .help_str = "rx_vxlan_port add|rm <udp_port> <port_id>: " + "Add/Remove a tunneling UDP port filter", + .tokens = { + (void *)&cmd_tunnel_udp_config_cmd, + (void *)&cmd_tunnel_udp_config_what, + (void *)&cmd_tunnel_udp_config_udp_port, + (void *)&cmd_tunnel_udp_config_port_id, + NULL, + }, +}; + +struct cmd_config_tunnel_udp_port { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t udp_tunnel_port; + cmdline_fixed_string_t action; + cmdline_fixed_string_t tunnel_type; + uint16_t udp_port; +}; + +static void +cmd_cfg_tunnel_udp_port_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_tunnel_udp_port *res = parsed_result; + struct rte_eth_udp_tunnel tunnel_udp; + int ret = 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + tunnel_udp.udp_port = res->udp_port; + + if (!strcmp(res->tunnel_type, "vxlan")) { + tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN; + } else if (!strcmp(res->tunnel_type, "geneve")) { + tunnel_udp.prot_type = RTE_TUNNEL_TYPE_GENEVE; + } else if (!strcmp(res->tunnel_type, "vxlan-gpe")) { + tunnel_udp.prot_type = RTE_TUNNEL_TYPE_VXLAN_GPE; + } else { + printf("Invalid tunnel type\n"); + return; + } + + if (!strcmp(res->action, "add")) + ret = rte_eth_dev_udp_tunnel_port_add(res->port_id, + &tunnel_udp); + else + ret = rte_eth_dev_udp_tunnel_port_delete(res->port_id, + &tunnel_udp); + + if (ret < 0) + printf("udp tunneling port add error: (%s)\n", strerror(-ret)); +} + +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, port, + "port"); +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_config = + TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, config, + "config"); +cmdline_parse_token_num_t 
cmd_config_tunnel_udp_port_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_tunnel_udp_port, port_id, + UINT16); +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_tunnel_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, + udp_tunnel_port, + "udp_tunnel_port"); +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_action = + TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, action, + "add#rm"); +cmdline_parse_token_string_t cmd_config_tunnel_udp_port_tunnel_type = + TOKEN_STRING_INITIALIZER(struct cmd_config_tunnel_udp_port, tunnel_type, + "vxlan#geneve#vxlan-gpe"); +cmdline_parse_token_num_t cmd_config_tunnel_udp_port_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_tunnel_udp_port, udp_port, + UINT16); + +cmdline_parse_inst_t cmd_cfg_tunnel_udp_port = { + .f = cmd_cfg_tunnel_udp_port_parsed, + .data = NULL, + .help_str = "port config <port_id> udp_tunnel_port add|rm vxlan|geneve|vxlan-gpe <udp_port>", + .tokens = { + (void *)&cmd_config_tunnel_udp_port_port, + (void *)&cmd_config_tunnel_udp_port_config, + (void *)&cmd_config_tunnel_udp_port_port_id, + (void *)&cmd_config_tunnel_udp_port_tunnel_port, + (void *)&cmd_config_tunnel_udp_port_action, + (void *)&cmd_config_tunnel_udp_port_tunnel_type, + (void *)&cmd_config_tunnel_udp_port_value, + NULL, + }, +}; + +/* *** GLOBAL CONFIG *** */ +struct cmd_global_config_result { + cmdline_fixed_string_t cmd; + portid_t port_id; + cmdline_fixed_string_t cfg_type; + uint8_t len; +}; + +static void +cmd_global_config_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_global_config_result *res = parsed_result; + struct rte_eth_global_cfg conf; + int ret; + + memset(&conf, 0, sizeof(conf)); + conf.cfg_type = RTE_ETH_GLOBAL_CFG_TYPE_GRE_KEY_LEN; + conf.cfg.gre_key_len = res->len; + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_NONE, + RTE_ETH_FILTER_SET, &conf); + if (ret != 0) + printf("Global config error\n"); +} + +cmdline_parse_token_string_t cmd_global_config_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_global_config_result, cmd, + "global_config"); +cmdline_parse_token_num_t cmd_global_config_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_global_config_result, port_id, + UINT16); +cmdline_parse_token_string_t cmd_global_config_type = + TOKEN_STRING_INITIALIZER(struct cmd_global_config_result, + cfg_type, "gre-key-len"); +cmdline_parse_token_num_t cmd_global_config_gre_key_len = + TOKEN_NUM_INITIALIZER(struct cmd_global_config_result, + len, UINT8); + +cmdline_parse_inst_t cmd_global_config = { + .f = cmd_global_config_parsed, + .data = (void *)NULL, + .help_str = "global_config <port_id> gre-key-len <key_len>", + .tokens = { + (void *)&cmd_global_config_cmd, + (void *)&cmd_global_config_port_id, + (void *)&cmd_global_config_type, + (void *)&cmd_global_config_gre_key_len, + NULL, + }, +}; + +/* *** CONFIGURE VM MIRROR VLAN/POOL RULE *** */ +struct cmd_set_mirror_mask_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t mirror; + uint8_t rule_id; + cmdline_fixed_string_t what; + cmdline_fixed_string_t value; + cmdline_fixed_string_t dstpool; + uint8_t dstpool_id; + cmdline_fixed_string_t on; +}; + +cmdline_parse_token_string_t cmd_mirror_mask_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result, + set, "set"); +cmdline_parse_token_string_t cmd_mirror_mask_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result, + port, "port"); +cmdline_parse_token_num_t 
cmd_mirror_mask_portid = + TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_mirror_mask_mirror = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result, + mirror, "mirror-rule"); +cmdline_parse_token_num_t cmd_mirror_mask_ruleid = + TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result, + rule_id, UINT8); +cmdline_parse_token_string_t cmd_mirror_mask_what = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result, + what, "pool-mirror-up#pool-mirror-down" + "#vlan-mirror"); +cmdline_parse_token_string_t cmd_mirror_mask_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result, + value, NULL); +cmdline_parse_token_string_t cmd_mirror_mask_dstpool = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result, + dstpool, "dst-pool"); +cmdline_parse_token_num_t cmd_mirror_mask_poolid = + TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_mask_result, + dstpool_id, UINT8); +cmdline_parse_token_string_t cmd_mirror_mask_on = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_mask_result, + on, "on#off"); + +static void +cmd_set_mirror_mask_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int ret,nb_item,i; + struct cmd_set_mirror_mask_result *res = parsed_result; + struct rte_eth_mirror_conf mr_conf; + + memset(&mr_conf, 0, sizeof(struct rte_eth_mirror_conf)); + + unsigned int vlan_list[ETH_MIRROR_MAX_VLANS]; + + mr_conf.dst_pool = res->dstpool_id; + + if (!strcmp(res->what, "pool-mirror-up")) { + mr_conf.pool_mask = strtoull(res->value, NULL, 16); + mr_conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_UP; + } else if (!strcmp(res->what, "pool-mirror-down")) { + mr_conf.pool_mask = strtoull(res->value, NULL, 16); + mr_conf.rule_type = ETH_MIRROR_VIRTUAL_POOL_DOWN; + } else if (!strcmp(res->what, "vlan-mirror")) { + mr_conf.rule_type = ETH_MIRROR_VLAN; + nb_item = parse_item_list(res->value, "vlan", + ETH_MIRROR_MAX_VLANS, vlan_list, 1); + if (nb_item <= 0) + return; + + for (i = 0; i < nb_item; i++) { + if (vlan_list[i] > RTE_ETHER_MAX_VLAN_ID) { + printf("Invalid vlan_id: must be < 4096\n"); + return; + } + + mr_conf.vlan.vlan_id[i] = (uint16_t)vlan_list[i]; + mr_conf.vlan.vlan_mask |= 1ULL << i; + } + } + + if (!strcmp(res->on, "on")) + ret = rte_eth_mirror_rule_set(res->port_id, &mr_conf, + res->rule_id, 1); + else + ret = rte_eth_mirror_rule_set(res->port_id, &mr_conf, + res->rule_id, 0); + if (ret < 0) + printf("mirror rule add error: (%s)\n", strerror(-ret)); +} + +cmdline_parse_inst_t cmd_set_mirror_mask = { + .f = cmd_set_mirror_mask_parsed, + .data = NULL, + .help_str = "set port <port_id> mirror-rule <rule_id> " + "pool-mirror-up|pool-mirror-down|vlan-mirror " + "<pool_mask|vlan_id[,vlan_id]*> dst-pool <pool_id> on|off", + .tokens = { + (void *)&cmd_mirror_mask_set, + (void *)&cmd_mirror_mask_port, + (void *)&cmd_mirror_mask_portid, + (void *)&cmd_mirror_mask_mirror, + (void *)&cmd_mirror_mask_ruleid, + (void *)&cmd_mirror_mask_what, + (void *)&cmd_mirror_mask_value, + (void *)&cmd_mirror_mask_dstpool, + (void *)&cmd_mirror_mask_poolid, + (void *)&cmd_mirror_mask_on, + NULL, + }, +}; + +/* *** CONFIGURE VM MIRROR UPLINK/DOWNLINK RULE *** */ +struct cmd_set_mirror_link_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t mirror; + uint8_t rule_id; + cmdline_fixed_string_t what; + cmdline_fixed_string_t dstpool; + uint8_t dstpool_id; + cmdline_fixed_string_t on; +}; + +cmdline_parse_token_string_t 
cmd_mirror_link_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result, + set, "set"); +cmdline_parse_token_string_t cmd_mirror_link_port = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result, + port, "port"); +cmdline_parse_token_num_t cmd_mirror_link_portid = + TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_mirror_link_mirror = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result, + mirror, "mirror-rule"); +cmdline_parse_token_num_t cmd_mirror_link_ruleid = + TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result, + rule_id, UINT8); +cmdline_parse_token_string_t cmd_mirror_link_what = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result, + what, "uplink-mirror#downlink-mirror"); +cmdline_parse_token_string_t cmd_mirror_link_dstpool = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result, + dstpool, "dst-pool"); +cmdline_parse_token_num_t cmd_mirror_link_poolid = + TOKEN_NUM_INITIALIZER(struct cmd_set_mirror_link_result, + dstpool_id, UINT8); +cmdline_parse_token_string_t cmd_mirror_link_on = + TOKEN_STRING_INITIALIZER(struct cmd_set_mirror_link_result, + on, "on#off"); + +static void +cmd_set_mirror_link_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int ret; + struct cmd_set_mirror_link_result *res = parsed_result; + struct rte_eth_mirror_conf mr_conf; + + memset(&mr_conf, 0, sizeof(struct rte_eth_mirror_conf)); + if (!strcmp(res->what, "uplink-mirror")) + mr_conf.rule_type = ETH_MIRROR_UPLINK_PORT; + else + mr_conf.rule_type = ETH_MIRROR_DOWNLINK_PORT; + + mr_conf.dst_pool = res->dstpool_id; + + if (!strcmp(res->on, "on")) + ret = rte_eth_mirror_rule_set(res->port_id, &mr_conf, + res->rule_id, 1); + else + ret = rte_eth_mirror_rule_set(res->port_id, &mr_conf, + res->rule_id, 0); + + /* check the return value and print it if is < 0 */ + if (ret < 0) + printf("mirror rule add error: (%s)\n", strerror(-ret)); + +} + +cmdline_parse_inst_t cmd_set_mirror_link = { + .f = cmd_set_mirror_link_parsed, + .data = NULL, + .help_str = "set port <port_id> mirror-rule <rule_id> " + "uplink-mirror|downlink-mirror dst-pool <pool_id> on|off", + .tokens = { + (void *)&cmd_mirror_link_set, + (void *)&cmd_mirror_link_port, + (void *)&cmd_mirror_link_portid, + (void *)&cmd_mirror_link_mirror, + (void *)&cmd_mirror_link_ruleid, + (void *)&cmd_mirror_link_what, + (void *)&cmd_mirror_link_dstpool, + (void *)&cmd_mirror_link_poolid, + (void *)&cmd_mirror_link_on, + NULL, + }, +}; + +/* *** RESET VM MIRROR RULE *** */ +struct cmd_rm_mirror_rule_result { + cmdline_fixed_string_t reset; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t mirror; + uint8_t rule_id; +}; + +cmdline_parse_token_string_t cmd_rm_mirror_rule_reset = + TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result, + reset, "reset"); +cmdline_parse_token_string_t cmd_rm_mirror_rule_port = + TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result, + port, "port"); +cmdline_parse_token_num_t cmd_rm_mirror_rule_portid = + TOKEN_NUM_INITIALIZER(struct cmd_rm_mirror_rule_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_rm_mirror_rule_mirror = + TOKEN_STRING_INITIALIZER(struct cmd_rm_mirror_rule_result, + mirror, "mirror-rule"); +cmdline_parse_token_num_t cmd_rm_mirror_rule_ruleid = + TOKEN_NUM_INITIALIZER(struct cmd_rm_mirror_rule_result, + rule_id, UINT8); + +static void +cmd_reset_mirror_rule_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, 
+ __rte_unused void *data) +{ + int ret; + struct cmd_set_mirror_link_result *res = parsed_result; + /* check rule_id */ + ret = rte_eth_mirror_rule_reset(res->port_id,res->rule_id); + if(ret < 0) + printf("mirror rule remove error: (%s)\n", strerror(-ret)); +} + +cmdline_parse_inst_t cmd_reset_mirror_rule = { + .f = cmd_reset_mirror_rule_parsed, + .data = NULL, + .help_str = "reset port <port_id> mirror-rule <rule_id>", + .tokens = { + (void *)&cmd_rm_mirror_rule_reset, + (void *)&cmd_rm_mirror_rule_port, + (void *)&cmd_rm_mirror_rule_portid, + (void *)&cmd_rm_mirror_rule_mirror, + (void *)&cmd_rm_mirror_rule_ruleid, + NULL, + }, +}; + +/* ******************************************************************************** */ + +struct cmd_dump_result { + cmdline_fixed_string_t dump; +}; + +static void +dump_struct_sizes(void) +{ +#define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t)); + DUMP_SIZE(struct rte_mbuf); + DUMP_SIZE(struct rte_mempool); + DUMP_SIZE(struct rte_ring); +#undef DUMP_SIZE +} + + +/* Dump the socket memory statistics on console */ +static void +dump_socket_mem(FILE *f) +{ + struct rte_malloc_socket_stats socket_stats; + unsigned int i; + size_t total = 0; + size_t alloc = 0; + size_t free = 0; + unsigned int n_alloc = 0; + unsigned int n_free = 0; + static size_t last_allocs; + static size_t last_total; + + + for (i = 0; i < RTE_MAX_NUMA_NODES; i++) { + if (rte_malloc_get_socket_stats(i, &socket_stats) || + !socket_stats.heap_totalsz_bytes) + continue; + total += socket_stats.heap_totalsz_bytes; + alloc += socket_stats.heap_allocsz_bytes; + free += socket_stats.heap_freesz_bytes; + n_alloc += socket_stats.alloc_count; + n_free += socket_stats.free_count; + fprintf(f, + "Socket %u: size(M) total: %.6lf alloc: %.6lf(%.3lf%%) free: %.6lf \tcount alloc: %-4u free: %u\n", + i, + (double)socket_stats.heap_totalsz_bytes / (1024 * 1024), + (double)socket_stats.heap_allocsz_bytes / (1024 * 1024), + (double)socket_stats.heap_allocsz_bytes * 100 / + (double)socket_stats.heap_totalsz_bytes, + (double)socket_stats.heap_freesz_bytes / (1024 * 1024), + socket_stats.alloc_count, + socket_stats.free_count); + } + fprintf(f, + "Total : size(M) total: %.6lf alloc: %.6lf(%.3lf%%) free: %.6lf \tcount alloc: %-4u free: %u\n", + (double)total / (1024 * 1024), (double)alloc / (1024 * 1024), + (double)alloc * 100 / (double)total, + (double)free / (1024 * 1024), + n_alloc, n_free); + if (last_allocs) + fprintf(stdout, "Memory total change: %.6lf(M), allocation change: %.6lf(M)\n", + ((double)total - (double)last_total) / (1024 * 1024), + (double)(alloc - (double)last_allocs) / 1024 / 1024); + last_allocs = alloc; + last_total = total; +} + +static void cmd_dump_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_dump_result *res = parsed_result; + + if (!strcmp(res->dump, "dump_physmem")) + rte_dump_physmem_layout(stdout); + else if (!strcmp(res->dump, "dump_socket_mem")) + dump_socket_mem(stdout); + else if (!strcmp(res->dump, "dump_memzone")) + rte_memzone_dump(stdout); + else if (!strcmp(res->dump, "dump_struct_sizes")) + dump_struct_sizes(); + else if (!strcmp(res->dump, "dump_ring")) + rte_ring_list_dump(stdout); + else if (!strcmp(res->dump, "dump_mempool")) + rte_mempool_list_dump(stdout); + else if (!strcmp(res->dump, "dump_devargs")) + rte_devargs_dump(stdout); + else if (!strcmp(res->dump, "dump_log_types")) + rte_log_dump(stdout); +} + +cmdline_parse_token_string_t cmd_dump_dump = + TOKEN_STRING_INITIALIZER(struct 
cmd_dump_result, dump, + "dump_physmem#" + "dump_memzone#" + "dump_socket_mem#" + "dump_struct_sizes#" + "dump_ring#" + "dump_mempool#" + "dump_devargs#" + "dump_log_types"); + +cmdline_parse_inst_t cmd_dump = { + .f = cmd_dump_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "Dump status", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_dump_dump, + NULL, + }, +}; + +/* ******************************************************************************** */ + +struct cmd_dump_one_result { + cmdline_fixed_string_t dump; + cmdline_fixed_string_t name; +}; + +static void cmd_dump_one_parsed(void *parsed_result, struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_dump_one_result *res = parsed_result; + + if (!strcmp(res->dump, "dump_ring")) { + struct rte_ring *r; + r = rte_ring_lookup(res->name); + if (r == NULL) { + cmdline_printf(cl, "Cannot find ring\n"); + return; + } + rte_ring_dump(stdout, r); + } else if (!strcmp(res->dump, "dump_mempool")) { + struct rte_mempool *mp; + mp = rte_mempool_lookup(res->name); + if (mp == NULL) { + cmdline_printf(cl, "Cannot find mempool\n"); + return; + } + rte_mempool_dump(stdout, mp); + } +} + +cmdline_parse_token_string_t cmd_dump_one_dump = + TOKEN_STRING_INITIALIZER(struct cmd_dump_one_result, dump, + "dump_ring#dump_mempool"); + +cmdline_parse_token_string_t cmd_dump_one_name = + TOKEN_STRING_INITIALIZER(struct cmd_dump_one_result, name, NULL); + +cmdline_parse_inst_t cmd_dump_one = { + .f = cmd_dump_one_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "dump_ring|dump_mempool <name>: Dump one ring/mempool", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_dump_one_dump, + (void *)&cmd_dump_one_name, + NULL, + }, +}; + +/* *** Add/Del syn filter *** */ +struct cmd_syn_filter_result { + cmdline_fixed_string_t filter; + portid_t port_id; + cmdline_fixed_string_t ops; + cmdline_fixed_string_t priority; + cmdline_fixed_string_t high; + cmdline_fixed_string_t queue; + uint16_t queue_id; +}; + +static void +cmd_syn_filter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_syn_filter_result *res = parsed_result; + struct rte_eth_syn_filter syn_filter; + int ret = 0; + + ret = rte_eth_dev_filter_supported(res->port_id, + RTE_ETH_FILTER_SYN); + if (ret < 0) { + printf("syn filter is not supported on port %u.\n", + res->port_id); + return; + } + + memset(&syn_filter, 0, sizeof(syn_filter)); + + if (!strcmp(res->ops, "add")) { + if (!strcmp(res->high, "high")) + syn_filter.hig_pri = 1; + else + syn_filter.hig_pri = 0; + + syn_filter.queue = res->queue_id; + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_SYN, + RTE_ETH_FILTER_ADD, + &syn_filter); + } else + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_SYN, + RTE_ETH_FILTER_DELETE, + &syn_filter); + + if (ret < 0) + printf("syn filter programming error: (%s)\n", + strerror(-ret)); +} + +cmdline_parse_token_string_t cmd_syn_filter_filter = + TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result, + filter, "syn_filter"); +cmdline_parse_token_num_t cmd_syn_filter_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_syn_filter_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_syn_filter_ops = + TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result, + ops, "add#del"); +cmdline_parse_token_string_t cmd_syn_filter_priority = + TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result, + priority, "priority"); 
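/*
 * Illustrative invocation of the syn_filter command assembled from the
 * tokens defined above and below (port and queue numbers are example
 * values only, chosen to match the help string
 * "syn_filter <port_id> add|del priority high|low queue <queue_id>"):
 *
 *   testpmd> syn_filter 0 add priority high queue 3
 */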
+cmdline_parse_token_string_t cmd_syn_filter_high = + TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result, + high, "high#low"); +cmdline_parse_token_string_t cmd_syn_filter_queue = + TOKEN_STRING_INITIALIZER(struct cmd_syn_filter_result, + queue, "queue"); +cmdline_parse_token_num_t cmd_syn_filter_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_syn_filter_result, + queue_id, UINT16); + +cmdline_parse_inst_t cmd_syn_filter = { + .f = cmd_syn_filter_parsed, + .data = NULL, + .help_str = "syn_filter <port_id> add|del priority high|low queue " + "<queue_id>: Add/Delete syn filter", + .tokens = { + (void *)&cmd_syn_filter_filter, + (void *)&cmd_syn_filter_port_id, + (void *)&cmd_syn_filter_ops, + (void *)&cmd_syn_filter_priority, + (void *)&cmd_syn_filter_high, + (void *)&cmd_syn_filter_queue, + (void *)&cmd_syn_filter_queue_id, + NULL, + }, +}; + +/* *** queue region set *** */ +struct cmd_queue_region_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t cmd; + cmdline_fixed_string_t region; + uint8_t region_id; + cmdline_fixed_string_t queue_start_index; + uint8_t queue_id; + cmdline_fixed_string_t queue_num; + uint8_t queue_num_value; +}; + +static void +cmd_queue_region_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_queue_region_result *res = parsed_result; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_queue_region_conf region_conf; + enum rte_pmd_i40e_queue_region_op op_type; +#endif + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + memset(®ion_conf, 0, sizeof(region_conf)); + op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_SET; + region_conf.region_id = res->region_id; + region_conf.queue_num = res->queue_num_value; + region_conf.queue_start_index = res->queue_id; + + ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id, + op_type, ®ion_conf); +#endif + + switch (ret) { + case 0: + break; + case -ENOTSUP: + printf("function not implemented or supported\n"); + break; + default: + printf("queue region config error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_token_string_t cmd_queue_region_set = +TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result, + set, "set"); +cmdline_parse_token_string_t cmd_queue_region_port = + TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result, port, "port"); +cmdline_parse_token_num_t cmd_queue_region_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_queue_region_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_queue_region_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result, + cmd, "queue-region"); +cmdline_parse_token_string_t cmd_queue_region_id = + TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result, + region, "region_id"); +cmdline_parse_token_num_t cmd_queue_region_index = + TOKEN_NUM_INITIALIZER(struct cmd_queue_region_result, + region_id, UINT8); +cmdline_parse_token_string_t cmd_queue_region_queue_start_index = + TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result, + queue_start_index, "queue_start_index"); +cmdline_parse_token_num_t cmd_queue_region_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_queue_region_result, + queue_id, UINT8); +cmdline_parse_token_string_t cmd_queue_region_queue_num = + TOKEN_STRING_INITIALIZER(struct cmd_queue_region_result, + queue_num, "queue_num"); +cmdline_parse_token_num_t cmd_queue_region_queue_num_value = + TOKEN_NUM_INITIALIZER(struct cmd_queue_region_result, + queue_num_value, UINT8); + 
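/*
 * Possible invocation of the queue-region command whose instance is
 * defined below; per the #ifdef above it only takes effect with the i40e
 * PMD. Port, region and queue numbers are example values only:
 *
 *   testpmd> set port 0 queue-region region_id 0 queue_start_index 1 queue_num 2
 */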
+cmdline_parse_inst_t cmd_queue_region = { + .f = cmd_queue_region_parsed, + .data = NULL, + .help_str = "set port <port_id> queue-region region_id <value> " + "queue_start_index <value> queue_num <value>: Set a queue region", + .tokens = { + (void *)&cmd_queue_region_set, + (void *)&cmd_queue_region_port, + (void *)&cmd_queue_region_port_id, + (void *)&cmd_queue_region_cmd, + (void *)&cmd_queue_region_id, + (void *)&cmd_queue_region_index, + (void *)&cmd_queue_region_queue_start_index, + (void *)&cmd_queue_region_queue_id, + (void *)&cmd_queue_region_queue_num, + (void *)&cmd_queue_region_queue_num_value, + NULL, + }, +}; + +/* *** queue region and flowtype set *** */ +struct cmd_region_flowtype_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t cmd; + cmdline_fixed_string_t region; + uint8_t region_id; + cmdline_fixed_string_t flowtype; + uint8_t flowtype_id; +}; + +static void +cmd_region_flowtype_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_region_flowtype_result *res = parsed_result; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_queue_region_conf region_conf; + enum rte_pmd_i40e_queue_region_op op_type; +#endif + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + memset(®ion_conf, 0, sizeof(region_conf)); + + op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_FLOWTYPE_SET; + region_conf.region_id = res->region_id; + region_conf.hw_flowtype = res->flowtype_id; + + ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id, + op_type, ®ion_conf); +#endif + + switch (ret) { + case 0: + break; + case -ENOTSUP: + printf("function not implemented or supported\n"); + break; + default: + printf("region flowtype config error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_token_string_t cmd_region_flowtype_set = +TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result, + set, "set"); +cmdline_parse_token_string_t cmd_region_flowtype_port = + TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result, + port, "port"); +cmdline_parse_token_num_t cmd_region_flowtype_port_index = + TOKEN_NUM_INITIALIZER(struct cmd_region_flowtype_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_region_flowtype_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result, + cmd, "queue-region"); +cmdline_parse_token_string_t cmd_region_flowtype_index = + TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result, + region, "region_id"); +cmdline_parse_token_num_t cmd_region_flowtype_id = + TOKEN_NUM_INITIALIZER(struct cmd_region_flowtype_result, + region_id, UINT8); +cmdline_parse_token_string_t cmd_region_flowtype_flow_index = + TOKEN_STRING_INITIALIZER(struct cmd_region_flowtype_result, + flowtype, "flowtype"); +cmdline_parse_token_num_t cmd_region_flowtype_flow_id = + TOKEN_NUM_INITIALIZER(struct cmd_region_flowtype_result, + flowtype_id, UINT8); +cmdline_parse_inst_t cmd_region_flowtype = { + .f = cmd_region_flowtype_parsed, + .data = NULL, + .help_str = "set port <port_id> queue-region region_id <value> " + "flowtype <value>: Set a flowtype region index", + .tokens = { + (void *)&cmd_region_flowtype_set, + (void *)&cmd_region_flowtype_port, + (void *)&cmd_region_flowtype_port_index, + (void *)&cmd_region_flowtype_cmd, + (void *)&cmd_region_flowtype_index, + (void *)&cmd_region_flowtype_id, + (void *)&cmd_region_flowtype_flow_index, + (void *)&cmd_region_flowtype_flow_id, + NULL, + }, +}; + +/* *** User 
Priority (UP) to queue region (region_id) set *** */ +struct cmd_user_priority_region_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t cmd; + cmdline_fixed_string_t user_priority; + uint8_t user_priority_id; + cmdline_fixed_string_t region; + uint8_t region_id; +}; + +static void +cmd_user_priority_region_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_user_priority_region_result *res = parsed_result; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_queue_region_conf region_conf; + enum rte_pmd_i40e_queue_region_op op_type; +#endif + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + memset(®ion_conf, 0, sizeof(region_conf)); + op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_USER_PRIORITY_SET; + region_conf.user_priority = res->user_priority_id; + region_conf.region_id = res->region_id; + + ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id, + op_type, ®ion_conf); +#endif + + switch (ret) { + case 0: + break; + case -ENOTSUP: + printf("function not implemented or supported\n"); + break; + default: + printf("user_priority region config error: (%s)\n", + strerror(-ret)); + } +} + +cmdline_parse_token_string_t cmd_user_priority_region_set = + TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result, + set, "set"); +cmdline_parse_token_string_t cmd_user_priority_region_port = + TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result, + port, "port"); +cmdline_parse_token_num_t cmd_user_priority_region_port_index = + TOKEN_NUM_INITIALIZER(struct cmd_user_priority_region_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_user_priority_region_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result, + cmd, "queue-region"); +cmdline_parse_token_string_t cmd_user_priority_region_UP = + TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result, + user_priority, "UP"); +cmdline_parse_token_num_t cmd_user_priority_region_UP_id = + TOKEN_NUM_INITIALIZER(struct cmd_user_priority_region_result, + user_priority_id, UINT8); +cmdline_parse_token_string_t cmd_user_priority_region_region = + TOKEN_STRING_INITIALIZER(struct cmd_user_priority_region_result, + region, "region_id"); +cmdline_parse_token_num_t cmd_user_priority_region_region_id = + TOKEN_NUM_INITIALIZER(struct cmd_user_priority_region_result, + region_id, UINT8); + +cmdline_parse_inst_t cmd_user_priority_region = { + .f = cmd_user_priority_region_parsed, + .data = NULL, + .help_str = "set port <port_id> queue-region UP <value> " + "region_id <value>: Set the mapping of User Priority (UP) " + "to queue region (region_id) ", + .tokens = { + (void *)&cmd_user_priority_region_set, + (void *)&cmd_user_priority_region_port, + (void *)&cmd_user_priority_region_port_index, + (void *)&cmd_user_priority_region_cmd, + (void *)&cmd_user_priority_region_UP, + (void *)&cmd_user_priority_region_UP_id, + (void *)&cmd_user_priority_region_region, + (void *)&cmd_user_priority_region_region_id, + NULL, + }, +}; + +/* *** flush all queue region related configuration *** */ +struct cmd_flush_queue_region_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t cmd; + cmdline_fixed_string_t flush; + cmdline_fixed_string_t what; +}; + +static void +cmd_flush_queue_region_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct 
cmd_flush_queue_region_result *res = parsed_result; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_queue_region_conf region_conf; + enum rte_pmd_i40e_queue_region_op op_type; +#endif + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + memset(®ion_conf, 0, sizeof(region_conf)); + + if (strcmp(res->what, "on") == 0) + op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_ON; + else + op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_ALL_FLUSH_OFF; + + ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id, + op_type, ®ion_conf); +#endif + + switch (ret) { + case 0: + break; + case -ENOTSUP: + printf("function not implemented or supported\n"); + break; + default: + printf("queue region config flush error: (%s)\n", + strerror(-ret)); + } +} + +cmdline_parse_token_string_t cmd_flush_queue_region_set = + TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result, + set, "set"); +cmdline_parse_token_string_t cmd_flush_queue_region_port = + TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result, + port, "port"); +cmdline_parse_token_num_t cmd_flush_queue_region_port_index = + TOKEN_NUM_INITIALIZER(struct cmd_flush_queue_region_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_flush_queue_region_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result, + cmd, "queue-region"); +cmdline_parse_token_string_t cmd_flush_queue_region_flush = + TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result, + flush, "flush"); +cmdline_parse_token_string_t cmd_flush_queue_region_what = + TOKEN_STRING_INITIALIZER(struct cmd_flush_queue_region_result, + what, "on#off"); + +cmdline_parse_inst_t cmd_flush_queue_region = { + .f = cmd_flush_queue_region_parsed, + .data = NULL, + .help_str = "set port <port_id> queue-region flush on|off" + ": flush all queue region related configuration", + .tokens = { + (void *)&cmd_flush_queue_region_set, + (void *)&cmd_flush_queue_region_port, + (void *)&cmd_flush_queue_region_port_index, + (void *)&cmd_flush_queue_region_cmd, + (void *)&cmd_flush_queue_region_flush, + (void *)&cmd_flush_queue_region_what, + NULL, + }, +}; + +/* *** get all queue region related configuration info *** */ +struct cmd_show_queue_region_info { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t cmd; +}; + +static void +cmd_show_queue_region_info_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_queue_region_info *res = parsed_result; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_queue_regions rte_pmd_regions; + enum rte_pmd_i40e_queue_region_op op_type; +#endif + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + memset(&rte_pmd_regions, 0, sizeof(rte_pmd_regions)); + + op_type = RTE_PMD_I40E_RSS_QUEUE_REGION_INFO_GET; + + ret = rte_pmd_i40e_rss_queue_region_conf(res->port_id, + op_type, &rte_pmd_regions); + + port_queue_region_info_display(res->port_id, &rte_pmd_regions); +#endif + + switch (ret) { + case 0: + break; + case -ENOTSUP: + printf("function not implemented or supported\n"); + break; + default: + printf("queue region config info show error: (%s)\n", + strerror(-ret)); + } +} + +cmdline_parse_token_string_t cmd_show_queue_region_info_get = +TOKEN_STRING_INITIALIZER(struct cmd_show_queue_region_info, + show, "show"); +cmdline_parse_token_string_t cmd_show_queue_region_info_port = + TOKEN_STRING_INITIALIZER(struct 
cmd_show_queue_region_info, + port, "port"); +cmdline_parse_token_num_t cmd_show_queue_region_info_port_index = + TOKEN_NUM_INITIALIZER(struct cmd_show_queue_region_info, + port_id, UINT16); +cmdline_parse_token_string_t cmd_show_queue_region_info_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_show_queue_region_info, + cmd, "queue-region"); + +cmdline_parse_inst_t cmd_show_queue_region_info_all = { + .f = cmd_show_queue_region_info_parsed, + .data = NULL, + .help_str = "show port <port_id> queue-region" + ": show all queue region related configuration info", + .tokens = { + (void *)&cmd_show_queue_region_info_get, + (void *)&cmd_show_queue_region_info_port, + (void *)&cmd_show_queue_region_info_port_index, + (void *)&cmd_show_queue_region_info_cmd, + NULL, + }, +}; + +/* *** ADD/REMOVE A 2tuple FILTER *** */ +struct cmd_2tuple_filter_result { + cmdline_fixed_string_t filter; + portid_t port_id; + cmdline_fixed_string_t ops; + cmdline_fixed_string_t dst_port; + uint16_t dst_port_value; + cmdline_fixed_string_t protocol; + uint8_t protocol_value; + cmdline_fixed_string_t mask; + uint8_t mask_value; + cmdline_fixed_string_t tcp_flags; + uint8_t tcp_flags_value; + cmdline_fixed_string_t priority; + uint8_t priority_value; + cmdline_fixed_string_t queue; + uint16_t queue_id; +}; + +static void +cmd_2tuple_filter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct rte_eth_ntuple_filter filter; + struct cmd_2tuple_filter_result *res = parsed_result; + int ret = 0; + + ret = rte_eth_dev_filter_supported(res->port_id, RTE_ETH_FILTER_NTUPLE); + if (ret < 0) { + printf("ntuple filter is not supported on port %u.\n", + res->port_id); + return; + } + + memset(&filter, 0, sizeof(struct rte_eth_ntuple_filter)); + + filter.flags = RTE_2TUPLE_FLAGS; + filter.dst_port_mask = (res->mask_value & 0x02) ? UINT16_MAX : 0; + filter.proto_mask = (res->mask_value & 0x01) ? UINT8_MAX : 0; + filter.proto = res->protocol_value; + filter.priority = res->priority_value; + if (res->tcp_flags_value != 0 && filter.proto != IPPROTO_TCP) { + printf("nonzero tcp_flags is only meaningful" + " when protocol is TCP.\n"); + return; + } + if (res->tcp_flags_value > RTE_NTUPLE_TCP_FLAGS_MASK) { + printf("invalid TCP flags.\n"); + return; + } + + if (res->tcp_flags_value != 0) { + filter.flags |= RTE_NTUPLE_FLAGS_TCP_FLAG; + filter.tcp_flags = res->tcp_flags_value; + } + + /* need convert to big endian. 
*/ + filter.dst_port = rte_cpu_to_be_16(res->dst_port_value); + filter.queue = res->queue_id; + + if (!strcmp(res->ops, "add")) + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_NTUPLE, + RTE_ETH_FILTER_ADD, + &filter); + else + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_NTUPLE, + RTE_ETH_FILTER_DELETE, + &filter); + if (ret < 0) + printf("2tuple filter programming error: (%s)\n", + strerror(-ret)); + +} + +cmdline_parse_token_string_t cmd_2tuple_filter_filter = + TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result, + filter, "2tuple_filter"); +cmdline_parse_token_num_t cmd_2tuple_filter_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_2tuple_filter_ops = + TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result, + ops, "add#del"); +cmdline_parse_token_string_t cmd_2tuple_filter_dst_port = + TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result, + dst_port, "dst_port"); +cmdline_parse_token_num_t cmd_2tuple_filter_dst_port_value = + TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result, + dst_port_value, UINT16); +cmdline_parse_token_string_t cmd_2tuple_filter_protocol = + TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result, + protocol, "protocol"); +cmdline_parse_token_num_t cmd_2tuple_filter_protocol_value = + TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result, + protocol_value, UINT8); +cmdline_parse_token_string_t cmd_2tuple_filter_mask = + TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result, + mask, "mask"); +cmdline_parse_token_num_t cmd_2tuple_filter_mask_value = + TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result, + mask_value, INT8); +cmdline_parse_token_string_t cmd_2tuple_filter_tcp_flags = + TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result, + tcp_flags, "tcp_flags"); +cmdline_parse_token_num_t cmd_2tuple_filter_tcp_flags_value = + TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result, + tcp_flags_value, UINT8); +cmdline_parse_token_string_t cmd_2tuple_filter_priority = + TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result, + priority, "priority"); +cmdline_parse_token_num_t cmd_2tuple_filter_priority_value = + TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result, + priority_value, UINT8); +cmdline_parse_token_string_t cmd_2tuple_filter_queue = + TOKEN_STRING_INITIALIZER(struct cmd_2tuple_filter_result, + queue, "queue"); +cmdline_parse_token_num_t cmd_2tuple_filter_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_2tuple_filter_result, + queue_id, UINT16); + +cmdline_parse_inst_t cmd_2tuple_filter = { + .f = cmd_2tuple_filter_parsed, + .data = NULL, + .help_str = "2tuple_filter <port_id> add|del dst_port <value> protocol " + "<value> mask <value> tcp_flags <value> priority <value> queue " + "<queue_id>: Add a 2tuple filter", + .tokens = { + (void *)&cmd_2tuple_filter_filter, + (void *)&cmd_2tuple_filter_port_id, + (void *)&cmd_2tuple_filter_ops, + (void *)&cmd_2tuple_filter_dst_port, + (void *)&cmd_2tuple_filter_dst_port_value, + (void *)&cmd_2tuple_filter_protocol, + (void *)&cmd_2tuple_filter_protocol_value, + (void *)&cmd_2tuple_filter_mask, + (void *)&cmd_2tuple_filter_mask_value, + (void *)&cmd_2tuple_filter_tcp_flags, + (void *)&cmd_2tuple_filter_tcp_flags_value, + (void *)&cmd_2tuple_filter_priority, + (void *)&cmd_2tuple_filter_priority_value, + (void *)&cmd_2tuple_filter_queue, + (void *)&cmd_2tuple_filter_queue_id, + NULL, + }, +}; + +/* *** ADD/REMOVE A 5tuple FILTER *** */ +struct cmd_5tuple_filter_result { + 
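	/*
	 * Parse result for the 5tuple_filter command: the literal command
	 * word, target port id and add|del operation, followed by the
	 * destination/source IP and port, protocol, mask, tcp_flags,
	 * priority and destination queue tokens listed below.
	 */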
cmdline_fixed_string_t filter; + portid_t port_id; + cmdline_fixed_string_t ops; + cmdline_fixed_string_t dst_ip; + cmdline_ipaddr_t dst_ip_value; + cmdline_fixed_string_t src_ip; + cmdline_ipaddr_t src_ip_value; + cmdline_fixed_string_t dst_port; + uint16_t dst_port_value; + cmdline_fixed_string_t src_port; + uint16_t src_port_value; + cmdline_fixed_string_t protocol; + uint8_t protocol_value; + cmdline_fixed_string_t mask; + uint8_t mask_value; + cmdline_fixed_string_t tcp_flags; + uint8_t tcp_flags_value; + cmdline_fixed_string_t priority; + uint8_t priority_value; + cmdline_fixed_string_t queue; + uint16_t queue_id; +}; + +static void +cmd_5tuple_filter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct rte_eth_ntuple_filter filter; + struct cmd_5tuple_filter_result *res = parsed_result; + int ret = 0; + + ret = rte_eth_dev_filter_supported(res->port_id, RTE_ETH_FILTER_NTUPLE); + if (ret < 0) { + printf("ntuple filter is not supported on port %u.\n", + res->port_id); + return; + } + + memset(&filter, 0, sizeof(struct rte_eth_ntuple_filter)); + + filter.flags = RTE_5TUPLE_FLAGS; + filter.dst_ip_mask = (res->mask_value & 0x10) ? UINT32_MAX : 0; + filter.src_ip_mask = (res->mask_value & 0x08) ? UINT32_MAX : 0; + filter.dst_port_mask = (res->mask_value & 0x04) ? UINT16_MAX : 0; + filter.src_port_mask = (res->mask_value & 0x02) ? UINT16_MAX : 0; + filter.proto_mask = (res->mask_value & 0x01) ? UINT8_MAX : 0; + filter.proto = res->protocol_value; + filter.priority = res->priority_value; + if (res->tcp_flags_value != 0 && filter.proto != IPPROTO_TCP) { + printf("nonzero tcp_flags is only meaningful" + " when protocol is TCP.\n"); + return; + } + if (res->tcp_flags_value > RTE_NTUPLE_TCP_FLAGS_MASK) { + printf("invalid TCP flags.\n"); + return; + } + + if (res->tcp_flags_value != 0) { + filter.flags |= RTE_NTUPLE_FLAGS_TCP_FLAG; + filter.tcp_flags = res->tcp_flags_value; + } + + if (res->dst_ip_value.family == AF_INET) + /* no need to convert, already big endian. */ + filter.dst_ip = res->dst_ip_value.addr.ipv4.s_addr; + else { + if (filter.dst_ip_mask == 0) { + printf("can not support ipv6 involved compare.\n"); + return; + } + filter.dst_ip = 0; + } + + if (res->src_ip_value.family == AF_INET) + /* no need to convert, already big endian. */ + filter.src_ip = res->src_ip_value.addr.ipv4.s_addr; + else { + if (filter.src_ip_mask == 0) { + printf("can not support ipv6 involved compare.\n"); + return; + } + filter.src_ip = 0; + } + /* need convert to big endian. 
*/ + filter.dst_port = rte_cpu_to_be_16(res->dst_port_value); + filter.src_port = rte_cpu_to_be_16(res->src_port_value); + filter.queue = res->queue_id; + + if (!strcmp(res->ops, "add")) + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_NTUPLE, + RTE_ETH_FILTER_ADD, + &filter); + else + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_NTUPLE, + RTE_ETH_FILTER_DELETE, + &filter); + if (ret < 0) + printf("5tuple filter programming error: (%s)\n", + strerror(-ret)); +} + +cmdline_parse_token_string_t cmd_5tuple_filter_filter = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + filter, "5tuple_filter"); +cmdline_parse_token_num_t cmd_5tuple_filter_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_5tuple_filter_ops = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + ops, "add#del"); +cmdline_parse_token_string_t cmd_5tuple_filter_dst_ip = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + dst_ip, "dst_ip"); +cmdline_parse_token_ipaddr_t cmd_5tuple_filter_dst_ip_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_5tuple_filter_result, + dst_ip_value); +cmdline_parse_token_string_t cmd_5tuple_filter_src_ip = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + src_ip, "src_ip"); +cmdline_parse_token_ipaddr_t cmd_5tuple_filter_src_ip_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_5tuple_filter_result, + src_ip_value); +cmdline_parse_token_string_t cmd_5tuple_filter_dst_port = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + dst_port, "dst_port"); +cmdline_parse_token_num_t cmd_5tuple_filter_dst_port_value = + TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result, + dst_port_value, UINT16); +cmdline_parse_token_string_t cmd_5tuple_filter_src_port = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + src_port, "src_port"); +cmdline_parse_token_num_t cmd_5tuple_filter_src_port_value = + TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result, + src_port_value, UINT16); +cmdline_parse_token_string_t cmd_5tuple_filter_protocol = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + protocol, "protocol"); +cmdline_parse_token_num_t cmd_5tuple_filter_protocol_value = + TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result, + protocol_value, UINT8); +cmdline_parse_token_string_t cmd_5tuple_filter_mask = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + mask, "mask"); +cmdline_parse_token_num_t cmd_5tuple_filter_mask_value = + TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result, + mask_value, INT8); +cmdline_parse_token_string_t cmd_5tuple_filter_tcp_flags = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + tcp_flags, "tcp_flags"); +cmdline_parse_token_num_t cmd_5tuple_filter_tcp_flags_value = + TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result, + tcp_flags_value, UINT8); +cmdline_parse_token_string_t cmd_5tuple_filter_priority = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + priority, "priority"); +cmdline_parse_token_num_t cmd_5tuple_filter_priority_value = + TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result, + priority_value, UINT8); +cmdline_parse_token_string_t cmd_5tuple_filter_queue = + TOKEN_STRING_INITIALIZER(struct cmd_5tuple_filter_result, + queue, "queue"); +cmdline_parse_token_num_t cmd_5tuple_filter_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_5tuple_filter_result, + queue_id, UINT16); + +cmdline_parse_inst_t cmd_5tuple_filter = { + .f = cmd_5tuple_filter_parsed, + .data = NULL, + 
.help_str = "5tuple_filter <port_id> add|del dst_ip <value> " + "src_ip <value> dst_port <value> src_port <value> " + "protocol <value> mask <value> tcp_flags <value> " + "priority <value> queue <queue_id>: Add/Del a 5tuple filter", + .tokens = { + (void *)&cmd_5tuple_filter_filter, + (void *)&cmd_5tuple_filter_port_id, + (void *)&cmd_5tuple_filter_ops, + (void *)&cmd_5tuple_filter_dst_ip, + (void *)&cmd_5tuple_filter_dst_ip_value, + (void *)&cmd_5tuple_filter_src_ip, + (void *)&cmd_5tuple_filter_src_ip_value, + (void *)&cmd_5tuple_filter_dst_port, + (void *)&cmd_5tuple_filter_dst_port_value, + (void *)&cmd_5tuple_filter_src_port, + (void *)&cmd_5tuple_filter_src_port_value, + (void *)&cmd_5tuple_filter_protocol, + (void *)&cmd_5tuple_filter_protocol_value, + (void *)&cmd_5tuple_filter_mask, + (void *)&cmd_5tuple_filter_mask_value, + (void *)&cmd_5tuple_filter_tcp_flags, + (void *)&cmd_5tuple_filter_tcp_flags_value, + (void *)&cmd_5tuple_filter_priority, + (void *)&cmd_5tuple_filter_priority_value, + (void *)&cmd_5tuple_filter_queue, + (void *)&cmd_5tuple_filter_queue_id, + NULL, + }, +}; + +/* *** ADD/REMOVE A flex FILTER *** */ +struct cmd_flex_filter_result { + cmdline_fixed_string_t filter; + cmdline_fixed_string_t ops; + portid_t port_id; + cmdline_fixed_string_t len; + uint8_t len_value; + cmdline_fixed_string_t bytes; + cmdline_fixed_string_t bytes_value; + cmdline_fixed_string_t mask; + cmdline_fixed_string_t mask_value; + cmdline_fixed_string_t priority; + uint8_t priority_value; + cmdline_fixed_string_t queue; + uint16_t queue_id; +}; + +static int xdigit2val(unsigned char c) +{ + int val; + if (isdigit(c)) + val = c - '0'; + else if (isupper(c)) + val = c - 'A' + 10; + else + val = c - 'a' + 10; + return val; +} + +static void +cmd_flex_filter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + int ret = 0; + struct rte_eth_flex_filter filter; + struct cmd_flex_filter_result *res = parsed_result; + char *bytes_ptr, *mask_ptr; + uint16_t len, i, j = 0; + char c; + int val; + uint8_t byte = 0; + + if (res->len_value > RTE_FLEX_FILTER_MAXLEN) { + printf("the len exceed the max length 128\n"); + return; + } + memset(&filter, 0, sizeof(struct rte_eth_flex_filter)); + filter.len = res->len_value; + filter.priority = res->priority_value; + filter.queue = res->queue_id; + bytes_ptr = res->bytes_value; + mask_ptr = res->mask_value; + + /* translate bytes string to array. */ + if (bytes_ptr[0] == '0' && ((bytes_ptr[1] == 'x') || + (bytes_ptr[1] == 'X'))) + bytes_ptr += 2; + len = strnlen(bytes_ptr, res->len_value * 2); + if (len == 0 || (len % 8 != 0)) { + printf("please check len and bytes input\n"); + return; + } + for (i = 0; i < len; i++) { + c = bytes_ptr[i]; + if (isxdigit(c) == 0) { + /* invalid characters. */ + printf("invalid input\n"); + return; + } + val = xdigit2val(c); + if (i % 2) { + byte |= val; + filter.bytes[j] = byte; + printf("bytes[%d]:%02x ", j, filter.bytes[j]); + j++; + byte = 0; + } else + byte |= val << 4; + } + printf("\n"); + /* translate mask string to uint8_t array. */ + if (mask_ptr[0] == '0' && ((mask_ptr[1] == 'x') || + (mask_ptr[1] == 'X'))) + mask_ptr += 2; + len = strnlen(mask_ptr, (res->len_value + 3) / 4); + if (len == 0) { + printf("invalid input\n"); + return; + } + j = 0; + byte = 0; + for (i = 0; i < len; i++) { + c = mask_ptr[i]; + if (isxdigit(c) == 0) { + /* invalid characters. 
*/ + printf("invalid input\n"); + return; + } + val = xdigit2val(c); + if (i % 2) { + byte |= val; + filter.mask[j] = byte; + printf("mask[%d]:%02x ", j, filter.mask[j]); + j++; + byte = 0; + } else + byte |= val << 4; + } + printf("\n"); + + if (!strcmp(res->ops, "add")) + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_FLEXIBLE, + RTE_ETH_FILTER_ADD, + &filter); + else + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_FLEXIBLE, + RTE_ETH_FILTER_DELETE, + &filter); + + if (ret < 0) + printf("flex filter setting error: (%s)\n", strerror(-ret)); +} + +cmdline_parse_token_string_t cmd_flex_filter_filter = + TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result, + filter, "flex_filter"); +cmdline_parse_token_num_t cmd_flex_filter_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_flex_filter_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_flex_filter_ops = + TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result, + ops, "add#del"); +cmdline_parse_token_string_t cmd_flex_filter_len = + TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result, + len, "len"); +cmdline_parse_token_num_t cmd_flex_filter_len_value = + TOKEN_NUM_INITIALIZER(struct cmd_flex_filter_result, + len_value, UINT8); +cmdline_parse_token_string_t cmd_flex_filter_bytes = + TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result, + bytes, "bytes"); +cmdline_parse_token_string_t cmd_flex_filter_bytes_value = + TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result, + bytes_value, NULL); +cmdline_parse_token_string_t cmd_flex_filter_mask = + TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result, + mask, "mask"); +cmdline_parse_token_string_t cmd_flex_filter_mask_value = + TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result, + mask_value, NULL); +cmdline_parse_token_string_t cmd_flex_filter_priority = + TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result, + priority, "priority"); +cmdline_parse_token_num_t cmd_flex_filter_priority_value = + TOKEN_NUM_INITIALIZER(struct cmd_flex_filter_result, + priority_value, UINT8); +cmdline_parse_token_string_t cmd_flex_filter_queue = + TOKEN_STRING_INITIALIZER(struct cmd_flex_filter_result, + queue, "queue"); +cmdline_parse_token_num_t cmd_flex_filter_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_flex_filter_result, + queue_id, UINT16); +cmdline_parse_inst_t cmd_flex_filter = { + .f = cmd_flex_filter_parsed, + .data = NULL, + .help_str = "flex_filter <port_id> add|del len <value> bytes " + "<value> mask <value> priority <value> queue <queue_id>: " + "Add/Del a flex filter", + .tokens = { + (void *)&cmd_flex_filter_filter, + (void *)&cmd_flex_filter_port_id, + (void *)&cmd_flex_filter_ops, + (void *)&cmd_flex_filter_len, + (void *)&cmd_flex_filter_len_value, + (void *)&cmd_flex_filter_bytes, + (void *)&cmd_flex_filter_bytes_value, + (void *)&cmd_flex_filter_mask, + (void *)&cmd_flex_filter_mask_value, + (void *)&cmd_flex_filter_priority, + (void *)&cmd_flex_filter_priority_value, + (void *)&cmd_flex_filter_queue, + (void *)&cmd_flex_filter_queue_id, + NULL, + }, +}; + +/* *** Filters Control *** */ + +/* *** deal with ethertype filter *** */ +struct cmd_ethertype_filter_result { + cmdline_fixed_string_t filter; + portid_t port_id; + cmdline_fixed_string_t ops; + cmdline_fixed_string_t mac; + struct rte_ether_addr mac_addr; + cmdline_fixed_string_t ethertype; + uint16_t ethertype_value; + cmdline_fixed_string_t drop; + cmdline_fixed_string_t queue; + uint16_t queue_id; +}; + +cmdline_parse_token_string_t cmd_ethertype_filter_filter = + 
TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result, + filter, "ethertype_filter"); +cmdline_parse_token_num_t cmd_ethertype_filter_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_ethertype_filter_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_ethertype_filter_ops = + TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result, + ops, "add#del"); +cmdline_parse_token_string_t cmd_ethertype_filter_mac = + TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result, + mac, "mac_addr#mac_ignr"); +cmdline_parse_token_etheraddr_t cmd_ethertype_filter_mac_addr = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_ethertype_filter_result, + mac_addr); +cmdline_parse_token_string_t cmd_ethertype_filter_ethertype = + TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result, + ethertype, "ethertype"); +cmdline_parse_token_num_t cmd_ethertype_filter_ethertype_value = + TOKEN_NUM_INITIALIZER(struct cmd_ethertype_filter_result, + ethertype_value, UINT16); +cmdline_parse_token_string_t cmd_ethertype_filter_drop = + TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result, + drop, "drop#fwd"); +cmdline_parse_token_string_t cmd_ethertype_filter_queue = + TOKEN_STRING_INITIALIZER(struct cmd_ethertype_filter_result, + queue, "queue"); +cmdline_parse_token_num_t cmd_ethertype_filter_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_ethertype_filter_result, + queue_id, UINT16); + +static void +cmd_ethertype_filter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_ethertype_filter_result *res = parsed_result; + struct rte_eth_ethertype_filter filter; + int ret = 0; + + ret = rte_eth_dev_filter_supported(res->port_id, + RTE_ETH_FILTER_ETHERTYPE); + if (ret < 0) { + printf("ethertype filter is not supported on port %u.\n", + res->port_id); + return; + } + + memset(&filter, 0, sizeof(filter)); + if (!strcmp(res->mac, "mac_addr")) { + filter.flags |= RTE_ETHTYPE_FLAGS_MAC; + rte_memcpy(&filter.mac_addr, &res->mac_addr, + sizeof(struct rte_ether_addr)); + } + if (!strcmp(res->drop, "drop")) + filter.flags |= RTE_ETHTYPE_FLAGS_DROP; + filter.ether_type = res->ethertype_value; + filter.queue = res->queue_id; + + if (!strcmp(res->ops, "add")) + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_ETHERTYPE, + RTE_ETH_FILTER_ADD, + &filter); + else + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_ETHERTYPE, + RTE_ETH_FILTER_DELETE, + &filter); + if (ret < 0) + printf("ethertype filter programming error: (%s)\n", + strerror(-ret)); +} + +cmdline_parse_inst_t cmd_ethertype_filter = { + .f = cmd_ethertype_filter_parsed, + .data = NULL, + .help_str = "ethertype_filter <port_id> add|del mac_addr|mac_ignr " + "<mac_addr> ethertype <value> drop|fw queue <queue_id>: " + "Add or delete an ethertype filter entry", + .tokens = { + (void *)&cmd_ethertype_filter_filter, + (void *)&cmd_ethertype_filter_port_id, + (void *)&cmd_ethertype_filter_ops, + (void *)&cmd_ethertype_filter_mac, + (void *)&cmd_ethertype_filter_mac_addr, + (void *)&cmd_ethertype_filter_ethertype, + (void *)&cmd_ethertype_filter_ethertype_value, + (void *)&cmd_ethertype_filter_drop, + (void *)&cmd_ethertype_filter_queue, + (void *)&cmd_ethertype_filter_queue_id, + NULL, + }, +}; + +/* *** deal with flow director filter *** */ +struct cmd_flow_director_result { + cmdline_fixed_string_t flow_director_filter; + portid_t port_id; + cmdline_fixed_string_t mode; + cmdline_fixed_string_t mode_value; + cmdline_fixed_string_t ops; + cmdline_fixed_string_t flow; + 
cmdline_fixed_string_t flow_type; + cmdline_fixed_string_t ether; + uint16_t ether_type; + cmdline_fixed_string_t src; + cmdline_ipaddr_t ip_src; + uint16_t port_src; + cmdline_fixed_string_t dst; + cmdline_ipaddr_t ip_dst; + uint16_t port_dst; + cmdline_fixed_string_t verify_tag; + uint32_t verify_tag_value; + cmdline_fixed_string_t tos; + uint8_t tos_value; + cmdline_fixed_string_t proto; + uint8_t proto_value; + cmdline_fixed_string_t ttl; + uint8_t ttl_value; + cmdline_fixed_string_t vlan; + uint16_t vlan_value; + cmdline_fixed_string_t flexbytes; + cmdline_fixed_string_t flexbytes_value; + cmdline_fixed_string_t pf_vf; + cmdline_fixed_string_t drop; + cmdline_fixed_string_t queue; + uint16_t queue_id; + cmdline_fixed_string_t fd_id; + uint32_t fd_id_value; + cmdline_fixed_string_t mac; + struct rte_ether_addr mac_addr; + cmdline_fixed_string_t tunnel; + cmdline_fixed_string_t tunnel_type; + cmdline_fixed_string_t tunnel_id; + uint32_t tunnel_id_value; + cmdline_fixed_string_t packet; + char filepath[]; +}; + +static inline int +parse_flexbytes(const char *q_arg, uint8_t *flexbytes, uint16_t max_num) +{ + char s[256]; + const char *p, *p0 = q_arg; + char *end; + unsigned long int_fld; + char *str_fld[max_num]; + int i; + unsigned size; + int ret = -1; + + p = strchr(p0, '('); + if (p == NULL) + return -1; + ++p; + p0 = strchr(p, ')'); + if (p0 == NULL) + return -1; + + size = p0 - p; + if (size >= sizeof(s)) + return -1; + + snprintf(s, sizeof(s), "%.*s", size, p); + ret = rte_strsplit(s, sizeof(s), str_fld, max_num, ','); + if (ret < 0 || ret > max_num) + return -1; + for (i = 0; i < ret; i++) { + errno = 0; + int_fld = strtoul(str_fld[i], &end, 0); + if (errno != 0 || *end != '\0' || int_fld > UINT8_MAX) + return -1; + flexbytes[i] = (uint8_t)int_fld; + } + return ret; +} + +static uint16_t +str2flowtype(char *string) +{ + uint8_t i = 0; + static const struct { + char str[32]; + uint16_t type; + } flowtype_str[] = { + {"raw", RTE_ETH_FLOW_RAW}, + {"ipv4", RTE_ETH_FLOW_IPV4}, + {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, + {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, + {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, + {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, + {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, + {"ipv6", RTE_ETH_FLOW_IPV6}, + {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, + {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, + {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, + {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, + {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, + {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, + }; + + for (i = 0; i < RTE_DIM(flowtype_str); i++) { + if (!strcmp(flowtype_str[i].str, string)) + return flowtype_str[i].type; + } + + if (isdigit(string[0]) && atoi(string) > 0 && atoi(string) < 64) + return (uint16_t)atoi(string); + + return RTE_ETH_FLOW_UNKNOWN; +} + +static enum rte_eth_fdir_tunnel_type +str2fdir_tunneltype(char *string) +{ + uint8_t i = 0; + + static const struct { + char str[32]; + enum rte_eth_fdir_tunnel_type type; + } tunneltype_str[] = { + {"NVGRE", RTE_FDIR_TUNNEL_TYPE_NVGRE}, + {"VxLAN", RTE_FDIR_TUNNEL_TYPE_VXLAN}, + }; + + for (i = 0; i < RTE_DIM(tunneltype_str); i++) { + if (!strcmp(tunneltype_str[i].str, string)) + return tunneltype_str[i].type; + } + return RTE_FDIR_TUNNEL_TYPE_UNKNOWN; +} + +#define IPV4_ADDR_TO_UINT(ip_addr, ip) \ +do { \ + if ((ip_addr).family == AF_INET) \ + (ip) = (ip_addr).addr.ipv4.s_addr; \ + else { \ + printf("invalid parameter.\n"); \ + return; \ + } \ +} while (0) + +#define IPV6_ADDR_TO_ARRAY(ip_addr, ip) \ +do { \ 
+ if ((ip_addr).family == AF_INET6) \ + rte_memcpy(&(ip), \ + &((ip_addr).addr.ipv6), \ + sizeof(struct in6_addr)); \ + else { \ + printf("invalid parameter.\n"); \ + return; \ + } \ +} while (0) + +static void +cmd_flow_director_filter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_flow_director_result *res = parsed_result; + struct rte_eth_fdir_filter entry; + uint8_t flexbytes[RTE_ETH_FDIR_MAX_FLEXLEN]; + char *end; + unsigned long vf_id; + int ret = 0; + + ret = rte_eth_dev_filter_supported(res->port_id, RTE_ETH_FILTER_FDIR); + if (ret < 0) { + printf("flow director is not supported on port %u.\n", + res->port_id); + return; + } + memset(flexbytes, 0, sizeof(flexbytes)); + memset(&entry, 0, sizeof(struct rte_eth_fdir_filter)); + + if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + if (strcmp(res->mode_value, "MAC-VLAN")) { + printf("Please set mode to MAC-VLAN.\n"); + return; + } + } else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) { + if (strcmp(res->mode_value, "Tunnel")) { + printf("Please set mode to Tunnel.\n"); + return; + } + } else { + if (!strcmp(res->mode_value, "raw")) { +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_flow_type_mapping + mapping[RTE_PMD_I40E_FLOW_TYPE_MAX]; + struct rte_pmd_i40e_pkt_template_conf conf; + uint16_t flow_type = str2flowtype(res->flow_type); + uint16_t i, port = res->port_id; + uint8_t add; + + memset(&conf, 0, sizeof(conf)); + + if (flow_type == RTE_ETH_FLOW_UNKNOWN) { + printf("Invalid flow type specified.\n"); + return; + } + ret = rte_pmd_i40e_flow_type_mapping_get(res->port_id, + mapping); + if (ret) + return; + if (mapping[flow_type].pctype == 0ULL) { + printf("Invalid flow type specified.\n"); + return; + } + for (i = 0; i < RTE_PMD_I40E_PCTYPE_MAX; i++) { + if (mapping[flow_type].pctype & (1ULL << i)) { + conf.input.pctype = i; + break; + } + } + + conf.input.packet = open_file(res->filepath, + &conf.input.length); + if (!conf.input.packet) + return; + if (!strcmp(res->drop, "drop")) + conf.action.behavior = + RTE_PMD_I40E_PKT_TEMPLATE_REJECT; + else + conf.action.behavior = + RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT; + conf.action.report_status = + RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID; + conf.action.rx_queue = res->queue_id; + conf.soft_id = res->fd_id_value; + add = strcmp(res->ops, "del") ? 1 : 0; + ret = rte_pmd_i40e_flow_add_del_packet_template(port, + &conf, + add); + if (ret < 0) + printf("flow director config error: (%s)\n", + strerror(-ret)); + close_file(conf.input.packet); +#endif + return; + } else if (strcmp(res->mode_value, "IP")) { + printf("Please set mode to IP or raw.\n"); + return; + } + entry.input.flow_type = str2flowtype(res->flow_type); + } + + ret = parse_flexbytes(res->flexbytes_value, + flexbytes, + RTE_ETH_FDIR_MAX_FLEXLEN); + if (ret < 0) { + printf("error: Cannot parse flexbytes input.\n"); + return; + } + + switch (entry.input.flow_type) { + case RTE_ETH_FLOW_FRAG_IPV4: + case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: + entry.input.flow.ip4_flow.proto = res->proto_value; + /* fall-through */ + case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: + IPV4_ADDR_TO_UINT(res->ip_dst, + entry.input.flow.ip4_flow.dst_ip); + IPV4_ADDR_TO_UINT(res->ip_src, + entry.input.flow.ip4_flow.src_ip); + entry.input.flow.ip4_flow.tos = res->tos_value; + entry.input.flow.ip4_flow.ttl = res->ttl_value; + /* need convert to big endian. 
*/ + entry.input.flow.udp4_flow.dst_port = + rte_cpu_to_be_16(res->port_dst); + entry.input.flow.udp4_flow.src_port = + rte_cpu_to_be_16(res->port_src); + break; + case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP: + IPV4_ADDR_TO_UINT(res->ip_dst, + entry.input.flow.sctp4_flow.ip.dst_ip); + IPV4_ADDR_TO_UINT(res->ip_src, + entry.input.flow.sctp4_flow.ip.src_ip); + entry.input.flow.ip4_flow.tos = res->tos_value; + entry.input.flow.ip4_flow.ttl = res->ttl_value; + /* need convert to big endian. */ + entry.input.flow.sctp4_flow.dst_port = + rte_cpu_to_be_16(res->port_dst); + entry.input.flow.sctp4_flow.src_port = + rte_cpu_to_be_16(res->port_src); + entry.input.flow.sctp4_flow.verify_tag = + rte_cpu_to_be_32(res->verify_tag_value); + break; + case RTE_ETH_FLOW_FRAG_IPV6: + case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: + entry.input.flow.ipv6_flow.proto = res->proto_value; + /* fall-through */ + case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: + case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: + IPV6_ADDR_TO_ARRAY(res->ip_dst, + entry.input.flow.ipv6_flow.dst_ip); + IPV6_ADDR_TO_ARRAY(res->ip_src, + entry.input.flow.ipv6_flow.src_ip); + entry.input.flow.ipv6_flow.tc = res->tos_value; + entry.input.flow.ipv6_flow.hop_limits = res->ttl_value; + /* need convert to big endian. */ + entry.input.flow.udp6_flow.dst_port = + rte_cpu_to_be_16(res->port_dst); + entry.input.flow.udp6_flow.src_port = + rte_cpu_to_be_16(res->port_src); + break; + case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP: + IPV6_ADDR_TO_ARRAY(res->ip_dst, + entry.input.flow.sctp6_flow.ip.dst_ip); + IPV6_ADDR_TO_ARRAY(res->ip_src, + entry.input.flow.sctp6_flow.ip.src_ip); + entry.input.flow.ipv6_flow.tc = res->tos_value; + entry.input.flow.ipv6_flow.hop_limits = res->ttl_value; + /* need convert to big endian. */ + entry.input.flow.sctp6_flow.dst_port = + rte_cpu_to_be_16(res->port_dst); + entry.input.flow.sctp6_flow.src_port = + rte_cpu_to_be_16(res->port_src); + entry.input.flow.sctp6_flow.verify_tag = + rte_cpu_to_be_32(res->verify_tag_value); + break; + case RTE_ETH_FLOW_L2_PAYLOAD: + entry.input.flow.l2_flow.ether_type = + rte_cpu_to_be_16(res->ether_type); + break; + default: + break; + } + + if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + rte_memcpy(&entry.input.flow.mac_vlan_flow.mac_addr, + &res->mac_addr, + sizeof(struct rte_ether_addr)); + + if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) { + rte_memcpy(&entry.input.flow.tunnel_flow.mac_addr, + &res->mac_addr, + sizeof(struct rte_ether_addr)); + entry.input.flow.tunnel_flow.tunnel_type = + str2fdir_tunneltype(res->tunnel_type); + entry.input.flow.tunnel_flow.tunnel_id = + rte_cpu_to_be_32(res->tunnel_id_value); + } + + rte_memcpy(entry.input.flow_ext.flexbytes, + flexbytes, + RTE_ETH_FDIR_MAX_FLEXLEN); + + entry.input.flow_ext.vlan_tci = rte_cpu_to_be_16(res->vlan_value); + + entry.action.flex_off = 0; /*use 0 by default */ + if (!strcmp(res->drop, "drop")) + entry.action.behavior = RTE_ETH_FDIR_REJECT; + else + entry.action.behavior = RTE_ETH_FDIR_ACCEPT; + + if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN && + fdir_conf.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { + if (!strcmp(res->pf_vf, "pf")) + entry.input.flow_ext.is_vf = 0; + else if (!strncmp(res->pf_vf, "vf", 2)) { + struct rte_eth_dev_info dev_info; + + ret = eth_dev_info_get_print_err(res->port_id, + &dev_info); + if (ret != 0) + return; + + errno = 0; + vf_id = strtoul(res->pf_vf + 2, &end, 10); + if (errno != 0 || *end != '\0' || + vf_id >= dev_info.max_vfs) { + printf("invalid parameter %s.\n", res->pf_vf); + return; + } + entry.input.flow_ext.is_vf = 1; + 
entry.input.flow_ext.dst_id = (uint16_t)vf_id; + } else { + printf("invalid parameter %s.\n", res->pf_vf); + return; + } + } + + /* set to report FD ID by default */ + entry.action.report_status = RTE_ETH_FDIR_REPORT_ID; + entry.action.rx_queue = res->queue_id; + entry.soft_id = res->fd_id_value; + if (!strcmp(res->ops, "add")) + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR, + RTE_ETH_FILTER_ADD, &entry); + else if (!strcmp(res->ops, "del")) + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR, + RTE_ETH_FILTER_DELETE, &entry); + else + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR, + RTE_ETH_FILTER_UPDATE, &entry); + if (ret < 0) + printf("flow director programming error: (%s)\n", + strerror(-ret)); +} + +cmdline_parse_token_string_t cmd_flow_director_filter = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + flow_director_filter, "flow_director_filter"); +cmdline_parse_token_num_t cmd_flow_director_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_flow_director_ops = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + ops, "add#del#update"); +cmdline_parse_token_string_t cmd_flow_director_flow = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + flow, "flow"); +cmdline_parse_token_string_t cmd_flow_director_flow_type = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + flow_type, NULL); +cmdline_parse_token_string_t cmd_flow_director_ether = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + ether, "ether"); +cmdline_parse_token_num_t cmd_flow_director_ether_type = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + ether_type, UINT16); +cmdline_parse_token_string_t cmd_flow_director_src = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + src, "src"); +cmdline_parse_token_ipaddr_t cmd_flow_director_ip_src = + TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_result, + ip_src); +cmdline_parse_token_num_t cmd_flow_director_port_src = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + port_src, UINT16); +cmdline_parse_token_string_t cmd_flow_director_dst = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + dst, "dst"); +cmdline_parse_token_ipaddr_t cmd_flow_director_ip_dst = + TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_result, + ip_dst); +cmdline_parse_token_num_t cmd_flow_director_port_dst = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + port_dst, UINT16); +cmdline_parse_token_string_t cmd_flow_director_verify_tag = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + verify_tag, "verify_tag"); +cmdline_parse_token_num_t cmd_flow_director_verify_tag_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + verify_tag_value, UINT32); +cmdline_parse_token_string_t cmd_flow_director_tos = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + tos, "tos"); +cmdline_parse_token_num_t cmd_flow_director_tos_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + tos_value, UINT8); +cmdline_parse_token_string_t cmd_flow_director_proto = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + proto, "proto"); +cmdline_parse_token_num_t cmd_flow_director_proto_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + proto_value, UINT8); +cmdline_parse_token_string_t cmd_flow_director_ttl = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + ttl, "ttl"); +cmdline_parse_token_num_t cmd_flow_director_ttl_value = 
+ TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + ttl_value, UINT8); +cmdline_parse_token_string_t cmd_flow_director_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + vlan, "vlan"); +cmdline_parse_token_num_t cmd_flow_director_vlan_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + vlan_value, UINT16); +cmdline_parse_token_string_t cmd_flow_director_flexbytes = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + flexbytes, "flexbytes"); +cmdline_parse_token_string_t cmd_flow_director_flexbytes_value = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + flexbytes_value, NULL); +cmdline_parse_token_string_t cmd_flow_director_drop = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + drop, "drop#fwd"); +cmdline_parse_token_string_t cmd_flow_director_pf_vf = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + pf_vf, NULL); +cmdline_parse_token_string_t cmd_flow_director_queue = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + queue, "queue"); +cmdline_parse_token_num_t cmd_flow_director_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + queue_id, UINT16); +cmdline_parse_token_string_t cmd_flow_director_fd_id = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + fd_id, "fd_id"); +cmdline_parse_token_num_t cmd_flow_director_fd_id_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + fd_id_value, UINT32); + +cmdline_parse_token_string_t cmd_flow_director_mode = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + mode, "mode"); +cmdline_parse_token_string_t cmd_flow_director_mode_ip = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + mode_value, "IP"); +cmdline_parse_token_string_t cmd_flow_director_mode_mac_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + mode_value, "MAC-VLAN"); +cmdline_parse_token_string_t cmd_flow_director_mode_tunnel = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + mode_value, "Tunnel"); +cmdline_parse_token_string_t cmd_flow_director_mode_raw = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + mode_value, "raw"); +cmdline_parse_token_string_t cmd_flow_director_mac = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + mac, "mac"); +cmdline_parse_token_etheraddr_t cmd_flow_director_mac_addr = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_flow_director_result, + mac_addr); +cmdline_parse_token_string_t cmd_flow_director_tunnel = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + tunnel, "tunnel"); +cmdline_parse_token_string_t cmd_flow_director_tunnel_type = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + tunnel_type, "NVGRE#VxLAN"); +cmdline_parse_token_string_t cmd_flow_director_tunnel_id = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + tunnel_id, "tunnel-id"); +cmdline_parse_token_num_t cmd_flow_director_tunnel_id_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_result, + tunnel_id_value, UINT32); +cmdline_parse_token_string_t cmd_flow_director_packet = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + packet, "packet"); +cmdline_parse_token_string_t cmd_flow_director_filepath = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_result, + filepath, NULL); + +cmdline_parse_inst_t cmd_add_del_ip_flow_director = { + .f = cmd_flow_director_filter_parsed, + .data = NULL, + .help_str = "flow_director_filter <port_id> mode IP add|del|update flow" + " ipv4-other|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|" + 
"ipv6-other|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|" + "l2_payload src <src_ip> dst <dst_ip> tos <tos_value> " + "proto <proto_value> ttl <ttl_value> vlan <vlan_value> " + "flexbytes <flexbyte_values> drop|fw <pf_vf> queue <queue_id> " + "fd_id <fd_id_value>: " + "Add or delete an ip flow director entry on NIC", + .tokens = { + (void *)&cmd_flow_director_filter, + (void *)&cmd_flow_director_port_id, + (void *)&cmd_flow_director_mode, + (void *)&cmd_flow_director_mode_ip, + (void *)&cmd_flow_director_ops, + (void *)&cmd_flow_director_flow, + (void *)&cmd_flow_director_flow_type, + (void *)&cmd_flow_director_src, + (void *)&cmd_flow_director_ip_src, + (void *)&cmd_flow_director_dst, + (void *)&cmd_flow_director_ip_dst, + (void *)&cmd_flow_director_tos, + (void *)&cmd_flow_director_tos_value, + (void *)&cmd_flow_director_proto, + (void *)&cmd_flow_director_proto_value, + (void *)&cmd_flow_director_ttl, + (void *)&cmd_flow_director_ttl_value, + (void *)&cmd_flow_director_vlan, + (void *)&cmd_flow_director_vlan_value, + (void *)&cmd_flow_director_flexbytes, + (void *)&cmd_flow_director_flexbytes_value, + (void *)&cmd_flow_director_drop, + (void *)&cmd_flow_director_pf_vf, + (void *)&cmd_flow_director_queue, + (void *)&cmd_flow_director_queue_id, + (void *)&cmd_flow_director_fd_id, + (void *)&cmd_flow_director_fd_id_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_add_del_udp_flow_director = { + .f = cmd_flow_director_filter_parsed, + .data = NULL, + .help_str = "flow_director_filter ... : Add or delete an udp/tcp flow " + "director entry on NIC", + .tokens = { + (void *)&cmd_flow_director_filter, + (void *)&cmd_flow_director_port_id, + (void *)&cmd_flow_director_mode, + (void *)&cmd_flow_director_mode_ip, + (void *)&cmd_flow_director_ops, + (void *)&cmd_flow_director_flow, + (void *)&cmd_flow_director_flow_type, + (void *)&cmd_flow_director_src, + (void *)&cmd_flow_director_ip_src, + (void *)&cmd_flow_director_port_src, + (void *)&cmd_flow_director_dst, + (void *)&cmd_flow_director_ip_dst, + (void *)&cmd_flow_director_port_dst, + (void *)&cmd_flow_director_tos, + (void *)&cmd_flow_director_tos_value, + (void *)&cmd_flow_director_ttl, + (void *)&cmd_flow_director_ttl_value, + (void *)&cmd_flow_director_vlan, + (void *)&cmd_flow_director_vlan_value, + (void *)&cmd_flow_director_flexbytes, + (void *)&cmd_flow_director_flexbytes_value, + (void *)&cmd_flow_director_drop, + (void *)&cmd_flow_director_pf_vf, + (void *)&cmd_flow_director_queue, + (void *)&cmd_flow_director_queue_id, + (void *)&cmd_flow_director_fd_id, + (void *)&cmd_flow_director_fd_id_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_add_del_sctp_flow_director = { + .f = cmd_flow_director_filter_parsed, + .data = NULL, + .help_str = "flow_director_filter ... 
: Add or delete a sctp flow " + "director entry on NIC", + .tokens = { + (void *)&cmd_flow_director_filter, + (void *)&cmd_flow_director_port_id, + (void *)&cmd_flow_director_mode, + (void *)&cmd_flow_director_mode_ip, + (void *)&cmd_flow_director_ops, + (void *)&cmd_flow_director_flow, + (void *)&cmd_flow_director_flow_type, + (void *)&cmd_flow_director_src, + (void *)&cmd_flow_director_ip_src, + (void *)&cmd_flow_director_port_src, + (void *)&cmd_flow_director_dst, + (void *)&cmd_flow_director_ip_dst, + (void *)&cmd_flow_director_port_dst, + (void *)&cmd_flow_director_verify_tag, + (void *)&cmd_flow_director_verify_tag_value, + (void *)&cmd_flow_director_tos, + (void *)&cmd_flow_director_tos_value, + (void *)&cmd_flow_director_ttl, + (void *)&cmd_flow_director_ttl_value, + (void *)&cmd_flow_director_vlan, + (void *)&cmd_flow_director_vlan_value, + (void *)&cmd_flow_director_flexbytes, + (void *)&cmd_flow_director_flexbytes_value, + (void *)&cmd_flow_director_drop, + (void *)&cmd_flow_director_pf_vf, + (void *)&cmd_flow_director_queue, + (void *)&cmd_flow_director_queue_id, + (void *)&cmd_flow_director_fd_id, + (void *)&cmd_flow_director_fd_id_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_add_del_l2_flow_director = { + .f = cmd_flow_director_filter_parsed, + .data = NULL, + .help_str = "flow_director_filter ... : Add or delete a L2 flow " + "director entry on NIC", + .tokens = { + (void *)&cmd_flow_director_filter, + (void *)&cmd_flow_director_port_id, + (void *)&cmd_flow_director_mode, + (void *)&cmd_flow_director_mode_ip, + (void *)&cmd_flow_director_ops, + (void *)&cmd_flow_director_flow, + (void *)&cmd_flow_director_flow_type, + (void *)&cmd_flow_director_ether, + (void *)&cmd_flow_director_ether_type, + (void *)&cmd_flow_director_flexbytes, + (void *)&cmd_flow_director_flexbytes_value, + (void *)&cmd_flow_director_drop, + (void *)&cmd_flow_director_pf_vf, + (void *)&cmd_flow_director_queue, + (void *)&cmd_flow_director_queue_id, + (void *)&cmd_flow_director_fd_id, + (void *)&cmd_flow_director_fd_id_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_add_del_mac_vlan_flow_director = { + .f = cmd_flow_director_filter_parsed, + .data = NULL, + .help_str = "flow_director_filter ... : Add or delete a MAC VLAN flow " + "director entry on NIC", + .tokens = { + (void *)&cmd_flow_director_filter, + (void *)&cmd_flow_director_port_id, + (void *)&cmd_flow_director_mode, + (void *)&cmd_flow_director_mode_mac_vlan, + (void *)&cmd_flow_director_ops, + (void *)&cmd_flow_director_mac, + (void *)&cmd_flow_director_mac_addr, + (void *)&cmd_flow_director_vlan, + (void *)&cmd_flow_director_vlan_value, + (void *)&cmd_flow_director_flexbytes, + (void *)&cmd_flow_director_flexbytes_value, + (void *)&cmd_flow_director_drop, + (void *)&cmd_flow_director_queue, + (void *)&cmd_flow_director_queue_id, + (void *)&cmd_flow_director_fd_id, + (void *)&cmd_flow_director_fd_id_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_add_del_tunnel_flow_director = { + .f = cmd_flow_director_filter_parsed, + .data = NULL, + .help_str = "flow_director_filter ... 
: Add or delete a tunnel flow " + "director entry on NIC", + .tokens = { + (void *)&cmd_flow_director_filter, + (void *)&cmd_flow_director_port_id, + (void *)&cmd_flow_director_mode, + (void *)&cmd_flow_director_mode_tunnel, + (void *)&cmd_flow_director_ops, + (void *)&cmd_flow_director_mac, + (void *)&cmd_flow_director_mac_addr, + (void *)&cmd_flow_director_vlan, + (void *)&cmd_flow_director_vlan_value, + (void *)&cmd_flow_director_tunnel, + (void *)&cmd_flow_director_tunnel_type, + (void *)&cmd_flow_director_tunnel_id, + (void *)&cmd_flow_director_tunnel_id_value, + (void *)&cmd_flow_director_flexbytes, + (void *)&cmd_flow_director_flexbytes_value, + (void *)&cmd_flow_director_drop, + (void *)&cmd_flow_director_queue, + (void *)&cmd_flow_director_queue_id, + (void *)&cmd_flow_director_fd_id, + (void *)&cmd_flow_director_fd_id_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_add_del_raw_flow_director = { + .f = cmd_flow_director_filter_parsed, + .data = NULL, + .help_str = "flow_director_filter ... : Add or delete a raw flow " + "director entry on NIC", + .tokens = { + (void *)&cmd_flow_director_filter, + (void *)&cmd_flow_director_port_id, + (void *)&cmd_flow_director_mode, + (void *)&cmd_flow_director_mode_raw, + (void *)&cmd_flow_director_ops, + (void *)&cmd_flow_director_flow, + (void *)&cmd_flow_director_flow_type, + (void *)&cmd_flow_director_drop, + (void *)&cmd_flow_director_queue, + (void *)&cmd_flow_director_queue_id, + (void *)&cmd_flow_director_fd_id, + (void *)&cmd_flow_director_fd_id_value, + (void *)&cmd_flow_director_packet, + (void *)&cmd_flow_director_filepath, + NULL, + }, +}; + +struct cmd_flush_flow_director_result { + cmdline_fixed_string_t flush_flow_director; + portid_t port_id; +}; + +cmdline_parse_token_string_t cmd_flush_flow_director_flush = + TOKEN_STRING_INITIALIZER(struct cmd_flush_flow_director_result, + flush_flow_director, "flush_flow_director"); +cmdline_parse_token_num_t cmd_flush_flow_director_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_flush_flow_director_result, + port_id, UINT16); + +static void +cmd_flush_flow_director_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_flow_director_result *res = parsed_result; + int ret = 0; + + ret = rte_eth_dev_filter_supported(res->port_id, RTE_ETH_FILTER_FDIR); + if (ret < 0) { + printf("flow director is not supported on port %u.\n", + res->port_id); + return; + } + + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR, + RTE_ETH_FILTER_FLUSH, NULL); + if (ret < 0) + printf("flow director table flushing error: (%s)\n", + strerror(-ret)); +} + +cmdline_parse_inst_t cmd_flush_flow_director = { + .f = cmd_flush_flow_director_parsed, + .data = NULL, + .help_str = "flush_flow_director <port_id>: " + "Flush all flow director entries of a device on NIC", + .tokens = { + (void *)&cmd_flush_flow_director_flush, + (void *)&cmd_flush_flow_director_port_id, + NULL, + }, +}; + +/* *** deal with flow director mask *** */ +struct cmd_flow_director_mask_result { + cmdline_fixed_string_t flow_director_mask; + portid_t port_id; + cmdline_fixed_string_t mode; + cmdline_fixed_string_t mode_value; + cmdline_fixed_string_t vlan; + uint16_t vlan_mask; + cmdline_fixed_string_t src_mask; + cmdline_ipaddr_t ipv4_src; + cmdline_ipaddr_t ipv6_src; + uint16_t port_src; + cmdline_fixed_string_t dst_mask; + cmdline_ipaddr_t ipv4_dst; + cmdline_ipaddr_t ipv6_dst; + uint16_t port_dst; + cmdline_fixed_string_t mac; + uint8_t mac_addr_byte_mask; + cmdline_fixed_string_t 
tunnel_id; + uint32_t tunnel_id_mask; + cmdline_fixed_string_t tunnel_type; + uint8_t tunnel_type_mask; +}; + +static void +cmd_flow_director_mask_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_flow_director_mask_result *res = parsed_result; + struct rte_eth_fdir_masks *mask; + struct rte_port *port; + + port = &ports[res->port_id]; + /** Check if the port is not started **/ + if (port->port_status != RTE_PORT_STOPPED) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + mask = &port->dev_conf.fdir_conf.mask; + + if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + if (strcmp(res->mode_value, "MAC-VLAN")) { + printf("Please set mode to MAC-VLAN.\n"); + return; + } + + mask->vlan_tci_mask = rte_cpu_to_be_16(res->vlan_mask); + } else if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) { + if (strcmp(res->mode_value, "Tunnel")) { + printf("Please set mode to Tunnel.\n"); + return; + } + + mask->vlan_tci_mask = rte_cpu_to_be_16(res->vlan_mask); + mask->mac_addr_byte_mask = res->mac_addr_byte_mask; + mask->tunnel_id_mask = rte_cpu_to_be_32(res->tunnel_id_mask); + mask->tunnel_type_mask = res->tunnel_type_mask; + } else { + if (strcmp(res->mode_value, "IP")) { + printf("Please set mode to IP.\n"); + return; + } + + mask->vlan_tci_mask = rte_cpu_to_be_16(res->vlan_mask); + IPV4_ADDR_TO_UINT(res->ipv4_src, mask->ipv4_mask.src_ip); + IPV4_ADDR_TO_UINT(res->ipv4_dst, mask->ipv4_mask.dst_ip); + IPV6_ADDR_TO_ARRAY(res->ipv6_src, mask->ipv6_mask.src_ip); + IPV6_ADDR_TO_ARRAY(res->ipv6_dst, mask->ipv6_mask.dst_ip); + mask->src_port_mask = rte_cpu_to_be_16(res->port_src); + mask->dst_port_mask = rte_cpu_to_be_16(res->port_dst); + } + + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_flow_director_mask = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + flow_director_mask, "flow_director_mask"); +cmdline_parse_token_num_t cmd_flow_director_mask_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_flow_director_mask_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + vlan, "vlan"); +cmdline_parse_token_num_t cmd_flow_director_mask_vlan_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result, + vlan_mask, UINT16); +cmdline_parse_token_string_t cmd_flow_director_mask_src = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + src_mask, "src_mask"); +cmdline_parse_token_ipaddr_t cmd_flow_director_mask_ipv4_src = + TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_mask_result, + ipv4_src); +cmdline_parse_token_ipaddr_t cmd_flow_director_mask_ipv6_src = + TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_mask_result, + ipv6_src); +cmdline_parse_token_num_t cmd_flow_director_mask_port_src = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result, + port_src, UINT16); +cmdline_parse_token_string_t cmd_flow_director_mask_dst = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + dst_mask, "dst_mask"); +cmdline_parse_token_ipaddr_t cmd_flow_director_mask_ipv4_dst = + TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_mask_result, + ipv4_dst); +cmdline_parse_token_ipaddr_t cmd_flow_director_mask_ipv6_dst = + TOKEN_IPADDR_INITIALIZER(struct cmd_flow_director_mask_result, + ipv6_dst); +cmdline_parse_token_num_t cmd_flow_director_mask_port_dst = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result, + port_dst, UINT16); + 
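+/*
+ * Illustrative IP-mode invocation assembled from the flow_director_mask
+ * tokens defined here (mask values are examples only; entered as a single
+ * testpmd command line):
+ *   flow_director_mask 0 mode IP vlan 0xefff
+ *     src_mask 255.255.255.255 FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF 0xFFFF
+ *     dst_mask 255.255.255.255 FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF 0xFFFF
+ */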
+cmdline_parse_token_string_t cmd_flow_director_mask_mode = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + mode, "mode"); +cmdline_parse_token_string_t cmd_flow_director_mask_mode_ip = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + mode_value, "IP"); +cmdline_parse_token_string_t cmd_flow_director_mask_mode_mac_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + mode_value, "MAC-VLAN"); +cmdline_parse_token_string_t cmd_flow_director_mask_mode_tunnel = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + mode_value, "Tunnel"); +cmdline_parse_token_string_t cmd_flow_director_mask_mac = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + mac, "mac"); +cmdline_parse_token_num_t cmd_flow_director_mask_mac_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result, + mac_addr_byte_mask, UINT8); +cmdline_parse_token_string_t cmd_flow_director_mask_tunnel_type = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + tunnel_type, "tunnel-type"); +cmdline_parse_token_num_t cmd_flow_director_mask_tunnel_type_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result, + tunnel_type_mask, UINT8); +cmdline_parse_token_string_t cmd_flow_director_mask_tunnel_id = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_mask_result, + tunnel_id, "tunnel-id"); +cmdline_parse_token_num_t cmd_flow_director_mask_tunnel_id_value = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_mask_result, + tunnel_id_mask, UINT32); + +cmdline_parse_inst_t cmd_set_flow_director_ip_mask = { + .f = cmd_flow_director_mask_parsed, + .data = NULL, + .help_str = "flow_director_mask ... : " + "Set IP mode flow director's mask on NIC", + .tokens = { + (void *)&cmd_flow_director_mask, + (void *)&cmd_flow_director_mask_port_id, + (void *)&cmd_flow_director_mask_mode, + (void *)&cmd_flow_director_mask_mode_ip, + (void *)&cmd_flow_director_mask_vlan, + (void *)&cmd_flow_director_mask_vlan_value, + (void *)&cmd_flow_director_mask_src, + (void *)&cmd_flow_director_mask_ipv4_src, + (void *)&cmd_flow_director_mask_ipv6_src, + (void *)&cmd_flow_director_mask_port_src, + (void *)&cmd_flow_director_mask_dst, + (void *)&cmd_flow_director_mask_ipv4_dst, + (void *)&cmd_flow_director_mask_ipv6_dst, + (void *)&cmd_flow_director_mask_port_dst, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_flow_director_mac_vlan_mask = { + .f = cmd_flow_director_mask_parsed, + .data = NULL, + .help_str = "flow_director_mask ... : Set MAC VLAN mode " + "flow director's mask on NIC", + .tokens = { + (void *)&cmd_flow_director_mask, + (void *)&cmd_flow_director_mask_port_id, + (void *)&cmd_flow_director_mask_mode, + (void *)&cmd_flow_director_mask_mode_mac_vlan, + (void *)&cmd_flow_director_mask_vlan, + (void *)&cmd_flow_director_mask_vlan_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_flow_director_tunnel_mask = { + .f = cmd_flow_director_mask_parsed, + .data = NULL, + .help_str = "flow_director_mask ... 
: Set tunnel mode " + "flow director's mask on NIC", + .tokens = { + (void *)&cmd_flow_director_mask, + (void *)&cmd_flow_director_mask_port_id, + (void *)&cmd_flow_director_mask_mode, + (void *)&cmd_flow_director_mask_mode_tunnel, + (void *)&cmd_flow_director_mask_vlan, + (void *)&cmd_flow_director_mask_vlan_value, + (void *)&cmd_flow_director_mask_mac, + (void *)&cmd_flow_director_mask_mac_value, + (void *)&cmd_flow_director_mask_tunnel_type, + (void *)&cmd_flow_director_mask_tunnel_type_value, + (void *)&cmd_flow_director_mask_tunnel_id, + (void *)&cmd_flow_director_mask_tunnel_id_value, + NULL, + }, +}; + +/* *** deal with flow director mask on flexible payload *** */ +struct cmd_flow_director_flex_mask_result { + cmdline_fixed_string_t flow_director_flexmask; + portid_t port_id; + cmdline_fixed_string_t flow; + cmdline_fixed_string_t flow_type; + cmdline_fixed_string_t mask; +}; + +static void +cmd_flow_director_flex_mask_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_flow_director_flex_mask_result *res = parsed_result; + struct rte_eth_fdir_info fdir_info; + struct rte_eth_fdir_flex_mask flex_mask; + struct rte_port *port; + uint64_t flow_type_mask; + uint16_t i; + int ret; + + port = &ports[res->port_id]; + /** Check if the port is not started **/ + if (port->port_status != RTE_PORT_STOPPED) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + memset(&flex_mask, 0, sizeof(struct rte_eth_fdir_flex_mask)); + ret = parse_flexbytes(res->mask, + flex_mask.mask, + RTE_ETH_FDIR_MAX_FLEXLEN); + if (ret < 0) { + printf("error: Cannot parse mask input.\n"); + return; + } + + memset(&fdir_info, 0, sizeof(fdir_info)); + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR, + RTE_ETH_FILTER_INFO, &fdir_info); + if (ret < 0) { + printf("Cannot get FDir filter info\n"); + return; + } + + if (!strcmp(res->flow_type, "none")) { + /* means don't specify the flow type */ + flex_mask.flow_type = RTE_ETH_FLOW_UNKNOWN; + for (i = 0; i < RTE_ETH_FLOW_MAX; i++) + memset(&port->dev_conf.fdir_conf.flex_conf.flex_mask[i], + 0, sizeof(struct rte_eth_fdir_flex_mask)); + port->dev_conf.fdir_conf.flex_conf.nb_flexmasks = 1; + rte_memcpy(&port->dev_conf.fdir_conf.flex_conf.flex_mask[0], + &flex_mask, + sizeof(struct rte_eth_fdir_flex_mask)); + cmd_reconfig_device_queue(res->port_id, 1, 1); + return; + } + flow_type_mask = fdir_info.flow_types_mask[0]; + if (!strcmp(res->flow_type, "all")) { + if (!flow_type_mask) { + printf("No flow type supported\n"); + return; + } + for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { + if (flow_type_mask & (1ULL << i)) { + flex_mask.flow_type = i; + fdir_set_flex_mask(res->port_id, &flex_mask); + } + } + cmd_reconfig_device_queue(res->port_id, 1, 1); + return; + } + flex_mask.flow_type = str2flowtype(res->flow_type); + if (!(flow_type_mask & (1ULL << flex_mask.flow_type))) { + printf("Flow type %s not supported on port %d\n", + res->flow_type, res->port_id); + return; + } + fdir_set_flex_mask(res->port_id, &flex_mask); + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_flow_director_flexmask = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flex_mask_result, + flow_director_flexmask, + "flow_director_flex_mask"); +cmdline_parse_token_num_t cmd_flow_director_flexmask_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_flex_mask_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_flow_director_flexmask_flow = + 
TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flex_mask_result, + flow, "flow"); +cmdline_parse_token_string_t cmd_flow_director_flexmask_flow_type = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flex_mask_result, + flow_type, "none#ipv4-other#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#" + "ipv6-other#ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#l2_payload#all"); +cmdline_parse_token_string_t cmd_flow_director_flexmask_mask = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flex_mask_result, + mask, NULL); + +cmdline_parse_inst_t cmd_set_flow_director_flex_mask = { + .f = cmd_flow_director_flex_mask_parsed, + .data = NULL, + .help_str = "flow_director_flex_mask ... : " + "Set flow director's flex mask on NIC", + .tokens = { + (void *)&cmd_flow_director_flexmask, + (void *)&cmd_flow_director_flexmask_port_id, + (void *)&cmd_flow_director_flexmask_flow, + (void *)&cmd_flow_director_flexmask_flow_type, + (void *)&cmd_flow_director_flexmask_mask, + NULL, + }, +}; + +/* *** deal with flow director flexible payload configuration *** */ +struct cmd_flow_director_flexpayload_result { + cmdline_fixed_string_t flow_director_flexpayload; + portid_t port_id; + cmdline_fixed_string_t payload_layer; + cmdline_fixed_string_t payload_cfg; +}; + +static inline int +parse_offsets(const char *q_arg, uint16_t *offsets, uint16_t max_num) +{ + char s[256]; + const char *p, *p0 = q_arg; + char *end; + unsigned long int_fld; + char *str_fld[max_num]; + int i; + unsigned size; + int ret = -1; + + p = strchr(p0, '('); + if (p == NULL) + return -1; + ++p; + p0 = strchr(p, ')'); + if (p0 == NULL) + return -1; + + size = p0 - p; + if (size >= sizeof(s)) + return -1; + + snprintf(s, sizeof(s), "%.*s", size, p); + ret = rte_strsplit(s, sizeof(s), str_fld, max_num, ','); + if (ret < 0 || ret > max_num) + return -1; + for (i = 0; i < ret; i++) { + errno = 0; + int_fld = strtoul(str_fld[i], &end, 0); + if (errno != 0 || *end != '\0' || int_fld > UINT16_MAX) + return -1; + offsets[i] = (uint16_t)int_fld; + } + return ret; +} + +static void +cmd_flow_director_flxpld_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_flow_director_flexpayload_result *res = parsed_result; + struct rte_eth_flex_payload_cfg flex_cfg; + struct rte_port *port; + int ret = 0; + + port = &ports[res->port_id]; + /** Check if the port is not started **/ + if (port->port_status != RTE_PORT_STOPPED) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + memset(&flex_cfg, 0, sizeof(struct rte_eth_flex_payload_cfg)); + + if (!strcmp(res->payload_layer, "raw")) + flex_cfg.type = RTE_ETH_RAW_PAYLOAD; + else if (!strcmp(res->payload_layer, "l2")) + flex_cfg.type = RTE_ETH_L2_PAYLOAD; + else if (!strcmp(res->payload_layer, "l3")) + flex_cfg.type = RTE_ETH_L3_PAYLOAD; + else if (!strcmp(res->payload_layer, "l4")) + flex_cfg.type = RTE_ETH_L4_PAYLOAD; + + ret = parse_offsets(res->payload_cfg, flex_cfg.src_offset, + RTE_ETH_FDIR_MAX_FLEXLEN); + if (ret < 0) { + printf("error: Cannot parse flex payload input.\n"); + return; + } + + fdir_set_flex_payload(res->port_id, &flex_cfg); + cmd_reconfig_device_queue(res->port_id, 1, 1); +} + +cmdline_parse_token_string_t cmd_flow_director_flexpayload = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flexpayload_result, + flow_director_flexpayload, + "flow_director_flex_payload"); +cmdline_parse_token_num_t cmd_flow_director_flexpayload_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_flow_director_flexpayload_result, + port_id, UINT16); 
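+/*
+ * Illustrative invocation (example offsets only): select the first 16 bytes
+ * of the L4 payload as the flexible payload; the parenthesized offset list
+ * is parsed by parse_offsets() above:
+ *   flow_director_flex_payload 0 l4 (0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)
+ */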
+cmdline_parse_token_string_t cmd_flow_director_flexpayload_payload_layer = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flexpayload_result, + payload_layer, "raw#l2#l3#l4"); +cmdline_parse_token_string_t cmd_flow_director_flexpayload_payload_cfg = + TOKEN_STRING_INITIALIZER(struct cmd_flow_director_flexpayload_result, + payload_cfg, NULL); + +cmdline_parse_inst_t cmd_set_flow_director_flex_payload = { + .f = cmd_flow_director_flxpld_parsed, + .data = NULL, + .help_str = "flow_director_flexpayload ... : " + "Set flow director's flex payload on NIC", + .tokens = { + (void *)&cmd_flow_director_flexpayload, + (void *)&cmd_flow_director_flexpayload_port_id, + (void *)&cmd_flow_director_flexpayload_payload_layer, + (void *)&cmd_flow_director_flexpayload_payload_cfg, + NULL, + }, +}; + +/* Generic flow interface command. */ +extern cmdline_parse_inst_t cmd_flow; + +/* *** Classification Filters Control *** */ +/* *** Get symmetric hash enable per port *** */ +struct cmd_get_sym_hash_ena_per_port_result { + cmdline_fixed_string_t get_sym_hash_ena_per_port; + portid_t port_id; +}; + +static void +cmd_get_sym_hash_per_port_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_get_sym_hash_ena_per_port_result *res = parsed_result; + struct rte_eth_hash_filter_info info; + int ret; + + if (rte_eth_dev_filter_supported(res->port_id, + RTE_ETH_FILTER_HASH) < 0) { + printf("RTE_ETH_FILTER_HASH not supported on port: %d\n", + res->port_id); + return; + } + + memset(&info, 0, sizeof(info)); + info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT; + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH, + RTE_ETH_FILTER_GET, &info); + + if (ret < 0) { + printf("Cannot get symmetric hash enable per port " + "on port %u\n", res->port_id); + return; + } + + printf("Symmetric hash is %s on port %u\n", info.info.enable ? 
+ "enabled" : "disabled", res->port_id); +} + +cmdline_parse_token_string_t cmd_get_sym_hash_ena_per_port_all = + TOKEN_STRING_INITIALIZER(struct cmd_get_sym_hash_ena_per_port_result, + get_sym_hash_ena_per_port, "get_sym_hash_ena_per_port"); +cmdline_parse_token_num_t cmd_get_sym_hash_ena_per_port_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_get_sym_hash_ena_per_port_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_get_sym_hash_ena_per_port = { + .f = cmd_get_sym_hash_per_port_parsed, + .data = NULL, + .help_str = "get_sym_hash_ena_per_port <port_id>", + .tokens = { + (void *)&cmd_get_sym_hash_ena_per_port_all, + (void *)&cmd_get_sym_hash_ena_per_port_port_id, + NULL, + }, +}; + +/* *** Set symmetric hash enable per port *** */ +struct cmd_set_sym_hash_ena_per_port_result { + cmdline_fixed_string_t set_sym_hash_ena_per_port; + cmdline_fixed_string_t enable; + portid_t port_id; +}; + +static void +cmd_set_sym_hash_per_port_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_sym_hash_ena_per_port_result *res = parsed_result; + struct rte_eth_hash_filter_info info; + int ret; + + if (rte_eth_dev_filter_supported(res->port_id, + RTE_ETH_FILTER_HASH) < 0) { + printf("RTE_ETH_FILTER_HASH not supported on port: %d\n", + res->port_id); + return; + } + + memset(&info, 0, sizeof(info)); + info.info_type = RTE_ETH_HASH_FILTER_SYM_HASH_ENA_PER_PORT; + if (!strcmp(res->enable, "enable")) + info.info.enable = 1; + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH, + RTE_ETH_FILTER_SET, &info); + if (ret < 0) { + printf("Cannot set symmetric hash enable per port on " + "port %u\n", res->port_id); + return; + } + printf("Symmetric hash has been set to %s on port %u\n", + res->enable, res->port_id); +} + +cmdline_parse_token_string_t cmd_set_sym_hash_ena_per_port_all = + TOKEN_STRING_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result, + set_sym_hash_ena_per_port, "set_sym_hash_ena_per_port"); +cmdline_parse_token_num_t cmd_set_sym_hash_ena_per_port_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_set_sym_hash_ena_per_port_enable = + TOKEN_STRING_INITIALIZER(struct cmd_set_sym_hash_ena_per_port_result, + enable, "enable#disable"); + +cmdline_parse_inst_t cmd_set_sym_hash_ena_per_port = { + .f = cmd_set_sym_hash_per_port_parsed, + .data = NULL, + .help_str = "set_sym_hash_ena_per_port <port_id> enable|disable", + .tokens = { + (void *)&cmd_set_sym_hash_ena_per_port_all, + (void *)&cmd_set_sym_hash_ena_per_port_port_id, + (void *)&cmd_set_sym_hash_ena_per_port_enable, + NULL, + }, +}; + +/* Get global config of hash function */ +struct cmd_get_hash_global_config_result { + cmdline_fixed_string_t get_hash_global_config; + portid_t port_id; +}; + +static char * +flowtype_to_str(uint16_t ftype) +{ + uint16_t i; + static struct { + char str[16]; + uint16_t ftype; + } ftype_table[] = { + {"ipv4", RTE_ETH_FLOW_IPV4}, + {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, + {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, + {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, + {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, + {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, + {"ipv6", RTE_ETH_FLOW_IPV6}, + {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, + {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, + {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, + {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, + {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, + {"l2_payload", 
RTE_ETH_FLOW_L2_PAYLOAD}, + {"port", RTE_ETH_FLOW_PORT}, + {"vxlan", RTE_ETH_FLOW_VXLAN}, + {"geneve", RTE_ETH_FLOW_GENEVE}, + {"nvgre", RTE_ETH_FLOW_NVGRE}, + {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, + }; + + for (i = 0; i < RTE_DIM(ftype_table); i++) { + if (ftype_table[i].ftype == ftype) + return ftype_table[i].str; + } + + return NULL; +} + +static void +cmd_get_hash_global_config_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_get_hash_global_config_result *res = parsed_result; + struct rte_eth_hash_filter_info info; + uint32_t idx, offset; + uint16_t i; + char *str; + int ret; + + if (rte_eth_dev_filter_supported(res->port_id, + RTE_ETH_FILTER_HASH) < 0) { + printf("RTE_ETH_FILTER_HASH not supported on port %d\n", + res->port_id); + return; + } + + memset(&info, 0, sizeof(info)); + info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG; + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH, + RTE_ETH_FILTER_GET, &info); + if (ret < 0) { + printf("Cannot get hash global configurations by port %d\n", + res->port_id); + return; + } + + switch (info.info.global_conf.hash_func) { + case RTE_ETH_HASH_FUNCTION_TOEPLITZ: + printf("Hash function is Toeplitz\n"); + break; + case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR: + printf("Hash function is Simple XOR\n"); + break; + case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ: + printf("Hash function is Symmetric Toeplitz\n"); + break; + default: + printf("Unknown hash function\n"); + break; + } + + for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { + idx = i / UINT64_BIT; + offset = i % UINT64_BIT; + if (!(info.info.global_conf.valid_bit_mask[idx] & + (1ULL << offset))) + continue; + str = flowtype_to_str(i); + if (!str) + continue; + printf("Symmetric hash is %s globally for flow type %s " + "by port %d\n", + ((info.info.global_conf.sym_hash_enable_mask[idx] & + (1ULL << offset)) ? 
"enabled" : "disabled"), str, + res->port_id); + } +} + +cmdline_parse_token_string_t cmd_get_hash_global_config_all = + TOKEN_STRING_INITIALIZER(struct cmd_get_hash_global_config_result, + get_hash_global_config, "get_hash_global_config"); +cmdline_parse_token_num_t cmd_get_hash_global_config_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_get_hash_global_config_result, + port_id, UINT16); + +cmdline_parse_inst_t cmd_get_hash_global_config = { + .f = cmd_get_hash_global_config_parsed, + .data = NULL, + .help_str = "get_hash_global_config <port_id>", + .tokens = { + (void *)&cmd_get_hash_global_config_all, + (void *)&cmd_get_hash_global_config_port_id, + NULL, + }, +}; + +/* Set global config of hash function */ +struct cmd_set_hash_global_config_result { + cmdline_fixed_string_t set_hash_global_config; + portid_t port_id; + cmdline_fixed_string_t hash_func; + cmdline_fixed_string_t flow_type; + cmdline_fixed_string_t enable; +}; + +static void +cmd_set_hash_global_config_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_hash_global_config_result *res = parsed_result; + struct rte_eth_hash_filter_info info; + uint32_t ftype, idx, offset; + int ret; + + if (rte_eth_dev_filter_supported(res->port_id, + RTE_ETH_FILTER_HASH) < 0) { + printf("RTE_ETH_FILTER_HASH not supported on port %d\n", + res->port_id); + return; + } + memset(&info, 0, sizeof(info)); + info.info_type = RTE_ETH_HASH_FILTER_GLOBAL_CONFIG; + if (!strcmp(res->hash_func, "toeplitz")) + info.info.global_conf.hash_func = + RTE_ETH_HASH_FUNCTION_TOEPLITZ; + else if (!strcmp(res->hash_func, "simple_xor")) + info.info.global_conf.hash_func = + RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; + else if (!strcmp(res->hash_func, "symmetric_toeplitz")) + info.info.global_conf.hash_func = + RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ; + else if (!strcmp(res->hash_func, "default")) + info.info.global_conf.hash_func = + RTE_ETH_HASH_FUNCTION_DEFAULT; + + ftype = str2flowtype(res->flow_type); + idx = ftype / UINT64_BIT; + offset = ftype % UINT64_BIT; + info.info.global_conf.valid_bit_mask[idx] |= (1ULL << offset); + if (!strcmp(res->enable, "enable")) + info.info.global_conf.sym_hash_enable_mask[idx] |= + (1ULL << offset); + ret = rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH, + RTE_ETH_FILTER_SET, &info); + if (ret < 0) + printf("Cannot set global hash configurations by port %d\n", + res->port_id); + else + printf("Global hash configurations have been set " + "successfully by port %d\n", res->port_id); +} + +cmdline_parse_token_string_t cmd_set_hash_global_config_all = + TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result, + set_hash_global_config, "set_hash_global_config"); +cmdline_parse_token_num_t cmd_set_hash_global_config_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_hash_global_config_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_set_hash_global_config_hash_func = + TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result, + hash_func, "toeplitz#simple_xor#symmetric_toeplitz#default"); +cmdline_parse_token_string_t cmd_set_hash_global_config_flow_type = + TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result, + flow_type, + "ipv4#ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#ipv4-other#ipv6#" + "ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#ipv6-other#l2_payload"); +cmdline_parse_token_string_t cmd_set_hash_global_config_enable = + TOKEN_STRING_INITIALIZER(struct cmd_set_hash_global_config_result, + enable, "enable#disable"); + 
+cmdline_parse_inst_t cmd_set_hash_global_config = { + .f = cmd_set_hash_global_config_parsed, + .data = NULL, + .help_str = "set_hash_global_config <port_id> " + "toeplitz|simple_xor|symmetric_toeplitz|default " + "ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|" + "ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|" + "l2_payload enable|disable", + .tokens = { + (void *)&cmd_set_hash_global_config_all, + (void *)&cmd_set_hash_global_config_port_id, + (void *)&cmd_set_hash_global_config_hash_func, + (void *)&cmd_set_hash_global_config_flow_type, + (void *)&cmd_set_hash_global_config_enable, + NULL, + }, +}; + +/* Set hash input set */ +struct cmd_set_hash_input_set_result { + cmdline_fixed_string_t set_hash_input_set; + portid_t port_id; + cmdline_fixed_string_t flow_type; + cmdline_fixed_string_t inset_field; + cmdline_fixed_string_t select; +}; + +static enum rte_eth_input_set_field +str2inset(char *string) +{ + uint16_t i; + + static const struct { + char str[32]; + enum rte_eth_input_set_field inset; + } inset_table[] = { + {"ethertype", RTE_ETH_INPUT_SET_L2_ETHERTYPE}, + {"ovlan", RTE_ETH_INPUT_SET_L2_OUTER_VLAN}, + {"ivlan", RTE_ETH_INPUT_SET_L2_INNER_VLAN}, + {"src-ipv4", RTE_ETH_INPUT_SET_L3_SRC_IP4}, + {"dst-ipv4", RTE_ETH_INPUT_SET_L3_DST_IP4}, + {"ipv4-tos", RTE_ETH_INPUT_SET_L3_IP4_TOS}, + {"ipv4-proto", RTE_ETH_INPUT_SET_L3_IP4_PROTO}, + {"ipv4-ttl", RTE_ETH_INPUT_SET_L3_IP4_TTL}, + {"src-ipv6", RTE_ETH_INPUT_SET_L3_SRC_IP6}, + {"dst-ipv6", RTE_ETH_INPUT_SET_L3_DST_IP6}, + {"ipv6-tc", RTE_ETH_INPUT_SET_L3_IP6_TC}, + {"ipv6-next-header", RTE_ETH_INPUT_SET_L3_IP6_NEXT_HEADER}, + {"ipv6-hop-limits", RTE_ETH_INPUT_SET_L3_IP6_HOP_LIMITS}, + {"udp-src-port", RTE_ETH_INPUT_SET_L4_UDP_SRC_PORT}, + {"udp-dst-port", RTE_ETH_INPUT_SET_L4_UDP_DST_PORT}, + {"tcp-src-port", RTE_ETH_INPUT_SET_L4_TCP_SRC_PORT}, + {"tcp-dst-port", RTE_ETH_INPUT_SET_L4_TCP_DST_PORT}, + {"sctp-src-port", RTE_ETH_INPUT_SET_L4_SCTP_SRC_PORT}, + {"sctp-dst-port", RTE_ETH_INPUT_SET_L4_SCTP_DST_PORT}, + {"sctp-veri-tag", RTE_ETH_INPUT_SET_L4_SCTP_VERIFICATION_TAG}, + {"udp-key", RTE_ETH_INPUT_SET_TUNNEL_L4_UDP_KEY}, + {"gre-key", RTE_ETH_INPUT_SET_TUNNEL_GRE_KEY}, + {"fld-1st", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_1ST_WORD}, + {"fld-2nd", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_2ND_WORD}, + {"fld-3rd", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_3RD_WORD}, + {"fld-4th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_4TH_WORD}, + {"fld-5th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_5TH_WORD}, + {"fld-6th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_6TH_WORD}, + {"fld-7th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_7TH_WORD}, + {"fld-8th", RTE_ETH_INPUT_SET_FLEX_PAYLOAD_8TH_WORD}, + {"none", RTE_ETH_INPUT_SET_NONE}, + }; + + for (i = 0; i < RTE_DIM(inset_table); i++) { + if (!strcmp(string, inset_table[i].str)) + return inset_table[i].inset; + } + + return RTE_ETH_INPUT_SET_UNKNOWN; +} + +static void +cmd_set_hash_input_set_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_hash_input_set_result *res = parsed_result; + struct rte_eth_hash_filter_info info; + + memset(&info, 0, sizeof(info)); + info.info_type = RTE_ETH_HASH_FILTER_INPUT_SET_SELECT; + info.info.input_set_conf.flow_type = str2flowtype(res->flow_type); + info.info.input_set_conf.field[0] = str2inset(res->inset_field); + info.info.input_set_conf.inset_size = 1; + if (!strcmp(res->select, "select")) + info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT; + else if (!strcmp(res->select, "add")) + info.info.input_set_conf.op = RTE_ETH_INPUT_SET_ADD; + 
rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_HASH, + RTE_ETH_FILTER_SET, &info); +} + +cmdline_parse_token_string_t cmd_set_hash_input_set_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result, + set_hash_input_set, "set_hash_input_set"); +cmdline_parse_token_num_t cmd_set_hash_input_set_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_hash_input_set_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_set_hash_input_set_flow_type = + TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result, + flow_type, NULL); +cmdline_parse_token_string_t cmd_set_hash_input_set_field = + TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result, + inset_field, + "ovlan#ivlan#src-ipv4#dst-ipv4#src-ipv6#dst-ipv6#" + "ipv4-tos#ipv4-proto#ipv6-tc#ipv6-next-header#udp-src-port#" + "udp-dst-port#tcp-src-port#tcp-dst-port#sctp-src-port#" + "sctp-dst-port#sctp-veri-tag#udp-key#gre-key#fld-1st#" + "fld-2nd#fld-3rd#fld-4th#fld-5th#fld-6th#fld-7th#" + "fld-8th#none"); +cmdline_parse_token_string_t cmd_set_hash_input_set_select = + TOKEN_STRING_INITIALIZER(struct cmd_set_hash_input_set_result, + select, "select#add"); + +cmdline_parse_inst_t cmd_set_hash_input_set = { + .f = cmd_set_hash_input_set_parsed, + .data = NULL, + .help_str = "set_hash_input_set <port_id> " + "ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|" + "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload|<flowtype_id> " + "ovlan|ivlan|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6|ipv4-tos|ipv4-proto|" + "ipv6-tc|ipv6-next-header|udp-src-port|udp-dst-port|tcp-src-port|" + "tcp-dst-port|sctp-src-port|sctp-dst-port|sctp-veri-tag|udp-key|" + "gre-key|fld-1st|fld-2nd|fld-3rd|fld-4th|fld-5th|fld-6th|" + "fld-7th|fld-8th|none select|add", + .tokens = { + (void *)&cmd_set_hash_input_set_cmd, + (void *)&cmd_set_hash_input_set_port_id, + (void *)&cmd_set_hash_input_set_flow_type, + (void *)&cmd_set_hash_input_set_field, + (void *)&cmd_set_hash_input_set_select, + NULL, + }, +}; + +/* Set flow director input set */ +struct cmd_set_fdir_input_set_result { + cmdline_fixed_string_t set_fdir_input_set; + portid_t port_id; + cmdline_fixed_string_t flow_type; + cmdline_fixed_string_t inset_field; + cmdline_fixed_string_t select; +}; + +static void +cmd_set_fdir_input_set_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_fdir_input_set_result *res = parsed_result; + struct rte_eth_fdir_filter_info info; + + memset(&info, 0, sizeof(info)); + info.info_type = RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT; + info.info.input_set_conf.flow_type = str2flowtype(res->flow_type); + info.info.input_set_conf.field[0] = str2inset(res->inset_field); + info.info.input_set_conf.inset_size = 1; + if (!strcmp(res->select, "select")) + info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT; + else if (!strcmp(res->select, "add")) + info.info.input_set_conf.op = RTE_ETH_INPUT_SET_ADD; + rte_eth_dev_filter_ctrl(res->port_id, RTE_ETH_FILTER_FDIR, + RTE_ETH_FILTER_SET, &info); +} + +cmdline_parse_token_string_t cmd_set_fdir_input_set_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_set_fdir_input_set_result, + set_fdir_input_set, "set_fdir_input_set"); +cmdline_parse_token_num_t cmd_set_fdir_input_set_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_fdir_input_set_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_set_fdir_input_set_flow_type = + TOKEN_STRING_INITIALIZER(struct cmd_set_fdir_input_set_result, + flow_type, + "ipv4-frag#ipv4-tcp#ipv4-udp#ipv4-sctp#ipv4-other#" + 
"ipv6-frag#ipv6-tcp#ipv6-udp#ipv6-sctp#ipv6-other#l2_payload"); +cmdline_parse_token_string_t cmd_set_fdir_input_set_field = + TOKEN_STRING_INITIALIZER(struct cmd_set_fdir_input_set_result, + inset_field, + "ivlan#ethertype#src-ipv4#dst-ipv4#src-ipv6#dst-ipv6#" + "ipv4-tos#ipv4-proto#ipv4-ttl#ipv6-tc#ipv6-next-header#" + "ipv6-hop-limits#udp-src-port#udp-dst-port#" + "tcp-src-port#tcp-dst-port#sctp-src-port#sctp-dst-port#" + "sctp-veri-tag#none"); +cmdline_parse_token_string_t cmd_set_fdir_input_set_select = + TOKEN_STRING_INITIALIZER(struct cmd_set_fdir_input_set_result, + select, "select#add"); + +cmdline_parse_inst_t cmd_set_fdir_input_set = { + .f = cmd_set_fdir_input_set_parsed, + .data = NULL, + .help_str = "set_fdir_input_set <port_id> " + "ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|" + "ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload " + "ivlan|ethertype|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6|" + "ipv4-tos|ipv4-proto|ipv4-ttl|ipv6-tc|ipv6-next-header|" + "ipv6-hop-limits|udp-src-port|udp-dst-port|" + "tcp-src-port|tcp-dst-port|sctp-src-port|sctp-dst-port|" + "sctp-veri-tag|none select|add", + .tokens = { + (void *)&cmd_set_fdir_input_set_cmd, + (void *)&cmd_set_fdir_input_set_port_id, + (void *)&cmd_set_fdir_input_set_flow_type, + (void *)&cmd_set_fdir_input_set_field, + (void *)&cmd_set_fdir_input_set_select, + NULL, + }, +}; + +/* *** ADD/REMOVE A MULTICAST MAC ADDRESS TO/FROM A PORT *** */ +struct cmd_mcast_addr_result { + cmdline_fixed_string_t mcast_addr_cmd; + cmdline_fixed_string_t what; + uint16_t port_num; + struct rte_ether_addr mc_addr; +}; + +static void cmd_mcast_addr_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_mcast_addr_result *res = parsed_result; + + if (!rte_is_multicast_ether_addr(&res->mc_addr)) { + printf("Invalid multicast addr %02X:%02X:%02X:%02X:%02X:%02X\n", + res->mc_addr.addr_bytes[0], res->mc_addr.addr_bytes[1], + res->mc_addr.addr_bytes[2], res->mc_addr.addr_bytes[3], + res->mc_addr.addr_bytes[4], res->mc_addr.addr_bytes[5]); + return; + } + if (strcmp(res->what, "add") == 0) + mcast_addr_add(res->port_num, &res->mc_addr); + else + mcast_addr_remove(res->port_num, &res->mc_addr); +} + +cmdline_parse_token_string_t cmd_mcast_addr_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_mcast_addr_result, + mcast_addr_cmd, "mcast_addr"); +cmdline_parse_token_string_t cmd_mcast_addr_what = + TOKEN_STRING_INITIALIZER(struct cmd_mcast_addr_result, what, + "add#remove"); +cmdline_parse_token_num_t cmd_mcast_addr_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_mcast_addr_result, port_num, UINT16); +cmdline_parse_token_etheraddr_t cmd_mcast_addr_addr = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_mac_addr_result, address); + +cmdline_parse_inst_t cmd_mcast_addr = { + .f = cmd_mcast_addr_parsed, + .data = (void *)0, + .help_str = "mcast_addr add|remove <port_id> <mcast_addr>: " + "Add/Remove multicast MAC address on port_id", + .tokens = { + (void *)&cmd_mcast_addr_cmd, + (void *)&cmd_mcast_addr_what, + (void *)&cmd_mcast_addr_portnum, + (void *)&cmd_mcast_addr_addr, + NULL, + }, +}; + +/* l2 tunnel config + * only support E-tag now. 
+ */ + +/* Ether type config */ +struct cmd_config_l2_tunnel_eth_type_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + cmdline_fixed_string_t all; + portid_t id; + cmdline_fixed_string_t l2_tunnel; + cmdline_fixed_string_t l2_tunnel_type; + cmdline_fixed_string_t eth_type; + uint16_t eth_type_val; +}; + +cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_eth_type_result, + port, "port"); +cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_config = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_eth_type_result, + config, "config"); +cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_all_str = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_eth_type_result, + all, "all"); +cmdline_parse_token_num_t cmd_config_l2_tunnel_eth_type_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_l2_tunnel_eth_type_result, + id, UINT16); +cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_l2_tunnel = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_eth_type_result, + l2_tunnel, "l2-tunnel"); +cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_l2_tunnel_type = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_eth_type_result, + l2_tunnel_type, "E-tag"); +cmdline_parse_token_string_t cmd_config_l2_tunnel_eth_type_eth_type = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_eth_type_result, + eth_type, "ether-type"); +cmdline_parse_token_num_t cmd_config_l2_tunnel_eth_type_eth_type_val = + TOKEN_NUM_INITIALIZER + (struct cmd_config_l2_tunnel_eth_type_result, + eth_type_val, UINT16); + +static enum rte_eth_tunnel_type +str2fdir_l2_tunnel_type(char *string) +{ + uint32_t i = 0; + + static const struct { + char str[32]; + enum rte_eth_tunnel_type type; + } l2_tunnel_type_str[] = { + {"E-tag", RTE_L2_TUNNEL_TYPE_E_TAG}, + }; + + for (i = 0; i < RTE_DIM(l2_tunnel_type_str); i++) { + if (!strcmp(l2_tunnel_type_str[i].str, string)) + return l2_tunnel_type_str[i].type; + } + return RTE_TUNNEL_TYPE_NONE; +} + +/* ether type config for all ports */ +static void +cmd_config_l2_tunnel_eth_type_all_parsed + (void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_l2_tunnel_eth_type_result *res = parsed_result; + struct rte_eth_l2_tunnel_conf entry; + portid_t pid; + + entry.l2_tunnel_type = str2fdir_l2_tunnel_type(res->l2_tunnel_type); + entry.ether_type = res->eth_type_val; + + RTE_ETH_FOREACH_DEV(pid) { + rte_eth_dev_l2_tunnel_eth_type_conf(pid, &entry); + } +} + +cmdline_parse_inst_t cmd_config_l2_tunnel_eth_type_all = { + .f = cmd_config_l2_tunnel_eth_type_all_parsed, + .data = NULL, + .help_str = "port config all l2-tunnel E-tag ether-type <value>", + .tokens = { + (void *)&cmd_config_l2_tunnel_eth_type_port, + (void *)&cmd_config_l2_tunnel_eth_type_config, + (void *)&cmd_config_l2_tunnel_eth_type_all_str, + (void *)&cmd_config_l2_tunnel_eth_type_l2_tunnel, + (void *)&cmd_config_l2_tunnel_eth_type_l2_tunnel_type, + (void *)&cmd_config_l2_tunnel_eth_type_eth_type, + (void *)&cmd_config_l2_tunnel_eth_type_eth_type_val, + NULL, + }, +}; + +/* ether type config for a specific port */ +static void +cmd_config_l2_tunnel_eth_type_specific_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_l2_tunnel_eth_type_result *res = + parsed_result; + struct rte_eth_l2_tunnel_conf entry; + + if (port_id_is_invalid(res->id, ENABLED_WARN)) + return; + + 
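/*
+	 * Editor's note -- illustrative only, not part of the upstream patch:
+	 * as in the "all ports" handler above, only the two fields consumed by
+	 * the ether-type update are filled in; the rest of 'entry' is left
+	 * untouched.  A matching command for a single port would be, e.g.:
+	 *
+	 *   testpmd> port config 0 l2-tunnel E-tag ether-type 0x893f
+	 *
+	 * 0x893f being the IEEE 802.1BR E-tag ether type.
+	 */
+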
entry.l2_tunnel_type = str2fdir_l2_tunnel_type(res->l2_tunnel_type); + entry.ether_type = res->eth_type_val; + + rte_eth_dev_l2_tunnel_eth_type_conf(res->id, &entry); +} + +cmdline_parse_inst_t cmd_config_l2_tunnel_eth_type_specific = { + .f = cmd_config_l2_tunnel_eth_type_specific_parsed, + .data = NULL, + .help_str = "port config <port_id> l2-tunnel E-tag ether-type <value>", + .tokens = { + (void *)&cmd_config_l2_tunnel_eth_type_port, + (void *)&cmd_config_l2_tunnel_eth_type_config, + (void *)&cmd_config_l2_tunnel_eth_type_id, + (void *)&cmd_config_l2_tunnel_eth_type_l2_tunnel, + (void *)&cmd_config_l2_tunnel_eth_type_l2_tunnel_type, + (void *)&cmd_config_l2_tunnel_eth_type_eth_type, + (void *)&cmd_config_l2_tunnel_eth_type_eth_type_val, + NULL, + }, +}; + +/* Enable/disable l2 tunnel */ +struct cmd_config_l2_tunnel_en_dis_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + cmdline_fixed_string_t all; + portid_t id; + cmdline_fixed_string_t l2_tunnel; + cmdline_fixed_string_t l2_tunnel_type; + cmdline_fixed_string_t en_dis; +}; + +cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_en_dis_result, + port, "port"); +cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_config = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_en_dis_result, + config, "config"); +cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_all_str = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_en_dis_result, + all, "all"); +cmdline_parse_token_num_t cmd_config_l2_tunnel_en_dis_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_l2_tunnel_en_dis_result, + id, UINT16); +cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_l2_tunnel = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_en_dis_result, + l2_tunnel, "l2-tunnel"); +cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_l2_tunnel_type = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_en_dis_result, + l2_tunnel_type, "E-tag"); +cmdline_parse_token_string_t cmd_config_l2_tunnel_en_dis_en_dis = + TOKEN_STRING_INITIALIZER + (struct cmd_config_l2_tunnel_en_dis_result, + en_dis, "enable#disable"); + +/* enable/disable l2 tunnel for all ports */ +static void +cmd_config_l2_tunnel_en_dis_all_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_l2_tunnel_en_dis_result *res = parsed_result; + struct rte_eth_l2_tunnel_conf entry; + portid_t pid; + uint8_t en; + + entry.l2_tunnel_type = str2fdir_l2_tunnel_type(res->l2_tunnel_type); + + if (!strcmp("enable", res->en_dis)) + en = 1; + else + en = 0; + + RTE_ETH_FOREACH_DEV(pid) { + rte_eth_dev_l2_tunnel_offload_set(pid, + &entry, + ETH_L2_TUNNEL_ENABLE_MASK, + en); + } +} + +cmdline_parse_inst_t cmd_config_l2_tunnel_en_dis_all = { + .f = cmd_config_l2_tunnel_en_dis_all_parsed, + .data = NULL, + .help_str = "port config all l2-tunnel E-tag enable|disable", + .tokens = { + (void *)&cmd_config_l2_tunnel_en_dis_port, + (void *)&cmd_config_l2_tunnel_en_dis_config, + (void *)&cmd_config_l2_tunnel_en_dis_all_str, + (void *)&cmd_config_l2_tunnel_en_dis_l2_tunnel, + (void *)&cmd_config_l2_tunnel_en_dis_l2_tunnel_type, + (void *)&cmd_config_l2_tunnel_en_dis_en_dis, + NULL, + }, +}; + +/* enable/disable l2 tunnel for a port */ +static void +cmd_config_l2_tunnel_en_dis_specific_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_l2_tunnel_en_dis_result *res = + 
parsed_result; + struct rte_eth_l2_tunnel_conf entry; + + if (port_id_is_invalid(res->id, ENABLED_WARN)) + return; + + entry.l2_tunnel_type = str2fdir_l2_tunnel_type(res->l2_tunnel_type); + + if (!strcmp("enable", res->en_dis)) + rte_eth_dev_l2_tunnel_offload_set(res->id, + &entry, + ETH_L2_TUNNEL_ENABLE_MASK, + 1); + else + rte_eth_dev_l2_tunnel_offload_set(res->id, + &entry, + ETH_L2_TUNNEL_ENABLE_MASK, + 0); +} + +cmdline_parse_inst_t cmd_config_l2_tunnel_en_dis_specific = { + .f = cmd_config_l2_tunnel_en_dis_specific_parsed, + .data = NULL, + .help_str = "port config <port_id> l2-tunnel E-tag enable|disable", + .tokens = { + (void *)&cmd_config_l2_tunnel_en_dis_port, + (void *)&cmd_config_l2_tunnel_en_dis_config, + (void *)&cmd_config_l2_tunnel_en_dis_id, + (void *)&cmd_config_l2_tunnel_en_dis_l2_tunnel, + (void *)&cmd_config_l2_tunnel_en_dis_l2_tunnel_type, + (void *)&cmd_config_l2_tunnel_en_dis_en_dis, + NULL, + }, +}; + +/* E-tag configuration */ + +/* Common result structure for all E-tag configuration */ +struct cmd_config_e_tag_result { + cmdline_fixed_string_t e_tag; + cmdline_fixed_string_t set; + cmdline_fixed_string_t insertion; + cmdline_fixed_string_t stripping; + cmdline_fixed_string_t forwarding; + cmdline_fixed_string_t filter; + cmdline_fixed_string_t add; + cmdline_fixed_string_t del; + cmdline_fixed_string_t on; + cmdline_fixed_string_t off; + cmdline_fixed_string_t on_off; + cmdline_fixed_string_t port_tag_id; + uint32_t port_tag_id_val; + cmdline_fixed_string_t e_tag_id; + uint16_t e_tag_id_val; + cmdline_fixed_string_t dst_pool; + uint8_t dst_pool_val; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t vf; + uint8_t vf_id; +}; + +/* Common CLI fields for all E-tag configuration */ +cmdline_parse_token_string_t cmd_config_e_tag_e_tag = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + e_tag, "E-tag"); +cmdline_parse_token_string_t cmd_config_e_tag_set = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + set, "set"); +cmdline_parse_token_string_t cmd_config_e_tag_insertion = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + insertion, "insertion"); +cmdline_parse_token_string_t cmd_config_e_tag_stripping = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + stripping, "stripping"); +cmdline_parse_token_string_t cmd_config_e_tag_forwarding = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + forwarding, "forwarding"); +cmdline_parse_token_string_t cmd_config_e_tag_filter = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + filter, "filter"); +cmdline_parse_token_string_t cmd_config_e_tag_add = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + add, "add"); +cmdline_parse_token_string_t cmd_config_e_tag_del = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + del, "del"); +cmdline_parse_token_string_t cmd_config_e_tag_on = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + on, "on"); +cmdline_parse_token_string_t cmd_config_e_tag_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + off, "off"); +cmdline_parse_token_string_t cmd_config_e_tag_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + on_off, "on#off"); +cmdline_parse_token_string_t cmd_config_e_tag_port_tag_id = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + port_tag_id, "port-tag-id"); +cmdline_parse_token_num_t cmd_config_e_tag_port_tag_id_val = + TOKEN_NUM_INITIALIZER + (struct cmd_config_e_tag_result, + 
port_tag_id_val, UINT32); +cmdline_parse_token_string_t cmd_config_e_tag_e_tag_id = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + e_tag_id, "e-tag-id"); +cmdline_parse_token_num_t cmd_config_e_tag_e_tag_id_val = + TOKEN_NUM_INITIALIZER + (struct cmd_config_e_tag_result, + e_tag_id_val, UINT16); +cmdline_parse_token_string_t cmd_config_e_tag_dst_pool = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + dst_pool, "dst-pool"); +cmdline_parse_token_num_t cmd_config_e_tag_dst_pool_val = + TOKEN_NUM_INITIALIZER + (struct cmd_config_e_tag_result, + dst_pool_val, UINT8); +cmdline_parse_token_string_t cmd_config_e_tag_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + port, "port"); +cmdline_parse_token_num_t cmd_config_e_tag_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_e_tag_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_e_tag_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_config_e_tag_result, + vf, "vf"); +cmdline_parse_token_num_t cmd_config_e_tag_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_e_tag_result, + vf_id, UINT8); + +/* E-tag insertion configuration */ +static void +cmd_config_e_tag_insertion_en_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_e_tag_result *res = + parsed_result; + struct rte_eth_l2_tunnel_conf entry; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG; + entry.tunnel_id = res->port_tag_id_val; + entry.vf_id = res->vf_id; + rte_eth_dev_l2_tunnel_offload_set(res->port_id, + &entry, + ETH_L2_TUNNEL_INSERTION_MASK, + 1); +} + +static void +cmd_config_e_tag_insertion_dis_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_e_tag_result *res = + parsed_result; + struct rte_eth_l2_tunnel_conf entry; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG; + entry.vf_id = res->vf_id; + + rte_eth_dev_l2_tunnel_offload_set(res->port_id, + &entry, + ETH_L2_TUNNEL_INSERTION_MASK, + 0); +} + +cmdline_parse_inst_t cmd_config_e_tag_insertion_en = { + .f = cmd_config_e_tag_insertion_en_parsed, + .data = NULL, + .help_str = "E-tag ... : E-tag insertion enable", + .tokens = { + (void *)&cmd_config_e_tag_e_tag, + (void *)&cmd_config_e_tag_set, + (void *)&cmd_config_e_tag_insertion, + (void *)&cmd_config_e_tag_on, + (void *)&cmd_config_e_tag_port_tag_id, + (void *)&cmd_config_e_tag_port_tag_id_val, + (void *)&cmd_config_e_tag_port, + (void *)&cmd_config_e_tag_port_id, + (void *)&cmd_config_e_tag_vf, + (void *)&cmd_config_e_tag_vf_id, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_config_e_tag_insertion_dis = { + .f = cmd_config_e_tag_insertion_dis_parsed, + .data = NULL, + .help_str = "E-tag ... 
: E-tag insertion disable", + .tokens = { + (void *)&cmd_config_e_tag_e_tag, + (void *)&cmd_config_e_tag_set, + (void *)&cmd_config_e_tag_insertion, + (void *)&cmd_config_e_tag_off, + (void *)&cmd_config_e_tag_port, + (void *)&cmd_config_e_tag_port_id, + (void *)&cmd_config_e_tag_vf, + (void *)&cmd_config_e_tag_vf_id, + NULL, + }, +}; + +/* E-tag stripping configuration */ +static void +cmd_config_e_tag_stripping_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_e_tag_result *res = + parsed_result; + struct rte_eth_l2_tunnel_conf entry; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG; + + if (!strcmp(res->on_off, "on")) + rte_eth_dev_l2_tunnel_offload_set + (res->port_id, + &entry, + ETH_L2_TUNNEL_STRIPPING_MASK, + 1); + else + rte_eth_dev_l2_tunnel_offload_set + (res->port_id, + &entry, + ETH_L2_TUNNEL_STRIPPING_MASK, + 0); +} + +cmdline_parse_inst_t cmd_config_e_tag_stripping_en_dis = { + .f = cmd_config_e_tag_stripping_parsed, + .data = NULL, + .help_str = "E-tag ... : E-tag stripping enable/disable", + .tokens = { + (void *)&cmd_config_e_tag_e_tag, + (void *)&cmd_config_e_tag_set, + (void *)&cmd_config_e_tag_stripping, + (void *)&cmd_config_e_tag_on_off, + (void *)&cmd_config_e_tag_port, + (void *)&cmd_config_e_tag_port_id, + NULL, + }, +}; + +/* E-tag forwarding configuration */ +static void +cmd_config_e_tag_forwarding_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_e_tag_result *res = parsed_result; + struct rte_eth_l2_tunnel_conf entry; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG; + + if (!strcmp(res->on_off, "on")) + rte_eth_dev_l2_tunnel_offload_set + (res->port_id, + &entry, + ETH_L2_TUNNEL_FORWARDING_MASK, + 1); + else + rte_eth_dev_l2_tunnel_offload_set + (res->port_id, + &entry, + ETH_L2_TUNNEL_FORWARDING_MASK, + 0); +} + +cmdline_parse_inst_t cmd_config_e_tag_forwarding_en_dis = { + .f = cmd_config_e_tag_forwarding_parsed, + .data = NULL, + .help_str = "E-tag ... 
: E-tag forwarding enable/disable", + .tokens = { + (void *)&cmd_config_e_tag_e_tag, + (void *)&cmd_config_e_tag_set, + (void *)&cmd_config_e_tag_forwarding, + (void *)&cmd_config_e_tag_on_off, + (void *)&cmd_config_e_tag_port, + (void *)&cmd_config_e_tag_port_id, + NULL, + }, +}; + +/* E-tag filter configuration */ +static void +cmd_config_e_tag_filter_add_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_e_tag_result *res = parsed_result; + struct rte_eth_l2_tunnel_conf entry; + int ret = 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + if (res->e_tag_id_val > 0x3fff) { + printf("e-tag-id must be equal or less than 0x3fff.\n"); + return; + } + + ret = rte_eth_dev_filter_supported(res->port_id, + RTE_ETH_FILTER_L2_TUNNEL); + if (ret < 0) { + printf("E-tag filter is not supported on port %u.\n", + res->port_id); + return; + } + + entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG; + entry.tunnel_id = res->e_tag_id_val; + entry.pool = res->dst_pool_val; + + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_L2_TUNNEL, + RTE_ETH_FILTER_ADD, + &entry); + if (ret < 0) + printf("E-tag filter programming error: (%s)\n", + strerror(-ret)); +} + +cmdline_parse_inst_t cmd_config_e_tag_filter_add = { + .f = cmd_config_e_tag_filter_add_parsed, + .data = NULL, + .help_str = "E-tag ... : E-tag filter add", + .tokens = { + (void *)&cmd_config_e_tag_e_tag, + (void *)&cmd_config_e_tag_set, + (void *)&cmd_config_e_tag_filter, + (void *)&cmd_config_e_tag_add, + (void *)&cmd_config_e_tag_e_tag_id, + (void *)&cmd_config_e_tag_e_tag_id_val, + (void *)&cmd_config_e_tag_dst_pool, + (void *)&cmd_config_e_tag_dst_pool_val, + (void *)&cmd_config_e_tag_port, + (void *)&cmd_config_e_tag_port_id, + NULL, + }, +}; + +static void +cmd_config_e_tag_filter_del_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_e_tag_result *res = parsed_result; + struct rte_eth_l2_tunnel_conf entry; + int ret = 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + if (res->e_tag_id_val > 0x3fff) { + printf("e-tag-id must be less than 0x3fff.\n"); + return; + } + + ret = rte_eth_dev_filter_supported(res->port_id, + RTE_ETH_FILTER_L2_TUNNEL); + if (ret < 0) { + printf("E-tag filter is not supported on port %u.\n", + res->port_id); + return; + } + + entry.l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG; + entry.tunnel_id = res->e_tag_id_val; + + ret = rte_eth_dev_filter_ctrl(res->port_id, + RTE_ETH_FILTER_L2_TUNNEL, + RTE_ETH_FILTER_DELETE, + &entry); + if (ret < 0) + printf("E-tag filter programming error: (%s)\n", + strerror(-ret)); +} + +cmdline_parse_inst_t cmd_config_e_tag_filter_del = { + .f = cmd_config_e_tag_filter_del_parsed, + .data = NULL, + .help_str = "E-tag ... 
: E-tag filter delete", + .tokens = { + (void *)&cmd_config_e_tag_e_tag, + (void *)&cmd_config_e_tag_set, + (void *)&cmd_config_e_tag_filter, + (void *)&cmd_config_e_tag_del, + (void *)&cmd_config_e_tag_e_tag_id, + (void *)&cmd_config_e_tag_e_tag_id_val, + (void *)&cmd_config_e_tag_port, + (void *)&cmd_config_e_tag_port_id, + NULL, + }, +}; + +/* vf vlan anti spoof configuration */ + +/* Common result structure for vf vlan anti spoof */ +struct cmd_vf_vlan_anti_spoof_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t vlan; + cmdline_fixed_string_t antispoof; + portid_t port_id; + uint32_t vf_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for vf vlan anti spoof enable disable */ +cmdline_parse_token_string_t cmd_vf_vlan_anti_spoof_set = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_anti_spoof_result, + set, "set"); +cmdline_parse_token_string_t cmd_vf_vlan_anti_spoof_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_anti_spoof_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_vf_vlan_anti_spoof_vlan = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_anti_spoof_result, + vlan, "vlan"); +cmdline_parse_token_string_t cmd_vf_vlan_anti_spoof_antispoof = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_anti_spoof_result, + antispoof, "antispoof"); +cmdline_parse_token_num_t cmd_vf_vlan_anti_spoof_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_vlan_anti_spoof_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_vf_vlan_anti_spoof_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_vlan_anti_spoof_result, + vf_id, UINT32); +cmdline_parse_token_string_t cmd_vf_vlan_anti_spoof_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_anti_spoof_result, + on_off, "on#off"); + +static void +cmd_set_vf_vlan_anti_spoof_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_vlan_anti_spoof_result *res = parsed_result; + int ret = -ENOTSUP; + + __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_ixgbe_set_vf_vlan_anti_spoof(res->port_id, + res->vf_id, is_on); +#endif +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_set_vf_vlan_anti_spoof(res->port_id, + res->vf_id, is_on); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_set_vf_vlan_anti_spoof(res->port_id, + res->vf_id, is_on); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d\n", res->vf_id); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_vlan_anti_spoof = { + .f = cmd_set_vf_vlan_anti_spoof_parsed, + .data = NULL, + .help_str = "set vf vlan antispoof <port_id> <vf_id> on|off", + .tokens = { + (void *)&cmd_vf_vlan_anti_spoof_set, + (void *)&cmd_vf_vlan_anti_spoof_vf, + (void *)&cmd_vf_vlan_anti_spoof_vlan, + (void *)&cmd_vf_vlan_anti_spoof_antispoof, + (void *)&cmd_vf_vlan_anti_spoof_port_id, + (void *)&cmd_vf_vlan_anti_spoof_vf_id, + (void *)&cmd_vf_vlan_anti_spoof_on_off, + NULL, + }, +}; + +/* vf mac anti spoof configuration */ + +/* Common result structure for vf mac anti spoof */ +struct cmd_vf_mac_anti_spoof_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t mac; + cmdline_fixed_string_t antispoof; + portid_t port_id; + uint32_t vf_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for vf mac anti spoof enable disable */ +cmdline_parse_token_string_t cmd_vf_mac_anti_spoof_set = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_mac_anti_spoof_result, + set, "set"); +cmdline_parse_token_string_t cmd_vf_mac_anti_spoof_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_mac_anti_spoof_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_vf_mac_anti_spoof_mac = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_mac_anti_spoof_result, + mac, "mac"); +cmdline_parse_token_string_t cmd_vf_mac_anti_spoof_antispoof = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_mac_anti_spoof_result, + antispoof, "antispoof"); +cmdline_parse_token_num_t cmd_vf_mac_anti_spoof_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_mac_anti_spoof_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_vf_mac_anti_spoof_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_mac_anti_spoof_result, + vf_id, UINT32); +cmdline_parse_token_string_t cmd_vf_mac_anti_spoof_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_mac_anti_spoof_result, + on_off, "on#off"); + +static void +cmd_set_vf_mac_anti_spoof_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_mac_anti_spoof_result *res = parsed_result; + int ret = -ENOTSUP; + + __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_ixgbe_set_vf_mac_anti_spoof(res->port_id, + res->vf_id, is_on); +#endif +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_set_vf_mac_anti_spoof(res->port_id, + res->vf_id, is_on); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_set_vf_mac_anti_spoof(res->port_id, + res->vf_id, is_on); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d or is_on %d\n", res->vf_id, is_on); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_mac_anti_spoof = { + .f = cmd_set_vf_mac_anti_spoof_parsed, + .data = NULL, + .help_str = "set vf mac antispoof <port_id> <vf_id> on|off", + .tokens = { + (void *)&cmd_vf_mac_anti_spoof_set, + (void *)&cmd_vf_mac_anti_spoof_vf, + (void *)&cmd_vf_mac_anti_spoof_mac, + (void *)&cmd_vf_mac_anti_spoof_antispoof, + (void *)&cmd_vf_mac_anti_spoof_port_id, + (void *)&cmd_vf_mac_anti_spoof_vf_id, + (void *)&cmd_vf_mac_anti_spoof_on_off, + NULL, + }, +}; + +/* vf vlan strip queue configuration */ + +/* Common result structure for vf mac anti spoof */ +struct cmd_vf_vlan_stripq_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t vlan; + cmdline_fixed_string_t stripq; + portid_t port_id; + uint16_t vf_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for vf vlan strip enable disable */ +cmdline_parse_token_string_t cmd_vf_vlan_stripq_set = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_stripq_result, + set, "set"); +cmdline_parse_token_string_t cmd_vf_vlan_stripq_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_stripq_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_vf_vlan_stripq_vlan = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_stripq_result, + vlan, "vlan"); +cmdline_parse_token_string_t cmd_vf_vlan_stripq_stripq = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_stripq_result, + stripq, "stripq"); +cmdline_parse_token_num_t cmd_vf_vlan_stripq_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_vlan_stripq_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_vf_vlan_stripq_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_vlan_stripq_result, + vf_id, UINT16); +cmdline_parse_token_string_t cmd_vf_vlan_stripq_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_stripq_result, + on_off, "on#off"); + +static void +cmd_set_vf_vlan_stripq_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_vlan_stripq_result *res = parsed_result; + int ret = -ENOTSUP; + + __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_ixgbe_set_vf_vlan_stripq(res->port_id, + res->vf_id, is_on); +#endif +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_set_vf_vlan_stripq(res->port_id, + res->vf_id, is_on); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_set_vf_vlan_stripq(res->port_id, + res->vf_id, is_on); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d or is_on %d\n", res->vf_id, is_on); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_vlan_stripq = { + .f = cmd_set_vf_vlan_stripq_parsed, + .data = NULL, + .help_str = "set vf vlan stripq <port_id> <vf_id> on|off", + .tokens = { + (void *)&cmd_vf_vlan_stripq_set, + (void *)&cmd_vf_vlan_stripq_vf, + (void *)&cmd_vf_vlan_stripq_vlan, + (void *)&cmd_vf_vlan_stripq_stripq, + (void *)&cmd_vf_vlan_stripq_port_id, + (void *)&cmd_vf_vlan_stripq_vf_id, + (void *)&cmd_vf_vlan_stripq_on_off, + NULL, + }, +}; + +/* vf vlan insert configuration */ + +/* Common result structure for vf vlan insert */ +struct cmd_vf_vlan_insert_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t vlan; + cmdline_fixed_string_t insert; + portid_t port_id; + uint16_t vf_id; + uint16_t vlan_id; +}; + +/* Common CLI fields for vf vlan insert enable disable */ +cmdline_parse_token_string_t cmd_vf_vlan_insert_set = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_insert_result, + set, "set"); +cmdline_parse_token_string_t cmd_vf_vlan_insert_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_insert_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_vf_vlan_insert_vlan = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_insert_result, + vlan, "vlan"); +cmdline_parse_token_string_t cmd_vf_vlan_insert_insert = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_vlan_insert_result, + insert, "insert"); +cmdline_parse_token_num_t cmd_vf_vlan_insert_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_vlan_insert_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_vf_vlan_insert_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_vlan_insert_result, + vf_id, UINT16); +cmdline_parse_token_num_t cmd_vf_vlan_insert_vlan_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_vlan_insert_result, + vlan_id, UINT16); + +static void +cmd_set_vf_vlan_insert_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_vlan_insert_result *res = parsed_result; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_ixgbe_set_vf_vlan_insert(res->port_id, res->vf_id, + res->vlan_id); +#endif +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_set_vf_vlan_insert(res->port_id, res->vf_id, + res->vlan_id); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_set_vf_vlan_insert(res->port_id, res->vf_id, + res->vlan_id); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d or vlan_id %d\n", res->vf_id, res->vlan_id); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function 
not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_vlan_insert = { + .f = cmd_set_vf_vlan_insert_parsed, + .data = NULL, + .help_str = "set vf vlan insert <port_id> <vf_id> <vlan_id>", + .tokens = { + (void *)&cmd_vf_vlan_insert_set, + (void *)&cmd_vf_vlan_insert_vf, + (void *)&cmd_vf_vlan_insert_vlan, + (void *)&cmd_vf_vlan_insert_insert, + (void *)&cmd_vf_vlan_insert_port_id, + (void *)&cmd_vf_vlan_insert_vf_id, + (void *)&cmd_vf_vlan_insert_vlan_id, + NULL, + }, +}; + +/* tx loopback configuration */ + +/* Common result structure for tx loopback */ +struct cmd_tx_loopback_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t tx; + cmdline_fixed_string_t loopback; + portid_t port_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for tx loopback enable disable */ +cmdline_parse_token_string_t cmd_tx_loopback_set = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_loopback_result, + set, "set"); +cmdline_parse_token_string_t cmd_tx_loopback_tx = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_loopback_result, + tx, "tx"); +cmdline_parse_token_string_t cmd_tx_loopback_loopback = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_loopback_result, + loopback, "loopback"); +cmdline_parse_token_num_t cmd_tx_loopback_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_tx_loopback_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_tx_loopback_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_loopback_result, + on_off, "on#off"); + +static void +cmd_set_tx_loopback_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tx_loopback_result *res = parsed_result; + int ret = -ENOTSUP; + + __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_ixgbe_set_tx_loopback(res->port_id, is_on); +#endif +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_set_tx_loopback(res->port_id, is_on); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_set_tx_loopback(res->port_id, is_on); +#endif +#if defined RTE_LIBRTE_DPAA_BUS && defined RTE_LIBRTE_DPAA_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_dpaa_set_tx_loopback(res->port_id, is_on); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid is_on %d\n", is_on); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_tx_loopback = { + .f = cmd_set_tx_loopback_parsed, + .data = NULL, + .help_str = "set tx loopback <port_id> on|off", + .tokens = { + (void *)&cmd_tx_loopback_set, + (void *)&cmd_tx_loopback_tx, + (void *)&cmd_tx_loopback_loopback, + (void *)&cmd_tx_loopback_port_id, + (void *)&cmd_tx_loopback_on_off, + NULL, + }, +}; + +/* all queues drop enable configuration */ + +/* Common result structure for all queues drop enable */ +struct cmd_all_queues_drop_en_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t all; + cmdline_fixed_string_t queues; + cmdline_fixed_string_t drop; + portid_t port_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for tx loopback enable disable */ +cmdline_parse_token_string_t cmd_all_queues_drop_en_set = + TOKEN_STRING_INITIALIZER + (struct cmd_all_queues_drop_en_result, + set, "set"); +cmdline_parse_token_string_t cmd_all_queues_drop_en_all = + TOKEN_STRING_INITIALIZER + (struct cmd_all_queues_drop_en_result, + all, "all"); +cmdline_parse_token_string_t cmd_all_queues_drop_en_queues = + TOKEN_STRING_INITIALIZER + (struct cmd_all_queues_drop_en_result, + queues, "queues"); +cmdline_parse_token_string_t cmd_all_queues_drop_en_drop = + TOKEN_STRING_INITIALIZER + (struct cmd_all_queues_drop_en_result, + drop, "drop"); +cmdline_parse_token_num_t cmd_all_queues_drop_en_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_all_queues_drop_en_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_all_queues_drop_en_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_all_queues_drop_en_result, + on_off, "on#off"); + +static void +cmd_set_all_queues_drop_en_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_all_queues_drop_en_result *res = parsed_result; + int ret = -ENOTSUP; + int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_ixgbe_set_all_queues_drop_en(res->port_id, is_on); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_set_all_queues_drop_en(res->port_id, is_on); +#endif + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid is_on %d\n", is_on); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_all_queues_drop_en = { + .f = cmd_set_all_queues_drop_en_parsed, + .data = NULL, + .help_str = "set all queues drop <port_id> on|off", + .tokens = { + (void *)&cmd_all_queues_drop_en_set, + (void *)&cmd_all_queues_drop_en_all, + (void *)&cmd_all_queues_drop_en_queues, + (void *)&cmd_all_queues_drop_en_drop, + (void *)&cmd_all_queues_drop_en_port_id, + (void *)&cmd_all_queues_drop_en_on_off, + NULL, + }, +}; + +/* vf split drop enable configuration */ + +/* Common result structure for vf split drop enable */ +struct cmd_vf_split_drop_en_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t split; + cmdline_fixed_string_t drop; + portid_t port_id; + uint16_t vf_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for vf split drop enable disable */ +cmdline_parse_token_string_t cmd_vf_split_drop_en_set = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_split_drop_en_result, + set, "set"); +cmdline_parse_token_string_t cmd_vf_split_drop_en_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_split_drop_en_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_vf_split_drop_en_split = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_split_drop_en_result, + split, "split"); +cmdline_parse_token_string_t cmd_vf_split_drop_en_drop = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_split_drop_en_result, + drop, "drop"); +cmdline_parse_token_num_t cmd_vf_split_drop_en_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_split_drop_en_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_vf_split_drop_en_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_split_drop_en_result, + vf_id, UINT16); +cmdline_parse_token_string_t cmd_vf_split_drop_en_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_split_drop_en_result, + on_off, "on#off"); + +static void +cmd_set_vf_split_drop_en_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_split_drop_en_result *res = parsed_result; + int ret = -ENOTSUP; + int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_IXGBE_PMD + ret = rte_pmd_ixgbe_set_vf_split_drop_en(res->port_id, res->vf_id, + is_on); +#endif + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d or is_on %d\n", res->vf_id, is_on); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("not supported on port %d\n", res->port_id); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_split_drop_en = { + .f = cmd_set_vf_split_drop_en_parsed, + .data = NULL, + .help_str = "set vf split drop <port_id> <vf_id> on|off", + .tokens = { + (void *)&cmd_vf_split_drop_en_set, + (void *)&cmd_vf_split_drop_en_vf, + (void *)&cmd_vf_split_drop_en_split, + (void *)&cmd_vf_split_drop_en_drop, + (void *)&cmd_vf_split_drop_en_port_id, + (void *)&cmd_vf_split_drop_en_vf_id, + (void *)&cmd_vf_split_drop_en_on_off, + NULL, + }, +}; + +/* vf mac address configuration */ + +/* Common result structure for vf mac address */ +struct cmd_set_vf_mac_addr_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t mac; + cmdline_fixed_string_t addr; + portid_t port_id; + uint16_t vf_id; + struct rte_ether_addr mac_addr; + +}; + +/* Common CLI fields for vf split drop enable disable */ +cmdline_parse_token_string_t cmd_set_vf_mac_addr_set = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_mac_addr_result, + set, "set"); +cmdline_parse_token_string_t cmd_set_vf_mac_addr_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_mac_addr_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_set_vf_mac_addr_mac = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_mac_addr_result, + mac, "mac"); +cmdline_parse_token_string_t cmd_set_vf_mac_addr_addr = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_mac_addr_result, + addr, "addr"); +cmdline_parse_token_num_t cmd_set_vf_mac_addr_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_set_vf_mac_addr_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_set_vf_mac_addr_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_set_vf_mac_addr_result, + vf_id, UINT16); +cmdline_parse_token_etheraddr_t cmd_set_vf_mac_addr_mac_addr = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vf_mac_addr_result, + mac_addr); + +static void +cmd_set_vf_mac_addr_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_vf_mac_addr_result *res = parsed_result; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_ixgbe_set_vf_mac_addr(res->port_id, res->vf_id, + &res->mac_addr); +#endif +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_set_vf_mac_addr(res->port_id, res->vf_id, + &res->mac_addr); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_set_vf_mac_addr(res->port_id, res->vf_id, + &res->mac_addr); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d or mac_addr\n", res->vf_id); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_mac_addr = { + .f = cmd_set_vf_mac_addr_parsed, + .data = NULL, + .help_str = "set vf mac addr <port_id> <vf_id> <mac_addr>", + 
.tokens = { + (void *)&cmd_set_vf_mac_addr_set, + (void *)&cmd_set_vf_mac_addr_vf, + (void *)&cmd_set_vf_mac_addr_mac, + (void *)&cmd_set_vf_mac_addr_addr, + (void *)&cmd_set_vf_mac_addr_port_id, + (void *)&cmd_set_vf_mac_addr_vf_id, + (void *)&cmd_set_vf_mac_addr_mac_addr, + NULL, + }, +}; + +/* MACsec configuration */ + +/* Common result structure for MACsec offload enable */ +struct cmd_macsec_offload_on_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t macsec; + cmdline_fixed_string_t offload; + portid_t port_id; + cmdline_fixed_string_t on; + cmdline_fixed_string_t encrypt; + cmdline_fixed_string_t en_on_off; + cmdline_fixed_string_t replay_protect; + cmdline_fixed_string_t rp_on_off; +}; + +/* Common CLI fields for MACsec offload disable */ +cmdline_parse_token_string_t cmd_macsec_offload_on_set = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_on_result, + set, "set"); +cmdline_parse_token_string_t cmd_macsec_offload_on_macsec = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_on_result, + macsec, "macsec"); +cmdline_parse_token_string_t cmd_macsec_offload_on_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_on_result, + offload, "offload"); +cmdline_parse_token_num_t cmd_macsec_offload_on_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_macsec_offload_on_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_macsec_offload_on_on = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_on_result, + on, "on"); +cmdline_parse_token_string_t cmd_macsec_offload_on_encrypt = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_on_result, + encrypt, "encrypt"); +cmdline_parse_token_string_t cmd_macsec_offload_on_en_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_on_result, + en_on_off, "on#off"); +cmdline_parse_token_string_t cmd_macsec_offload_on_replay_protect = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_on_result, + replay_protect, "replay-protect"); +cmdline_parse_token_string_t cmd_macsec_offload_on_rp_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_on_result, + rp_on_off, "on#off"); + +static void +cmd_set_macsec_offload_on_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_macsec_offload_on_result *res = parsed_result; + int ret = -ENOTSUP; + portid_t port_id = res->port_id; + int en = (strcmp(res->en_on_off, "on") == 0) ? 1 : 0; + int rp = (strcmp(res->rp_on_off, "on") == 0) ? 
1 : 0; + struct rte_eth_dev_info dev_info; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (!port_is_stopped(port_id)) { + printf("Please stop port %d first\n", port_id); + return; + } + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) { +#ifdef RTE_LIBRTE_IXGBE_PMD + ret = rte_pmd_ixgbe_macsec_enable(port_id, en, rp); +#endif + } + RTE_SET_USED(en); + RTE_SET_USED(rp); + + switch (ret) { + case 0: + ports[port_id].dev_conf.txmode.offloads |= + DEV_TX_OFFLOAD_MACSEC_INSERT; + cmd_reconfig_device_queue(port_id, 1, 1); + break; + case -ENODEV: + printf("invalid port_id %d\n", port_id); + break; + case -ENOTSUP: + printf("not supported on port %d\n", port_id); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_macsec_offload_on = { + .f = cmd_set_macsec_offload_on_parsed, + .data = NULL, + .help_str = "set macsec offload <port_id> on " + "encrypt on|off replay-protect on|off", + .tokens = { + (void *)&cmd_macsec_offload_on_set, + (void *)&cmd_macsec_offload_on_macsec, + (void *)&cmd_macsec_offload_on_offload, + (void *)&cmd_macsec_offload_on_port_id, + (void *)&cmd_macsec_offload_on_on, + (void *)&cmd_macsec_offload_on_encrypt, + (void *)&cmd_macsec_offload_on_en_on_off, + (void *)&cmd_macsec_offload_on_replay_protect, + (void *)&cmd_macsec_offload_on_rp_on_off, + NULL, + }, +}; + +/* Common result structure for MACsec offload disable */ +struct cmd_macsec_offload_off_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t macsec; + cmdline_fixed_string_t offload; + portid_t port_id; + cmdline_fixed_string_t off; +}; + +/* Common CLI fields for MACsec offload disable */ +cmdline_parse_token_string_t cmd_macsec_offload_off_set = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_off_result, + set, "set"); +cmdline_parse_token_string_t cmd_macsec_offload_off_macsec = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_off_result, + macsec, "macsec"); +cmdline_parse_token_string_t cmd_macsec_offload_off_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_off_result, + offload, "offload"); +cmdline_parse_token_num_t cmd_macsec_offload_off_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_macsec_offload_off_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_macsec_offload_off_off = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_offload_off_result, + off, "off"); + +static void +cmd_set_macsec_offload_off_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_macsec_offload_off_result *res = parsed_result; + int ret = -ENOTSUP; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (!port_is_stopped(port_id)) { + printf("Please stop port %d first\n", port_id); + return; + } + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MACSEC_INSERT) { +#ifdef RTE_LIBRTE_IXGBE_PMD + ret = rte_pmd_ixgbe_macsec_disable(port_id); +#endif + } + switch (ret) { + case 0: + ports[port_id].dev_conf.txmode.offloads &= + ~DEV_TX_OFFLOAD_MACSEC_INSERT; + cmd_reconfig_device_queue(port_id, 1, 1); + break; + case -ENODEV: + printf("invalid port_id %d\n", port_id); + break; + case -ENOTSUP: + printf("not supported on port %d\n", port_id); + break; + default: + printf("programming error: 
(%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_macsec_offload_off = { + .f = cmd_set_macsec_offload_off_parsed, + .data = NULL, + .help_str = "set macsec offload <port_id> off", + .tokens = { + (void *)&cmd_macsec_offload_off_set, + (void *)&cmd_macsec_offload_off_macsec, + (void *)&cmd_macsec_offload_off_offload, + (void *)&cmd_macsec_offload_off_port_id, + (void *)&cmd_macsec_offload_off_off, + NULL, + }, +}; + +/* Common result structure for MACsec secure connection configure */ +struct cmd_macsec_sc_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t macsec; + cmdline_fixed_string_t sc; + cmdline_fixed_string_t tx_rx; + portid_t port_id; + struct rte_ether_addr mac; + uint16_t pi; +}; + +/* Common CLI fields for MACsec secure connection configure */ +cmdline_parse_token_string_t cmd_macsec_sc_set = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_sc_result, + set, "set"); +cmdline_parse_token_string_t cmd_macsec_sc_macsec = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_sc_result, + macsec, "macsec"); +cmdline_parse_token_string_t cmd_macsec_sc_sc = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_sc_result, + sc, "sc"); +cmdline_parse_token_string_t cmd_macsec_sc_tx_rx = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_sc_result, + tx_rx, "tx#rx"); +cmdline_parse_token_num_t cmd_macsec_sc_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_macsec_sc_result, + port_id, UINT16); +cmdline_parse_token_etheraddr_t cmd_macsec_sc_mac = + TOKEN_ETHERADDR_INITIALIZER + (struct cmd_macsec_sc_result, + mac); +cmdline_parse_token_num_t cmd_macsec_sc_pi = + TOKEN_NUM_INITIALIZER + (struct cmd_macsec_sc_result, + pi, UINT16); + +static void +cmd_set_macsec_sc_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_macsec_sc_result *res = parsed_result; + int ret = -ENOTSUP; + int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0; + +#ifdef RTE_LIBRTE_IXGBE_PMD + ret = is_tx ? 
+ rte_pmd_ixgbe_macsec_config_txsc(res->port_id, + res->mac.addr_bytes) : + rte_pmd_ixgbe_macsec_config_rxsc(res->port_id, + res->mac.addr_bytes, res->pi); +#endif + RTE_SET_USED(is_tx); + + switch (ret) { + case 0: + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("not supported on port %d\n", res->port_id); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_macsec_sc = { + .f = cmd_set_macsec_sc_parsed, + .data = NULL, + .help_str = "set macsec sc tx|rx <port_id> <mac> <pi>", + .tokens = { + (void *)&cmd_macsec_sc_set, + (void *)&cmd_macsec_sc_macsec, + (void *)&cmd_macsec_sc_sc, + (void *)&cmd_macsec_sc_tx_rx, + (void *)&cmd_macsec_sc_port_id, + (void *)&cmd_macsec_sc_mac, + (void *)&cmd_macsec_sc_pi, + NULL, + }, +}; + +/* Common result structure for MACsec secure connection configure */ +struct cmd_macsec_sa_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t macsec; + cmdline_fixed_string_t sa; + cmdline_fixed_string_t tx_rx; + portid_t port_id; + uint8_t idx; + uint8_t an; + uint32_t pn; + cmdline_fixed_string_t key; +}; + +/* Common CLI fields for MACsec secure connection configure */ +cmdline_parse_token_string_t cmd_macsec_sa_set = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_sa_result, + set, "set"); +cmdline_parse_token_string_t cmd_macsec_sa_macsec = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_sa_result, + macsec, "macsec"); +cmdline_parse_token_string_t cmd_macsec_sa_sa = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_sa_result, + sa, "sa"); +cmdline_parse_token_string_t cmd_macsec_sa_tx_rx = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_sa_result, + tx_rx, "tx#rx"); +cmdline_parse_token_num_t cmd_macsec_sa_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_macsec_sa_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_macsec_sa_idx = + TOKEN_NUM_INITIALIZER + (struct cmd_macsec_sa_result, + idx, UINT8); +cmdline_parse_token_num_t cmd_macsec_sa_an = + TOKEN_NUM_INITIALIZER + (struct cmd_macsec_sa_result, + an, UINT8); +cmdline_parse_token_num_t cmd_macsec_sa_pn = + TOKEN_NUM_INITIALIZER + (struct cmd_macsec_sa_result, + pn, UINT32); +cmdline_parse_token_string_t cmd_macsec_sa_key = + TOKEN_STRING_INITIALIZER + (struct cmd_macsec_sa_result, + key, NULL); + +static void +cmd_set_macsec_sa_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_macsec_sa_result *res = parsed_result; + int ret = -ENOTSUP; + int is_tx = (strcmp(res->tx_rx, "tx") == 0) ? 1 : 0; + uint8_t key[16] = { 0 }; + uint8_t xdgt0; + uint8_t xdgt1; + int key_len; + int i; + + key_len = strlen(res->key) / 2; + if (key_len > 16) + key_len = 16; + + for (i = 0; i < key_len; i++) { + xdgt0 = parse_and_check_key_hexa_digit(res->key, (i * 2)); + if (xdgt0 == 0xFF) + return; + xdgt1 = parse_and_check_key_hexa_digit(res->key, (i * 2) + 1); + if (xdgt1 == 0xFF) + return; + key[i] = (uint8_t) ((xdgt0 * 16) + xdgt1); + } + +#ifdef RTE_LIBRTE_IXGBE_PMD + ret = is_tx ? 
+ rte_pmd_ixgbe_macsec_select_txsa(res->port_id, + res->idx, res->an, res->pn, key) : + rte_pmd_ixgbe_macsec_select_rxsa(res->port_id, + res->idx, res->an, res->pn, key); +#endif + RTE_SET_USED(is_tx); + RTE_SET_USED(key); + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid idx %d or an %d\n", res->idx, res->an); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("not supported on port %d\n", res->port_id); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_macsec_sa = { + .f = cmd_set_macsec_sa_parsed, + .data = NULL, + .help_str = "set macsec sa tx|rx <port_id> <idx> <an> <pn> <key>", + .tokens = { + (void *)&cmd_macsec_sa_set, + (void *)&cmd_macsec_sa_macsec, + (void *)&cmd_macsec_sa_sa, + (void *)&cmd_macsec_sa_tx_rx, + (void *)&cmd_macsec_sa_port_id, + (void *)&cmd_macsec_sa_idx, + (void *)&cmd_macsec_sa_an, + (void *)&cmd_macsec_sa_pn, + (void *)&cmd_macsec_sa_key, + NULL, + }, +}; + +/* VF unicast promiscuous mode configuration */ + +/* Common result structure for VF unicast promiscuous mode */ +struct cmd_vf_promisc_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t promisc; + portid_t port_id; + uint32_t vf_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for VF unicast promiscuous mode enable disable */ +cmdline_parse_token_string_t cmd_vf_promisc_set = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_promisc_result, + set, "set"); +cmdline_parse_token_string_t cmd_vf_promisc_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_promisc_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_vf_promisc_promisc = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_promisc_result, + promisc, "promisc"); +cmdline_parse_token_num_t cmd_vf_promisc_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_promisc_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_vf_promisc_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_promisc_result, + vf_id, UINT32); +cmdline_parse_token_string_t cmd_vf_promisc_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_promisc_result, + on_off, "on#off"); + +static void +cmd_set_vf_promisc_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_promisc_result *res = parsed_result; + int ret = -ENOTSUP; + + __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_set_vf_unicast_promisc(res->port_id, + res->vf_id, is_on); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d\n", res->vf_id); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_promisc = { + .f = cmd_set_vf_promisc_parsed, + .data = NULL, + .help_str = "set vf promisc <port_id> <vf_id> on|off: " + "Set unicast promiscuous mode for a VF from the PF", + .tokens = { + (void *)&cmd_vf_promisc_set, + (void *)&cmd_vf_promisc_vf, + (void *)&cmd_vf_promisc_promisc, + (void *)&cmd_vf_promisc_port_id, + (void *)&cmd_vf_promisc_vf_id, + (void *)&cmd_vf_promisc_on_off, + NULL, + }, +}; + +/* VF multicast promiscuous mode configuration */ + +/* Common result structure for VF multicast promiscuous mode */ +struct cmd_vf_allmulti_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t allmulti; + portid_t port_id; + uint32_t vf_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for VF multicast promiscuous mode enable disable */ +cmdline_parse_token_string_t cmd_vf_allmulti_set = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_allmulti_result, + set, "set"); +cmdline_parse_token_string_t cmd_vf_allmulti_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_allmulti_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_vf_allmulti_allmulti = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_allmulti_result, + allmulti, "allmulti"); +cmdline_parse_token_num_t cmd_vf_allmulti_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_allmulti_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_vf_allmulti_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_allmulti_result, + vf_id, UINT32); +cmdline_parse_token_string_t cmd_vf_allmulti_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_allmulti_result, + on_off, "on#off"); + +static void +cmd_set_vf_allmulti_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_allmulti_result *res = parsed_result; + int ret = -ENOTSUP; + + __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_set_vf_multicast_promisc(res->port_id, + res->vf_id, is_on); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d\n", res->vf_id); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_allmulti = { + .f = cmd_set_vf_allmulti_parsed, + .data = NULL, + .help_str = "set vf allmulti <port_id> <vf_id> on|off: " + "Set multicast promiscuous mode for a VF from the PF", + .tokens = { + (void *)&cmd_vf_allmulti_set, + (void *)&cmd_vf_allmulti_vf, + (void *)&cmd_vf_allmulti_allmulti, + (void *)&cmd_vf_allmulti_port_id, + (void *)&cmd_vf_allmulti_vf_id, + (void *)&cmd_vf_allmulti_on_off, + NULL, + }, +}; + +/* vf broadcast mode configuration */ + +/* Common result structure for vf broadcast */ +struct cmd_set_vf_broadcast_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t broadcast; + portid_t port_id; + uint16_t vf_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for vf broadcast enable disable */ +cmdline_parse_token_string_t cmd_set_vf_broadcast_set = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_broadcast_result, + set, "set"); +cmdline_parse_token_string_t cmd_set_vf_broadcast_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_broadcast_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_set_vf_broadcast_broadcast = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_broadcast_result, + broadcast, "broadcast"); +cmdline_parse_token_num_t cmd_set_vf_broadcast_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_set_vf_broadcast_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_set_vf_broadcast_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_set_vf_broadcast_result, + vf_id, UINT16); +cmdline_parse_token_string_t cmd_set_vf_broadcast_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_broadcast_result, + on_off, "on#off"); + +static void +cmd_set_vf_broadcast_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_vf_broadcast_result *res = parsed_result; + int ret = -ENOTSUP; + + __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_set_vf_broadcast(res->port_id, + res->vf_id, is_on); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d or is_on %d\n", res->vf_id, is_on); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_broadcast = { + .f = cmd_set_vf_broadcast_parsed, + .data = NULL, + .help_str = "set vf broadcast <port_id> <vf_id> on|off", + .tokens = { + (void *)&cmd_set_vf_broadcast_set, + (void *)&cmd_set_vf_broadcast_vf, + (void *)&cmd_set_vf_broadcast_broadcast, + (void *)&cmd_set_vf_broadcast_port_id, + (void *)&cmd_set_vf_broadcast_vf_id, + (void *)&cmd_set_vf_broadcast_on_off, + NULL, + }, +}; + +/* vf vlan tag configuration */ + +/* Common result structure for vf vlan tag */ +struct cmd_set_vf_vlan_tag_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t vlan; + cmdline_fixed_string_t tag; + portid_t port_id; + uint16_t vf_id; + cmdline_fixed_string_t on_off; +}; + +/* Common CLI fields for vf vlan tag enable disable */ +cmdline_parse_token_string_t cmd_set_vf_vlan_tag_set = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_vlan_tag_result, + set, "set"); +cmdline_parse_token_string_t cmd_set_vf_vlan_tag_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_vlan_tag_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_set_vf_vlan_tag_vlan = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_vlan_tag_result, + vlan, "vlan"); +cmdline_parse_token_string_t cmd_set_vf_vlan_tag_tag = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_vlan_tag_result, + tag, "tag"); +cmdline_parse_token_num_t cmd_set_vf_vlan_tag_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_set_vf_vlan_tag_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_set_vf_vlan_tag_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_set_vf_vlan_tag_result, + vf_id, UINT16); +cmdline_parse_token_string_t cmd_set_vf_vlan_tag_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_set_vf_vlan_tag_result, + on_off, "on#off"); + +static void +cmd_set_vf_vlan_tag_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_vf_vlan_tag_result *res = parsed_result; + int ret = -ENOTSUP; + + __rte_unused int is_on = (strcmp(res->on_off, "on") == 0) ? 
1 : 0; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_set_vf_vlan_tag(res->port_id, + res->vf_id, is_on); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d or is_on %d\n", res->vf_id, is_on); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_set_vf_vlan_tag = { + .f = cmd_set_vf_vlan_tag_parsed, + .data = NULL, + .help_str = "set vf vlan tag <port_id> <vf_id> on|off", + .tokens = { + (void *)&cmd_set_vf_vlan_tag_set, + (void *)&cmd_set_vf_vlan_tag_vf, + (void *)&cmd_set_vf_vlan_tag_vlan, + (void *)&cmd_set_vf_vlan_tag_tag, + (void *)&cmd_set_vf_vlan_tag_port_id, + (void *)&cmd_set_vf_vlan_tag_vf_id, + (void *)&cmd_set_vf_vlan_tag_on_off, + NULL, + }, +}; + +/* Common definition of VF and TC TX bandwidth configuration */ +struct cmd_vf_tc_bw_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t tc; + cmdline_fixed_string_t tx; + cmdline_fixed_string_t min_bw; + cmdline_fixed_string_t max_bw; + cmdline_fixed_string_t strict_link_prio; + portid_t port_id; + uint16_t vf_id; + uint8_t tc_no; + uint32_t bw; + cmdline_fixed_string_t bw_list; + uint8_t tc_map; +}; + +cmdline_parse_token_string_t cmd_vf_tc_bw_set = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_tc_bw_result, + set, "set"); +cmdline_parse_token_string_t cmd_vf_tc_bw_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_tc_bw_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_vf_tc_bw_tc = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_tc_bw_result, + tc, "tc"); +cmdline_parse_token_string_t cmd_vf_tc_bw_tx = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_tc_bw_result, + tx, "tx"); +cmdline_parse_token_string_t cmd_vf_tc_bw_strict_link_prio = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_tc_bw_result, + strict_link_prio, "strict-link-priority"); +cmdline_parse_token_string_t cmd_vf_tc_bw_min_bw = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_tc_bw_result, + min_bw, "min-bandwidth"); +cmdline_parse_token_string_t cmd_vf_tc_bw_max_bw = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_tc_bw_result, + max_bw, "max-bandwidth"); +cmdline_parse_token_num_t cmd_vf_tc_bw_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_tc_bw_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_vf_tc_bw_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_tc_bw_result, + vf_id, UINT16); +cmdline_parse_token_num_t cmd_vf_tc_bw_tc_no = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_tc_bw_result, + tc_no, UINT8); +cmdline_parse_token_num_t cmd_vf_tc_bw_bw = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_tc_bw_result, + bw, UINT32); +cmdline_parse_token_string_t cmd_vf_tc_bw_bw_list = + TOKEN_STRING_INITIALIZER + (struct cmd_vf_tc_bw_result, + bw_list, NULL); +cmdline_parse_token_num_t cmd_vf_tc_bw_tc_map = + TOKEN_NUM_INITIALIZER + (struct cmd_vf_tc_bw_result, + tc_map, UINT8); + +/* VF max bandwidth setting */ +static void +cmd_vf_max_bw_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_tc_bw_result *res = parsed_result; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_set_vf_max_bw(res->port_id, + res->vf_id, res->bw); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid 
vf_id %d or bandwidth %d\n", + res->vf_id, res->bw); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_vf_max_bw = { + .f = cmd_vf_max_bw_parsed, + .data = NULL, + .help_str = "set vf tx max-bandwidth <port_id> <vf_id> <bandwidth>", + .tokens = { + (void *)&cmd_vf_tc_bw_set, + (void *)&cmd_vf_tc_bw_vf, + (void *)&cmd_vf_tc_bw_tx, + (void *)&cmd_vf_tc_bw_max_bw, + (void *)&cmd_vf_tc_bw_port_id, + (void *)&cmd_vf_tc_bw_vf_id, + (void *)&cmd_vf_tc_bw_bw, + NULL, + }, +}; + +static int +vf_tc_min_bw_parse_bw_list(uint8_t *bw_list, + uint8_t *tc_num, + char *str) +{ + uint32_t size; + const char *p, *p0 = str; + char s[256]; + char *end; + char *str_fld[16]; + uint16_t i; + int ret; + + p = strchr(p0, '('); + if (p == NULL) { + printf("The bandwidth-list should be '(bw1, bw2, ...)'\n"); + return -1; + } + p++; + p0 = strchr(p, ')'); + if (p0 == NULL) { + printf("The bandwidth-list should be '(bw1, bw2, ...)'\n"); + return -1; + } + size = p0 - p; + if (size >= sizeof(s)) { + printf("The string size exceeds the internal buffer size\n"); + return -1; + } + snprintf(s, sizeof(s), "%.*s", size, p); + ret = rte_strsplit(s, sizeof(s), str_fld, 16, ','); + if (ret <= 0) { + printf("Failed to get the bandwidth list. "); + return -1; + } + *tc_num = ret; + for (i = 0; i < ret; i++) + bw_list[i] = (uint8_t)strtoul(str_fld[i], &end, 0); + + return 0; +} + +/* TC min bandwidth setting */ +static void +cmd_vf_tc_min_bw_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_tc_bw_result *res = parsed_result; + uint8_t tc_num; + uint8_t bw[16]; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + ret = vf_tc_min_bw_parse_bw_list(bw, &tc_num, res->bw_list); + if (ret) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_set_vf_tc_bw_alloc(res->port_id, res->vf_id, + tc_num, bw); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d or bandwidth\n", res->vf_id); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_vf_tc_min_bw = { + .f = cmd_vf_tc_min_bw_parsed, + .data = NULL, + .help_str = "set vf tc tx min-bandwidth <port_id> <vf_id>" + " <bw1, bw2, ...>", + .tokens = { + (void *)&cmd_vf_tc_bw_set, + (void *)&cmd_vf_tc_bw_vf, + (void *)&cmd_vf_tc_bw_tc, + (void *)&cmd_vf_tc_bw_tx, + (void *)&cmd_vf_tc_bw_min_bw, + (void *)&cmd_vf_tc_bw_port_id, + (void *)&cmd_vf_tc_bw_vf_id, + (void *)&cmd_vf_tc_bw_bw_list, + NULL, + }, +}; + +static void +cmd_tc_min_bw_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_tc_bw_result *res = parsed_result; + struct rte_port *port; + uint8_t tc_num; + uint8_t bw[16]; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + port = &ports[res->port_id]; + /** Check if the port is not started **/ + if (port->port_status != RTE_PORT_STOPPED) { + printf("Please stop port %d first\n", res->port_id); + return; + } + + ret = vf_tc_min_bw_parse_bw_list(bw, &tc_num, res->bw_list); + if (ret) + return; + +#ifdef RTE_LIBRTE_IXGBE_PMD + ret = 
rte_pmd_ixgbe_set_tc_bw_alloc(res->port_id, tc_num, bw); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid bandwidth\n"); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_tc_min_bw = { + .f = cmd_tc_min_bw_parsed, + .data = NULL, + .help_str = "set tc tx min-bandwidth <port_id> <bw1, bw2, ...>", + .tokens = { + (void *)&cmd_vf_tc_bw_set, + (void *)&cmd_vf_tc_bw_tc, + (void *)&cmd_vf_tc_bw_tx, + (void *)&cmd_vf_tc_bw_min_bw, + (void *)&cmd_vf_tc_bw_port_id, + (void *)&cmd_vf_tc_bw_bw_list, + NULL, + }, +}; + +/* TC max bandwidth setting */ +static void +cmd_vf_tc_max_bw_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_tc_bw_result *res = parsed_result; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_set_vf_tc_max_bw(res->port_id, res->vf_id, + res->tc_no, res->bw); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d, tc_no %d or bandwidth %d\n", + res->vf_id, res->tc_no, res->bw); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_vf_tc_max_bw = { + .f = cmd_vf_tc_max_bw_parsed, + .data = NULL, + .help_str = "set vf tc tx max-bandwidth <port_id> <vf_id> <tc_no>" + " <bandwidth>", + .tokens = { + (void *)&cmd_vf_tc_bw_set, + (void *)&cmd_vf_tc_bw_vf, + (void *)&cmd_vf_tc_bw_tc, + (void *)&cmd_vf_tc_bw_tx, + (void *)&cmd_vf_tc_bw_max_bw, + (void *)&cmd_vf_tc_bw_port_id, + (void *)&cmd_vf_tc_bw_vf_id, + (void *)&cmd_vf_tc_bw_tc_no, + (void *)&cmd_vf_tc_bw_bw, + NULL, + }, +}; + + +#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED + +/* *** Set Port default Traffic Management Hierarchy *** */ +struct cmd_set_port_tm_hierarchy_default_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t hierarchy; + cmdline_fixed_string_t def; + portid_t port_id; +}; + +cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_set = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_hierarchy_default_result, set, "set"); +cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_port = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_hierarchy_default_result, port, "port"); +cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_hierarchy_default_result, tm, "tm"); +cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_hierarchy = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_hierarchy_default_result, + hierarchy, "hierarchy"); +cmdline_parse_token_string_t cmd_set_port_tm_hierarchy_default_default = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_hierarchy_default_result, + def, "default"); +cmdline_parse_token_num_t cmd_set_port_tm_hierarchy_default_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_tm_hierarchy_default_result, + port_id, UINT16); + +static void cmd_set_port_tm_hierarchy_default_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct 
cmd_set_port_tm_hierarchy_default_result *res = parsed_result; + struct rte_port *p; + portid_t port_id = res->port_id; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + p = &ports[port_id]; + + /* Forward mode: tm */ + if (strcmp(cur_fwd_config.fwd_eng->fwd_mode_name, "softnic")) { + printf(" softnicfwd mode not enabled(error)\n"); + return; + } + + /* Set the default tm hierarchy */ + p->softport.default_tm_hierarchy_enable = 1; +} + +cmdline_parse_inst_t cmd_set_port_tm_hierarchy_default = { + .f = cmd_set_port_tm_hierarchy_default_parsed, + .data = NULL, + .help_str = "set port tm hierarchy default <port_id>", + .tokens = { + (void *)&cmd_set_port_tm_hierarchy_default_set, + (void *)&cmd_set_port_tm_hierarchy_default_port, + (void *)&cmd_set_port_tm_hierarchy_default_tm, + (void *)&cmd_set_port_tm_hierarchy_default_hierarchy, + (void *)&cmd_set_port_tm_hierarchy_default_default, + (void *)&cmd_set_port_tm_hierarchy_default_port_id, + NULL, + }, +}; +#endif + +/** Set VXLAN encapsulation details */ +struct cmd_set_vxlan_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t vxlan; + cmdline_fixed_string_t pos_token; + cmdline_fixed_string_t ip_version; + uint32_t vlan_present:1; + uint32_t vni; + uint16_t udp_src; + uint16_t udp_dst; + cmdline_ipaddr_t ip_src; + cmdline_ipaddr_t ip_dst; + uint16_t tci; + uint8_t tos; + uint8_t ttl; + struct rte_ether_addr eth_src; + struct rte_ether_addr eth_dst; +}; + +cmdline_parse_token_string_t cmd_set_vxlan_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, set, "set"); +cmdline_parse_token_string_t cmd_set_vxlan_vxlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan, "vxlan"); +cmdline_parse_token_string_t cmd_set_vxlan_vxlan_tos_ttl = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan, + "vxlan-tos-ttl"); +cmdline_parse_token_string_t cmd_set_vxlan_vxlan_with_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, vxlan, + "vxlan-with-vlan"); +cmdline_parse_token_string_t cmd_set_vxlan_ip_version = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "ip-version"); +cmdline_parse_token_string_t cmd_set_vxlan_ip_version_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, ip_version, + "ipv4#ipv6"); +cmdline_parse_token_string_t cmd_set_vxlan_vni = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "vni"); +cmdline_parse_token_num_t cmd_set_vxlan_vni_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, vni, UINT32); +cmdline_parse_token_string_t cmd_set_vxlan_udp_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "udp-src"); +cmdline_parse_token_num_t cmd_set_vxlan_udp_src_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_src, UINT16); +cmdline_parse_token_string_t cmd_set_vxlan_udp_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "udp-dst"); +cmdline_parse_token_num_t cmd_set_vxlan_udp_dst_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, udp_dst, UINT16); +cmdline_parse_token_string_t cmd_set_vxlan_ip_tos = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "ip-tos"); +cmdline_parse_token_num_t cmd_set_vxlan_ip_tos_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, tos, UINT8); +cmdline_parse_token_string_t cmd_set_vxlan_ip_ttl = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "ip-ttl"); +cmdline_parse_token_num_t cmd_set_vxlan_ip_ttl_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, ttl, 
UINT8); +cmdline_parse_token_string_t cmd_set_vxlan_ip_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "ip-src"); +cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_src_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_src); +cmdline_parse_token_string_t cmd_set_vxlan_ip_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "ip-dst"); +cmdline_parse_token_ipaddr_t cmd_set_vxlan_ip_dst_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_vxlan_result, ip_dst); +cmdline_parse_token_string_t cmd_set_vxlan_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "vlan-tci"); +cmdline_parse_token_num_t cmd_set_vxlan_vlan_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_vxlan_result, tci, UINT16); +cmdline_parse_token_string_t cmd_set_vxlan_eth_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "eth-src"); +cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_src_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_src); +cmdline_parse_token_string_t cmd_set_vxlan_eth_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_vxlan_result, pos_token, + "eth-dst"); +cmdline_parse_token_etheraddr_t cmd_set_vxlan_eth_dst_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_vxlan_result, eth_dst); + +static void cmd_set_vxlan_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_vxlan_result *res = parsed_result; + union { + uint32_t vxlan_id; + uint8_t vni[4]; + } id = { + .vxlan_id = rte_cpu_to_be_32(res->vni) & RTE_BE32(0x00ffffff), + }; + + vxlan_encap_conf.select_tos_ttl = 0; + if (strcmp(res->vxlan, "vxlan") == 0) + vxlan_encap_conf.select_vlan = 0; + else if (strcmp(res->vxlan, "vxlan-with-vlan") == 0) + vxlan_encap_conf.select_vlan = 1; + else if (strcmp(res->vxlan, "vxlan-tos-ttl") == 0) { + vxlan_encap_conf.select_vlan = 0; + vxlan_encap_conf.select_tos_ttl = 1; + } + if (strcmp(res->ip_version, "ipv4") == 0) + vxlan_encap_conf.select_ipv4 = 1; + else if (strcmp(res->ip_version, "ipv6") == 0) + vxlan_encap_conf.select_ipv4 = 0; + else + return; + rte_memcpy(vxlan_encap_conf.vni, &id.vni[1], 3); + vxlan_encap_conf.udp_src = rte_cpu_to_be_16(res->udp_src); + vxlan_encap_conf.udp_dst = rte_cpu_to_be_16(res->udp_dst); + vxlan_encap_conf.ip_tos = res->tos; + vxlan_encap_conf.ip_ttl = res->ttl; + if (vxlan_encap_conf.select_ipv4) { + IPV4_ADDR_TO_UINT(res->ip_src, vxlan_encap_conf.ipv4_src); + IPV4_ADDR_TO_UINT(res->ip_dst, vxlan_encap_conf.ipv4_dst); + } else { + IPV6_ADDR_TO_ARRAY(res->ip_src, vxlan_encap_conf.ipv6_src); + IPV6_ADDR_TO_ARRAY(res->ip_dst, vxlan_encap_conf.ipv6_dst); + } + if (vxlan_encap_conf.select_vlan) + vxlan_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci); + rte_memcpy(vxlan_encap_conf.eth_src, res->eth_src.addr_bytes, + RTE_ETHER_ADDR_LEN); + rte_memcpy(vxlan_encap_conf.eth_dst, res->eth_dst.addr_bytes, + RTE_ETHER_ADDR_LEN); +} + +cmdline_parse_inst_t cmd_set_vxlan = { + .f = cmd_set_vxlan_parsed, + .data = NULL, + .help_str = "set vxlan ip-version ipv4|ipv6 vni <vni> udp-src" + " <udp-src> udp-dst <udp-dst> ip-src <ip-src> ip-dst <ip-dst>" + " eth-src <eth-src> eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_vxlan_set, + (void *)&cmd_set_vxlan_vxlan, + (void *)&cmd_set_vxlan_ip_version, + (void *)&cmd_set_vxlan_ip_version_value, + (void *)&cmd_set_vxlan_vni, + (void *)&cmd_set_vxlan_vni_value, + (void *)&cmd_set_vxlan_udp_src, + (void *)&cmd_set_vxlan_udp_src_value, + (void *)&cmd_set_vxlan_udp_dst, + (void 
*)&cmd_set_vxlan_udp_dst_value, + (void *)&cmd_set_vxlan_ip_src, + (void *)&cmd_set_vxlan_ip_src_value, + (void *)&cmd_set_vxlan_ip_dst, + (void *)&cmd_set_vxlan_ip_dst_value, + (void *)&cmd_set_vxlan_eth_src, + (void *)&cmd_set_vxlan_eth_src_value, + (void *)&cmd_set_vxlan_eth_dst, + (void *)&cmd_set_vxlan_eth_dst_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_vxlan_tos_ttl = { + .f = cmd_set_vxlan_parsed, + .data = NULL, + .help_str = "set vxlan-tos-ttl ip-version ipv4|ipv6 vni <vni> udp-src" + " <udp-src> udp-dst <udp-dst> ip-tos <ip-tos> ip-ttl <ip-ttl>" + " ip-src <ip-src> ip-dst <ip-dst> eth-src <eth-src>" + " eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_vxlan_set, + (void *)&cmd_set_vxlan_vxlan_tos_ttl, + (void *)&cmd_set_vxlan_ip_version, + (void *)&cmd_set_vxlan_ip_version_value, + (void *)&cmd_set_vxlan_vni, + (void *)&cmd_set_vxlan_vni_value, + (void *)&cmd_set_vxlan_udp_src, + (void *)&cmd_set_vxlan_udp_src_value, + (void *)&cmd_set_vxlan_udp_dst, + (void *)&cmd_set_vxlan_udp_dst_value, + (void *)&cmd_set_vxlan_ip_tos, + (void *)&cmd_set_vxlan_ip_tos_value, + (void *)&cmd_set_vxlan_ip_ttl, + (void *)&cmd_set_vxlan_ip_ttl_value, + (void *)&cmd_set_vxlan_ip_src, + (void *)&cmd_set_vxlan_ip_src_value, + (void *)&cmd_set_vxlan_ip_dst, + (void *)&cmd_set_vxlan_ip_dst_value, + (void *)&cmd_set_vxlan_eth_src, + (void *)&cmd_set_vxlan_eth_src_value, + (void *)&cmd_set_vxlan_eth_dst, + (void *)&cmd_set_vxlan_eth_dst_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_vxlan_with_vlan = { + .f = cmd_set_vxlan_parsed, + .data = NULL, + .help_str = "set vxlan-with-vlan ip-version ipv4|ipv6 vni <vni>" + " udp-src <udp-src> udp-dst <udp-dst> ip-src <ip-src> ip-dst" + " <ip-dst> vlan-tci <vlan-tci> eth-src <eth-src> eth-dst" + " <eth-dst>", + .tokens = { + (void *)&cmd_set_vxlan_set, + (void *)&cmd_set_vxlan_vxlan_with_vlan, + (void *)&cmd_set_vxlan_ip_version, + (void *)&cmd_set_vxlan_ip_version_value, + (void *)&cmd_set_vxlan_vni, + (void *)&cmd_set_vxlan_vni_value, + (void *)&cmd_set_vxlan_udp_src, + (void *)&cmd_set_vxlan_udp_src_value, + (void *)&cmd_set_vxlan_udp_dst, + (void *)&cmd_set_vxlan_udp_dst_value, + (void *)&cmd_set_vxlan_ip_src, + (void *)&cmd_set_vxlan_ip_src_value, + (void *)&cmd_set_vxlan_ip_dst, + (void *)&cmd_set_vxlan_ip_dst_value, + (void *)&cmd_set_vxlan_vlan, + (void *)&cmd_set_vxlan_vlan_value, + (void *)&cmd_set_vxlan_eth_src, + (void *)&cmd_set_vxlan_eth_src_value, + (void *)&cmd_set_vxlan_eth_dst, + (void *)&cmd_set_vxlan_eth_dst_value, + NULL, + }, +}; + +/** Set NVGRE encapsulation details */ +struct cmd_set_nvgre_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t nvgre; + cmdline_fixed_string_t pos_token; + cmdline_fixed_string_t ip_version; + uint32_t tni; + cmdline_ipaddr_t ip_src; + cmdline_ipaddr_t ip_dst; + uint16_t tci; + struct rte_ether_addr eth_src; + struct rte_ether_addr eth_dst; +}; + +cmdline_parse_token_string_t cmd_set_nvgre_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, set, "set"); +cmdline_parse_token_string_t cmd_set_nvgre_nvgre = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, nvgre, "nvgre"); +cmdline_parse_token_string_t cmd_set_nvgre_nvgre_with_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, nvgre, + "nvgre-with-vlan"); +cmdline_parse_token_string_t cmd_set_nvgre_ip_version = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "ip-version"); +cmdline_parse_token_string_t cmd_set_nvgre_ip_version_value = + TOKEN_STRING_INITIALIZER(struct 
cmd_set_nvgre_result, ip_version, + "ipv4#ipv6"); +cmdline_parse_token_string_t cmd_set_nvgre_tni = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "tni"); +cmdline_parse_token_num_t cmd_set_nvgre_tni_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_nvgre_result, tni, UINT32); +cmdline_parse_token_string_t cmd_set_nvgre_ip_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "ip-src"); +cmdline_parse_token_num_t cmd_set_nvgre_ip_src_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_src); +cmdline_parse_token_string_t cmd_set_nvgre_ip_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "ip-dst"); +cmdline_parse_token_ipaddr_t cmd_set_nvgre_ip_dst_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_nvgre_result, ip_dst); +cmdline_parse_token_string_t cmd_set_nvgre_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "vlan-tci"); +cmdline_parse_token_num_t cmd_set_nvgre_vlan_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_nvgre_result, tci, UINT16); +cmdline_parse_token_string_t cmd_set_nvgre_eth_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "eth-src"); +cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_src_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_src); +cmdline_parse_token_string_t cmd_set_nvgre_eth_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_nvgre_result, pos_token, + "eth-dst"); +cmdline_parse_token_etheraddr_t cmd_set_nvgre_eth_dst_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_nvgre_result, eth_dst); + +static void cmd_set_nvgre_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_nvgre_result *res = parsed_result; + union { + uint32_t nvgre_tni; + uint8_t tni[4]; + } id = { + .nvgre_tni = rte_cpu_to_be_32(res->tni) & RTE_BE32(0x00ffffff), + }; + + if (strcmp(res->nvgre, "nvgre") == 0) + nvgre_encap_conf.select_vlan = 0; + else if (strcmp(res->nvgre, "nvgre-with-vlan") == 0) + nvgre_encap_conf.select_vlan = 1; + if (strcmp(res->ip_version, "ipv4") == 0) + nvgre_encap_conf.select_ipv4 = 1; + else if (strcmp(res->ip_version, "ipv6") == 0) + nvgre_encap_conf.select_ipv4 = 0; + else + return; + rte_memcpy(nvgre_encap_conf.tni, &id.tni[1], 3); + if (nvgre_encap_conf.select_ipv4) { + IPV4_ADDR_TO_UINT(res->ip_src, nvgre_encap_conf.ipv4_src); + IPV4_ADDR_TO_UINT(res->ip_dst, nvgre_encap_conf.ipv4_dst); + } else { + IPV6_ADDR_TO_ARRAY(res->ip_src, nvgre_encap_conf.ipv6_src); + IPV6_ADDR_TO_ARRAY(res->ip_dst, nvgre_encap_conf.ipv6_dst); + } + if (nvgre_encap_conf.select_vlan) + nvgre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci); + rte_memcpy(nvgre_encap_conf.eth_src, res->eth_src.addr_bytes, + RTE_ETHER_ADDR_LEN); + rte_memcpy(nvgre_encap_conf.eth_dst, res->eth_dst.addr_bytes, + RTE_ETHER_ADDR_LEN); +} + +cmdline_parse_inst_t cmd_set_nvgre = { + .f = cmd_set_nvgre_parsed, + .data = NULL, + .help_str = "set nvgre ip-version <ipv4|ipv6> tni <tni> ip-src" + " <ip-src> ip-dst <ip-dst> eth-src <eth-src>" + " eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_nvgre_set, + (void *)&cmd_set_nvgre_nvgre, + (void *)&cmd_set_nvgre_ip_version, + (void *)&cmd_set_nvgre_ip_version_value, + (void *)&cmd_set_nvgre_tni, + (void *)&cmd_set_nvgre_tni_value, + (void *)&cmd_set_nvgre_ip_src, + (void *)&cmd_set_nvgre_ip_src_value, + (void *)&cmd_set_nvgre_ip_dst, + (void *)&cmd_set_nvgre_ip_dst_value, + (void *)&cmd_set_nvgre_eth_src, + (void *)&cmd_set_nvgre_eth_src_value, + (void 
*)&cmd_set_nvgre_eth_dst, + (void *)&cmd_set_nvgre_eth_dst_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_nvgre_with_vlan = { + .f = cmd_set_nvgre_parsed, + .data = NULL, + .help_str = "set nvgre-with-vlan ip-version <ipv4|ipv6> tni <tni>" + " ip-src <ip-src> ip-dst <ip-dst> vlan-tci <vlan-tci>" + " eth-src <eth-src> eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_nvgre_set, + (void *)&cmd_set_nvgre_nvgre_with_vlan, + (void *)&cmd_set_nvgre_ip_version, + (void *)&cmd_set_nvgre_ip_version_value, + (void *)&cmd_set_nvgre_tni, + (void *)&cmd_set_nvgre_tni_value, + (void *)&cmd_set_nvgre_ip_src, + (void *)&cmd_set_nvgre_ip_src_value, + (void *)&cmd_set_nvgre_ip_dst, + (void *)&cmd_set_nvgre_ip_dst_value, + (void *)&cmd_set_nvgre_vlan, + (void *)&cmd_set_nvgre_vlan_value, + (void *)&cmd_set_nvgre_eth_src, + (void *)&cmd_set_nvgre_eth_src_value, + (void *)&cmd_set_nvgre_eth_dst, + (void *)&cmd_set_nvgre_eth_dst_value, + NULL, + }, +}; + +/** Set L2 encapsulation details */ +struct cmd_set_l2_encap_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t l2_encap; + cmdline_fixed_string_t pos_token; + cmdline_fixed_string_t ip_version; + uint32_t vlan_present:1; + uint16_t tci; + struct rte_ether_addr eth_src; + struct rte_ether_addr eth_dst; +}; + +cmdline_parse_token_string_t cmd_set_l2_encap_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, set, "set"); +cmdline_parse_token_string_t cmd_set_l2_encap_l2_encap = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, l2_encap, "l2_encap"); +cmdline_parse_token_string_t cmd_set_l2_encap_l2_encap_with_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, l2_encap, + "l2_encap-with-vlan"); +cmdline_parse_token_string_t cmd_set_l2_encap_ip_version = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, pos_token, + "ip-version"); +cmdline_parse_token_string_t cmd_set_l2_encap_ip_version_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, ip_version, + "ipv4#ipv6"); +cmdline_parse_token_string_t cmd_set_l2_encap_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, pos_token, + "vlan-tci"); +cmdline_parse_token_num_t cmd_set_l2_encap_vlan_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_l2_encap_result, tci, UINT16); +cmdline_parse_token_string_t cmd_set_l2_encap_eth_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, pos_token, + "eth-src"); +cmdline_parse_token_etheraddr_t cmd_set_l2_encap_eth_src_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_l2_encap_result, eth_src); +cmdline_parse_token_string_t cmd_set_l2_encap_eth_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_encap_result, pos_token, + "eth-dst"); +cmdline_parse_token_etheraddr_t cmd_set_l2_encap_eth_dst_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_l2_encap_result, eth_dst); + +static void cmd_set_l2_encap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_l2_encap_result *res = parsed_result; + + if (strcmp(res->l2_encap, "l2_encap") == 0) + l2_encap_conf.select_vlan = 0; + else if (strcmp(res->l2_encap, "l2_encap-with-vlan") == 0) + l2_encap_conf.select_vlan = 1; + if (strcmp(res->ip_version, "ipv4") == 0) + l2_encap_conf.select_ipv4 = 1; + else if (strcmp(res->ip_version, "ipv6") == 0) + l2_encap_conf.select_ipv4 = 0; + else + return; + if (l2_encap_conf.select_vlan) + l2_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci); + rte_memcpy(l2_encap_conf.eth_src, res->eth_src.addr_bytes, + RTE_ETHER_ADDR_LEN); + 
rte_memcpy(l2_encap_conf.eth_dst, res->eth_dst.addr_bytes, + RTE_ETHER_ADDR_LEN); +} + +cmdline_parse_inst_t cmd_set_l2_encap = { + .f = cmd_set_l2_encap_parsed, + .data = NULL, + .help_str = "set l2_encap ip-version ipv4|ipv6" + " eth-src <eth-src> eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_l2_encap_set, + (void *)&cmd_set_l2_encap_l2_encap, + (void *)&cmd_set_l2_encap_ip_version, + (void *)&cmd_set_l2_encap_ip_version_value, + (void *)&cmd_set_l2_encap_eth_src, + (void *)&cmd_set_l2_encap_eth_src_value, + (void *)&cmd_set_l2_encap_eth_dst, + (void *)&cmd_set_l2_encap_eth_dst_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_l2_encap_with_vlan = { + .f = cmd_set_l2_encap_parsed, + .data = NULL, + .help_str = "set l2_encap-with-vlan ip-version ipv4|ipv6" + " vlan-tci <vlan-tci> eth-src <eth-src> eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_l2_encap_set, + (void *)&cmd_set_l2_encap_l2_encap_with_vlan, + (void *)&cmd_set_l2_encap_ip_version, + (void *)&cmd_set_l2_encap_ip_version_value, + (void *)&cmd_set_l2_encap_vlan, + (void *)&cmd_set_l2_encap_vlan_value, + (void *)&cmd_set_l2_encap_eth_src, + (void *)&cmd_set_l2_encap_eth_src_value, + (void *)&cmd_set_l2_encap_eth_dst, + (void *)&cmd_set_l2_encap_eth_dst_value, + NULL, + }, +}; + +/** Set L2 decapsulation details */ +struct cmd_set_l2_decap_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t l2_decap; + cmdline_fixed_string_t pos_token; + uint32_t vlan_present:1; +}; + +cmdline_parse_token_string_t cmd_set_l2_decap_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_decap_result, set, "set"); +cmdline_parse_token_string_t cmd_set_l2_decap_l2_decap = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_decap_result, l2_decap, + "l2_decap"); +cmdline_parse_token_string_t cmd_set_l2_decap_l2_decap_with_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_l2_decap_result, l2_decap, + "l2_decap-with-vlan"); + +static void cmd_set_l2_decap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_l2_decap_result *res = parsed_result; + + if (strcmp(res->l2_decap, "l2_decap") == 0) + l2_decap_conf.select_vlan = 0; + else if (strcmp(res->l2_decap, "l2_decap-with-vlan") == 0) + l2_decap_conf.select_vlan = 1; +} + +cmdline_parse_inst_t cmd_set_l2_decap = { + .f = cmd_set_l2_decap_parsed, + .data = NULL, + .help_str = "set l2_decap", + .tokens = { + (void *)&cmd_set_l2_decap_set, + (void *)&cmd_set_l2_decap_l2_decap, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_l2_decap_with_vlan = { + .f = cmd_set_l2_decap_parsed, + .data = NULL, + .help_str = "set l2_decap-with-vlan", + .tokens = { + (void *)&cmd_set_l2_decap_set, + (void *)&cmd_set_l2_decap_l2_decap_with_vlan, + NULL, + }, +}; + +/** Set MPLSoGRE encapsulation details */ +struct cmd_set_mplsogre_encap_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t mplsogre; + cmdline_fixed_string_t pos_token; + cmdline_fixed_string_t ip_version; + uint32_t vlan_present:1; + uint32_t label; + cmdline_ipaddr_t ip_src; + cmdline_ipaddr_t ip_dst; + uint16_t tci; + struct rte_ether_addr eth_src; + struct rte_ether_addr eth_dst; +}; + +cmdline_parse_token_string_t cmd_set_mplsogre_encap_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, set, + "set"); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_mplsogre_encap = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, mplsogre, + "mplsogre_encap"); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_mplsogre_encap_with_vlan = + 
TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, + mplsogre, "mplsogre_encap-with-vlan"); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_ip_version = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, + pos_token, "ip-version"); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_ip_version_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, + ip_version, "ipv4#ipv6"); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_label = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, + pos_token, "label"); +cmdline_parse_token_num_t cmd_set_mplsogre_encap_label_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_mplsogre_encap_result, label, + UINT32); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_ip_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, + pos_token, "ip-src"); +cmdline_parse_token_ipaddr_t cmd_set_mplsogre_encap_ip_src_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsogre_encap_result, ip_src); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_ip_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, + pos_token, "ip-dst"); +cmdline_parse_token_ipaddr_t cmd_set_mplsogre_encap_ip_dst_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsogre_encap_result, ip_dst); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, + pos_token, "vlan-tci"); +cmdline_parse_token_num_t cmd_set_mplsogre_encap_vlan_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_mplsogre_encap_result, tci, + UINT16); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_eth_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, + pos_token, "eth-src"); +cmdline_parse_token_etheraddr_t cmd_set_mplsogre_encap_eth_src_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsogre_encap_result, + eth_src); +cmdline_parse_token_string_t cmd_set_mplsogre_encap_eth_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_encap_result, + pos_token, "eth-dst"); +cmdline_parse_token_etheraddr_t cmd_set_mplsogre_encap_eth_dst_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsogre_encap_result, + eth_dst); + +static void cmd_set_mplsogre_encap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_mplsogre_encap_result *res = parsed_result; + union { + uint32_t mplsogre_label; + uint8_t label[4]; + } id = { + .mplsogre_label = rte_cpu_to_be_32(res->label<<12), + }; + + if (strcmp(res->mplsogre, "mplsogre_encap") == 0) + mplsogre_encap_conf.select_vlan = 0; + else if (strcmp(res->mplsogre, "mplsogre_encap-with-vlan") == 0) + mplsogre_encap_conf.select_vlan = 1; + if (strcmp(res->ip_version, "ipv4") == 0) + mplsogre_encap_conf.select_ipv4 = 1; + else if (strcmp(res->ip_version, "ipv6") == 0) + mplsogre_encap_conf.select_ipv4 = 0; + else + return; + rte_memcpy(mplsogre_encap_conf.label, &id.label, 3); + if (mplsogre_encap_conf.select_ipv4) { + IPV4_ADDR_TO_UINT(res->ip_src, mplsogre_encap_conf.ipv4_src); + IPV4_ADDR_TO_UINT(res->ip_dst, mplsogre_encap_conf.ipv4_dst); + } else { + IPV6_ADDR_TO_ARRAY(res->ip_src, mplsogre_encap_conf.ipv6_src); + IPV6_ADDR_TO_ARRAY(res->ip_dst, mplsogre_encap_conf.ipv6_dst); + } + if (mplsogre_encap_conf.select_vlan) + mplsogre_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci); + rte_memcpy(mplsogre_encap_conf.eth_src, res->eth_src.addr_bytes, + RTE_ETHER_ADDR_LEN); + rte_memcpy(mplsogre_encap_conf.eth_dst, res->eth_dst.addr_bytes, + 
RTE_ETHER_ADDR_LEN); +} + +cmdline_parse_inst_t cmd_set_mplsogre_encap = { + .f = cmd_set_mplsogre_encap_parsed, + .data = NULL, + .help_str = "set mplsogre_encap ip-version ipv4|ipv6 label <label>" + " ip-src <ip-src> ip-dst <ip-dst> eth-src <eth-src>" + " eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_mplsogre_encap_set, + (void *)&cmd_set_mplsogre_encap_mplsogre_encap, + (void *)&cmd_set_mplsogre_encap_ip_version, + (void *)&cmd_set_mplsogre_encap_ip_version_value, + (void *)&cmd_set_mplsogre_encap_label, + (void *)&cmd_set_mplsogre_encap_label_value, + (void *)&cmd_set_mplsogre_encap_ip_src, + (void *)&cmd_set_mplsogre_encap_ip_src_value, + (void *)&cmd_set_mplsogre_encap_ip_dst, + (void *)&cmd_set_mplsogre_encap_ip_dst_value, + (void *)&cmd_set_mplsogre_encap_eth_src, + (void *)&cmd_set_mplsogre_encap_eth_src_value, + (void *)&cmd_set_mplsogre_encap_eth_dst, + (void *)&cmd_set_mplsogre_encap_eth_dst_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_mplsogre_encap_with_vlan = { + .f = cmd_set_mplsogre_encap_parsed, + .data = NULL, + .help_str = "set mplsogre_encap-with-vlan ip-version ipv4|ipv6" + " label <label> ip-src <ip-src> ip-dst <ip-dst>" + " vlan-tci <vlan-tci> eth-src <eth-src> eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_mplsogre_encap_set, + (void *)&cmd_set_mplsogre_encap_mplsogre_encap_with_vlan, + (void *)&cmd_set_mplsogre_encap_ip_version, + (void *)&cmd_set_mplsogre_encap_ip_version_value, + (void *)&cmd_set_mplsogre_encap_label, + (void *)&cmd_set_mplsogre_encap_label_value, + (void *)&cmd_set_mplsogre_encap_ip_src, + (void *)&cmd_set_mplsogre_encap_ip_src_value, + (void *)&cmd_set_mplsogre_encap_ip_dst, + (void *)&cmd_set_mplsogre_encap_ip_dst_value, + (void *)&cmd_set_mplsogre_encap_vlan, + (void *)&cmd_set_mplsogre_encap_vlan_value, + (void *)&cmd_set_mplsogre_encap_eth_src, + (void *)&cmd_set_mplsogre_encap_eth_src_value, + (void *)&cmd_set_mplsogre_encap_eth_dst, + (void *)&cmd_set_mplsogre_encap_eth_dst_value, + NULL, + }, +}; + +/** Set MPLSoGRE decapsulation details */ +struct cmd_set_mplsogre_decap_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t mplsogre; + cmdline_fixed_string_t pos_token; + cmdline_fixed_string_t ip_version; + uint32_t vlan_present:1; +}; + +cmdline_parse_token_string_t cmd_set_mplsogre_decap_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result, set, + "set"); +cmdline_parse_token_string_t cmd_set_mplsogre_decap_mplsogre_decap = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result, mplsogre, + "mplsogre_decap"); +cmdline_parse_token_string_t cmd_set_mplsogre_decap_mplsogre_decap_with_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result, + mplsogre, "mplsogre_decap-with-vlan"); +cmdline_parse_token_string_t cmd_set_mplsogre_decap_ip_version = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result, + pos_token, "ip-version"); +cmdline_parse_token_string_t cmd_set_mplsogre_decap_ip_version_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsogre_decap_result, + ip_version, "ipv4#ipv6"); + +static void cmd_set_mplsogre_decap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_mplsogre_decap_result *res = parsed_result; + + if (strcmp(res->mplsogre, "mplsogre_decap") == 0) + mplsogre_decap_conf.select_vlan = 0; + else if (strcmp(res->mplsogre, "mplsogre_decap-with-vlan") == 0) + mplsogre_decap_conf.select_vlan = 1; + if (strcmp(res->ip_version, "ipv4") == 0) + mplsogre_decap_conf.select_ipv4 = 1; + 
else if (strcmp(res->ip_version, "ipv6") == 0) + mplsogre_decap_conf.select_ipv4 = 0; +} + +cmdline_parse_inst_t cmd_set_mplsogre_decap = { + .f = cmd_set_mplsogre_decap_parsed, + .data = NULL, + .help_str = "set mplsogre_decap ip-version ipv4|ipv6", + .tokens = { + (void *)&cmd_set_mplsogre_decap_set, + (void *)&cmd_set_mplsogre_decap_mplsogre_decap, + (void *)&cmd_set_mplsogre_decap_ip_version, + (void *)&cmd_set_mplsogre_decap_ip_version_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_mplsogre_decap_with_vlan = { + .f = cmd_set_mplsogre_decap_parsed, + .data = NULL, + .help_str = "set mplsogre_decap-with-vlan ip-version ipv4|ipv6", + .tokens = { + (void *)&cmd_set_mplsogre_decap_set, + (void *)&cmd_set_mplsogre_decap_mplsogre_decap_with_vlan, + (void *)&cmd_set_mplsogre_decap_ip_version, + (void *)&cmd_set_mplsogre_decap_ip_version_value, + NULL, + }, +}; + +/** Set MPLSoUDP encapsulation details */ +struct cmd_set_mplsoudp_encap_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t mplsoudp; + cmdline_fixed_string_t pos_token; + cmdline_fixed_string_t ip_version; + uint32_t vlan_present:1; + uint32_t label; + uint16_t udp_src; + uint16_t udp_dst; + cmdline_ipaddr_t ip_src; + cmdline_ipaddr_t ip_dst; + uint16_t tci; + struct rte_ether_addr eth_src; + struct rte_ether_addr eth_dst; +}; + +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, set, + "set"); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_mplsoudp_encap = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, mplsoudp, + "mplsoudp_encap"); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_mplsoudp_encap_with_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + mplsoudp, "mplsoudp_encap-with-vlan"); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_ip_version = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + pos_token, "ip-version"); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_ip_version_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + ip_version, "ipv4#ipv6"); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_label = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + pos_token, "label"); +cmdline_parse_token_num_t cmd_set_mplsoudp_encap_label_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_mplsoudp_encap_result, label, + UINT32); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_udp_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + pos_token, "udp-src"); +cmdline_parse_token_num_t cmd_set_mplsoudp_encap_udp_src_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_mplsoudp_encap_result, udp_src, + UINT16); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_udp_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + pos_token, "udp-dst"); +cmdline_parse_token_num_t cmd_set_mplsoudp_encap_udp_dst_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_mplsoudp_encap_result, udp_dst, + UINT16); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_ip_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + pos_token, "ip-src"); +cmdline_parse_token_ipaddr_t cmd_set_mplsoudp_encap_ip_src_value = + TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsoudp_encap_result, ip_src); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_ip_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + pos_token, "ip-dst"); +cmdline_parse_token_ipaddr_t cmd_set_mplsoudp_encap_ip_dst_value = + 
TOKEN_IPADDR_INITIALIZER(struct cmd_set_mplsoudp_encap_result, ip_dst); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + pos_token, "vlan-tci"); +cmdline_parse_token_num_t cmd_set_mplsoudp_encap_vlan_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_mplsoudp_encap_result, tci, + UINT16); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_eth_src = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + pos_token, "eth-src"); +cmdline_parse_token_etheraddr_t cmd_set_mplsoudp_encap_eth_src_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + eth_src); +cmdline_parse_token_string_t cmd_set_mplsoudp_encap_eth_dst = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + pos_token, "eth-dst"); +cmdline_parse_token_etheraddr_t cmd_set_mplsoudp_encap_eth_dst_value = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mplsoudp_encap_result, + eth_dst); + +static void cmd_set_mplsoudp_encap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_mplsoudp_encap_result *res = parsed_result; + union { + uint32_t mplsoudp_label; + uint8_t label[4]; + } id = { + .mplsoudp_label = rte_cpu_to_be_32(res->label<<12), + }; + + if (strcmp(res->mplsoudp, "mplsoudp_encap") == 0) + mplsoudp_encap_conf.select_vlan = 0; + else if (strcmp(res->mplsoudp, "mplsoudp_encap-with-vlan") == 0) + mplsoudp_encap_conf.select_vlan = 1; + if (strcmp(res->ip_version, "ipv4") == 0) + mplsoudp_encap_conf.select_ipv4 = 1; + else if (strcmp(res->ip_version, "ipv6") == 0) + mplsoudp_encap_conf.select_ipv4 = 0; + else + return; + rte_memcpy(mplsoudp_encap_conf.label, &id.label, 3); + mplsoudp_encap_conf.udp_src = rte_cpu_to_be_16(res->udp_src); + mplsoudp_encap_conf.udp_dst = rte_cpu_to_be_16(res->udp_dst); + if (mplsoudp_encap_conf.select_ipv4) { + IPV4_ADDR_TO_UINT(res->ip_src, mplsoudp_encap_conf.ipv4_src); + IPV4_ADDR_TO_UINT(res->ip_dst, mplsoudp_encap_conf.ipv4_dst); + } else { + IPV6_ADDR_TO_ARRAY(res->ip_src, mplsoudp_encap_conf.ipv6_src); + IPV6_ADDR_TO_ARRAY(res->ip_dst, mplsoudp_encap_conf.ipv6_dst); + } + if (mplsoudp_encap_conf.select_vlan) + mplsoudp_encap_conf.vlan_tci = rte_cpu_to_be_16(res->tci); + rte_memcpy(mplsoudp_encap_conf.eth_src, res->eth_src.addr_bytes, + RTE_ETHER_ADDR_LEN); + rte_memcpy(mplsoudp_encap_conf.eth_dst, res->eth_dst.addr_bytes, + RTE_ETHER_ADDR_LEN); +} + +cmdline_parse_inst_t cmd_set_mplsoudp_encap = { + .f = cmd_set_mplsoudp_encap_parsed, + .data = NULL, + .help_str = "set mplsoudp_encap ip-version ipv4|ipv6 label <label>" + " udp-src <udp-src> udp-dst <udp-dst> ip-src <ip-src>" + " ip-dst <ip-dst> eth-src <eth-src> eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_mplsoudp_encap_set, + (void *)&cmd_set_mplsoudp_encap_mplsoudp_encap, + (void *)&cmd_set_mplsoudp_encap_ip_version, + (void *)&cmd_set_mplsoudp_encap_ip_version_value, + (void *)&cmd_set_mplsoudp_encap_label, + (void *)&cmd_set_mplsoudp_encap_label_value, + (void *)&cmd_set_mplsoudp_encap_udp_src, + (void *)&cmd_set_mplsoudp_encap_udp_src_value, + (void *)&cmd_set_mplsoudp_encap_udp_dst, + (void *)&cmd_set_mplsoudp_encap_udp_dst_value, + (void *)&cmd_set_mplsoudp_encap_ip_src, + (void *)&cmd_set_mplsoudp_encap_ip_src_value, + (void *)&cmd_set_mplsoudp_encap_ip_dst, + (void *)&cmd_set_mplsoudp_encap_ip_dst_value, + (void *)&cmd_set_mplsoudp_encap_eth_src, + (void *)&cmd_set_mplsoudp_encap_eth_src_value, + (void *)&cmd_set_mplsoudp_encap_eth_dst, + (void 
*)&cmd_set_mplsoudp_encap_eth_dst_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_mplsoudp_encap_with_vlan = { + .f = cmd_set_mplsoudp_encap_parsed, + .data = NULL, + .help_str = "set mplsoudp_encap-with-vlan ip-version ipv4|ipv6" + " label <label> udp-src <udp-src> udp-dst <udp-dst>" + " ip-src <ip-src> ip-dst <ip-dst> vlan-tci <vlan-tci>" + " eth-src <eth-src> eth-dst <eth-dst>", + .tokens = { + (void *)&cmd_set_mplsoudp_encap_set, + (void *)&cmd_set_mplsoudp_encap_mplsoudp_encap_with_vlan, + (void *)&cmd_set_mplsoudp_encap_ip_version, + (void *)&cmd_set_mplsoudp_encap_ip_version_value, + (void *)&cmd_set_mplsoudp_encap_label, + (void *)&cmd_set_mplsoudp_encap_label_value, + (void *)&cmd_set_mplsoudp_encap_udp_src, + (void *)&cmd_set_mplsoudp_encap_udp_src_value, + (void *)&cmd_set_mplsoudp_encap_udp_dst, + (void *)&cmd_set_mplsoudp_encap_udp_dst_value, + (void *)&cmd_set_mplsoudp_encap_ip_src, + (void *)&cmd_set_mplsoudp_encap_ip_src_value, + (void *)&cmd_set_mplsoudp_encap_ip_dst, + (void *)&cmd_set_mplsoudp_encap_ip_dst_value, + (void *)&cmd_set_mplsoudp_encap_vlan, + (void *)&cmd_set_mplsoudp_encap_vlan_value, + (void *)&cmd_set_mplsoudp_encap_eth_src, + (void *)&cmd_set_mplsoudp_encap_eth_src_value, + (void *)&cmd_set_mplsoudp_encap_eth_dst, + (void *)&cmd_set_mplsoudp_encap_eth_dst_value, + NULL, + }, +}; + +/** Set MPLSoUDP decapsulation details */ +struct cmd_set_mplsoudp_decap_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t mplsoudp; + cmdline_fixed_string_t pos_token; + cmdline_fixed_string_t ip_version; + uint32_t vlan_present:1; +}; + +cmdline_parse_token_string_t cmd_set_mplsoudp_decap_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result, set, + "set"); +cmdline_parse_token_string_t cmd_set_mplsoudp_decap_mplsoudp_decap = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result, mplsoudp, + "mplsoudp_decap"); +cmdline_parse_token_string_t cmd_set_mplsoudp_decap_mplsoudp_decap_with_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result, + mplsoudp, "mplsoudp_decap-with-vlan"); +cmdline_parse_token_string_t cmd_set_mplsoudp_decap_ip_version = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result, + pos_token, "ip-version"); +cmdline_parse_token_string_t cmd_set_mplsoudp_decap_ip_version_value = + TOKEN_STRING_INITIALIZER(struct cmd_set_mplsoudp_decap_result, + ip_version, "ipv4#ipv6"); + +static void cmd_set_mplsoudp_decap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_mplsoudp_decap_result *res = parsed_result; + + if (strcmp(res->mplsoudp, "mplsoudp_decap") == 0) + mplsoudp_decap_conf.select_vlan = 0; + else if (strcmp(res->mplsoudp, "mplsoudp_decap-with-vlan") == 0) + mplsoudp_decap_conf.select_vlan = 1; + if (strcmp(res->ip_version, "ipv4") == 0) + mplsoudp_decap_conf.select_ipv4 = 1; + else if (strcmp(res->ip_version, "ipv6") == 0) + mplsoudp_decap_conf.select_ipv4 = 0; +} + +cmdline_parse_inst_t cmd_set_mplsoudp_decap = { + .f = cmd_set_mplsoudp_decap_parsed, + .data = NULL, + .help_str = "set mplsoudp_decap ip-version ipv4|ipv6", + .tokens = { + (void *)&cmd_set_mplsoudp_decap_set, + (void *)&cmd_set_mplsoudp_decap_mplsoudp_decap, + (void *)&cmd_set_mplsoudp_decap_ip_version, + (void *)&cmd_set_mplsoudp_decap_ip_version_value, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_mplsoudp_decap_with_vlan = { + .f = cmd_set_mplsoudp_decap_parsed, + .data = NULL, + .help_str = "set mplsoudp_decap-with-vlan ip-version ipv4|ipv6", + .tokens = { 
+ (void *)&cmd_set_mplsoudp_decap_set, + (void *)&cmd_set_mplsoudp_decap_mplsoudp_decap_with_vlan, + (void *)&cmd_set_mplsoudp_decap_ip_version, + (void *)&cmd_set_mplsoudp_decap_ip_version_value, + NULL, + }, +}; + +/* Strict link priority scheduling mode setting */ +static void +cmd_strict_link_prio_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_vf_tc_bw_result *res = parsed_result; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_set_tc_strict_prio(res->port_id, res->tc_map); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid tc_bitmap 0x%x\n", res->tc_map); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_strict_link_prio = { + .f = cmd_strict_link_prio_parsed, + .data = NULL, + .help_str = "set tx strict-link-priority <port_id> <tc_bitmap>", + .tokens = { + (void *)&cmd_vf_tc_bw_set, + (void *)&cmd_vf_tc_bw_tx, + (void *)&cmd_vf_tc_bw_strict_link_prio, + (void *)&cmd_vf_tc_bw_port_id, + (void *)&cmd_vf_tc_bw_tc_map, + NULL, + }, +}; + +/* Load dynamic device personalization*/ +struct cmd_ddp_add_result { + cmdline_fixed_string_t ddp; + cmdline_fixed_string_t add; + portid_t port_id; + char filepath[]; +}; + +cmdline_parse_token_string_t cmd_ddp_add_ddp = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_add_result, ddp, "ddp"); +cmdline_parse_token_string_t cmd_ddp_add_add = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_add_result, add, "add"); +cmdline_parse_token_num_t cmd_ddp_add_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_ddp_add_result, port_id, UINT16); +cmdline_parse_token_string_t cmd_ddp_add_filepath = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_add_result, filepath, NULL); + +static void +cmd_ddp_add_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_ddp_add_result *res = parsed_result; + uint8_t *buff; + uint32_t size; + char *filepath; + char *file_fld[2]; + int file_num; + int ret = -ENOTSUP; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + filepath = strdup(res->filepath); + if (filepath == NULL) { + printf("Failed to allocate memory\n"); + return; + } + file_num = rte_strsplit(filepath, strlen(filepath), file_fld, 2, ','); + + buff = open_file(file_fld[0], &size); + if (!buff) { + free((void *)filepath); + return; + } + +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_process_ddp_package(res->port_id, + buff, size, + RTE_PMD_I40E_PKG_OP_WR_ADD); +#endif + + if (ret == -EEXIST) + printf("Profile has already existed.\n"); + else if (ret < 0) + printf("Failed to load profile.\n"); + else if (file_num == 2) + save_file(file_fld[1], buff, size); + + close_file(buff); + free((void *)filepath); +} + +cmdline_parse_inst_t cmd_ddp_add = { + .f = cmd_ddp_add_parsed, + .data = NULL, + .help_str = "ddp add <port_id> <profile_path[,backup_profile_path]>", + .tokens = { + (void *)&cmd_ddp_add_ddp, + (void *)&cmd_ddp_add_add, + (void *)&cmd_ddp_add_port_id, + (void *)&cmd_ddp_add_filepath, + NULL, + }, +}; + +/* Delete dynamic device personalization*/ +struct cmd_ddp_del_result { + cmdline_fixed_string_t ddp; + cmdline_fixed_string_t del; + portid_t port_id; + char filepath[]; +}; + 
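+/*
+ * Illustrative command lines at the testpmd> prompt for the commands defined
+ * above, following their help_str strings; the port id 0, tc bitmap and file
+ * names are example values only, not part of the upstream code:
+ *
+ *   set mplsoudp_decap ip-version ipv4
+ *   set tx strict-link-priority 0 0xf
+ *   ddp add 0 profile.pkgo,profile.bak
+ *
+ * For "ddp add", the optional path after the comma is where
+ * cmd_ddp_add_parsed() stores a copy of the loaded package via save_file().
+ */
+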
+cmdline_parse_token_string_t cmd_ddp_del_ddp = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, ddp, "ddp"); +cmdline_parse_token_string_t cmd_ddp_del_del = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, del, "del"); +cmdline_parse_token_num_t cmd_ddp_del_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_ddp_del_result, port_id, UINT16); +cmdline_parse_token_string_t cmd_ddp_del_filepath = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_del_result, filepath, NULL); + +static void +cmd_ddp_del_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_ddp_del_result *res = parsed_result; + uint8_t *buff; + uint32_t size; + int ret = -ENOTSUP; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + + buff = open_file(res->filepath, &size); + if (!buff) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_process_ddp_package(res->port_id, + buff, size, + RTE_PMD_I40E_PKG_OP_WR_DEL); +#endif + + if (ret == -EACCES) + printf("Profile does not exist.\n"); + else if (ret < 0) + printf("Failed to delete profile.\n"); + + close_file(buff); +} + +cmdline_parse_inst_t cmd_ddp_del = { + .f = cmd_ddp_del_parsed, + .data = NULL, + .help_str = "ddp del <port_id> <backup_profile_path>", + .tokens = { + (void *)&cmd_ddp_del_ddp, + (void *)&cmd_ddp_del_del, + (void *)&cmd_ddp_del_port_id, + (void *)&cmd_ddp_del_filepath, + NULL, + }, +}; + +/* Get dynamic device personalization profile info */ +struct cmd_ddp_info_result { + cmdline_fixed_string_t ddp; + cmdline_fixed_string_t get; + cmdline_fixed_string_t info; + char filepath[]; +}; + +cmdline_parse_token_string_t cmd_ddp_info_ddp = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, ddp, "ddp"); +cmdline_parse_token_string_t cmd_ddp_info_get = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, get, "get"); +cmdline_parse_token_string_t cmd_ddp_info_info = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, info, "info"); +cmdline_parse_token_string_t cmd_ddp_info_filepath = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_info_result, filepath, NULL); + +static void +cmd_ddp_info_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_ddp_info_result *res = parsed_result; + uint8_t *pkg; + uint32_t pkg_size; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + uint32_t i, j, n; + uint8_t *buff; + uint32_t buff_size = 0; + struct rte_pmd_i40e_profile_info info; + uint32_t dev_num = 0; + struct rte_pmd_i40e_ddp_device_id *devs; + uint32_t proto_num = 0; + struct rte_pmd_i40e_proto_info *proto = NULL; + uint32_t pctype_num = 0; + struct rte_pmd_i40e_ptype_info *pctype; + uint32_t ptype_num = 0; + struct rte_pmd_i40e_ptype_info *ptype; + uint8_t proto_id; + +#endif + + pkg = open_file(res->filepath, &pkg_size); + if (!pkg) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&info, sizeof(info), + RTE_PMD_I40E_PKG_INFO_GLOBAL_HEADER); + if (!ret) { + printf("Global Track id: 0x%x\n", info.track_id); + printf("Global Version: %d.%d.%d.%d\n", + info.version.major, + info.version.minor, + info.version.update, + info.version.draft); + printf("Global Package name: %s\n\n", info.name); + } + + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&info, sizeof(info), + RTE_PMD_I40E_PKG_INFO_HEADER); + if (!ret) { + printf("i40e Profile Track id: 0x%x\n", info.track_id); + printf("i40e Profile Version: %d.%d.%d.%d\n", + info.version.major, + 
info.version.minor, + info.version.update, + info.version.draft); + printf("i40e Profile name: %s\n\n", info.name); + } + + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&buff_size, sizeof(buff_size), + RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES_SIZE); + if (!ret && buff_size) { + buff = (uint8_t *)malloc(buff_size); + if (buff) { + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + buff, buff_size, + RTE_PMD_I40E_PKG_INFO_GLOBAL_NOTES); + if (!ret) + printf("Package Notes:\n%s\n\n", buff); + free(buff); + } + } + + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&dev_num, sizeof(dev_num), + RTE_PMD_I40E_PKG_INFO_DEVID_NUM); + if (!ret && dev_num) { + buff_size = dev_num * sizeof(struct rte_pmd_i40e_ddp_device_id); + devs = (struct rte_pmd_i40e_ddp_device_id *)malloc(buff_size); + if (devs) { + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)devs, buff_size, + RTE_PMD_I40E_PKG_INFO_DEVID_LIST); + if (!ret) { + printf("List of supported devices:\n"); + for (i = 0; i < dev_num; i++) { + printf(" %04X:%04X %04X:%04X\n", + devs[i].vendor_dev_id >> 16, + devs[i].vendor_dev_id & 0xFFFF, + devs[i].sub_vendor_dev_id >> 16, + devs[i].sub_vendor_dev_id & 0xFFFF); + } + printf("\n"); + } + free(devs); + } + } + + /* get information about protocols and packet types */ + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&proto_num, sizeof(proto_num), + RTE_PMD_I40E_PKG_INFO_PROTOCOL_NUM); + if (ret || !proto_num) + goto no_print_return; + + buff_size = proto_num * sizeof(struct rte_pmd_i40e_proto_info); + proto = (struct rte_pmd_i40e_proto_info *)malloc(buff_size); + if (!proto) + goto no_print_return; + + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)proto, + buff_size, + RTE_PMD_I40E_PKG_INFO_PROTOCOL_LIST); + if (!ret) { + printf("List of used protocols:\n"); + for (i = 0; i < proto_num; i++) + printf(" %2u: %s\n", proto[i].proto_id, + proto[i].name); + printf("\n"); + } + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, + (uint8_t *)&pctype_num, sizeof(pctype_num), + RTE_PMD_I40E_PKG_INFO_PCTYPE_NUM); + if (ret || !pctype_num) + goto no_print_pctypes; + + buff_size = pctype_num * sizeof(struct rte_pmd_i40e_ptype_info); + pctype = (struct rte_pmd_i40e_ptype_info *)malloc(buff_size); + if (!pctype) + goto no_print_pctypes; + + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)pctype, + buff_size, + RTE_PMD_I40E_PKG_INFO_PCTYPE_LIST); + if (ret) { + free(pctype); + goto no_print_pctypes; + } + + printf("List of defined packet classification types:\n"); + for (i = 0; i < pctype_num; i++) { + printf(" %2u:", pctype[i].ptype_id); + for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) { + proto_id = pctype[i].protocols[j]; + if (proto_id != RTE_PMD_I40E_PROTO_UNUSED) { + for (n = 0; n < proto_num; n++) { + if (proto[n].proto_id == proto_id) { + printf(" %s", proto[n].name); + break; + } + } + } + } + printf("\n"); + } + printf("\n"); + free(pctype); + +no_print_pctypes: + + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)&ptype_num, + sizeof(ptype_num), + RTE_PMD_I40E_PKG_INFO_PTYPE_NUM); + if (ret || !ptype_num) + goto no_print_return; + + buff_size = ptype_num * sizeof(struct rte_pmd_i40e_ptype_info); + ptype = (struct rte_pmd_i40e_ptype_info *)malloc(buff_size); + if (!ptype) + goto no_print_return; + + ret = rte_pmd_i40e_get_ddp_info(pkg, pkg_size, (uint8_t *)ptype, + buff_size, + RTE_PMD_I40E_PKG_INFO_PTYPE_LIST); + if (ret) { + free(ptype); + goto no_print_return; + } + printf("List of defined packet types:\n"); + for (i = 0; i < ptype_num; 
i++) { + printf(" %2u:", ptype[i].ptype_id); + for (j = 0; j < RTE_PMD_I40E_PROTO_NUM; j++) { + proto_id = ptype[i].protocols[j]; + if (proto_id != RTE_PMD_I40E_PROTO_UNUSED) { + for (n = 0; n < proto_num; n++) { + if (proto[n].proto_id == proto_id) { + printf(" %s", proto[n].name); + break; + } + } + } + } + printf("\n"); + } + free(ptype); + printf("\n"); + + ret = 0; +no_print_return: + if (proto) + free(proto); +#endif + if (ret == -ENOTSUP) + printf("Function not supported in PMD driver\n"); + close_file(pkg); +} + +cmdline_parse_inst_t cmd_ddp_get_info = { + .f = cmd_ddp_info_parsed, + .data = NULL, + .help_str = "ddp get info <profile_path>", + .tokens = { + (void *)&cmd_ddp_info_ddp, + (void *)&cmd_ddp_info_get, + (void *)&cmd_ddp_info_info, + (void *)&cmd_ddp_info_filepath, + NULL, + }, +}; + +/* Get dynamic device personalization profile info list*/ +#define PROFILE_INFO_SIZE 48 +#define MAX_PROFILE_NUM 16 + +struct cmd_ddp_get_list_result { + cmdline_fixed_string_t ddp; + cmdline_fixed_string_t get; + cmdline_fixed_string_t list; + portid_t port_id; +}; + +cmdline_parse_token_string_t cmd_ddp_get_list_ddp = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_get_list_result, ddp, "ddp"); +cmdline_parse_token_string_t cmd_ddp_get_list_get = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_get_list_result, get, "get"); +cmdline_parse_token_string_t cmd_ddp_get_list_list = + TOKEN_STRING_INITIALIZER(struct cmd_ddp_get_list_result, list, "list"); +cmdline_parse_token_num_t cmd_ddp_get_list_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_ddp_get_list_result, port_id, UINT16); + +static void +cmd_ddp_get_list_parsed( + __rte_unused void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ +#ifdef RTE_LIBRTE_I40E_PMD + struct cmd_ddp_get_list_result *res = parsed_result; + struct rte_pmd_i40e_profile_list *p_list; + struct rte_pmd_i40e_profile_info *p_info; + uint32_t p_num; + uint32_t size; + uint32_t i; +#endif + int ret = -ENOTSUP; + +#ifdef RTE_LIBRTE_I40E_PMD + size = PROFILE_INFO_SIZE * MAX_PROFILE_NUM + 4; + p_list = (struct rte_pmd_i40e_profile_list *)malloc(size); + if (!p_list) { + printf("%s: Failed to malloc buffer\n", __func__); + return; + } + + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_get_ddp_list(res->port_id, + (uint8_t *)p_list, size); + + if (!ret) { + p_num = p_list->p_count; + printf("Profile number is: %d\n\n", p_num); + + for (i = 0; i < p_num; i++) { + p_info = &p_list->p_info[i]; + printf("Profile %d:\n", i); + printf("Track id: 0x%x\n", p_info->track_id); + printf("Version: %d.%d.%d.%d\n", + p_info->version.major, + p_info->version.minor, + p_info->version.update, + p_info->version.draft); + printf("Profile name: %s\n\n", p_info->name); + } + } + + free(p_list); +#endif + + if (ret < 0) + printf("Failed to get ddp list\n"); +} + +cmdline_parse_inst_t cmd_ddp_get_list = { + .f = cmd_ddp_get_list_parsed, + .data = NULL, + .help_str = "ddp get list <port_id>", + .tokens = { + (void *)&cmd_ddp_get_list_ddp, + (void *)&cmd_ddp_get_list_get, + (void *)&cmd_ddp_get_list_list, + (void *)&cmd_ddp_get_list_port_id, + NULL, + }, +}; + +/* Configure input set */ +struct cmd_cfg_input_set_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t cfg; + portid_t port_id; + cmdline_fixed_string_t pctype; + uint8_t pctype_id; + cmdline_fixed_string_t inset_type; + cmdline_fixed_string_t opt; + cmdline_fixed_string_t field; + uint8_t field_idx; +}; + +static void +cmd_cfg_input_set_parsed( + __rte_unused void *parsed_result, + __rte_unused struct cmdline *cl, + 
__rte_unused void *data) +{ +#ifdef RTE_LIBRTE_I40E_PMD + struct cmd_cfg_input_set_result *res = parsed_result; + enum rte_pmd_i40e_inset_type inset_type = INSET_NONE; + struct rte_pmd_i40e_inset inset; +#endif + int ret = -ENOTSUP; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + +#ifdef RTE_LIBRTE_I40E_PMD + if (!strcmp(res->inset_type, "hash_inset")) + inset_type = INSET_HASH; + else if (!strcmp(res->inset_type, "fdir_inset")) + inset_type = INSET_FDIR; + else if (!strcmp(res->inset_type, "fdir_flx_inset")) + inset_type = INSET_FDIR_FLX; + ret = rte_pmd_i40e_inset_get(res->port_id, res->pctype_id, + &inset, inset_type); + if (ret) { + printf("Failed to get input set.\n"); + return; + } + + if (!strcmp(res->opt, "get")) { + ret = rte_pmd_i40e_inset_field_get(inset.inset, + res->field_idx); + if (ret) + printf("Field index %d is enabled.\n", res->field_idx); + else + printf("Field index %d is disabled.\n", res->field_idx); + return; + } else if (!strcmp(res->opt, "set")) + ret = rte_pmd_i40e_inset_field_set(&inset.inset, + res->field_idx); + else if (!strcmp(res->opt, "clear")) + ret = rte_pmd_i40e_inset_field_clear(&inset.inset, + res->field_idx); + if (ret) { + printf("Failed to configure input set field.\n"); + return; + } + + ret = rte_pmd_i40e_inset_set(res->port_id, res->pctype_id, + &inset, inset_type); + if (ret) { + printf("Failed to set input set.\n"); + return; + } +#endif + + if (ret == -ENOTSUP) + printf("Function not supported\n"); +} + +cmdline_parse_token_string_t cmd_cfg_input_set_port = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + port, "port"); +cmdline_parse_token_string_t cmd_cfg_input_set_cfg = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + cfg, "config"); +cmdline_parse_token_num_t cmd_cfg_input_set_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_cfg_input_set_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_cfg_input_set_pctype = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + pctype, "pctype"); +cmdline_parse_token_num_t cmd_cfg_input_set_pctype_id = + TOKEN_NUM_INITIALIZER(struct cmd_cfg_input_set_result, + pctype_id, UINT8); +cmdline_parse_token_string_t cmd_cfg_input_set_inset_type = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + inset_type, + "hash_inset#fdir_inset#fdir_flx_inset"); +cmdline_parse_token_string_t cmd_cfg_input_set_opt = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + opt, "get#set#clear"); +cmdline_parse_token_string_t cmd_cfg_input_set_field = + TOKEN_STRING_INITIALIZER(struct cmd_cfg_input_set_result, + field, "field"); +cmdline_parse_token_num_t cmd_cfg_input_set_field_idx = + TOKEN_NUM_INITIALIZER(struct cmd_cfg_input_set_result, + field_idx, UINT8); + +cmdline_parse_inst_t cmd_cfg_input_set = { + .f = cmd_cfg_input_set_parsed, + .data = NULL, + .help_str = "port config <port_id> pctype <pctype_id> hash_inset|" + "fdir_inset|fdir_flx_inset get|set|clear field <field_idx>", + .tokens = { + (void *)&cmd_cfg_input_set_port, + (void *)&cmd_cfg_input_set_cfg, + (void *)&cmd_cfg_input_set_port_id, + (void *)&cmd_cfg_input_set_pctype, + (void *)&cmd_cfg_input_set_pctype_id, + (void *)&cmd_cfg_input_set_inset_type, + (void *)&cmd_cfg_input_set_opt, + (void *)&cmd_cfg_input_set_field, + (void *)&cmd_cfg_input_set_field_idx, + NULL, + }, +}; + +/* Clear input set */ +struct cmd_clear_input_set_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t cfg; + portid_t port_id; + cmdline_fixed_string_t pctype; + uint8_t 
pctype_id; + cmdline_fixed_string_t inset_type; + cmdline_fixed_string_t clear; + cmdline_fixed_string_t all; +}; + +static void +cmd_clear_input_set_parsed( + __rte_unused void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ +#ifdef RTE_LIBRTE_I40E_PMD + struct cmd_clear_input_set_result *res = parsed_result; + enum rte_pmd_i40e_inset_type inset_type = INSET_NONE; + struct rte_pmd_i40e_inset inset; +#endif + int ret = -ENOTSUP; + + if (!all_ports_stopped()) { + printf("Please stop all ports first\n"); + return; + } + +#ifdef RTE_LIBRTE_I40E_PMD + if (!strcmp(res->inset_type, "hash_inset")) + inset_type = INSET_HASH; + else if (!strcmp(res->inset_type, "fdir_inset")) + inset_type = INSET_FDIR; + else if (!strcmp(res->inset_type, "fdir_flx_inset")) + inset_type = INSET_FDIR_FLX; + + memset(&inset, 0, sizeof(inset)); + + ret = rte_pmd_i40e_inset_set(res->port_id, res->pctype_id, + &inset, inset_type); + if (ret) { + printf("Failed to clear input set.\n"); + return; + } + +#endif + + if (ret == -ENOTSUP) + printf("Function not supported\n"); +} + +cmdline_parse_token_string_t cmd_clear_input_set_port = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + port, "port"); +cmdline_parse_token_string_t cmd_clear_input_set_cfg = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + cfg, "config"); +cmdline_parse_token_num_t cmd_clear_input_set_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_clear_input_set_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_clear_input_set_pctype = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + pctype, "pctype"); +cmdline_parse_token_num_t cmd_clear_input_set_pctype_id = + TOKEN_NUM_INITIALIZER(struct cmd_clear_input_set_result, + pctype_id, UINT8); +cmdline_parse_token_string_t cmd_clear_input_set_inset_type = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + inset_type, + "hash_inset#fdir_inset#fdir_flx_inset"); +cmdline_parse_token_string_t cmd_clear_input_set_clear = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + clear, "clear"); +cmdline_parse_token_string_t cmd_clear_input_set_all = + TOKEN_STRING_INITIALIZER(struct cmd_clear_input_set_result, + all, "all"); + +cmdline_parse_inst_t cmd_clear_input_set = { + .f = cmd_clear_input_set_parsed, + .data = NULL, + .help_str = "port config <port_id> pctype <pctype_id> hash_inset|" + "fdir_inset|fdir_flx_inset clear all", + .tokens = { + (void *)&cmd_clear_input_set_port, + (void *)&cmd_clear_input_set_cfg, + (void *)&cmd_clear_input_set_port_id, + (void *)&cmd_clear_input_set_pctype, + (void *)&cmd_clear_input_set_pctype_id, + (void *)&cmd_clear_input_set_inset_type, + (void *)&cmd_clear_input_set_clear, + (void *)&cmd_clear_input_set_all, + NULL, + }, +}; + +/* show vf stats */ + +/* Common result structure for show vf stats */ +struct cmd_show_vf_stats_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t stats; + portid_t port_id; + uint16_t vf_id; +}; + +/* Common CLI fields show vf stats*/ +cmdline_parse_token_string_t cmd_show_vf_stats_show = + TOKEN_STRING_INITIALIZER + (struct cmd_show_vf_stats_result, + show, "show"); +cmdline_parse_token_string_t cmd_show_vf_stats_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_show_vf_stats_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_show_vf_stats_stats = + TOKEN_STRING_INITIALIZER + (struct cmd_show_vf_stats_result, + stats, "stats"); +cmdline_parse_token_num_t cmd_show_vf_stats_port_id = + TOKEN_NUM_INITIALIZER + 
(struct cmd_show_vf_stats_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_show_vf_stats_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_show_vf_stats_result, + vf_id, UINT16); + +static void +cmd_show_vf_stats_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_vf_stats_result *res = parsed_result; + struct rte_eth_stats stats; + int ret = -ENOTSUP; + static const char *nic_stats_border = "########################"; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + + memset(&stats, 0, sizeof(stats)); + +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_get_vf_stats(res->port_id, + res->vf_id, + &stats); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_bnxt_get_vf_stats(res->port_id, + res->vf_id, + &stats); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d\n", res->vf_id); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } + + printf("\n %s NIC statistics for port %-2d vf %-2d %s\n", + nic_stats_border, res->port_id, res->vf_id, nic_stats_border); + + printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " + "%-"PRIu64"\n", + stats.ipackets, stats.imissed, stats.ibytes); + printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); + printf(" RX-nombuf: %-10"PRIu64"\n", + stats.rx_nombuf); + printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " + "%-"PRIu64"\n", + stats.opackets, stats.oerrors, stats.obytes); + + printf(" %s############################%s\n", + nic_stats_border, nic_stats_border); +} + +cmdline_parse_inst_t cmd_show_vf_stats = { + .f = cmd_show_vf_stats_parsed, + .data = NULL, + .help_str = "show vf stats <port_id> <vf_id>", + .tokens = { + (void *)&cmd_show_vf_stats_show, + (void *)&cmd_show_vf_stats_vf, + (void *)&cmd_show_vf_stats_stats, + (void *)&cmd_show_vf_stats_port_id, + (void *)&cmd_show_vf_stats_vf_id, + NULL, + }, +}; + +/* clear vf stats */ + +/* Common result structure for clear vf stats */ +struct cmd_clear_vf_stats_result { + cmdline_fixed_string_t clear; + cmdline_fixed_string_t vf; + cmdline_fixed_string_t stats; + portid_t port_id; + uint16_t vf_id; +}; + +/* Common CLI fields clear vf stats*/ +cmdline_parse_token_string_t cmd_clear_vf_stats_clear = + TOKEN_STRING_INITIALIZER + (struct cmd_clear_vf_stats_result, + clear, "clear"); +cmdline_parse_token_string_t cmd_clear_vf_stats_vf = + TOKEN_STRING_INITIALIZER + (struct cmd_clear_vf_stats_result, + vf, "vf"); +cmdline_parse_token_string_t cmd_clear_vf_stats_stats = + TOKEN_STRING_INITIALIZER + (struct cmd_clear_vf_stats_result, + stats, "stats"); +cmdline_parse_token_num_t cmd_clear_vf_stats_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_clear_vf_stats_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_clear_vf_stats_vf_id = + TOKEN_NUM_INITIALIZER + (struct cmd_clear_vf_stats_result, + vf_id, UINT16); + +static void +cmd_clear_vf_stats_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_clear_vf_stats_result *res = parsed_result; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + if (ret == -ENOTSUP) + ret = rte_pmd_i40e_reset_vf_stats(res->port_id, + res->vf_id); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (ret == 
-ENOTSUP) + ret = rte_pmd_bnxt_reset_vf_stats(res->port_id, + res->vf_id); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid vf_id %d\n", res->vf_id); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_clear_vf_stats = { + .f = cmd_clear_vf_stats_parsed, + .data = NULL, + .help_str = "clear vf stats <port_id> <vf_id>", + .tokens = { + (void *)&cmd_clear_vf_stats_clear, + (void *)&cmd_clear_vf_stats_vf, + (void *)&cmd_clear_vf_stats_stats, + (void *)&cmd_clear_vf_stats_port_id, + (void *)&cmd_clear_vf_stats_vf_id, + NULL, + }, +}; + +/* port config pctype mapping reset */ + +/* Common result structure for port config pctype mapping reset */ +struct cmd_pctype_mapping_reset_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t pctype; + cmdline_fixed_string_t mapping; + cmdline_fixed_string_t reset; +}; + +/* Common CLI fields for port config pctype mapping reset*/ +cmdline_parse_token_string_t cmd_pctype_mapping_reset_port = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_reset_result, + port, "port"); +cmdline_parse_token_string_t cmd_pctype_mapping_reset_config = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_reset_result, + config, "config"); +cmdline_parse_token_num_t cmd_pctype_mapping_reset_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_pctype_mapping_reset_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_pctype_mapping_reset_pctype = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_reset_result, + pctype, "pctype"); +cmdline_parse_token_string_t cmd_pctype_mapping_reset_mapping = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_reset_result, + mapping, "mapping"); +cmdline_parse_token_string_t cmd_pctype_mapping_reset_reset = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_reset_result, + reset, "reset"); + +static void +cmd_pctype_mapping_reset_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_pctype_mapping_reset_result *res = parsed_result; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_flow_type_mapping_reset(res->port_id); +#endif + + switch (ret) { + case 0: + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_pctype_mapping_reset = { + .f = cmd_pctype_mapping_reset_parsed, + .data = NULL, + .help_str = "port config <port_id> pctype mapping reset", + .tokens = { + (void *)&cmd_pctype_mapping_reset_port, + (void *)&cmd_pctype_mapping_reset_config, + (void *)&cmd_pctype_mapping_reset_port_id, + (void *)&cmd_pctype_mapping_reset_pctype, + (void *)&cmd_pctype_mapping_reset_mapping, + (void *)&cmd_pctype_mapping_reset_reset, + NULL, + }, +}; + +/* show port pctype mapping */ + +/* Common result structure for show port pctype mapping */ +struct cmd_pctype_mapping_get_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t pctype; + cmdline_fixed_string_t mapping; +}; + +/* Common CLI fields for pctype mapping get */ +cmdline_parse_token_string_t 
cmd_pctype_mapping_get_show = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_get_result, + show, "show"); +cmdline_parse_token_string_t cmd_pctype_mapping_get_port = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_get_result, + port, "port"); +cmdline_parse_token_num_t cmd_pctype_mapping_get_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_pctype_mapping_get_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_pctype_mapping_get_pctype = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_get_result, + pctype, "pctype"); +cmdline_parse_token_string_t cmd_pctype_mapping_get_mapping = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_get_result, + mapping, "mapping"); + +static void +cmd_pctype_mapping_get_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_pctype_mapping_get_result *res = parsed_result; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_flow_type_mapping + mapping[RTE_PMD_I40E_FLOW_TYPE_MAX]; + int i, j, first_pctype; +#endif + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_flow_type_mapping_get(res->port_id, mapping); +#endif + + switch (ret) { + case 0: + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + return; + case -ENOTSUP: + printf("function not implemented\n"); + return; + default: + printf("programming error: (%s)\n", strerror(-ret)); + return; + } + +#ifdef RTE_LIBRTE_I40E_PMD + for (i = 0; i < RTE_PMD_I40E_FLOW_TYPE_MAX; i++) { + if (mapping[i].pctype != 0ULL) { + first_pctype = 1; + + printf("pctype: "); + for (j = 0; j < RTE_PMD_I40E_PCTYPE_MAX; j++) { + if (mapping[i].pctype & (1ULL << j)) { + printf(first_pctype ? + "%02d" : ",%02d", j); + first_pctype = 0; + } + } + printf(" -> flowtype: %02d\n", mapping[i].flow_type); + } + } +#endif +} + +cmdline_parse_inst_t cmd_pctype_mapping_get = { + .f = cmd_pctype_mapping_get_parsed, + .data = NULL, + .help_str = "show port <port_id> pctype mapping", + .tokens = { + (void *)&cmd_pctype_mapping_get_show, + (void *)&cmd_pctype_mapping_get_port, + (void *)&cmd_pctype_mapping_get_port_id, + (void *)&cmd_pctype_mapping_get_pctype, + (void *)&cmd_pctype_mapping_get_mapping, + NULL, + }, +}; + +/* port config pctype mapping update */ + +/* Common result structure for port config pctype mapping update */ +struct cmd_pctype_mapping_update_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t pctype; + cmdline_fixed_string_t mapping; + cmdline_fixed_string_t update; + cmdline_fixed_string_t pctype_list; + uint16_t flow_type; +}; + +/* Common CLI fields for pctype mapping update*/ +cmdline_parse_token_string_t cmd_pctype_mapping_update_port = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_update_result, + port, "port"); +cmdline_parse_token_string_t cmd_pctype_mapping_update_config = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_update_result, + config, "config"); +cmdline_parse_token_num_t cmd_pctype_mapping_update_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_pctype_mapping_update_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_pctype_mapping_update_pctype = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_update_result, + pctype, "pctype"); +cmdline_parse_token_string_t cmd_pctype_mapping_update_mapping = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_update_result, + mapping, "mapping"); 
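+/*
+ * The <pctype_list> argument of the "pctype mapping update" command is
+ * accepted as a free-form string (TOKEN_STRING_INITIALIZER with a NULL
+ * constraint below); cmd_pctype_mapping_update_parsed() later splits it with
+ * parse_item_list() into up to RTE_PMD_I40E_PCTYPE_MAX comma-separated
+ * pctype ids and ORs the corresponding bits into mapping.pctype.
+ */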
+cmdline_parse_token_string_t cmd_pctype_mapping_update_update = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_update_result, + update, "update"); +cmdline_parse_token_string_t cmd_pctype_mapping_update_pc_type = + TOKEN_STRING_INITIALIZER + (struct cmd_pctype_mapping_update_result, + pctype_list, NULL); +cmdline_parse_token_num_t cmd_pctype_mapping_update_flow_type = + TOKEN_NUM_INITIALIZER + (struct cmd_pctype_mapping_update_result, + flow_type, UINT16); + +static void +cmd_pctype_mapping_update_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_pctype_mapping_update_result *res = parsed_result; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_flow_type_mapping mapping; + unsigned int i; + unsigned int nb_item; + unsigned int pctype_list[RTE_PMD_I40E_PCTYPE_MAX]; +#endif + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + nb_item = parse_item_list(res->pctype_list, "pctypes", + RTE_PMD_I40E_PCTYPE_MAX, pctype_list, 1); + mapping.flow_type = res->flow_type; + for (i = 0, mapping.pctype = 0ULL; i < nb_item; i++) + mapping.pctype |= (1ULL << pctype_list[i]); + ret = rte_pmd_i40e_flow_type_mapping_update(res->port_id, + &mapping, + 1, + 0); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid pctype or flow type\n"); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_pctype_mapping_update = { + .f = cmd_pctype_mapping_update_parsed, + .data = NULL, + .help_str = "port config <port_id> pctype mapping update" + " <pctype_id_0,[pctype_id_1]*> <flowtype_id>", + .tokens = { + (void *)&cmd_pctype_mapping_update_port, + (void *)&cmd_pctype_mapping_update_config, + (void *)&cmd_pctype_mapping_update_port_id, + (void *)&cmd_pctype_mapping_update_pctype, + (void *)&cmd_pctype_mapping_update_mapping, + (void *)&cmd_pctype_mapping_update_update, + (void *)&cmd_pctype_mapping_update_pc_type, + (void *)&cmd_pctype_mapping_update_flow_type, + NULL, + }, +}; + +/* ptype mapping get */ + +/* Common result structure for ptype mapping get */ +struct cmd_ptype_mapping_get_result { + cmdline_fixed_string_t ptype; + cmdline_fixed_string_t mapping; + cmdline_fixed_string_t get; + portid_t port_id; + uint8_t valid_only; +}; + +/* Common CLI fields for ptype mapping get */ +cmdline_parse_token_string_t cmd_ptype_mapping_get_ptype = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_get_result, + ptype, "ptype"); +cmdline_parse_token_string_t cmd_ptype_mapping_get_mapping = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_get_result, + mapping, "mapping"); +cmdline_parse_token_string_t cmd_ptype_mapping_get_get = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_get_result, + get, "get"); +cmdline_parse_token_num_t cmd_ptype_mapping_get_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_get_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_ptype_mapping_get_valid_only = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_get_result, + valid_only, UINT8); + +static void +cmd_ptype_mapping_get_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_ptype_mapping_get_result *res = parsed_result; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + int max_ptype_num = 256; + 
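+	/*
+	 * 'mapping' below is a variable-length array of 256 entries on the
+	 * stack; rte_pmd_i40e_ptype_mapping_get() fills at most max_ptype_num
+	 * entries and reports the number actually written through 'count',
+	 * and only those entries are printed.
+	 */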
struct rte_pmd_i40e_ptype_mapping mapping[max_ptype_num]; + uint16_t count; + int i; +#endif + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_ptype_mapping_get(res->port_id, + mapping, + max_ptype_num, + &count, + res->valid_only); +#endif + + switch (ret) { + case 0: + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } + +#ifdef RTE_LIBRTE_I40E_PMD + if (!ret) { + for (i = 0; i < count; i++) + printf("%3d\t0x%08x\n", + mapping[i].hw_ptype, mapping[i].sw_ptype); + } +#endif +} + +cmdline_parse_inst_t cmd_ptype_mapping_get = { + .f = cmd_ptype_mapping_get_parsed, + .data = NULL, + .help_str = "ptype mapping get <port_id> <valid_only>", + .tokens = { + (void *)&cmd_ptype_mapping_get_ptype, + (void *)&cmd_ptype_mapping_get_mapping, + (void *)&cmd_ptype_mapping_get_get, + (void *)&cmd_ptype_mapping_get_port_id, + (void *)&cmd_ptype_mapping_get_valid_only, + NULL, + }, +}; + +/* ptype mapping replace */ + +/* Common result structure for ptype mapping replace */ +struct cmd_ptype_mapping_replace_result { + cmdline_fixed_string_t ptype; + cmdline_fixed_string_t mapping; + cmdline_fixed_string_t replace; + portid_t port_id; + uint32_t target; + uint8_t mask; + uint32_t pkt_type; +}; + +/* Common CLI fields for ptype mapping replace */ +cmdline_parse_token_string_t cmd_ptype_mapping_replace_ptype = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_replace_result, + ptype, "ptype"); +cmdline_parse_token_string_t cmd_ptype_mapping_replace_mapping = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_replace_result, + mapping, "mapping"); +cmdline_parse_token_string_t cmd_ptype_mapping_replace_replace = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_replace_result, + replace, "replace"); +cmdline_parse_token_num_t cmd_ptype_mapping_replace_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_replace_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_ptype_mapping_replace_target = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_replace_result, + target, UINT32); +cmdline_parse_token_num_t cmd_ptype_mapping_replace_mask = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_replace_result, + mask, UINT8); +cmdline_parse_token_num_t cmd_ptype_mapping_replace_pkt_type = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_replace_result, + pkt_type, UINT32); + +static void +cmd_ptype_mapping_replace_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_ptype_mapping_replace_result *res = parsed_result; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_ptype_mapping_replace(res->port_id, + res->target, + res->mask, + res->pkt_type); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid ptype 0x%8x or 0x%8x\n", + res->target, res->pkt_type); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_ptype_mapping_replace = { + .f = cmd_ptype_mapping_replace_parsed, + .data = NULL, + .help_str = + "ptype mapping replace <port_id> <target> <mask> <pkt_type>", + .tokens = { + (void 
*)&cmd_ptype_mapping_replace_ptype, + (void *)&cmd_ptype_mapping_replace_mapping, + (void *)&cmd_ptype_mapping_replace_replace, + (void *)&cmd_ptype_mapping_replace_port_id, + (void *)&cmd_ptype_mapping_replace_target, + (void *)&cmd_ptype_mapping_replace_mask, + (void *)&cmd_ptype_mapping_replace_pkt_type, + NULL, + }, +}; + +/* ptype mapping reset */ + +/* Common result structure for ptype mapping reset */ +struct cmd_ptype_mapping_reset_result { + cmdline_fixed_string_t ptype; + cmdline_fixed_string_t mapping; + cmdline_fixed_string_t reset; + portid_t port_id; +}; + +/* Common CLI fields for ptype mapping reset*/ +cmdline_parse_token_string_t cmd_ptype_mapping_reset_ptype = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_reset_result, + ptype, "ptype"); +cmdline_parse_token_string_t cmd_ptype_mapping_reset_mapping = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_reset_result, + mapping, "mapping"); +cmdline_parse_token_string_t cmd_ptype_mapping_reset_reset = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_reset_result, + reset, "reset"); +cmdline_parse_token_num_t cmd_ptype_mapping_reset_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_reset_result, + port_id, UINT16); + +static void +cmd_ptype_mapping_reset_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_ptype_mapping_reset_result *res = parsed_result; + int ret = -ENOTSUP; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + ret = rte_pmd_i40e_ptype_mapping_reset(res->port_id); +#endif + + switch (ret) { + case 0: + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_ptype_mapping_reset = { + .f = cmd_ptype_mapping_reset_parsed, + .data = NULL, + .help_str = "ptype mapping reset <port_id>", + .tokens = { + (void *)&cmd_ptype_mapping_reset_ptype, + (void *)&cmd_ptype_mapping_reset_mapping, + (void *)&cmd_ptype_mapping_reset_reset, + (void *)&cmd_ptype_mapping_reset_port_id, + NULL, + }, +}; + +/* ptype mapping update */ + +/* Common result structure for ptype mapping update */ +struct cmd_ptype_mapping_update_result { + cmdline_fixed_string_t ptype; + cmdline_fixed_string_t mapping; + cmdline_fixed_string_t reset; + portid_t port_id; + uint8_t hw_ptype; + uint32_t sw_ptype; +}; + +/* Common CLI fields for ptype mapping update*/ +cmdline_parse_token_string_t cmd_ptype_mapping_update_ptype = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_update_result, + ptype, "ptype"); +cmdline_parse_token_string_t cmd_ptype_mapping_update_mapping = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_update_result, + mapping, "mapping"); +cmdline_parse_token_string_t cmd_ptype_mapping_update_update = + TOKEN_STRING_INITIALIZER + (struct cmd_ptype_mapping_update_result, + reset, "update"); +cmdline_parse_token_num_t cmd_ptype_mapping_update_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_update_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_ptype_mapping_update_hw_ptype = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_update_result, + hw_ptype, UINT8); +cmdline_parse_token_num_t cmd_ptype_mapping_update_sw_ptype = + TOKEN_NUM_INITIALIZER + (struct cmd_ptype_mapping_update_result, + sw_ptype, UINT32); + +static void +cmd_ptype_mapping_update_parsed( + void *parsed_result, + 
__rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_ptype_mapping_update_result *res = parsed_result; + int ret = -ENOTSUP; +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_ptype_mapping mapping; +#endif + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + +#ifdef RTE_LIBRTE_I40E_PMD + mapping.hw_ptype = res->hw_ptype; + mapping.sw_ptype = res->sw_ptype; + ret = rte_pmd_i40e_ptype_mapping_update(res->port_id, + &mapping, + 1, + 0); +#endif + + switch (ret) { + case 0: + break; + case -EINVAL: + printf("invalid ptype 0x%8x\n", res->sw_ptype); + break; + case -ENODEV: + printf("invalid port_id %d\n", res->port_id); + break; + case -ENOTSUP: + printf("function not implemented\n"); + break; + default: + printf("programming error: (%s)\n", strerror(-ret)); + } +} + +cmdline_parse_inst_t cmd_ptype_mapping_update = { + .f = cmd_ptype_mapping_update_parsed, + .data = NULL, + .help_str = "ptype mapping update <port_id> <hw_ptype> <sw_ptype>", + .tokens = { + (void *)&cmd_ptype_mapping_update_ptype, + (void *)&cmd_ptype_mapping_update_mapping, + (void *)&cmd_ptype_mapping_update_update, + (void *)&cmd_ptype_mapping_update_port_id, + (void *)&cmd_ptype_mapping_update_hw_ptype, + (void *)&cmd_ptype_mapping_update_sw_ptype, + NULL, + }, +}; + +/* Common result structure for file commands */ +struct cmd_cmdfile_result { + cmdline_fixed_string_t load; + cmdline_fixed_string_t filename; +}; + +/* Common CLI fields for file commands */ +cmdline_parse_token_string_t cmd_load_cmdfile = + TOKEN_STRING_INITIALIZER(struct cmd_cmdfile_result, load, "load"); +cmdline_parse_token_string_t cmd_load_cmdfile_filename = + TOKEN_STRING_INITIALIZER(struct cmd_cmdfile_result, filename, NULL); + +static void +cmd_load_from_file_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_cmdfile_result *res = parsed_result; + + cmdline_read_from_file(res->filename); +} + +cmdline_parse_inst_t cmd_load_from_file = { + .f = cmd_load_from_file_parsed, + .data = NULL, + .help_str = "load <filename>", + .tokens = { + (void *)&cmd_load_cmdfile, + (void *)&cmd_load_cmdfile_filename, + NULL, + }, +}; + +/* Get Rx offloads capabilities */ +struct cmd_rx_offload_get_capa_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t rx_offload; + cmdline_fixed_string_t capabilities; +}; + +cmdline_parse_token_string_t cmd_rx_offload_get_capa_show = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + show, "show"); +cmdline_parse_token_string_t cmd_rx_offload_get_capa_port = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + port, "port"); +cmdline_parse_token_num_t cmd_rx_offload_get_capa_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_rx_offload_get_capa_rx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + rx_offload, "rx_offload"); +cmdline_parse_token_string_t cmd_rx_offload_get_capa_capabilities = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_capa_result, + capabilities, "capabilities"); + +static void +print_rx_offloads(uint64_t offloads) +{ + uint64_t single_offload; + int begin; + int end; + int bit; + + if (offloads == 0) + return; + + begin = __builtin_ctzll(offloads); + end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads); + + single_offload = 1ULL << begin; + for (bit = begin; bit < end; bit++) { + if (offloads & 
single_offload) + printf(" %s", + rte_eth_dev_rx_offload_name(single_offload)); + single_offload <<= 1; + } +} + +static void +cmd_rx_offload_get_capa_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_rx_offload_get_capa_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + uint64_t queue_offloads; + uint64_t port_offloads; + int ret; + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + queue_offloads = dev_info.rx_queue_offload_capa; + port_offloads = dev_info.rx_offload_capa ^ queue_offloads; + + printf("Rx Offloading Capabilities of port %d :\n", port_id); + printf(" Per Queue :"); + print_rx_offloads(queue_offloads); + + printf("\n"); + printf(" Per Port :"); + print_rx_offloads(port_offloads); + printf("\n\n"); +} + +cmdline_parse_inst_t cmd_rx_offload_get_capa = { + .f = cmd_rx_offload_get_capa_parsed, + .data = NULL, + .help_str = "show port <port_id> rx_offload capabilities", + .tokens = { + (void *)&cmd_rx_offload_get_capa_show, + (void *)&cmd_rx_offload_get_capa_port, + (void *)&cmd_rx_offload_get_capa_port_id, + (void *)&cmd_rx_offload_get_capa_rx_offload, + (void *)&cmd_rx_offload_get_capa_capabilities, + NULL, + } +}; + +/* Get Rx offloads configuration */ +struct cmd_rx_offload_get_configuration_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t rx_offload; + cmdline_fixed_string_t configuration; +}; + +cmdline_parse_token_string_t cmd_rx_offload_get_configuration_show = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + show, "show"); +cmdline_parse_token_string_t cmd_rx_offload_get_configuration_port = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + port, "port"); +cmdline_parse_token_num_t cmd_rx_offload_get_configuration_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_rx_offload_get_configuration_rx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + rx_offload, "rx_offload"); +cmdline_parse_token_string_t cmd_rx_offload_get_configuration_configuration = + TOKEN_STRING_INITIALIZER + (struct cmd_rx_offload_get_configuration_result, + configuration, "configuration"); + +static void +cmd_rx_offload_get_configuration_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_rx_offload_get_configuration_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + struct rte_port *port = &ports[port_id]; + uint64_t port_offloads; + uint64_t queue_offloads; + uint16_t nb_rx_queues; + int q; + int ret; + + printf("Rx Offloading Configuration of port %d :\n", port_id); + + port_offloads = port->dev_conf.rxmode.offloads; + printf(" Port :"); + print_rx_offloads(port_offloads); + printf("\n"); + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + nb_rx_queues = dev_info.nb_rx_queues; + for (q = 0; q < nb_rx_queues; q++) { + queue_offloads = port->rx_conf[q].offloads; + printf(" Queue[%2d] :", q); + print_rx_offloads(queue_offloads); + printf("\n"); + } + printf("\n"); +} + +cmdline_parse_inst_t cmd_rx_offload_get_configuration = { + .f = cmd_rx_offload_get_configuration_parsed, + .data = NULL, + .help_str = "show port <port_id> rx_offload configuration", + .tokens = { + 
(void *)&cmd_rx_offload_get_configuration_show, + (void *)&cmd_rx_offload_get_configuration_port, + (void *)&cmd_rx_offload_get_configuration_port_id, + (void *)&cmd_rx_offload_get_configuration_rx_offload, + (void *)&cmd_rx_offload_get_configuration_configuration, + NULL, + } +}; + +/* Enable/Disable a per port offloading */ +struct cmd_config_per_port_rx_offload_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t rx_offload; + cmdline_fixed_string_t offload; + cmdline_fixed_string_t on_off; +}; + +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + port, "port"); +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_config = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + config, "config"); +cmdline_parse_token_num_t cmd_config_per_port_rx_offload_result_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_rx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + rx_offload, "rx_offload"); +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#" + "qinq_strip#outer_ipv4_cksum#macsec_strip#" + "header_split#vlan_filter#vlan_extend#jumbo_frame#" + "scatter#timestamp#security#keep_crc#rss_hash"); +cmdline_parse_token_string_t cmd_config_per_port_rx_offload_result_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_rx_offload_result, + on_off, "on#off"); + +static uint64_t +search_rx_offload(const char *name) +{ + uint64_t single_offload; + const char *single_name; + int found = 0; + unsigned int bit; + + single_offload = 1; + for (bit = 0; bit < sizeof(single_offload) * CHAR_BIT; bit++) { + single_name = rte_eth_dev_rx_offload_name(single_offload); + if (!strcasecmp(single_name, name)) { + found = 1; + break; + } + single_offload <<= 1; + } + + if (found) + return single_offload; + + return 0; +} + +static void +cmd_config_per_port_rx_offload_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_per_port_rx_offload_result *res = parsed_result; + portid_t port_id = res->port_id; + struct rte_eth_dev_info dev_info; + struct rte_port *port = &ports[port_id]; + uint64_t single_offload; + uint16_t nb_rx_queues; + int q; + int ret; + + if (port->port_status != RTE_PORT_STOPPED) { + printf("Error: Can't config offload when Port %d " + "is not stopped\n", port_id); + return; + } + + single_offload = search_rx_offload(res->offload); + if (single_offload == 0) { + printf("Unknown offload name: %s\n", res->offload); + return; + } + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + nb_rx_queues = dev_info.nb_rx_queues; + if (!strcmp(res->on_off, "on")) { + port->dev_conf.rxmode.offloads |= single_offload; + for (q = 0; q < nb_rx_queues; q++) + port->rx_conf[q].offloads |= single_offload; + } else { + port->dev_conf.rxmode.offloads &= ~single_offload; + for (q = 0; q < nb_rx_queues; q++) + port->rx_conf[q].offloads &= ~single_offload; + } + + cmd_reconfig_device_queue(port_id, 1, 1); +} + +cmdline_parse_inst_t cmd_config_per_port_rx_offload = { + .f = 
cmd_config_per_port_rx_offload_parsed, + .data = NULL, + .help_str = "port config <port_id> rx_offload vlan_strip|ipv4_cksum|" + "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|" + "macsec_strip|header_split|vlan_filter|vlan_extend|" + "jumbo_frame|scatter|timestamp|security|keep_crc|rss_hash " + "on|off", + .tokens = { + (void *)&cmd_config_per_port_rx_offload_result_port, + (void *)&cmd_config_per_port_rx_offload_result_config, + (void *)&cmd_config_per_port_rx_offload_result_port_id, + (void *)&cmd_config_per_port_rx_offload_result_rx_offload, + (void *)&cmd_config_per_port_rx_offload_result_offload, + (void *)&cmd_config_per_port_rx_offload_result_on_off, + NULL, + } +}; + +/* Enable/Disable a per queue offloading */ +struct cmd_config_per_queue_rx_offload_result { + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t rxq; + uint16_t queue_id; + cmdline_fixed_string_t rx_offload; + cmdline_fixed_string_t offload; + cmdline_fixed_string_t on_off; +}; + +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + port, "port"); +cmdline_parse_token_num_t cmd_config_per_queue_rx_offload_result_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_rxq = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + rxq, "rxq"); +cmdline_parse_token_num_t cmd_config_per_queue_rx_offload_result_queue_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + queue_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_rxoffload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + rx_offload, "rx_offload"); +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + offload, "vlan_strip#ipv4_cksum#udp_cksum#tcp_cksum#tcp_lro#" + "qinq_strip#outer_ipv4_cksum#macsec_strip#" + "header_split#vlan_filter#vlan_extend#jumbo_frame#" + "scatter#timestamp#security#keep_crc"); +cmdline_parse_token_string_t cmd_config_per_queue_rx_offload_result_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_rx_offload_result, + on_off, "on#off"); + +static void +cmd_config_per_queue_rx_offload_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_per_queue_rx_offload_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + uint16_t queue_id = res->queue_id; + struct rte_port *port = &ports[port_id]; + uint64_t single_offload; + int ret; + + if (port->port_status != RTE_PORT_STOPPED) { + printf("Error: Can't config offload when Port %d " + "is not stopped\n", port_id); + return; + } + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if (queue_id >= dev_info.nb_rx_queues) { + printf("Error: input queue_id should be 0 ... 
" + "%d\n", dev_info.nb_rx_queues - 1); + return; + } + + single_offload = search_rx_offload(res->offload); + if (single_offload == 0) { + printf("Unknown offload name: %s\n", res->offload); + return; + } + + if (!strcmp(res->on_off, "on")) + port->rx_conf[queue_id].offloads |= single_offload; + else + port->rx_conf[queue_id].offloads &= ~single_offload; + + cmd_reconfig_device_queue(port_id, 1, 1); +} + +cmdline_parse_inst_t cmd_config_per_queue_rx_offload = { + .f = cmd_config_per_queue_rx_offload_parsed, + .data = NULL, + .help_str = "port <port_id> rxq <queue_id> rx_offload " + "vlan_strip|ipv4_cksum|" + "udp_cksum|tcp_cksum|tcp_lro|qinq_strip|outer_ipv4_cksum|" + "macsec_strip|header_split|vlan_filter|vlan_extend|" + "jumbo_frame|scatter|timestamp|security|keep_crc " + "on|off", + .tokens = { + (void *)&cmd_config_per_queue_rx_offload_result_port, + (void *)&cmd_config_per_queue_rx_offload_result_port_id, + (void *)&cmd_config_per_queue_rx_offload_result_rxq, + (void *)&cmd_config_per_queue_rx_offload_result_queue_id, + (void *)&cmd_config_per_queue_rx_offload_result_rxoffload, + (void *)&cmd_config_per_queue_rx_offload_result_offload, + (void *)&cmd_config_per_queue_rx_offload_result_on_off, + NULL, + } +}; + +/* Get Tx offloads capabilities */ +struct cmd_tx_offload_get_capa_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t tx_offload; + cmdline_fixed_string_t capabilities; +}; + +cmdline_parse_token_string_t cmd_tx_offload_get_capa_show = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + show, "show"); +cmdline_parse_token_string_t cmd_tx_offload_get_capa_port = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + port, "port"); +cmdline_parse_token_num_t cmd_tx_offload_get_capa_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_tx_offload_get_capa_tx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + tx_offload, "tx_offload"); +cmdline_parse_token_string_t cmd_tx_offload_get_capa_capabilities = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_capa_result, + capabilities, "capabilities"); + +static void +print_tx_offloads(uint64_t offloads) +{ + uint64_t single_offload; + int begin; + int end; + int bit; + + if (offloads == 0) + return; + + begin = __builtin_ctzll(offloads); + end = sizeof(offloads) * CHAR_BIT - __builtin_clzll(offloads); + + single_offload = 1ULL << begin; + for (bit = begin; bit < end; bit++) { + if (offloads & single_offload) + printf(" %s", + rte_eth_dev_tx_offload_name(single_offload)); + single_offload <<= 1; + } +} + +static void +cmd_tx_offload_get_capa_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tx_offload_get_capa_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + uint64_t queue_offloads; + uint64_t port_offloads; + int ret; + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + queue_offloads = dev_info.tx_queue_offload_capa; + port_offloads = dev_info.tx_offload_capa ^ queue_offloads; + + printf("Tx Offloading Capabilities of port %d :\n", port_id); + printf(" Per Queue :"); + print_tx_offloads(queue_offloads); + + printf("\n"); + printf(" Per Port :"); + print_tx_offloads(port_offloads); + printf("\n\n"); +} + +cmdline_parse_inst_t cmd_tx_offload_get_capa = { + .f = 
cmd_tx_offload_get_capa_parsed, + .data = NULL, + .help_str = "show port <port_id> tx_offload capabilities", + .tokens = { + (void *)&cmd_tx_offload_get_capa_show, + (void *)&cmd_tx_offload_get_capa_port, + (void *)&cmd_tx_offload_get_capa_port_id, + (void *)&cmd_tx_offload_get_capa_tx_offload, + (void *)&cmd_tx_offload_get_capa_capabilities, + NULL, + } +}; + +/* Get Tx offloads configuration */ +struct cmd_tx_offload_get_configuration_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t tx_offload; + cmdline_fixed_string_t configuration; +}; + +cmdline_parse_token_string_t cmd_tx_offload_get_configuration_show = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + show, "show"); +cmdline_parse_token_string_t cmd_tx_offload_get_configuration_port = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + port, "port"); +cmdline_parse_token_num_t cmd_tx_offload_get_configuration_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_tx_offload_get_configuration_tx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + tx_offload, "tx_offload"); +cmdline_parse_token_string_t cmd_tx_offload_get_configuration_configuration = + TOKEN_STRING_INITIALIZER + (struct cmd_tx_offload_get_configuration_result, + configuration, "configuration"); + +static void +cmd_tx_offload_get_configuration_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_tx_offload_get_configuration_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + struct rte_port *port = &ports[port_id]; + uint64_t port_offloads; + uint64_t queue_offloads; + uint16_t nb_tx_queues; + int q; + int ret; + + printf("Tx Offloading Configuration of port %d :\n", port_id); + + port_offloads = port->dev_conf.txmode.offloads; + printf(" Port :"); + print_tx_offloads(port_offloads); + printf("\n"); + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + nb_tx_queues = dev_info.nb_tx_queues; + for (q = 0; q < nb_tx_queues; q++) { + queue_offloads = port->tx_conf[q].offloads; + printf(" Queue[%2d] :", q); + print_tx_offloads(queue_offloads); + printf("\n"); + } + printf("\n"); +} + +cmdline_parse_inst_t cmd_tx_offload_get_configuration = { + .f = cmd_tx_offload_get_configuration_parsed, + .data = NULL, + .help_str = "show port <port_id> tx_offload configuration", + .tokens = { + (void *)&cmd_tx_offload_get_configuration_show, + (void *)&cmd_tx_offload_get_configuration_port, + (void *)&cmd_tx_offload_get_configuration_port_id, + (void *)&cmd_tx_offload_get_configuration_tx_offload, + (void *)&cmd_tx_offload_get_configuration_configuration, + NULL, + } +}; + +/* Enable/Disable a per port offloading */ +struct cmd_config_per_port_tx_offload_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t config; + portid_t port_id; + cmdline_fixed_string_t tx_offload; + cmdline_fixed_string_t offload; + cmdline_fixed_string_t on_off; +}; + +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + port, "port"); +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_config = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + config, "config"); 
+cmdline_parse_token_num_t cmd_config_per_port_tx_offload_result_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_tx_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + tx_offload, "tx_offload"); +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + offload, "vlan_insert#ipv4_cksum#udp_cksum#tcp_cksum#" + "sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#" + "qinq_insert#vxlan_tnl_tso#gre_tnl_tso#" + "ipip_tnl_tso#geneve_tnl_tso#macsec_insert#" + "mt_lockfree#multi_segs#mbuf_fast_free#security"); +cmdline_parse_token_string_t cmd_config_per_port_tx_offload_result_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_port_tx_offload_result, + on_off, "on#off"); + +static uint64_t +search_tx_offload(const char *name) +{ + uint64_t single_offload; + const char *single_name; + int found = 0; + unsigned int bit; + + single_offload = 1; + for (bit = 0; bit < sizeof(single_offload) * CHAR_BIT; bit++) { + single_name = rte_eth_dev_tx_offload_name(single_offload); + if (single_name == NULL) + break; + if (!strcasecmp(single_name, name)) { + found = 1; + break; + } else if (!strcasecmp(single_name, "UNKNOWN")) + break; + single_offload <<= 1; + } + + if (found) + return single_offload; + + return 0; +} + +static void +cmd_config_per_port_tx_offload_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_per_port_tx_offload_result *res = parsed_result; + portid_t port_id = res->port_id; + struct rte_eth_dev_info dev_info; + struct rte_port *port = &ports[port_id]; + uint64_t single_offload; + uint16_t nb_tx_queues; + int q; + int ret; + + if (port->port_status != RTE_PORT_STOPPED) { + printf("Error: Can't config offload when Port %d " + "is not stopped\n", port_id); + return; + } + + single_offload = search_tx_offload(res->offload); + if (single_offload == 0) { + printf("Unknown offload name: %s\n", res->offload); + return; + } + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + nb_tx_queues = dev_info.nb_tx_queues; + if (!strcmp(res->on_off, "on")) { + port->dev_conf.txmode.offloads |= single_offload; + for (q = 0; q < nb_tx_queues; q++) + port->tx_conf[q].offloads |= single_offload; + } else { + port->dev_conf.txmode.offloads &= ~single_offload; + for (q = 0; q < nb_tx_queues; q++) + port->tx_conf[q].offloads &= ~single_offload; + } + + cmd_reconfig_device_queue(port_id, 1, 1); +} + +cmdline_parse_inst_t cmd_config_per_port_tx_offload = { + .f = cmd_config_per_port_tx_offload_parsed, + .data = NULL, + .help_str = "port config <port_id> tx_offload " + "vlan_insert|ipv4_cksum|udp_cksum|tcp_cksum|" + "sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|" + "qinq_insert|vxlan_tnl_tso|gre_tnl_tso|" + "ipip_tnl_tso|geneve_tnl_tso|macsec_insert|" + "mt_lockfree|multi_segs|mbuf_fast_free|security on|off", + .tokens = { + (void *)&cmd_config_per_port_tx_offload_result_port, + (void *)&cmd_config_per_port_tx_offload_result_config, + (void *)&cmd_config_per_port_tx_offload_result_port_id, + (void *)&cmd_config_per_port_tx_offload_result_tx_offload, + (void *)&cmd_config_per_port_tx_offload_result_offload, + (void *)&cmd_config_per_port_tx_offload_result_on_off, + NULL, + } +}; + +/* Enable/Disable a per queue offloading */ +struct cmd_config_per_queue_tx_offload_result 
{ + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t txq; + uint16_t queue_id; + cmdline_fixed_string_t tx_offload; + cmdline_fixed_string_t offload; + cmdline_fixed_string_t on_off; +}; + +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_port = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + port, "port"); +cmdline_parse_token_num_t cmd_config_per_queue_tx_offload_result_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_txq = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + txq, "txq"); +cmdline_parse_token_num_t cmd_config_per_queue_tx_offload_result_queue_id = + TOKEN_NUM_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + queue_id, UINT16); +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_txoffload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + tx_offload, "tx_offload"); +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_offload = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + offload, "vlan_insert#ipv4_cksum#udp_cksum#tcp_cksum#" + "sctp_cksum#tcp_tso#udp_tso#outer_ipv4_cksum#" + "qinq_insert#vxlan_tnl_tso#gre_tnl_tso#" + "ipip_tnl_tso#geneve_tnl_tso#macsec_insert#" + "mt_lockfree#multi_segs#mbuf_fast_free#security"); +cmdline_parse_token_string_t cmd_config_per_queue_tx_offload_result_on_off = + TOKEN_STRING_INITIALIZER + (struct cmd_config_per_queue_tx_offload_result, + on_off, "on#off"); + +static void +cmd_config_per_queue_tx_offload_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_per_queue_tx_offload_result *res = parsed_result; + struct rte_eth_dev_info dev_info; + portid_t port_id = res->port_id; + uint16_t queue_id = res->queue_id; + struct rte_port *port = &ports[port_id]; + uint64_t single_offload; + int ret; + + if (port->port_status != RTE_PORT_STOPPED) { + printf("Error: Can't config offload when Port %d " + "is not stopped\n", port_id); + return; + } + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if (queue_id >= dev_info.nb_tx_queues) { + printf("Error: input queue_id should be 0 ... 
" + "%d\n", dev_info.nb_tx_queues - 1); + return; + } + + single_offload = search_tx_offload(res->offload); + if (single_offload == 0) { + printf("Unknown offload name: %s\n", res->offload); + return; + } + + if (!strcmp(res->on_off, "on")) + port->tx_conf[queue_id].offloads |= single_offload; + else + port->tx_conf[queue_id].offloads &= ~single_offload; + + cmd_reconfig_device_queue(port_id, 1, 1); +} + +cmdline_parse_inst_t cmd_config_per_queue_tx_offload = { + .f = cmd_config_per_queue_tx_offload_parsed, + .data = NULL, + .help_str = "port <port_id> txq <queue_id> tx_offload " + "vlan_insert|ipv4_cksum|udp_cksum|tcp_cksum|" + "sctp_cksum|tcp_tso|udp_tso|outer_ipv4_cksum|" + "qinq_insert|vxlan_tnl_tso|gre_tnl_tso|" + "ipip_tnl_tso|geneve_tnl_tso|macsec_insert|" + "mt_lockfree|multi_segs|mbuf_fast_free|security " + "on|off", + .tokens = { + (void *)&cmd_config_per_queue_tx_offload_result_port, + (void *)&cmd_config_per_queue_tx_offload_result_port_id, + (void *)&cmd_config_per_queue_tx_offload_result_txq, + (void *)&cmd_config_per_queue_tx_offload_result_queue_id, + (void *)&cmd_config_per_queue_tx_offload_result_txoffload, + (void *)&cmd_config_per_queue_tx_offload_result_offload, + (void *)&cmd_config_per_queue_tx_offload_result_on_off, + NULL, + } +}; + +/* *** configure tx_metadata for specific port *** */ +struct cmd_config_tx_metadata_specific_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + uint16_t port_id; + cmdline_fixed_string_t item; + uint32_t value; +}; + +static void +cmd_config_tx_metadata_specific_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_tx_metadata_specific_result *res = parsed_result; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + ports[res->port_id].tx_metadata = res->value; + /* Add/remove callback to insert valid metadata in every Tx packet. 
*/ + if (ports[res->port_id].tx_metadata) + add_tx_md_callback(res->port_id); + else + remove_tx_md_callback(res->port_id); + rte_flow_dynf_metadata_register(); +} + +cmdline_parse_token_string_t cmd_config_tx_metadata_specific_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_tx_metadata_specific_result, + port, "port"); +cmdline_parse_token_string_t cmd_config_tx_metadata_specific_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_tx_metadata_specific_result, + keyword, "config"); +cmdline_parse_token_num_t cmd_config_tx_metadata_specific_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_tx_metadata_specific_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_tx_metadata_specific_item = + TOKEN_STRING_INITIALIZER(struct cmd_config_tx_metadata_specific_result, + item, "tx_metadata"); +cmdline_parse_token_num_t cmd_config_tx_metadata_specific_value = + TOKEN_NUM_INITIALIZER(struct cmd_config_tx_metadata_specific_result, + value, UINT32); + +cmdline_parse_inst_t cmd_config_tx_metadata_specific = { + .f = cmd_config_tx_metadata_specific_parsed, + .data = NULL, + .help_str = "port config <port_id> tx_metadata <value>", + .tokens = { + (void *)&cmd_config_tx_metadata_specific_port, + (void *)&cmd_config_tx_metadata_specific_keyword, + (void *)&cmd_config_tx_metadata_specific_id, + (void *)&cmd_config_tx_metadata_specific_item, + (void *)&cmd_config_tx_metadata_specific_value, + NULL, + }, +}; + +/* *** set dynf *** */ +struct cmd_config_tx_dynf_specific_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t keyword; + uint16_t port_id; + cmdline_fixed_string_t item; + cmdline_fixed_string_t name; + cmdline_fixed_string_t value; +}; + +static void +cmd_config_dynf_specific_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_config_tx_dynf_specific_result *res = parsed_result; + struct rte_mbuf_dynflag desc_flag; + int flag; + uint64_t old_port_flags; + + if (port_id_is_invalid(res->port_id, ENABLED_WARN)) + return; + flag = rte_mbuf_dynflag_lookup(res->name, NULL); + if (flag <= 0) { + if (strlcpy(desc_flag.name, res->name, + RTE_MBUF_DYN_NAMESIZE) >= RTE_MBUF_DYN_NAMESIZE) { + printf("Flag name too long\n"); + return; + } + desc_flag.flags = 0; + flag = rte_mbuf_dynflag_register(&desc_flag); + if (flag < 0) { + printf("Can't register flag\n"); + return; + } + strcpy(dynf_names[flag], desc_flag.name); + } + old_port_flags = ports[res->port_id].mbuf_dynf; + if (!strcmp(res->value, "set")) { + ports[res->port_id].mbuf_dynf |= 1UL << flag; + if (old_port_flags == 0) + add_tx_dynf_callback(res->port_id); + } else { + ports[res->port_id].mbuf_dynf &= ~(1UL << flag); + if (ports[res->port_id].mbuf_dynf == 0) + remove_tx_dynf_callback(res->port_id); + } +} + +cmdline_parse_token_string_t cmd_config_tx_dynf_specific_port = + TOKEN_STRING_INITIALIZER(struct cmd_config_tx_dynf_specific_result, + keyword, "port"); +cmdline_parse_token_string_t cmd_config_tx_dynf_specific_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_config_tx_dynf_specific_result, + keyword, "config"); +cmdline_parse_token_num_t cmd_config_tx_dynf_specific_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_config_tx_dynf_specific_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_config_tx_dynf_specific_item = + TOKEN_STRING_INITIALIZER(struct cmd_config_tx_dynf_specific_result, + item, "dynf"); +cmdline_parse_token_string_t cmd_config_tx_dynf_specific_name = + TOKEN_STRING_INITIALIZER(struct cmd_config_tx_dynf_specific_result, + name, NULL); 
+cmdline_parse_token_string_t cmd_config_tx_dynf_specific_value = + TOKEN_STRING_INITIALIZER(struct cmd_config_tx_dynf_specific_result, + value, "set#clear"); + +cmdline_parse_inst_t cmd_config_tx_dynf_specific = { + .f = cmd_config_dynf_specific_parsed, + .data = NULL, + .help_str = "port config <port id> dynf <name> set|clear", + .tokens = { + (void *)&cmd_config_tx_dynf_specific_port, + (void *)&cmd_config_tx_dynf_specific_keyword, + (void *)&cmd_config_tx_dynf_specific_port_id, + (void *)&cmd_config_tx_dynf_specific_item, + (void *)&cmd_config_tx_dynf_specific_name, + (void *)&cmd_config_tx_dynf_specific_value, + NULL, + }, +}; + +/* *** display tx_metadata per port configuration *** */ +struct cmd_show_tx_metadata_result { + cmdline_fixed_string_t cmd_show; + cmdline_fixed_string_t cmd_port; + cmdline_fixed_string_t cmd_keyword; + portid_t cmd_pid; +}; + +static void +cmd_show_tx_metadata_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_tx_metadata_result *res = parsed_result; + + if (!rte_eth_dev_is_valid_port(res->cmd_pid)) { + printf("invalid port id %u\n", res->cmd_pid); + return; + } + if (!strcmp(res->cmd_keyword, "tx_metadata")) { + printf("Port %u tx_metadata: %u\n", res->cmd_pid, + ports[res->cmd_pid].tx_metadata); + } +} + +cmdline_parse_token_string_t cmd_show_tx_metadata_show = + TOKEN_STRING_INITIALIZER(struct cmd_show_tx_metadata_result, + cmd_show, "show"); +cmdline_parse_token_string_t cmd_show_tx_metadata_port = + TOKEN_STRING_INITIALIZER(struct cmd_show_tx_metadata_result, + cmd_port, "port"); +cmdline_parse_token_num_t cmd_show_tx_metadata_pid = + TOKEN_NUM_INITIALIZER(struct cmd_show_tx_metadata_result, + cmd_pid, UINT16); +cmdline_parse_token_string_t cmd_show_tx_metadata_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_show_tx_metadata_result, + cmd_keyword, "tx_metadata"); + +cmdline_parse_inst_t cmd_show_tx_metadata = { + .f = cmd_show_tx_metadata_parsed, + .data = NULL, + .help_str = "show port <port_id> tx_metadata", + .tokens = { + (void *)&cmd_show_tx_metadata_show, + (void *)&cmd_show_tx_metadata_port, + (void *)&cmd_show_tx_metadata_pid, + (void *)&cmd_show_tx_metadata_keyword, + NULL, + }, +}; + +/* show port supported ptypes */ + +/* Common result structure for show port ptypes */ +struct cmd_show_port_supported_ptypes_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t ptypes; +}; + +/* Common CLI fields for show port ptypes */ +cmdline_parse_token_string_t cmd_show_port_supported_ptypes_show = + TOKEN_STRING_INITIALIZER + (struct cmd_show_port_supported_ptypes_result, + show, "show"); +cmdline_parse_token_string_t cmd_show_port_supported_ptypes_port = + TOKEN_STRING_INITIALIZER + (struct cmd_show_port_supported_ptypes_result, + port, "port"); +cmdline_parse_token_num_t cmd_show_port_supported_ptypes_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_show_port_supported_ptypes_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_show_port_supported_ptypes_ptypes = + TOKEN_STRING_INITIALIZER + (struct cmd_show_port_supported_ptypes_result, + ptypes, "ptypes"); + +static void +cmd_show_port_supported_ptypes_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ +#define RSVD_PTYPE_MASK 0xf0000000 +#define MAX_PTYPES_PER_LAYER 16 +#define LTYPE_NAMESIZE 32 +#define PTYPE_NAMESIZE 256 + struct cmd_show_port_supported_ptypes_result *res = parsed_result; + char buf[PTYPE_NAMESIZE], 
ltype[LTYPE_NAMESIZE]; + uint32_t ptype_mask = RTE_PTYPE_L2_MASK; + uint32_t ptypes[MAX_PTYPES_PER_LAYER]; + uint16_t port_id = res->port_id; + int ret, i; + + ret = rte_eth_dev_get_supported_ptypes(port_id, ptype_mask, NULL, 0); + if (ret < 0) + return; + + while (ptype_mask != RSVD_PTYPE_MASK) { + + switch (ptype_mask) { + case RTE_PTYPE_L2_MASK: + strlcpy(ltype, "L2", sizeof(ltype)); + break; + case RTE_PTYPE_L3_MASK: + strlcpy(ltype, "L3", sizeof(ltype)); + break; + case RTE_PTYPE_L4_MASK: + strlcpy(ltype, "L4", sizeof(ltype)); + break; + case RTE_PTYPE_TUNNEL_MASK: + strlcpy(ltype, "Tunnel", sizeof(ltype)); + break; + case RTE_PTYPE_INNER_L2_MASK: + strlcpy(ltype, "Inner L2", sizeof(ltype)); + break; + case RTE_PTYPE_INNER_L3_MASK: + strlcpy(ltype, "Inner L3", sizeof(ltype)); + break; + case RTE_PTYPE_INNER_L4_MASK: + strlcpy(ltype, "Inner L4", sizeof(ltype)); + break; + default: + return; + } + + ret = rte_eth_dev_get_supported_ptypes(res->port_id, + ptype_mask, ptypes, + MAX_PTYPES_PER_LAYER); + + if (ret > 0) + printf("Supported %s ptypes:\n", ltype); + else + printf("%s ptypes unsupported\n", ltype); + + for (i = 0; i < ret; ++i) { + rte_get_ptype_name(ptypes[i], buf, sizeof(buf)); + printf("%s\n", buf); + } + + ptype_mask <<= 4; + } +} + +cmdline_parse_inst_t cmd_show_port_supported_ptypes = { + .f = cmd_show_port_supported_ptypes_parsed, + .data = NULL, + .help_str = "show port <port_id> ptypes", + .tokens = { + (void *)&cmd_show_port_supported_ptypes_show, + (void *)&cmd_show_port_supported_ptypes_port, + (void *)&cmd_show_port_supported_ptypes_port_id, + (void *)&cmd_show_port_supported_ptypes_ptypes, + NULL, + }, +}; + +/* *** display rx/tx descriptor status *** */ +struct cmd_show_rx_tx_desc_status_result { + cmdline_fixed_string_t cmd_show; + cmdline_fixed_string_t cmd_port; + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t cmd_desc; + cmdline_fixed_string_t cmd_status; + portid_t cmd_pid; + portid_t cmd_qid; + portid_t cmd_did; +}; + +static void +cmd_show_rx_tx_desc_status_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_rx_tx_desc_status_result *res = parsed_result; + int rc; + + if (!rte_eth_dev_is_valid_port(res->cmd_pid)) { + printf("invalid port id %u\n", res->cmd_pid); + return; + } + + if (!strcmp(res->cmd_keyword, "rxq")) { + rc = rte_eth_rx_descriptor_status(res->cmd_pid, res->cmd_qid, + res->cmd_did); + if (rc < 0) { + printf("Invalid queueid = %d\n", res->cmd_qid); + return; + } + if (rc == RTE_ETH_RX_DESC_AVAIL) + printf("Desc status = AVAILABLE\n"); + else if (rc == RTE_ETH_RX_DESC_DONE) + printf("Desc status = DONE\n"); + else + printf("Desc status = UNAVAILABLE\n"); + } else if (!strcmp(res->cmd_keyword, "txq")) { + rc = rte_eth_tx_descriptor_status(res->cmd_pid, res->cmd_qid, + res->cmd_did); + if (rc < 0) { + printf("Invalid queueid = %d\n", res->cmd_qid); + return; + } + if (rc == RTE_ETH_TX_DESC_FULL) + printf("Desc status = FULL\n"); + else if (rc == RTE_ETH_TX_DESC_DONE) + printf("Desc status = DONE\n"); + else + printf("Desc status = UNAVAILABLE\n"); + } +} + +cmdline_parse_token_string_t cmd_show_rx_tx_desc_status_show = + TOKEN_STRING_INITIALIZER(struct cmd_show_rx_tx_desc_status_result, + cmd_show, "show"); +cmdline_parse_token_string_t cmd_show_rx_tx_desc_status_port = + TOKEN_STRING_INITIALIZER(struct cmd_show_rx_tx_desc_status_result, + cmd_port, "port"); +cmdline_parse_token_num_t cmd_show_rx_tx_desc_status_pid = + TOKEN_NUM_INITIALIZER(struct 
cmd_show_rx_tx_desc_status_result, + cmd_pid, UINT16); +cmdline_parse_token_string_t cmd_show_rx_tx_desc_status_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_show_rx_tx_desc_status_result, + cmd_keyword, "rxq#txq"); +cmdline_parse_token_num_t cmd_show_rx_tx_desc_status_qid = + TOKEN_NUM_INITIALIZER(struct cmd_show_rx_tx_desc_status_result, + cmd_qid, UINT16); +cmdline_parse_token_string_t cmd_show_rx_tx_desc_status_desc = + TOKEN_STRING_INITIALIZER(struct cmd_show_rx_tx_desc_status_result, + cmd_desc, "desc"); +cmdline_parse_token_num_t cmd_show_rx_tx_desc_status_did = + TOKEN_NUM_INITIALIZER(struct cmd_show_rx_tx_desc_status_result, + cmd_did, UINT16); +cmdline_parse_token_string_t cmd_show_rx_tx_desc_status_status = + TOKEN_STRING_INITIALIZER(struct cmd_show_rx_tx_desc_status_result, + cmd_status, "status"); +cmdline_parse_inst_t cmd_show_rx_tx_desc_status = { + .f = cmd_show_rx_tx_desc_status_parsed, + .data = NULL, + .help_str = "show port <port_id> rxq|txq <queue_id> desc <desc_id> " + "status", + .tokens = { + (void *)&cmd_show_rx_tx_desc_status_show, + (void *)&cmd_show_rx_tx_desc_status_port, + (void *)&cmd_show_rx_tx_desc_status_pid, + (void *)&cmd_show_rx_tx_desc_status_keyword, + (void *)&cmd_show_rx_tx_desc_status_qid, + (void *)&cmd_show_rx_tx_desc_status_desc, + (void *)&cmd_show_rx_tx_desc_status_did, + (void *)&cmd_show_rx_tx_desc_status_status, + NULL, + }, +}; + +/* Common result structure for set port ptypes */ +struct cmd_set_port_ptypes_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + portid_t port_id; + cmdline_fixed_string_t ptype_mask; + uint32_t mask; +}; + +/* Common CLI fields for set port ptypes */ +cmdline_parse_token_string_t cmd_set_port_ptypes_set = + TOKEN_STRING_INITIALIZER + (struct cmd_set_port_ptypes_result, + set, "set"); +cmdline_parse_token_string_t cmd_set_port_ptypes_port = + TOKEN_STRING_INITIALIZER + (struct cmd_set_port_ptypes_result, + port, "port"); +cmdline_parse_token_num_t cmd_set_port_ptypes_port_id = + TOKEN_NUM_INITIALIZER + (struct cmd_set_port_ptypes_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_set_port_ptypes_mask_str = + TOKEN_STRING_INITIALIZER + (struct cmd_set_port_ptypes_result, + ptype_mask, "ptype_mask"); +cmdline_parse_token_num_t cmd_set_port_ptypes_mask_u32 = + TOKEN_NUM_INITIALIZER + (struct cmd_set_port_ptypes_result, + mask, UINT32); + +static void +cmd_set_port_ptypes_parsed( + void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_port_ptypes_result *res = parsed_result; +#define PTYPE_NAMESIZE 256 + char ptype_name[PTYPE_NAMESIZE]; + uint16_t port_id = res->port_id; + uint32_t ptype_mask = res->mask; + int ret, i; + + ret = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK, + NULL, 0); + if (ret <= 0) { + printf("Port %d doesn't support any ptypes.\n", port_id); + return; + } + + uint32_t ptypes[ret]; + + ret = rte_eth_dev_set_ptypes(port_id, ptype_mask, ptypes, ret); + if (ret < 0) { + printf("Unable to set requested ptypes for Port %d\n", port_id); + return; + } + + printf("Successfully set following ptypes for Port %d\n", port_id); + for (i = 0; i < ret && ptypes[i] != RTE_PTYPE_UNKNOWN; i++) { + rte_get_ptype_name(ptypes[i], ptype_name, sizeof(ptype_name)); + printf("%s\n", ptype_name); + } + + clear_ptypes = false; +} + +cmdline_parse_inst_t cmd_set_port_ptypes = { + .f = cmd_set_port_ptypes_parsed, + .data = NULL, + .help_str = "set port <port_id> ptype_mask <mask>", + .tokens = { + (void 
*)&cmd_set_port_ptypes_set, + (void *)&cmd_set_port_ptypes_port, + (void *)&cmd_set_port_ptypes_port_id, + (void *)&cmd_set_port_ptypes_mask_str, + (void *)&cmd_set_port_ptypes_mask_u32, + NULL, + }, +}; + +/* *** display mac addresses added to a port *** */ +struct cmd_showport_macs_result { + cmdline_fixed_string_t cmd_show; + cmdline_fixed_string_t cmd_port; + cmdline_fixed_string_t cmd_keyword; + portid_t cmd_pid; +}; + +static void +cmd_showport_macs_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_showport_macs_result *res = parsed_result; + + if (port_id_is_invalid(res->cmd_pid, ENABLED_WARN)) + return; + + if (!strcmp(res->cmd_keyword, "macs")) + show_macs(res->cmd_pid); + else if (!strcmp(res->cmd_keyword, "mcast_macs")) + show_mcast_macs(res->cmd_pid); +} + +cmdline_parse_token_string_t cmd_showport_macs_show = + TOKEN_STRING_INITIALIZER(struct cmd_showport_macs_result, + cmd_show, "show"); +cmdline_parse_token_string_t cmd_showport_macs_port = + TOKEN_STRING_INITIALIZER(struct cmd_showport_macs_result, + cmd_port, "port"); +cmdline_parse_token_num_t cmd_showport_macs_pid = + TOKEN_NUM_INITIALIZER(struct cmd_showport_macs_result, + cmd_pid, UINT16); +cmdline_parse_token_string_t cmd_showport_macs_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_showport_macs_result, + cmd_keyword, "macs#mcast_macs"); + +cmdline_parse_inst_t cmd_showport_macs = { + .f = cmd_showport_macs_parsed, + .data = NULL, + .help_str = "show port <port_id> macs|mcast_macs", + .tokens = { + (void *)&cmd_showport_macs_show, + (void *)&cmd_showport_macs_port, + (void *)&cmd_showport_macs_pid, + (void *)&cmd_showport_macs_keyword, + NULL, + }, +}; + +/* ******************************************************************************** */ + +/* list of instructions */ +cmdline_parse_ctx_t main_ctx[] = { + (cmdline_parse_inst_t *)&cmd_help_brief, + (cmdline_parse_inst_t *)&cmd_help_long, + (cmdline_parse_inst_t *)&cmd_quit, + (cmdline_parse_inst_t *)&cmd_load_from_file, + (cmdline_parse_inst_t *)&cmd_showport, + (cmdline_parse_inst_t *)&cmd_showqueue, + (cmdline_parse_inst_t *)&cmd_showportall, + (cmdline_parse_inst_t *)&cmd_showdevice, + (cmdline_parse_inst_t *)&cmd_showcfg, + (cmdline_parse_inst_t *)&cmd_showfwdall, + (cmdline_parse_inst_t *)&cmd_start, + (cmdline_parse_inst_t *)&cmd_start_tx_first, + (cmdline_parse_inst_t *)&cmd_start_tx_first_n, + (cmdline_parse_inst_t *)&cmd_set_link_up, + (cmdline_parse_inst_t *)&cmd_set_link_down, + (cmdline_parse_inst_t *)&cmd_reset, + (cmdline_parse_inst_t *)&cmd_set_numbers, + (cmdline_parse_inst_t *)&cmd_set_log, + (cmdline_parse_inst_t *)&cmd_set_txpkts, + (cmdline_parse_inst_t *)&cmd_set_txsplit, + (cmdline_parse_inst_t *)&cmd_set_fwd_list, + (cmdline_parse_inst_t *)&cmd_set_fwd_mask, + (cmdline_parse_inst_t *)&cmd_set_fwd_mode, + (cmdline_parse_inst_t *)&cmd_set_fwd_retry_mode, + (cmdline_parse_inst_t *)&cmd_set_burst_tx_retry, + (cmdline_parse_inst_t *)&cmd_set_promisc_mode_one, + (cmdline_parse_inst_t *)&cmd_set_promisc_mode_all, + (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_one, + (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_all, + (cmdline_parse_inst_t *)&cmd_set_flush_rx, + (cmdline_parse_inst_t *)&cmd_set_link_check, + (cmdline_parse_inst_t *)&cmd_set_bypass_mode, + (cmdline_parse_inst_t *)&cmd_set_bypass_event, + (cmdline_parse_inst_t *)&cmd_set_bypass_timeout, + (cmdline_parse_inst_t *)&cmd_show_bypass_config, +#ifdef RTE_LIBRTE_PMD_BOND + (cmdline_parse_inst_t *) &cmd_set_bonding_mode, + 
(cmdline_parse_inst_t *) &cmd_show_bonding_config, + (cmdline_parse_inst_t *) &cmd_set_bonding_primary, + (cmdline_parse_inst_t *) &cmd_add_bonding_slave, + (cmdline_parse_inst_t *) &cmd_remove_bonding_slave, + (cmdline_parse_inst_t *) &cmd_create_bonded_device, + (cmdline_parse_inst_t *) &cmd_set_bond_mac_addr, + (cmdline_parse_inst_t *) &cmd_set_balance_xmit_policy, + (cmdline_parse_inst_t *) &cmd_set_bond_mon_period, + (cmdline_parse_inst_t *) &cmd_set_lacp_dedicated_queues, + (cmdline_parse_inst_t *) &cmd_set_bonding_agg_mode_policy, +#endif + (cmdline_parse_inst_t *)&cmd_vlan_offload, + (cmdline_parse_inst_t *)&cmd_vlan_tpid, + (cmdline_parse_inst_t *)&cmd_rx_vlan_filter_all, + (cmdline_parse_inst_t *)&cmd_rx_vlan_filter, + (cmdline_parse_inst_t *)&cmd_tx_vlan_set, + (cmdline_parse_inst_t *)&cmd_tx_vlan_set_qinq, + (cmdline_parse_inst_t *)&cmd_tx_vlan_reset, + (cmdline_parse_inst_t *)&cmd_tx_vlan_set_pvid, + (cmdline_parse_inst_t *)&cmd_csum_set, + (cmdline_parse_inst_t *)&cmd_csum_show, + (cmdline_parse_inst_t *)&cmd_csum_tunnel, + (cmdline_parse_inst_t *)&cmd_tso_set, + (cmdline_parse_inst_t *)&cmd_tso_show, + (cmdline_parse_inst_t *)&cmd_tunnel_tso_set, + (cmdline_parse_inst_t *)&cmd_tunnel_tso_show, + (cmdline_parse_inst_t *)&cmd_gro_enable, + (cmdline_parse_inst_t *)&cmd_gro_flush, + (cmdline_parse_inst_t *)&cmd_gro_show, + (cmdline_parse_inst_t *)&cmd_gso_enable, + (cmdline_parse_inst_t *)&cmd_gso_size, + (cmdline_parse_inst_t *)&cmd_gso_show, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set_rx, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set_tx, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set_hw, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set_lw, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set_pt, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set_xon, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set_macfwd, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set_autoneg, + (cmdline_parse_inst_t *)&cmd_priority_flow_control_set, + (cmdline_parse_inst_t *)&cmd_config_dcb, + (cmdline_parse_inst_t *)&cmd_read_reg, + (cmdline_parse_inst_t *)&cmd_read_reg_bit_field, + (cmdline_parse_inst_t *)&cmd_read_reg_bit, + (cmdline_parse_inst_t *)&cmd_write_reg, + (cmdline_parse_inst_t *)&cmd_write_reg_bit_field, + (cmdline_parse_inst_t *)&cmd_write_reg_bit, + (cmdline_parse_inst_t *)&cmd_read_rxd_txd, + (cmdline_parse_inst_t *)&cmd_stop, + (cmdline_parse_inst_t *)&cmd_mac_addr, + (cmdline_parse_inst_t *)&cmd_set_fwd_eth_peer, + (cmdline_parse_inst_t *)&cmd_set_qmap, + (cmdline_parse_inst_t *)&cmd_set_xstats_hide_zero, + (cmdline_parse_inst_t *)&cmd_operate_port, + (cmdline_parse_inst_t *)&cmd_operate_specific_port, + (cmdline_parse_inst_t *)&cmd_operate_attach_port, + (cmdline_parse_inst_t *)&cmd_operate_detach_port, + (cmdline_parse_inst_t *)&cmd_operate_detach_device, + (cmdline_parse_inst_t *)&cmd_set_port_setup_on, + (cmdline_parse_inst_t *)&cmd_config_speed_all, + (cmdline_parse_inst_t *)&cmd_config_speed_specific, + (cmdline_parse_inst_t *)&cmd_config_loopback_all, + (cmdline_parse_inst_t *)&cmd_config_loopback_specific, + (cmdline_parse_inst_t *)&cmd_config_rx_tx, + (cmdline_parse_inst_t *)&cmd_config_mtu, + (cmdline_parse_inst_t *)&cmd_config_max_pkt_len, + (cmdline_parse_inst_t *)&cmd_config_max_lro_pkt_size, + (cmdline_parse_inst_t *)&cmd_config_rx_mode_flag, + (cmdline_parse_inst_t *)&cmd_config_rss, + (cmdline_parse_inst_t *)&cmd_config_rxtx_ring_size, + (cmdline_parse_inst_t 
*)&cmd_config_rxtx_queue, + (cmdline_parse_inst_t *)&cmd_config_deferred_start_rxtx_queue, + (cmdline_parse_inst_t *)&cmd_setup_rxtx_queue, + (cmdline_parse_inst_t *)&cmd_config_rss_reta, + (cmdline_parse_inst_t *)&cmd_showport_reta, + (cmdline_parse_inst_t *)&cmd_showport_macs, + (cmdline_parse_inst_t *)&cmd_config_burst, + (cmdline_parse_inst_t *)&cmd_config_thresh, + (cmdline_parse_inst_t *)&cmd_config_threshold, + (cmdline_parse_inst_t *)&cmd_set_uc_hash_filter, + (cmdline_parse_inst_t *)&cmd_set_uc_all_hash_filter, + (cmdline_parse_inst_t *)&cmd_vf_mac_addr_filter, + (cmdline_parse_inst_t *)&cmd_set_vf_macvlan_filter, + (cmdline_parse_inst_t *)&cmd_queue_rate_limit, + (cmdline_parse_inst_t *)&cmd_tunnel_filter, + (cmdline_parse_inst_t *)&cmd_tunnel_udp_config, + (cmdline_parse_inst_t *)&cmd_global_config, + (cmdline_parse_inst_t *)&cmd_set_mirror_mask, + (cmdline_parse_inst_t *)&cmd_set_mirror_link, + (cmdline_parse_inst_t *)&cmd_reset_mirror_rule, + (cmdline_parse_inst_t *)&cmd_showport_rss_hash, + (cmdline_parse_inst_t *)&cmd_showport_rss_hash_key, + (cmdline_parse_inst_t *)&cmd_config_rss_hash_key, + (cmdline_parse_inst_t *)&cmd_dump, + (cmdline_parse_inst_t *)&cmd_dump_one, + (cmdline_parse_inst_t *)&cmd_ethertype_filter, + (cmdline_parse_inst_t *)&cmd_syn_filter, + (cmdline_parse_inst_t *)&cmd_2tuple_filter, + (cmdline_parse_inst_t *)&cmd_5tuple_filter, + (cmdline_parse_inst_t *)&cmd_flex_filter, + (cmdline_parse_inst_t *)&cmd_add_del_ip_flow_director, + (cmdline_parse_inst_t *)&cmd_add_del_udp_flow_director, + (cmdline_parse_inst_t *)&cmd_add_del_sctp_flow_director, + (cmdline_parse_inst_t *)&cmd_add_del_l2_flow_director, + (cmdline_parse_inst_t *)&cmd_add_del_mac_vlan_flow_director, + (cmdline_parse_inst_t *)&cmd_add_del_tunnel_flow_director, + (cmdline_parse_inst_t *)&cmd_add_del_raw_flow_director, + (cmdline_parse_inst_t *)&cmd_flush_flow_director, + (cmdline_parse_inst_t *)&cmd_set_flow_director_ip_mask, + (cmdline_parse_inst_t *)&cmd_set_flow_director_mac_vlan_mask, + (cmdline_parse_inst_t *)&cmd_set_flow_director_tunnel_mask, + (cmdline_parse_inst_t *)&cmd_set_flow_director_flex_mask, + (cmdline_parse_inst_t *)&cmd_set_flow_director_flex_payload, + (cmdline_parse_inst_t *)&cmd_get_sym_hash_ena_per_port, + (cmdline_parse_inst_t *)&cmd_set_sym_hash_ena_per_port, + (cmdline_parse_inst_t *)&cmd_get_hash_global_config, + (cmdline_parse_inst_t *)&cmd_set_hash_global_config, + (cmdline_parse_inst_t *)&cmd_set_hash_input_set, + (cmdline_parse_inst_t *)&cmd_set_fdir_input_set, + (cmdline_parse_inst_t *)&cmd_flow, + (cmdline_parse_inst_t *)&cmd_show_port_meter_cap, + (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_srtcm, + (cmdline_parse_inst_t *)&cmd_add_port_meter_profile_trtcm, + (cmdline_parse_inst_t *)&cmd_del_port_meter_profile, + (cmdline_parse_inst_t *)&cmd_create_port_meter, + (cmdline_parse_inst_t *)&cmd_enable_port_meter, + (cmdline_parse_inst_t *)&cmd_disable_port_meter, + (cmdline_parse_inst_t *)&cmd_del_port_meter, + (cmdline_parse_inst_t *)&cmd_set_port_meter_profile, + (cmdline_parse_inst_t *)&cmd_set_port_meter_dscp_table, + (cmdline_parse_inst_t *)&cmd_set_port_meter_policer_action, + (cmdline_parse_inst_t *)&cmd_set_port_meter_stats_mask, + (cmdline_parse_inst_t *)&cmd_show_port_meter_stats, + (cmdline_parse_inst_t *)&cmd_mcast_addr, + (cmdline_parse_inst_t *)&cmd_config_l2_tunnel_eth_type_all, + (cmdline_parse_inst_t *)&cmd_config_l2_tunnel_eth_type_specific, + (cmdline_parse_inst_t *)&cmd_config_l2_tunnel_en_dis_all, + (cmdline_parse_inst_t 
*)&cmd_config_l2_tunnel_en_dis_specific, + (cmdline_parse_inst_t *)&cmd_config_e_tag_insertion_en, + (cmdline_parse_inst_t *)&cmd_config_e_tag_insertion_dis, + (cmdline_parse_inst_t *)&cmd_config_e_tag_stripping_en_dis, + (cmdline_parse_inst_t *)&cmd_config_e_tag_forwarding_en_dis, + (cmdline_parse_inst_t *)&cmd_config_e_tag_filter_add, + (cmdline_parse_inst_t *)&cmd_config_e_tag_filter_del, + (cmdline_parse_inst_t *)&cmd_set_vf_vlan_anti_spoof, + (cmdline_parse_inst_t *)&cmd_set_vf_mac_anti_spoof, + (cmdline_parse_inst_t *)&cmd_set_vf_vlan_stripq, + (cmdline_parse_inst_t *)&cmd_set_vf_vlan_insert, + (cmdline_parse_inst_t *)&cmd_set_tx_loopback, + (cmdline_parse_inst_t *)&cmd_set_all_queues_drop_en, + (cmdline_parse_inst_t *)&cmd_set_vf_split_drop_en, + (cmdline_parse_inst_t *)&cmd_set_macsec_offload_on, + (cmdline_parse_inst_t *)&cmd_set_macsec_offload_off, + (cmdline_parse_inst_t *)&cmd_set_macsec_sc, + (cmdline_parse_inst_t *)&cmd_set_macsec_sa, + (cmdline_parse_inst_t *)&cmd_set_vf_traffic, + (cmdline_parse_inst_t *)&cmd_set_vf_rxmode, + (cmdline_parse_inst_t *)&cmd_vf_rate_limit, + (cmdline_parse_inst_t *)&cmd_vf_rxvlan_filter, + (cmdline_parse_inst_t *)&cmd_set_vf_mac_addr, + (cmdline_parse_inst_t *)&cmd_set_vf_promisc, + (cmdline_parse_inst_t *)&cmd_set_vf_allmulti, + (cmdline_parse_inst_t *)&cmd_set_vf_broadcast, + (cmdline_parse_inst_t *)&cmd_set_vf_vlan_tag, + (cmdline_parse_inst_t *)&cmd_vf_max_bw, + (cmdline_parse_inst_t *)&cmd_vf_tc_min_bw, + (cmdline_parse_inst_t *)&cmd_vf_tc_max_bw, + (cmdline_parse_inst_t *)&cmd_strict_link_prio, + (cmdline_parse_inst_t *)&cmd_tc_min_bw, +#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED + (cmdline_parse_inst_t *)&cmd_set_port_tm_hierarchy_default, +#endif + (cmdline_parse_inst_t *)&cmd_set_vxlan, + (cmdline_parse_inst_t *)&cmd_set_vxlan_tos_ttl, + (cmdline_parse_inst_t *)&cmd_set_vxlan_with_vlan, + (cmdline_parse_inst_t *)&cmd_set_nvgre, + (cmdline_parse_inst_t *)&cmd_set_nvgre_with_vlan, + (cmdline_parse_inst_t *)&cmd_set_l2_encap, + (cmdline_parse_inst_t *)&cmd_set_l2_encap_with_vlan, + (cmdline_parse_inst_t *)&cmd_set_l2_decap, + (cmdline_parse_inst_t *)&cmd_set_l2_decap_with_vlan, + (cmdline_parse_inst_t *)&cmd_set_mplsogre_encap, + (cmdline_parse_inst_t *)&cmd_set_mplsogre_encap_with_vlan, + (cmdline_parse_inst_t *)&cmd_set_mplsogre_decap, + (cmdline_parse_inst_t *)&cmd_set_mplsogre_decap_with_vlan, + (cmdline_parse_inst_t *)&cmd_set_mplsoudp_encap, + (cmdline_parse_inst_t *)&cmd_set_mplsoudp_encap_with_vlan, + (cmdline_parse_inst_t *)&cmd_set_mplsoudp_decap, + (cmdline_parse_inst_t *)&cmd_set_mplsoudp_decap_with_vlan, + (cmdline_parse_inst_t *)&cmd_ddp_add, + (cmdline_parse_inst_t *)&cmd_ddp_del, + (cmdline_parse_inst_t *)&cmd_ddp_get_list, + (cmdline_parse_inst_t *)&cmd_ddp_get_info, + (cmdline_parse_inst_t *)&cmd_cfg_input_set, + (cmdline_parse_inst_t *)&cmd_clear_input_set, + (cmdline_parse_inst_t *)&cmd_show_vf_stats, + (cmdline_parse_inst_t *)&cmd_clear_vf_stats, + (cmdline_parse_inst_t *)&cmd_show_port_supported_ptypes, + (cmdline_parse_inst_t *)&cmd_set_port_ptypes, + (cmdline_parse_inst_t *)&cmd_ptype_mapping_get, + (cmdline_parse_inst_t *)&cmd_ptype_mapping_replace, + (cmdline_parse_inst_t *)&cmd_ptype_mapping_reset, + (cmdline_parse_inst_t *)&cmd_ptype_mapping_update, + + (cmdline_parse_inst_t *)&cmd_pctype_mapping_get, + (cmdline_parse_inst_t *)&cmd_pctype_mapping_reset, + (cmdline_parse_inst_t *)&cmd_pctype_mapping_update, + (cmdline_parse_inst_t *)&cmd_queue_region, + (cmdline_parse_inst_t 
*)&cmd_region_flowtype, + (cmdline_parse_inst_t *)&cmd_user_priority_region, + (cmdline_parse_inst_t *)&cmd_flush_queue_region, + (cmdline_parse_inst_t *)&cmd_show_queue_region_info_all, + (cmdline_parse_inst_t *)&cmd_show_port_tm_cap, + (cmdline_parse_inst_t *)&cmd_show_port_tm_level_cap, + (cmdline_parse_inst_t *)&cmd_show_port_tm_node_cap, + (cmdline_parse_inst_t *)&cmd_show_port_tm_node_type, + (cmdline_parse_inst_t *)&cmd_show_port_tm_node_stats, + (cmdline_parse_inst_t *)&cmd_add_port_tm_node_shaper_profile, + (cmdline_parse_inst_t *)&cmd_del_port_tm_node_shaper_profile, + (cmdline_parse_inst_t *)&cmd_add_port_tm_node_shared_shaper, + (cmdline_parse_inst_t *)&cmd_del_port_tm_node_shared_shaper, + (cmdline_parse_inst_t *)&cmd_add_port_tm_node_wred_profile, + (cmdline_parse_inst_t *)&cmd_del_port_tm_node_wred_profile, + (cmdline_parse_inst_t *)&cmd_set_port_tm_node_shaper_profile, + (cmdline_parse_inst_t *)&cmd_add_port_tm_nonleaf_node, + (cmdline_parse_inst_t *)&cmd_add_port_tm_leaf_node, + (cmdline_parse_inst_t *)&cmd_del_port_tm_node, + (cmdline_parse_inst_t *)&cmd_set_port_tm_node_parent, + (cmdline_parse_inst_t *)&cmd_suspend_port_tm_node, + (cmdline_parse_inst_t *)&cmd_resume_port_tm_node, + (cmdline_parse_inst_t *)&cmd_port_tm_hierarchy_commit, + (cmdline_parse_inst_t *)&cmd_port_tm_mark_ip_ecn, + (cmdline_parse_inst_t *)&cmd_port_tm_mark_ip_dscp, + (cmdline_parse_inst_t *)&cmd_port_tm_mark_vlan_dei, + (cmdline_parse_inst_t *)&cmd_cfg_tunnel_udp_port, + (cmdline_parse_inst_t *)&cmd_rx_offload_get_capa, + (cmdline_parse_inst_t *)&cmd_rx_offload_get_configuration, + (cmdline_parse_inst_t *)&cmd_config_per_port_rx_offload, + (cmdline_parse_inst_t *)&cmd_config_per_queue_rx_offload, + (cmdline_parse_inst_t *)&cmd_tx_offload_get_capa, + (cmdline_parse_inst_t *)&cmd_tx_offload_get_configuration, + (cmdline_parse_inst_t *)&cmd_config_per_port_tx_offload, + (cmdline_parse_inst_t *)&cmd_config_per_queue_tx_offload, +#ifdef RTE_LIBRTE_BPF + (cmdline_parse_inst_t *)&cmd_operate_bpf_ld_parse, + (cmdline_parse_inst_t *)&cmd_operate_bpf_unld_parse, +#endif + (cmdline_parse_inst_t *)&cmd_config_tx_metadata_specific, + (cmdline_parse_inst_t *)&cmd_show_tx_metadata, + (cmdline_parse_inst_t *)&cmd_show_rx_tx_desc_status, + (cmdline_parse_inst_t *)&cmd_set_raw, + (cmdline_parse_inst_t *)&cmd_show_set_raw, + (cmdline_parse_inst_t *)&cmd_show_set_raw_all, + (cmdline_parse_inst_t *)&cmd_config_tx_dynf_specific, + NULL, +}; + +/* read cmdline commands from file */ +void +cmdline_read_from_file(const char *filename) +{ + struct cmdline *cl; + + cl = cmdline_file_new(main_ctx, "testpmd> ", filename); + if (cl == NULL) { + printf("Failed to create file based cmdline context: %s\n", + filename); + return; + } + + cmdline_interact(cl); + cmdline_quit(cl); + + cmdline_free(cl); + + printf("Read CLI commands from %s\n", filename); +} + +/* prompt function, called from main on MASTER lcore */ +void +prompt(void) +{ + /* initialize non-constant commands */ + cmd_set_fwd_mode_init(); + cmd_set_fwd_retry_mode_init(); + + testpmd_cl = cmdline_stdin_new(main_ctx, "testpmd> "); + if (testpmd_cl == NULL) + return; + cmdline_interact(testpmd_cl); + cmdline_stdin_exit(testpmd_cl); +} + +void +prompt_exit(void) +{ + if (testpmd_cl != NULL) + cmdline_quit(testpmd_cl); +} + +static void +cmd_reconfig_device_queue(portid_t id, uint8_t dev, uint8_t queue) +{ + if (id == (portid_t)RTE_PORT_ALL) { + portid_t pid; + + RTE_ETH_FOREACH_DEV(pid) { + /* check if need_reconfig has been set to 1 */ + if (ports[pid].need_reconfig == 
0) + ports[pid].need_reconfig = dev; + /* check if need_reconfig_queues has been set to 1 */ + if (ports[pid].need_reconfig_queues == 0) + ports[pid].need_reconfig_queues = queue; + } + } else if (!port_id_is_invalid(id, DISABLED_WARN)) { + /* check if need_reconfig has been set to 1 */ + if (ports[id].need_reconfig == 0) + ports[id].need_reconfig = dev; + /* check if need_reconfig_queues has been set to 1 */ + if (ports[id].need_reconfig_queues == 0) + ports[id].need_reconfig_queues = queue; + } +} diff --git a/src/spdk/dpdk/app/test-pmd/cmdline_flow.c b/src/spdk/dpdk/app/test-pmd/cmdline_flow.c new file mode 100644 index 000000000..4e2006c54 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/cmdline_flow.c @@ -0,0 +1,6888 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox Technologies, Ltd + */ + +#include <stddef.h> +#include <stdint.h> +#include <stdio.h> +#include <inttypes.h> +#include <errno.h> +#include <ctype.h> +#include <string.h> +#include <arpa/inet.h> +#include <sys/socket.h> + +#include <rte_string_fns.h> +#include <rte_common.h> +#include <rte_ethdev.h> +#include <rte_byteorder.h> +#include <cmdline_parse.h> +#include <cmdline_parse_etheraddr.h> +#include <cmdline_parse_string.h> +#include <cmdline_parse_num.h> +#include <rte_flow.h> +#include <rte_hexdump.h> + +#include "testpmd.h" + +/** Parser token indices. */ +enum index { + /* Special tokens. */ + ZERO = 0, + END, + START_SET, + END_SET, + + /* Common tokens. */ + INTEGER, + UNSIGNED, + PREFIX, + BOOLEAN, + STRING, + HEX, + FILE_PATH, + MAC_ADDR, + IPV4_ADDR, + IPV6_ADDR, + RULE_ID, + PORT_ID, + GROUP_ID, + PRIORITY_LEVEL, + + /* Top-level command. */ + SET, + /* Sub-level commands. */ + SET_RAW_ENCAP, + SET_RAW_DECAP, + SET_RAW_INDEX, + + /* Top-level command. */ + FLOW, + /* Sub-level commands. */ + VALIDATE, + CREATE, + DESTROY, + FLUSH, + DUMP, + QUERY, + LIST, + AGED, + ISOLATE, + + /* Destroy arguments. */ + DESTROY_RULE, + + /* Query arguments. */ + QUERY_ACTION, + + /* List arguments. */ + LIST_GROUP, + + /* Destroy aged flow arguments. */ + AGED_DESTROY, + + /* Validate/create arguments. */ + GROUP, + PRIORITY, + INGRESS, + EGRESS, + TRANSFER, + + /* Validate/create pattern. 
*/ + PATTERN, + ITEM_PARAM_IS, + ITEM_PARAM_SPEC, + ITEM_PARAM_LAST, + ITEM_PARAM_MASK, + ITEM_PARAM_PREFIX, + ITEM_NEXT, + ITEM_END, + ITEM_VOID, + ITEM_INVERT, + ITEM_ANY, + ITEM_ANY_NUM, + ITEM_PF, + ITEM_VF, + ITEM_VF_ID, + ITEM_PHY_PORT, + ITEM_PHY_PORT_INDEX, + ITEM_PORT_ID, + ITEM_PORT_ID_ID, + ITEM_MARK, + ITEM_MARK_ID, + ITEM_RAW, + ITEM_RAW_RELATIVE, + ITEM_RAW_SEARCH, + ITEM_RAW_OFFSET, + ITEM_RAW_LIMIT, + ITEM_RAW_PATTERN, + ITEM_ETH, + ITEM_ETH_DST, + ITEM_ETH_SRC, + ITEM_ETH_TYPE, + ITEM_VLAN, + ITEM_VLAN_TCI, + ITEM_VLAN_PCP, + ITEM_VLAN_DEI, + ITEM_VLAN_VID, + ITEM_VLAN_INNER_TYPE, + ITEM_IPV4, + ITEM_IPV4_TOS, + ITEM_IPV4_TTL, + ITEM_IPV4_PROTO, + ITEM_IPV4_SRC, + ITEM_IPV4_DST, + ITEM_IPV6, + ITEM_IPV6_TC, + ITEM_IPV6_FLOW, + ITEM_IPV6_PROTO, + ITEM_IPV6_HOP, + ITEM_IPV6_SRC, + ITEM_IPV6_DST, + ITEM_ICMP, + ITEM_ICMP_TYPE, + ITEM_ICMP_CODE, + ITEM_UDP, + ITEM_UDP_SRC, + ITEM_UDP_DST, + ITEM_TCP, + ITEM_TCP_SRC, + ITEM_TCP_DST, + ITEM_TCP_FLAGS, + ITEM_SCTP, + ITEM_SCTP_SRC, + ITEM_SCTP_DST, + ITEM_SCTP_TAG, + ITEM_SCTP_CKSUM, + ITEM_VXLAN, + ITEM_VXLAN_VNI, + ITEM_E_TAG, + ITEM_E_TAG_GRP_ECID_B, + ITEM_NVGRE, + ITEM_NVGRE_TNI, + ITEM_MPLS, + ITEM_MPLS_LABEL, + ITEM_MPLS_TC, + ITEM_MPLS_S, + ITEM_GRE, + ITEM_GRE_PROTO, + ITEM_GRE_C_RSVD0_VER, + ITEM_GRE_C_BIT, + ITEM_GRE_K_BIT, + ITEM_GRE_S_BIT, + ITEM_FUZZY, + ITEM_FUZZY_THRESH, + ITEM_GTP, + ITEM_GTP_FLAGS, + ITEM_GTP_MSG_TYPE, + ITEM_GTP_TEID, + ITEM_GTPC, + ITEM_GTPU, + ITEM_GENEVE, + ITEM_GENEVE_VNI, + ITEM_GENEVE_PROTO, + ITEM_VXLAN_GPE, + ITEM_VXLAN_GPE_VNI, + ITEM_ARP_ETH_IPV4, + ITEM_ARP_ETH_IPV4_SHA, + ITEM_ARP_ETH_IPV4_SPA, + ITEM_ARP_ETH_IPV4_THA, + ITEM_ARP_ETH_IPV4_TPA, + ITEM_IPV6_EXT, + ITEM_IPV6_EXT_NEXT_HDR, + ITEM_ICMP6, + ITEM_ICMP6_TYPE, + ITEM_ICMP6_CODE, + ITEM_ICMP6_ND_NS, + ITEM_ICMP6_ND_NS_TARGET_ADDR, + ITEM_ICMP6_ND_NA, + ITEM_ICMP6_ND_NA_TARGET_ADDR, + ITEM_ICMP6_ND_OPT, + ITEM_ICMP6_ND_OPT_TYPE, + ITEM_ICMP6_ND_OPT_SLA_ETH, + ITEM_ICMP6_ND_OPT_SLA_ETH_SLA, + ITEM_ICMP6_ND_OPT_TLA_ETH, + ITEM_ICMP6_ND_OPT_TLA_ETH_TLA, + ITEM_META, + ITEM_META_DATA, + ITEM_GRE_KEY, + ITEM_GRE_KEY_VALUE, + ITEM_GTP_PSC, + ITEM_GTP_PSC_QFI, + ITEM_GTP_PSC_PDU_T, + ITEM_PPPOES, + ITEM_PPPOED, + ITEM_PPPOE_SEID, + ITEM_PPPOE_PROTO_ID, + ITEM_HIGIG2, + ITEM_HIGIG2_CLASSIFICATION, + ITEM_HIGIG2_VID, + ITEM_TAG, + ITEM_TAG_DATA, + ITEM_TAG_INDEX, + ITEM_L2TPV3OIP, + ITEM_L2TPV3OIP_SESSION_ID, + ITEM_ESP, + ITEM_ESP_SPI, + ITEM_AH, + ITEM_AH_SPI, + ITEM_PFCP, + ITEM_PFCP_S_FIELD, + ITEM_PFCP_SEID, + + /* Validate/create actions. 
*/ + ACTIONS, + ACTION_NEXT, + ACTION_END, + ACTION_VOID, + ACTION_PASSTHRU, + ACTION_JUMP, + ACTION_JUMP_GROUP, + ACTION_MARK, + ACTION_MARK_ID, + ACTION_FLAG, + ACTION_QUEUE, + ACTION_QUEUE_INDEX, + ACTION_DROP, + ACTION_COUNT, + ACTION_COUNT_SHARED, + ACTION_COUNT_ID, + ACTION_RSS, + ACTION_RSS_FUNC, + ACTION_RSS_LEVEL, + ACTION_RSS_FUNC_DEFAULT, + ACTION_RSS_FUNC_TOEPLITZ, + ACTION_RSS_FUNC_SIMPLE_XOR, + ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ, + ACTION_RSS_TYPES, + ACTION_RSS_TYPE, + ACTION_RSS_KEY, + ACTION_RSS_KEY_LEN, + ACTION_RSS_QUEUES, + ACTION_RSS_QUEUE, + ACTION_PF, + ACTION_VF, + ACTION_VF_ORIGINAL, + ACTION_VF_ID, + ACTION_PHY_PORT, + ACTION_PHY_PORT_ORIGINAL, + ACTION_PHY_PORT_INDEX, + ACTION_PORT_ID, + ACTION_PORT_ID_ORIGINAL, + ACTION_PORT_ID_ID, + ACTION_METER, + ACTION_METER_ID, + ACTION_OF_SET_MPLS_TTL, + ACTION_OF_SET_MPLS_TTL_MPLS_TTL, + ACTION_OF_DEC_MPLS_TTL, + ACTION_OF_SET_NW_TTL, + ACTION_OF_SET_NW_TTL_NW_TTL, + ACTION_OF_DEC_NW_TTL, + ACTION_OF_COPY_TTL_OUT, + ACTION_OF_COPY_TTL_IN, + ACTION_OF_POP_VLAN, + ACTION_OF_PUSH_VLAN, + ACTION_OF_PUSH_VLAN_ETHERTYPE, + ACTION_OF_SET_VLAN_VID, + ACTION_OF_SET_VLAN_VID_VLAN_VID, + ACTION_OF_SET_VLAN_PCP, + ACTION_OF_SET_VLAN_PCP_VLAN_PCP, + ACTION_OF_POP_MPLS, + ACTION_OF_POP_MPLS_ETHERTYPE, + ACTION_OF_PUSH_MPLS, + ACTION_OF_PUSH_MPLS_ETHERTYPE, + ACTION_VXLAN_ENCAP, + ACTION_VXLAN_DECAP, + ACTION_NVGRE_ENCAP, + ACTION_NVGRE_DECAP, + ACTION_L2_ENCAP, + ACTION_L2_DECAP, + ACTION_MPLSOGRE_ENCAP, + ACTION_MPLSOGRE_DECAP, + ACTION_MPLSOUDP_ENCAP, + ACTION_MPLSOUDP_DECAP, + ACTION_SET_IPV4_SRC, + ACTION_SET_IPV4_SRC_IPV4_SRC, + ACTION_SET_IPV4_DST, + ACTION_SET_IPV4_DST_IPV4_DST, + ACTION_SET_IPV6_SRC, + ACTION_SET_IPV6_SRC_IPV6_SRC, + ACTION_SET_IPV6_DST, + ACTION_SET_IPV6_DST_IPV6_DST, + ACTION_SET_TP_SRC, + ACTION_SET_TP_SRC_TP_SRC, + ACTION_SET_TP_DST, + ACTION_SET_TP_DST_TP_DST, + ACTION_MAC_SWAP, + ACTION_DEC_TTL, + ACTION_SET_TTL, + ACTION_SET_TTL_TTL, + ACTION_SET_MAC_SRC, + ACTION_SET_MAC_SRC_MAC_SRC, + ACTION_SET_MAC_DST, + ACTION_SET_MAC_DST_MAC_DST, + ACTION_INC_TCP_SEQ, + ACTION_INC_TCP_SEQ_VALUE, + ACTION_DEC_TCP_SEQ, + ACTION_DEC_TCP_SEQ_VALUE, + ACTION_INC_TCP_ACK, + ACTION_INC_TCP_ACK_VALUE, + ACTION_DEC_TCP_ACK, + ACTION_DEC_TCP_ACK_VALUE, + ACTION_RAW_ENCAP, + ACTION_RAW_DECAP, + ACTION_RAW_ENCAP_INDEX, + ACTION_RAW_ENCAP_INDEX_VALUE, + ACTION_RAW_DECAP_INDEX, + ACTION_RAW_DECAP_INDEX_VALUE, + ACTION_SET_TAG, + ACTION_SET_TAG_DATA, + ACTION_SET_TAG_INDEX, + ACTION_SET_TAG_MASK, + ACTION_SET_META, + ACTION_SET_META_DATA, + ACTION_SET_META_MASK, + ACTION_SET_IPV4_DSCP, + ACTION_SET_IPV4_DSCP_VALUE, + ACTION_SET_IPV6_DSCP, + ACTION_SET_IPV6_DSCP_VALUE, + ACTION_AGE, + ACTION_AGE_TIMEOUT, +}; + +/** Maximum size for pattern in struct rte_flow_item_raw. */ +#define ITEM_RAW_PATTERN_SIZE 40 + +/** Storage size for struct rte_flow_item_raw including pattern. */ +#define ITEM_RAW_SIZE \ + (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE) + +/** Maximum number of queue indices in struct rte_flow_action_rss. */ +#define ACTION_RSS_QUEUE_NUM 128 + +/** Storage for struct rte_flow_action_rss including external data. */ +struct action_rss_data { + struct rte_flow_action_rss conf; + uint8_t key[RSS_HASH_KEY_LENGTH]; + uint16_t queue[ACTION_RSS_QUEUE_NUM]; +}; + +/** Maximum data size in struct rte_flow_action_raw_encap. */ +#define ACTION_RAW_ENCAP_MAX_DATA 128 +#define RAW_ENCAP_CONFS_MAX_NUM 8 + +/** Storage for struct rte_flow_action_raw_encap. 
*/ +struct raw_encap_conf { + uint8_t data[ACTION_RAW_ENCAP_MAX_DATA]; + uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA]; + size_t size; +}; + +struct raw_encap_conf raw_encap_confs[RAW_ENCAP_CONFS_MAX_NUM]; + +/** Storage for struct rte_flow_action_raw_encap including external data. */ +struct action_raw_encap_data { + struct rte_flow_action_raw_encap conf; + uint8_t data[ACTION_RAW_ENCAP_MAX_DATA]; + uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA]; + uint16_t idx; +}; + +/** Storage for struct rte_flow_action_raw_decap. */ +struct raw_decap_conf { + uint8_t data[ACTION_RAW_ENCAP_MAX_DATA]; + size_t size; +}; + +struct raw_decap_conf raw_decap_confs[RAW_ENCAP_CONFS_MAX_NUM]; + +/** Storage for struct rte_flow_action_raw_decap including external data. */ +struct action_raw_decap_data { + struct rte_flow_action_raw_decap conf; + uint8_t data[ACTION_RAW_ENCAP_MAX_DATA]; + uint16_t idx; +}; + +struct vxlan_encap_conf vxlan_encap_conf = { + .select_ipv4 = 1, + .select_vlan = 0, + .select_tos_ttl = 0, + .vni = "\x00\x00\x00", + .udp_src = 0, + .udp_dst = RTE_BE16(4789), + .ipv4_src = RTE_IPV4(127, 0, 0, 1), + .ipv4_dst = RTE_IPV4(255, 255, 255, 255), + .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01", + .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x11\x11", + .vlan_tci = 0, + .ip_tos = 0, + .ip_ttl = 255, + .eth_src = "\x00\x00\x00\x00\x00\x00", + .eth_dst = "\xff\xff\xff\xff\xff\xff", +}; + +/** Maximum number of items in struct rte_flow_action_vxlan_encap. */ +#define ACTION_VXLAN_ENCAP_ITEMS_NUM 6 + +/** Storage for struct rte_flow_action_vxlan_encap including external data. */ +struct action_vxlan_encap_data { + struct rte_flow_action_vxlan_encap conf; + struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM]; + struct rte_flow_item_eth item_eth; + struct rte_flow_item_vlan item_vlan; + union { + struct rte_flow_item_ipv4 item_ipv4; + struct rte_flow_item_ipv6 item_ipv6; + }; + struct rte_flow_item_udp item_udp; + struct rte_flow_item_vxlan item_vxlan; +}; + +struct nvgre_encap_conf nvgre_encap_conf = { + .select_ipv4 = 1, + .select_vlan = 0, + .tni = "\x00\x00\x00", + .ipv4_src = RTE_IPV4(127, 0, 0, 1), + .ipv4_dst = RTE_IPV4(255, 255, 255, 255), + .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01", + .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x11\x11", + .vlan_tci = 0, + .eth_src = "\x00\x00\x00\x00\x00\x00", + .eth_dst = "\xff\xff\xff\xff\xff\xff", +}; + +/** Maximum number of items in struct rte_flow_action_nvgre_encap. */ +#define ACTION_NVGRE_ENCAP_ITEMS_NUM 5 + +/** Storage for struct rte_flow_action_nvgre_encap including external data. */ +struct action_nvgre_encap_data { + struct rte_flow_action_nvgre_encap conf; + struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM]; + struct rte_flow_item_eth item_eth; + struct rte_flow_item_vlan item_vlan; + union { + struct rte_flow_item_ipv4 item_ipv4; + struct rte_flow_item_ipv6 item_ipv6; + }; + struct rte_flow_item_nvgre item_nvgre; +}; + +struct l2_encap_conf l2_encap_conf; + +struct l2_decap_conf l2_decap_conf; + +struct mplsogre_encap_conf mplsogre_encap_conf; + +struct mplsogre_decap_conf mplsogre_decap_conf; + +struct mplsoudp_encap_conf mplsoudp_encap_conf; + +struct mplsoudp_decap_conf mplsoudp_decap_conf; + +/** Maximum number of subsequent tokens and arguments on the stack. */ +#define CTX_STACK_SIZE 16 + +/** Parser context. */ +struct context { + /** Stack of subsequent token lists to process. 
*/ + const enum index *next[CTX_STACK_SIZE]; + /** Arguments for stacked tokens. */ + const void *args[CTX_STACK_SIZE]; + enum index curr; /**< Current token index. */ + enum index prev; /**< Index of the last token seen. */ + int next_num; /**< Number of entries in next[]. */ + int args_num; /**< Number of entries in args[]. */ + uint32_t eol:1; /**< EOL has been detected. */ + uint32_t last:1; /**< No more arguments. */ + portid_t port; /**< Current port ID (for completions). */ + uint32_t objdata; /**< Object-specific data. */ + void *object; /**< Address of current object for relative offsets. */ + void *objmask; /**< Object a full mask must be written to. */ +}; + +/** Token argument. */ +struct arg { + uint32_t hton:1; /**< Use network byte ordering. */ + uint32_t sign:1; /**< Value is signed. */ + uint32_t bounded:1; /**< Value is bounded. */ + uintmax_t min; /**< Minimum value if bounded. */ + uintmax_t max; /**< Maximum value if bounded. */ + uint32_t offset; /**< Relative offset from ctx->object. */ + uint32_t size; /**< Field size. */ + const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */ +}; + +/** Parser token definition. */ +struct token { + /** Type displayed during completion (defaults to "TOKEN"). */ + const char *type; + /** Help displayed during completion (defaults to token name). */ + const char *help; + /** Private data used by parser functions. */ + const void *priv; + /** + * Lists of subsequent tokens to push on the stack. Each call to the + * parser consumes the last entry of that stack. + */ + const enum index *const *next; + /** Arguments stack for subsequent tokens that need them. */ + const struct arg *const *args; + /** + * Token-processing callback, returns -1 in case of error, the + * length of the matched string otherwise. If NULL, attempts to + * match the token name. + * + * If buf is not NULL, the result should be stored in it according + * to context. An error is returned if not large enough. + */ + int (*call)(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size); + /** + * Callback that provides possible values for this token, used for + * completion. Returns -1 in case of error, the number of possible + * values otherwise. If NULL, the token name is used. + * + * If buf is not NULL, entry index ent is written to buf and the + * full length of the entry is returned (same behavior as + * snprintf()). + */ + int (*comp)(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size); + /** Mandatory token name, no default value. */ + const char *name; +}; + +/** Static initializer for the next field. */ +#define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, } + +/** Static initializer for a NEXT() entry. */ +#define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, } + +/** Static initializer for the args field. */ +#define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, } + +/** Static initializer for ARGS() to target a field. */ +#define ARGS_ENTRY(s, f) \ + (&(const struct arg){ \ + .offset = offsetof(s, f), \ + .size = sizeof(((s *)0)->f), \ + }) + +/** Static initializer for ARGS() to target a bit-field. */ +#define ARGS_ENTRY_BF(s, f, b) \ + (&(const struct arg){ \ + .size = sizeof(s), \ + .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \ + }) + +/** Static initializer for ARGS() to target an arbitrary bit-mask. 
*/ +#define ARGS_ENTRY_MASK(s, f, m) \ + (&(const struct arg){ \ + .offset = offsetof(s, f), \ + .size = sizeof(((s *)0)->f), \ + .mask = (const void *)(m), \ + }) + +/** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */ +#define ARGS_ENTRY_MASK_HTON(s, f, m) \ + (&(const struct arg){ \ + .hton = 1, \ + .offset = offsetof(s, f), \ + .size = sizeof(((s *)0)->f), \ + .mask = (const void *)(m), \ + }) + +/** Static initializer for ARGS() to target a pointer. */ +#define ARGS_ENTRY_PTR(s, f) \ + (&(const struct arg){ \ + .size = sizeof(*((s *)0)->f), \ + }) + +/** Static initializer for ARGS() with arbitrary offset and size. */ +#define ARGS_ENTRY_ARB(o, s) \ + (&(const struct arg){ \ + .offset = (o), \ + .size = (s), \ + }) + +/** Same as ARGS_ENTRY_ARB() with bounded values. */ +#define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \ + (&(const struct arg){ \ + .bounded = 1, \ + .min = (i), \ + .max = (a), \ + .offset = (o), \ + .size = (s), \ + }) + +/** Same as ARGS_ENTRY() using network byte ordering. */ +#define ARGS_ENTRY_HTON(s, f) \ + (&(const struct arg){ \ + .hton = 1, \ + .offset = offsetof(s, f), \ + .size = sizeof(((s *)0)->f), \ + }) + +/** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */ +#define ARG_ENTRY_HTON(s) \ + (&(const struct arg){ \ + .hton = 1, \ + .offset = 0, \ + .size = sizeof(s), \ + }) + +/** Parser output buffer layout expected by cmd_flow_parsed(). */ +struct buffer { + enum index command; /**< Flow command. */ + portid_t port; /**< Affected port ID. */ + union { + struct { + struct rte_flow_attr attr; + struct rte_flow_item *pattern; + struct rte_flow_action *actions; + uint32_t pattern_n; + uint32_t actions_n; + uint8_t *data; + } vc; /**< Validate/create arguments. */ + struct { + uint32_t *rule; + uint32_t rule_n; + } destroy; /**< Destroy arguments. */ + struct { + char file[128]; + } dump; /**< Dump arguments. */ + struct { + uint32_t rule; + struct rte_flow_action action; + } query; /**< Query arguments. */ + struct { + uint32_t *group; + uint32_t group_n; + } list; /**< List arguments. */ + struct { + int set; + } isolate; /**< Isolated mode arguments. */ + struct { + int destroy; + } aged; /**< Aged arguments. */ + } args; /**< Command arguments. */ +}; + +/** Private data for pattern items. */ +struct parse_item_priv { + enum rte_flow_item_type type; /**< Item type. */ + uint32_t size; /**< Size of item specification structure. */ +}; + +#define PRIV_ITEM(t, s) \ + (&(const struct parse_item_priv){ \ + .type = RTE_FLOW_ITEM_TYPE_ ## t, \ + .size = s, \ + }) + +/** Private data for actions. */ +struct parse_action_priv { + enum rte_flow_action_type type; /**< Action type. */ + uint32_t size; /**< Size of action configuration structure. 
*/ +}; + +#define PRIV_ACTION(t, s) \ + (&(const struct parse_action_priv){ \ + .type = RTE_FLOW_ACTION_TYPE_ ## t, \ + .size = s, \ + }) + +static const enum index next_vc_attr[] = { + GROUP, + PRIORITY, + INGRESS, + EGRESS, + TRANSFER, + PATTERN, + ZERO, +}; + +static const enum index next_destroy_attr[] = { + DESTROY_RULE, + END, + ZERO, +}; + +static const enum index next_dump_attr[] = { + FILE_PATH, + END, + ZERO, +}; + +static const enum index next_list_attr[] = { + LIST_GROUP, + END, + ZERO, +}; + +static const enum index next_aged_attr[] = { + AGED_DESTROY, + END, + ZERO, +}; + +static const enum index item_param[] = { + ITEM_PARAM_IS, + ITEM_PARAM_SPEC, + ITEM_PARAM_LAST, + ITEM_PARAM_MASK, + ITEM_PARAM_PREFIX, + ZERO, +}; + +static const enum index next_item[] = { + ITEM_END, + ITEM_VOID, + ITEM_INVERT, + ITEM_ANY, + ITEM_PF, + ITEM_VF, + ITEM_PHY_PORT, + ITEM_PORT_ID, + ITEM_MARK, + ITEM_RAW, + ITEM_ETH, + ITEM_VLAN, + ITEM_IPV4, + ITEM_IPV6, + ITEM_ICMP, + ITEM_UDP, + ITEM_TCP, + ITEM_SCTP, + ITEM_VXLAN, + ITEM_E_TAG, + ITEM_NVGRE, + ITEM_MPLS, + ITEM_GRE, + ITEM_FUZZY, + ITEM_GTP, + ITEM_GTPC, + ITEM_GTPU, + ITEM_GENEVE, + ITEM_VXLAN_GPE, + ITEM_ARP_ETH_IPV4, + ITEM_IPV6_EXT, + ITEM_ICMP6, + ITEM_ICMP6_ND_NS, + ITEM_ICMP6_ND_NA, + ITEM_ICMP6_ND_OPT, + ITEM_ICMP6_ND_OPT_SLA_ETH, + ITEM_ICMP6_ND_OPT_TLA_ETH, + ITEM_META, + ITEM_GRE_KEY, + ITEM_GTP_PSC, + ITEM_PPPOES, + ITEM_PPPOED, + ITEM_PPPOE_PROTO_ID, + ITEM_HIGIG2, + ITEM_TAG, + ITEM_L2TPV3OIP, + ITEM_ESP, + ITEM_AH, + ITEM_PFCP, + END_SET, + ZERO, +}; + +static const enum index item_fuzzy[] = { + ITEM_FUZZY_THRESH, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_any[] = { + ITEM_ANY_NUM, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_vf[] = { + ITEM_VF_ID, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_phy_port[] = { + ITEM_PHY_PORT_INDEX, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_port_id[] = { + ITEM_PORT_ID_ID, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_mark[] = { + ITEM_MARK_ID, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_raw[] = { + ITEM_RAW_RELATIVE, + ITEM_RAW_SEARCH, + ITEM_RAW_OFFSET, + ITEM_RAW_LIMIT, + ITEM_RAW_PATTERN, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_eth[] = { + ITEM_ETH_DST, + ITEM_ETH_SRC, + ITEM_ETH_TYPE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_vlan[] = { + ITEM_VLAN_TCI, + ITEM_VLAN_PCP, + ITEM_VLAN_DEI, + ITEM_VLAN_VID, + ITEM_VLAN_INNER_TYPE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_ipv4[] = { + ITEM_IPV4_TOS, + ITEM_IPV4_TTL, + ITEM_IPV4_PROTO, + ITEM_IPV4_SRC, + ITEM_IPV4_DST, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_ipv6[] = { + ITEM_IPV6_TC, + ITEM_IPV6_FLOW, + ITEM_IPV6_PROTO, + ITEM_IPV6_HOP, + ITEM_IPV6_SRC, + ITEM_IPV6_DST, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp[] = { + ITEM_ICMP_TYPE, + ITEM_ICMP_CODE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_udp[] = { + ITEM_UDP_SRC, + ITEM_UDP_DST, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_tcp[] = { + ITEM_TCP_SRC, + ITEM_TCP_DST, + ITEM_TCP_FLAGS, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_sctp[] = { + ITEM_SCTP_SRC, + ITEM_SCTP_DST, + ITEM_SCTP_TAG, + ITEM_SCTP_CKSUM, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_vxlan[] = { + ITEM_VXLAN_VNI, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_e_tag[] = { + ITEM_E_TAG_GRP_ECID_B, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_nvgre[] = { + ITEM_NVGRE_TNI, + ITEM_NEXT, + ZERO, 
+}; + +static const enum index item_mpls[] = { + ITEM_MPLS_LABEL, + ITEM_MPLS_TC, + ITEM_MPLS_S, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_gre[] = { + ITEM_GRE_PROTO, + ITEM_GRE_C_RSVD0_VER, + ITEM_GRE_C_BIT, + ITEM_GRE_K_BIT, + ITEM_GRE_S_BIT, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_gre_key[] = { + ITEM_GRE_KEY_VALUE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_gtp[] = { + ITEM_GTP_FLAGS, + ITEM_GTP_MSG_TYPE, + ITEM_GTP_TEID, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_geneve[] = { + ITEM_GENEVE_VNI, + ITEM_GENEVE_PROTO, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_vxlan_gpe[] = { + ITEM_VXLAN_GPE_VNI, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_arp_eth_ipv4[] = { + ITEM_ARP_ETH_IPV4_SHA, + ITEM_ARP_ETH_IPV4_SPA, + ITEM_ARP_ETH_IPV4_THA, + ITEM_ARP_ETH_IPV4_TPA, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_ipv6_ext[] = { + ITEM_IPV6_EXT_NEXT_HDR, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6[] = { + ITEM_ICMP6_TYPE, + ITEM_ICMP6_CODE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_ns[] = { + ITEM_ICMP6_ND_NS_TARGET_ADDR, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_na[] = { + ITEM_ICMP6_ND_NA_TARGET_ADDR, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_opt[] = { + ITEM_ICMP6_ND_OPT_TYPE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_opt_sla_eth[] = { + ITEM_ICMP6_ND_OPT_SLA_ETH_SLA, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_opt_tla_eth[] = { + ITEM_ICMP6_ND_OPT_TLA_ETH_TLA, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_meta[] = { + ITEM_META_DATA, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_gtp_psc[] = { + ITEM_GTP_PSC_QFI, + ITEM_GTP_PSC_PDU_T, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_pppoed[] = { + ITEM_PPPOE_SEID, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_pppoes[] = { + ITEM_PPPOE_SEID, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_pppoe_proto_id[] = { + ITEM_NEXT, + ZERO, +}; + +static const enum index item_higig2[] = { + ITEM_HIGIG2_CLASSIFICATION, + ITEM_HIGIG2_VID, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_esp[] = { + ITEM_ESP_SPI, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_ah[] = { + ITEM_AH_SPI, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_pfcp[] = { + ITEM_PFCP_S_FIELD, + ITEM_PFCP_SEID, + ITEM_NEXT, + ZERO, +}; + +static const enum index next_set_raw[] = { + SET_RAW_INDEX, + ITEM_ETH, + ZERO, +}; + +static const enum index item_tag[] = { + ITEM_TAG_DATA, + ITEM_TAG_INDEX, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_l2tpv3oip[] = { + ITEM_L2TPV3OIP_SESSION_ID, + ITEM_NEXT, + ZERO, +}; + +static const enum index next_action[] = { + ACTION_END, + ACTION_VOID, + ACTION_PASSTHRU, + ACTION_JUMP, + ACTION_MARK, + ACTION_FLAG, + ACTION_QUEUE, + ACTION_DROP, + ACTION_COUNT, + ACTION_RSS, + ACTION_PF, + ACTION_VF, + ACTION_PHY_PORT, + ACTION_PORT_ID, + ACTION_METER, + ACTION_OF_SET_MPLS_TTL, + ACTION_OF_DEC_MPLS_TTL, + ACTION_OF_SET_NW_TTL, + ACTION_OF_DEC_NW_TTL, + ACTION_OF_COPY_TTL_OUT, + ACTION_OF_COPY_TTL_IN, + ACTION_OF_POP_VLAN, + ACTION_OF_PUSH_VLAN, + ACTION_OF_SET_VLAN_VID, + ACTION_OF_SET_VLAN_PCP, + ACTION_OF_POP_MPLS, + ACTION_OF_PUSH_MPLS, + ACTION_VXLAN_ENCAP, + ACTION_VXLAN_DECAP, + ACTION_NVGRE_ENCAP, + ACTION_NVGRE_DECAP, + ACTION_L2_ENCAP, + ACTION_L2_DECAP, + ACTION_MPLSOGRE_ENCAP, + ACTION_MPLSOGRE_DECAP, + ACTION_MPLSOUDP_ENCAP, + 
ACTION_MPLSOUDP_DECAP, + ACTION_SET_IPV4_SRC, + ACTION_SET_IPV4_DST, + ACTION_SET_IPV6_SRC, + ACTION_SET_IPV6_DST, + ACTION_SET_TP_SRC, + ACTION_SET_TP_DST, + ACTION_MAC_SWAP, + ACTION_DEC_TTL, + ACTION_SET_TTL, + ACTION_SET_MAC_SRC, + ACTION_SET_MAC_DST, + ACTION_INC_TCP_SEQ, + ACTION_DEC_TCP_SEQ, + ACTION_INC_TCP_ACK, + ACTION_DEC_TCP_ACK, + ACTION_RAW_ENCAP, + ACTION_RAW_DECAP, + ACTION_SET_TAG, + ACTION_SET_META, + ACTION_SET_IPV4_DSCP, + ACTION_SET_IPV6_DSCP, + ACTION_AGE, + ZERO, +}; + +static const enum index action_mark[] = { + ACTION_MARK_ID, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_queue[] = { + ACTION_QUEUE_INDEX, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_count[] = { + ACTION_COUNT_ID, + ACTION_COUNT_SHARED, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_rss[] = { + ACTION_RSS_FUNC, + ACTION_RSS_LEVEL, + ACTION_RSS_TYPES, + ACTION_RSS_KEY, + ACTION_RSS_KEY_LEN, + ACTION_RSS_QUEUES, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_vf[] = { + ACTION_VF_ORIGINAL, + ACTION_VF_ID, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_phy_port[] = { + ACTION_PHY_PORT_ORIGINAL, + ACTION_PHY_PORT_INDEX, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_port_id[] = { + ACTION_PORT_ID_ORIGINAL, + ACTION_PORT_ID_ID, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_meter[] = { + ACTION_METER_ID, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_mpls_ttl[] = { + ACTION_OF_SET_MPLS_TTL_MPLS_TTL, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_nw_ttl[] = { + ACTION_OF_SET_NW_TTL_NW_TTL, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_push_vlan[] = { + ACTION_OF_PUSH_VLAN_ETHERTYPE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_vlan_vid[] = { + ACTION_OF_SET_VLAN_VID_VLAN_VID, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_vlan_pcp[] = { + ACTION_OF_SET_VLAN_PCP_VLAN_PCP, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_pop_mpls[] = { + ACTION_OF_POP_MPLS_ETHERTYPE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_push_mpls[] = { + ACTION_OF_PUSH_MPLS_ETHERTYPE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv4_src[] = { + ACTION_SET_IPV4_SRC_IPV4_SRC, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_mac_src[] = { + ACTION_SET_MAC_SRC_MAC_SRC, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv4_dst[] = { + ACTION_SET_IPV4_DST_IPV4_DST, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv6_src[] = { + ACTION_SET_IPV6_SRC_IPV6_SRC, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv6_dst[] = { + ACTION_SET_IPV6_DST_IPV6_DST, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_tp_src[] = { + ACTION_SET_TP_SRC_TP_SRC, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_tp_dst[] = { + ACTION_SET_TP_DST_TP_DST, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ttl[] = { + ACTION_SET_TTL_TTL, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_jump[] = { + ACTION_JUMP_GROUP, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_mac_dst[] = { + ACTION_SET_MAC_DST_MAC_DST, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_inc_tcp_seq[] = { + ACTION_INC_TCP_SEQ_VALUE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_dec_tcp_seq[] = { + ACTION_DEC_TCP_SEQ_VALUE, + ACTION_NEXT, + ZERO, +}; + +static const enum index 
action_inc_tcp_ack[] = { + ACTION_INC_TCP_ACK_VALUE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_dec_tcp_ack[] = { + ACTION_DEC_TCP_ACK_VALUE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_raw_encap[] = { + ACTION_RAW_ENCAP_INDEX, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_raw_decap[] = { + ACTION_RAW_DECAP_INDEX, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_tag[] = { + ACTION_SET_TAG_DATA, + ACTION_SET_TAG_INDEX, + ACTION_SET_TAG_MASK, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_meta[] = { + ACTION_SET_META_DATA, + ACTION_SET_META_MASK, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv4_dscp[] = { + ACTION_SET_IPV4_DSCP_VALUE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv6_dscp[] = { + ACTION_SET_IPV6_DSCP_VALUE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_age[] = { + ACTION_AGE, + ACTION_AGE_TIMEOUT, + ACTION_NEXT, + ZERO, +}; + +static int parse_set_raw_encap_decap(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_set_init(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_init(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_vc(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_vc_spec(struct context *, const struct token *, + const char *, unsigned int, void *, unsigned int); +static int parse_vc_conf(struct context *, const struct token *, + const char *, unsigned int, void *, unsigned int); +static int parse_vc_action_rss(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_rss_func(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_rss_type(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_rss_queue(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_vxlan_encap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_nvgre_encap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_l2_encap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_l2_decap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_mplsogre_encap(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_mplsogre_decap(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_mplsoudp_encap(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_mplsoudp_decap(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_raw_encap(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_raw_decap(struct context *, + const struct token *, const char 
*, + unsigned int, void *, unsigned int); +static int parse_vc_action_raw_encap_index(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_raw_decap_index(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_set_meta(struct context *ctx, + const struct token *token, const char *str, + unsigned int len, void *buf, + unsigned int size); +static int parse_destroy(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_flush(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_dump(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_query(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_action(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_list(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_aged(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_isolate(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_int(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_prefix(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_boolean(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_string(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_hex(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size); +static int parse_string0(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_mac_addr(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_ipv4_addr(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_ipv6_addr(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int parse_port(struct context *, const struct token *, + const char *, unsigned int, + void *, unsigned int); +static int comp_none(struct context *, const struct token *, + unsigned int, char *, unsigned int); +static int comp_boolean(struct context *, const struct token *, + unsigned int, char *, unsigned int); +static int comp_action(struct context *, const struct token *, + unsigned int, char *, unsigned int); +static int comp_port(struct context *, const struct token *, + unsigned int, char *, unsigned int); +static int comp_rule_id(struct context *, const struct token *, + unsigned int, char *, unsigned int); +static int comp_vc_action_rss_type(struct context *, const struct token *, + unsigned int, char *, unsigned int); +static int comp_vc_action_rss_queue(struct context *, const struct token *, + unsigned int, char *, unsigned int); +static int comp_set_raw_index(struct context *, const struct token *, + unsigned int, char *, unsigned int); + +/** Token definitions. 
*/ +static const struct token token_list[] = { + /* Special tokens. */ + [ZERO] = { + .name = "ZERO", + .help = "null entry, abused as the entry point", + .next = NEXT(NEXT_ENTRY(FLOW)), + }, + [END] = { + .name = "", + .type = "RETURN", + .help = "command may end here", + }, + [START_SET] = { + .name = "START_SET", + .help = "null entry, abused as the entry point for set", + .next = NEXT(NEXT_ENTRY(SET)), + }, + [END_SET] = { + .name = "end_set", + .type = "RETURN", + .help = "set command may end here", + }, + /* Common tokens. */ + [INTEGER] = { + .name = "{int}", + .type = "INTEGER", + .help = "integer value", + .call = parse_int, + .comp = comp_none, + }, + [UNSIGNED] = { + .name = "{unsigned}", + .type = "UNSIGNED", + .help = "unsigned integer value", + .call = parse_int, + .comp = comp_none, + }, + [PREFIX] = { + .name = "{prefix}", + .type = "PREFIX", + .help = "prefix length for bit-mask", + .call = parse_prefix, + .comp = comp_none, + }, + [BOOLEAN] = { + .name = "{boolean}", + .type = "BOOLEAN", + .help = "any boolean value", + .call = parse_boolean, + .comp = comp_boolean, + }, + [STRING] = { + .name = "{string}", + .type = "STRING", + .help = "fixed string", + .call = parse_string, + .comp = comp_none, + }, + [HEX] = { + .name = "{hex}", + .type = "HEX", + .help = "fixed string", + .call = parse_hex, + }, + [FILE_PATH] = { + .name = "{file path}", + .type = "STRING", + .help = "file path", + .call = parse_string0, + .comp = comp_none, + }, + [MAC_ADDR] = { + .name = "{MAC address}", + .type = "MAC-48", + .help = "standard MAC address notation", + .call = parse_mac_addr, + .comp = comp_none, + }, + [IPV4_ADDR] = { + .name = "{IPv4 address}", + .type = "IPV4 ADDRESS", + .help = "standard IPv4 address notation", + .call = parse_ipv4_addr, + .comp = comp_none, + }, + [IPV6_ADDR] = { + .name = "{IPv6 address}", + .type = "IPV6 ADDRESS", + .help = "standard IPv6 address notation", + .call = parse_ipv6_addr, + .comp = comp_none, + }, + [RULE_ID] = { + .name = "{rule id}", + .type = "RULE ID", + .help = "rule identifier", + .call = parse_int, + .comp = comp_rule_id, + }, + [PORT_ID] = { + .name = "{port_id}", + .type = "PORT ID", + .help = "port identifier", + .call = parse_port, + .comp = comp_port, + }, + [GROUP_ID] = { + .name = "{group_id}", + .type = "GROUP ID", + .help = "group identifier", + .call = parse_int, + .comp = comp_none, + }, + [PRIORITY_LEVEL] = { + .name = "{level}", + .type = "PRIORITY", + .help = "priority level", + .call = parse_int, + .comp = comp_none, + }, + /* Top-level command. */ + [FLOW] = { + .name = "flow", + .type = "{command} {port_id} [{arg} [...]]", + .help = "manage ingress/egress flow rules", + .next = NEXT(NEXT_ENTRY + (VALIDATE, + CREATE, + DESTROY, + FLUSH, + DUMP, + LIST, + AGED, + QUERY, + ISOLATE)), + .call = parse_init, + }, + /* Sub-level commands. 
*/ + [VALIDATE] = { + .name = "validate", + .help = "check whether a flow rule can be created", + .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, port)), + .call = parse_vc, + }, + [CREATE] = { + .name = "create", + .help = "create a flow rule", + .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, port)), + .call = parse_vc, + }, + [DESTROY] = { + .name = "destroy", + .help = "destroy specific flow rules", + .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, port)), + .call = parse_destroy, + }, + [FLUSH] = { + .name = "flush", + .help = "destroy all flow rules", + .next = NEXT(NEXT_ENTRY(PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, port)), + .call = parse_flush, + }, + [DUMP] = { + .name = "dump", + .help = "dump all flow rules to file", + .next = NEXT(next_dump_attr, NEXT_ENTRY(PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, args.dump.file), + ARGS_ENTRY(struct buffer, port)), + .call = parse_dump, + }, + [QUERY] = { + .name = "query", + .help = "query an existing flow rule", + .next = NEXT(NEXT_ENTRY(QUERY_ACTION), + NEXT_ENTRY(RULE_ID), + NEXT_ENTRY(PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type), + ARGS_ENTRY(struct buffer, args.query.rule), + ARGS_ENTRY(struct buffer, port)), + .call = parse_query, + }, + [LIST] = { + .name = "list", + .help = "list existing flow rules", + .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, port)), + .call = parse_list, + }, + [AGED] = { + .name = "aged", + .help = "list and destroy aged flows", + .next = NEXT(next_aged_attr, NEXT_ENTRY(PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, port)), + .call = parse_aged, + }, + [ISOLATE] = { + .name = "isolate", + .help = "restrict ingress traffic to the defined flow rules", + .next = NEXT(NEXT_ENTRY(BOOLEAN), + NEXT_ENTRY(PORT_ID)), + .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set), + ARGS_ENTRY(struct buffer, port)), + .call = parse_isolate, + }, + /* Destroy arguments. */ + [DESTROY_RULE] = { + .name = "rule", + .help = "specify a rule identifier", + .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)), + .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)), + .call = parse_destroy, + }, + /* Query arguments. */ + [QUERY_ACTION] = { + .name = "{action}", + .type = "ACTION", + .help = "action to query, must be part of the rule", + .call = parse_action, + .comp = comp_action, + }, + /* List arguments. */ + [LIST_GROUP] = { + .name = "group", + .help = "specify a group", + .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)), + .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)), + .call = parse_list, + }, + [AGED_DESTROY] = { + .name = "destroy", + .help = "specify aged flows need be destroyed", + .call = parse_aged, + .comp = comp_none, + }, + /* Validate/create attributes. 
*/ + [GROUP] = { + .name = "group", + .help = "specify a group", + .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)), + .call = parse_vc, + }, + [PRIORITY] = { + .name = "priority", + .help = "specify a priority level", + .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)), + .call = parse_vc, + }, + [INGRESS] = { + .name = "ingress", + .help = "affect rule to ingress", + .next = NEXT(next_vc_attr), + .call = parse_vc, + }, + [EGRESS] = { + .name = "egress", + .help = "affect rule to egress", + .next = NEXT(next_vc_attr), + .call = parse_vc, + }, + [TRANSFER] = { + .name = "transfer", + .help = "apply rule directly to endpoints found in pattern", + .next = NEXT(next_vc_attr), + .call = parse_vc, + }, + /* Validate/create pattern. */ + [PATTERN] = { + .name = "pattern", + .help = "submit a list of pattern items", + .next = NEXT(next_item), + .call = parse_vc, + }, + [ITEM_PARAM_IS] = { + .name = "is", + .help = "match value perfectly (with full bit-mask)", + .call = parse_vc_spec, + }, + [ITEM_PARAM_SPEC] = { + .name = "spec", + .help = "match value according to configured bit-mask", + .call = parse_vc_spec, + }, + [ITEM_PARAM_LAST] = { + .name = "last", + .help = "specify upper bound to establish a range", + .call = parse_vc_spec, + }, + [ITEM_PARAM_MASK] = { + .name = "mask", + .help = "specify bit-mask with relevant bits set to one", + .call = parse_vc_spec, + }, + [ITEM_PARAM_PREFIX] = { + .name = "prefix", + .help = "generate bit-mask from a prefix length", + .call = parse_vc_spec, + }, + [ITEM_NEXT] = { + .name = "/", + .help = "specify next pattern item", + .next = NEXT(next_item), + }, + [ITEM_END] = { + .name = "end", + .help = "end list of pattern items", + .priv = PRIV_ITEM(END, 0), + .next = NEXT(NEXT_ENTRY(ACTIONS)), + .call = parse_vc, + }, + [ITEM_VOID] = { + .name = "void", + .help = "no-op pattern item", + .priv = PRIV_ITEM(VOID, 0), + .next = NEXT(NEXT_ENTRY(ITEM_NEXT)), + .call = parse_vc, + }, + [ITEM_INVERT] = { + .name = "invert", + .help = "perform actions when pattern does not match", + .priv = PRIV_ITEM(INVERT, 0), + .next = NEXT(NEXT_ENTRY(ITEM_NEXT)), + .call = parse_vc, + }, + [ITEM_ANY] = { + .name = "any", + .help = "match any protocol for the current layer", + .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)), + .next = NEXT(item_any), + .call = parse_vc, + }, + [ITEM_ANY_NUM] = { + .name = "num", + .help = "number of layers covered", + .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)), + }, + [ITEM_PF] = { + .name = "pf", + .help = "match traffic from/to the physical function", + .priv = PRIV_ITEM(PF, 0), + .next = NEXT(NEXT_ENTRY(ITEM_NEXT)), + .call = parse_vc, + }, + [ITEM_VF] = { + .name = "vf", + .help = "match traffic from/to a virtual function ID", + .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)), + .next = NEXT(item_vf), + .call = parse_vc, + }, + [ITEM_VF_ID] = { + .name = "id", + .help = "VF ID", + .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)), + }, + [ITEM_PHY_PORT] = { + .name = "phy_port", + .help = "match traffic from/to a specific physical port", + .priv = PRIV_ITEM(PHY_PORT, + sizeof(struct rte_flow_item_phy_port)), + .next = NEXT(item_phy_port), + .call = parse_vc, + }, + [ITEM_PHY_PORT_INDEX] = { + .name = "index", + .help = "physical port index", + .next = NEXT(item_phy_port, 
NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)), + }, + [ITEM_PORT_ID] = { + .name = "port_id", + .help = "match traffic from/to a given DPDK port ID", + .priv = PRIV_ITEM(PORT_ID, + sizeof(struct rte_flow_item_port_id)), + .next = NEXT(item_port_id), + .call = parse_vc, + }, + [ITEM_PORT_ID_ID] = { + .name = "id", + .help = "DPDK port ID", + .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)), + }, + [ITEM_MARK] = { + .name = "mark", + .help = "match traffic against value set in previously matched rule", + .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)), + .next = NEXT(item_mark), + .call = parse_vc, + }, + [ITEM_MARK_ID] = { + .name = "id", + .help = "Integer value to match against", + .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)), + }, + [ITEM_RAW] = { + .name = "raw", + .help = "match an arbitrary byte string", + .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE), + .next = NEXT(item_raw), + .call = parse_vc, + }, + [ITEM_RAW_RELATIVE] = { + .name = "relative", + .help = "look for pattern after the previous item", + .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw, + relative, 1)), + }, + [ITEM_RAW_SEARCH] = { + .name = "search", + .help = "search pattern from offset (see also limit)", + .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw, + search, 1)), + }, + [ITEM_RAW_OFFSET] = { + .name = "offset", + .help = "absolute or relative offset for pattern", + .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)), + }, + [ITEM_RAW_LIMIT] = { + .name = "limit", + .help = "search area limit for start of pattern", + .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)), + }, + [ITEM_RAW_PATTERN] = { + .name = "pattern", + .help = "byte string to look for", + .next = NEXT(item_raw, + NEXT_ENTRY(STRING), + NEXT_ENTRY(ITEM_PARAM_IS, + ITEM_PARAM_SPEC, + ITEM_PARAM_MASK)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern), + ARGS_ENTRY(struct rte_flow_item_raw, length), + ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw), + ITEM_RAW_PATTERN_SIZE)), + }, + [ITEM_ETH] = { + .name = "eth", + .help = "match Ethernet header", + .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)), + .next = NEXT(item_eth), + .call = parse_vc, + }, + [ITEM_ETH_DST] = { + .name = "dst", + .help = "destination MAC", + .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)), + }, + [ITEM_ETH_SRC] = { + .name = "src", + .help = "source MAC", + .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)), + }, + [ITEM_ETH_TYPE] = { + .name = "type", + .help = "EtherType", + .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)), + }, + [ITEM_VLAN] = { + .name = "vlan", + .help = "match 802.1Q/ad VLAN tag", + .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)), + .next = NEXT(item_vlan), + .call = parse_vc, + }, + [ITEM_VLAN_TCI] = { + .name = "tci", + .help = "tag control information", + .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct 
rte_flow_item_vlan, tci)), + }, + [ITEM_VLAN_PCP] = { + .name = "pcp", + .help = "priority code point", + .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan, + tci, "\xe0\x00")), + }, + [ITEM_VLAN_DEI] = { + .name = "dei", + .help = "drop eligible indicator", + .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan, + tci, "\x10\x00")), + }, + [ITEM_VLAN_VID] = { + .name = "vid", + .help = "VLAN identifier", + .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan, + tci, "\x0f\xff")), + }, + [ITEM_VLAN_INNER_TYPE] = { + .name = "inner_type", + .help = "inner EtherType", + .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, + inner_type)), + }, + [ITEM_IPV4] = { + .name = "ipv4", + .help = "match IPv4 header", + .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)), + .next = NEXT(item_ipv4), + .call = parse_vc, + }, + [ITEM_IPV4_TOS] = { + .name = "tos", + .help = "type of service", + .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4, + hdr.type_of_service)), + }, + [ITEM_IPV4_TTL] = { + .name = "ttl", + .help = "time to live", + .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4, + hdr.time_to_live)), + }, + [ITEM_IPV4_PROTO] = { + .name = "proto", + .help = "next protocol ID", + .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4, + hdr.next_proto_id)), + }, + [ITEM_IPV4_SRC] = { + .name = "src", + .help = "source address", + .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4, + hdr.src_addr)), + }, + [ITEM_IPV4_DST] = { + .name = "dst", + .help = "destination address", + .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4, + hdr.dst_addr)), + }, + [ITEM_IPV6] = { + .name = "ipv6", + .help = "match IPv6 header", + .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)), + .next = NEXT(item_ipv6), + .call = parse_vc, + }, + [ITEM_IPV6_TC] = { + .name = "tc", + .help = "traffic class", + .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6, + hdr.vtc_flow, + "\x0f\xf0\x00\x00")), + }, + [ITEM_IPV6_FLOW] = { + .name = "flow", + .help = "flow label", + .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6, + hdr.vtc_flow, + "\x00\x0f\xff\xff")), + }, + [ITEM_IPV6_PROTO] = { + .name = "proto", + .help = "protocol (next header)", + .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6, + hdr.proto)), + }, + [ITEM_IPV6_HOP] = { + .name = "hop", + .help = "hop limit", + .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6, + hdr.hop_limits)), + }, + [ITEM_IPV6_SRC] = { + .name = "src", + .help = "source address", + .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6, + hdr.src_addr)), + }, + [ITEM_IPV6_DST] = { + .name = "dst", + .help = "destination address", + .next = NEXT(item_ipv6, 
NEXT_ENTRY(IPV6_ADDR), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6, + hdr.dst_addr)), + }, + [ITEM_ICMP] = { + .name = "icmp", + .help = "match ICMP header", + .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)), + .next = NEXT(item_icmp), + .call = parse_vc, + }, + [ITEM_ICMP_TYPE] = { + .name = "type", + .help = "ICMP packet type", + .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp, + hdr.icmp_type)), + }, + [ITEM_ICMP_CODE] = { + .name = "code", + .help = "ICMP packet code", + .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp, + hdr.icmp_code)), + }, + [ITEM_UDP] = { + .name = "udp", + .help = "match UDP header", + .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)), + .next = NEXT(item_udp), + .call = parse_vc, + }, + [ITEM_UDP_SRC] = { + .name = "src", + .help = "UDP source port", + .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp, + hdr.src_port)), + }, + [ITEM_UDP_DST] = { + .name = "dst", + .help = "UDP destination port", + .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp, + hdr.dst_port)), + }, + [ITEM_TCP] = { + .name = "tcp", + .help = "match TCP header", + .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), + .next = NEXT(item_tcp), + .call = parse_vc, + }, + [ITEM_TCP_SRC] = { + .name = "src", + .help = "TCP source port", + .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp, + hdr.src_port)), + }, + [ITEM_TCP_DST] = { + .name = "dst", + .help = "TCP destination port", + .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp, + hdr.dst_port)), + }, + [ITEM_TCP_FLAGS] = { + .name = "flags", + .help = "TCP flags", + .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp, + hdr.tcp_flags)), + }, + [ITEM_SCTP] = { + .name = "sctp", + .help = "match SCTP header", + .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), + .next = NEXT(item_sctp), + .call = parse_vc, + }, + [ITEM_SCTP_SRC] = { + .name = "src", + .help = "SCTP source port", + .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp, + hdr.src_port)), + }, + [ITEM_SCTP_DST] = { + .name = "dst", + .help = "SCTP destination port", + .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp, + hdr.dst_port)), + }, + [ITEM_SCTP_TAG] = { + .name = "tag", + .help = "validation tag", + .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp, + hdr.tag)), + }, + [ITEM_SCTP_CKSUM] = { + .name = "cksum", + .help = "checksum", + .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp, + hdr.cksum)), + }, + [ITEM_VXLAN] = { + .name = "vxlan", + .help = "match VXLAN header", + .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), + .next = NEXT(item_vxlan), + .call = parse_vc, + }, + [ITEM_VXLAN_VNI] = { + .name = "vni", + .help = "VXLAN identifier", + .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)), + }, + [ITEM_E_TAG] = { + .name = 
"e_tag", + .help = "match E-Tag header", + .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), + .next = NEXT(item_e_tag), + .call = parse_vc, + }, + [ITEM_E_TAG_GRP_ECID_B] = { + .name = "grp_ecid_b", + .help = "GRP and E-CID base", + .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag, + rsvd_grp_ecid_b, + "\x3f\xff")), + }, + [ITEM_NVGRE] = { + .name = "nvgre", + .help = "match NVGRE header", + .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), + .next = NEXT(item_nvgre), + .call = parse_vc, + }, + [ITEM_NVGRE_TNI] = { + .name = "tni", + .help = "virtual subnet ID", + .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)), + }, + [ITEM_MPLS] = { + .name = "mpls", + .help = "match MPLS header", + .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), + .next = NEXT(item_mpls), + .call = parse_vc, + }, + [ITEM_MPLS_LABEL] = { + .name = "label", + .help = "MPLS label", + .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls, + label_tc_s, + "\xff\xff\xf0")), + }, + [ITEM_MPLS_TC] = { + .name = "tc", + .help = "MPLS Traffic Class", + .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls, + label_tc_s, + "\x00\x00\x0e")), + }, + [ITEM_MPLS_S] = { + .name = "s", + .help = "MPLS Bottom-of-Stack", + .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls, + label_tc_s, + "\x00\x00\x01")), + }, + [ITEM_GRE] = { + .name = "gre", + .help = "match GRE header", + .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)), + .next = NEXT(item_gre), + .call = parse_vc, + }, + [ITEM_GRE_PROTO] = { + .name = "protocol", + .help = "GRE protocol type", + .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre, + protocol)), + }, + [ITEM_GRE_C_RSVD0_VER] = { + .name = "c_rsvd0_ver", + .help = + "checksum (1b), undefined (1b), key bit (1b)," + " sequence number (1b), reserved 0 (9b)," + " version (3b)", + .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre, + c_rsvd0_ver)), + }, + [ITEM_GRE_C_BIT] = { + .name = "c_bit", + .help = "checksum bit (C)", + .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre, + c_rsvd0_ver, + "\x80\x00\x00\x00")), + }, + [ITEM_GRE_S_BIT] = { + .name = "s_bit", + .help = "sequence number bit (S)", + .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre, + c_rsvd0_ver, + "\x10\x00\x00\x00")), + }, + [ITEM_GRE_K_BIT] = { + .name = "k_bit", + .help = "key bit (K)", + .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre, + c_rsvd0_ver, + "\x20\x00\x00\x00")), + }, + [ITEM_FUZZY] = { + .name = "fuzzy", + .help = "fuzzy pattern match, expect faster than default", + .priv = PRIV_ITEM(FUZZY, + sizeof(struct rte_flow_item_fuzzy)), + .next = NEXT(item_fuzzy), + .call = parse_vc, + }, + [ITEM_FUZZY_THRESH] = { + .name = "thresh", + .help = "match accuracy threshold", + .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy, + thresh)), + }, + [ITEM_GTP] = { + .name = 
"gtp", + .help = "match GTP header", + .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)), + .next = NEXT(item_gtp), + .call = parse_vc, + }, + [ITEM_GTP_FLAGS] = { + .name = "v_pt_rsv_flags", + .help = "GTP flags", + .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp, + v_pt_rsv_flags)), + }, + [ITEM_GTP_MSG_TYPE] = { + .name = "msg_type", + .help = "GTP message type", + .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp, msg_type)), + }, + [ITEM_GTP_TEID] = { + .name = "teid", + .help = "tunnel endpoint identifier", + .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)), + }, + [ITEM_GTPC] = { + .name = "gtpc", + .help = "match GTP header", + .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)), + .next = NEXT(item_gtp), + .call = parse_vc, + }, + [ITEM_GTPU] = { + .name = "gtpu", + .help = "match GTP header", + .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)), + .next = NEXT(item_gtp), + .call = parse_vc, + }, + [ITEM_GENEVE] = { + .name = "geneve", + .help = "match GENEVE header", + .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), + .next = NEXT(item_geneve), + .call = parse_vc, + }, + [ITEM_GENEVE_VNI] = { + .name = "vni", + .help = "virtual network identifier", + .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)), + }, + [ITEM_GENEVE_PROTO] = { + .name = "protocol", + .help = "GENEVE protocol type", + .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, + protocol)), + }, + [ITEM_VXLAN_GPE] = { + .name = "vxlan-gpe", + .help = "match VXLAN-GPE header", + .priv = PRIV_ITEM(VXLAN_GPE, + sizeof(struct rte_flow_item_vxlan_gpe)), + .next = NEXT(item_vxlan_gpe), + .call = parse_vc, + }, + [ITEM_VXLAN_GPE_VNI] = { + .name = "vni", + .help = "VXLAN-GPE identifier", + .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe, + vni)), + }, + [ITEM_ARP_ETH_IPV4] = { + .name = "arp_eth_ipv4", + .help = "match ARP header for Ethernet/IPv4", + .priv = PRIV_ITEM(ARP_ETH_IPV4, + sizeof(struct rte_flow_item_arp_eth_ipv4)), + .next = NEXT(item_arp_eth_ipv4), + .call = parse_vc, + }, + [ITEM_ARP_ETH_IPV4_SHA] = { + .name = "sha", + .help = "sender hardware address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + sha)), + }, + [ITEM_ARP_ETH_IPV4_SPA] = { + .name = "spa", + .help = "sender IPv4 address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + spa)), + }, + [ITEM_ARP_ETH_IPV4_THA] = { + .name = "tha", + .help = "target hardware address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + tha)), + }, + [ITEM_ARP_ETH_IPV4_TPA] = { + .name = "tpa", + .help = "target IPv4 address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + tpa)), + }, + [ITEM_IPV6_EXT] = { + .name = "ipv6_ext", + .help = "match presence of any IPv6 extension header", + .priv = PRIV_ITEM(IPV6_EXT, + sizeof(struct rte_flow_item_ipv6_ext)), + .next 
= NEXT(item_ipv6_ext), + .call = parse_vc, + }, + [ITEM_IPV6_EXT_NEXT_HDR] = { + .name = "next_hdr", + .help = "next header", + .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext, + next_hdr)), + }, + [ITEM_ICMP6] = { + .name = "icmp6", + .help = "match any ICMPv6 header", + .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)), + .next = NEXT(item_icmp6), + .call = parse_vc, + }, + [ITEM_ICMP6_TYPE] = { + .name = "type", + .help = "ICMPv6 type", + .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6, + type)), + }, + [ITEM_ICMP6_CODE] = { + .name = "code", + .help = "ICMPv6 code", + .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6, + code)), + }, + [ITEM_ICMP6_ND_NS] = { + .name = "icmp6_nd_ns", + .help = "match ICMPv6 neighbor discovery solicitation", + .priv = PRIV_ITEM(ICMP6_ND_NS, + sizeof(struct rte_flow_item_icmp6_nd_ns)), + .next = NEXT(item_icmp6_nd_ns), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_NS_TARGET_ADDR] = { + .name = "target_addr", + .help = "target address", + .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns, + target_addr)), + }, + [ITEM_ICMP6_ND_NA] = { + .name = "icmp6_nd_na", + .help = "match ICMPv6 neighbor discovery advertisement", + .priv = PRIV_ITEM(ICMP6_ND_NA, + sizeof(struct rte_flow_item_icmp6_nd_na)), + .next = NEXT(item_icmp6_nd_na), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_NA_TARGET_ADDR] = { + .name = "target_addr", + .help = "target address", + .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na, + target_addr)), + }, + [ITEM_ICMP6_ND_OPT] = { + .name = "icmp6_nd_opt", + .help = "match presence of any ICMPv6 neighbor discovery" + " option", + .priv = PRIV_ITEM(ICMP6_ND_OPT, + sizeof(struct rte_flow_item_icmp6_nd_opt)), + .next = NEXT(item_icmp6_nd_opt), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_OPT_TYPE] = { + .name = "type", + .help = "ND option type", + .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt, + type)), + }, + [ITEM_ICMP6_ND_OPT_SLA_ETH] = { + .name = "icmp6_nd_opt_sla_eth", + .help = "match ICMPv6 neighbor discovery source Ethernet" + " link-layer address option", + .priv = PRIV_ITEM + (ICMP6_ND_OPT_SLA_ETH, + sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)), + .next = NEXT(item_icmp6_nd_opt_sla_eth), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = { + .name = "sla", + .help = "source Ethernet LLA", + .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)), + }, + [ITEM_ICMP6_ND_OPT_TLA_ETH] = { + .name = "icmp6_nd_opt_tla_eth", + .help = "match ICMPv6 neighbor discovery target Ethernet" + " link-layer address option", + .priv = PRIV_ITEM + (ICMP6_ND_OPT_TLA_ETH, + sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)), + .next = NEXT(item_icmp6_nd_opt_tla_eth), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = { + .name = "tla", + .help = "target Ethernet LLA", + .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)), + }, + [ITEM_META] = { + .name = "meta", + 
.help = "match metadata header", + .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)), + .next = NEXT(item_meta), + .call = parse_vc, + }, + [ITEM_META_DATA] = { + .name = "data", + .help = "metadata value", + .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK(struct rte_flow_item_meta, + data, "\xff\xff\xff\xff")), + }, + [ITEM_GRE_KEY] = { + .name = "gre_key", + .help = "match GRE key", + .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)), + .next = NEXT(item_gre_key), + .call = parse_vc, + }, + [ITEM_GRE_KEY_VALUE] = { + .name = "value", + .help = "key value", + .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)), + }, + [ITEM_GTP_PSC] = { + .name = "gtp_psc", + .help = "match GTP extension header with type 0x85", + .priv = PRIV_ITEM(GTP_PSC, + sizeof(struct rte_flow_item_gtp_psc)), + .next = NEXT(item_gtp_psc), + .call = parse_vc, + }, + [ITEM_GTP_PSC_QFI] = { + .name = "qfi", + .help = "QoS flow identifier", + .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc, + qfi)), + }, + [ITEM_GTP_PSC_PDU_T] = { + .name = "pdu_t", + .help = "PDU type", + .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc, + pdu_type)), + }, + [ITEM_PPPOES] = { + .name = "pppoes", + .help = "match PPPoE session header", + .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)), + .next = NEXT(item_pppoes), + .call = parse_vc, + }, + [ITEM_PPPOED] = { + .name = "pppoed", + .help = "match PPPoE discovery header", + .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)), + .next = NEXT(item_pppoed), + .call = parse_vc, + }, + [ITEM_PPPOE_SEID] = { + .name = "seid", + .help = "session identifier", + .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe, + session_id)), + }, + [ITEM_PPPOE_PROTO_ID] = { + .name = "pppoe_proto_id", + .help = "match PPPoE session protocol identifier", + .priv = PRIV_ITEM(PPPOE_PROTO_ID, + sizeof(struct rte_flow_item_pppoe_proto_id)), + .next = NEXT(item_pppoe_proto_id, NEXT_ENTRY(UNSIGNED), + item_param), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_item_pppoe_proto_id, proto_id)), + .call = parse_vc, + }, + [ITEM_HIGIG2] = { + .name = "higig2", + .help = "matches higig2 header", + .priv = PRIV_ITEM(HIGIG2, + sizeof(struct rte_flow_item_higig2_hdr)), + .next = NEXT(item_higig2), + .call = parse_vc, + }, + [ITEM_HIGIG2_CLASSIFICATION] = { + .name = "classification", + .help = "matches classification of higig2 header", + .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr, + hdr.ppt1.classification)), + }, + [ITEM_HIGIG2_VID] = { + .name = "vid", + .help = "matches vid of higig2 header", + .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr, + hdr.ppt1.vid)), + }, + [ITEM_TAG] = { + .name = "tag", + .help = "match tag value", + .priv = PRIV_ITEM(TAG, sizeof(struct rte_flow_item_tag)), + .next = NEXT(item_tag), + .call = parse_vc, + }, + [ITEM_TAG_DATA] = { + .name = "data", + .help = "tag value to match", + .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, data)), + }, + [ITEM_TAG_INDEX] = { + .name = "index", + .help = "index of tag array to match", + .next = NEXT(item_tag, 
NEXT_ENTRY(UNSIGNED), + NEXT_ENTRY(ITEM_PARAM_IS)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, index)), + }, + [ITEM_L2TPV3OIP] = { + .name = "l2tpv3oip", + .help = "match L2TPv3 over IP header", + .priv = PRIV_ITEM(L2TPV3OIP, + sizeof(struct rte_flow_item_l2tpv3oip)), + .next = NEXT(item_l2tpv3oip), + .call = parse_vc, + }, + [ITEM_L2TPV3OIP_SESSION_ID] = { + .name = "session_id", + .help = "session identifier", + .next = NEXT(item_l2tpv3oip, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_l2tpv3oip, + session_id)), + }, + [ITEM_ESP] = { + .name = "esp", + .help = "match ESP header", + .priv = PRIV_ITEM(ESP, sizeof(struct rte_flow_item_esp)), + .next = NEXT(item_esp), + .call = parse_vc, + }, + [ITEM_ESP_SPI] = { + .name = "spi", + .help = "security policy index", + .next = NEXT(item_esp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_esp, + hdr.spi)), + }, + [ITEM_AH] = { + .name = "ah", + .help = "match AH header", + .priv = PRIV_ITEM(AH, sizeof(struct rte_flow_item_ah)), + .next = NEXT(item_ah), + .call = parse_vc, + }, + [ITEM_AH_SPI] = { + .name = "spi", + .help = "security parameters index", + .next = NEXT(item_ah, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ah, spi)), + }, + [ITEM_PFCP] = { + .name = "pfcp", + .help = "match pfcp header", + .priv = PRIV_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)), + .next = NEXT(item_pfcp), + .call = parse_vc, + }, + [ITEM_PFCP_S_FIELD] = { + .name = "s_field", + .help = "S field", + .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp, + s_field)), + }, + [ITEM_PFCP_SEID] = { + .name = "seid", + .help = "session endpoint identifier", + .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp, seid)), + }, + /* Validate/create actions. 
*/ + [ACTIONS] = { + .name = "actions", + .help = "submit a list of associated actions", + .next = NEXT(next_action), + .call = parse_vc, + }, + [ACTION_NEXT] = { + .name = "/", + .help = "specify next action", + .next = NEXT(next_action), + }, + [ACTION_END] = { + .name = "end", + .help = "end list of actions", + .priv = PRIV_ACTION(END, 0), + .call = parse_vc, + }, + [ACTION_VOID] = { + .name = "void", + .help = "no-op action", + .priv = PRIV_ACTION(VOID, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_PASSTHRU] = { + .name = "passthru", + .help = "let subsequent rule process matched packets", + .priv = PRIV_ACTION(PASSTHRU, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_JUMP] = { + .name = "jump", + .help = "redirect traffic to a given group", + .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)), + .next = NEXT(action_jump), + .call = parse_vc, + }, + [ACTION_JUMP_GROUP] = { + .name = "group", + .help = "group to redirect traffic to", + .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)), + .call = parse_vc_conf, + }, + [ACTION_MARK] = { + .name = "mark", + .help = "attach 32 bit value to packets", + .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)), + .next = NEXT(action_mark), + .call = parse_vc, + }, + [ACTION_MARK_ID] = { + .name = "id", + .help = "32 bit value to return with packets", + .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)), + .call = parse_vc_conf, + }, + [ACTION_FLAG] = { + .name = "flag", + .help = "flag packets", + .priv = PRIV_ACTION(FLAG, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_QUEUE] = { + .name = "queue", + .help = "assign packets to a given queue index", + .priv = PRIV_ACTION(QUEUE, + sizeof(struct rte_flow_action_queue)), + .next = NEXT(action_queue), + .call = parse_vc, + }, + [ACTION_QUEUE_INDEX] = { + .name = "index", + .help = "queue index to use", + .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)), + .call = parse_vc_conf, + }, + [ACTION_DROP] = { + .name = "drop", + .help = "drop packets (note: passthru has priority)", + .priv = PRIV_ACTION(DROP, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_COUNT] = { + .name = "count", + .help = "enable counters for this rule", + .priv = PRIV_ACTION(COUNT, + sizeof(struct rte_flow_action_count)), + .next = NEXT(action_count), + .call = parse_vc, + }, + [ACTION_COUNT_ID] = { + .name = "identifier", + .help = "counter identifier to use", + .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)), + .call = parse_vc_conf, + }, + [ACTION_COUNT_SHARED] = { + .name = "shared", + .help = "shared counter", + .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count, + shared, 1)), + .call = parse_vc_conf, + }, + [ACTION_RSS] = { + .name = "rss", + .help = "spread packets among several queues", + .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)), + .next = NEXT(action_rss), + .call = parse_vc_action_rss, + }, + [ACTION_RSS_FUNC] = { + .name = "func", + .help = "RSS hash function to apply", + .next = NEXT(action_rss, + NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT, + ACTION_RSS_FUNC_TOEPLITZ, + ACTION_RSS_FUNC_SIMPLE_XOR, + ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)), + }, + 
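The ACTION_COUNT_SHARED entry above targets a one-bit field through ARGS_ENTRY_BF, which supplies a whole-structure mask whose only set bits belong to that field instead of an offset/size pair. A minimal sketch of the same idea, assuming a stand-in count_conf type rather than rte_flow_action_count, and using a byte-wise OR as a rough approximation of the parser's actual mask handling:

#include <stdio.h>
#include <string.h>

/* Stand-in for a configuration with a one-bit flag, similar in shape to
 * the "shared" bit of rte_flow_action_count. Not a testpmd type. */
struct count_conf { unsigned int shared:1; unsigned int reserved:31; unsigned int id; };

int main(void)
{
	/* Equivalent of ARGS_ENTRY_BF(struct count_conf, shared, 1): a constant
	 * object whose only set bits are those of the targeted bit-field. */
	static const struct count_conf mask = { .shared = (1u << 1) - 1 };
	struct count_conf conf;
	unsigned char *dst = (unsigned char *)&conf;
	const unsigned char *src = (const unsigned char *)&mask;
	size_t i;

	memset(&conf, 0, sizeof(conf));
	/* Writing boolean 1 through the mask amounts to OR-ing the mask bytes
	 * into the object; the real parser shifts value bits under the mask. */
	for (i = 0; i < sizeof(conf); i++)
		dst[i] |= src[i];

	printf("shared=%u id=%u\n", (unsigned int)conf.shared, conf.id);
	return 0;
}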
[ACTION_RSS_FUNC_DEFAULT] = { + .name = "default", + .help = "default hash function", + .call = parse_vc_action_rss_func, + }, + [ACTION_RSS_FUNC_TOEPLITZ] = { + .name = "toeplitz", + .help = "Toeplitz hash function", + .call = parse_vc_action_rss_func, + }, + [ACTION_RSS_FUNC_SIMPLE_XOR] = { + .name = "simple_xor", + .help = "simple XOR hash function", + .call = parse_vc_action_rss_func, + }, + [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = { + .name = "symmetric_toeplitz", + .help = "Symmetric Toeplitz hash function", + .call = parse_vc_action_rss_func, + }, + [ACTION_RSS_LEVEL] = { + .name = "level", + .help = "encapsulation level for \"types\"", + .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_ARB + (offsetof(struct action_rss_data, conf) + + offsetof(struct rte_flow_action_rss, level), + sizeof(((struct rte_flow_action_rss *)0)-> + level))), + }, + [ACTION_RSS_TYPES] = { + .name = "types", + .help = "specific RSS hash types", + .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)), + }, + [ACTION_RSS_TYPE] = { + .name = "{type}", + .help = "RSS hash type", + .call = parse_vc_action_rss_type, + .comp = comp_vc_action_rss_type, + }, + [ACTION_RSS_KEY] = { + .name = "key", + .help = "RSS hash key", + .next = NEXT(action_rss, NEXT_ENTRY(HEX)), + .args = ARGS(ARGS_ENTRY_ARB(0, 0), + ARGS_ENTRY_ARB + (offsetof(struct action_rss_data, conf) + + offsetof(struct rte_flow_action_rss, key_len), + sizeof(((struct rte_flow_action_rss *)0)-> + key_len)), + ARGS_ENTRY(struct action_rss_data, key)), + }, + [ACTION_RSS_KEY_LEN] = { + .name = "key_len", + .help = "RSS hash key length in bytes", + .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_ARB_BOUNDED + (offsetof(struct action_rss_data, conf) + + offsetof(struct rte_flow_action_rss, key_len), + sizeof(((struct rte_flow_action_rss *)0)-> + key_len), + 0, + RSS_HASH_KEY_LENGTH)), + }, + [ACTION_RSS_QUEUES] = { + .name = "queues", + .help = "queue indices to use", + .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)), + .call = parse_vc_conf, + }, + [ACTION_RSS_QUEUE] = { + .name = "{queue}", + .help = "queue index", + .call = parse_vc_action_rss_queue, + .comp = comp_vc_action_rss_queue, + }, + [ACTION_PF] = { + .name = "pf", + .help = "direct traffic to physical function", + .priv = PRIV_ACTION(PF, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_VF] = { + .name = "vf", + .help = "direct traffic to a virtual function ID", + .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)), + .next = NEXT(action_vf), + .call = parse_vc, + }, + [ACTION_VF_ORIGINAL] = { + .name = "original", + .help = "use original VF ID if possible", + .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf, + original, 1)), + .call = parse_vc_conf, + }, + [ACTION_VF_ID] = { + .name = "id", + .help = "VF ID", + .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)), + .call = parse_vc_conf, + }, + [ACTION_PHY_PORT] = { + .name = "phy_port", + .help = "direct packets to physical port index", + .priv = PRIV_ACTION(PHY_PORT, + sizeof(struct rte_flow_action_phy_port)), + .next = NEXT(action_phy_port), + .call = parse_vc, + }, + [ACTION_PHY_PORT_ORIGINAL] = { + .name = "original", + .help = "use original port index if possible", + .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port, + original, 1)), + .call = parse_vc_conf, + }, + 
[ACTION_PHY_PORT_INDEX] = { + .name = "index", + .help = "physical port index", + .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port, + index)), + .call = parse_vc_conf, + }, + [ACTION_PORT_ID] = { + .name = "port_id", + .help = "direct matching traffic to a given DPDK port ID", + .priv = PRIV_ACTION(PORT_ID, + sizeof(struct rte_flow_action_port_id)), + .next = NEXT(action_port_id), + .call = parse_vc, + }, + [ACTION_PORT_ID_ORIGINAL] = { + .name = "original", + .help = "use original DPDK port ID if possible", + .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id, + original, 1)), + .call = parse_vc_conf, + }, + [ACTION_PORT_ID_ID] = { + .name = "id", + .help = "DPDK port ID", + .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)), + .call = parse_vc_conf, + }, + [ACTION_METER] = { + .name = "meter", + .help = "meter the directed packets at given id", + .priv = PRIV_ACTION(METER, + sizeof(struct rte_flow_action_meter)), + .next = NEXT(action_meter), + .call = parse_vc, + }, + [ACTION_METER_ID] = { + .name = "mtr_id", + .help = "meter id to use", + .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)), + .call = parse_vc_conf, + }, + [ACTION_OF_SET_MPLS_TTL] = { + .name = "of_set_mpls_ttl", + .help = "OpenFlow's OFPAT_SET_MPLS_TTL", + .priv = PRIV_ACTION + (OF_SET_MPLS_TTL, + sizeof(struct rte_flow_action_of_set_mpls_ttl)), + .next = NEXT(action_of_set_mpls_ttl), + .call = parse_vc, + }, + [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = { + .name = "mpls_ttl", + .help = "MPLS TTL", + .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl, + mpls_ttl)), + .call = parse_vc_conf, + }, + [ACTION_OF_DEC_MPLS_TTL] = { + .name = "of_dec_mpls_ttl", + .help = "OpenFlow's OFPAT_DEC_MPLS_TTL", + .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_SET_NW_TTL] = { + .name = "of_set_nw_ttl", + .help = "OpenFlow's OFPAT_SET_NW_TTL", + .priv = PRIV_ACTION + (OF_SET_NW_TTL, + sizeof(struct rte_flow_action_of_set_nw_ttl)), + .next = NEXT(action_of_set_nw_ttl), + .call = parse_vc, + }, + [ACTION_OF_SET_NW_TTL_NW_TTL] = { + .name = "nw_ttl", + .help = "IP TTL", + .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl, + nw_ttl)), + .call = parse_vc_conf, + }, + [ACTION_OF_DEC_NW_TTL] = { + .name = "of_dec_nw_ttl", + .help = "OpenFlow's OFPAT_DEC_NW_TTL", + .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_COPY_TTL_OUT] = { + .name = "of_copy_ttl_out", + .help = "OpenFlow's OFPAT_COPY_TTL_OUT", + .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_COPY_TTL_IN] = { + .name = "of_copy_ttl_in", + .help = "OpenFlow's OFPAT_COPY_TTL_IN", + .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_POP_VLAN] = { + .name = "of_pop_vlan", + .help = "OpenFlow's OFPAT_POP_VLAN", + .priv = PRIV_ACTION(OF_POP_VLAN, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_PUSH_VLAN] = { + .name = "of_push_vlan", + .help = "OpenFlow's OFPAT_PUSH_VLAN", + .priv = PRIV_ACTION + 
(OF_PUSH_VLAN, + sizeof(struct rte_flow_action_of_push_vlan)), + .next = NEXT(action_of_push_vlan), + .call = parse_vc, + }, + [ACTION_OF_PUSH_VLAN_ETHERTYPE] = { + .name = "ethertype", + .help = "EtherType", + .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_push_vlan, + ethertype)), + .call = parse_vc_conf, + }, + [ACTION_OF_SET_VLAN_VID] = { + .name = "of_set_vlan_vid", + .help = "OpenFlow's OFPAT_SET_VLAN_VID", + .priv = PRIV_ACTION + (OF_SET_VLAN_VID, + sizeof(struct rte_flow_action_of_set_vlan_vid)), + .next = NEXT(action_of_set_vlan_vid), + .call = parse_vc, + }, + [ACTION_OF_SET_VLAN_VID_VLAN_VID] = { + .name = "vlan_vid", + .help = "VLAN id", + .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_set_vlan_vid, + vlan_vid)), + .call = parse_vc_conf, + }, + [ACTION_OF_SET_VLAN_PCP] = { + .name = "of_set_vlan_pcp", + .help = "OpenFlow's OFPAT_SET_VLAN_PCP", + .priv = PRIV_ACTION + (OF_SET_VLAN_PCP, + sizeof(struct rte_flow_action_of_set_vlan_pcp)), + .next = NEXT(action_of_set_vlan_pcp), + .call = parse_vc, + }, + [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = { + .name = "vlan_pcp", + .help = "VLAN priority", + .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_set_vlan_pcp, + vlan_pcp)), + .call = parse_vc_conf, + }, + [ACTION_OF_POP_MPLS] = { + .name = "of_pop_mpls", + .help = "OpenFlow's OFPAT_POP_MPLS", + .priv = PRIV_ACTION(OF_POP_MPLS, + sizeof(struct rte_flow_action_of_pop_mpls)), + .next = NEXT(action_of_pop_mpls), + .call = parse_vc, + }, + [ACTION_OF_POP_MPLS_ETHERTYPE] = { + .name = "ethertype", + .help = "EtherType", + .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_pop_mpls, + ethertype)), + .call = parse_vc_conf, + }, + [ACTION_OF_PUSH_MPLS] = { + .name = "of_push_mpls", + .help = "OpenFlow's OFPAT_PUSH_MPLS", + .priv = PRIV_ACTION + (OF_PUSH_MPLS, + sizeof(struct rte_flow_action_of_push_mpls)), + .next = NEXT(action_of_push_mpls), + .call = parse_vc, + }, + [ACTION_OF_PUSH_MPLS_ETHERTYPE] = { + .name = "ethertype", + .help = "EtherType", + .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_push_mpls, + ethertype)), + .call = parse_vc_conf, + }, + [ACTION_VXLAN_ENCAP] = { + .name = "vxlan_encap", + .help = "VXLAN encapsulation, uses configuration set by \"set" + " vxlan\"", + .priv = PRIV_ACTION(VXLAN_ENCAP, + sizeof(struct action_vxlan_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_vxlan_encap, + }, + [ACTION_VXLAN_DECAP] = { + .name = "vxlan_decap", + .help = "Performs a decapsulation action by stripping all" + " headers of the VXLAN tunnel network overlay from the" + " matched flow.", + .priv = PRIV_ACTION(VXLAN_DECAP, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_NVGRE_ENCAP] = { + .name = "nvgre_encap", + .help = "NVGRE encapsulation, uses configuration set by \"set" + " nvgre\"", + .priv = PRIV_ACTION(NVGRE_ENCAP, + sizeof(struct action_nvgre_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_nvgre_encap, + }, + [ACTION_NVGRE_DECAP] = { + .name = "nvgre_decap", + .help = "Performs a decapsulation action by stripping all" + " headers of the NVGRE tunnel network overlay from the" + " matched flow.", + .priv = PRIV_ACTION(NVGRE_DECAP, 0), + .next = 
NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_L2_ENCAP] = { + .name = "l2_encap", + .help = "l2 encap, uses configuration set by" + " \"set l2_encap\"", + .priv = PRIV_ACTION(RAW_ENCAP, + sizeof(struct action_raw_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_l2_encap, + }, + [ACTION_L2_DECAP] = { + .name = "l2_decap", + .help = "l2 decap, uses configuration set by" + " \"set l2_decap\"", + .priv = PRIV_ACTION(RAW_DECAP, + sizeof(struct action_raw_decap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_l2_decap, + }, + [ACTION_MPLSOGRE_ENCAP] = { + .name = "mplsogre_encap", + .help = "mplsogre encapsulation, uses configuration set by" + " \"set mplsogre_encap\"", + .priv = PRIV_ACTION(RAW_ENCAP, + sizeof(struct action_raw_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_mplsogre_encap, + }, + [ACTION_MPLSOGRE_DECAP] = { + .name = "mplsogre_decap", + .help = "mplsogre decapsulation, uses configuration set by" + " \"set mplsogre_decap\"", + .priv = PRIV_ACTION(RAW_DECAP, + sizeof(struct action_raw_decap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_mplsogre_decap, + }, + [ACTION_MPLSOUDP_ENCAP] = { + .name = "mplsoudp_encap", + .help = "mplsoudp encapsulation, uses configuration set by" + " \"set mplsoudp_encap\"", + .priv = PRIV_ACTION(RAW_ENCAP, + sizeof(struct action_raw_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_mplsoudp_encap, + }, + [ACTION_MPLSOUDP_DECAP] = { + .name = "mplsoudp_decap", + .help = "mplsoudp decapsulation, uses configuration set by" + " \"set mplsoudp_decap\"", + .priv = PRIV_ACTION(RAW_DECAP, + sizeof(struct action_raw_decap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_mplsoudp_decap, + }, + [ACTION_SET_IPV4_SRC] = { + .name = "set_ipv4_src", + .help = "Set a new IPv4 source address in the outermost" + " IPv4 header", + .priv = PRIV_ACTION(SET_IPV4_SRC, + sizeof(struct rte_flow_action_set_ipv4)), + .next = NEXT(action_set_ipv4_src), + .call = parse_vc, + }, + [ACTION_SET_IPV4_SRC_IPV4_SRC] = { + .name = "ipv4_addr", + .help = "new IPv4 source address to set", + .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ipv4, ipv4_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_IPV4_DST] = { + .name = "set_ipv4_dst", + .help = "Set a new IPv4 destination address in the outermost" + " IPv4 header", + .priv = PRIV_ACTION(SET_IPV4_DST, + sizeof(struct rte_flow_action_set_ipv4)), + .next = NEXT(action_set_ipv4_dst), + .call = parse_vc, + }, + [ACTION_SET_IPV4_DST_IPV4_DST] = { + .name = "ipv4_addr", + .help = "new IPv4 destination address to set", + .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ipv4, ipv4_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_IPV6_SRC] = { + .name = "set_ipv6_src", + .help = "Set a new IPv6 source address in the outermost" + " IPv6 header", + .priv = PRIV_ACTION(SET_IPV6_SRC, + sizeof(struct rte_flow_action_set_ipv6)), + .next = NEXT(action_set_ipv6_src), + .call = parse_vc, + }, + [ACTION_SET_IPV6_SRC_IPV6_SRC] = { + .name = "ipv6_addr", + .help = "new IPv6 source address to set", + .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ipv6, ipv6_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_IPV6_DST] = { + .name = "set_ipv6_dst", + 
.help = "Set a new IPv6 destination address in the outermost" + " IPv6 header", + .priv = PRIV_ACTION(SET_IPV6_DST, + sizeof(struct rte_flow_action_set_ipv6)), + .next = NEXT(action_set_ipv6_dst), + .call = parse_vc, + }, + [ACTION_SET_IPV6_DST_IPV6_DST] = { + .name = "ipv6_addr", + .help = "new IPv6 destination address to set", + .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ipv6, ipv6_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_TP_SRC] = { + .name = "set_tp_src", + .help = "set a new source port number in the outermost" + " TCP/UDP header", + .priv = PRIV_ACTION(SET_TP_SRC, + sizeof(struct rte_flow_action_set_tp)), + .next = NEXT(action_set_tp_src), + .call = parse_vc, + }, + [ACTION_SET_TP_SRC_TP_SRC] = { + .name = "port", + .help = "new source port number to set", + .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_tp, port)), + .call = parse_vc_conf, + }, + [ACTION_SET_TP_DST] = { + .name = "set_tp_dst", + .help = "set a new destination port number in the outermost" + " TCP/UDP header", + .priv = PRIV_ACTION(SET_TP_DST, + sizeof(struct rte_flow_action_set_tp)), + .next = NEXT(action_set_tp_dst), + .call = parse_vc, + }, + [ACTION_SET_TP_DST_TP_DST] = { + .name = "port", + .help = "new destination port number to set", + .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_tp, port)), + .call = parse_vc_conf, + }, + [ACTION_MAC_SWAP] = { + .name = "mac_swap", + .help = "Swap the source and destination MAC addresses" + " in the outermost Ethernet header", + .priv = PRIV_ACTION(MAC_SWAP, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_DEC_TTL] = { + .name = "dec_ttl", + .help = "decrease network TTL if available", + .priv = PRIV_ACTION(DEC_TTL, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_SET_TTL] = { + .name = "set_ttl", + .help = "set ttl value", + .priv = PRIV_ACTION(SET_TTL, + sizeof(struct rte_flow_action_set_ttl)), + .next = NEXT(action_set_ttl), + .call = parse_vc, + }, + [ACTION_SET_TTL_TTL] = { + .name = "ttl_value", + .help = "new ttl value to set", + .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ttl, ttl_value)), + .call = parse_vc_conf, + }, + [ACTION_SET_MAC_SRC] = { + .name = "set_mac_src", + .help = "set source mac address", + .priv = PRIV_ACTION(SET_MAC_SRC, + sizeof(struct rte_flow_action_set_mac)), + .next = NEXT(action_set_mac_src), + .call = parse_vc, + }, + [ACTION_SET_MAC_SRC_MAC_SRC] = { + .name = "mac_addr", + .help = "new source mac address", + .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_mac, mac_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_MAC_DST] = { + .name = "set_mac_dst", + .help = "set destination mac address", + .priv = PRIV_ACTION(SET_MAC_DST, + sizeof(struct rte_flow_action_set_mac)), + .next = NEXT(action_set_mac_dst), + .call = parse_vc, + }, + [ACTION_SET_MAC_DST_MAC_DST] = { + .name = "mac_addr", + .help = "new destination mac address to set", + .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_mac, mac_addr)), + .call = parse_vc_conf, + }, + [ACTION_INC_TCP_SEQ] = { + .name = "inc_tcp_seq", + .help = "increase TCP sequence number", + .priv = PRIV_ACTION(INC_TCP_SEQ, 
sizeof(rte_be32_t)), + .next = NEXT(action_inc_tcp_seq), + .call = parse_vc, + }, + [ACTION_INC_TCP_SEQ_VALUE] = { + .name = "value", + .help = "the value to increase TCP sequence number by", + .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)), + .call = parse_vc_conf, + }, + [ACTION_DEC_TCP_SEQ] = { + .name = "dec_tcp_seq", + .help = "decrease TCP sequence number", + .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)), + .next = NEXT(action_dec_tcp_seq), + .call = parse_vc, + }, + [ACTION_DEC_TCP_SEQ_VALUE] = { + .name = "value", + .help = "the value to decrease TCP sequence number by", + .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)), + .call = parse_vc_conf, + }, + [ACTION_INC_TCP_ACK] = { + .name = "inc_tcp_ack", + .help = "increase TCP acknowledgment number", + .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)), + .next = NEXT(action_inc_tcp_ack), + .call = parse_vc, + }, + [ACTION_INC_TCP_ACK_VALUE] = { + .name = "value", + .help = "the value to increase TCP acknowledgment number by", + .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)), + .call = parse_vc_conf, + }, + [ACTION_DEC_TCP_ACK] = { + .name = "dec_tcp_ack", + .help = "decrease TCP acknowledgment number", + .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)), + .next = NEXT(action_dec_tcp_ack), + .call = parse_vc, + }, + [ACTION_DEC_TCP_ACK_VALUE] = { + .name = "value", + .help = "the value to decrease TCP acknowledgment number by", + .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)), + .call = parse_vc_conf, + }, + [ACTION_RAW_ENCAP] = { + .name = "raw_encap", + .help = "encapsulation data, defined by set raw_encap", + .priv = PRIV_ACTION(RAW_ENCAP, + sizeof(struct action_raw_encap_data)), + .next = NEXT(action_raw_encap), + .call = parse_vc_action_raw_encap, + }, + [ACTION_RAW_ENCAP_INDEX] = { + .name = "index", + .help = "the index of raw_encap_confs", + .next = NEXT(NEXT_ENTRY(ACTION_RAW_ENCAP_INDEX_VALUE)), + }, + [ACTION_RAW_ENCAP_INDEX_VALUE] = { + .name = "{index}", + .type = "UNSIGNED", + .help = "unsigned integer value", + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_raw_encap_index, + .comp = comp_set_raw_index, + }, + [ACTION_RAW_DECAP] = { + .name = "raw_decap", + .help = "decapsulation data, defined by set raw_encap", + .priv = PRIV_ACTION(RAW_DECAP, + sizeof(struct action_raw_decap_data)), + .next = NEXT(action_raw_decap), + .call = parse_vc_action_raw_decap, + }, + [ACTION_RAW_DECAP_INDEX] = { + .name = "index", + .help = "the index of raw_encap_confs", + .next = NEXT(NEXT_ENTRY(ACTION_RAW_DECAP_INDEX_VALUE)), + }, + [ACTION_RAW_DECAP_INDEX_VALUE] = { + .name = "{index}", + .type = "UNSIGNED", + .help = "unsigned integer value", + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_raw_decap_index, + .comp = comp_set_raw_index, + }, + /* Top level command. */ + [SET] = { + .name = "set", + .help = "set raw encap/decap data", + .type = "set raw_encap|raw_decap <index> <pattern>", + .next = NEXT(NEXT_ENTRY + (SET_RAW_ENCAP, + SET_RAW_DECAP)), + .call = parse_set_init, + }, + /* Sub-level commands. 
*/ + [SET_RAW_ENCAP] = { + .name = "raw_encap", + .help = "set raw encap data", + .next = NEXT(next_set_raw), + .args = ARGS(ARGS_ENTRY_ARB_BOUNDED + (offsetof(struct buffer, port), + sizeof(((struct buffer *)0)->port), + 0, RAW_ENCAP_CONFS_MAX_NUM - 1)), + .call = parse_set_raw_encap_decap, + }, + [SET_RAW_DECAP] = { + .name = "raw_decap", + .help = "set raw decap data", + .next = NEXT(next_set_raw), + .args = ARGS(ARGS_ENTRY_ARB_BOUNDED + (offsetof(struct buffer, port), + sizeof(((struct buffer *)0)->port), + 0, RAW_ENCAP_CONFS_MAX_NUM - 1)), + .call = parse_set_raw_encap_decap, + }, + [SET_RAW_INDEX] = { + .name = "{index}", + .type = "UNSIGNED", + .help = "index of raw_encap/raw_decap data", + .next = NEXT(next_item), + .call = parse_port, + }, + [ACTION_SET_TAG] = { + .name = "set_tag", + .help = "set tag", + .priv = PRIV_ACTION(SET_TAG, + sizeof(struct rte_flow_action_set_tag)), + .next = NEXT(action_set_tag), + .call = parse_vc, + }, + [ACTION_SET_TAG_INDEX] = { + .name = "index", + .help = "index of tag array", + .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_set_tag, index)), + .call = parse_vc_conf, + }, + [ACTION_SET_TAG_DATA] = { + .name = "data", + .help = "tag value", + .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY + (struct rte_flow_action_set_tag, data)), + .call = parse_vc_conf, + }, + [ACTION_SET_TAG_MASK] = { + .name = "mask", + .help = "mask for tag value", + .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY + (struct rte_flow_action_set_tag, mask)), + .call = parse_vc_conf, + }, + [ACTION_SET_META] = { + .name = "set_meta", + .help = "set metadata", + .priv = PRIV_ACTION(SET_META, + sizeof(struct rte_flow_action_set_meta)), + .next = NEXT(action_set_meta), + .call = parse_vc_action_set_meta, + }, + [ACTION_SET_META_DATA] = { + .name = "data", + .help = "metadata value", + .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY + (struct rte_flow_action_set_meta, data)), + .call = parse_vc_conf, + }, + [ACTION_SET_META_MASK] = { + .name = "mask", + .help = "mask for metadata value", + .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY + (struct rte_flow_action_set_meta, mask)), + .call = parse_vc_conf, + }, + [ACTION_SET_IPV4_DSCP] = { + .name = "set_ipv4_dscp", + .help = "set DSCP value", + .priv = PRIV_ACTION(SET_IPV4_DSCP, + sizeof(struct rte_flow_action_set_dscp)), + .next = NEXT(action_set_ipv4_dscp), + .call = parse_vc, + }, + [ACTION_SET_IPV4_DSCP_VALUE] = { + .name = "dscp_value", + .help = "new IPv4 DSCP value to set", + .next = NEXT(action_set_ipv4_dscp, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY + (struct rte_flow_action_set_dscp, dscp)), + .call = parse_vc_conf, + }, + [ACTION_SET_IPV6_DSCP] = { + .name = "set_ipv6_dscp", + .help = "set DSCP value", + .priv = PRIV_ACTION(SET_IPV6_DSCP, + sizeof(struct rte_flow_action_set_dscp)), + .next = NEXT(action_set_ipv6_dscp), + .call = parse_vc, + }, + [ACTION_SET_IPV6_DSCP_VALUE] = { + .name = "dscp_value", + .help = "new IPv6 DSCP value to set", + .next = NEXT(action_set_ipv6_dscp, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY + (struct rte_flow_action_set_dscp, dscp)), + .call = parse_vc_conf, + }, + [ACTION_AGE] = { + .name = "age", + .help = "set a specific metadata header", + .next = NEXT(action_age), + .priv = PRIV_ACTION(AGE, + sizeof(struct rte_flow_action_age)), + .call = parse_vc, + }, + [ACTION_AGE_TIMEOUT] = { + .name = "timeout", + 
.help = "flow age timeout value", + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_age, + timeout, 24)), + .next = NEXT(action_age, NEXT_ENTRY(UNSIGNED)), + .call = parse_vc_conf, + }, +}; + +/** Remove and return last entry from argument stack. */ +static const struct arg * +pop_args(struct context *ctx) +{ + return ctx->args_num ? ctx->args[--ctx->args_num] : NULL; +} + +/** Add entry on top of the argument stack. */ +static int +push_args(struct context *ctx, const struct arg *arg) +{ + if (ctx->args_num == CTX_STACK_SIZE) + return -1; + ctx->args[ctx->args_num++] = arg; + return 0; +} + +/** Spread value into buffer according to bit-mask. */ +static size_t +arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg) +{ + uint32_t i = arg->size; + uint32_t end = 0; + int sub = 1; + int add = 0; + size_t len = 0; + + if (!arg->mask) + return 0; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + if (!arg->hton) { + i = 0; + end = arg->size; + sub = 0; + add = 1; + } +#endif + while (i != end) { + unsigned int shift = 0; + uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub); + + for (shift = 0; arg->mask[i] >> shift; ++shift) { + if (!(arg->mask[i] & (1 << shift))) + continue; + ++len; + if (!dst) + continue; + *buf &= ~(1 << shift); + *buf |= (val & 1) << shift; + val >>= 1; + } + i += add; + } + return len; +} + +/** Compare a string with a partial one of a given length. */ +static int +strcmp_partial(const char *full, const char *partial, size_t partial_len) +{ + int r = strncmp(full, partial, partial_len); + + if (r) + return r; + if (strlen(full) <= partial_len) + return 0; + return full[partial_len]; +} + +/** + * Parse a prefix length and generate a bit-mask. + * + * Last argument (ctx->args) is retrieved to determine mask size, storage + * location and whether the result must use network byte ordering. + */ +static int +parse_prefix(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg = pop_args(ctx); + static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff"; + char *end; + uintmax_t u; + unsigned int bytes; + unsigned int extra; + + (void)token; + /* Argument is expected. */ + if (!arg) + return -1; + errno = 0; + u = strtoumax(str, &end, 0); + if (errno || (size_t)(end - str) != len) + goto error; + if (arg->mask) { + uintmax_t v = 0; + + extra = arg_entry_bf_fill(NULL, 0, arg); + if (u > extra) + goto error; + if (!ctx->object) + return len; + extra -= u; + while (u--) + (v <<= 1, v |= 1); + v <<= extra; + if (!arg_entry_bf_fill(ctx->object, v, arg) || + !arg_entry_bf_fill(ctx->objmask, -1, arg)) + goto error; + return len; + } + bytes = u / 8; + extra = u % 8; + size = arg->size; + if (bytes > size || bytes + !!extra > size) + goto error; + if (!ctx->object) + return len; + buf = (uint8_t *)ctx->object + arg->offset; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + if (!arg->hton) { + memset((uint8_t *)buf + size - bytes, 0xff, bytes); + memset(buf, 0x00, size - bytes); + if (extra) + ((uint8_t *)buf)[size - bytes - 1] = conv[extra]; + } else +#endif + { + memset(buf, 0xff, bytes); + memset((uint8_t *)buf + bytes, 0x00, size - bytes); + if (extra) + ((uint8_t *)buf)[bytes] = conv[extra]; + } + if (ctx->objmask) + memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size); + return len; +error: + push_args(ctx, arg); + return -1; +} + +/** Default parsing function for token name matching. 
*/ +static int +parse_default(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + (void)ctx; + (void)buf; + (void)size; + if (strcmp_partial(token->name, str, len)) + return -1; + return len; +} + +/** Parse flow command, initialize output buffer for subsequent tokens. */ +static int +parse_init(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + /* Make sure buffer is large enough. */ + if (size < sizeof(*out)) + return -1; + /* Initialize buffer. */ + memset(out, 0x00, sizeof(*out)); + memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out)); + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + return len; +} + +/** Parse tokens for validate/create commands. */ +static int +parse_vc(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + uint8_t *data; + uint32_t data_size; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != VALIDATE && ctx->curr != CREATE) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + out->args.vc.data = (uint8_t *)out + size; + return len; + } + ctx->objdata = 0; + ctx->object = &out->args.vc.attr; + ctx->objmask = NULL; + switch (ctx->curr) { + case GROUP: + case PRIORITY: + return len; + case INGRESS: + out->args.vc.attr.ingress = 1; + return len; + case EGRESS: + out->args.vc.attr.egress = 1; + return len; + case TRANSFER: + out->args.vc.attr.transfer = 1; + return len; + case PATTERN: + out->args.vc.pattern = + (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1), + sizeof(double)); + ctx->object = out->args.vc.pattern; + ctx->objmask = NULL; + return len; + case ACTIONS: + out->args.vc.actions = + (void *)RTE_ALIGN_CEIL((uintptr_t) + (out->args.vc.pattern + + out->args.vc.pattern_n), + sizeof(double)); + ctx->object = out->args.vc.actions; + ctx->objmask = NULL; + return len; + default: + if (!token->priv) + return -1; + break; + } + if (!out->args.vc.actions) { + const struct parse_item_priv *priv = token->priv; + struct rte_flow_item *item = + out->args.vc.pattern + out->args.vc.pattern_n; + + data_size = priv->size * 3; /* spec, last, mask */ + data = (void *)RTE_ALIGN_FLOOR((uintptr_t) + (out->args.vc.data - data_size), + sizeof(double)); + if ((uint8_t *)item + sizeof(*item) > data) + return -1; + *item = (struct rte_flow_item){ + .type = priv->type, + }; + ++out->args.vc.pattern_n; + ctx->object = item; + ctx->objmask = NULL; + } else { + const struct parse_action_priv *priv = token->priv; + struct rte_flow_action *action = + out->args.vc.actions + out->args.vc.actions_n; + + data_size = priv->size; /* configuration */ + data = (void *)RTE_ALIGN_FLOOR((uintptr_t) + (out->args.vc.data - data_size), + sizeof(double)); + if ((uint8_t *)action + sizeof(*action) > data) + return -1; + *action = (struct rte_flow_action){ + .type = priv->type, + .conf = data_size ? 
data : NULL, + }; + ++out->args.vc.actions_n; + ctx->object = action; + ctx->objmask = NULL; + } + memset(data, 0, data_size); + out->args.vc.data = data; + ctx->objdata = data_size; + return len; +} + +/** Parse pattern item parameter type. */ +static int +parse_vc_spec(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_item *item; + uint32_t data_size; + int index; + int objmask = 0; + + (void)size; + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Parse parameter types. */ + switch (ctx->curr) { + static const enum index prefix[] = NEXT_ENTRY(PREFIX); + + case ITEM_PARAM_IS: + index = 0; + objmask = 1; + break; + case ITEM_PARAM_SPEC: + index = 0; + break; + case ITEM_PARAM_LAST: + index = 1; + break; + case ITEM_PARAM_PREFIX: + /* Modify next token to expect a prefix. */ + if (ctx->next_num < 2) + return -1; + ctx->next[ctx->next_num - 2] = prefix; + /* Fall through. */ + case ITEM_PARAM_MASK: + index = 2; + break; + default: + return -1; + } + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->args.vc.pattern_n) + return -1; + item = &out->args.vc.pattern[out->args.vc.pattern_n - 1]; + data_size = ctx->objdata / 3; /* spec, last, mask */ + /* Point to selected object. */ + ctx->object = out->args.vc.data + (data_size * index); + if (objmask) { + ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */ + item->mask = ctx->objmask; + } else + ctx->objmask = NULL; + /* Update relevant item pointer. */ + *((const void **[]){ &item->spec, &item->last, &item->mask })[index] = + ctx->object; + return len; +} + +/** Parse action configuration field. */ +static int +parse_vc_conf(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + (void)size; + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + return len; +} + +/** Parse RSS action. */ +static int +parse_vc_action_rss(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_rss_data *action_rss_data; + unsigned int i; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Set up default configuration. 
*/ + action_rss_data = ctx->object; + *action_rss_data = (struct action_rss_data){ + .conf = (struct rte_flow_action_rss){ + .func = RTE_ETH_HASH_FUNCTION_DEFAULT, + .level = 0, + .types = rss_hf, + .key_len = sizeof(action_rss_data->key), + .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM), + .key = action_rss_data->key, + .queue = action_rss_data->queue, + }, + .key = "testpmd's default RSS hash key, " + "override it for better balancing", + .queue = { 0 }, + }; + for (i = 0; i < action_rss_data->conf.queue_num; ++i) + action_rss_data->queue[i] = i; + if (!port_id_is_invalid(ctx->port, DISABLED_WARN) && + ctx->port != (portid_t)RTE_PORT_ALL) { + struct rte_eth_dev_info info; + int ret2; + + ret2 = rte_eth_dev_info_get(ctx->port, &info); + if (ret2 != 0) + return ret2; + + action_rss_data->conf.key_len = + RTE_MIN(sizeof(action_rss_data->key), + info.hash_key_size); + } + action->conf = &action_rss_data->conf; + return ret; +} + +/** + * Parse func field for RSS action. + * + * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the + * ACTION_RSS_FUNC_* index that called this function. + */ +static int +parse_vc_action_rss_func(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct action_rss_data *action_rss_data; + enum rte_eth_hash_function func; + + (void)buf; + (void)size; + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + switch (ctx->curr) { + case ACTION_RSS_FUNC_DEFAULT: + func = RTE_ETH_HASH_FUNCTION_DEFAULT; + break; + case ACTION_RSS_FUNC_TOEPLITZ: + func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; + break; + case ACTION_RSS_FUNC_SIMPLE_XOR: + func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR; + break; + case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ: + func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ; + break; + default: + return -1; + } + if (!ctx->object) + return len; + action_rss_data = ctx->object; + action_rss_data->conf.func = func; + return len; +} + +/** + * Parse type field for RSS action. + * + * Valid tokens are type field names and the "end" token. + */ +static int +parse_vc_action_rss_type(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE); + struct action_rss_data *action_rss_data; + unsigned int i; + + (void)token; + (void)buf; + (void)size; + if (ctx->curr != ACTION_RSS_TYPE) + return -1; + if (!(ctx->objdata >> 16) && ctx->object) { + action_rss_data = ctx->object; + action_rss_data->conf.types = 0; + } + if (!strcmp_partial("end", str, len)) { + ctx->objdata &= 0xffff; + return len; + } + for (i = 0; rss_type_table[i].str; ++i) + if (!strcmp_partial(rss_type_table[i].str, str, len)) + break; + if (!rss_type_table[i].str) + return -1; + ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff); + /* Repeat token. */ + if (ctx->next_num == RTE_DIM(ctx->next)) + return -1; + ctx->next[ctx->next_num++] = next; + if (!ctx->object) + return len; + action_rss_data = ctx->object; + action_rss_data->conf.types |= rss_type_table[i].rss_type; + return len; +} + +/** + * Parse queue field for RSS action. + * + * Valid tokens are queue indices and the "end" token. 
+ */ +static int +parse_vc_action_rss_queue(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE); + struct action_rss_data *action_rss_data; + const struct arg *arg; + int ret; + int i; + + (void)token; + (void)buf; + (void)size; + if (ctx->curr != ACTION_RSS_QUEUE) + return -1; + i = ctx->objdata >> 16; + if (!strcmp_partial("end", str, len)) { + ctx->objdata &= 0xffff; + goto end; + } + if (i >= ACTION_RSS_QUEUE_NUM) + return -1; + arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) + + i * sizeof(action_rss_data->queue[i]), + sizeof(action_rss_data->queue[i])); + if (push_args(ctx, arg)) + return -1; + ret = parse_int(ctx, token, str, len, NULL, 0); + if (ret < 0) { + pop_args(ctx); + return -1; + } + ++i; + ctx->objdata = i << 16 | (ctx->objdata & 0xffff); + /* Repeat token. */ + if (ctx->next_num == RTE_DIM(ctx->next)) + return -1; + ctx->next[ctx->next_num++] = next; +end: + if (!ctx->object) + return len; + action_rss_data = ctx->object; + action_rss_data->conf.queue_num = i; + action_rss_data->conf.queue = i ? action_rss_data->queue : NULL; + return len; +} + +/** Parse VXLAN encap action. */ +static int +parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_vxlan_encap_data *action_vxlan_encap_data; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Set up default configuration. 
*/ + action_vxlan_encap_data = ctx->object; + *action_vxlan_encap_data = (struct action_vxlan_encap_data){ + .conf = (struct rte_flow_action_vxlan_encap){ + .definition = action_vxlan_encap_data->items, + }, + .items = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &action_vxlan_encap_data->item_eth, + .mask = &rte_flow_item_eth_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VLAN, + .spec = &action_vxlan_encap_data->item_vlan, + .mask = &rte_flow_item_vlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .spec = &action_vxlan_encap_data->item_ipv4, + .mask = &rte_flow_item_ipv4_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .spec = &action_vxlan_encap_data->item_udp, + .mask = &rte_flow_item_udp_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + .spec = &action_vxlan_encap_data->item_vxlan, + .mask = &rte_flow_item_vxlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }, + .item_eth.type = 0, + .item_vlan = { + .tci = vxlan_encap_conf.vlan_tci, + .inner_type = 0, + }, + .item_ipv4.hdr = { + .src_addr = vxlan_encap_conf.ipv4_src, + .dst_addr = vxlan_encap_conf.ipv4_dst, + }, + .item_udp.hdr = { + .src_port = vxlan_encap_conf.udp_src, + .dst_port = vxlan_encap_conf.udp_dst, + }, + .item_vxlan.flags = 0, + }; + memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes, + vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes, + vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); + if (!vxlan_encap_conf.select_ipv4) { + memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr, + &vxlan_encap_conf.ipv6_src, + sizeof(vxlan_encap_conf.ipv6_src)); + memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr, + &vxlan_encap_conf.ipv6_dst, + sizeof(vxlan_encap_conf.ipv6_dst)); + action_vxlan_encap_data->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &action_vxlan_encap_data->item_ipv6, + .mask = &rte_flow_item_ipv6_mask, + }; + } + if (!vxlan_encap_conf.select_vlan) + action_vxlan_encap_data->items[1].type = + RTE_FLOW_ITEM_TYPE_VOID; + if (vxlan_encap_conf.select_tos_ttl) { + if (vxlan_encap_conf.select_ipv4) { + static struct rte_flow_item_ipv4 ipv4_mask_tos; + + memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask, + sizeof(ipv4_mask_tos)); + ipv4_mask_tos.hdr.type_of_service = 0xff; + ipv4_mask_tos.hdr.time_to_live = 0xff; + action_vxlan_encap_data->item_ipv4.hdr.type_of_service = + vxlan_encap_conf.ip_tos; + action_vxlan_encap_data->item_ipv4.hdr.time_to_live = + vxlan_encap_conf.ip_ttl; + action_vxlan_encap_data->items[2].mask = + &ipv4_mask_tos; + } else { + static struct rte_flow_item_ipv6 ipv6_mask_tos; + + memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask, + sizeof(ipv6_mask_tos)); + ipv6_mask_tos.hdr.vtc_flow |= + RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT); + ipv6_mask_tos.hdr.hop_limits = 0xff; + action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |= + rte_cpu_to_be_32 + ((uint32_t)vxlan_encap_conf.ip_tos << + RTE_IPV6_HDR_TC_SHIFT); + action_vxlan_encap_data->item_ipv6.hdr.hop_limits = + vxlan_encap_conf.ip_ttl; + action_vxlan_encap_data->items[2].mask = + &ipv6_mask_tos; + } + } + memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni, + RTE_DIM(vxlan_encap_conf.vni)); + action->conf = &action_vxlan_encap_data->conf; + return ret; +} + +/** Parse NVGRE encap action. 
*/ +static int +parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_nvgre_encap_data *action_nvgre_encap_data; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Set up default configuration. */ + action_nvgre_encap_data = ctx->object; + *action_nvgre_encap_data = (struct action_nvgre_encap_data){ + .conf = (struct rte_flow_action_nvgre_encap){ + .definition = action_nvgre_encap_data->items, + }, + .items = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &action_nvgre_encap_data->item_eth, + .mask = &rte_flow_item_eth_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VLAN, + .spec = &action_nvgre_encap_data->item_vlan, + .mask = &rte_flow_item_vlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .spec = &action_nvgre_encap_data->item_ipv4, + .mask = &rte_flow_item_ipv4_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_NVGRE, + .spec = &action_nvgre_encap_data->item_nvgre, + .mask = &rte_flow_item_nvgre_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }, + .item_eth.type = 0, + .item_vlan = { + .tci = nvgre_encap_conf.vlan_tci, + .inner_type = 0, + }, + .item_ipv4.hdr = { + .src_addr = nvgre_encap_conf.ipv4_src, + .dst_addr = nvgre_encap_conf.ipv4_dst, + }, + .item_nvgre.flow_id = 0, + }; + memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes, + nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes, + nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); + if (!nvgre_encap_conf.select_ipv4) { + memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr, + &nvgre_encap_conf.ipv6_src, + sizeof(nvgre_encap_conf.ipv6_src)); + memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr, + &nvgre_encap_conf.ipv6_dst, + sizeof(nvgre_encap_conf.ipv6_dst)); + action_nvgre_encap_data->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &action_nvgre_encap_data->item_ipv6, + .mask = &rte_flow_item_ipv6_mask, + }; + } + if (!nvgre_encap_conf.select_vlan) + action_nvgre_encap_data->items[1].type = + RTE_FLOW_ITEM_TYPE_VOID; + memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni, + RTE_DIM(nvgre_encap_conf.tni)); + action->conf = &action_nvgre_encap_data->conf; + return ret; +} + +/** Parse l2 encap action. */ +static int +parse_vc_action_l2_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_raw_encap_data *action_encap_data; + struct rte_flow_item_eth eth = { .type = 0, }; + struct rte_flow_item_vlan vlan = { + .tci = mplsoudp_encap_conf.vlan_tci, + .inner_type = 0, + }; + uint8_t *header; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. 
*/ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Copy the headers to the buffer. */ + action_encap_data = ctx->object; + *action_encap_data = (struct action_raw_encap_data) { + .conf = (struct rte_flow_action_raw_encap){ + .data = action_encap_data->data, + }, + .data = {}, + }; + header = action_encap_data->data; + if (l2_encap_conf.select_vlan) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); + else if (l2_encap_conf.select_ipv4) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + memcpy(eth.dst.addr_bytes, + l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(eth.src.addr_bytes, + l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); + memcpy(header, ð, sizeof(eth)); + header += sizeof(eth); + if (l2_encap_conf.select_vlan) { + if (l2_encap_conf.select_ipv4) + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + memcpy(header, &vlan, sizeof(vlan)); + header += sizeof(vlan); + } + action_encap_data->conf.size = header - + action_encap_data->data; + action->conf = &action_encap_data->conf; + return ret; +} + +/** Parse l2 decap action. */ +static int +parse_vc_action_l2_decap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_raw_decap_data *action_decap_data; + struct rte_flow_item_eth eth = { .type = 0, }; + struct rte_flow_item_vlan vlan = { + .tci = mplsoudp_encap_conf.vlan_tci, + .inner_type = 0, + }; + uint8_t *header; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Copy the headers to the buffer. */ + action_decap_data = ctx->object; + *action_decap_data = (struct action_raw_decap_data) { + .conf = (struct rte_flow_action_raw_decap){ + .data = action_decap_data->data, + }, + .data = {}, + }; + header = action_decap_data->data; + if (l2_decap_conf.select_vlan) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); + memcpy(header, ð, sizeof(eth)); + header += sizeof(eth); + if (l2_decap_conf.select_vlan) { + memcpy(header, &vlan, sizeof(vlan)); + header += sizeof(vlan); + } + action_decap_data->conf.size = header - + action_decap_data->data; + action->conf = &action_decap_data->conf; + return ret; +} + +#define ETHER_TYPE_MPLS_UNICAST 0x8847 + +/** Parse MPLSOGRE encap action. 
*/ +static int +parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_raw_encap_data *action_encap_data; + struct rte_flow_item_eth eth = { .type = 0, }; + struct rte_flow_item_vlan vlan = { + .tci = mplsogre_encap_conf.vlan_tci, + .inner_type = 0, + }; + struct rte_flow_item_ipv4 ipv4 = { + .hdr = { + .src_addr = mplsogre_encap_conf.ipv4_src, + .dst_addr = mplsogre_encap_conf.ipv4_dst, + .next_proto_id = IPPROTO_GRE, + .version_ihl = RTE_IPV4_VHL_DEF, + .time_to_live = IPDEFTTL, + }, + }; + struct rte_flow_item_ipv6 ipv6 = { + .hdr = { + .proto = IPPROTO_GRE, + .hop_limits = IPDEFTTL, + }, + }; + struct rte_flow_item_gre gre = { + .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST), + }; + struct rte_flow_item_mpls mpls = { + .ttl = 0, + }; + uint8_t *header; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Copy the headers to the buffer. */ + action_encap_data = ctx->object; + *action_encap_data = (struct action_raw_encap_data) { + .conf = (struct rte_flow_action_raw_encap){ + .data = action_encap_data->data, + }, + .data = {}, + .preserve = {}, + }; + header = action_encap_data->data; + if (mplsogre_encap_conf.select_vlan) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); + else if (mplsogre_encap_conf.select_ipv4) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + memcpy(eth.dst.addr_bytes, + mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(eth.src.addr_bytes, + mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); + memcpy(header, ð, sizeof(eth)); + header += sizeof(eth); + if (mplsogre_encap_conf.select_vlan) { + if (mplsogre_encap_conf.select_ipv4) + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + memcpy(header, &vlan, sizeof(vlan)); + header += sizeof(vlan); + } + if (mplsogre_encap_conf.select_ipv4) { + memcpy(header, &ipv4, sizeof(ipv4)); + header += sizeof(ipv4); + } else { + memcpy(&ipv6.hdr.src_addr, + &mplsogre_encap_conf.ipv6_src, + sizeof(mplsogre_encap_conf.ipv6_src)); + memcpy(&ipv6.hdr.dst_addr, + &mplsogre_encap_conf.ipv6_dst, + sizeof(mplsogre_encap_conf.ipv6_dst)); + memcpy(header, &ipv6, sizeof(ipv6)); + header += sizeof(ipv6); + } + memcpy(header, &gre, sizeof(gre)); + header += sizeof(gre); + memcpy(mpls.label_tc_s, mplsogre_encap_conf.label, + RTE_DIM(mplsogre_encap_conf.label)); + mpls.label_tc_s[2] |= 0x1; + memcpy(header, &mpls, sizeof(mpls)); + header += sizeof(mpls); + action_encap_data->conf.size = header - + action_encap_data->data; + action->conf = &action_encap_data->conf; + return ret; +} + +/** Parse MPLSOGRE decap action. 
*/ +static int +parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_raw_decap_data *action_decap_data; + struct rte_flow_item_eth eth = { .type = 0, }; + struct rte_flow_item_vlan vlan = {.tci = 0}; + struct rte_flow_item_ipv4 ipv4 = { + .hdr = { + .next_proto_id = IPPROTO_GRE, + }, + }; + struct rte_flow_item_ipv6 ipv6 = { + .hdr = { + .proto = IPPROTO_GRE, + }, + }; + struct rte_flow_item_gre gre = { + .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST), + }; + struct rte_flow_item_mpls mpls; + uint8_t *header; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Copy the headers to the buffer. */ + action_decap_data = ctx->object; + *action_decap_data = (struct action_raw_decap_data) { + .conf = (struct rte_flow_action_raw_decap){ + .data = action_decap_data->data, + }, + .data = {}, + }; + header = action_decap_data->data; + if (mplsogre_decap_conf.select_vlan) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); + else if (mplsogre_encap_conf.select_ipv4) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + memcpy(eth.dst.addr_bytes, + mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(eth.src.addr_bytes, + mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); + memcpy(header, ð, sizeof(eth)); + header += sizeof(eth); + if (mplsogre_encap_conf.select_vlan) { + if (mplsogre_encap_conf.select_ipv4) + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + memcpy(header, &vlan, sizeof(vlan)); + header += sizeof(vlan); + } + if (mplsogre_encap_conf.select_ipv4) { + memcpy(header, &ipv4, sizeof(ipv4)); + header += sizeof(ipv4); + } else { + memcpy(header, &ipv6, sizeof(ipv6)); + header += sizeof(ipv6); + } + memcpy(header, &gre, sizeof(gre)); + header += sizeof(gre); + memset(&mpls, 0, sizeof(mpls)); + memcpy(header, &mpls, sizeof(mpls)); + header += sizeof(mpls); + action_decap_data->conf.size = header - + action_decap_data->data; + action->conf = &action_decap_data->conf; + return ret; +} + +/** Parse MPLSOUDP encap action. 
*/ +static int +parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_raw_encap_data *action_encap_data; + struct rte_flow_item_eth eth = { .type = 0, }; + struct rte_flow_item_vlan vlan = { + .tci = mplsoudp_encap_conf.vlan_tci, + .inner_type = 0, + }; + struct rte_flow_item_ipv4 ipv4 = { + .hdr = { + .src_addr = mplsoudp_encap_conf.ipv4_src, + .dst_addr = mplsoudp_encap_conf.ipv4_dst, + .next_proto_id = IPPROTO_UDP, + .version_ihl = RTE_IPV4_VHL_DEF, + .time_to_live = IPDEFTTL, + }, + }; + struct rte_flow_item_ipv6 ipv6 = { + .hdr = { + .proto = IPPROTO_UDP, + .hop_limits = IPDEFTTL, + }, + }; + struct rte_flow_item_udp udp = { + .hdr = { + .src_port = mplsoudp_encap_conf.udp_src, + .dst_port = mplsoudp_encap_conf.udp_dst, + }, + }; + struct rte_flow_item_mpls mpls; + uint8_t *header; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Copy the headers to the buffer. */ + action_encap_data = ctx->object; + *action_encap_data = (struct action_raw_encap_data) { + .conf = (struct rte_flow_action_raw_encap){ + .data = action_encap_data->data, + }, + .data = {}, + .preserve = {}, + }; + header = action_encap_data->data; + if (mplsoudp_encap_conf.select_vlan) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); + else if (mplsoudp_encap_conf.select_ipv4) + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + memcpy(eth.dst.addr_bytes, + mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN); + memcpy(eth.src.addr_bytes, + mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN); + memcpy(header, ð, sizeof(eth)); + header += sizeof(eth); + if (mplsoudp_encap_conf.select_vlan) { + if (mplsoudp_encap_conf.select_ipv4) + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + else + vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6); + memcpy(header, &vlan, sizeof(vlan)); + header += sizeof(vlan); + } + if (mplsoudp_encap_conf.select_ipv4) { + memcpy(header, &ipv4, sizeof(ipv4)); + header += sizeof(ipv4); + } else { + memcpy(&ipv6.hdr.src_addr, + &mplsoudp_encap_conf.ipv6_src, + sizeof(mplsoudp_encap_conf.ipv6_src)); + memcpy(&ipv6.hdr.dst_addr, + &mplsoudp_encap_conf.ipv6_dst, + sizeof(mplsoudp_encap_conf.ipv6_dst)); + memcpy(header, &ipv6, sizeof(ipv6)); + header += sizeof(ipv6); + } + memcpy(header, &udp, sizeof(udp)); + header += sizeof(udp); + memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label, + RTE_DIM(mplsoudp_encap_conf.label)); + mpls.label_tc_s[2] |= 0x1; + memcpy(header, &mpls, sizeof(mpls)); + header += sizeof(mpls); + action_encap_data->conf.size = header - + action_encap_data->data; + action->conf = &action_encap_data->conf; + return ret; +} + +/** Parse MPLSOUDP decap action. 
 */
+static int
+parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
+			       const char *str, unsigned int len,
+			       void *buf, unsigned int size)
+{
+	struct buffer *out = buf;
+	struct rte_flow_action *action;
+	struct action_raw_decap_data *action_decap_data;
+	struct rte_flow_item_eth eth = { .type = 0, };
+	struct rte_flow_item_vlan vlan = {.tci = 0};
+	struct rte_flow_item_ipv4 ipv4 = {
+		.hdr = {
+			.next_proto_id = IPPROTO_UDP,
+		},
+	};
+	struct rte_flow_item_ipv6 ipv6 = {
+		.hdr = {
+			.proto = IPPROTO_UDP,
+		},
+	};
+	struct rte_flow_item_udp udp = {
+		.hdr = {
+			.dst_port = rte_cpu_to_be_16(6635),
+		},
+	};
+	struct rte_flow_item_mpls mpls;
+	uint8_t *header;
+	int ret;
+
+	ret = parse_vc(ctx, token, str, len, buf, size);
+	if (ret < 0)
+		return ret;
+	/* Nothing else to do if there is no buffer. */
+	if (!out)
+		return ret;
+	if (!out->args.vc.actions_n)
+		return -1;
+	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+	/* Point to selected object. */
+	ctx->object = out->args.vc.data;
+	ctx->objmask = NULL;
+	/* Copy the headers to the buffer. */
+	action_decap_data = ctx->object;
+	*action_decap_data = (struct action_raw_decap_data) {
+		.conf = (struct rte_flow_action_raw_decap){
+			.data = action_decap_data->data,
+		},
+		.data = {},
+	};
+	header = action_decap_data->data;
+	if (mplsoudp_decap_conf.select_vlan)
+		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+	else if (mplsoudp_encap_conf.select_ipv4)
+		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+	else
+		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+	memcpy(eth.dst.addr_bytes,
+	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
+	memcpy(eth.src.addr_bytes,
+	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
+	memcpy(header, &eth, sizeof(eth));
+	header += sizeof(eth);
+	if (mplsoudp_encap_conf.select_vlan) {
+		if (mplsoudp_encap_conf.select_ipv4)
+			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+		else
+			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+		memcpy(header, &vlan, sizeof(vlan));
+		header += sizeof(vlan);
+	}
+	if (mplsoudp_encap_conf.select_ipv4) {
+		memcpy(header, &ipv4, sizeof(ipv4));
+		header += sizeof(ipv4);
+	} else {
+		memcpy(header, &ipv6, sizeof(ipv6));
+		header += sizeof(ipv6);
+	}
+	memcpy(header, &udp, sizeof(udp));
+	header += sizeof(udp);
+	memset(&mpls, 0, sizeof(mpls));
+	memcpy(header, &mpls, sizeof(mpls));
+	header += sizeof(mpls);
+	action_decap_data->conf.size = header -
+		action_decap_data->data;
+	action->conf = &action_decap_data->conf;
+	return ret;
+}
+
+static int
+parse_vc_action_raw_decap_index(struct context *ctx, const struct token *token,
+				const char *str, unsigned int len, void *buf,
+				unsigned int size)
+{
+	struct action_raw_decap_data *action_raw_decap_data;
+	struct rte_flow_action *action;
+	const struct arg *arg;
+	struct buffer *out = buf;
+	int ret;
+	uint16_t idx;
+
+	RTE_SET_USED(token);
+	RTE_SET_USED(buf);
+	RTE_SET_USED(size);
+	arg = ARGS_ENTRY_ARB_BOUNDED
+		(offsetof(struct action_raw_decap_data, idx),
+		 sizeof(((struct action_raw_decap_data *)0)->idx),
+		 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
+	if (push_args(ctx, arg))
+		return -1;
+	ret = parse_int(ctx, token, str, len, NULL, 0);
+	if (ret < 0) {
+		pop_args(ctx);
+		return -1;
+	}
+	if (!ctx->object)
+		return len;
+	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+	action_raw_decap_data = ctx->object;
+	idx = action_raw_decap_data->idx;
+	action_raw_decap_data->conf.data = raw_decap_confs[idx].data;
+	action_raw_decap_data->conf.size =
raw_decap_confs[idx].size; + action->conf = &action_raw_decap_data->conf; + return len; +} + + +static int +parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token, + const char *str, unsigned int len, void *buf, + unsigned int size) +{ + struct action_raw_encap_data *action_raw_encap_data; + struct rte_flow_action *action; + const struct arg *arg; + struct buffer *out = buf; + int ret; + uint16_t idx; + + RTE_SET_USED(token); + RTE_SET_USED(buf); + RTE_SET_USED(size); + if (ctx->curr != ACTION_RAW_ENCAP_INDEX_VALUE) + return -1; + arg = ARGS_ENTRY_ARB_BOUNDED + (offsetof(struct action_raw_encap_data, idx), + sizeof(((struct action_raw_encap_data *)0)->idx), + 0, RAW_ENCAP_CONFS_MAX_NUM - 1); + if (push_args(ctx, arg)) + return -1; + ret = parse_int(ctx, token, str, len, NULL, 0); + if (ret < 0) { + pop_args(ctx); + return -1; + } + if (!ctx->object) + return len; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + action_raw_encap_data = ctx->object; + idx = action_raw_encap_data->idx; + action_raw_encap_data->conf.data = raw_encap_confs[idx].data; + action_raw_encap_data->conf.size = raw_encap_confs[idx].size; + action_raw_encap_data->conf.preserve = NULL; + action->conf = &action_raw_encap_data->conf; + return len; +} + +static int +parse_vc_action_raw_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, void *buf, + unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_raw_encap_data *action_raw_encap_data = NULL; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Copy the headers to the buffer. */ + action_raw_encap_data = ctx->object; + action_raw_encap_data->conf.data = raw_encap_confs[0].data; + action_raw_encap_data->conf.preserve = NULL; + action_raw_encap_data->conf.size = raw_encap_confs[0].size; + action->conf = &action_raw_encap_data->conf; + return ret; +} + +static int +parse_vc_action_raw_decap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, void *buf, + unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_raw_decap_data *action_raw_decap_data = NULL; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Copy the headers to the buffer. */ + action_raw_decap_data = ctx->object; + action_raw_decap_data->conf.data = raw_decap_confs[0].data; + action_raw_decap_data->conf.size = raw_decap_confs[0].size; + action->conf = &action_raw_decap_data->conf; + return ret; +} + +static int +parse_vc_action_set_meta(struct context *ctx, const struct token *token, + const char *str, unsigned int len, void *buf, + unsigned int size) +{ + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + ret = rte_flow_dynf_metadata_register(); + if (ret < 0) + return -1; + return len; +} + +/** Parse tokens for destroy command. 
*/ +static int +parse_destroy(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != DESTROY) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + out->args.destroy.rule = + (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1), + sizeof(double)); + return len; + } + if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) + + sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size) + return -1; + ctx->objdata = 0; + ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++; + ctx->objmask = NULL; + return len; +} + +/** Parse tokens for flush command. */ +static int +parse_flush(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != FLUSH) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + } + return len; +} + +/** Parse tokens for dump command. */ +static int +parse_dump(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != DUMP) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + } + return len; +} + +/** Parse tokens for query command. */ +static int +parse_query(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != QUERY) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + } + return len; +} + +/** Parse action names. */ +static int +parse_action(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + const struct arg *arg = pop_args(ctx); + unsigned int i; + + (void)size; + /* Argument is expected. */ + if (!arg) + return -1; + /* Parse action name. */ + for (i = 0; next_action[i]; ++i) { + const struct parse_action_priv *priv; + + token = &token_list[next_action[i]]; + if (strcmp_partial(token->name, str, len)) + continue; + priv = token->priv; + if (!priv) + goto error; + if (out) + memcpy((uint8_t *)ctx->object + arg->offset, + &priv->type, + arg->size); + return len; + } +error: + push_args(ctx, arg); + return -1; +} + +/** Parse tokens for list command. 
*/ +static int +parse_list(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != LIST) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + out->args.list.group = + (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1), + sizeof(double)); + return len; + } + if (((uint8_t *)(out->args.list.group + out->args.list.group_n) + + sizeof(*out->args.list.group)) > (uint8_t *)out + size) + return -1; + ctx->objdata = 0; + ctx->object = out->args.list.group + out->args.list.group_n++; + ctx->objmask = NULL; + return len; +} + +/** Parse tokens for list all aged flows command. */ +static int +parse_aged(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != AGED) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + } + if (ctx->curr == AGED_DESTROY) + out->args.aged.destroy = 1; + return len; +} + +/** Parse tokens for isolate command. */ +static int +parse_isolate(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + if (!out->command) { + if (ctx->curr != ISOLATE) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + } + return len; +} + +/** + * Parse signed/unsigned integers 8 to 64-bit long. + * + * Last argument (ctx->args) is retrieved to determine integer type and + * storage location. + */ +static int +parse_int(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg = pop_args(ctx); + uintmax_t u; + char *end; + + (void)token; + /* Argument is expected. */ + if (!arg) + return -1; + errno = 0; + u = arg->sign ? + (uintmax_t)strtoimax(str, &end, 0) : + strtoumax(str, &end, 0); + if (errno || (size_t)(end - str) != len) + goto error; + if (arg->bounded && + ((arg->sign && ((intmax_t)u < (intmax_t)arg->min || + (intmax_t)u > (intmax_t)arg->max)) || + (!arg->sign && (u < arg->min || u > arg->max)))) + goto error; + if (!ctx->object) + return len; + if (arg->mask) { + if (!arg_entry_bf_fill(ctx->object, u, arg) || + !arg_entry_bf_fill(ctx->objmask, -1, arg)) + goto error; + return len; + } + buf = (uint8_t *)ctx->object + arg->offset; + size = arg->size; + if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t)) + return -1; +objmask: + switch (size) { + case sizeof(uint8_t): + *(uint8_t *)buf = u; + break; + case sizeof(uint16_t): + *(uint16_t *)buf = arg->hton ? 
rte_cpu_to_be_16(u) : u; + break; + case sizeof(uint8_t [3]): +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + if (!arg->hton) { + ((uint8_t *)buf)[0] = u; + ((uint8_t *)buf)[1] = u >> 8; + ((uint8_t *)buf)[2] = u >> 16; + break; + } +#endif + ((uint8_t *)buf)[0] = u >> 16; + ((uint8_t *)buf)[1] = u >> 8; + ((uint8_t *)buf)[2] = u; + break; + case sizeof(uint32_t): + *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u; + break; + case sizeof(uint64_t): + *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u; + break; + default: + goto error; + } + if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) { + u = -1; + buf = (uint8_t *)ctx->objmask + arg->offset; + goto objmask; + } + return len; +error: + push_args(ctx, arg); + return -1; +} + +/** + * Parse a string. + * + * Three arguments (ctx->args) are retrieved from the stack to store data, + * its actual length and address (in that order). + */ +static int +parse_string(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg_data = pop_args(ctx); + const struct arg *arg_len = pop_args(ctx); + const struct arg *arg_addr = pop_args(ctx); + char tmp[16]; /* Ought to be enough. */ + int ret; + + /* Arguments are expected. */ + if (!arg_data) + return -1; + if (!arg_len) { + push_args(ctx, arg_data); + return -1; + } + if (!arg_addr) { + push_args(ctx, arg_len); + push_args(ctx, arg_data); + return -1; + } + size = arg_data->size; + /* Bit-mask fill is not supported. */ + if (arg_data->mask || size < len) + goto error; + if (!ctx->object) + return len; + /* Let parse_int() fill length information first. */ + ret = snprintf(tmp, sizeof(tmp), "%u", len); + if (ret < 0) + goto error; + push_args(ctx, arg_len); + ret = parse_int(ctx, token, tmp, ret, NULL, 0); + if (ret < 0) { + pop_args(ctx); + goto error; + } + buf = (uint8_t *)ctx->object + arg_data->offset; + /* Output buffer is not necessarily NUL-terminated. */ + memcpy(buf, str, len); + memset((uint8_t *)buf + len, 0x00, size - len); + if (ctx->objmask) + memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len); + /* Save address if requested. */ + if (arg_addr->size) { + memcpy((uint8_t *)ctx->object + arg_addr->offset, + (void *[]){ + (uint8_t *)ctx->object + arg_data->offset + }, + arg_addr->size); + if (ctx->objmask) + memcpy((uint8_t *)ctx->objmask + arg_addr->offset, + (void *[]){ + (uint8_t *)ctx->objmask + arg_data->offset + }, + arg_addr->size); + } + return len; +error: + push_args(ctx, arg_addr); + push_args(ctx, arg_len); + push_args(ctx, arg_data); + return -1; +} + +static int +parse_hex_string(const char *src, uint8_t *dst, uint32_t *size) +{ + char *c = NULL; + uint32_t i, len; + char tmp[3]; + + /* Check input parameters */ + if ((src == NULL) || + (dst == NULL) || + (size == NULL) || + (*size == 0)) + return -1; + + /* Convert chars to bytes */ + for (i = 0, len = 0; i < *size; i += 2) { + snprintf(tmp, 3, "%s", src + i); + dst[len++] = strtoul(tmp, &c, 16); + if (*c != 0) { + len--; + dst[len] = 0; + *size = len; + return -1; + } + } + dst[len] = 0; + *size = len; + + return 0; +} + +static int +parse_hex(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg_data = pop_args(ctx); + const struct arg *arg_len = pop_args(ctx); + const struct arg *arg_addr = pop_args(ctx); + char tmp[16]; /* Ought to be enough. 
*/ + int ret; + unsigned int hexlen = len; + unsigned int length = 256; + uint8_t hex_tmp[length]; + + /* Arguments are expected. */ + if (!arg_data) + return -1; + if (!arg_len) { + push_args(ctx, arg_data); + return -1; + } + if (!arg_addr) { + push_args(ctx, arg_len); + push_args(ctx, arg_data); + return -1; + } + size = arg_data->size; + /* Bit-mask fill is not supported. */ + if (arg_data->mask) + goto error; + if (!ctx->object) + return len; + + /* translate bytes string to array. */ + if (str[0] == '0' && ((str[1] == 'x') || + (str[1] == 'X'))) { + str += 2; + hexlen -= 2; + } + if (hexlen > length) + return -1; + ret = parse_hex_string(str, hex_tmp, &hexlen); + if (ret < 0) + goto error; + /* Let parse_int() fill length information first. */ + ret = snprintf(tmp, sizeof(tmp), "%u", hexlen); + if (ret < 0) + goto error; + push_args(ctx, arg_len); + ret = parse_int(ctx, token, tmp, ret, NULL, 0); + if (ret < 0) { + pop_args(ctx); + goto error; + } + buf = (uint8_t *)ctx->object + arg_data->offset; + /* Output buffer is not necessarily NUL-terminated. */ + memcpy(buf, hex_tmp, hexlen); + memset((uint8_t *)buf + hexlen, 0x00, size - hexlen); + if (ctx->objmask) + memset((uint8_t *)ctx->objmask + arg_data->offset, + 0xff, hexlen); + /* Save address if requested. */ + if (arg_addr->size) { + memcpy((uint8_t *)ctx->object + arg_addr->offset, + (void *[]){ + (uint8_t *)ctx->object + arg_data->offset + }, + arg_addr->size); + if (ctx->objmask) + memcpy((uint8_t *)ctx->objmask + arg_addr->offset, + (void *[]){ + (uint8_t *)ctx->objmask + arg_data->offset + }, + arg_addr->size); + } + return len; +error: + push_args(ctx, arg_addr); + push_args(ctx, arg_len); + push_args(ctx, arg_data); + return -1; + +} + +/** + * Parse a zero-ended string. + */ +static int +parse_string0(struct context *ctx, const struct token *token __rte_unused, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg_data = pop_args(ctx); + + /* Arguments are expected. */ + if (!arg_data) + return -1; + size = arg_data->size; + /* Bit-mask fill is not supported. */ + if (arg_data->mask || size < len + 1) + goto error; + if (!ctx->object) + return len; + buf = (uint8_t *)ctx->object + arg_data->offset; + strncpy(buf, str, len); + if (ctx->objmask) + memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len); + return len; +error: + push_args(ctx, arg_data); + return -1; +} + +/** + * Parse a MAC address. + * + * Last argument (ctx->args) is retrieved to determine storage size and + * location. + */ +static int +parse_mac_addr(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg = pop_args(ctx); + struct rte_ether_addr tmp; + int ret; + + (void)token; + /* Argument is expected. */ + if (!arg) + return -1; + size = arg->size; + /* Bit-mask fill is not supported. */ + if (arg->mask || size != sizeof(tmp)) + goto error; + /* Only network endian is supported. */ + if (!arg->hton) + goto error; + ret = cmdline_parse_etheraddr(NULL, str, &tmp, size); + if (ret < 0 || (unsigned int)ret != len) + goto error; + if (!ctx->object) + return len; + buf = (uint8_t *)ctx->object + arg->offset; + memcpy(buf, &tmp, size); + if (ctx->objmask) + memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size); + return len; +error: + push_args(ctx, arg); + return -1; +} + +/** + * Parse an IPv4 address. + * + * Last argument (ctx->args) is retrieved to determine storage size and + * location. 
+ */ +static int +parse_ipv4_addr(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg = pop_args(ctx); + char str2[len + 1]; + struct in_addr tmp; + int ret; + + /* Argument is expected. */ + if (!arg) + return -1; + size = arg->size; + /* Bit-mask fill is not supported. */ + if (arg->mask || size != sizeof(tmp)) + goto error; + /* Only network endian is supported. */ + if (!arg->hton) + goto error; + memcpy(str2, str, len); + str2[len] = '\0'; + ret = inet_pton(AF_INET, str2, &tmp); + if (ret != 1) { + /* Attempt integer parsing. */ + push_args(ctx, arg); + return parse_int(ctx, token, str, len, buf, size); + } + if (!ctx->object) + return len; + buf = (uint8_t *)ctx->object + arg->offset; + memcpy(buf, &tmp, size); + if (ctx->objmask) + memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size); + return len; +error: + push_args(ctx, arg); + return -1; +} + +/** + * Parse an IPv6 address. + * + * Last argument (ctx->args) is retrieved to determine storage size and + * location. + */ +static int +parse_ipv6_addr(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg = pop_args(ctx); + char str2[len + 1]; + struct in6_addr tmp; + int ret; + + (void)token; + /* Argument is expected. */ + if (!arg) + return -1; + size = arg->size; + /* Bit-mask fill is not supported. */ + if (arg->mask || size != sizeof(tmp)) + goto error; + /* Only network endian is supported. */ + if (!arg->hton) + goto error; + memcpy(str2, str, len); + str2[len] = '\0'; + ret = inet_pton(AF_INET6, str2, &tmp); + if (ret != 1) + goto error; + if (!ctx->object) + return len; + buf = (uint8_t *)ctx->object + arg->offset; + memcpy(buf, &tmp, size); + if (ctx->objmask) + memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size); + return len; +error: + push_args(ctx, arg); + return -1; +} + +/** Boolean values (even indices stand for false). */ +static const char *const boolean_name[] = { + "0", "1", + "false", "true", + "no", "yes", + "N", "Y", + "off", "on", + NULL, +}; + +/** + * Parse a boolean value. + * + * Last argument (ctx->args) is retrieved to determine storage size and + * location. + */ +static int +parse_boolean(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg = pop_args(ctx); + unsigned int i; + int ret; + + /* Argument is expected. */ + if (!arg) + return -1; + for (i = 0; boolean_name[i]; ++i) + if (!strcmp_partial(boolean_name[i], str, len)) + break; + /* Process token as integer. */ + if (boolean_name[i]) + str = i & 1 ? "1" : "0"; + push_args(ctx, arg); + ret = parse_int(ctx, token, str, strlen(str), buf, size); + return ret > 0 ? (int)len : ret; +} + +/** Parse port and update context. */ +static int +parse_port(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = &(struct buffer){ .port = 0 }; + int ret; + + if (buf) + out = buf; + else { + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + size = sizeof(*out); + } + ret = parse_int(ctx, token, str, len, out, size); + if (ret >= 0) + ctx->port = out->port; + if (!buf) + ctx->object = NULL; + return ret; +} + +/** Parse set command, initialize output buffer for subsequent tokens. 
*/ +static int +parse_set_raw_encap_decap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + /* Make sure buffer is large enough. */ + if (size < sizeof(*out)) + return -1; + ctx->objdata = 0; + ctx->objmask = NULL; + ctx->object = out; + if (!out->command) + return -1; + out->command = ctx->curr; + return len; +} + +/** + * Parse set raw_encap/raw_decap command, + * initialize output buffer for subsequent tokens. + */ +static int +parse_set_init(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + + /* Token name must match. */ + if (parse_default(ctx, token, str, len, NULL, 0) < 0) + return -1; + /* Nothing else to do if there is no buffer. */ + if (!out) + return len; + /* Make sure buffer is large enough. */ + if (size < sizeof(*out)) + return -1; + /* Initialize buffer. */ + memset(out, 0x00, sizeof(*out)); + memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out)); + ctx->objdata = 0; + ctx->object = out; + ctx->objmask = NULL; + if (!out->command) { + if (ctx->curr != SET) + return -1; + if (sizeof(*out) > size) + return -1; + out->command = ctx->curr; + out->args.vc.data = (uint8_t *)out + size; + /* All we need is pattern */ + out->args.vc.pattern = + (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1), + sizeof(double)); + ctx->object = out->args.vc.pattern; + } + return len; +} + +/** No completion. */ +static int +comp_none(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size) +{ + (void)ctx; + (void)token; + (void)ent; + (void)buf; + (void)size; + return 0; +} + +/** Complete boolean values. */ +static int +comp_boolean(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size) +{ + unsigned int i; + + (void)ctx; + (void)token; + for (i = 0; boolean_name[i]; ++i) + if (buf && i == ent) + return strlcpy(buf, boolean_name[i], size); + if (buf) + return -1; + return i; +} + +/** Complete action names. */ +static int +comp_action(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size) +{ + unsigned int i; + + (void)ctx; + (void)token; + for (i = 0; next_action[i]; ++i) + if (buf && i == ent) + return strlcpy(buf, token_list[next_action[i]].name, + size); + if (buf) + return -1; + return i; +} + +/** Complete available ports. */ +static int +comp_port(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size) +{ + unsigned int i = 0; + portid_t p; + + (void)ctx; + (void)token; + RTE_ETH_FOREACH_DEV(p) { + if (buf && i == ent) + return snprintf(buf, size, "%u", p); + ++i; + } + if (buf) + return -1; + return i; +} + +/** Complete available rule IDs. 
*/ +static int +comp_rule_id(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size) +{ + unsigned int i = 0; + struct rte_port *port; + struct port_flow *pf; + + (void)token; + if (port_id_is_invalid(ctx->port, DISABLED_WARN) || + ctx->port == (portid_t)RTE_PORT_ALL) + return -1; + port = &ports[ctx->port]; + for (pf = port->flow_list; pf != NULL; pf = pf->next) { + if (buf && i == ent) + return snprintf(buf, size, "%u", pf->id); + ++i; + } + if (buf) + return -1; + return i; +} + +/** Complete type field for RSS action. */ +static int +comp_vc_action_rss_type(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size) +{ + unsigned int i; + + (void)ctx; + (void)token; + for (i = 0; rss_type_table[i].str; ++i) + ; + if (!buf) + return i + 1; + if (ent < i) + return strlcpy(buf, rss_type_table[ent].str, size); + if (ent == i) + return snprintf(buf, size, "end"); + return -1; +} + +/** Complete queue field for RSS action. */ +static int +comp_vc_action_rss_queue(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size) +{ + (void)ctx; + (void)token; + if (!buf) + return nb_rxq + 1; + if (ent < nb_rxq) + return snprintf(buf, size, "%u", ent); + if (ent == nb_rxq) + return snprintf(buf, size, "end"); + return -1; +} + +/** Complete index number for set raw_encap/raw_decap commands. */ +static int +comp_set_raw_index(struct context *ctx, const struct token *token, + unsigned int ent, char *buf, unsigned int size) +{ + uint16_t idx = 0; + uint16_t nb = 0; + + RTE_SET_USED(ctx); + RTE_SET_USED(token); + for (idx = 0; idx < RAW_ENCAP_CONFS_MAX_NUM; ++idx) { + if (buf && idx == ent) + return snprintf(buf, size, "%u", idx); + ++nb; + } + return nb; +} + +/** Internal context. */ +static struct context cmd_flow_context; + +/** Global parser instance (cmdline API). */ +cmdline_parse_inst_t cmd_flow; +cmdline_parse_inst_t cmd_set_raw; + +/** Initialize context. */ +static void +cmd_flow_context_init(struct context *ctx) +{ + /* A full memset() is not necessary. */ + ctx->curr = ZERO; + ctx->prev = ZERO; + ctx->next_num = 0; + ctx->args_num = 0; + ctx->eol = 0; + ctx->last = 0; + ctx->port = 0; + ctx->objdata = 0; + ctx->object = NULL; + ctx->objmask = NULL; +} + +/** Parse a token (cmdline API). */ +static int +cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result, + unsigned int size) +{ + struct context *ctx = &cmd_flow_context; + const struct token *token; + const enum index *list; + int len; + int i; + + (void)hdr; + token = &token_list[ctx->curr]; + /* Check argument length. */ + ctx->eol = 0; + ctx->last = 1; + for (len = 0; src[len]; ++len) + if (src[len] == '#' || isspace(src[len])) + break; + if (!len) + return -1; + /* Last argument and EOL detection. */ + for (i = len; src[i]; ++i) + if (src[i] == '#' || src[i] == '\r' || src[i] == '\n') + break; + else if (!isspace(src[i])) { + ctx->last = 0; + break; + } + for (; src[i]; ++i) + if (src[i] == '\r' || src[i] == '\n') { + ctx->eol = 1; + break; + } + /* Initialize context if necessary. */ + if (!ctx->next_num) { + if (!token->next) + return 0; + ctx->next[ctx->next_num++] = token->next[0]; + } + /* Process argument through candidates. 
*/ + ctx->prev = ctx->curr; + list = ctx->next[ctx->next_num - 1]; + for (i = 0; list[i]; ++i) { + const struct token *next = &token_list[list[i]]; + int tmp; + + ctx->curr = list[i]; + if (next->call) + tmp = next->call(ctx, next, src, len, result, size); + else + tmp = parse_default(ctx, next, src, len, result, size); + if (tmp == -1 || tmp != len) + continue; + token = next; + break; + } + if (!list[i]) + return -1; + --ctx->next_num; + /* Push subsequent tokens if any. */ + if (token->next) + for (i = 0; token->next[i]; ++i) { + if (ctx->next_num == RTE_DIM(ctx->next)) + return -1; + ctx->next[ctx->next_num++] = token->next[i]; + } + /* Push arguments if any. */ + if (token->args) + for (i = 0; token->args[i]; ++i) { + if (ctx->args_num == RTE_DIM(ctx->args)) + return -1; + ctx->args[ctx->args_num++] = token->args[i]; + } + return len; +} + +/** Return number of completion entries (cmdline API). */ +static int +cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr) +{ + struct context *ctx = &cmd_flow_context; + const struct token *token = &token_list[ctx->curr]; + const enum index *list; + int i; + + (void)hdr; + /* Count number of tokens in current list. */ + if (ctx->next_num) + list = ctx->next[ctx->next_num - 1]; + else + list = token->next[0]; + for (i = 0; list[i]; ++i) + ; + if (!i) + return 0; + /* + * If there is a single token, use its completion callback, otherwise + * return the number of entries. + */ + token = &token_list[list[0]]; + if (i == 1 && token->comp) { + /* Save index for cmd_flow_get_help(). */ + ctx->prev = list[0]; + return token->comp(ctx, token, 0, NULL, 0); + } + return i; +} + +/** Return a completion entry (cmdline API). */ +static int +cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index, + char *dst, unsigned int size) +{ + struct context *ctx = &cmd_flow_context; + const struct token *token = &token_list[ctx->curr]; + const enum index *list; + int i; + + (void)hdr; + /* Count number of tokens in current list. */ + if (ctx->next_num) + list = ctx->next[ctx->next_num - 1]; + else + list = token->next[0]; + for (i = 0; list[i]; ++i) + ; + if (!i) + return -1; + /* If there is a single token, use its completion callback. */ + token = &token_list[list[0]]; + if (i == 1 && token->comp) { + /* Save index for cmd_flow_get_help(). */ + ctx->prev = list[0]; + return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0; + } + /* Otherwise make sure the index is valid and use defaults. */ + if (index >= i) + return -1; + token = &token_list[list[index]]; + strlcpy(dst, token->name, size); + /* Save index for cmd_flow_get_help(). */ + ctx->prev = list[index]; + return 0; +} + +/** Populate help strings for current token (cmdline API). */ +static int +cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size) +{ + struct context *ctx = &cmd_flow_context; + const struct token *token = &token_list[ctx->prev]; + + (void)hdr; + if (!size) + return -1; + /* Set token type and update global help with details. */ + strlcpy(dst, (token->type ? token->type : "TOKEN"), size); + if (token->help) + cmd_flow.help_str = token->help; + else + cmd_flow.help_str = token->name; + return 0; +} + +/** Token definition template (cmdline API). 
*/ +static struct cmdline_token_hdr cmd_flow_token_hdr = { + .ops = &(struct cmdline_token_ops){ + .parse = cmd_flow_parse, + .complete_get_nb = cmd_flow_complete_get_nb, + .complete_get_elt = cmd_flow_complete_get_elt, + .get_help = cmd_flow_get_help, + }, + .offset = 0, +}; + +/** Populate the next dynamic token. */ +static void +cmd_flow_tok(cmdline_parse_token_hdr_t **hdr, + cmdline_parse_token_hdr_t **hdr_inst) +{ + struct context *ctx = &cmd_flow_context; + + /* Always reinitialize context before requesting the first token. */ + if (!(hdr_inst - cmd_flow.tokens)) + cmd_flow_context_init(ctx); + /* Return NULL when no more tokens are expected. */ + if (!ctx->next_num && ctx->curr) { + *hdr = NULL; + return; + } + /* Determine if command should end here. */ + if (ctx->eol && ctx->last && ctx->next_num) { + const enum index *list = ctx->next[ctx->next_num - 1]; + int i; + + for (i = 0; list[i]; ++i) { + if (list[i] != END) + continue; + *hdr = NULL; + return; + } + } + *hdr = &cmd_flow_token_hdr; +} + +/** Dispatch parsed buffer to function calls. */ +static void +cmd_flow_parsed(const struct buffer *in) +{ + switch (in->command) { + case VALIDATE: + port_flow_validate(in->port, &in->args.vc.attr, + in->args.vc.pattern, in->args.vc.actions); + break; + case CREATE: + port_flow_create(in->port, &in->args.vc.attr, + in->args.vc.pattern, in->args.vc.actions); + break; + case DESTROY: + port_flow_destroy(in->port, in->args.destroy.rule_n, + in->args.destroy.rule); + break; + case FLUSH: + port_flow_flush(in->port); + break; + case DUMP: + port_flow_dump(in->port, in->args.dump.file); + break; + case QUERY: + port_flow_query(in->port, in->args.query.rule, + &in->args.query.action); + break; + case LIST: + port_flow_list(in->port, in->args.list.group_n, + in->args.list.group); + break; + case ISOLATE: + port_flow_isolate(in->port, in->args.isolate.set); + break; + case AGED: + port_flow_aged(in->port, in->args.aged.destroy); + break; + default: + break; + } +} + +/** Token generator and output processing callback (cmdline API). */ +static void +cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2) +{ + if (cl == NULL) + cmd_flow_tok(arg0, arg2); + else + cmd_flow_parsed(arg0); +} + +/** Global parser instance (cmdline API). */ +cmdline_parse_inst_t cmd_flow = { + .f = cmd_flow_cb, + .data = NULL, /**< Unused. */ + .help_str = NULL, /**< Updated by cmd_flow_get_help(). */ + .tokens = { + NULL, + }, /**< Tokens are returned by cmd_flow_tok(). */ +}; + +/** set cmd facility. Reuse cmd flow's infrastructure as much as possible. 
*/ + +static void +update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto) +{ + struct rte_flow_item_ipv4 *ipv4; + struct rte_flow_item_eth *eth; + struct rte_flow_item_ipv6 *ipv6; + struct rte_flow_item_vxlan *vxlan; + struct rte_flow_item_vxlan_gpe *gpe; + struct rte_flow_item_nvgre *nvgre; + uint32_t ipv6_vtc_flow; + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + eth = (struct rte_flow_item_eth *)buf; + if (next_proto) + eth->type = rte_cpu_to_be_16(next_proto); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + ipv4 = (struct rte_flow_item_ipv4 *)buf; + ipv4->hdr.version_ihl = 0x45; + if (next_proto && ipv4->hdr.next_proto_id == 0) + ipv4->hdr.next_proto_id = (uint8_t)next_proto; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + ipv6 = (struct rte_flow_item_ipv6 *)buf; + if (next_proto && ipv6->hdr.proto == 0) + ipv6->hdr.proto = (uint8_t)next_proto; + ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow); + ipv6_vtc_flow &= 0x0FFFFFFF; /*< reset version bits. */ + ipv6_vtc_flow |= 0x60000000; /*< set ipv6 version. */ + ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + vxlan = (struct rte_flow_item_vxlan *)buf; + vxlan->flags = 0x08; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + gpe = (struct rte_flow_item_vxlan_gpe *)buf; + gpe->flags = 0x0C; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + nvgre = (struct rte_flow_item_nvgre *)buf; + nvgre->protocol = rte_cpu_to_be_16(0x6558); + nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000); + break; + default: + break; + } +} + +/** Helper of get item's default mask. */ +static const void * +flow_item_default_mask(const struct rte_flow_item *item) +{ + const void *mask = NULL; + static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX); + + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ANY: + mask = &rte_flow_item_any_mask; + break; + case RTE_FLOW_ITEM_TYPE_VF: + mask = &rte_flow_item_vf_mask; + break; + case RTE_FLOW_ITEM_TYPE_PORT_ID: + mask = &rte_flow_item_port_id_mask; + break; + case RTE_FLOW_ITEM_TYPE_RAW: + mask = &rte_flow_item_raw_mask; + break; + case RTE_FLOW_ITEM_TYPE_ETH: + mask = &rte_flow_item_eth_mask; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + mask = &rte_flow_item_vlan_mask; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + mask = &rte_flow_item_ipv4_mask; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + mask = &rte_flow_item_ipv6_mask; + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + mask = &rte_flow_item_icmp_mask; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + mask = &rte_flow_item_udp_mask; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + mask = &rte_flow_item_tcp_mask; + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + mask = &rte_flow_item_sctp_mask; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + mask = &rte_flow_item_vxlan_mask; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + mask = &rte_flow_item_vxlan_gpe_mask; + break; + case RTE_FLOW_ITEM_TYPE_E_TAG: + mask = &rte_flow_item_e_tag_mask; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + mask = &rte_flow_item_nvgre_mask; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + mask = &rte_flow_item_mpls_mask; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + mask = &rte_flow_item_gre_mask; + break; + case RTE_FLOW_ITEM_TYPE_GRE_KEY: + mask = &gre_key_default_mask; + break; + case RTE_FLOW_ITEM_TYPE_META: + mask = &rte_flow_item_meta_mask; + break; + case RTE_FLOW_ITEM_TYPE_FUZZY: + mask = &rte_flow_item_fuzzy_mask; + break; + case RTE_FLOW_ITEM_TYPE_GTP: + mask = &rte_flow_item_gtp_mask; + break; + case RTE_FLOW_ITEM_TYPE_GTP_PSC: + mask = &rte_flow_item_gtp_psc_mask; + 
break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + mask = &rte_flow_item_geneve_mask; + break; + case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID: + mask = &rte_flow_item_pppoe_proto_id_mask; + break; + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + mask = &rte_flow_item_l2tpv3oip_mask; + break; + case RTE_FLOW_ITEM_TYPE_ESP: + mask = &rte_flow_item_esp_mask; + break; + case RTE_FLOW_ITEM_TYPE_AH: + mask = &rte_flow_item_ah_mask; + break; + case RTE_FLOW_ITEM_TYPE_PFCP: + mask = &rte_flow_item_pfcp_mask; + break; + default: + break; + } + return mask; +} + + + +/** Dispatch parsed buffer to function calls. */ +static void +cmd_set_raw_parsed(const struct buffer *in) +{ + uint32_t n = in->args.vc.pattern_n; + int i = 0; + struct rte_flow_item *item = NULL; + size_t size = 0; + uint8_t *data = NULL; + uint8_t *data_tail = NULL; + size_t *total_size = NULL; + uint16_t upper_layer = 0; + uint16_t proto = 0; + uint16_t idx = in->port; /* We borrow port field as index */ + + RTE_ASSERT(in->command == SET_RAW_ENCAP || + in->command == SET_RAW_DECAP); + if (in->command == SET_RAW_ENCAP) { + total_size = &raw_encap_confs[idx].size; + data = (uint8_t *)&raw_encap_confs[idx].data; + } else { + total_size = &raw_decap_confs[idx].size; + data = (uint8_t *)&raw_decap_confs[idx].data; + } + *total_size = 0; + memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA); + /* process hdr from upper layer to low layer (L3/L4 -> L2). */ + data_tail = data + ACTION_RAW_ENCAP_MAX_DATA; + for (i = n - 1 ; i >= 0; --i) { + item = in->args.vc.pattern + i; + if (item->spec == NULL) + item->spec = flow_item_default_mask(item); + switch (item->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + size = sizeof(struct rte_flow_item_eth); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + size = sizeof(struct rte_flow_item_vlan); + proto = RTE_ETHER_TYPE_VLAN; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + size = sizeof(struct rte_flow_item_ipv4); + proto = RTE_ETHER_TYPE_IPV4; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + size = sizeof(struct rte_flow_item_ipv6); + proto = RTE_ETHER_TYPE_IPV6; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + size = sizeof(struct rte_flow_item_udp); + proto = 0x11; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + size = sizeof(struct rte_flow_item_tcp); + proto = 0x06; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN: + size = sizeof(struct rte_flow_item_vxlan); + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + size = sizeof(struct rte_flow_item_vxlan_gpe); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + size = sizeof(struct rte_flow_item_gre); + proto = 0x2F; + break; + case RTE_FLOW_ITEM_TYPE_GRE_KEY: + size = sizeof(rte_be32_t); + proto = 0x0; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + size = sizeof(struct rte_flow_item_mpls); + proto = 0x0; + break; + case RTE_FLOW_ITEM_TYPE_NVGRE: + size = sizeof(struct rte_flow_item_nvgre); + proto = 0x2F; + break; + case RTE_FLOW_ITEM_TYPE_GENEVE: + size = sizeof(struct rte_flow_item_geneve); + break; + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + size = sizeof(struct rte_flow_item_l2tpv3oip); + proto = 0x73; + break; + case RTE_FLOW_ITEM_TYPE_ESP: + size = sizeof(struct rte_flow_item_esp); + proto = 0x32; + break; + case RTE_FLOW_ITEM_TYPE_AH: + size = sizeof(struct rte_flow_item_ah); + proto = 0x33; + break; + case RTE_FLOW_ITEM_TYPE_GTP: + size = sizeof(struct rte_flow_item_gtp); + break; + case RTE_FLOW_ITEM_TYPE_PFCP: + size = sizeof(struct rte_flow_item_pfcp); + break; + default: + printf("Error - Not supported item\n"); + *total_size = 0; + memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA); + return; + } + *total_size += size; + 
rte_memcpy(data_tail - (*total_size), item->spec, size); + /* update some fields which cannot be set by cmdline */ + update_fields((data_tail - (*total_size)), item, + upper_layer); + upper_layer = proto; + } + if (verbose_level & 0x1) + printf("total data size is %zu\n", (*total_size)); + RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA); + memmove(data, (data_tail - (*total_size)), *total_size); +} + +/** Populate help strings for current token (cmdline API). */ +static int +cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, + unsigned int size) +{ + struct context *ctx = &cmd_flow_context; + const struct token *token = &token_list[ctx->prev]; + + (void)hdr; + if (!size) + return -1; + /* Set token type and update global help with details. */ + snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN")); + if (token->help) + cmd_set_raw.help_str = token->help; + else + cmd_set_raw.help_str = token->name; + return 0; +} + +/** Token definition template (cmdline API). */ +static struct cmdline_token_hdr cmd_set_raw_token_hdr = { + .ops = &(struct cmdline_token_ops){ + .parse = cmd_flow_parse, + .complete_get_nb = cmd_flow_complete_get_nb, + .complete_get_elt = cmd_flow_complete_get_elt, + .get_help = cmd_set_raw_get_help, + }, + .offset = 0, +}; + +/** Populate the next dynamic token. */ +static void +cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr, + cmdline_parse_token_hdr_t **hdr_inst) +{ + struct context *ctx = &cmd_flow_context; + + /* Always reinitialize context before requesting the first token. */ + if (!(hdr_inst - cmd_set_raw.tokens)) { + cmd_flow_context_init(ctx); + ctx->curr = START_SET; + } + /* Return NULL when no more tokens are expected. */ + if (!ctx->next_num && (ctx->curr != START_SET)) { + *hdr = NULL; + return; + } + /* Determine if command should end here. */ + if (ctx->eol && ctx->last && ctx->next_num) { + const enum index *list = ctx->next[ctx->next_num - 1]; + int i; + + for (i = 0; list[i]; ++i) { + if (list[i] != END) + continue; + *hdr = NULL; + return; + } + } + *hdr = &cmd_set_raw_token_hdr; +} + +/** Token generator and output processing callback (cmdline API). */ +static void +cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2) +{ + if (cl == NULL) + cmd_set_raw_tok(arg0, arg2); + else + cmd_set_raw_parsed(arg0); +} + +/** Global parser instance (cmdline API). */ +cmdline_parse_inst_t cmd_set_raw = { + .f = cmd_set_raw_cb, + .data = NULL, /**< Unused. */ + .help_str = NULL, /**< Updated by cmd_flow_get_help(). */ + .tokens = { + NULL, + }, /**< Tokens are returned by cmd_flow_tok(). 
*/ +}; + +/* *** display raw_encap/raw_decap buf */ +struct cmd_show_set_raw_result { + cmdline_fixed_string_t cmd_show; + cmdline_fixed_string_t cmd_what; + cmdline_fixed_string_t cmd_all; + uint16_t cmd_index; +}; + +static void +cmd_show_set_raw_parsed(void *parsed_result, struct cmdline *cl, void *data) +{ + struct cmd_show_set_raw_result *res = parsed_result; + uint16_t index = res->cmd_index; + uint8_t all = 0; + uint8_t *raw_data = NULL; + size_t raw_size = 0; + char title[16] = {0}; + + RTE_SET_USED(cl); + RTE_SET_USED(data); + if (!strcmp(res->cmd_all, "all")) { + all = 1; + index = 0; + } else if (index >= RAW_ENCAP_CONFS_MAX_NUM) { + printf("index should be 0-%u\n", RAW_ENCAP_CONFS_MAX_NUM - 1); + return; + } + do { + if (!strcmp(res->cmd_what, "raw_encap")) { + raw_data = (uint8_t *)&raw_encap_confs[index].data; + raw_size = raw_encap_confs[index].size; + snprintf(title, 16, "\nindex: %u", index); + rte_hexdump(stdout, title, raw_data, raw_size); + } else { + raw_data = (uint8_t *)&raw_decap_confs[index].data; + raw_size = raw_decap_confs[index].size; + snprintf(title, 16, "\nindex: %u", index); + rte_hexdump(stdout, title, raw_data, raw_size); + } + } while (all && ++index < RAW_ENCAP_CONFS_MAX_NUM); +} + +cmdline_parse_token_string_t cmd_show_set_raw_cmd_show = + TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result, + cmd_show, "show"); +cmdline_parse_token_string_t cmd_show_set_raw_cmd_what = + TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result, + cmd_what, "raw_encap#raw_decap"); +cmdline_parse_token_num_t cmd_show_set_raw_cmd_index = + TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result, + cmd_index, UINT16); +cmdline_parse_token_string_t cmd_show_set_raw_cmd_all = + TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result, + cmd_all, "all"); +cmdline_parse_inst_t cmd_show_set_raw = { + .f = cmd_show_set_raw_parsed, + .data = NULL, + .help_str = "show <raw_encap|raw_decap> <index>", + .tokens = { + (void *)&cmd_show_set_raw_cmd_show, + (void *)&cmd_show_set_raw_cmd_what, + (void *)&cmd_show_set_raw_cmd_index, + NULL, + }, +}; +cmdline_parse_inst_t cmd_show_set_raw_all = { + .f = cmd_show_set_raw_parsed, + .data = NULL, + .help_str = "show <raw_encap|raw_decap> all", + .tokens = { + (void *)&cmd_show_set_raw_cmd_show, + (void *)&cmd_show_set_raw_cmd_what, + (void *)&cmd_show_set_raw_cmd_all, + NULL, + }, +}; diff --git a/src/spdk/dpdk/app/test-pmd/cmdline_mtr.c b/src/spdk/dpdk/app/test-pmd/cmdline_mtr.c new file mode 100644 index 000000000..caa7e9864 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/cmdline_mtr.c @@ -0,0 +1,1467 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include <cmdline_parse.h> +#include <cmdline_parse_num.h> +#include <cmdline_parse_string.h> + +#include <rte_ethdev.h> +#include <rte_flow.h> +#include <rte_mtr.h> + +#include "testpmd.h" +#include "cmdline_mtr.h" + +#define PARSE_DELIMITER " \f\n\r\t\v" +#define MAX_DSCP_TABLE_ENTRIES 64 + +/** Display Meter Error Message */ +static void +print_err_msg(struct rte_mtr_error *error) +{ + static const char *const errstrlist[] = { + [RTE_MTR_ERROR_TYPE_NONE] = "no error", + [RTE_MTR_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", + [RTE_MTR_ERROR_TYPE_METER_PROFILE_ID] = "meter profile id", + [RTE_MTR_ERROR_TYPE_METER_PROFILE] = "meter profile null", + [RTE_MTR_ERROR_TYPE_MTR_ID] = "meter id", + [RTE_MTR_ERROR_TYPE_MTR_PARAMS] = "meter params null", + [RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN] + = "policer action(green)", + 
[RTE_MTR_ERROR_TYPE_POLICER_ACTION_YELLOW] + = "policer action(yellow)", + [RTE_MTR_ERROR_TYPE_POLICER_ACTION_RED] + = "policer action(red)", + [RTE_MTR_ERROR_TYPE_STATS_MASK] = "stats mask", + [RTE_MTR_ERROR_TYPE_STATS] = "stats", + [RTE_MTR_ERROR_TYPE_SHARED] + = "shared meter", + }; + + const char *errstr; + char buf[64]; + + if ((unsigned int)error->type >= RTE_DIM(errstrlist) || + !errstrlist[error->type]) + errstr = "unknown type"; + else + errstr = errstrlist[error->type]; + + if (error->cause) + snprintf(buf, sizeof(buf), "cause: %p, ", error->cause); + + printf("%s: %s%s (error %d)\n", errstr, error->cause ? buf : "", + error->message ? error->message : "(no stated reason)", + error->type); +} + +static int +parse_uint(uint64_t *value, const char *str) +{ + char *next = NULL; + uint64_t n; + + errno = 0; + /* Parse number string */ + n = strtol(str, &next, 10); + if (errno != 0 || str == next || *next != '\0') + return -1; + + *value = n; + + return 0; +} + +static int +parse_dscp_table_entries(char *str, enum rte_color **dscp_table) +{ + char *token; + int i = 0; + + token = strtok_r(str, PARSE_DELIMITER, &str); + if (token == NULL) + return 0; + + /* Allocate memory for dscp table */ + *dscp_table = (enum rte_color *)malloc(MAX_DSCP_TABLE_ENTRIES * + sizeof(enum rte_color)); + if (*dscp_table == NULL) + return -1; + + while (1) { + if (strcmp(token, "G") == 0 || + strcmp(token, "g") == 0) + *dscp_table[i++] = RTE_COLOR_GREEN; + else if (strcmp(token, "Y") == 0 || + strcmp(token, "y") == 0) + *dscp_table[i++] = RTE_COLOR_YELLOW; + else if (strcmp(token, "R") == 0 || + strcmp(token, "r") == 0) + *dscp_table[i++] = RTE_COLOR_RED; + else { + free(*dscp_table); + return -1; + } + if (i == MAX_DSCP_TABLE_ENTRIES) + break; + + token = strtok_r(str, PARSE_DELIMITER, &str); + if (token == NULL) { + free(*dscp_table); + return -1; + } + } + return 0; +} + +static int +parse_meter_color_str(char *c_str, uint32_t *use_prev_meter_color, + enum rte_color **dscp_table) +{ + char *token; + uint64_t previous_mtr_color = 0; + int ret; + + /* First token: use previous meter color */ + token = strtok_r(c_str, PARSE_DELIMITER, &c_str); + if (token == NULL) + return -1; + + ret = parse_uint(&previous_mtr_color, token); + if (ret != 0) + return -1; + + /* Check if previous meter color to be used */ + if (previous_mtr_color) { + *use_prev_meter_color = previous_mtr_color; + return 0; + } + + /* Parse dscp table entries */ + ret = parse_dscp_table_entries(c_str, dscp_table); + if (ret != 0) + return -1; + + return 0; +} + +static int +string_to_policer_action(char *s) +{ + if ((strcmp(s, "G") == 0) || (strcmp(s, "g") == 0)) + return MTR_POLICER_ACTION_COLOR_GREEN; + + if ((strcmp(s, "Y") == 0) || (strcmp(s, "y") == 0)) + return MTR_POLICER_ACTION_COLOR_YELLOW; + + if ((strcmp(s, "R") == 0) || (strcmp(s, "r") == 0)) + return MTR_POLICER_ACTION_COLOR_RED; + + if ((strcmp(s, "D") == 0) || (strcmp(s, "d") == 0)) + return MTR_POLICER_ACTION_DROP; + + return -1; +} + +static int +parse_policer_action_string(char *p_str, uint32_t action_mask, + enum rte_mtr_policer_action actions[]) +{ + char *token; + int count = __builtin_popcount(action_mask); + int g_color = 0, y_color = 0, action, i; + + for (i = 0; i < count; i++) { + token = strtok_r(p_str, PARSE_DELIMITER, &p_str); + if (token == NULL) + return -1; + + action = string_to_policer_action(token); + if (action == -1) + return -1; + + if (g_color == 0 && (action_mask & 0x1)) { + actions[RTE_COLOR_GREEN] = action; + g_color = 1; + } else if (y_color == 0 && 
(action_mask & 0x2)) { + actions[RTE_COLOR_YELLOW] = action; + y_color = 1; + } else + actions[RTE_COLOR_RED] = action; + } + return 0; +} + +static int +parse_multi_token_string(char *t_str, uint16_t *port_id, + uint32_t *mtr_id, enum rte_color **dscp_table) +{ + char *token; + uint64_t val; + int ret; + + /* First token: port id */ + token = strtok_r(t_str, PARSE_DELIMITER, &t_str); + if (token == NULL) + return -1; + + ret = parse_uint(&val, token); + if (ret != 0 || val > UINT16_MAX) + return -1; + + *port_id = val; + + /* Second token: meter id */ + token = strtok_r(t_str, PARSE_DELIMITER, &t_str); + if (token == NULL) + return 0; + + ret = parse_uint(&val, token); + if (ret != 0 || val > UINT32_MAX) + return -1; + + *mtr_id = val; + + ret = parse_dscp_table_entries(t_str, dscp_table); + if (ret != 0) + return -1; + + return 0; +} + +/* *** Show Port Meter Capabilities *** */ +struct cmd_show_port_meter_cap_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t cap; + uint16_t port_id; +}; + +cmdline_parse_token_string_t cmd_show_port_meter_cap_show = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_cap_result, show, "show"); +cmdline_parse_token_string_t cmd_show_port_meter_cap_port = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_cap_result, port, "port"); +cmdline_parse_token_string_t cmd_show_port_meter_cap_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_cap_result, meter, "meter"); +cmdline_parse_token_string_t cmd_show_port_meter_cap_cap = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_cap_result, cap, "cap"); +cmdline_parse_token_num_t cmd_show_port_meter_cap_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_show_port_meter_cap_result, port_id, UINT16); + +static void cmd_show_port_meter_cap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_port_meter_cap_result *res = parsed_result; + struct rte_mtr_capabilities cap; + struct rte_mtr_error error; + uint16_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&cap, 0, sizeof(struct rte_mtr_capabilities)); + ret = rte_mtr_capabilities_get(port_id, &cap, &error); + if (ret) { + print_err_msg(&error); + return; + } + + printf("\n**** Port Meter Object Capabilities ****\n\n"); + printf("cap.n_max %" PRIu32 "\n", cap.n_max); + printf("cap.n_shared_max %" PRIu32 "\n", cap.n_shared_max); + printf("cap.identical %" PRId32 "\n", cap.identical); + printf("cap.shared_identical %" PRId32 "\n", + cap.shared_identical); + printf("cap.shared_n_flows_per_mtr_max %" PRIu32 "\n", + cap.shared_n_flows_per_mtr_max); + printf("cap.chaining_n_mtrs_per_flow_max %" PRIu32 "\n", + cap.chaining_n_mtrs_per_flow_max); + printf("cap.chaining_use_prev_mtr_color_supported %" PRId32 "\n", + cap.chaining_use_prev_mtr_color_supported); + printf("cap.chaining_use_prev_mtr_color_enforced %" PRId32 "\n", + cap.chaining_use_prev_mtr_color_enforced); + printf("cap.meter_srtcm_rfc2697_n_max %" PRIu32 "\n", + cap.meter_srtcm_rfc2697_n_max); + printf("cap.meter_trtcm_rfc2698_n_max %" PRIu32 "\n", + cap.meter_trtcm_rfc2698_n_max); + printf("cap.meter_trtcm_rfc4115_n_max %" PRIu32 "\n", + cap.meter_trtcm_rfc4115_n_max); + printf("cap.meter_rate_max %" PRIu64 "\n", cap.meter_rate_max); + printf("cap.color_aware_srtcm_rfc2697_supported %" PRId32 "\n", + cap.color_aware_srtcm_rfc2697_supported); + printf("cap.color_aware_trtcm_rfc2698_supported 
%" PRId32 "\n", + cap.color_aware_trtcm_rfc2698_supported); + printf("cap.color_aware_trtcm_rfc4115_supported %" PRId32 "\n", + cap.color_aware_trtcm_rfc4115_supported); + printf("cap.policer_action_recolor_supported %" PRId32 "\n", + cap.policer_action_recolor_supported); + printf("cap.policer_action_drop_supported %" PRId32 "\n", + cap.policer_action_drop_supported); + printf("cap.stats_mask %" PRIx64 "\n", cap.stats_mask); +} + +cmdline_parse_inst_t cmd_show_port_meter_cap = { + .f = cmd_show_port_meter_cap_parsed, + .data = NULL, + .help_str = "Show port meter cap", + .tokens = { + (void *)&cmd_show_port_meter_cap_show, + (void *)&cmd_show_port_meter_cap_port, + (void *)&cmd_show_port_meter_cap_meter, + (void *)&cmd_show_port_meter_cap_cap, + (void *)&cmd_show_port_meter_cap_port_id, + NULL, + }, +}; + +/* *** Add Port Meter Profile srtcm_rfc2697 *** */ +struct cmd_add_port_meter_profile_srtcm_result { + cmdline_fixed_string_t add; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t profile; + cmdline_fixed_string_t srtcm_rfc2697; + uint16_t port_id; + uint32_t profile_id; + uint64_t cir; + uint64_t cbs; + uint64_t ebs; +}; + +cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_add = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, add, "add"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_port = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, + port, "port"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, + meter, "meter"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, + profile, "profile"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_srtcm_srtcm_rfc2697 = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, + srtcm_rfc2697, "srtcm_rfc2697"); +cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, + profile_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_cir = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, + cir, UINT64); +cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_cbs = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, + cbs, UINT64); +cmdline_parse_token_num_t cmd_add_port_meter_profile_srtcm_ebs = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_srtcm_result, + ebs, UINT64); + +static void cmd_add_port_meter_profile_srtcm_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_add_port_meter_profile_srtcm_result *res = parsed_result; + struct rte_mtr_meter_profile mp; + struct rte_mtr_error error; + uint32_t profile_id = res->profile_id; + uint16_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Private shaper profile params */ + memset(&mp, 0, sizeof(struct rte_mtr_meter_profile)); + mp.alg = RTE_MTR_SRTCM_RFC2697; + mp.srtcm_rfc2697.cir = res->cir; + mp.srtcm_rfc2697.cbs = res->cbs; + mp.srtcm_rfc2697.ebs = res->ebs; + + ret = rte_mtr_meter_profile_add(port_id, 
profile_id, &mp, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_add_port_meter_profile_srtcm = { + .f = cmd_add_port_meter_profile_srtcm_parsed, + .data = NULL, + .help_str = "Add port meter profile srtcm (rfc2697)", + .tokens = { + (void *)&cmd_add_port_meter_profile_srtcm_add, + (void *)&cmd_add_port_meter_profile_srtcm_port, + (void *)&cmd_add_port_meter_profile_srtcm_meter, + (void *)&cmd_add_port_meter_profile_srtcm_profile, + (void *)&cmd_add_port_meter_profile_srtcm_srtcm_rfc2697, + (void *)&cmd_add_port_meter_profile_srtcm_port_id, + (void *)&cmd_add_port_meter_profile_srtcm_profile_id, + (void *)&cmd_add_port_meter_profile_srtcm_cir, + (void *)&cmd_add_port_meter_profile_srtcm_cbs, + (void *)&cmd_add_port_meter_profile_srtcm_ebs, + NULL, + }, +}; + +/* *** Add Port Meter Profile trtcm_rfc2698 *** */ +struct cmd_add_port_meter_profile_trtcm_result { + cmdline_fixed_string_t add; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t profile; + cmdline_fixed_string_t trtcm_rfc2698; + uint16_t port_id; + uint32_t profile_id; + uint64_t cir; + uint64_t pir; + uint64_t cbs; + uint64_t pbs; +}; + +cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_add = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, add, "add"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_port = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + port, "port"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + meter, "meter"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + profile, "profile"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_trtcm_rfc2698 = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + trtcm_rfc2698, "trtcm_rfc2698"); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + profile_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_cir = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + cir, UINT64); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_pir = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + pir, UINT64); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_cbs = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + cbs, UINT64); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_pbs = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_result, + pbs, UINT64); + +static void cmd_add_port_meter_profile_trtcm_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_add_port_meter_profile_trtcm_result *res = parsed_result; + struct rte_mtr_meter_profile mp; + struct rte_mtr_error error; + uint32_t profile_id = res->profile_id; + uint16_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Private shaper profile params */ + memset(&mp, 0, sizeof(struct rte_mtr_meter_profile)); + mp.alg = 
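+ /* trTCM (RFC 2698): committed and peak information rates (CIR/PIR) with committed and peak burst sizes (CBS/PBS) */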
RTE_MTR_TRTCM_RFC2698; + mp.trtcm_rfc2698.cir = res->cir; + mp.trtcm_rfc2698.pir = res->pir; + mp.trtcm_rfc2698.cbs = res->cbs; + mp.trtcm_rfc2698.pbs = res->pbs; + + ret = rte_mtr_meter_profile_add(port_id, profile_id, &mp, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm = { + .f = cmd_add_port_meter_profile_trtcm_parsed, + .data = NULL, + .help_str = "Add port meter profile trtcm (rfc2698)", + .tokens = { + (void *)&cmd_add_port_meter_profile_trtcm_add, + (void *)&cmd_add_port_meter_profile_trtcm_port, + (void *)&cmd_add_port_meter_profile_trtcm_meter, + (void *)&cmd_add_port_meter_profile_trtcm_profile, + (void *)&cmd_add_port_meter_profile_trtcm_trtcm_rfc2698, + (void *)&cmd_add_port_meter_profile_trtcm_port_id, + (void *)&cmd_add_port_meter_profile_trtcm_profile_id, + (void *)&cmd_add_port_meter_profile_trtcm_cir, + (void *)&cmd_add_port_meter_profile_trtcm_pir, + (void *)&cmd_add_port_meter_profile_trtcm_cbs, + (void *)&cmd_add_port_meter_profile_trtcm_pbs, + NULL, + }, +}; + +/* *** Add Port Meter Profile trtcm_rfc4115 *** */ +struct cmd_add_port_meter_profile_trtcm_rfc4115_result { + cmdline_fixed_string_t add; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t profile; + cmdline_fixed_string_t trtcm_rfc4115; + uint16_t port_id; + uint32_t profile_id; + uint64_t cir; + uint64_t eir; + uint64_t cbs; + uint64_t ebs; +}; + +cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_rfc4115_add = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, add, + "add"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_rfc4115_port = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + port, "port"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_rfc4115_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + meter, "meter"); +cmdline_parse_token_string_t cmd_add_port_meter_profile_trtcm_rfc4115_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + profile, "profile"); +cmdline_parse_token_string_t + cmd_add_port_meter_profile_trtcm_rfc4115_trtcm_rfc4115 = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + trtcm_rfc4115, "trtcm_rfc4115"); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + profile_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_cir = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + cir, UINT64); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_eir = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + eir, UINT64); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_cbs = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + cbs, UINT64); +cmdline_parse_token_num_t cmd_add_port_meter_profile_trtcm_rfc4115_ebs = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_meter_profile_trtcm_rfc4115_result, + ebs, UINT64); + +static void cmd_add_port_meter_profile_trtcm_rfc4115_parsed( + void *parsed_result, 
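+ /* trTCM (RFC 4115): committed and excess information rates (CIR/EIR) with committed and excess burst sizes (CBS/EBS) */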
+ __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_add_port_meter_profile_trtcm_rfc4115_result *res = + parsed_result; + struct rte_mtr_meter_profile mp; + struct rte_mtr_error error; + uint32_t profile_id = res->profile_id; + uint16_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Private shaper profile params */ + memset(&mp, 0, sizeof(struct rte_mtr_meter_profile)); + mp.alg = RTE_MTR_TRTCM_RFC4115; + mp.trtcm_rfc4115.cir = res->cir; + mp.trtcm_rfc4115.eir = res->eir; + mp.trtcm_rfc4115.cbs = res->cbs; + mp.trtcm_rfc4115.ebs = res->ebs; + + ret = rte_mtr_meter_profile_add(port_id, profile_id, &mp, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm_rfc4115 = { + .f = cmd_add_port_meter_profile_trtcm_rfc4115_parsed, + .data = NULL, + .help_str = "Add port meter profile trtcm (rfc4115)", + .tokens = { + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_add, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_meter, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_profile, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_trtcm_rfc4115, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_port_id, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_profile_id, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_cir, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_eir, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_cbs, + (void *)&cmd_add_port_meter_profile_trtcm_rfc4115_ebs, + NULL, + }, +}; + +/* *** Delete Port Meter Profile *** */ +struct cmd_del_port_meter_profile_result { + cmdline_fixed_string_t del; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t profile; + uint16_t port_id; + uint32_t profile_id; +}; + +cmdline_parse_token_string_t cmd_del_port_meter_profile_del = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_meter_profile_result, del, "del"); +cmdline_parse_token_string_t cmd_del_port_meter_profile_port = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_meter_profile_result, + port, "port"); +cmdline_parse_token_string_t cmd_del_port_meter_profile_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_meter_profile_result, + meter, "meter"); +cmdline_parse_token_string_t cmd_del_port_meter_profile_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_meter_profile_result, + profile, "profile"); +cmdline_parse_token_num_t cmd_del_port_meter_profile_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_meter_profile_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_del_port_meter_profile_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_meter_profile_result, + profile_id, UINT32); + +static void cmd_del_port_meter_profile_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_del_port_meter_profile_result *res = parsed_result; + struct rte_mtr_error error; + uint32_t profile_id = res->profile_id; + uint16_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Delete meter profile */ + ret = rte_mtr_meter_profile_delete(port_id, profile_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_del_port_meter_profile = { + .f = cmd_del_port_meter_profile_parsed, + .data = NULL, + .help_str = "Delete port meter profile", + .tokens = { + (void 
*)&cmd_del_port_meter_profile_del, + (void *)&cmd_del_port_meter_profile_port, + (void *)&cmd_del_port_meter_profile_meter, + (void *)&cmd_del_port_meter_profile_profile, + (void *)&cmd_del_port_meter_profile_port_id, + (void *)&cmd_del_port_meter_profile_profile_id, + NULL, + }, +}; + +/* *** Create Port Meter Object *** */ +struct cmd_create_port_meter_result { + cmdline_fixed_string_t create; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + uint16_t port_id; + uint32_t mtr_id; + uint32_t profile_id; + cmdline_fixed_string_t meter_enable; + cmdline_fixed_string_t g_action; + cmdline_fixed_string_t y_action; + cmdline_fixed_string_t r_action; + uint64_t statistics_mask; + uint32_t shared; + cmdline_multi_string_t meter_input_color; +}; + +cmdline_parse_token_string_t cmd_create_port_meter_create = + TOKEN_STRING_INITIALIZER( + struct cmd_create_port_meter_result, create, "create"); +cmdline_parse_token_string_t cmd_create_port_meter_port = + TOKEN_STRING_INITIALIZER( + struct cmd_create_port_meter_result, port, "port"); +cmdline_parse_token_string_t cmd_create_port_meter_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_create_port_meter_result, meter, "meter"); +cmdline_parse_token_num_t cmd_create_port_meter_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_create_port_meter_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_create_port_meter_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_create_port_meter_result, mtr_id, UINT32); +cmdline_parse_token_num_t cmd_create_port_meter_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_create_port_meter_result, profile_id, UINT32); +cmdline_parse_token_string_t cmd_create_port_meter_meter_enable = + TOKEN_STRING_INITIALIZER(struct cmd_create_port_meter_result, + meter_enable, "yes#no"); +cmdline_parse_token_string_t cmd_create_port_meter_g_action = + TOKEN_STRING_INITIALIZER(struct cmd_create_port_meter_result, + g_action, "R#Y#G#D#r#y#g#d"); +cmdline_parse_token_string_t cmd_create_port_meter_y_action = + TOKEN_STRING_INITIALIZER(struct cmd_create_port_meter_result, + y_action, "R#Y#G#D#r#y#g#d"); +cmdline_parse_token_string_t cmd_create_port_meter_r_action = + TOKEN_STRING_INITIALIZER(struct cmd_create_port_meter_result, + r_action, "R#Y#G#D#r#y#g#d"); +cmdline_parse_token_num_t cmd_create_port_meter_statistics_mask = + TOKEN_NUM_INITIALIZER(struct cmd_create_port_meter_result, + statistics_mask, UINT64); +cmdline_parse_token_num_t cmd_create_port_meter_shared = + TOKEN_NUM_INITIALIZER(struct cmd_create_port_meter_result, + shared, UINT32); +cmdline_parse_token_string_t cmd_create_port_meter_input_color = + TOKEN_STRING_INITIALIZER(struct cmd_create_port_meter_result, + meter_input_color, TOKEN_STRING_MULTI); + +static void cmd_create_port_meter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_create_port_meter_result *res = parsed_result; + struct rte_mtr_error error; + struct rte_mtr_params params; + uint32_t mtr_id = res->mtr_id; + uint32_t shared = res->shared; + uint32_t use_prev_meter_color = 0; + uint16_t port_id = res->port_id; + enum rte_color *dscp_table = NULL; + char *c_str = res->meter_input_color; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Meter params */ + memset(¶ms, 0, sizeof(struct rte_mtr_params)); + params.meter_profile_id = res->profile_id; + + /* Parse meter input color string params */ + ret = parse_meter_color_str(c_str, &use_prev_meter_color, &dscp_table); + if (ret) { + printf(" Meter input color params 
string parse error\n"); + return; + } + + params.use_prev_mtr_color = use_prev_meter_color; + params.dscp_table = dscp_table; + + if (strcmp(res->meter_enable, "yes") == 0) + params.meter_enable = 1; + else + params.meter_enable = 0; + + params.action[RTE_COLOR_GREEN] = + string_to_policer_action(res->g_action); + params.action[RTE_COLOR_YELLOW] = + string_to_policer_action(res->y_action); + params.action[RTE_COLOR_RED] = + string_to_policer_action(res->r_action); + params.stats_mask = res->statistics_mask; + + ret = rte_mtr_create(port_id, mtr_id, ¶ms, shared, &error); + if (ret != 0) { + free(dscp_table); + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_create_port_meter = { + .f = cmd_create_port_meter_parsed, + .data = NULL, + .help_str = "Create port meter", + .tokens = { + (void *)&cmd_create_port_meter_create, + (void *)&cmd_create_port_meter_port, + (void *)&cmd_create_port_meter_meter, + (void *)&cmd_create_port_meter_port_id, + (void *)&cmd_create_port_meter_mtr_id, + (void *)&cmd_create_port_meter_profile_id, + (void *)&cmd_create_port_meter_meter_enable, + (void *)&cmd_create_port_meter_g_action, + (void *)&cmd_create_port_meter_y_action, + (void *)&cmd_create_port_meter_r_action, + (void *)&cmd_create_port_meter_statistics_mask, + (void *)&cmd_create_port_meter_shared, + (void *)&cmd_create_port_meter_input_color, + NULL, + }, +}; + +/* *** Enable Meter of MTR Object *** */ +struct cmd_enable_port_meter_result { + cmdline_fixed_string_t enable; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + uint16_t port_id; + uint32_t mtr_id; +}; + +cmdline_parse_token_string_t cmd_enable_port_meter_enable = + TOKEN_STRING_INITIALIZER( + struct cmd_enable_port_meter_result, enable, "enable"); +cmdline_parse_token_string_t cmd_enable_port_meter_port = + TOKEN_STRING_INITIALIZER( + struct cmd_enable_port_meter_result, port, "port"); +cmdline_parse_token_string_t cmd_enable_port_meter_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_enable_port_meter_result, meter, "meter"); +cmdline_parse_token_num_t cmd_enable_port_meter_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_enable_port_meter_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_enable_port_meter_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_enable_port_meter_result, mtr_id, UINT32); + +static void cmd_enable_port_meter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_enable_port_meter_result *res = parsed_result; + struct rte_mtr_error error; + uint32_t mtr_id = res->mtr_id; + uint16_t port_id = res->port_id; + + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Enable Meter */ + ret = rte_mtr_meter_enable(port_id, mtr_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_enable_port_meter = { + .f = cmd_enable_port_meter_parsed, + .data = NULL, + .help_str = "Enable port meter", + .tokens = { + (void *)&cmd_enable_port_meter_enable, + (void *)&cmd_enable_port_meter_port, + (void *)&cmd_enable_port_meter_meter, + (void *)&cmd_enable_port_meter_port_id, + (void *)&cmd_enable_port_meter_mtr_id, + NULL, + }, +}; + +/* *** Disable Meter of MTR Object *** */ +struct cmd_disable_port_meter_result { + cmdline_fixed_string_t disable; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + uint16_t port_id; + uint32_t mtr_id; +}; + +cmdline_parse_token_string_t cmd_disable_port_meter_disable = + TOKEN_STRING_INITIALIZER( + struct cmd_disable_port_meter_result, 
disable, "disable"); +cmdline_parse_token_string_t cmd_disable_port_meter_port = + TOKEN_STRING_INITIALIZER( + struct cmd_disable_port_meter_result, port, "port"); +cmdline_parse_token_string_t cmd_disable_port_meter_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_disable_port_meter_result, meter, "meter"); +cmdline_parse_token_num_t cmd_disable_port_meter_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_disable_port_meter_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_disable_port_meter_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_disable_port_meter_result, mtr_id, UINT32); + +static void cmd_disable_port_meter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_disable_port_meter_result *res = parsed_result; + struct rte_mtr_error error; + uint32_t mtr_id = res->mtr_id; + uint16_t port_id = res->port_id; + + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Disable Meter */ + ret = rte_mtr_meter_disable(port_id, mtr_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_disable_port_meter = { + .f = cmd_disable_port_meter_parsed, + .data = NULL, + .help_str = "Disable port meter", + .tokens = { + (void *)&cmd_disable_port_meter_disable, + (void *)&cmd_disable_port_meter_port, + (void *)&cmd_disable_port_meter_meter, + (void *)&cmd_disable_port_meter_port_id, + (void *)&cmd_disable_port_meter_mtr_id, + NULL, + }, +}; + +/* *** Delete Port Meter Object *** */ +struct cmd_del_port_meter_result { + cmdline_fixed_string_t del; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + uint16_t port_id; + uint32_t mtr_id; +}; + +cmdline_parse_token_string_t cmd_del_port_meter_del = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_meter_result, del, "del"); +cmdline_parse_token_string_t cmd_del_port_meter_port = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_meter_result, port, "port"); +cmdline_parse_token_string_t cmd_del_port_meter_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_meter_result, meter, "meter"); +cmdline_parse_token_num_t cmd_del_port_meter_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_meter_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_del_port_meter_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_meter_result, mtr_id, UINT32); + +static void cmd_del_port_meter_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_del_port_meter_result *res = parsed_result; + struct rte_mtr_error error; + uint32_t mtr_id = res->mtr_id; + uint16_t port_id = res->port_id; + + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Destroy Meter */ + ret = rte_mtr_destroy(port_id, mtr_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_del_port_meter = { + .f = cmd_del_port_meter_parsed, + .data = NULL, + .help_str = "Delete port meter", + .tokens = { + (void *)&cmd_del_port_meter_del, + (void *)&cmd_del_port_meter_port, + (void *)&cmd_del_port_meter_meter, + (void *)&cmd_del_port_meter_port_id, + (void *)&cmd_del_port_meter_mtr_id, + NULL, + }, +}; + +/* *** Set Port Meter Profile *** */ +struct cmd_set_port_meter_profile_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t profile; + uint16_t port_id; + uint32_t mtr_id; + uint32_t profile_id; +}; + +cmdline_parse_token_string_t cmd_set_port_meter_profile_set = + 
TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_profile_result, set, "set"); +cmdline_parse_token_string_t cmd_set_port_meter_profile_port = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_profile_result, port, "port"); +cmdline_parse_token_string_t cmd_set_port_meter_profile_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_profile_result, meter, "meter"); +cmdline_parse_token_string_t cmd_set_port_meter_profile_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_profile_result, profile, "profile"); +cmdline_parse_token_num_t cmd_set_port_meter_profile_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_profile_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_set_port_meter_profile_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_profile_result, mtr_id, UINT32); +cmdline_parse_token_num_t cmd_set_port_meter_profile_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_profile_result, profile_id, UINT32); + +static void cmd_set_port_meter_profile_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_port_meter_profile_result *res = parsed_result; + struct rte_mtr_error error; + uint32_t mtr_id = res->mtr_id; + uint32_t profile_id = res->profile_id; + uint16_t port_id = res->port_id; + + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Set meter profile */ + ret = rte_mtr_meter_profile_update(port_id, mtr_id, + profile_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_set_port_meter_profile = { + .f = cmd_set_port_meter_profile_parsed, + .data = NULL, + .help_str = "Set port meter profile", + .tokens = { + (void *)&cmd_set_port_meter_profile_set, + (void *)&cmd_set_port_meter_profile_port, + (void *)&cmd_set_port_meter_profile_meter, + (void *)&cmd_set_port_meter_profile_profile, + (void *)&cmd_set_port_meter_profile_port_id, + (void *)&cmd_set_port_meter_profile_mtr_id, + (void *)&cmd_set_port_meter_profile_profile_id, + NULL, + }, +}; + +/* *** Set Port Meter DSCP Table *** */ +struct cmd_set_port_meter_dscp_table_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t dscp_table; + cmdline_multi_string_t token_string; +}; + +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_set = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_dscp_table_result, set, "set"); +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_port = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_dscp_table_result, port, "port"); +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_dscp_table_result, meter, "meter"); +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_dscp_table = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_dscp_table_result, + dscp_table, "dscp table"); +cmdline_parse_token_string_t cmd_set_port_meter_dscp_table_token_string = + TOKEN_STRING_INITIALIZER(struct cmd_set_port_meter_dscp_table_result, + token_string, TOKEN_STRING_MULTI); + +static void cmd_set_port_meter_dscp_table_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_port_meter_dscp_table_result *res = parsed_result; + struct rte_mtr_error error; + enum rte_color *dscp_table = NULL; + char *t_str = res->token_string; + uint32_t mtr_id = 0; + uint16_t port_id; + int ret; + + /* 
Parse string */ + ret = parse_multi_token_string(t_str, &port_id, &mtr_id, &dscp_table); + if (ret) { + printf(" Multi token string parse error\n"); + return; + } + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + goto free_table; + + /* Update Meter DSCP Table*/ + ret = rte_mtr_meter_dscp_table_update(port_id, mtr_id, + dscp_table, &error); + if (ret != 0) + print_err_msg(&error); + +free_table: + free(dscp_table); +} + +cmdline_parse_inst_t cmd_set_port_meter_dscp_table = { + .f = cmd_set_port_meter_dscp_table_parsed, + .data = NULL, + .help_str = "Update port meter dscp table", + .tokens = { + (void *)&cmd_set_port_meter_dscp_table_set, + (void *)&cmd_set_port_meter_dscp_table_port, + (void *)&cmd_set_port_meter_dscp_table_meter, + (void *)&cmd_set_port_meter_dscp_table_dscp_table, + (void *)&cmd_set_port_meter_dscp_table_token_string, + NULL, + }, +}; + +/* *** Set Port Meter Policer Action *** */ +struct cmd_set_port_meter_policer_action_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t policer; + cmdline_fixed_string_t action; + uint16_t port_id; + uint32_t mtr_id; + uint32_t action_mask; + cmdline_multi_string_t policer_action; +}; + +cmdline_parse_token_string_t cmd_set_port_meter_policer_action_set = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, set, "set"); +cmdline_parse_token_string_t cmd_set_port_meter_policer_action_port = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, port, "port"); +cmdline_parse_token_string_t cmd_set_port_meter_policer_action_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, meter, + "meter"); +cmdline_parse_token_string_t cmd_set_port_meter_policer_action_policer = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, policer, + "policer"); +cmdline_parse_token_string_t cmd_set_port_meter_policer_action_action = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, action, + "action"); +cmdline_parse_token_num_t cmd_set_port_meter_policer_action_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, port_id, + UINT16); +cmdline_parse_token_num_t cmd_set_port_meter_policer_action_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, mtr_id, + UINT32); +cmdline_parse_token_num_t cmd_set_port_meter_policer_action_action_mask = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, action_mask, + UINT32); +cmdline_parse_token_string_t cmd_set_port_meter_policer_action_policer_action = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_policer_action_result, + policer_action, TOKEN_STRING_MULTI); + +static void cmd_set_port_meter_policer_action_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_port_meter_policer_action_result *res = parsed_result; + enum rte_mtr_policer_action *actions; + struct rte_mtr_error error; + uint32_t mtr_id = res->mtr_id; + uint32_t action_mask = res->action_mask; + uint16_t port_id = res->port_id; + char *p_str = res->policer_action; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Check: action mask */ + if (action_mask == 0 || (action_mask & (~0x7UL))) { + printf(" Policer action mask not correct (error)\n"); + return; + } + + /* Allocate memory for policer actions */ + actions = (enum rte_mtr_policer_action *)malloc(RTE_COLORS 
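+ /* one policer action slot per color: RTE_COLOR_GREEN, RTE_COLOR_YELLOW, RTE_COLOR_RED */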
* + sizeof(enum rte_mtr_policer_action)); + if (actions == NULL) { + printf("Memory for policer actions not allocated (error)\n"); + return; + } + /* Parse policer action string */ + ret = parse_policer_action_string(p_str, action_mask, actions); + if (ret) { + printf(" Policer action string parse error\n"); + free(actions); + return; + } + + ret = rte_mtr_policer_actions_update(port_id, mtr_id, + action_mask, actions, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } + + free(actions); +} + +cmdline_parse_inst_t cmd_set_port_meter_policer_action = { + .f = cmd_set_port_meter_policer_action_parsed, + .data = NULL, + .help_str = "Set port meter policer action", + .tokens = { + (void *)&cmd_set_port_meter_policer_action_set, + (void *)&cmd_set_port_meter_policer_action_port, + (void *)&cmd_set_port_meter_policer_action_meter, + (void *)&cmd_set_port_meter_policer_action_policer, + (void *)&cmd_set_port_meter_policer_action_action, + (void *)&cmd_set_port_meter_policer_action_port_id, + (void *)&cmd_set_port_meter_policer_action_mtr_id, + (void *)&cmd_set_port_meter_policer_action_action_mask, + (void *)&cmd_set_port_meter_policer_action_policer_action, + NULL, + }, +}; + +/* *** Set Port Meter Stats Mask *** */ +struct cmd_set_port_meter_stats_mask_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t stats; + cmdline_fixed_string_t mask; + uint16_t port_id; + uint32_t mtr_id; + uint64_t stats_mask; +}; + +cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_set = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_stats_mask_result, set, "set"); +cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_port = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_stats_mask_result, port, "port"); +cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_stats_mask_result, meter, "meter"); +cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_stats = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_stats_mask_result, stats, "stats"); +cmdline_parse_token_string_t cmd_set_port_meter_stats_mask_mask = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_meter_stats_mask_result, mask, "mask"); +cmdline_parse_token_num_t cmd_set_port_meter_stats_mask_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_stats_mask_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_set_port_meter_stats_mask_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_stats_mask_result, mtr_id, UINT32); +cmdline_parse_token_num_t cmd_set_port_meter_stats_mask_stats_mask = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_meter_stats_mask_result, stats_mask, + UINT64); + +static void cmd_set_port_meter_stats_mask_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_port_meter_stats_mask_result *res = parsed_result; + struct rte_mtr_error error; + uint64_t stats_mask = res->stats_mask; + uint32_t mtr_id = res->mtr_id; + uint16_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + ret = rte_mtr_stats_update(port_id, mtr_id, stats_mask, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_set_port_meter_stats_mask = { + .f = cmd_set_port_meter_stats_mask_parsed, + .data = NULL, + .help_str = "Set port meter stats mask", + .tokens = { + (void *)&cmd_set_port_meter_stats_mask_set, + 
(void *)&cmd_set_port_meter_stats_mask_port, + (void *)&cmd_set_port_meter_stats_mask_meter, + (void *)&cmd_set_port_meter_stats_mask_stats, + (void *)&cmd_set_port_meter_stats_mask_mask, + (void *)&cmd_set_port_meter_stats_mask_port_id, + (void *)&cmd_set_port_meter_stats_mask_mtr_id, + (void *)&cmd_set_port_meter_stats_mask_stats_mask, + NULL, + }, +}; + +/* *** Show Port Meter Stats *** */ +struct cmd_show_port_meter_stats_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t meter; + cmdline_fixed_string_t stats; + uint16_t port_id; + uint32_t mtr_id; + cmdline_fixed_string_t clear; +}; + +cmdline_parse_token_string_t cmd_show_port_meter_stats_show = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_stats_result, show, "show"); +cmdline_parse_token_string_t cmd_show_port_meter_stats_port = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_stats_result, port, "port"); +cmdline_parse_token_string_t cmd_show_port_meter_stats_meter = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_stats_result, meter, "meter"); +cmdline_parse_token_string_t cmd_show_port_meter_stats_stats = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_stats_result, stats, "stats"); +cmdline_parse_token_num_t cmd_show_port_meter_stats_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_show_port_meter_stats_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_show_port_meter_stats_mtr_id = + TOKEN_NUM_INITIALIZER( + struct cmd_show_port_meter_stats_result, mtr_id, UINT32); +cmdline_parse_token_string_t cmd_show_port_meter_stats_clear = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_meter_stats_result, clear, "yes#no"); + +static void cmd_show_port_meter_stats_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_port_meter_stats_result *res = parsed_result; + struct rte_mtr_stats stats; + uint64_t stats_mask = 0; + struct rte_mtr_error error; + uint32_t mtr_id = res->mtr_id; + uint32_t clear = 0; + uint16_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + if (strcmp(res->clear, "yes") == 0) + clear = 1; + + memset(&stats, 0, sizeof(struct rte_mtr_stats)); + ret = rte_mtr_stats_read(port_id, mtr_id, &stats, + &stats_mask, clear, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } + + /* Display stats */ + if (stats_mask & RTE_MTR_STATS_N_PKTS_GREEN) + printf("\tPkts G: %" PRIu64 "\n", + stats.n_pkts[RTE_COLOR_GREEN]); + if (stats_mask & RTE_MTR_STATS_N_BYTES_GREEN) + printf("\tBytes G: %" PRIu64 "\n", + stats.n_bytes[RTE_COLOR_GREEN]); + if (stats_mask & RTE_MTR_STATS_N_PKTS_YELLOW) + printf("\tPkts Y: %" PRIu64 "\n", + stats.n_pkts[RTE_COLOR_YELLOW]); + if (stats_mask & RTE_MTR_STATS_N_BYTES_YELLOW) + printf("\tBytes Y: %" PRIu64 "\n", + stats.n_bytes[RTE_COLOR_YELLOW]); + if (stats_mask & RTE_MTR_STATS_N_PKTS_RED) + printf("\tPkts R: %" PRIu64 "\n", + stats.n_pkts[RTE_COLOR_RED]); + if (stats_mask & RTE_MTR_STATS_N_BYTES_RED) + printf("\tBytes R: %" PRIu64 "\n", + stats.n_bytes[RTE_COLOR_RED]); + if (stats_mask & RTE_MTR_STATS_N_PKTS_DROPPED) + printf("\tPkts DROPPED: %" PRIu64 "\n", + stats.n_pkts_dropped); + if (stats_mask & RTE_MTR_STATS_N_BYTES_DROPPED) + printf("\tBytes DROPPED: %" PRIu64 "\n", + stats.n_bytes_dropped); +} + +cmdline_parse_inst_t cmd_show_port_meter_stats = { + .f = cmd_show_port_meter_stats_parsed, + .data = NULL, + .help_str = "Show port meter stats", + .tokens = { + (void 
*)&cmd_show_port_meter_stats_show, + (void *)&cmd_show_port_meter_stats_port, + (void *)&cmd_show_port_meter_stats_meter, + (void *)&cmd_show_port_meter_stats_stats, + (void *)&cmd_show_port_meter_stats_port_id, + (void *)&cmd_show_port_meter_stats_mtr_id, + (void *)&cmd_show_port_meter_stats_clear, + NULL, + }, +}; diff --git a/src/spdk/dpdk/app/test-pmd/cmdline_mtr.h b/src/spdk/dpdk/app/test-pmd/cmdline_mtr.h new file mode 100644 index 000000000..e69d6da02 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/cmdline_mtr.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _CMDLINE_MTR_H_ +#define _CMDLINE_MTR_H_ + +/* Traffic Metering and Policing */ +extern cmdline_parse_inst_t cmd_show_port_meter_cap; +extern cmdline_parse_inst_t cmd_add_port_meter_profile_srtcm; +extern cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm; +extern cmdline_parse_inst_t cmd_add_port_meter_profile_trtcm_rfc4115; +extern cmdline_parse_inst_t cmd_del_port_meter_profile; +extern cmdline_parse_inst_t cmd_create_port_meter; +extern cmdline_parse_inst_t cmd_enable_port_meter; +extern cmdline_parse_inst_t cmd_disable_port_meter; +extern cmdline_parse_inst_t cmd_del_port_meter; +extern cmdline_parse_inst_t cmd_set_port_meter_profile; +extern cmdline_parse_inst_t cmd_set_port_meter_dscp_table; +extern cmdline_parse_inst_t cmd_set_port_meter_policer_action; +extern cmdline_parse_inst_t cmd_set_port_meter_stats_mask; +extern cmdline_parse_inst_t cmd_show_port_meter_stats; + +#endif /* _CMDLINE_MTR_H_ */ diff --git a/src/spdk/dpdk/app/test-pmd/cmdline_tm.c b/src/spdk/dpdk/app/test-pmd/cmdline_tm.c new file mode 100644 index 000000000..6951beb58 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/cmdline_tm.c @@ -0,0 +1,2463 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#include <cmdline_parse.h> +#include <cmdline_parse_num.h> +#include <cmdline_parse_string.h> + +#include <rte_ethdev.h> +#include <rte_flow.h> +#include <rte_tm.h> + +#include "testpmd.h" +#include "cmdline_tm.h" + +#define PARSE_DELIMITER " \f\n\r\t\v" +#define MAX_NUM_SHARED_SHAPERS 256 + +#define skip_white_spaces(pos) \ +({ \ + __typeof__(pos) _p = (pos); \ + for ( ; isspace(*_p); _p++) \ + ; \ + _p; \ +}) + +/** Display TM Error Message */ +static void +print_err_msg(struct rte_tm_error *error) +{ + static const char *const errstrlist[] = { + [RTE_TM_ERROR_TYPE_NONE] = "no error", + [RTE_TM_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", + [RTE_TM_ERROR_TYPE_CAPABILITIES] + = "capability parameter null", + [RTE_TM_ERROR_TYPE_LEVEL_ID] = "level id", + [RTE_TM_ERROR_TYPE_WRED_PROFILE] + = "wred profile null", + [RTE_TM_ERROR_TYPE_WRED_PROFILE_GREEN] = "wred profile(green)", + [RTE_TM_ERROR_TYPE_WRED_PROFILE_YELLOW] + = "wred profile(yellow)", + [RTE_TM_ERROR_TYPE_WRED_PROFILE_RED] = "wred profile(red)", + [RTE_TM_ERROR_TYPE_WRED_PROFILE_ID] = "wred profile id", + [RTE_TM_ERROR_TYPE_SHARED_WRED_CONTEXT_ID] + = "shared wred context id", + [RTE_TM_ERROR_TYPE_SHAPER_PROFILE] = "shaper profile null", + [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE] + = "committed rate field (shaper profile)", + [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE] + = "committed size field (shaper profile)", + [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE] + = "peak rate field (shaper profile)", + [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE] + = "peak size field (shaper profile)", + [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN] + = "packet adjust length field (shaper 
profile)", + [RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID] = "shaper profile id", + [RTE_TM_ERROR_TYPE_SHARED_SHAPER_ID] = "shared shaper id", + [RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID] = "parent node id", + [RTE_TM_ERROR_TYPE_NODE_PRIORITY] = "node priority", + [RTE_TM_ERROR_TYPE_NODE_WEIGHT] = "node weight", + [RTE_TM_ERROR_TYPE_NODE_PARAMS] = "node parameter null", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID] + = "shaper profile id field (node params)", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_SHAPER_ID] + = "shared shaper id field (node params)", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS] + = "num shared shapers field (node params)", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE] + = "wfq weght mode field (node params)", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES] + = "num strict priorities field (node params)", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN] + = "congestion management mode field (node params)", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_WRED_PROFILE_ID] = + "wred profile id field (node params)", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_SHARED_WRED_CONTEXT_ID] + = "shared wred context id field (node params)", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_WRED_CONTEXTS] + = "num shared wred contexts field (node params)", + [RTE_TM_ERROR_TYPE_NODE_PARAMS_STATS] + = "stats field (node params)", + [RTE_TM_ERROR_TYPE_NODE_ID] = "node id", + }; + + const char *errstr; + char buf[64]; + + if ((unsigned int)error->type >= RTE_DIM(errstrlist) || + !errstrlist[error->type]) + errstr = "unknown type"; + else + errstr = errstrlist[error->type]; + + if (error->cause) + snprintf(buf, sizeof(buf), "cause: %p, ", error->cause); + + printf("%s: %s%s (error %d)\n", errstr, error->cause ? buf : "", + error->message ? error->message : "(no stated reason)", + error->type); +} + +static int +read_uint64(uint64_t *value, const char *p) +{ + char *next; + uint64_t val; + + p = skip_white_spaces(p); + if (!isdigit(*p)) + return -EINVAL; + + val = strtoul(p, &next, 10); + if (p == next) + return -EINVAL; + + p = next; + switch (*p) { + case 'T': + val *= 1024ULL; + /* fall through */ + case 'G': + val *= 1024ULL; + /* fall through */ + case 'M': + val *= 1024ULL; + /* fall through */ + case 'k': + case 'K': + val *= 1024ULL; + p++; + break; + } + + p = skip_white_spaces(p); + if (*p != '\0') + return -EINVAL; + + *value = val; + return 0; +} + +static int +read_uint32(uint32_t *value, const char *p) +{ + uint64_t val = 0; + int ret = read_uint64(&val, p); + + if (ret < 0) + return ret; + + if (val > UINT32_MAX) + return -ERANGE; + + *value = val; + return 0; +} + +static int +parse_multi_ss_id_str(char *s_str, uint32_t *n_ssp, uint32_t shaper_id[]) +{ + uint32_t n_shared_shapers = 0, i = 0; + char *token; + + /* First token: num of shared shapers */ + token = strtok_r(s_str, PARSE_DELIMITER, &s_str); + if (token == NULL) + return -1; + + if (read_uint32(&n_shared_shapers, token)) + return -1; + + /* Check: num of shared shaper */ + if (n_shared_shapers >= MAX_NUM_SHARED_SHAPERS) { + printf(" Number of shared shapers exceed the max (error)\n"); + return -1; + } + + /* Parse shared shaper ids */ + while (1) { + token = strtok_r(s_str, PARSE_DELIMITER, &s_str); + if ((token != NULL && n_shared_shapers == 0) || + (token == NULL && i < n_shared_shapers)) + return -1; + + if (token == NULL) + break; + + if (read_uint32(&shaper_id[i], token)) + return -1; + i++; + } + *n_ssp = n_shared_shapers; + + return 0; +} +/* *** Port TM Capability *** */ +struct cmd_show_port_tm_cap_result { + cmdline_fixed_string_t show; + 
cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t cap; + uint16_t port_id; +}; + +cmdline_parse_token_string_t cmd_show_port_tm_cap_show = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_cap_result, + show, "show"); +cmdline_parse_token_string_t cmd_show_port_tm_cap_port = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_cap_result, + port, "port"); +cmdline_parse_token_string_t cmd_show_port_tm_cap_tm = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_cap_result, + tm, "tm"); +cmdline_parse_token_string_t cmd_show_port_tm_cap_cap = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_cap_result, + cap, "cap"); +cmdline_parse_token_num_t cmd_show_port_tm_cap_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_cap_result, + port_id, UINT16); + +static void cmd_show_port_tm_cap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_port_tm_cap_result *res = parsed_result; + struct rte_tm_capabilities cap; + struct rte_tm_error error; + portid_t port_id = res->port_id; + uint32_t i; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&cap, 0, sizeof(struct rte_tm_capabilities)); + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_capabilities_get(port_id, &cap, &error); + if (ret) { + print_err_msg(&error); + return; + } + + printf("\n**** Port TM Capabilities ****\n\n"); + printf("cap.n_nodes_max %" PRIu32 "\n", cap.n_nodes_max); + printf("cap.n_levels_max %" PRIu32 "\n", cap.n_levels_max); + printf("cap.non_leaf_nodes_identical %" PRId32 "\n", + cap.non_leaf_nodes_identical); + printf("cap.leaf_nodes_identical %" PRId32 "\n", + cap.leaf_nodes_identical); + printf("cap.shaper_n_max %u\n", cap.shaper_n_max); + printf("cap.shaper_private_n_max %" PRIu32 "\n", + cap.shaper_private_n_max); + printf("cap.shaper_private_dual_rate_n_max %" PRId32 "\n", + cap.shaper_private_dual_rate_n_max); + printf("cap.shaper_private_rate_min %" PRIu64 "\n", + cap.shaper_private_rate_min); + printf("cap.shaper_private_rate_max %" PRIu64 "\n", + cap.shaper_private_rate_max); + printf("cap.shaper_shared_n_max %" PRIu32 "\n", + cap.shaper_shared_n_max); + printf("cap.shaper_shared_n_nodes_per_shaper_max %" PRIu32 "\n", + cap.shaper_shared_n_nodes_per_shaper_max); + printf("cap.shaper_shared_n_shapers_per_node_max %" PRIu32 "\n", + cap.shaper_shared_n_shapers_per_node_max); + printf("cap.shaper_shared_dual_rate_n_max %" PRIu32 "\n", + cap.shaper_shared_dual_rate_n_max); + printf("cap.shaper_shared_rate_min %" PRIu64 "\n", + cap.shaper_shared_rate_min); + printf("cap.shaper_shared_rate_max %" PRIu64 "\n", + cap.shaper_shared_rate_max); + printf("cap.shaper_pkt_length_adjust_min %" PRId32 "\n", + cap.shaper_pkt_length_adjust_min); + printf("cap.shaper_pkt_length_adjust_max %" PRId32 "\n", + cap.shaper_pkt_length_adjust_max); + printf("cap.sched_n_children_max %" PRIu32 "\n", + cap.sched_n_children_max); + printf("cap.sched_sp_n_priorities_max %" PRIu32 "\n", + cap.sched_sp_n_priorities_max); + printf("cap.sched_wfq_n_children_per_group_max %" PRIu32 "\n", + cap.sched_wfq_n_children_per_group_max); + printf("cap.sched_wfq_n_groups_max %" PRIu32 "\n", + cap.sched_wfq_n_groups_max); + printf("cap.sched_wfq_weight_max %" PRIu32 "\n", + cap.sched_wfq_weight_max); + printf("cap.cman_head_drop_supported %" PRId32 "\n", + cap.cman_head_drop_supported); + printf("cap.cman_wred_context_n_max %" PRIu32 "\n", + cap.cman_wred_context_n_max); + printf("cap.cman_wred_context_private_n_max %" 
PRIu32 "\n", + cap.cman_wred_context_private_n_max); + printf("cap.cman_wred_context_shared_n_max %" PRIu32 "\n", + cap.cman_wred_context_shared_n_max); + printf("cap.cman_wred_context_shared_n_nodes_per_context_max %" PRIu32 + "\n", cap.cman_wred_context_shared_n_nodes_per_context_max); + printf("cap.cman_wred_context_shared_n_contexts_per_node_max %" PRIu32 + "\n", cap.cman_wred_context_shared_n_contexts_per_node_max); + + for (i = 0; i < RTE_COLORS; i++) { + printf("cap.mark_vlan_dei_supported %" PRId32 "\n", + cap.mark_vlan_dei_supported[i]); + printf("cap.mark_ip_ecn_tcp_supported %" PRId32 "\n", + cap.mark_ip_ecn_tcp_supported[i]); + printf("cap.mark_ip_ecn_sctp_supported %" PRId32 "\n", + cap.mark_ip_ecn_sctp_supported[i]); + printf("cap.mark_ip_dscp_supported %" PRId32 "\n", + cap.mark_ip_dscp_supported[i]); + } + + printf("cap.dynamic_update_mask %" PRIx64 "\n", + cap.dynamic_update_mask); + printf("cap.stats_mask %" PRIx64 "\n", cap.stats_mask); +} + +cmdline_parse_inst_t cmd_show_port_tm_cap = { + .f = cmd_show_port_tm_cap_parsed, + .data = NULL, + .help_str = "Show Port TM Capabilities", + .tokens = { + (void *)&cmd_show_port_tm_cap_show, + (void *)&cmd_show_port_tm_cap_port, + (void *)&cmd_show_port_tm_cap_tm, + (void *)&cmd_show_port_tm_cap_cap, + (void *)&cmd_show_port_tm_cap_port_id, + NULL, + }, +}; + +/* *** Port TM Hierarchical Level Capability *** */ +struct cmd_show_port_tm_level_cap_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t level; + cmdline_fixed_string_t cap; + uint16_t port_id; + uint32_t level_id; +}; + +cmdline_parse_token_string_t cmd_show_port_tm_level_cap_show = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result, + show, "show"); +cmdline_parse_token_string_t cmd_show_port_tm_level_cap_port = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result, + port, "port"); +cmdline_parse_token_string_t cmd_show_port_tm_level_cap_tm = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result, + tm, "tm"); +cmdline_parse_token_string_t cmd_show_port_tm_level_cap_level = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result, + level, "level"); +cmdline_parse_token_string_t cmd_show_port_tm_level_cap_cap = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_level_cap_result, + cap, "cap"); +cmdline_parse_token_num_t cmd_show_port_tm_level_cap_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_level_cap_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_show_port_tm_level_cap_level_id = + TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_level_cap_result, + level_id, UINT32); + + +static void cmd_show_port_tm_level_cap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_port_tm_level_cap_result *res = parsed_result; + struct rte_tm_level_capabilities lcap; + struct rte_tm_error error; + portid_t port_id = res->port_id; + uint32_t level_id = res->level_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&lcap, 0, sizeof(struct rte_tm_level_capabilities)); + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_level_capabilities_get(port_id, level_id, &lcap, &error); + if (ret) { + print_err_msg(&error); + return; + } + printf("\n** Port TM Hierarchy level %" PRIu32 " Capability **\n\n", + level_id); + + printf("cap.n_nodes_max %" PRIu32 "\n", lcap.n_nodes_max); + printf("cap.n_nodes_nonleaf_max %" PRIu32 "\n", + 
lcap.n_nodes_nonleaf_max); + printf("cap.n_nodes_leaf_max %" PRIu32 "\n", lcap.n_nodes_leaf_max); + printf("cap.non_leaf_nodes_identical %" PRId32 "\n", + lcap.non_leaf_nodes_identical); + printf("cap.leaf_nodes_identical %" PRId32 "\n", + lcap.leaf_nodes_identical); + if (level_id <= 3) { + printf("cap.nonleaf.shaper_private_supported %" PRId32 "\n", + lcap.nonleaf.shaper_private_supported); + printf("cap.nonleaf.shaper_private_dual_rate_supported %" PRId32 + "\n", lcap.nonleaf.shaper_private_dual_rate_supported); + printf("cap.nonleaf.shaper_private_rate_min %" PRIu64 "\n", + lcap.nonleaf.shaper_private_rate_min); + printf("cap.nonleaf.shaper_private_rate_max %" PRIu64 "\n", + lcap.nonleaf.shaper_private_rate_max); + printf("cap.nonleaf.shaper_shared_n_max %" PRIu32 "\n", + lcap.nonleaf.shaper_shared_n_max); + printf("cap.nonleaf.sched_n_children_max %" PRIu32 "\n", + lcap.nonleaf.sched_n_children_max); + printf("cap.nonleaf.sched_sp_n_priorities_max %" PRIu32 "\n", + lcap.nonleaf.sched_sp_n_priorities_max); + printf("cap.nonleaf.sched_wfq_n_children_per_group_max %" PRIu32 + "\n", lcap.nonleaf.sched_wfq_n_children_per_group_max); + printf("cap.nonleaf.sched_wfq_n_groups_max %" PRIu32 "\n", + lcap.nonleaf.sched_wfq_n_groups_max); + printf("cap.nonleaf.sched_wfq_weight_max %" PRIu32 "\n", + lcap.nonleaf.sched_wfq_weight_max); + printf("cap.nonleaf.stats_mask %" PRIx64 "\n", + lcap.nonleaf.stats_mask); + } else { + printf("cap.leaf.shaper_private_supported %" PRId32 "\n", + lcap.leaf.shaper_private_supported); + printf("cap.leaf.shaper_private_dual_rate_supported %" PRId32 + "\n", lcap.leaf.shaper_private_dual_rate_supported); + printf("cap.leaf.shaper_private_rate_min %" PRIu64 "\n", + lcap.leaf.shaper_private_rate_min); + printf("cap.leaf.shaper_private_rate_max %" PRIu64 "\n", + lcap.leaf.shaper_private_rate_max); + printf("cap.leaf.shaper_shared_n_max %" PRIu32 "\n", + lcap.leaf.shaper_shared_n_max); + printf("cap.leaf.cman_head_drop_supported %" PRId32 "\n", + lcap.leaf.cman_head_drop_supported); + printf("cap.leaf.cman_wred_context_private_supported %" PRId32 + "\n", lcap.leaf.cman_wred_context_private_supported); + printf("cap.leaf.cman_wred_context_shared_n_max %" PRIu32 "\n", + lcap.leaf.cman_wred_context_shared_n_max); + printf("cap.leaf.stats_mask %" PRIx64 "\n", + lcap.leaf.stats_mask); + } +} + +cmdline_parse_inst_t cmd_show_port_tm_level_cap = { + .f = cmd_show_port_tm_level_cap_parsed, + .data = NULL, + .help_str = "Show Port TM Hierarhical level Capabilities", + .tokens = { + (void *)&cmd_show_port_tm_level_cap_show, + (void *)&cmd_show_port_tm_level_cap_port, + (void *)&cmd_show_port_tm_level_cap_tm, + (void *)&cmd_show_port_tm_level_cap_level, + (void *)&cmd_show_port_tm_level_cap_cap, + (void *)&cmd_show_port_tm_level_cap_port_id, + (void *)&cmd_show_port_tm_level_cap_level_id, + NULL, + }, +}; + +/* *** Port TM Hierarchy Node Capability *** */ +struct cmd_show_port_tm_node_cap_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t cap; + uint16_t port_id; + uint32_t node_id; +}; + +cmdline_parse_token_string_t cmd_show_port_tm_node_cap_show = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_node_cap_result, + show, "show"); +cmdline_parse_token_string_t cmd_show_port_tm_node_cap_port = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_node_cap_result, + port, "port"); +cmdline_parse_token_string_t cmd_show_port_tm_node_cap_tm = + TOKEN_STRING_INITIALIZER(struct 
cmd_show_port_tm_node_cap_result, + tm, "tm"); +cmdline_parse_token_string_t cmd_show_port_tm_node_cap_node = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_node_cap_result, + node, "node"); +cmdline_parse_token_string_t cmd_show_port_tm_node_cap_cap = + TOKEN_STRING_INITIALIZER(struct cmd_show_port_tm_node_cap_result, + cap, "cap"); +cmdline_parse_token_num_t cmd_show_port_tm_node_cap_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_node_cap_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_show_port_tm_node_cap_node_id = + TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_node_cap_result, + node_id, UINT32); + +static void cmd_show_port_tm_node_cap_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_port_tm_node_cap_result *res = parsed_result; + struct rte_tm_node_capabilities ncap; + struct rte_tm_error error; + uint32_t node_id = res->node_id; + portid_t port_id = res->port_id; + int ret, is_leaf = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + /* Node id must be valid */ + ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } + + memset(&ncap, 0, sizeof(struct rte_tm_node_capabilities)); + ret = rte_tm_node_capabilities_get(port_id, node_id, &ncap, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } + printf("\n** Port TM Hierarchy node %" PRIu32 " Capability **\n\n", + node_id); + printf("cap.shaper_private_supported %" PRId32 "\n", + ncap.shaper_private_supported); + printf("cap.shaper_private_dual_rate_supported %" PRId32 "\n", + ncap.shaper_private_dual_rate_supported); + printf("cap.shaper_private_rate_min %" PRIu64 "\n", + ncap.shaper_private_rate_min); + printf("cap.shaper_private_rate_max %" PRIu64 "\n", + ncap.shaper_private_rate_max); + printf("cap.shaper_shared_n_max %" PRIu32 "\n", + ncap.shaper_shared_n_max); + if (!is_leaf) { + printf("cap.nonleaf.sched_n_children_max %" PRIu32 "\n", + ncap.nonleaf.sched_n_children_max); + printf("cap.nonleaf.sched_sp_n_priorities_max %" PRIu32 "\n", + ncap.nonleaf.sched_sp_n_priorities_max); + printf("cap.nonleaf.sched_wfq_n_children_per_group_max %" PRIu32 + "\n", ncap.nonleaf.sched_wfq_n_children_per_group_max); + printf("cap.nonleaf.sched_wfq_n_groups_max %" PRIu32 "\n", + ncap.nonleaf.sched_wfq_n_groups_max); + printf("cap.nonleaf.sched_wfq_weight_max %" PRIu32 "\n", + ncap.nonleaf.sched_wfq_weight_max); + } else { + printf("cap.leaf.cman_head_drop_supported %" PRId32 "\n", + ncap.leaf.cman_head_drop_supported); + printf("cap.leaf.cman_wred_context_private_supported %" PRId32 + "\n", ncap.leaf.cman_wred_context_private_supported); + printf("cap.leaf.cman_wred_context_shared_n_max %" PRIu32 "\n", + ncap.leaf.cman_wred_context_shared_n_max); + } + printf("cap.stats_mask %" PRIx64 "\n", ncap.stats_mask); +} + +cmdline_parse_inst_t cmd_show_port_tm_node_cap = { + .f = cmd_show_port_tm_node_cap_parsed, + .data = NULL, + .help_str = "Show Port TM Hierarchy node capabilities", + .tokens = { + (void *)&cmd_show_port_tm_node_cap_show, + (void *)&cmd_show_port_tm_node_cap_port, + (void *)&cmd_show_port_tm_node_cap_tm, + (void *)&cmd_show_port_tm_node_cap_node, + (void *)&cmd_show_port_tm_node_cap_cap, + (void *)&cmd_show_port_tm_node_cap_port_id, + (void *)&cmd_show_port_tm_node_cap_node_id, + NULL, + }, +}; + +/* *** Show Port TM Node Statistics *** */ +struct cmd_show_port_tm_node_stats_result { + 
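+ /* mirrors the "show port tm node stats <port_id> <node_id> <clear>" token sequence defined below */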
cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t stats; + uint16_t port_id; + uint32_t node_id; + uint32_t clear; +}; + +cmdline_parse_token_string_t cmd_show_port_tm_node_stats_show = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_stats_result, show, "show"); +cmdline_parse_token_string_t cmd_show_port_tm_node_stats_port = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_stats_result, port, "port"); +cmdline_parse_token_string_t cmd_show_port_tm_node_stats_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_stats_result, tm, "tm"); +cmdline_parse_token_string_t cmd_show_port_tm_node_stats_node = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_stats_result, node, "node"); +cmdline_parse_token_string_t cmd_show_port_tm_node_stats_stats = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_stats_result, stats, "stats"); +cmdline_parse_token_num_t cmd_show_port_tm_node_stats_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_show_port_tm_node_stats_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_show_port_tm_node_stats_node_id = + TOKEN_NUM_INITIALIZER( + struct cmd_show_port_tm_node_stats_result, + node_id, UINT32); +cmdline_parse_token_num_t cmd_show_port_tm_node_stats_clear = + TOKEN_NUM_INITIALIZER( + struct cmd_show_port_tm_node_stats_result, clear, UINT32); + +static void cmd_show_port_tm_node_stats_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_port_tm_node_stats_result *res = parsed_result; + struct rte_tm_node_stats stats; + struct rte_tm_error error; + uint64_t stats_mask = 0; + uint32_t node_id = res->node_id; + uint32_t clear = res->clear; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + /* Port status */ + if (!port_is_started(port_id)) { + printf(" Port %u not started (error)\n", port_id); + return; + } + + memset(&stats, 0, sizeof(struct rte_tm_node_stats)); + ret = rte_tm_node_stats_read(port_id, node_id, &stats, + &stats_mask, clear, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } + + /* Display stats */ + if (stats_mask & RTE_TM_STATS_N_PKTS) + printf("\tPkts scheduled from node: %" PRIu64 "\n", + stats.n_pkts); + if (stats_mask & RTE_TM_STATS_N_BYTES) + printf("\tBytes scheduled from node: %" PRIu64 "\n", + stats.n_bytes); + if (stats_mask & RTE_TM_STATS_N_PKTS_GREEN_DROPPED) + printf("\tPkts dropped (green): %" PRIu64 "\n", + stats.leaf.n_pkts_dropped[RTE_COLOR_GREEN]); + if (stats_mask & RTE_TM_STATS_N_PKTS_YELLOW_DROPPED) + printf("\tPkts dropped (yellow): %" PRIu64 "\n", + stats.leaf.n_pkts_dropped[RTE_COLOR_YELLOW]); + if (stats_mask & RTE_TM_STATS_N_PKTS_RED_DROPPED) + printf("\tPkts dropped (red): %" PRIu64 "\n", + stats.leaf.n_pkts_dropped[RTE_COLOR_RED]); + if (stats_mask & RTE_TM_STATS_N_BYTES_GREEN_DROPPED) + printf("\tBytes dropped (green): %" PRIu64 "\n", + stats.leaf.n_bytes_dropped[RTE_COLOR_GREEN]); + if (stats_mask & RTE_TM_STATS_N_BYTES_YELLOW_DROPPED) + printf("\tBytes dropped (yellow): %" PRIu64 "\n", + stats.leaf.n_bytes_dropped[RTE_COLOR_YELLOW]); + if (stats_mask & RTE_TM_STATS_N_BYTES_RED_DROPPED) + printf("\tBytes dropped (red): %" PRIu64 "\n", + stats.leaf.n_bytes_dropped[RTE_COLOR_RED]); + if (stats_mask & RTE_TM_STATS_N_PKTS_QUEUED) + printf("\tPkts queued: %" PRIu64 "\n", + stats.leaf.n_pkts_queued); + if 
(stats_mask & RTE_TM_STATS_N_BYTES_QUEUED) + printf("\tBytes queued: %" PRIu64 "\n", + stats.leaf.n_bytes_queued); +} + +cmdline_parse_inst_t cmd_show_port_tm_node_stats = { + .f = cmd_show_port_tm_node_stats_parsed, + .data = NULL, + .help_str = "Show port tm node stats", + .tokens = { + (void *)&cmd_show_port_tm_node_stats_show, + (void *)&cmd_show_port_tm_node_stats_port, + (void *)&cmd_show_port_tm_node_stats_tm, + (void *)&cmd_show_port_tm_node_stats_node, + (void *)&cmd_show_port_tm_node_stats_stats, + (void *)&cmd_show_port_tm_node_stats_port_id, + (void *)&cmd_show_port_tm_node_stats_node_id, + (void *)&cmd_show_port_tm_node_stats_clear, + NULL, + }, +}; + +/* *** Show Port TM Node Type *** */ +struct cmd_show_port_tm_node_type_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t type; + uint16_t port_id; + uint32_t node_id; +}; + +cmdline_parse_token_string_t cmd_show_port_tm_node_type_show = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_type_result, show, "show"); +cmdline_parse_token_string_t cmd_show_port_tm_node_type_port = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_type_result, port, "port"); +cmdline_parse_token_string_t cmd_show_port_tm_node_type_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_type_result, tm, "tm"); +cmdline_parse_token_string_t cmd_show_port_tm_node_type_node = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_type_result, node, "node"); +cmdline_parse_token_string_t cmd_show_port_tm_node_type_type = + TOKEN_STRING_INITIALIZER( + struct cmd_show_port_tm_node_type_result, type, "type"); +cmdline_parse_token_num_t cmd_show_port_tm_node_type_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_show_port_tm_node_type_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_show_port_tm_node_type_node_id = + TOKEN_NUM_INITIALIZER( + struct cmd_show_port_tm_node_type_result, + node_id, UINT32); + +static void cmd_show_port_tm_node_type_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_show_port_tm_node_type_result *res = parsed_result; + struct rte_tm_error error; + uint32_t node_id = res->node_id; + portid_t port_id = res->port_id; + int ret, is_leaf = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_node_type_get(port_id, node_id, &is_leaf, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } + + if (is_leaf == 1) + printf("leaf node\n"); + else + printf("nonleaf node\n"); + +} + +cmdline_parse_inst_t cmd_show_port_tm_node_type = { + .f = cmd_show_port_tm_node_type_parsed, + .data = NULL, + .help_str = "Show port tm node type", + .tokens = { + (void *)&cmd_show_port_tm_node_type_show, + (void *)&cmd_show_port_tm_node_type_port, + (void *)&cmd_show_port_tm_node_type_tm, + (void *)&cmd_show_port_tm_node_type_node, + (void *)&cmd_show_port_tm_node_type_type, + (void *)&cmd_show_port_tm_node_type_port_id, + (void *)&cmd_show_port_tm_node_type_node_id, + NULL, + }, +}; + +/* *** Add Port TM Private Shaper Profile *** */ +struct cmd_add_port_tm_node_shaper_profile_result { + cmdline_fixed_string_t add; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t shaper; + cmdline_fixed_string_t profile; + uint16_t port_id; + uint32_t shaper_id; + uint64_t cmit_tb_rate; + uint64_t cmit_tb_size; + uint64_t 
peak_tb_rate; + uint64_t peak_tb_size; + uint32_t pktlen_adjust; +}; + +cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_add = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, add, "add"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_port = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + port, "port"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + tm, "tm"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_node = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + node, "node"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_shaper = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + shaper, "shaper"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shaper_profile_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + profile, "profile"); +cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_shaper_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + shaper_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_cmit_tb_rate = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + cmit_tb_rate, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_cmit_tb_size = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + cmit_tb_size, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_peak_tb_rate = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + peak_tb_rate, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_peak_tb_size = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + peak_tb_size, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_shaper_profile_pktlen_adjust = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shaper_profile_result, + pktlen_adjust, UINT32); + +static void cmd_add_port_tm_node_shaper_profile_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_add_port_tm_node_shaper_profile_result *res = parsed_result; + struct rte_tm_shaper_params sp; + struct rte_tm_error error; + uint32_t shaper_id = res->shaper_id; + uint32_t pkt_len_adjust = res->pktlen_adjust; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + /* Private shaper profile params */ + memset(&sp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&error, 0, sizeof(struct rte_tm_error)); + sp.committed.rate = res->cmit_tb_rate; + sp.committed.size = res->cmit_tb_size; + sp.peak.rate = res->peak_tb_rate; + sp.peak.size = res->peak_tb_size; + sp.pkt_length_adjust = pkt_len_adjust; + + ret = rte_tm_shaper_profile_add(port_id, shaper_id, &sp, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_add_port_tm_node_shaper_profile = { + .f = cmd_add_port_tm_node_shaper_profile_parsed, + .data = NULL, + .help_str = "Add port tm node private shaper profile", + .tokens = { + (void *)&cmd_add_port_tm_node_shaper_profile_add, + 
(void *)&cmd_add_port_tm_node_shaper_profile_port, + (void *)&cmd_add_port_tm_node_shaper_profile_tm, + (void *)&cmd_add_port_tm_node_shaper_profile_node, + (void *)&cmd_add_port_tm_node_shaper_profile_shaper, + (void *)&cmd_add_port_tm_node_shaper_profile_profile, + (void *)&cmd_add_port_tm_node_shaper_profile_port_id, + (void *)&cmd_add_port_tm_node_shaper_profile_shaper_id, + (void *)&cmd_add_port_tm_node_shaper_profile_cmit_tb_rate, + (void *)&cmd_add_port_tm_node_shaper_profile_cmit_tb_size, + (void *)&cmd_add_port_tm_node_shaper_profile_peak_tb_rate, + (void *)&cmd_add_port_tm_node_shaper_profile_peak_tb_size, + (void *)&cmd_add_port_tm_node_shaper_profile_pktlen_adjust, + NULL, + }, +}; + +/* *** Delete Port TM Private Shaper Profile *** */ +struct cmd_del_port_tm_node_shaper_profile_result { + cmdline_fixed_string_t del; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t shaper; + cmdline_fixed_string_t profile; + uint16_t port_id; + uint32_t shaper_id; +}; + +cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_del = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shaper_profile_result, del, "del"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_port = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shaper_profile_result, + port, "port"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shaper_profile_result, tm, "tm"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_node = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shaper_profile_result, + node, "node"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_shaper = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shaper_profile_result, + shaper, "shaper"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shaper_profile_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shaper_profile_result, + profile, "profile"); +cmdline_parse_token_num_t cmd_del_port_tm_node_shaper_profile_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_tm_node_shaper_profile_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_del_port_tm_node_shaper_profile_shaper_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_tm_node_shaper_profile_result, + shaper_id, UINT32); + +static void cmd_del_port_tm_node_shaper_profile_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_del_port_tm_node_shaper_profile_result *res = parsed_result; + struct rte_tm_error error; + uint32_t shaper_id = res->shaper_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_shaper_profile_delete(port_id, shaper_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_del_port_tm_node_shaper_profile = { + .f = cmd_del_port_tm_node_shaper_profile_parsed, + .data = NULL, + .help_str = "Delete port tm node private shaper profile", + .tokens = { + (void *)&cmd_del_port_tm_node_shaper_profile_del, + (void *)&cmd_del_port_tm_node_shaper_profile_port, + (void *)&cmd_del_port_tm_node_shaper_profile_tm, + (void *)&cmd_del_port_tm_node_shaper_profile_node, + (void *)&cmd_del_port_tm_node_shaper_profile_shaper, + (void *)&cmd_del_port_tm_node_shaper_profile_profile, + (void 
*)&cmd_del_port_tm_node_shaper_profile_port_id, + (void *)&cmd_del_port_tm_node_shaper_profile_shaper_id, + NULL, + }, +}; + +/* *** Add/Update Port TM shared Shaper *** */ +struct cmd_add_port_tm_node_shared_shaper_result { + cmdline_fixed_string_t cmd_type; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t shared; + cmdline_fixed_string_t shaper; + uint16_t port_id; + uint32_t shared_shaper_id; + uint32_t shaper_profile_id; +}; + +cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_cmd_type = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shared_shaper_result, + cmd_type, "add#set"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_port = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shared_shaper_result, port, "port"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shared_shaper_result, tm, "tm"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_node = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shared_shaper_result, node, "node"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_shared = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shared_shaper_result, + shared, "shared"); +cmdline_parse_token_string_t cmd_add_port_tm_node_shared_shaper_shaper = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_shared_shaper_result, + shaper, "shaper"); +cmdline_parse_token_num_t cmd_add_port_tm_node_shared_shaper_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shared_shaper_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_add_port_tm_node_shared_shaper_shared_shaper_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shared_shaper_result, + shared_shaper_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_node_shared_shaper_shaper_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_shared_shaper_result, + shaper_profile_id, UINT32); + +static void cmd_add_port_tm_node_shared_shaper_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_add_port_tm_node_shared_shaper_result *res = parsed_result; + struct rte_tm_error error; + uint32_t shared_shaper_id = res->shared_shaper_id; + uint32_t shaper_profile_id = res->shaper_profile_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + /* Command type: add */ + if ((strcmp(res->cmd_type, "add") == 0) && + (port_is_started(port_id))) { + printf(" Port %u not stopped (error)\n", port_id); + return; + } + + /* Command type: set (update) */ + if ((strcmp(res->cmd_type, "set") == 0) && + (!port_is_started(port_id))) { + printf(" Port %u not started (error)\n", port_id); + return; + } + + ret = rte_tm_shared_shaper_add_update(port_id, shared_shaper_id, + shaper_profile_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_add_port_tm_node_shared_shaper = { + .f = cmd_add_port_tm_node_shared_shaper_parsed, + .data = NULL, + .help_str = "add/update port tm node shared shaper", + .tokens = { + (void *)&cmd_add_port_tm_node_shared_shaper_cmd_type, + (void *)&cmd_add_port_tm_node_shared_shaper_port, + (void *)&cmd_add_port_tm_node_shared_shaper_tm, + (void *)&cmd_add_port_tm_node_shared_shaper_node, + (void 
*)&cmd_add_port_tm_node_shared_shaper_shared, + (void *)&cmd_add_port_tm_node_shared_shaper_shaper, + (void *)&cmd_add_port_tm_node_shared_shaper_port_id, + (void *)&cmd_add_port_tm_node_shared_shaper_shared_shaper_id, + (void *)&cmd_add_port_tm_node_shared_shaper_shaper_profile_id, + NULL, + }, +}; + +/* *** Delete Port TM shared Shaper *** */ +struct cmd_del_port_tm_node_shared_shaper_result { + cmdline_fixed_string_t del; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t shared; + cmdline_fixed_string_t shaper; + uint16_t port_id; + uint32_t shared_shaper_id; +}; + +cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_del = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shared_shaper_result, del, "del"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_port = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shared_shaper_result, port, "port"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shared_shaper_result, tm, "tm"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_node = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shared_shaper_result, node, "node"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_shared = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shared_shaper_result, + shared, "shared"); +cmdline_parse_token_string_t cmd_del_port_tm_node_shared_shaper_shaper = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_shared_shaper_result, + shaper, "shaper"); +cmdline_parse_token_num_t cmd_del_port_tm_node_shared_shaper_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_tm_node_shared_shaper_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_del_port_tm_node_shared_shaper_shared_shaper_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_tm_node_shared_shaper_result, + shared_shaper_id, UINT32); + +static void cmd_del_port_tm_node_shared_shaper_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_del_port_tm_node_shared_shaper_result *res = parsed_result; + struct rte_tm_error error; + uint32_t shared_shaper_id = res->shared_shaper_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_shared_shaper_delete(port_id, shared_shaper_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_del_port_tm_node_shared_shaper = { + .f = cmd_del_port_tm_node_shared_shaper_parsed, + .data = NULL, + .help_str = "delete port tm node shared shaper", + .tokens = { + (void *)&cmd_del_port_tm_node_shared_shaper_del, + (void *)&cmd_del_port_tm_node_shared_shaper_port, + (void *)&cmd_del_port_tm_node_shared_shaper_tm, + (void *)&cmd_del_port_tm_node_shared_shaper_node, + (void *)&cmd_del_port_tm_node_shared_shaper_shared, + (void *)&cmd_del_port_tm_node_shared_shaper_shaper, + (void *)&cmd_del_port_tm_node_shared_shaper_port_id, + (void *)&cmd_del_port_tm_node_shared_shaper_shared_shaper_id, + NULL, + }, +}; + +/* *** Add Port TM Node WRED Profile *** */ +struct cmd_add_port_tm_node_wred_profile_result { + cmdline_fixed_string_t add; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t wred; + cmdline_fixed_string_t profile; + uint16_t port_id; + uint32_t 
wred_profile_id; + cmdline_fixed_string_t color_g; + uint64_t min_th_g; + uint64_t max_th_g; + uint16_t maxp_inv_g; + uint16_t wq_log2_g; + cmdline_fixed_string_t color_y; + uint64_t min_th_y; + uint64_t max_th_y; + uint16_t maxp_inv_y; + uint16_t wq_log2_y; + cmdline_fixed_string_t color_r; + uint64_t min_th_r; + uint64_t max_th_r; + uint16_t maxp_inv_r; + uint16_t wq_log2_r; +}; + +cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_add = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, add, "add"); +cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_port = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, port, "port"); +cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, tm, "tm"); +cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_node = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, node, "node"); +cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_wred = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, wred, "wred"); +cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + profile, "profile"); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_wred_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + wred_profile_id, UINT32); +cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_color_g = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + color_g, "G#g"); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_min_th_g = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + min_th_g, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_max_th_g = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + max_th_g, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_maxp_inv_g = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + maxp_inv_g, UINT16); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_wq_log2_g = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + wq_log2_g, UINT16); +cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_color_y = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + color_y, "Y#y"); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_min_th_y = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + min_th_y, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_max_th_y = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + max_th_y, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_maxp_inv_y = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + maxp_inv_y, UINT16); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_wq_log2_y = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + wq_log2_y, UINT16); +cmdline_parse_token_string_t cmd_add_port_tm_node_wred_profile_color_r = + TOKEN_STRING_INITIALIZER( + struct 
cmd_add_port_tm_node_wred_profile_result, + color_r, "R#r"); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_min_th_r = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + min_th_r, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_max_th_r = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + max_th_r, UINT64); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_maxp_inv_r = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + maxp_inv_r, UINT16); +cmdline_parse_token_num_t cmd_add_port_tm_node_wred_profile_wq_log2_r = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_node_wred_profile_result, + wq_log2_r, UINT16); + + +static void cmd_add_port_tm_node_wred_profile_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_add_port_tm_node_wred_profile_result *res = parsed_result; + struct rte_tm_wred_params wp; + enum rte_color color; + struct rte_tm_error error; + uint32_t wred_profile_id = res->wred_profile_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&wp, 0, sizeof(struct rte_tm_wred_params)); + memset(&error, 0, sizeof(struct rte_tm_error)); + + /* WRED Params (Green Color)*/ + color = RTE_COLOR_GREEN; + wp.red_params[color].min_th = res->min_th_g; + wp.red_params[color].max_th = res->max_th_g; + wp.red_params[color].maxp_inv = res->maxp_inv_g; + wp.red_params[color].wq_log2 = res->wq_log2_g; + + + /* WRED Params (Yellow Color)*/ + color = RTE_COLOR_YELLOW; + wp.red_params[color].min_th = res->min_th_y; + wp.red_params[color].max_th = res->max_th_y; + wp.red_params[color].maxp_inv = res->maxp_inv_y; + wp.red_params[color].wq_log2 = res->wq_log2_y; + + /* WRED Params (Red Color)*/ + color = RTE_COLOR_RED; + wp.red_params[color].min_th = res->min_th_r; + wp.red_params[color].max_th = res->max_th_r; + wp.red_params[color].maxp_inv = res->maxp_inv_r; + wp.red_params[color].wq_log2 = res->wq_log2_r; + + ret = rte_tm_wred_profile_add(port_id, wred_profile_id, &wp, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_add_port_tm_node_wred_profile = { + .f = cmd_add_port_tm_node_wred_profile_parsed, + .data = NULL, + .help_str = "Add port tm node wred profile", + .tokens = { + (void *)&cmd_add_port_tm_node_wred_profile_add, + (void *)&cmd_add_port_tm_node_wred_profile_port, + (void *)&cmd_add_port_tm_node_wred_profile_tm, + (void *)&cmd_add_port_tm_node_wred_profile_node, + (void *)&cmd_add_port_tm_node_wred_profile_wred, + (void *)&cmd_add_port_tm_node_wred_profile_profile, + (void *)&cmd_add_port_tm_node_wred_profile_port_id, + (void *)&cmd_add_port_tm_node_wred_profile_wred_profile_id, + (void *)&cmd_add_port_tm_node_wred_profile_color_g, + (void *)&cmd_add_port_tm_node_wred_profile_min_th_g, + (void *)&cmd_add_port_tm_node_wred_profile_max_th_g, + (void *)&cmd_add_port_tm_node_wred_profile_maxp_inv_g, + (void *)&cmd_add_port_tm_node_wred_profile_wq_log2_g, + (void *)&cmd_add_port_tm_node_wred_profile_color_y, + (void *)&cmd_add_port_tm_node_wred_profile_min_th_y, + (void *)&cmd_add_port_tm_node_wred_profile_max_th_y, + (void *)&cmd_add_port_tm_node_wred_profile_maxp_inv_y, + (void *)&cmd_add_port_tm_node_wred_profile_wq_log2_y, + (void *)&cmd_add_port_tm_node_wred_profile_color_r, + (void *)&cmd_add_port_tm_node_wred_profile_min_th_r, + (void *)&cmd_add_port_tm_node_wred_profile_max_th_r, + (void 
*)&cmd_add_port_tm_node_wred_profile_maxp_inv_r, + (void *)&cmd_add_port_tm_node_wred_profile_wq_log2_r, + NULL, + }, +}; + +/* *** Delete Port TM node WRED Profile *** */ +struct cmd_del_port_tm_node_wred_profile_result { + cmdline_fixed_string_t del; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t wred; + cmdline_fixed_string_t profile; + uint16_t port_id; + uint32_t wred_profile_id; +}; + +cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_del = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_wred_profile_result, del, "del"); +cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_port = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_wred_profile_result, port, "port"); +cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_wred_profile_result, tm, "tm"); +cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_node = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_wred_profile_result, node, "node"); +cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_wred = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_wred_profile_result, wred, "wred"); +cmdline_parse_token_string_t cmd_del_port_tm_node_wred_profile_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_wred_profile_result, + profile, "profile"); +cmdline_parse_token_num_t cmd_del_port_tm_node_wred_profile_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_tm_node_wred_profile_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_del_port_tm_node_wred_profile_wred_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_del_port_tm_node_wred_profile_result, + wred_profile_id, UINT32); + +static void cmd_del_port_tm_node_wred_profile_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_del_port_tm_node_wred_profile_result *res = parsed_result; + struct rte_tm_error error; + uint32_t wred_profile_id = res->wred_profile_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_wred_profile_delete(port_id, wred_profile_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_del_port_tm_node_wred_profile = { + .f = cmd_del_port_tm_node_wred_profile_parsed, + .data = NULL, + .help_str = "Delete port tm node wred profile", + .tokens = { + (void *)&cmd_del_port_tm_node_wred_profile_del, + (void *)&cmd_del_port_tm_node_wred_profile_port, + (void *)&cmd_del_port_tm_node_wred_profile_tm, + (void *)&cmd_del_port_tm_node_wred_profile_node, + (void *)&cmd_del_port_tm_node_wred_profile_wred, + (void *)&cmd_del_port_tm_node_wred_profile_profile, + (void *)&cmd_del_port_tm_node_wred_profile_port_id, + (void *)&cmd_del_port_tm_node_wred_profile_wred_profile_id, + NULL, + }, +}; + +/* *** Update Port TM Node Shaper profile *** */ +struct cmd_set_port_tm_node_shaper_profile_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t shaper; + cmdline_fixed_string_t profile; + uint16_t port_id; + uint32_t node_id; + uint32_t shaper_profile_id; +}; + +cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_set = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_shaper_profile_result, set, "set"); 
+cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_port = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_shaper_profile_result, + port, "port"); +cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_shaper_profile_result, tm, "tm"); +cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_node = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_shaper_profile_result, + node, "node"); +cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_shaper = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_shaper_profile_result, + shaper, "shaper"); +cmdline_parse_token_string_t cmd_set_port_tm_node_shaper_profile_profile = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_shaper_profile_result, + profile, "profile"); +cmdline_parse_token_num_t cmd_set_port_tm_node_shaper_profile_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_tm_node_shaper_profile_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_set_port_tm_node_shaper_profile_node_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_port_tm_node_shaper_profile_result, + node_id, UINT32); +cmdline_parse_token_num_t + cmd_set_port_tm_node_shaper_shaper_profile_profile_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_tm_node_shaper_profile_result, + shaper_profile_id, UINT32); + +static void cmd_set_port_tm_node_shaper_profile_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_port_tm_node_shaper_profile_result *res = parsed_result; + struct rte_tm_error error; + uint32_t node_id = res->node_id; + uint32_t shaper_profile_id = res->shaper_profile_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + /* Port status */ + if (!port_is_started(port_id)) { + printf(" Port %u not started (error)\n", port_id); + return; + } + + ret = rte_tm_node_shaper_update(port_id, node_id, + shaper_profile_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_set_port_tm_node_shaper_profile = { + .f = cmd_set_port_tm_node_shaper_profile_parsed, + .data = NULL, + .help_str = "Set port tm node shaper profile", + .tokens = { + (void *)&cmd_set_port_tm_node_shaper_profile_set, + (void *)&cmd_set_port_tm_node_shaper_profile_port, + (void *)&cmd_set_port_tm_node_shaper_profile_tm, + (void *)&cmd_set_port_tm_node_shaper_profile_node, + (void *)&cmd_set_port_tm_node_shaper_profile_shaper, + (void *)&cmd_set_port_tm_node_shaper_profile_profile, + (void *)&cmd_set_port_tm_node_shaper_profile_port_id, + (void *)&cmd_set_port_tm_node_shaper_profile_node_id, + (void *)&cmd_set_port_tm_node_shaper_shaper_profile_profile_id, + NULL, + }, +}; + +/* *** Add Port TM nonleaf node *** */ +struct cmd_add_port_tm_nonleaf_node_result { + cmdline_fixed_string_t add; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t nonleaf; + cmdline_fixed_string_t node; + uint16_t port_id; + uint32_t node_id; + int32_t parent_node_id; + uint32_t priority; + uint32_t weight; + uint32_t level_id; + int32_t shaper_profile_id; + uint32_t n_sp_priorities; + uint64_t stats_mask; + cmdline_multi_string_t multi_shared_shaper_id; +}; + +cmdline_parse_token_string_t cmd_add_port_tm_nonleaf_node_add = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_nonleaf_node_result, add, "add"); +cmdline_parse_token_string_t 
cmd_add_port_tm_nonleaf_node_port = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_nonleaf_node_result, port, "port"); +cmdline_parse_token_string_t cmd_add_port_tm_nonleaf_node_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_nonleaf_node_result, tm, "tm"); +cmdline_parse_token_string_t cmd_add_port_tm_nonleaf_node_nonleaf = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_nonleaf_node_result, nonleaf, "nonleaf"); +cmdline_parse_token_string_t cmd_add_port_tm_nonleaf_node_node = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_nonleaf_node_result, node, "node"); +cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_add_port_tm_nonleaf_node_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_node_id = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, + node_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_parent_node_id = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, + parent_node_id, INT32); +cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_priority = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, + priority, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_weight = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, + weight, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_level_id = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, + level_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_shaper_profile_id = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, + shaper_profile_id, INT32); +cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_n_sp_priorities = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, + n_sp_priorities, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_nonleaf_node_stats_mask = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, + stats_mask, UINT64); +cmdline_parse_token_string_t + cmd_add_port_tm_nonleaf_node_multi_shared_shaper_id = + TOKEN_STRING_INITIALIZER(struct cmd_add_port_tm_nonleaf_node_result, + multi_shared_shaper_id, TOKEN_STRING_MULTI); + +static void cmd_add_port_tm_nonleaf_node_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_add_port_tm_nonleaf_node_result *res = parsed_result; + struct rte_tm_error error; + struct rte_tm_node_params np; + uint32_t *shared_shaper_id; + uint32_t parent_node_id, n_shared_shapers = 0; + char *s_str = res->multi_shared_shaper_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&np, 0, sizeof(struct rte_tm_node_params)); + memset(&error, 0, sizeof(struct rte_tm_error)); + + /* Node parameters */ + if (res->parent_node_id < 0) + parent_node_id = UINT32_MAX; + else + parent_node_id = res->parent_node_id; + + shared_shaper_id = (uint32_t *)malloc(MAX_NUM_SHARED_SHAPERS * + sizeof(uint32_t)); + if (shared_shaper_id == NULL) { + printf(" Memory not allocated for shared shapers (error)\n"); + return; + } + + /* Parse multi shared shaper id string */ + ret = parse_multi_ss_id_str(s_str, &n_shared_shapers, shared_shaper_id); + if (ret) { + printf(" Shared shapers params string parse error\n"); + free(shared_shaper_id); + return; + } + + if (res->shaper_profile_id < 0) + np.shaper_profile_id = UINT32_MAX; + else + np.shaper_profile_id = res->shaper_profile_id; + + 
np.n_shared_shapers = n_shared_shapers; + if (np.n_shared_shapers) { + np.shared_shaper_id = &shared_shaper_id[0]; + } else { + free(shared_shaper_id); + shared_shaper_id = NULL; + } + + np.nonleaf.n_sp_priorities = res->n_sp_priorities; + np.stats_mask = res->stats_mask; + np.nonleaf.wfq_weight_mode = NULL; + + ret = rte_tm_node_add(port_id, res->node_id, parent_node_id, + res->priority, res->weight, res->level_id, + &np, &error); + if (ret != 0) { + print_err_msg(&error); + free(shared_shaper_id); + return; + } +} + +cmdline_parse_inst_t cmd_add_port_tm_nonleaf_node = { + .f = cmd_add_port_tm_nonleaf_node_parsed, + .data = NULL, + .help_str = "Add port tm nonleaf node", + .tokens = { + (void *)&cmd_add_port_tm_nonleaf_node_add, + (void *)&cmd_add_port_tm_nonleaf_node_port, + (void *)&cmd_add_port_tm_nonleaf_node_tm, + (void *)&cmd_add_port_tm_nonleaf_node_nonleaf, + (void *)&cmd_add_port_tm_nonleaf_node_node, + (void *)&cmd_add_port_tm_nonleaf_node_port_id, + (void *)&cmd_add_port_tm_nonleaf_node_node_id, + (void *)&cmd_add_port_tm_nonleaf_node_parent_node_id, + (void *)&cmd_add_port_tm_nonleaf_node_priority, + (void *)&cmd_add_port_tm_nonleaf_node_weight, + (void *)&cmd_add_port_tm_nonleaf_node_level_id, + (void *)&cmd_add_port_tm_nonleaf_node_shaper_profile_id, + (void *)&cmd_add_port_tm_nonleaf_node_n_sp_priorities, + (void *)&cmd_add_port_tm_nonleaf_node_stats_mask, + (void *)&cmd_add_port_tm_nonleaf_node_multi_shared_shaper_id, + NULL, + }, +}; + +/* *** Add Port TM leaf node *** */ +struct cmd_add_port_tm_leaf_node_result { + cmdline_fixed_string_t add; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t leaf; + cmdline_fixed_string_t node; + uint16_t port_id; + uint32_t node_id; + int32_t parent_node_id; + uint32_t priority; + uint32_t weight; + uint32_t level_id; + int32_t shaper_profile_id; + uint32_t cman_mode; + uint32_t wred_profile_id; + uint64_t stats_mask; + cmdline_multi_string_t multi_shared_shaper_id; +}; + +cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_add = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_leaf_node_result, add, "add"); +cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_port = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_leaf_node_result, port, "port"); +cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_leaf_node_result, tm, "tm"); +cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_nonleaf = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_leaf_node_result, leaf, "leaf"); +cmdline_parse_token_string_t cmd_add_port_tm_leaf_node_node = + TOKEN_STRING_INITIALIZER( + struct cmd_add_port_tm_leaf_node_result, node, "node"); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_node_id = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + node_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_parent_node_id = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + parent_node_id, INT32); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_priority = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + priority, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_weight = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + weight, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_level_id = + 
TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + level_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_shaper_profile_id = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + shaper_profile_id, INT32); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_cman_mode = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + cman_mode, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_wred_profile_id = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + wred_profile_id, UINT32); +cmdline_parse_token_num_t cmd_add_port_tm_leaf_node_stats_mask = + TOKEN_NUM_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + stats_mask, UINT64); +cmdline_parse_token_string_t + cmd_add_port_tm_leaf_node_multi_shared_shaper_id = + TOKEN_STRING_INITIALIZER(struct cmd_add_port_tm_leaf_node_result, + multi_shared_shaper_id, TOKEN_STRING_MULTI); + +static void cmd_add_port_tm_leaf_node_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_add_port_tm_leaf_node_result *res = parsed_result; + struct rte_tm_error error; + struct rte_tm_node_params np; + uint32_t *shared_shaper_id; + uint32_t parent_node_id, n_shared_shapers = 0; + portid_t port_id = res->port_id; + char *s_str = res->multi_shared_shaper_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&np, 0, sizeof(struct rte_tm_node_params)); + memset(&error, 0, sizeof(struct rte_tm_error)); + + /* Node parameters */ + if (res->parent_node_id < 0) + parent_node_id = UINT32_MAX; + else + parent_node_id = res->parent_node_id; + + shared_shaper_id = (uint32_t *)malloc(MAX_NUM_SHARED_SHAPERS * + sizeof(uint32_t)); + if (shared_shaper_id == NULL) { + printf(" Memory not allocated for shared shapers (error)\n"); + return; + } + + /* Parse multi shared shaper id string */ + ret = parse_multi_ss_id_str(s_str, &n_shared_shapers, shared_shaper_id); + if (ret) { + printf(" Shared shapers params string parse error\n"); + free(shared_shaper_id); + return; + } + + if (res->shaper_profile_id < 0) + np.shaper_profile_id = UINT32_MAX; + else + np.shaper_profile_id = res->shaper_profile_id; + + np.n_shared_shapers = n_shared_shapers; + + if (np.n_shared_shapers) { + np.shared_shaper_id = &shared_shaper_id[0]; + } else { + free(shared_shaper_id); + shared_shaper_id = NULL; + } + + np.leaf.cman = res->cman_mode; + np.leaf.wred.wred_profile_id = res->wred_profile_id; + np.stats_mask = res->stats_mask; + + ret = rte_tm_node_add(port_id, res->node_id, parent_node_id, + res->priority, res->weight, res->level_id, + &np, &error); + if (ret != 0) { + print_err_msg(&error); + free(shared_shaper_id); + return; + } +} + +cmdline_parse_inst_t cmd_add_port_tm_leaf_node = { + .f = cmd_add_port_tm_leaf_node_parsed, + .data = NULL, + .help_str = "Add port tm leaf node", + .tokens = { + (void *)&cmd_add_port_tm_leaf_node_add, + (void *)&cmd_add_port_tm_leaf_node_port, + (void *)&cmd_add_port_tm_leaf_node_tm, + (void *)&cmd_add_port_tm_leaf_node_nonleaf, + (void *)&cmd_add_port_tm_leaf_node_node, + (void *)&cmd_add_port_tm_leaf_node_port_id, + (void *)&cmd_add_port_tm_leaf_node_node_id, + (void *)&cmd_add_port_tm_leaf_node_parent_node_id, + (void *)&cmd_add_port_tm_leaf_node_priority, + (void *)&cmd_add_port_tm_leaf_node_weight, + (void *)&cmd_add_port_tm_leaf_node_level_id, + (void *)&cmd_add_port_tm_leaf_node_shaper_profile_id, + (void *)&cmd_add_port_tm_leaf_node_cman_mode, + (void 
*)&cmd_add_port_tm_leaf_node_wred_profile_id, + (void *)&cmd_add_port_tm_leaf_node_stats_mask, + (void *)&cmd_add_port_tm_leaf_node_multi_shared_shaper_id, + NULL, + }, +}; + +/* *** Delete Port TM Node *** */ +struct cmd_del_port_tm_node_result { + cmdline_fixed_string_t del; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + uint16_t port_id; + uint32_t node_id; +}; + +cmdline_parse_token_string_t cmd_del_port_tm_node_del = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_result, del, "del"); +cmdline_parse_token_string_t cmd_del_port_tm_node_port = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_result, port, "port"); +cmdline_parse_token_string_t cmd_del_port_tm_node_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_result, tm, "tm"); +cmdline_parse_token_string_t cmd_del_port_tm_node_node = + TOKEN_STRING_INITIALIZER( + struct cmd_del_port_tm_node_result, node, "node"); +cmdline_parse_token_num_t cmd_del_port_tm_node_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_del_port_tm_node_result, + port_id, UINT16); +cmdline_parse_token_num_t cmd_del_port_tm_node_node_id = + TOKEN_NUM_INITIALIZER(struct cmd_del_port_tm_node_result, + node_id, UINT32); + +static void cmd_del_port_tm_node_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_del_port_tm_node_result *res = parsed_result; + struct rte_tm_error error; + uint32_t node_id = res->node_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + /* Port status */ + if (port_is_started(port_id)) { + printf(" Port %u not stopped (error)\n", port_id); + return; + } + + ret = rte_tm_node_delete(port_id, node_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_del_port_tm_node = { + .f = cmd_del_port_tm_node_parsed, + .data = NULL, + .help_str = "Delete port tm node", + .tokens = { + (void *)&cmd_del_port_tm_node_del, + (void *)&cmd_del_port_tm_node_port, + (void *)&cmd_del_port_tm_node_tm, + (void *)&cmd_del_port_tm_node_node, + (void *)&cmd_del_port_tm_node_port_id, + (void *)&cmd_del_port_tm_node_node_id, + NULL, + }, +}; + +/* *** Update Port TM Node Parent *** */ +struct cmd_set_port_tm_node_parent_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + cmdline_fixed_string_t parent; + uint16_t port_id; + uint32_t node_id; + uint32_t parent_id; + uint32_t priority; + uint32_t weight; +}; + +cmdline_parse_token_string_t cmd_set_port_tm_node_parent_set = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_parent_result, set, "set"); +cmdline_parse_token_string_t cmd_set_port_tm_node_parent_port = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_parent_result, port, "port"); +cmdline_parse_token_string_t cmd_set_port_tm_node_parent_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_parent_result, tm, "tm"); +cmdline_parse_token_string_t cmd_set_port_tm_node_parent_node = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_parent_result, node, "node"); +cmdline_parse_token_string_t cmd_set_port_tm_node_parent_parent = + TOKEN_STRING_INITIALIZER( + struct cmd_set_port_tm_node_parent_result, parent, "parent"); +cmdline_parse_token_num_t cmd_set_port_tm_node_parent_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_tm_node_parent_result, port_id, UINT16); 
+cmdline_parse_token_num_t cmd_set_port_tm_node_parent_node_id = + TOKEN_NUM_INITIALIZER( + struct cmd_set_port_tm_node_parent_result, node_id, UINT32); +cmdline_parse_token_num_t cmd_set_port_tm_node_parent_parent_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_port_tm_node_parent_result, + parent_id, UINT32); +cmdline_parse_token_num_t cmd_set_port_tm_node_parent_priority = + TOKEN_NUM_INITIALIZER(struct cmd_set_port_tm_node_parent_result, + priority, UINT32); +cmdline_parse_token_num_t cmd_set_port_tm_node_parent_weight = + TOKEN_NUM_INITIALIZER(struct cmd_set_port_tm_node_parent_result, + weight, UINT32); + +static void cmd_set_port_tm_node_parent_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_port_tm_node_parent_result *res = parsed_result; + struct rte_tm_error error; + uint32_t node_id = res->node_id; + uint32_t parent_id = res->parent_id; + uint32_t priority = res->priority; + uint32_t weight = res->weight; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + /* Port status */ + if (!port_is_started(port_id)) { + printf(" Port %u not started (error)\n", port_id); + return; + } + + ret = rte_tm_node_parent_update(port_id, node_id, + parent_id, priority, weight, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_set_port_tm_node_parent = { + .f = cmd_set_port_tm_node_parent_parsed, + .data = NULL, + .help_str = "Set port tm node parent", + .tokens = { + (void *)&cmd_set_port_tm_node_parent_set, + (void *)&cmd_set_port_tm_node_parent_port, + (void *)&cmd_set_port_tm_node_parent_tm, + (void *)&cmd_set_port_tm_node_parent_node, + (void *)&cmd_set_port_tm_node_parent_parent, + (void *)&cmd_set_port_tm_node_parent_port_id, + (void *)&cmd_set_port_tm_node_parent_node_id, + (void *)&cmd_set_port_tm_node_parent_parent_id, + (void *)&cmd_set_port_tm_node_parent_priority, + (void *)&cmd_set_port_tm_node_parent_weight, + NULL, + }, +}; + +/* *** Suspend Port TM Node *** */ +struct cmd_suspend_port_tm_node_result { + cmdline_fixed_string_t suspend; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + uint16_t port_id; + uint32_t node_id; +}; + +cmdline_parse_token_string_t cmd_suspend_port_tm_node_suspend = + TOKEN_STRING_INITIALIZER( + struct cmd_suspend_port_tm_node_result, suspend, "suspend"); +cmdline_parse_token_string_t cmd_suspend_port_tm_node_port = + TOKEN_STRING_INITIALIZER( + struct cmd_suspend_port_tm_node_result, port, "port"); +cmdline_parse_token_string_t cmd_suspend_port_tm_node_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_suspend_port_tm_node_result, tm, "tm"); +cmdline_parse_token_string_t cmd_suspend_port_tm_node_node = + TOKEN_STRING_INITIALIZER( + struct cmd_suspend_port_tm_node_result, node, "node"); +cmdline_parse_token_num_t cmd_suspend_port_tm_node_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_suspend_port_tm_node_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_suspend_port_tm_node_node_id = + TOKEN_NUM_INITIALIZER( + struct cmd_suspend_port_tm_node_result, node_id, UINT32); + +static void cmd_suspend_port_tm_node_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_suspend_port_tm_node_result *res = parsed_result; + struct rte_tm_error error; + uint32_t node_id = res->node_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, 
ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_node_suspend(port_id, node_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_suspend_port_tm_node = { + .f = cmd_suspend_port_tm_node_parsed, + .data = NULL, + .help_str = "Suspend port tm node", + .tokens = { + (void *)&cmd_suspend_port_tm_node_suspend, + (void *)&cmd_suspend_port_tm_node_port, + (void *)&cmd_suspend_port_tm_node_tm, + (void *)&cmd_suspend_port_tm_node_node, + (void *)&cmd_suspend_port_tm_node_port_id, + (void *)&cmd_suspend_port_tm_node_node_id, + NULL, + }, +}; + +/* *** Resume Port TM Node *** */ +struct cmd_resume_port_tm_node_result { + cmdline_fixed_string_t resume; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t node; + uint16_t port_id; + uint32_t node_id; +}; + +cmdline_parse_token_string_t cmd_resume_port_tm_node_resume = + TOKEN_STRING_INITIALIZER( + struct cmd_resume_port_tm_node_result, resume, "resume"); +cmdline_parse_token_string_t cmd_resume_port_tm_node_port = + TOKEN_STRING_INITIALIZER( + struct cmd_resume_port_tm_node_result, port, "port"); +cmdline_parse_token_string_t cmd_resume_port_tm_node_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_resume_port_tm_node_result, tm, "tm"); +cmdline_parse_token_string_t cmd_resume_port_tm_node_node = + TOKEN_STRING_INITIALIZER( + struct cmd_resume_port_tm_node_result, node, "node"); +cmdline_parse_token_num_t cmd_resume_port_tm_node_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_resume_port_tm_node_result, port_id, UINT16); +cmdline_parse_token_num_t cmd_resume_port_tm_node_node_id = + TOKEN_NUM_INITIALIZER( + struct cmd_resume_port_tm_node_result, node_id, UINT32); + +static void cmd_resume_port_tm_node_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_resume_port_tm_node_result *res = parsed_result; + struct rte_tm_error error; + uint32_t node_id = res->node_id; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_node_resume(port_id, node_id, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_resume_port_tm_node = { + .f = cmd_resume_port_tm_node_parsed, + .data = NULL, + .help_str = "Resume port tm node", + .tokens = { + (void *)&cmd_resume_port_tm_node_resume, + (void *)&cmd_resume_port_tm_node_port, + (void *)&cmd_resume_port_tm_node_tm, + (void *)&cmd_resume_port_tm_node_node, + (void *)&cmd_resume_port_tm_node_port_id, + (void *)&cmd_resume_port_tm_node_node_id, + NULL, + }, +}; + +/* *** Port TM Hierarchy Commit *** */ +struct cmd_port_tm_hierarchy_commit_result { + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t hierarchy; + cmdline_fixed_string_t commit; + uint16_t port_id; + cmdline_fixed_string_t clean_on_fail; +}; + +cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_port = + TOKEN_STRING_INITIALIZER( + struct cmd_port_tm_hierarchy_commit_result, port, "port"); +cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_tm = + TOKEN_STRING_INITIALIZER( + struct cmd_port_tm_hierarchy_commit_result, tm, "tm"); +cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_hierarchy = + TOKEN_STRING_INITIALIZER( + struct cmd_port_tm_hierarchy_commit_result, + hierarchy, "hierarchy"); +cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_commit = + 
TOKEN_STRING_INITIALIZER( + struct cmd_port_tm_hierarchy_commit_result, commit, "commit"); +cmdline_parse_token_num_t cmd_port_tm_hierarchy_commit_port_id = + TOKEN_NUM_INITIALIZER( + struct cmd_port_tm_hierarchy_commit_result, + port_id, UINT16); +cmdline_parse_token_string_t cmd_port_tm_hierarchy_commit_clean_on_fail = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_hierarchy_commit_result, + clean_on_fail, "yes#no"); + +static void cmd_port_tm_hierarchy_commit_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_port_tm_hierarchy_commit_result *res = parsed_result; + struct rte_tm_error error; + uint32_t clean_on_fail; + portid_t port_id = res->port_id; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + if (strcmp(res->clean_on_fail, "yes") == 0) + clean_on_fail = 1; + else + clean_on_fail = 0; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_hierarchy_commit(port_id, clean_on_fail, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_port_tm_hierarchy_commit = { + .f = cmd_port_tm_hierarchy_commit_parsed, + .data = NULL, + .help_str = "Commit port tm hierarchy", + .tokens = { + (void *)&cmd_port_tm_hierarchy_commit_port, + (void *)&cmd_port_tm_hierarchy_commit_tm, + (void *)&cmd_port_tm_hierarchy_commit_hierarchy, + (void *)&cmd_port_tm_hierarchy_commit_commit, + (void *)&cmd_port_tm_hierarchy_commit_port_id, + (void *)&cmd_port_tm_hierarchy_commit_clean_on_fail, + NULL, + }, +}; + +/* *** Port TM Mark IP ECN *** */ +struct cmd_port_tm_mark_ip_ecn_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t mark; + cmdline_fixed_string_t ip_ecn; + uint16_t port_id; + uint16_t green; + uint16_t yellow; + uint16_t red; +}; + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_set = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, + set, "set"); + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_port = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, + port, "port"); + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_tm = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, tm, + "tm"); + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_mark = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, + mark, "mark"); + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_ecn_ip_ecn = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, + ip_ecn, "ip_ecn"); +cmdline_parse_token_num_t cmd_port_tm_mark_ip_ecn_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, + port_id, UINT16); + +cmdline_parse_token_num_t cmd_port_tm_mark_ip_ecn_green = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, + green, UINT16); +cmdline_parse_token_num_t cmd_port_tm_mark_ip_ecn_yellow = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, + yellow, UINT16); +cmdline_parse_token_num_t cmd_port_tm_mark_ip_ecn_red = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_ecn_result, + red, UINT16); + +static void cmd_port_tm_mark_ip_ecn_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_port_tm_mark_ip_ecn_result *res = parsed_result; + struct rte_tm_error error; + portid_t port_id = res->port_id; + int green = res->green; + int yellow = res->yellow; + int red = res->red; + int ret; + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + 
memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_mark_ip_ecn(port_id, green, yellow, red, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_port_tm_mark_ip_ecn = { + .f = cmd_port_tm_mark_ip_ecn_parsed, + .data = NULL, + .help_str = "set port tm mark ip_ecn <port> <green> <yellow> <red>", + .tokens = { + (void *)&cmd_port_tm_mark_ip_ecn_set, + (void *)&cmd_port_tm_mark_ip_ecn_port, + (void *)&cmd_port_tm_mark_ip_ecn_tm, + (void *)&cmd_port_tm_mark_ip_ecn_mark, + (void *)&cmd_port_tm_mark_ip_ecn_ip_ecn, + (void *)&cmd_port_tm_mark_ip_ecn_port_id, + (void *)&cmd_port_tm_mark_ip_ecn_green, + (void *)&cmd_port_tm_mark_ip_ecn_yellow, + (void *)&cmd_port_tm_mark_ip_ecn_red, + NULL, + }, +}; + + +/* *** Port TM Mark IP DSCP *** */ +struct cmd_port_tm_mark_ip_dscp_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t mark; + cmdline_fixed_string_t ip_dscp; + uint16_t port_id; + uint16_t green; + uint16_t yellow; + uint16_t red; +}; + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_set = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, + set, "set"); + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_port = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, + port, "port"); + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_tm = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, tm, + "tm"); + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_mark = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, + mark, "mark"); + +cmdline_parse_token_string_t cmd_port_tm_mark_ip_dscp_ip_dscp = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, + ip_dscp, "ip_dscp"); +cmdline_parse_token_num_t cmd_port_tm_mark_ip_dscp_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, + port_id, UINT16); + +cmdline_parse_token_num_t cmd_port_tm_mark_ip_dscp_green = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, + green, UINT16); +cmdline_parse_token_num_t cmd_port_tm_mark_ip_dscp_yellow = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, + yellow, UINT16); +cmdline_parse_token_num_t cmd_port_tm_mark_ip_dscp_red = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_ip_dscp_result, + red, UINT16); + +static void cmd_port_tm_mark_ip_dscp_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_port_tm_mark_ip_dscp_result *res = parsed_result; + struct rte_tm_error error; + portid_t port_id = res->port_id; + int green = res->green; + int yellow = res->yellow; + int red = res->red; + int ret; + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_mark_ip_dscp(port_id, green, yellow, red, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_port_tm_mark_ip_dscp = { + .f = cmd_port_tm_mark_ip_dscp_parsed, + .data = NULL, + .help_str = "set port tm mark ip_dscp <port> <green> <yellow> <red>", + .tokens = { + (void *)&cmd_port_tm_mark_ip_dscp_set, + (void *)&cmd_port_tm_mark_ip_dscp_port, + (void *)&cmd_port_tm_mark_ip_dscp_tm, + (void *)&cmd_port_tm_mark_ip_dscp_mark, + (void *)&cmd_port_tm_mark_ip_dscp_ip_dscp, + (void *)&cmd_port_tm_mark_ip_dscp_port_id, + (void *)&cmd_port_tm_mark_ip_dscp_green, + (void *)&cmd_port_tm_mark_ip_dscp_yellow, + (void *)&cmd_port_tm_mark_ip_dscp_red, + NULL, + 
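The three marking commands (ip_ecn, ip_dscp, vlan_dei) share the same <port> <green> <yellow> <red> shape and differ only in which rte_tm_mark_*() function they call. A rough sketch, assuming the port's TM driver advertises ECN marking support, for enabling marking on green and yellow traffic only:

/* Sketch only: mark ECN on green- and yellow-colored packets, leave red
 * unmarked. Capability checks (rte_tm_capabilities) are omitted here. */
#include <rte_tm.h>

static int
enable_ecn_marking(uint16_t port_id)
{
        struct rte_tm_error error = { 0 };

        /* green = 1, yellow = 1, red = 0 */
        return rte_tm_mark_ip_ecn(port_id, 1, 1, 0, &error);
}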
}, +}; + + +/* *** Port TM Mark VLAN_DEI *** */ +struct cmd_port_tm_mark_vlan_dei_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t port; + cmdline_fixed_string_t tm; + cmdline_fixed_string_t mark; + cmdline_fixed_string_t vlan_dei; + uint16_t port_id; + uint16_t green; + uint16_t yellow; + uint16_t red; +}; + +cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_set = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, + set, "set"); + +cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_port = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, + port, "port"); + +cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_tm = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, tm, + "tm"); + +cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_mark = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, + mark, "mark"); + +cmdline_parse_token_string_t cmd_port_tm_mark_vlan_dei_vlan_dei = + TOKEN_STRING_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, + vlan_dei, "vlan_dei"); +cmdline_parse_token_num_t cmd_port_tm_mark_vlan_dei_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, + port_id, UINT16); + +cmdline_parse_token_num_t cmd_port_tm_mark_vlan_dei_green = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, + green, UINT16); +cmdline_parse_token_num_t cmd_port_tm_mark_vlan_dei_yellow = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, + yellow, UINT16); +cmdline_parse_token_num_t cmd_port_tm_mark_vlan_dei_red = + TOKEN_NUM_INITIALIZER(struct cmd_port_tm_mark_vlan_dei_result, + red, UINT16); + +static void cmd_port_tm_mark_vlan_dei_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_port_tm_mark_vlan_dei_result *res = parsed_result; + struct rte_tm_error error; + portid_t port_id = res->port_id; + int green = res->green; + int yellow = res->yellow; + int red = res->red; + int ret; + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + memset(&error, 0, sizeof(struct rte_tm_error)); + ret = rte_tm_mark_vlan_dei(port_id, green, yellow, red, &error); + if (ret != 0) { + print_err_msg(&error); + return; + } +} + +cmdline_parse_inst_t cmd_port_tm_mark_vlan_dei = { + .f = cmd_port_tm_mark_vlan_dei_parsed, + .data = NULL, + .help_str = "set port tm mark vlan_dei <port> <green> <yellow> <red>", + .tokens = { + (void *)&cmd_port_tm_mark_vlan_dei_set, + (void *)&cmd_port_tm_mark_vlan_dei_port, + (void *)&cmd_port_tm_mark_vlan_dei_tm, + (void *)&cmd_port_tm_mark_vlan_dei_mark, + (void *)&cmd_port_tm_mark_vlan_dei_vlan_dei, + (void *)&cmd_port_tm_mark_vlan_dei_port_id, + (void *)&cmd_port_tm_mark_vlan_dei_green, + (void *)&cmd_port_tm_mark_vlan_dei_yellow, + (void *)&cmd_port_tm_mark_vlan_dei_red, + NULL, + }, +}; diff --git a/src/spdk/dpdk/app/test-pmd/cmdline_tm.h b/src/spdk/dpdk/app/test-pmd/cmdline_tm.h new file mode 100644 index 000000000..950cb7538 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/cmdline_tm.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ + +#ifndef _CMDLINE_TM_H_ +#define _CMDLINE_TM_H_ + + /* Traffic Management CLI */ +extern cmdline_parse_inst_t cmd_show_port_tm_cap; +extern cmdline_parse_inst_t cmd_show_port_tm_level_cap; +extern cmdline_parse_inst_t cmd_show_port_tm_node_cap; +extern cmdline_parse_inst_t cmd_show_port_tm_node_type; +extern cmdline_parse_inst_t cmd_show_port_tm_node_stats; +extern cmdline_parse_inst_t 
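These externs are pulled into testpmd's command-line parser elsewhere; the registration itself is not part of this header. A hypothetical, stripped-down prompt built with librte_cmdline, shown only to illustrate how such cmdline_parse_inst_t objects are consumed, might look like:

/* Hypothetical stand-alone prompt exposing two of the TM commands.
 * This only demonstrates the librte_cmdline mechanics; testpmd wires
 * these instances into its own command context instead. */
#include <cmdline_parse.h>
#include <cmdline.h>
#include <cmdline_socket.h>
#include "cmdline_tm.h"

static cmdline_parse_ctx_t tm_ctx[] = {
        (cmdline_parse_inst_t *)&cmd_suspend_port_tm_node,
        (cmdline_parse_inst_t *)&cmd_resume_port_tm_node,
        NULL,
};

static void
run_tm_prompt(void)
{
        struct cmdline *cl = cmdline_stdin_new(tm_ctx, "tm> ");

        if (cl == NULL)
                return;
        cmdline_interact(cl);
        cmdline_stdin_exit(cl);
}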
cmd_add_port_tm_node_shaper_profile; +extern cmdline_parse_inst_t cmd_del_port_tm_node_shaper_profile; +extern cmdline_parse_inst_t cmd_add_port_tm_node_shared_shaper; +extern cmdline_parse_inst_t cmd_del_port_tm_node_shared_shaper; +extern cmdline_parse_inst_t cmd_add_port_tm_node_wred_profile; +extern cmdline_parse_inst_t cmd_del_port_tm_node_wred_profile; +extern cmdline_parse_inst_t cmd_set_port_tm_node_shaper_profile; +extern cmdline_parse_inst_t cmd_add_port_tm_nonleaf_node; +extern cmdline_parse_inst_t cmd_add_port_tm_leaf_node; +extern cmdline_parse_inst_t cmd_del_port_tm_node; +extern cmdline_parse_inst_t cmd_set_port_tm_node_parent; +extern cmdline_parse_inst_t cmd_suspend_port_tm_node; +extern cmdline_parse_inst_t cmd_resume_port_tm_node; +extern cmdline_parse_inst_t cmd_port_tm_hierarchy_commit; +extern cmdline_parse_inst_t cmd_port_tm_mark_vlan_dei; +extern cmdline_parse_inst_t cmd_port_tm_mark_ip_ecn; +extern cmdline_parse_inst_t cmd_port_tm_mark_ip_dscp; + +#endif /* _CMDLINE_TM_H_ */ diff --git a/src/spdk/dpdk/app/test-pmd/config.c b/src/spdk/dpdk/app/test-pmd/config.c new file mode 100644 index 000000000..5381207cc --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/config.c @@ -0,0 +1,4318 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation. + * Copyright 2013-2014 6WIND S.A. + */ + +#include <stdarg.h> +#include <errno.h> +#include <stdio.h> +#include <string.h> +#include <stdint.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_debug.h> +#include <rte_log.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_memzone.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_string_fns.h> +#include <rte_cycles.h> +#include <rte_flow.h> +#include <rte_errno.h> +#ifdef RTE_LIBRTE_IXGBE_PMD +#include <rte_pmd_ixgbe.h> +#endif +#ifdef RTE_LIBRTE_I40E_PMD +#include <rte_pmd_i40e.h> +#endif +#ifdef RTE_LIBRTE_BNXT_PMD +#include <rte_pmd_bnxt.h> +#endif +#include <rte_gro.h> + +#include "testpmd.h" + +#define ETHDEV_FWVERS_LEN 32 + +static char *flowtype_to_str(uint16_t flow_type); + +static const struct { + enum tx_pkt_split split; + const char *name; +} tx_split_name[] = { + { + .split = TX_PKT_SPLIT_OFF, + .name = "off", + }, + { + .split = TX_PKT_SPLIT_ON, + .name = "on", + }, + { + .split = TX_PKT_SPLIT_RND, + .name = "rand", + }, +}; + +const struct rss_type_info rss_type_table[] = { + { "all", ETH_RSS_ETH | ETH_RSS_VLAN | ETH_RSS_IP | ETH_RSS_TCP | + ETH_RSS_UDP | ETH_RSS_SCTP | ETH_RSS_L2_PAYLOAD | + ETH_RSS_L2TPV3 | ETH_RSS_ESP | ETH_RSS_AH | ETH_RSS_PFCP}, + { "none", 0 }, + { "eth", ETH_RSS_ETH }, + { "l2-src-only", ETH_RSS_L2_SRC_ONLY }, + { "l2-dst-only", ETH_RSS_L2_DST_ONLY }, + { "vlan", ETH_RSS_VLAN }, + { "s-vlan", ETH_RSS_S_VLAN }, + { "c-vlan", ETH_RSS_C_VLAN }, + { "ipv4", ETH_RSS_IPV4 }, + { "ipv4-frag", ETH_RSS_FRAG_IPV4 }, + { "ipv4-tcp", ETH_RSS_NONFRAG_IPV4_TCP }, + { "ipv4-udp", ETH_RSS_NONFRAG_IPV4_UDP }, + { "ipv4-sctp", ETH_RSS_NONFRAG_IPV4_SCTP }, + { "ipv4-other", ETH_RSS_NONFRAG_IPV4_OTHER }, + { "ipv6", ETH_RSS_IPV6 }, + { "ipv6-frag", ETH_RSS_FRAG_IPV6 }, + { "ipv6-tcp", 
ETH_RSS_NONFRAG_IPV6_TCP }, + { "ipv6-udp", ETH_RSS_NONFRAG_IPV6_UDP }, + { "ipv6-sctp", ETH_RSS_NONFRAG_IPV6_SCTP }, + { "ipv6-other", ETH_RSS_NONFRAG_IPV6_OTHER }, + { "l2-payload", ETH_RSS_L2_PAYLOAD }, + { "ipv6-ex", ETH_RSS_IPV6_EX }, + { "ipv6-tcp-ex", ETH_RSS_IPV6_TCP_EX }, + { "ipv6-udp-ex", ETH_RSS_IPV6_UDP_EX }, + { "port", ETH_RSS_PORT }, + { "vxlan", ETH_RSS_VXLAN }, + { "geneve", ETH_RSS_GENEVE }, + { "nvgre", ETH_RSS_NVGRE }, + { "ip", ETH_RSS_IP }, + { "udp", ETH_RSS_UDP }, + { "tcp", ETH_RSS_TCP }, + { "sctp", ETH_RSS_SCTP }, + { "tunnel", ETH_RSS_TUNNEL }, + { "l3-src-only", ETH_RSS_L3_SRC_ONLY }, + { "l3-dst-only", ETH_RSS_L3_DST_ONLY }, + { "l4-src-only", ETH_RSS_L4_SRC_ONLY }, + { "l4-dst-only", ETH_RSS_L4_DST_ONLY }, + { "esp", ETH_RSS_ESP }, + { "ah", ETH_RSS_AH }, + { "l2tpv3", ETH_RSS_L2TPV3 }, + { "pfcp", ETH_RSS_PFCP }, + { NULL, 0 }, +}; + +static void +print_ethaddr(const char *name, struct rte_ether_addr *eth_addr) +{ + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); + printf("%s%s", name, buf); +} + +void +nic_stats_display(portid_t port_id) +{ + static uint64_t prev_pkts_rx[RTE_MAX_ETHPORTS]; + static uint64_t prev_pkts_tx[RTE_MAX_ETHPORTS]; + static uint64_t prev_bytes_rx[RTE_MAX_ETHPORTS]; + static uint64_t prev_bytes_tx[RTE_MAX_ETHPORTS]; + static uint64_t prev_cycles[RTE_MAX_ETHPORTS]; + uint64_t diff_pkts_rx, diff_pkts_tx, diff_bytes_rx, diff_bytes_tx, + diff_cycles; + uint64_t mpps_rx, mpps_tx, mbps_rx, mbps_tx; + struct rte_eth_stats stats; + struct rte_port *port = &ports[port_id]; + uint8_t i; + + static const char *nic_stats_border = "########################"; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) { + print_valid_ports(); + return; + } + rte_eth_stats_get(port_id, &stats); + printf("\n %s NIC statistics for port %-2d %s\n", + nic_stats_border, port_id, nic_stats_border); + + if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { + printf(" RX-packets: %-10"PRIu64" RX-missed: %-10"PRIu64" RX-bytes: " + "%-"PRIu64"\n", + stats.ipackets, stats.imissed, stats.ibytes); + printf(" RX-errors: %-"PRIu64"\n", stats.ierrors); + printf(" RX-nombuf: %-10"PRIu64"\n", + stats.rx_nombuf); + printf(" TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: " + "%-"PRIu64"\n", + stats.opackets, stats.oerrors, stats.obytes); + } + else { + printf(" RX-packets: %10"PRIu64" RX-errors: %10"PRIu64 + " RX-bytes: %10"PRIu64"\n", + stats.ipackets, stats.ierrors, stats.ibytes); + printf(" RX-errors: %10"PRIu64"\n", stats.ierrors); + printf(" RX-nombuf: %10"PRIu64"\n", + stats.rx_nombuf); + printf(" TX-packets: %10"PRIu64" TX-errors: %10"PRIu64 + " TX-bytes: %10"PRIu64"\n", + stats.opackets, stats.oerrors, stats.obytes); + } + + if (port->rx_queue_stats_mapping_enabled) { + printf("\n"); + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { + printf(" Stats reg %2d RX-packets: %10"PRIu64 + " RX-errors: %10"PRIu64 + " RX-bytes: %10"PRIu64"\n", + i, stats.q_ipackets[i], stats.q_errors[i], stats.q_ibytes[i]); + } + } + if (port->tx_queue_stats_mapping_enabled) { + printf("\n"); + for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { + printf(" Stats reg %2d TX-packets: %10"PRIu64 + " TX-bytes: %10"PRIu64"\n", + i, stats.q_opackets[i], stats.q_obytes[i]); + } + } + + diff_cycles = prev_cycles[port_id]; + prev_cycles[port_id] = rte_rdtsc(); + if (diff_cycles > 0) + diff_cycles = prev_cycles[port_id] - diff_cycles; + + diff_pkts_rx = (stats.ipackets > prev_pkts_rx[port_id]) ? 
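The throughput figures printed by nic_stats_display() are derived from TSC deltas taken between successive invocations. Reduced to a stand-alone helper (names are illustrative only), the rate computation is simply:

/* Rough sketch of the same rate computation in isolation: sample the
 * TSC (rte_rdtsc()) around a measurement window and scale the packet
 * delta by the TSC frequency to get packets per second. */
#include <stdint.h>
#include <rte_cycles.h>

static uint64_t
pkts_per_second(uint64_t pkts_in_window, uint64_t tsc_start, uint64_t tsc_end)
{
        uint64_t cycles = tsc_end - tsc_start;

        return cycles > 0 ? pkts_in_window * rte_get_tsc_hz() / cycles : 0;
}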
+ (stats.ipackets - prev_pkts_rx[port_id]) : 0; + diff_pkts_tx = (stats.opackets > prev_pkts_tx[port_id]) ? + (stats.opackets - prev_pkts_tx[port_id]) : 0; + prev_pkts_rx[port_id] = stats.ipackets; + prev_pkts_tx[port_id] = stats.opackets; + mpps_rx = diff_cycles > 0 ? + diff_pkts_rx * rte_get_tsc_hz() / diff_cycles : 0; + mpps_tx = diff_cycles > 0 ? + diff_pkts_tx * rte_get_tsc_hz() / diff_cycles : 0; + + diff_bytes_rx = (stats.ibytes > prev_bytes_rx[port_id]) ? + (stats.ibytes - prev_bytes_rx[port_id]) : 0; + diff_bytes_tx = (stats.obytes > prev_bytes_tx[port_id]) ? + (stats.obytes - prev_bytes_tx[port_id]) : 0; + prev_bytes_rx[port_id] = stats.ibytes; + prev_bytes_tx[port_id] = stats.obytes; + mbps_rx = diff_cycles > 0 ? + diff_bytes_rx * rte_get_tsc_hz() / diff_cycles : 0; + mbps_tx = diff_cycles > 0 ? + diff_bytes_tx * rte_get_tsc_hz() / diff_cycles : 0; + + printf("\n Throughput (since last show)\n"); + printf(" Rx-pps: %12"PRIu64" Rx-bps: %12"PRIu64"\n Tx-pps: %12" + PRIu64" Tx-bps: %12"PRIu64"\n", mpps_rx, mbps_rx * 8, + mpps_tx, mbps_tx * 8); + + printf(" %s############################%s\n", + nic_stats_border, nic_stats_border); +} + +void +nic_stats_clear(portid_t port_id) +{ + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) { + print_valid_ports(); + return; + } + + ret = rte_eth_stats_reset(port_id); + if (ret != 0) { + printf("%s: Error: failed to reset stats (port %u): %s", + __func__, port_id, strerror(ret)); + return; + } + + ret = rte_eth_stats_get(port_id, &ports[port_id].stats); + if (ret != 0) { + printf("%s: Error: failed to get stats (port %u): %s", + __func__, port_id, strerror(ret)); + return; + } + printf("\n NIC statistics for port %d cleared\n", port_id); +} + +void +nic_xstats_display(portid_t port_id) +{ + struct rte_eth_xstat *xstats; + int cnt_xstats, idx_xstat; + struct rte_eth_xstat_name *xstats_names; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) { + print_valid_ports(); + return; + } + printf("###### NIC extended statistics for port %-2d\n", port_id); + if (!rte_eth_dev_is_valid_port(port_id)) { + printf("Error: Invalid port number %i\n", port_id); + return; + } + + /* Get count */ + cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0); + if (cnt_xstats < 0) { + printf("Error: Cannot get count of xstats\n"); + return; + } + + /* Get id-name lookup table */ + xstats_names = malloc(sizeof(struct rte_eth_xstat_name) * cnt_xstats); + if (xstats_names == NULL) { + printf("Cannot allocate memory for xstats lookup\n"); + return; + } + if (cnt_xstats != rte_eth_xstats_get_names( + port_id, xstats_names, cnt_xstats)) { + printf("Error: Cannot get xstats lookup\n"); + free(xstats_names); + return; + } + + /* Get stats themselves */ + xstats = malloc(sizeof(struct rte_eth_xstat) * cnt_xstats); + if (xstats == NULL) { + printf("Cannot allocate memory for xstats\n"); + free(xstats_names); + return; + } + if (cnt_xstats != rte_eth_xstats_get(port_id, xstats, cnt_xstats)) { + printf("Error: Unable to get xstats\n"); + free(xstats_names); + free(xstats); + return; + } + + /* Display xstats */ + for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { + if (xstats_hide_zero && !xstats[idx_xstat].value) + continue; + printf("%s: %"PRIu64"\n", + xstats_names[idx_xstat].name, + xstats[idx_xstat].value); + } + free(xstats_names); + free(xstats); +} + +void +nic_xstats_clear(portid_t port_id) +{ + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) { + print_valid_ports(); + return; + } + + ret = rte_eth_xstats_reset(port_id); + if (ret != 0) { + 
printf("%s: Error: failed to reset xstats (port %u): %s", + __func__, port_id, strerror(ret)); + return; + } + + ret = rte_eth_stats_get(port_id, &ports[port_id].stats); + if (ret != 0) { + printf("%s: Error: failed to get stats (port %u): %s", + __func__, port_id, strerror(ret)); + return; + } +} + +void +nic_stats_mapping_display(portid_t port_id) +{ + struct rte_port *port = &ports[port_id]; + uint16_t i; + + static const char *nic_stats_mapping_border = "########################"; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) { + print_valid_ports(); + return; + } + + if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { + printf("Port id %d - either does not support queue statistic mapping or" + " no queue statistic mapping set\n", port_id); + return; + } + + printf("\n %s NIC statistics mapping for port %-2d %s\n", + nic_stats_mapping_border, port_id, nic_stats_mapping_border); + + if (port->rx_queue_stats_mapping_enabled) { + for (i = 0; i < nb_rx_queue_stats_mappings; i++) { + if (rx_queue_stats_mappings[i].port_id == port_id) { + printf(" RX-queue %2d mapped to Stats Reg %2d\n", + rx_queue_stats_mappings[i].queue_id, + rx_queue_stats_mappings[i].stats_counter_id); + } + } + printf("\n"); + } + + + if (port->tx_queue_stats_mapping_enabled) { + for (i = 0; i < nb_tx_queue_stats_mappings; i++) { + if (tx_queue_stats_mappings[i].port_id == port_id) { + printf(" TX-queue %2d mapped to Stats Reg %2d\n", + tx_queue_stats_mappings[i].queue_id, + tx_queue_stats_mappings[i].stats_counter_id); + } + } + } + + printf(" %s####################################%s\n", + nic_stats_mapping_border, nic_stats_mapping_border); +} + +void +rx_queue_infos_display(portid_t port_id, uint16_t queue_id) +{ + struct rte_eth_burst_mode mode; + struct rte_eth_rxq_info qinfo; + int32_t rc; + static const char *info_border = "*********************"; + + rc = rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo); + if (rc != 0) { + printf("Failed to retrieve information for port: %u, " + "RX queue: %hu\nerror desc: %s(%d)\n", + port_id, queue_id, strerror(-rc), rc); + return; + } + + printf("\n%s Infos for port %-2u, RX queue %-2u %s", + info_border, port_id, queue_id, info_border); + + printf("\nMempool: %s", (qinfo.mp == NULL) ? "NULL" : qinfo.mp->name); + printf("\nRX prefetch threshold: %hhu", qinfo.conf.rx_thresh.pthresh); + printf("\nRX host threshold: %hhu", qinfo.conf.rx_thresh.hthresh); + printf("\nRX writeback threshold: %hhu", qinfo.conf.rx_thresh.wthresh); + printf("\nRX free threshold: %hu", qinfo.conf.rx_free_thresh); + printf("\nRX drop packets: %s", + (qinfo.conf.rx_drop_en != 0) ? "on" : "off"); + printf("\nRX deferred start: %s", + (qinfo.conf.rx_deferred_start != 0) ? "on" : "off"); + printf("\nRX scattered packets: %s", + (qinfo.scattered_rx != 0) ? "on" : "off"); + printf("\nNumber of RXDs: %hu", qinfo.nb_desc); + + if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0) + printf("\nBurst mode: %s%s", + mode.info, + mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? 
+ " (per queue)" : ""); + + printf("\n"); +} + +void +tx_queue_infos_display(portid_t port_id, uint16_t queue_id) +{ + struct rte_eth_burst_mode mode; + struct rte_eth_txq_info qinfo; + int32_t rc; + static const char *info_border = "*********************"; + + rc = rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo); + if (rc != 0) { + printf("Failed to retrieve information for port: %u, " + "TX queue: %hu\nerror desc: %s(%d)\n", + port_id, queue_id, strerror(-rc), rc); + return; + } + + printf("\n%s Infos for port %-2u, TX queue %-2u %s", + info_border, port_id, queue_id, info_border); + + printf("\nTX prefetch threshold: %hhu", qinfo.conf.tx_thresh.pthresh); + printf("\nTX host threshold: %hhu", qinfo.conf.tx_thresh.hthresh); + printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); + printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); + printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); + printf("\nTX deferred start: %s", + (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); + printf("\nNumber of TXDs: %hu", qinfo.nb_desc); + + if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0) + printf("\nBurst mode: %s%s", + mode.info, + mode.flags & RTE_ETH_BURST_FLAG_PER_QUEUE ? + " (per queue)" : ""); + + printf("\n"); +} + +static int bus_match_all(const struct rte_bus *bus, const void *data) +{ + RTE_SET_USED(bus); + RTE_SET_USED(data); + return 0; +} + +void +device_infos_display(const char *identifier) +{ + static const char *info_border = "*********************"; + struct rte_bus *start = NULL, *next; + struct rte_dev_iterator dev_iter; + char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_ether_addr mac_addr; + struct rte_device *dev; + struct rte_devargs da; + portid_t port_id; + char devstr[128]; + + memset(&da, 0, sizeof(da)); + if (!identifier) + goto skip_parse; + + if (rte_devargs_parsef(&da, "%s", identifier)) { + printf("cannot parse identifier\n"); + if (da.args) + free(da.args); + return; + } + +skip_parse: + while ((next = rte_bus_find(start, bus_match_all, NULL)) != NULL) { + + start = next; + if (identifier && da.bus != next) + continue; + + /* Skip buses that don't have iterate method */ + if (!next->dev_iterate) + continue; + + snprintf(devstr, sizeof(devstr), "bus=%s", next->name); + RTE_DEV_FOREACH(dev, devstr, &dev_iter) { + + if (!dev->driver) + continue; + /* Check for matching device if identifier is present */ + if (identifier && + strncmp(da.name, dev->name, strlen(dev->name))) + continue; + printf("\n%s Infos for device %s %s\n", + info_border, dev->name, info_border); + printf("Bus name: %s", dev->bus->name); + printf("\nDriver name: %s", dev->driver->name); + printf("\nDevargs: %s", + dev->devargs ? 
dev->devargs->args : ""); + printf("\nConnect to socket: %d", dev->numa_node); + printf("\n"); + + /* List ports with matching device name */ + RTE_ETH_FOREACH_DEV_OF(port_id, dev) { + printf("\n\tPort id: %-2d", port_id); + if (eth_macaddr_get_print_err(port_id, + &mac_addr) == 0) + print_ethaddr("\n\tMAC address: ", + &mac_addr); + rte_eth_dev_get_name_by_port(port_id, name); + printf("\n\tDevice name: %s", name); + printf("\n"); + } + } + }; +} + +void +port_infos_display(portid_t port_id) +{ + struct rte_port *port; + struct rte_ether_addr mac_addr; + struct rte_eth_link link; + struct rte_eth_dev_info dev_info; + int vlan_offload; + struct rte_mempool * mp; + static const char *info_border = "*********************"; + uint16_t mtu; + char name[RTE_ETH_NAME_MAX_LEN]; + int ret; + char fw_version[ETHDEV_FWVERS_LEN]; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) { + print_valid_ports(); + return; + } + port = &ports[port_id]; + ret = eth_link_get_nowait_print_err(port_id, &link); + if (ret < 0) + return; + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + printf("\n%s Infos for port %-2d %s\n", + info_border, port_id, info_border); + if (eth_macaddr_get_print_err(port_id, &mac_addr) == 0) + print_ethaddr("MAC address: ", &mac_addr); + rte_eth_dev_get_name_by_port(port_id, name); + printf("\nDevice name: %s", name); + printf("\nDriver name: %s", dev_info.driver_name); + + if (rte_eth_dev_fw_version_get(port_id, fw_version, + ETHDEV_FWVERS_LEN) == 0) + printf("\nFirmware-version: %s", fw_version); + else + printf("\nFirmware-version: %s", "not available"); + + if (dev_info.device->devargs && dev_info.device->devargs->args) + printf("\nDevargs: %s", dev_info.device->devargs->args); + printf("\nConnect to socket: %u", port->socket_id); + + if (port_numa[port_id] != NUMA_NO_CONFIG) { + mp = mbuf_pool_find(port_numa[port_id]); + if (mp) + printf("\nmemory allocation on the socket: %d", + port_numa[port_id]); + } else + printf("\nmemory allocation on the socket: %u",port->socket_id); + + printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); + printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); + printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex")); + + if (!rte_eth_dev_get_mtu(port_id, &mtu)) + printf("MTU: %u\n", mtu); + + printf("Promiscuous mode: %s\n", + rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); + printf("Allmulticast mode: %s\n", + rte_eth_allmulticast_get(port_id) ? 
"enabled" : "disabled"); + printf("Maximum number of MAC addresses: %u\n", + (unsigned int)(port->dev_info.max_mac_addrs)); + printf("Maximum number of MAC addresses of hash filtering: %u\n", + (unsigned int)(port->dev_info.max_hash_mac_addrs)); + + vlan_offload = rte_eth_dev_get_vlan_offload(port_id); + if (vlan_offload >= 0){ + printf("VLAN offload: \n"); + if (vlan_offload & ETH_VLAN_STRIP_OFFLOAD) + printf(" strip on, "); + else + printf(" strip off, "); + + if (vlan_offload & ETH_VLAN_FILTER_OFFLOAD) + printf("filter on, "); + else + printf("filter off, "); + + if (vlan_offload & ETH_VLAN_EXTEND_OFFLOAD) + printf("extend on, "); + else + printf("extend off, "); + + if (vlan_offload & ETH_QINQ_STRIP_OFFLOAD) + printf("qinq strip on\n"); + else + printf("qinq strip off\n"); + } + + if (dev_info.hash_key_size > 0) + printf("Hash key size in bytes: %u\n", dev_info.hash_key_size); + if (dev_info.reta_size > 0) + printf("Redirection table size: %u\n", dev_info.reta_size); + if (!dev_info.flow_type_rss_offloads) + printf("No RSS offload flow type is supported.\n"); + else { + uint16_t i; + char *p; + + printf("Supported RSS offload flow types:\n"); + for (i = RTE_ETH_FLOW_UNKNOWN + 1; + i < sizeof(dev_info.flow_type_rss_offloads) * CHAR_BIT; i++) { + if (!(dev_info.flow_type_rss_offloads & (1ULL << i))) + continue; + p = flowtype_to_str(i); + if (p) + printf(" %s\n", p); + else + printf(" user defined %d\n", i); + } + } + + printf("Minimum size of RX buffer: %u\n", dev_info.min_rx_bufsize); + printf("Maximum configurable length of RX packet: %u\n", + dev_info.max_rx_pktlen); + printf("Maximum configurable size of LRO aggregated packet: %u\n", + dev_info.max_lro_pkt_size); + if (dev_info.max_vfs) + printf("Maximum number of VFs: %u\n", dev_info.max_vfs); + if (dev_info.max_vmdq_pools) + printf("Maximum number of VMDq pools: %u\n", + dev_info.max_vmdq_pools); + + printf("Current number of RX queues: %u\n", dev_info.nb_rx_queues); + printf("Max possible RX queues: %u\n", dev_info.max_rx_queues); + printf("Max possible number of RXDs per queue: %hu\n", + dev_info.rx_desc_lim.nb_max); + printf("Min possible number of RXDs per queue: %hu\n", + dev_info.rx_desc_lim.nb_min); + printf("RXDs number alignment: %hu\n", dev_info.rx_desc_lim.nb_align); + + printf("Current number of TX queues: %u\n", dev_info.nb_tx_queues); + printf("Max possible TX queues: %u\n", dev_info.max_tx_queues); + printf("Max possible number of TXDs per queue: %hu\n", + dev_info.tx_desc_lim.nb_max); + printf("Min possible number of TXDs per queue: %hu\n", + dev_info.tx_desc_lim.nb_min); + printf("TXDs number alignment: %hu\n", dev_info.tx_desc_lim.nb_align); + printf("Max segment number per packet: %hu\n", + dev_info.tx_desc_lim.nb_seg_max); + printf("Max segment number per MTU/TSO: %hu\n", + dev_info.tx_desc_lim.nb_mtu_seg_max); + + /* Show switch info only if valid switch domain and port id is set */ + if (dev_info.switch_info.domain_id != + RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID) { + if (dev_info.switch_info.name) + printf("Switch name: %s\n", dev_info.switch_info.name); + + printf("Switch domain Id: %u\n", + dev_info.switch_info.domain_id); + printf("Switch Port Id: %u\n", + dev_info.switch_info.port_id); + } +} + +void +port_summary_header_display(void) +{ + uint16_t port_number; + + port_number = rte_eth_dev_count_avail(); + printf("Number of available ports: %i\n", port_number); + printf("%-4s %-17s %-12s %-14s %-8s %s\n", "Port", "MAC Address", "Name", + "Driver", "Status", "Link"); +} + +void +port_summary_display(portid_t 
port_id) +{ + struct rte_ether_addr mac_addr; + struct rte_eth_link link; + struct rte_eth_dev_info dev_info; + char name[RTE_ETH_NAME_MAX_LEN]; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) { + print_valid_ports(); + return; + } + + ret = eth_link_get_nowait_print_err(port_id, &link); + if (ret < 0) + return; + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + rte_eth_dev_get_name_by_port(port_id, name); + ret = eth_macaddr_get_print_err(port_id, &mac_addr); + if (ret != 0) + return; + + printf("%-4d %02X:%02X:%02X:%02X:%02X:%02X %-12s %-14s %-8s %uMbps\n", + port_id, mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], + mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], + mac_addr.addr_bytes[4], mac_addr.addr_bytes[5], name, + dev_info.driver_name, (link.link_status) ? ("up") : ("down"), + (unsigned int) link.link_speed); +} + +void +port_offload_cap_display(portid_t port_id) +{ + struct rte_eth_dev_info dev_info; + static const char *info_border = "************"; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + printf("\n%s Port %d supported offload features: %s\n", + info_border, port_id, info_border); + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_STRIP) { + printf("VLAN stripped: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_QINQ_STRIP) { + printf("Double VLANs stripped: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_QINQ_STRIP) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { + printf("RX IPv4 checksum: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_IPV4_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM) { + printf("RX UDP checksum: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_UDP_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM) { + printf("RX TCP checksum: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_TCP_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SCTP_CKSUM) { + printf("RX SCTP checksum: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SCTP_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) { + printf("RX Outer IPv4 checksum: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) { + printf("RX Outer UDP checksum: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_OUTER_UDP_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO) { + printf("Large receive offload: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_TCP_LRO) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TIMESTAMP) { + printf("HW timestamp: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_TIMESTAMP) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & 
DEV_RX_OFFLOAD_KEEP_CRC) { + printf("Rx Keep CRC: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_KEEP_CRC) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) { + printf("RX offload security: "); + if (ports[port_id].dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SECURITY) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) { + printf("VLAN insert: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_VLAN_INSERT) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) { + printf("Double VLANs insert: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_QINQ_INSERT) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) { + printf("TX IPv4 checksum: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_IPV4_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) { + printf("TX UDP checksum: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_UDP_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) { + printf("TX TCP checksum: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_TCP_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SCTP_CKSUM) { + printf("TX SCTP checksum: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_SCTP_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { + printf("TX Outer IPv4 checksum: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) { + printf("TX TCP segmentation: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_TCP_TSO) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TSO) { + printf("TX UDP segmentation: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_UDP_TSO) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO) { + printf("TSO for VXLAN tunnel packet: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_VXLAN_TNL_TSO) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GRE_TNL_TSO) { + printf("TSO for GRE tunnel packet: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_GRE_TNL_TSO) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPIP_TNL_TSO) { + printf("TSO for IPIP tunnel packet: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_IPIP_TNL_TSO) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_GENEVE_TNL_TSO) { + printf("TSO for GENEVE tunnel packet: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_GENEVE_TNL_TSO) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IP_TNL_TSO) { + printf("IP tunnel TSO: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_IP_TNL_TSO) + printf("on\n"); + else + printf("off\n"); 
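Every block in port_offload_cap_display() follows the same pattern: the *_offload_capa bit says whether the device supports an offload, while the configured dev_conf offloads say whether it is currently requested. A hedged sketch of using that pattern at configuration time (helper name and the single offload are chosen purely for illustration):

/* Sketch: request TX IPv4 checksum offload only if the device reports
 * support for it, then configure the port. Error handling is trimmed. */
#include <rte_ethdev.h>

static int
configure_with_ipv4_cksum(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_conf conf = { 0 };
        struct rte_eth_dev_info dev_info;
        int ret = rte_eth_dev_info_get(port_id, &dev_info);

        if (ret != 0)
                return ret;
        if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
                conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}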
+ } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_TNL_TSO) { + printf("UDP tunnel TSO: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_UDP_TNL_TSO) + printf("on\n"); + else + printf("off\n"); + } + + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { + printf("TX Outer UDP checksum: "); + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) + printf("on\n"); + else + printf("off\n"); + } + +} + +int +port_id_is_invalid(portid_t port_id, enum print_warning warning) +{ + uint16_t pid; + + if (port_id == (portid_t)RTE_PORT_ALL) + return 0; + + RTE_ETH_FOREACH_DEV(pid) + if (port_id == pid) + return 0; + + if (warning == ENABLED_WARN) + printf("Invalid port %d\n", port_id); + + return 1; +} + +void print_valid_ports(void) +{ + portid_t pid; + + printf("The valid ports array is ["); + RTE_ETH_FOREACH_DEV(pid) { + printf(" %d", pid); + } + printf(" ]\n"); +} + +static int +vlan_id_is_invalid(uint16_t vlan_id) +{ + if (vlan_id < 4096) + return 0; + printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); + return 1; +} + +static int +port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) +{ + const struct rte_pci_device *pci_dev; + const struct rte_bus *bus; + uint64_t pci_len; + + if (reg_off & 0x3) { + printf("Port register offset 0x%X not aligned on a 4-byte " + "boundary\n", + (unsigned)reg_off); + return 1; + } + + if (!ports[port_id].dev_info.device) { + printf("Invalid device\n"); + return 0; + } + + bus = rte_bus_find_by_device(ports[port_id].dev_info.device); + if (bus && !strcmp(bus->name, "pci")) { + pci_dev = RTE_DEV_TO_PCI(ports[port_id].dev_info.device); + } else { + printf("Not a PCI device\n"); + return 1; + } + + pci_len = pci_dev->mem_resource[0].len; + if (reg_off >= pci_len) { + printf("Port %d: register offset %u (0x%X) out of port PCI " + "resource (length=%"PRIu64")\n", + port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); + return 1; + } + return 0; +} + +static int +reg_bit_pos_is_invalid(uint8_t bit_pos) +{ + if (bit_pos <= 31) + return 0; + printf("Invalid bit position %d (must be <= 31)\n", bit_pos); + return 1; +} + +#define display_port_and_reg_off(port_id, reg_off) \ + printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) + +static inline void +display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) +{ + display_port_and_reg_off(port_id, (unsigned)reg_off); + printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); +} + +void +port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) +{ + uint32_t reg_v; + + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + if (reg_bit_pos_is_invalid(bit_x)) + return; + reg_v = port_id_pci_reg_read(port_id, reg_off); + display_port_and_reg_off(port_id, (unsigned)reg_off); + printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); +} + +void +port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, + uint8_t bit1_pos, uint8_t bit2_pos) +{ + uint32_t reg_v; + uint8_t l_bit; + uint8_t h_bit; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + if (reg_bit_pos_is_invalid(bit1_pos)) + return; + if (reg_bit_pos_is_invalid(bit2_pos)) + return; + if (bit1_pos > bit2_pos) + l_bit = bit2_pos, h_bit = bit1_pos; + else + l_bit = bit1_pos, h_bit = bit2_pos; + + reg_v = port_id_pci_reg_read(port_id, reg_off); + reg_v >>= l_bit; + if (h_bit < 31) + reg_v &= ((1 
<< (h_bit - l_bit + 1)) - 1); + display_port_and_reg_off(port_id, (unsigned)reg_off); + printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, + ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); +} + +void +port_reg_display(portid_t port_id, uint32_t reg_off) +{ + uint32_t reg_v; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + reg_v = port_id_pci_reg_read(port_id, reg_off); + display_port_reg_value(port_id, reg_off, reg_v); +} + +void +port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, + uint8_t bit_v) +{ + uint32_t reg_v; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + if (reg_bit_pos_is_invalid(bit_pos)) + return; + if (bit_v > 1) { + printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); + return; + } + reg_v = port_id_pci_reg_read(port_id, reg_off); + if (bit_v == 0) + reg_v &= ~(1 << bit_pos); + else + reg_v |= (1 << bit_pos); + port_id_pci_reg_write(port_id, reg_off, reg_v); + display_port_reg_value(port_id, reg_off, reg_v); +} + +void +port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, + uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) +{ + uint32_t max_v; + uint32_t reg_v; + uint8_t l_bit; + uint8_t h_bit; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + if (reg_bit_pos_is_invalid(bit1_pos)) + return; + if (reg_bit_pos_is_invalid(bit2_pos)) + return; + if (bit1_pos > bit2_pos) + l_bit = bit2_pos, h_bit = bit1_pos; + else + l_bit = bit1_pos, h_bit = bit2_pos; + + if ((h_bit - l_bit) < 31) + max_v = (1 << (h_bit - l_bit + 1)) - 1; + else + max_v = 0xFFFFFFFF; + + if (value > max_v) { + printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", + (unsigned)value, (unsigned)value, + (unsigned)max_v, (unsigned)max_v); + return; + } + reg_v = port_id_pci_reg_read(port_id, reg_off); + reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ + reg_v |= (value << l_bit); /* Set changed bits */ + port_id_pci_reg_write(port_id, reg_off, reg_v); + display_port_reg_value(port_id, reg_off, reg_v); +} + +void +port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) +{ + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + port_id_pci_reg_write(port_id, reg_off, reg_v); + display_port_reg_value(port_id, reg_off, reg_v); +} + +void +port_mtu_set(portid_t port_id, uint16_t mtu) +{ + int diag; + struct rte_port *rte_port = &ports[port_id]; + struct rte_eth_dev_info dev_info; + uint16_t eth_overhead; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) { + printf("Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n", + mtu, dev_info.min_mtu, dev_info.max_mtu); + return; + } + diag = rte_eth_dev_set_mtu(port_id, mtu); + if (diag == 0 && + dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) { + /* + * Ether overhead in driver is equal to the difference of + * max_rx_pktlen and max_mtu in rte_eth_dev_info when the + * device supports jumbo frame. 
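The register bit-field commands above all reduce to the same masking arithmetic. As an isolated helper (purely illustrative), replacing bits [l_bit, h_bit] of a 32-bit register value looks like:

/* Stand-alone sketch of the masking used by port_reg_bit_field_set():
 * overwrite bits [l_bit, h_bit] of reg_v with 'value', leaving the
 * remaining bits untouched. Assumes l_bit <= h_bit <= 31. */
#include <stdint.h>

static uint32_t
set_bit_field(uint32_t reg_v, uint8_t l_bit, uint8_t h_bit, uint32_t value)
{
        uint32_t width = (uint32_t)h_bit - l_bit + 1;
        uint32_t mask = width < 32 ? ((1u << width) - 1) : 0xFFFFFFFFu;

        return (reg_v & ~(mask << l_bit)) | ((value & mask) << l_bit);
}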
+ */ + eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu; + if (mtu > RTE_ETHER_MAX_LEN - eth_overhead) { + rte_port->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + rte_port->dev_conf.rxmode.max_rx_pkt_len = + mtu + eth_overhead; + } else + rte_port->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + return; + } + printf("Set MTU failed. diag=%d\n", diag); +} + +/* Generic flow management functions. */ + +/** Generate a port_flow entry from attributes/pattern/actions. */ +static struct port_flow * +port_flow_new(const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions, + struct rte_flow_error *error) +{ + const struct rte_flow_conv_rule rule = { + .attr_ro = attr, + .pattern_ro = pattern, + .actions_ro = actions, + }; + struct port_flow *pf; + int ret; + + ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error); + if (ret < 0) + return NULL; + pf = calloc(1, offsetof(struct port_flow, rule) + ret); + if (!pf) { + rte_flow_error_set + (error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "calloc() failed"); + return NULL; + } + if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &pf->rule, ret, &rule, + error) >= 0) + return pf; + free(pf); + return NULL; +} + +/** Print a message out of a flow error. */ +static int +port_flow_complain(struct rte_flow_error *error) +{ + static const char *const errstrlist[] = { + [RTE_FLOW_ERROR_TYPE_NONE] = "no error", + [RTE_FLOW_ERROR_TYPE_UNSPECIFIED] = "cause unspecified", + [RTE_FLOW_ERROR_TYPE_HANDLE] = "flow rule (handle)", + [RTE_FLOW_ERROR_TYPE_ATTR_GROUP] = "group field", + [RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY] = "priority field", + [RTE_FLOW_ERROR_TYPE_ATTR_INGRESS] = "ingress field", + [RTE_FLOW_ERROR_TYPE_ATTR_EGRESS] = "egress field", + [RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER] = "transfer field", + [RTE_FLOW_ERROR_TYPE_ATTR] = "attributes structure", + [RTE_FLOW_ERROR_TYPE_ITEM_NUM] = "pattern length", + [RTE_FLOW_ERROR_TYPE_ITEM_SPEC] = "item specification", + [RTE_FLOW_ERROR_TYPE_ITEM_LAST] = "item specification range", + [RTE_FLOW_ERROR_TYPE_ITEM_MASK] = "item specification mask", + [RTE_FLOW_ERROR_TYPE_ITEM] = "specific pattern item", + [RTE_FLOW_ERROR_TYPE_ACTION_NUM] = "number of actions", + [RTE_FLOW_ERROR_TYPE_ACTION_CONF] = "action configuration", + [RTE_FLOW_ERROR_TYPE_ACTION] = "specific action", + }; + const char *errstr; + char buf[32]; + int err = rte_errno; + + if ((unsigned int)error->type >= RTE_DIM(errstrlist) || + !errstrlist[error->type]) + errstr = "unknown type"; + else + errstr = errstrlist[error->type]; + printf("%s(): Caught PMD error type %d (%s): %s%s: %s\n", __func__, + error->type, errstr, + error->cause ? (snprintf(buf, sizeof(buf), "cause: %p, ", + error->cause), buf) : "", + error->message ? error->message : "(no stated reason)", + rte_strerror(err)); + return -err; +} + +/** Validate flow rule. */ +int +port_flow_validate(portid_t port_id, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions) +{ + struct rte_flow_error error; + + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x11, sizeof(error)); + if (rte_flow_validate(port_id, attr, pattern, actions, &error)) + return port_flow_complain(&error); + printf("Flow rule validated\n"); + return 0; +} + +/** Update age action context by port_flow pointer. 
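port_flow_validate() simply forwards the attr/pattern/actions triplet built by the flow CLI to rte_flow_validate(). A minimal sketch of the same call with a hand-built drop-all ingress rule (the rule contents are chosen for illustration only):

/* Illustrative only: validate a minimal "drop every ingress packet"
 * rule, the same triplet shape the CLI hands to port_flow_validate()
 * and port_flow_create(). */
#include <rte_flow.h>

static int
validate_drop_all(uint16_t port_id)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}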
*/ +void +update_age_action_context(const struct rte_flow_action *actions, + struct port_flow *pf) +{ + struct rte_flow_action_age *age = NULL; + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_AGE: + age = (struct rte_flow_action_age *) + (uintptr_t)actions->conf; + age->context = pf; + return; + default: + break; + } + } +} + +/** Create flow rule. */ +int +port_flow_create(portid_t port_id, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions) +{ + struct rte_flow *flow; + struct rte_port *port; + struct port_flow *pf; + uint32_t id = 0; + struct rte_flow_error error; + + port = &ports[port_id]; + if (port->flow_list) { + if (port->flow_list->id == UINT32_MAX) { + printf("Highest rule ID is already assigned, delete" + " it first"); + return -ENOMEM; + } + id = port->flow_list->id + 1; + } + pf = port_flow_new(attr, pattern, actions, &error); + if (!pf) + return port_flow_complain(&error); + update_age_action_context(actions, pf); + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x22, sizeof(error)); + flow = rte_flow_create(port_id, attr, pattern, actions, &error); + if (!flow) { + free(pf); + return port_flow_complain(&error); + } + pf->next = port->flow_list; + pf->id = id; + pf->flow = flow; + port->flow_list = pf; + printf("Flow rule #%u created\n", pf->id); + return 0; +} + +/** Destroy a number of flow rules. */ +int +port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule) +{ + struct rte_port *port; + struct port_flow **tmp; + uint32_t c = 0; + int ret = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + tmp = &port->flow_list; + while (*tmp) { + uint32_t i; + + for (i = 0; i != n; ++i) { + struct rte_flow_error error; + struct port_flow *pf = *tmp; + + if (rule[i] != pf->id) + continue; + /* + * Poisoning to make sure PMDs update it in case + * of error. + */ + memset(&error, 0x33, sizeof(error)); + if (rte_flow_destroy(port_id, pf->flow, &error)) { + ret = port_flow_complain(&error); + continue; + } + printf("Flow rule #%u destroyed\n", pf->id); + *tmp = pf->next; + free(pf); + break; + } + if (i == n) + tmp = &(*tmp)->next; + ++c; + } + return ret; +} + +/** Remove all flow rules. */ +int +port_flow_flush(portid_t port_id) +{ + struct rte_flow_error error; + struct rte_port *port; + int ret = 0; + + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x44, sizeof(error)); + if (rte_flow_flush(port_id, &error)) { + ret = port_flow_complain(&error); + if (port_id_is_invalid(port_id, DISABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return ret; + } + port = &ports[port_id]; + while (port->flow_list) { + struct port_flow *pf = port->flow_list->next; + + free(port->flow_list); + port->flow_list = pf; + } + return ret; +} + +/** Dump all flow rules. 
*/ +int +port_flow_dump(portid_t port_id, const char *file_name) +{ + int ret = 0; + FILE *file = stdout; + struct rte_flow_error error; + + if (file_name && strlen(file_name)) { + file = fopen(file_name, "w"); + if (!file) { + printf("Failed to create file %s: %s\n", file_name, + strerror(errno)); + return -errno; + } + } + ret = rte_flow_dev_dump(port_id, file, &error); + if (ret) { + port_flow_complain(&error); + printf("Failed to dump flow: %s\n", strerror(-ret)); + } else + printf("Flow dump finished\n"); + if (file_name && strlen(file_name)) + fclose(file); + return ret; +} + +/** Query a flow rule. */ +int +port_flow_query(portid_t port_id, uint32_t rule, + const struct rte_flow_action *action) +{ + struct rte_flow_error error; + struct rte_port *port; + struct port_flow *pf; + const char *name; + union { + struct rte_flow_query_count count; + } query; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return -EINVAL; + port = &ports[port_id]; + for (pf = port->flow_list; pf; pf = pf->next) + if (pf->id == rule) + break; + if (!pf) { + printf("Flow rule #%u not found\n", rule); + return -ENOENT; + } + ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, + &name, sizeof(name), + (void *)(uintptr_t)action->type, &error); + if (ret < 0) + return port_flow_complain(&error); + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_COUNT: + break; + default: + printf("Cannot query action type %d (%s)\n", + action->type, name); + return -ENOTSUP; + } + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x55, sizeof(error)); + memset(&query, 0, sizeof(query)); + if (rte_flow_query(port_id, pf->flow, action, &query, &error)) + return port_flow_complain(&error); + switch (action->type) { + case RTE_FLOW_ACTION_TYPE_COUNT: + printf("%s:\n" + " hits_set: %u\n" + " bytes_set: %u\n" + " hits: %" PRIu64 "\n" + " bytes: %" PRIu64 "\n", + name, + query.count.hits_set, + query.count.bytes_set, + query.count.hits, + query.count.bytes); + break; + default: + printf("Cannot display result for action type %d (%s)\n", + action->type, name); + break; + } + return 0; +} + +/** List simply and destroy all aged flows. */ +void +port_flow_aged(portid_t port_id, uint8_t destroy) +{ + void **contexts; + int nb_context, total = 0, idx; + struct rte_flow_error error; + struct port_flow *pf; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return; + total = rte_flow_get_aged_flows(port_id, NULL, 0, &error); + printf("Port %u total aged flows: %d\n", port_id, total); + if (total < 0) { + port_flow_complain(&error); + return; + } + if (total == 0) + return; + contexts = malloc(sizeof(void *) * total); + if (contexts == NULL) { + printf("Cannot allocate contexts for aged flow\n"); + return; + } + printf("ID\tGroup\tPrio\tAttr\n"); + nb_context = rte_flow_get_aged_flows(port_id, contexts, total, &error); + if (nb_context != total) { + printf("Port:%d get aged flows count(%d) != total(%d)\n", + port_id, nb_context, total); + free(contexts); + return; + } + for (idx = 0; idx < nb_context; idx++) { + pf = (struct port_flow *)contexts[idx]; + if (!pf) { + printf("Error: get Null context in port %u\n", port_id); + continue; + } + printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t\n", + pf->id, + pf->rule.attr->group, + pf->rule.attr->priority, + pf->rule.attr->ingress ? 'i' : '-', + pf->rule.attr->egress ? 'e' : '-', + pf->rule.attr->transfer ? 
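port_flow_query() above only supports the COUNT action. Stripped of the flow-list lookup and error plumbing, the underlying query is as follows (the flow handle is assumed to come from an earlier rte_flow_create() that included a COUNT action):

/* Sketch mirroring port_flow_query() for the COUNT action: read hit
 * and byte counters from an existing flow handle. */
#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

static int
query_flow_count(uint16_t port_id, struct rte_flow *flow)
{
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count count = { 0 };
        struct rte_flow_error error;

        if (rte_flow_query(port_id, flow, &action, &count, &error) != 0)
                return -1;
        printf("hits: %" PRIu64 " bytes: %" PRIu64 "\n",
               count.hits, count.bytes);
        return 0;
}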
't' : '-'); + } + if (destroy) { + int ret; + uint32_t flow_id; + + total = 0; + printf("\n"); + for (idx = 0; idx < nb_context; idx++) { + pf = (struct port_flow *)contexts[idx]; + if (!pf) + continue; + flow_id = pf->id; + ret = port_flow_destroy(port_id, 1, &flow_id); + if (!ret) + total++; + } + printf("%d flows be destroyed\n", total); + } + free(contexts); +} + +/** List flow rules. */ +void +port_flow_list(portid_t port_id, uint32_t n, const uint32_t group[n]) +{ + struct rte_port *port; + struct port_flow *pf; + struct port_flow *list = NULL; + uint32_t i; + + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) + return; + port = &ports[port_id]; + if (!port->flow_list) + return; + /* Sort flows by group, priority and ID. */ + for (pf = port->flow_list; pf != NULL; pf = pf->next) { + struct port_flow **tmp; + const struct rte_flow_attr *curr = pf->rule.attr; + + if (n) { + /* Filter out unwanted groups. */ + for (i = 0; i != n; ++i) + if (curr->group == group[i]) + break; + if (i == n) + continue; + } + for (tmp = &list; *tmp; tmp = &(*tmp)->tmp) { + const struct rte_flow_attr *comp = (*tmp)->rule.attr; + + if (curr->group > comp->group || + (curr->group == comp->group && + curr->priority > comp->priority) || + (curr->group == comp->group && + curr->priority == comp->priority && + pf->id > (*tmp)->id)) + continue; + break; + } + pf->tmp = *tmp; + *tmp = pf; + } + printf("ID\tGroup\tPrio\tAttr\tRule\n"); + for (pf = list; pf != NULL; pf = pf->tmp) { + const struct rte_flow_item *item = pf->rule.pattern; + const struct rte_flow_action *action = pf->rule.actions; + const char *name; + + printf("%" PRIu32 "\t%" PRIu32 "\t%" PRIu32 "\t%c%c%c\t", + pf->id, + pf->rule.attr->group, + pf->rule.attr->priority, + pf->rule.attr->ingress ? 'i' : '-', + pf->rule.attr->egress ? 'e' : '-', + pf->rule.attr->transfer ? 't' : '-'); + while (item->type != RTE_FLOW_ITEM_TYPE_END) { + if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, + &name, sizeof(name), + (void *)(uintptr_t)item->type, + NULL) <= 0) + name = "[UNKNOWN]"; + if (item->type != RTE_FLOW_ITEM_TYPE_VOID) + printf("%s ", name); + ++item; + } + printf("=>"); + while (action->type != RTE_FLOW_ACTION_TYPE_END) { + if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, + &name, sizeof(name), + (void *)(uintptr_t)action->type, + NULL) <= 0) + name = "[UNKNOWN]"; + if (action->type != RTE_FLOW_ACTION_TYPE_VOID) + printf(" %s", name); + ++action; + } + printf("\n"); + } +} + +/** Restrict ingress traffic to the defined flow rules. */ +int +port_flow_isolate(portid_t port_id, int set) +{ + struct rte_flow_error error; + + /* Poisoning to make sure PMDs update it in case of error. */ + memset(&error, 0x66, sizeof(error)); + if (rte_flow_isolate(port_id, set, &error)) + return port_flow_complain(&error); + printf("Ingress traffic on port %u is %s to the defined flow rules\n", + port_id, + set ? "now restricted" : "not restricted anymore"); + return 0; +} + +/* + * RX/TX ring descriptors display functions. 
+ */ +int +rx_queue_id_is_invalid(queueid_t rxq_id) +{ + if (rxq_id < nb_rxq) + return 0; + printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); + return 1; +} + +int +tx_queue_id_is_invalid(queueid_t txq_id) +{ + if (txq_id < nb_txq) + return 0; + printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); + return 1; +} + +static int +rx_desc_id_is_invalid(uint16_t rxdesc_id) +{ + if (rxdesc_id < nb_rxd) + return 0; + printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", + rxdesc_id, nb_rxd); + return 1; +} + +static int +tx_desc_id_is_invalid(uint16_t txdesc_id) +{ + if (txdesc_id < nb_txd) + return 0; + printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", + txdesc_id, nb_txd); + return 1; +} + +static const struct rte_memzone * +ring_dma_zone_lookup(const char *ring_name, portid_t port_id, uint16_t q_id) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + + snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_%s", + port_id, q_id, ring_name); + mz = rte_memzone_lookup(mz_name); + if (mz == NULL) + printf("%s ring memory zoneof (port %d, queue %d) not" + "found (zone name = %s\n", + ring_name, port_id, q_id, mz_name); + return mz; +} + +union igb_ring_dword { + uint64_t dword; + struct { +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN + uint32_t lo; + uint32_t hi; +#else + uint32_t hi; + uint32_t lo; +#endif + } words; +}; + +struct igb_ring_desc_32_bytes { + union igb_ring_dword lo_dword; + union igb_ring_dword hi_dword; + union igb_ring_dword resv1; + union igb_ring_dword resv2; +}; + +struct igb_ring_desc_16_bytes { + union igb_ring_dword lo_dword; + union igb_ring_dword hi_dword; +}; + +static void +ring_rxd_display_dword(union igb_ring_dword dword) +{ + printf(" 0x%08X - 0x%08X\n", (unsigned)dword.words.lo, + (unsigned)dword.words.hi); +} + +static void +ring_rx_descriptor_display(const struct rte_memzone *ring_mz, +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + portid_t port_id, +#else + __rte_unused portid_t port_id, +#endif + uint16_t desc_id) +{ + struct igb_ring_desc_16_bytes *ring = + (struct igb_ring_desc_16_bytes *)ring_mz->addr; +#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC + int ret; + struct rte_eth_dev_info dev_info; + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if (strstr(dev_info.driver_name, "i40e") != NULL) { + /* 32 bytes RX descriptor, i40e only */ + struct igb_ring_desc_32_bytes *ring = + (struct igb_ring_desc_32_bytes *)ring_mz->addr; + ring[desc_id].lo_dword.dword = + rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); + ring_rxd_display_dword(ring[desc_id].lo_dword); + ring[desc_id].hi_dword.dword = + rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); + ring_rxd_display_dword(ring[desc_id].hi_dword); + ring[desc_id].resv1.dword = + rte_le_to_cpu_64(ring[desc_id].resv1.dword); + ring_rxd_display_dword(ring[desc_id].resv1); + ring[desc_id].resv2.dword = + rte_le_to_cpu_64(ring[desc_id].resv2.dword); + ring_rxd_display_dword(ring[desc_id].resv2); + + return; + } +#endif + /* 16 bytes RX descriptor */ + ring[desc_id].lo_dword.dword = + rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); + ring_rxd_display_dword(ring[desc_id].lo_dword); + ring[desc_id].hi_dword.dword = + rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); + ring_rxd_display_dword(ring[desc_id].hi_dword); +} + +static void +ring_tx_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) +{ + struct igb_ring_desc_16_bytes *ring; + struct igb_ring_desc_16_bytes txd; + + ring = (struct igb_ring_desc_16_bytes *)ring_mz->addr; 
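ring_dma_zone_lookup() depends on PMDs registering their descriptor rings under the "eth_p<port>_q<queue>_<ring>" memzone name. A sketch of the same lookup for an RX ring follows; the helper name is assumed, and not every PMD exposes its rings this way:

/* Sketch: locate the RX descriptor ring memzone of a given port and
 * queue using the naming convention above. Returns NULL when the PMD
 * did not register its ring under that name. */
#include <stdio.h>
#include <rte_memzone.h>

static const struct rte_memzone *
find_rx_ring_mz(uint16_t port_id, uint16_t queue_id)
{
        char mz_name[RTE_MEMZONE_NAMESIZE];

        snprintf(mz_name, sizeof(mz_name), "eth_p%d_q%d_rx_ring",
                 port_id, queue_id);
        return rte_memzone_lookup(mz_name);
}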
+ txd.lo_dword.dword = rte_le_to_cpu_64(ring[desc_id].lo_dword.dword); + txd.hi_dword.dword = rte_le_to_cpu_64(ring[desc_id].hi_dword.dword); + printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", + (unsigned)txd.lo_dword.words.lo, + (unsigned)txd.lo_dword.words.hi, + (unsigned)txd.hi_dword.words.lo, + (unsigned)txd.hi_dword.words.hi); +} + +void +rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) +{ + const struct rte_memzone *rx_mz; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (rx_queue_id_is_invalid(rxq_id)) + return; + if (rx_desc_id_is_invalid(rxd_id)) + return; + rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); + if (rx_mz == NULL) + return; + ring_rx_descriptor_display(rx_mz, port_id, rxd_id); +} + +void +tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) +{ + const struct rte_memzone *tx_mz; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (tx_queue_id_is_invalid(txq_id)) + return; + if (tx_desc_id_is_invalid(txd_id)) + return; + tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); + if (tx_mz == NULL) + return; + ring_tx_descriptor_display(tx_mz, txd_id); +} + +void +fwd_lcores_config_display(void) +{ + lcoreid_t lc_id; + + printf("List of forwarding lcores:"); + for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) + printf(" %2u", fwd_lcores_cpuids[lc_id]); + printf("\n"); +} +void +rxtx_config_display(void) +{ + portid_t pid; + queueid_t qid; + + printf(" %s packet forwarding%s packets/burst=%d\n", + cur_fwd_eng->fwd_mode_name, + retry_enabled == 0 ? "" : " with retry", + nb_pkt_per_burst); + + if (cur_fwd_eng == &tx_only_engine || cur_fwd_eng == &flow_gen_engine) + printf(" packet len=%u - nb packet segments=%d\n", + (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); + + printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", + nb_fwd_lcores, nb_fwd_ports); + + RTE_ETH_FOREACH_DEV(pid) { + struct rte_eth_rxconf *rx_conf = &ports[pid].rx_conf[0]; + struct rte_eth_txconf *tx_conf = &ports[pid].tx_conf[0]; + uint16_t *nb_rx_desc = &ports[pid].nb_rx_desc[0]; + uint16_t *nb_tx_desc = &ports[pid].nb_tx_desc[0]; + uint16_t nb_rx_desc_tmp; + uint16_t nb_tx_desc_tmp; + struct rte_eth_rxq_info rx_qinfo; + struct rte_eth_txq_info tx_qinfo; + int32_t rc; + + /* per port config */ + printf(" port %d: RX queue number: %d Tx queue number: %d\n", + (unsigned int)pid, nb_rxq, nb_txq); + + printf(" Rx offloads=0x%"PRIx64" Tx offloads=0x%"PRIx64"\n", + ports[pid].dev_conf.rxmode.offloads, + ports[pid].dev_conf.txmode.offloads); + + /* per rx queue config only for first queue to be less verbose */ + for (qid = 0; qid < 1; qid++) { + rc = rte_eth_rx_queue_info_get(pid, qid, &rx_qinfo); + if (rc) + nb_rx_desc_tmp = nb_rx_desc[qid]; + else + nb_rx_desc_tmp = rx_qinfo.nb_desc; + + printf(" RX queue: %d\n", qid); + printf(" RX desc=%d - RX free threshold=%d\n", + nb_rx_desc_tmp, rx_conf[qid].rx_free_thresh); + printf(" RX threshold registers: pthresh=%d hthresh=%d " + " wthresh=%d\n", + rx_conf[qid].rx_thresh.pthresh, + rx_conf[qid].rx_thresh.hthresh, + rx_conf[qid].rx_thresh.wthresh); + printf(" RX Offloads=0x%"PRIx64"\n", + rx_conf[qid].offloads); + } + + /* per tx queue config only for first queue to be less verbose */ + for (qid = 0; qid < 1; qid++) { + rc = rte_eth_tx_queue_info_get(pid, qid, &tx_qinfo); + if (rc) + nb_tx_desc_tmp = nb_tx_desc[qid]; + else + nb_tx_desc_tmp = tx_qinfo.nb_desc; + + printf(" TX queue: %d\n", qid); + printf(" TX desc=%d - TX free threshold=%d\n", + nb_tx_desc_tmp, 
tx_conf[qid].tx_free_thresh); + printf(" TX threshold registers: pthresh=%d hthresh=%d " + " wthresh=%d\n", + tx_conf[qid].tx_thresh.pthresh, + tx_conf[qid].tx_thresh.hthresh, + tx_conf[qid].tx_thresh.wthresh); + printf(" TX offloads=0x%"PRIx64" - TX RS bit threshold=%d\n", + tx_conf[qid].offloads, tx_conf->tx_rs_thresh); + } + } +} + +void +port_rss_reta_info(portid_t port_id, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t nb_entries) +{ + uint16_t i, idx, shift; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + ret = rte_eth_dev_rss_reta_query(port_id, reta_conf, nb_entries); + if (ret != 0) { + printf("Failed to get RSS RETA info, return code = %d\n", ret); + return; + } + + for (i = 0; i < nb_entries; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (!(reta_conf[idx].mask & (1ULL << shift))) + continue; + printf("RSS RETA configuration: hash index=%u, queue=%u\n", + i, reta_conf[idx].reta[shift]); + } +} + +/* + * Displays the RSS hash functions of a port, and, optionaly, the RSS hash + * key of the port. + */ +void +port_rss_hash_conf_show(portid_t port_id, int show_rss_key) +{ + struct rte_eth_rss_conf rss_conf = {0}; + uint8_t rss_key[RSS_HASH_KEY_LENGTH]; + uint64_t rss_hf; + uint8_t i; + int diag; + struct rte_eth_dev_info dev_info; + uint8_t hash_key_size; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if (dev_info.hash_key_size > 0 && + dev_info.hash_key_size <= sizeof(rss_key)) + hash_key_size = dev_info.hash_key_size; + else { + printf("dev_info did not provide a valid hash key size\n"); + return; + } + + /* Get RSS hash key if asked to display it */ + rss_conf.rss_key = (show_rss_key) ? rss_key : NULL; + rss_conf.rss_key_len = hash_key_size; + diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); + if (diag != 0) { + switch (diag) { + case -ENODEV: + printf("port index %d invalid\n", port_id); + break; + case -ENOTSUP: + printf("operation not supported by device\n"); + break; + default: + printf("operation failed - diag=%d\n", diag); + break; + } + return; + } + rss_hf = rss_conf.rss_hf; + if (rss_hf == 0) { + printf("RSS disabled\n"); + return; + } + printf("RSS functions:\n "); + for (i = 0; rss_type_table[i].str; i++) { + if (rss_hf & rss_type_table[i].rss_type) + printf("%s ", rss_type_table[i].str); + } + printf("\n"); + if (!show_rss_key) + return; + printf("RSS key:\n"); + for (i = 0; i < hash_key_size; i++) + printf("%02X", rss_key[i]); + printf("\n"); +} + +void +port_rss_hash_key_update(portid_t port_id, char rss_type[], uint8_t *hash_key, + uint hash_key_len) +{ + struct rte_eth_rss_conf rss_conf; + int diag; + unsigned int i; + + rss_conf.rss_key = NULL; + rss_conf.rss_key_len = hash_key_len; + rss_conf.rss_hf = 0; + for (i = 0; rss_type_table[i].str; i++) { + if (!strcmp(rss_type_table[i].str, rss_type)) + rss_conf.rss_hf = rss_type_table[i].rss_type; + } + diag = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf); + if (diag == 0) { + rss_conf.rss_key = hash_key; + diag = rte_eth_dev_rss_hash_update(port_id, &rss_conf); + } + if (diag == 0) + return; + + switch (diag) { + case -ENODEV: + printf("port index %d invalid\n", port_id); + break; + case -ENOTSUP: + printf("operation not supported by device\n"); + break; + default: + printf("operation failed - diag=%d\n", diag); + break; + } +} + +/* + * Setup forwarding configuration for each logical core. 
+ */ +static void +setup_fwd_config_of_each_lcore(struct fwd_config *cfg) +{ + streamid_t nb_fs_per_lcore; + streamid_t nb_fs; + streamid_t sm_id; + lcoreid_t nb_extra; + lcoreid_t nb_fc; + lcoreid_t nb_lc; + lcoreid_t lc_id; + + nb_fs = cfg->nb_fwd_streams; + nb_fc = cfg->nb_fwd_lcores; + if (nb_fs <= nb_fc) { + nb_fs_per_lcore = 1; + nb_extra = 0; + } else { + nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); + nb_extra = (lcoreid_t) (nb_fs % nb_fc); + } + + nb_lc = (lcoreid_t) (nb_fc - nb_extra); + sm_id = 0; + for (lc_id = 0; lc_id < nb_lc; lc_id++) { + fwd_lcores[lc_id]->stream_idx = sm_id; + fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; + sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); + } + + /* + * Assign extra remaining streams, if any. + */ + nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); + for (lc_id = 0; lc_id < nb_extra; lc_id++) { + fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; + fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; + sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); + } +} + +static portid_t +fwd_topology_tx_port_get(portid_t rxp) +{ + static int warning_once = 1; + + RTE_ASSERT(rxp < cur_fwd_config.nb_fwd_ports); + + switch (port_topology) { + default: + case PORT_TOPOLOGY_PAIRED: + if ((rxp & 0x1) == 0) { + if (rxp + 1 < cur_fwd_config.nb_fwd_ports) + return rxp + 1; + if (warning_once) { + printf("\nWarning! port-topology=paired" + " and odd forward ports number," + " the last port will pair with" + " itself.\n\n"); + warning_once = 0; + } + return rxp; + } + return rxp - 1; + case PORT_TOPOLOGY_CHAINED: + return (rxp + 1) % cur_fwd_config.nb_fwd_ports; + case PORT_TOPOLOGY_LOOP: + return rxp; + } +} + +static void +simple_fwd_config_setup(void) +{ + portid_t i; + + cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; + cur_fwd_config.nb_fwd_streams = + (streamid_t) cur_fwd_config.nb_fwd_ports; + + /* reinitialize forwarding streams */ + init_fwd_streams(); + + /* + * In the simple forwarding test, the number of forwarding cores + * must be lower or equal to the number of forwarding ports. + */ + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; + if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) + cur_fwd_config.nb_fwd_lcores = + (lcoreid_t) cur_fwd_config.nb_fwd_ports; + setup_fwd_config_of_each_lcore(&cur_fwd_config); + + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + fwd_streams[i]->rx_port = fwd_ports_ids[i]; + fwd_streams[i]->rx_queue = 0; + fwd_streams[i]->tx_port = + fwd_ports_ids[fwd_topology_tx_port_get(i)]; + fwd_streams[i]->tx_queue = 0; + fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; + fwd_streams[i]->retry_enabled = retry_enabled; + } +} + +/** + * For the RSS forwarding test all streams distributed over lcores. Each stream + * being composed of a RX queue to poll on a RX port for input messages, + * associated with a TX queue of a TX port where to send forwarded packets. 
+ */ +static void +rss_fwd_config_setup(void) +{ + portid_t rxp; + portid_t txp; + queueid_t rxq; + queueid_t nb_q; + streamid_t sm_id; + + nb_q = nb_rxq; + if (nb_q > nb_txq) + nb_q = nb_txq; + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; + cur_fwd_config.nb_fwd_ports = nb_fwd_ports; + cur_fwd_config.nb_fwd_streams = + (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); + + if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) + cur_fwd_config.nb_fwd_lcores = + (lcoreid_t)cur_fwd_config.nb_fwd_streams; + + /* reinitialize forwarding streams */ + init_fwd_streams(); + + setup_fwd_config_of_each_lcore(&cur_fwd_config); + rxp = 0; rxq = 0; + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { + struct fwd_stream *fs; + + fs = fwd_streams[sm_id]; + txp = fwd_topology_tx_port_get(rxp); + fs->rx_port = fwd_ports_ids[rxp]; + fs->rx_queue = rxq; + fs->tx_port = fwd_ports_ids[txp]; + fs->tx_queue = rxq; + fs->peer_addr = fs->tx_port; + fs->retry_enabled = retry_enabled; + rxp++; + if (rxp < nb_fwd_ports) + continue; + rxp = 0; + rxq++; + } +} + +/** + * For the DCB forwarding test, each core is assigned on each traffic class. + * + * Each core is assigned a multi-stream, each stream being composed of + * a RX queue to poll on a RX port for input messages, associated with + * a TX queue of a TX port where to send forwarded packets. All RX and + * TX queues are mapping to the same traffic class. + * If VMDQ and DCB co-exist, each traffic class on different POOLs share + * the same core + */ +static void +dcb_fwd_config_setup(void) +{ + struct rte_eth_dcb_info rxp_dcb_info, txp_dcb_info; + portid_t txp, rxp = 0; + queueid_t txq, rxq = 0; + lcoreid_t lc_id; + uint16_t nb_rx_queue, nb_tx_queue; + uint16_t i, j, k, sm_id = 0; + uint8_t tc = 0; + + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; + cur_fwd_config.nb_fwd_ports = nb_fwd_ports; + cur_fwd_config.nb_fwd_streams = + (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); + + /* reinitialize forwarding streams */ + init_fwd_streams(); + sm_id = 0; + txp = 1; + /* get the dcb info on the first RX and TX ports */ + (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); + (void)rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); + + for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { + fwd_lcores[lc_id]->stream_nb = 0; + fwd_lcores[lc_id]->stream_idx = sm_id; + for (i = 0; i < ETH_MAX_VMDQ_POOL; i++) { + /* if the nb_queue is zero, means this tc is + * not enabled on the POOL + */ + if (rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue == 0) + break; + k = fwd_lcores[lc_id]->stream_nb + + fwd_lcores[lc_id]->stream_idx; + rxq = rxp_dcb_info.tc_queue.tc_rxq[i][tc].base; + txq = txp_dcb_info.tc_queue.tc_txq[i][tc].base; + nb_rx_queue = txp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; + nb_tx_queue = txp_dcb_info.tc_queue.tc_txq[i][tc].nb_queue; + for (j = 0; j < nb_rx_queue; j++) { + struct fwd_stream *fs; + + fs = fwd_streams[k + j]; + fs->rx_port = fwd_ports_ids[rxp]; + fs->rx_queue = rxq + j; + fs->tx_port = fwd_ports_ids[txp]; + fs->tx_queue = txq + j % nb_tx_queue; + fs->peer_addr = fs->tx_port; + fs->retry_enabled = retry_enabled; + } + fwd_lcores[lc_id]->stream_nb += + rxp_dcb_info.tc_queue.tc_rxq[i][tc].nb_queue; + } + sm_id = (streamid_t) (sm_id + fwd_lcores[lc_id]->stream_nb); + + tc++; + if (tc < rxp_dcb_info.nb_tcs) + continue; + /* Restart from TC 0 on next RX port */ + tc = 0; + if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) + rxp = (portid_t) + (rxp + 
((nb_ports >> 1) / nb_fwd_ports)); + else + rxp++; + if (rxp >= nb_fwd_ports) + return; + /* get the dcb information on next RX and TX ports */ + if ((rxp & 0x1) == 0) + txp = (portid_t) (rxp + 1); + else + txp = (portid_t) (rxp - 1); + rte_eth_dev_get_dcb_info(fwd_ports_ids[rxp], &rxp_dcb_info); + rte_eth_dev_get_dcb_info(fwd_ports_ids[txp], &txp_dcb_info); + } +} + +static void +icmp_echo_config_setup(void) +{ + portid_t rxp; + queueid_t rxq; + lcoreid_t lc_id; + uint16_t sm_id; + + if ((nb_txq * nb_fwd_ports) < nb_fwd_lcores) + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) + (nb_txq * nb_fwd_ports); + else + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; + cur_fwd_config.nb_fwd_ports = nb_fwd_ports; + cur_fwd_config.nb_fwd_streams = + (streamid_t) (nb_rxq * cur_fwd_config.nb_fwd_ports); + if (cur_fwd_config.nb_fwd_streams < cur_fwd_config.nb_fwd_lcores) + cur_fwd_config.nb_fwd_lcores = + (lcoreid_t)cur_fwd_config.nb_fwd_streams; + if (verbose_level > 0) { + printf("%s fwd_cores=%d fwd_ports=%d fwd_streams=%d\n", + __FUNCTION__, + cur_fwd_config.nb_fwd_lcores, + cur_fwd_config.nb_fwd_ports, + cur_fwd_config.nb_fwd_streams); + } + + /* reinitialize forwarding streams */ + init_fwd_streams(); + setup_fwd_config_of_each_lcore(&cur_fwd_config); + rxp = 0; rxq = 0; + for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { + if (verbose_level > 0) + printf(" core=%d: \n", lc_id); + for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { + struct fwd_stream *fs; + fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; + fs->rx_port = fwd_ports_ids[rxp]; + fs->rx_queue = rxq; + fs->tx_port = fs->rx_port; + fs->tx_queue = rxq; + fs->peer_addr = fs->tx_port; + fs->retry_enabled = retry_enabled; + if (verbose_level > 0) + printf(" stream=%d port=%d rxq=%d txq=%d\n", + sm_id, fs->rx_port, fs->rx_queue, + fs->tx_queue); + rxq = (queueid_t) (rxq + 1); + if (rxq == nb_rxq) { + rxq = 0; + rxp = (portid_t) (rxp + 1); + } + } + } +} + +#if defined RTE_LIBRTE_PMD_SOFTNIC +static void +softnic_fwd_config_setup(void) +{ + struct rte_port *port; + portid_t pid, softnic_portid; + queueid_t i; + uint8_t softnic_enable = 0; + + RTE_ETH_FOREACH_DEV(pid) { + port = &ports[pid]; + const char *driver = port->dev_info.driver_name; + + if (strcmp(driver, "net_softnic") == 0) { + softnic_portid = pid; + softnic_enable = 1; + break; + } + } + + if (softnic_enable == 0) { + printf("Softnic mode not configured(%s)!\n", __func__); + return; + } + + cur_fwd_config.nb_fwd_ports = 1; + cur_fwd_config.nb_fwd_streams = (streamid_t) nb_rxq; + + /* Re-initialize forwarding streams */ + init_fwd_streams(); + + /* + * In the softnic forwarding test, the number of forwarding cores + * is set to one and remaining are used for softnic packet processing. 
+ */ + cur_fwd_config.nb_fwd_lcores = 1; + setup_fwd_config_of_each_lcore(&cur_fwd_config); + + for (i = 0; i < cur_fwd_config.nb_fwd_streams; i++) { + fwd_streams[i]->rx_port = softnic_portid; + fwd_streams[i]->rx_queue = i; + fwd_streams[i]->tx_port = softnic_portid; + fwd_streams[i]->tx_queue = i; + fwd_streams[i]->peer_addr = fwd_streams[i]->tx_port; + fwd_streams[i]->retry_enabled = retry_enabled; + } +} +#endif + +void +fwd_config_setup(void) +{ + cur_fwd_config.fwd_eng = cur_fwd_eng; + if (strcmp(cur_fwd_eng->fwd_mode_name, "icmpecho") == 0) { + icmp_echo_config_setup(); + return; + } + +#if defined RTE_LIBRTE_PMD_SOFTNIC + if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { + softnic_fwd_config_setup(); + return; + } +#endif + + if ((nb_rxq > 1) && (nb_txq > 1)){ + if (dcb_config) + dcb_fwd_config_setup(); + else + rss_fwd_config_setup(); + } + else + simple_fwd_config_setup(); +} + +static const char * +mp_alloc_to_str(uint8_t mode) +{ + switch (mode) { + case MP_ALLOC_NATIVE: + return "native"; + case MP_ALLOC_ANON: + return "anon"; + case MP_ALLOC_XMEM: + return "xmem"; + case MP_ALLOC_XMEM_HUGE: + return "xmemhuge"; + case MP_ALLOC_XBUF: + return "xbuf"; + default: + return "invalid"; + } +} + +void +pkt_fwd_config_display(struct fwd_config *cfg) +{ + struct fwd_stream *fs; + lcoreid_t lc_id; + streamid_t sm_id; + + printf("%s packet forwarding%s - ports=%d - cores=%d - streams=%d - " + "NUMA support %s, MP allocation mode: %s\n", + cfg->fwd_eng->fwd_mode_name, + retry_enabled == 0 ? "" : " with retry", + cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, + numa_support == 1 ? "enabled" : "disabled", + mp_alloc_to_str(mp_alloc_type)); + + if (retry_enabled) + printf("TX retry num: %u, delay between TX retries: %uus\n", + burst_tx_retry_num, burst_tx_delay_time); + for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { + printf("Logical Core %u (socket %u) forwards packets on " + "%d streams:", + fwd_lcores_cpuids[lc_id], + rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), + fwd_lcores[lc_id]->stream_nb); + for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { + fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; + printf("\n RX P=%d/Q=%d (socket %u) -> TX " + "P=%d/Q=%d (socket %u) ", + fs->rx_port, fs->rx_queue, + ports[fs->rx_port].socket_id, + fs->tx_port, fs->tx_queue, + ports[fs->tx_port].socket_id); + print_ethaddr("peer=", + &peer_eth_addrs[fs->peer_addr]); + } + printf("\n"); + } + printf("\n"); +} + +void +set_fwd_eth_peer(portid_t port_id, char *peer_addr) +{ + struct rte_ether_addr new_peer_addr; + if (!rte_eth_dev_is_valid_port(port_id)) { + printf("Error: Invalid port number %i\n", port_id); + return; + } + if (rte_ether_unformat_addr(peer_addr, &new_peer_addr) < 0) { + printf("Error: Invalid ethernet address: %s\n", peer_addr); + return; + } + peer_eth_addrs[port_id] = new_peer_addr; +} + +int +set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) +{ + unsigned int i; + unsigned int lcore_cpuid; + int record_now; + + record_now = 0; + again: + for (i = 0; i < nb_lc; i++) { + lcore_cpuid = lcorelist[i]; + if (! 
rte_lcore_is_enabled(lcore_cpuid)) { + printf("lcore %u not enabled\n", lcore_cpuid); + return -1; + } + if (lcore_cpuid == rte_get_master_lcore()) { + printf("lcore %u cannot be masked on for running " + "packet forwarding, which is the master lcore " + "and reserved for command line parsing only\n", + lcore_cpuid); + return -1; + } + if (record_now) + fwd_lcores_cpuids[i] = lcore_cpuid; + } + if (record_now == 0) { + record_now = 1; + goto again; + } + nb_cfg_lcores = (lcoreid_t) nb_lc; + if (nb_fwd_lcores != (lcoreid_t) nb_lc) { + printf("previous number of forwarding cores %u - changed to " + "number of configured cores %u\n", + (unsigned int) nb_fwd_lcores, nb_lc); + nb_fwd_lcores = (lcoreid_t) nb_lc; + } + + return 0; +} + +int +set_fwd_lcores_mask(uint64_t lcoremask) +{ + unsigned int lcorelist[64]; + unsigned int nb_lc; + unsigned int i; + + if (lcoremask == 0) { + printf("Invalid NULL mask of cores\n"); + return -1; + } + nb_lc = 0; + for (i = 0; i < 64; i++) { + if (! ((uint64_t)(1ULL << i) & lcoremask)) + continue; + lcorelist[nb_lc++] = i; + } + return set_fwd_lcores_list(lcorelist, nb_lc); +} + +void +set_fwd_lcores_number(uint16_t nb_lc) +{ + if (nb_lc > nb_cfg_lcores) { + printf("nb fwd cores %u > %u (max. number of configured " + "lcores) - ignored\n", + (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); + return; + } + nb_fwd_lcores = (lcoreid_t) nb_lc; + printf("Number of forwarding cores set to %u\n", + (unsigned int) nb_fwd_lcores); +} + +void +set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) +{ + unsigned int i; + portid_t port_id; + int record_now; + + record_now = 0; + again: + for (i = 0; i < nb_pt; i++) { + port_id = (portid_t) portlist[i]; + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (record_now) + fwd_ports_ids[i] = port_id; + } + if (record_now == 0) { + record_now = 1; + goto again; + } + nb_cfg_ports = (portid_t) nb_pt; + if (nb_fwd_ports != (portid_t) nb_pt) { + printf("previous number of forwarding ports %u - changed to " + "number of configured ports %u\n", + (unsigned int) nb_fwd_ports, nb_pt); + nb_fwd_ports = (portid_t) nb_pt; + } +} + +/** + * Parse the user input and obtain the list of forwarding ports + * + * @param[in] list + * String containing the user input. User can specify + * in these formats 1,3,5 or 1-3 or 1-2,5 or 3,5-6. + * For example, if the user wants to use all the available + * 4 ports in his system, then the input can be 0-3 or 0,1,2,3. + * If the user wants to use only the ports 1,2 then the input + * is 1,2. 
+ * valid characters are '-' and ',' + * @param[out] values + * This array will be filled with a list of port IDs + * based on the user input + * Note that duplicate entries are discarded and only the first + * count entries in this array are port IDs and all the rest + * will contain default values + * @param[in] maxsize + * This parameter denotes 2 things + * 1) Number of elements in the values array + * 2) Maximum value of each element in the values array + * @return + * On success, returns total count of parsed port IDs + * On failure, returns 0 + */ +static unsigned int +parse_port_list(const char *list, unsigned int *values, unsigned int maxsize) +{ + unsigned int count = 0; + char *end = NULL; + int min, max; + int value, i; + unsigned int marked[maxsize]; + + if (list == NULL || values == NULL) + return 0; + + for (i = 0; i < (int)maxsize; i++) + marked[i] = 0; + + min = INT_MAX; + + do { + /*Remove the blank spaces if any*/ + while (isblank(*list)) + list++; + if (*list == '\0') + break; + errno = 0; + value = strtol(list, &end, 10); + if (errno || end == NULL) + return 0; + if (value < 0 || value >= (int)maxsize) + return 0; + while (isblank(*end)) + end++; + if (*end == '-' && min == INT_MAX) { + min = value; + } else if ((*end == ',') || (*end == '\0')) { + max = value; + if (min == INT_MAX) + min = value; + for (i = min; i <= max; i++) { + if (count < maxsize) { + if (marked[i]) + continue; + values[count] = i; + marked[i] = 1; + count++; + } + } + min = INT_MAX; + } else + return 0; + list = end + 1; + } while (*end != '\0'); + + return count; +} + +void +parse_fwd_portlist(const char *portlist) +{ + unsigned int portcount; + unsigned int portindex[RTE_MAX_ETHPORTS]; + unsigned int i, valid_port_count = 0; + + portcount = parse_port_list(portlist, portindex, RTE_MAX_ETHPORTS); + if (!portcount) + rte_exit(EXIT_FAILURE, "Invalid fwd port list\n"); + + /* + * Here we verify the validity of the ports + * and thereby calculate the total number of + * valid ports + */ + for (i = 0; i < portcount && i < RTE_DIM(portindex); i++) { + if (rte_eth_dev_is_valid_port(portindex[i])) { + portindex[valid_port_count] = portindex[i]; + valid_port_count++; + } + } + + set_fwd_ports_list(portindex, valid_port_count); +} + +void +set_fwd_ports_mask(uint64_t portmask) +{ + unsigned int portlist[64]; + unsigned int nb_pt; + unsigned int i; + + if (portmask == 0) { + printf("Invalid NULL mask of ports\n"); + return; + } + nb_pt = 0; + RTE_ETH_FOREACH_DEV(i) { + if (! 
((uint64_t)(1ULL << i) & portmask)) + continue; + portlist[nb_pt++] = i; + } + set_fwd_ports_list(portlist, nb_pt); +} + +void +set_fwd_ports_number(uint16_t nb_pt) +{ + if (nb_pt > nb_cfg_ports) { + printf("nb fwd ports %u > %u (number of configured " + "ports) - ignored\n", + (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); + return; + } + nb_fwd_ports = (portid_t) nb_pt; + printf("Number of forwarding ports set to %u\n", + (unsigned int) nb_fwd_ports); +} + +int +port_is_forwarding(portid_t port_id) +{ + unsigned int i; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return -1; + + for (i = 0; i < nb_fwd_ports; i++) { + if (fwd_ports_ids[i] == port_id) + return 1; + } + + return 0; +} + +void +set_nb_pkt_per_burst(uint16_t nb) +{ + if (nb > MAX_PKT_BURST) { + printf("nb pkt per burst: %u > %u (maximum packet per burst) " + " ignored\n", + (unsigned int) nb, (unsigned int) MAX_PKT_BURST); + return; + } + nb_pkt_per_burst = nb; + printf("Number of packets per burst set to %u\n", + (unsigned int) nb_pkt_per_burst); +} + +static const char * +tx_split_get_name(enum tx_pkt_split split) +{ + uint32_t i; + + for (i = 0; i != RTE_DIM(tx_split_name); i++) { + if (tx_split_name[i].split == split) + return tx_split_name[i].name; + } + return NULL; +} + +void +set_tx_pkt_split(const char *name) +{ + uint32_t i; + + for (i = 0; i != RTE_DIM(tx_split_name); i++) { + if (strcmp(tx_split_name[i].name, name) == 0) { + tx_pkt_split = tx_split_name[i].split; + return; + } + } + printf("unknown value: \"%s\"\n", name); +} + +void +show_tx_pkt_segments(void) +{ + uint32_t i, n; + const char *split; + + n = tx_pkt_nb_segs; + split = tx_split_get_name(tx_pkt_split); + + printf("Number of segments: %u\n", n); + printf("Segment sizes: "); + for (i = 0; i != n - 1; i++) + printf("%hu,", tx_pkt_seg_lengths[i]); + printf("%hu\n", tx_pkt_seg_lengths[i]); + printf("Split packet: %s\n", split); +} + +void +set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) +{ + uint16_t tx_pkt_len; + unsigned i; + + if (nb_segs >= (unsigned) nb_txd) { + printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", + nb_segs, (unsigned int) nb_txd); + return; + } + + /* + * Check that each segment length is greater or equal than + * the mbuf data sise. + * Check also that the total packet length is greater or equal than the + * size of an empty UDP/IP packet (sizeof(struct rte_ether_hdr) + + * 20 + 8). + */ + tx_pkt_len = 0; + for (i = 0; i < nb_segs; i++) { + if (seg_lengths[i] > (unsigned) mbuf_data_size) { + printf("length[%u]=%u > mbuf_data_size=%u - give up\n", + i, seg_lengths[i], (unsigned) mbuf_data_size); + return; + } + tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); + } + if (tx_pkt_len < (sizeof(struct rte_ether_hdr) + 20 + 8)) { + printf("total packet length=%u < %d - give up\n", + (unsigned) tx_pkt_len, + (int)(sizeof(struct rte_ether_hdr) + 20 + 8)); + return; + } + + for (i = 0; i < nb_segs; i++) + tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; + + tx_pkt_length = tx_pkt_len; + tx_pkt_nb_segs = (uint8_t) nb_segs; +} + +void +setup_gro(const char *onoff, portid_t port_id) +{ + if (!rte_eth_dev_is_valid_port(port_id)) { + printf("invalid port id %u\n", port_id); + return; + } + if (test_done == 0) { + printf("Before enable/disable GRO," + " please stop forwarding first\n"); + return; + } + if (strcmp(onoff, "on") == 0) { + if (gro_ports[port_id].enable != 0) { + printf("Port %u has enabled GRO. 
Please" + " disable GRO first\n", port_id); + return; + } + if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { + gro_ports[port_id].param.gro_types = RTE_GRO_TCP_IPV4; + gro_ports[port_id].param.max_flow_num = + GRO_DEFAULT_FLOW_NUM; + gro_ports[port_id].param.max_item_per_flow = + GRO_DEFAULT_ITEM_NUM_PER_FLOW; + } + gro_ports[port_id].enable = 1; + } else { + if (gro_ports[port_id].enable == 0) { + printf("Port %u has disabled GRO\n", port_id); + return; + } + gro_ports[port_id].enable = 0; + } +} + +void +setup_gro_flush_cycles(uint8_t cycles) +{ + if (test_done == 0) { + printf("Before change flush interval for GRO," + " please stop forwarding first.\n"); + return; + } + + if (cycles > GRO_MAX_FLUSH_CYCLES || cycles < + GRO_DEFAULT_FLUSH_CYCLES) { + printf("The flushing cycle be in the range" + " of 1 to %u. Revert to the default" + " value %u.\n", + GRO_MAX_FLUSH_CYCLES, + GRO_DEFAULT_FLUSH_CYCLES); + cycles = GRO_DEFAULT_FLUSH_CYCLES; + } + + gro_flush_cycles = cycles; +} + +void +show_gro(portid_t port_id) +{ + struct rte_gro_param *param; + uint32_t max_pkts_num; + + param = &gro_ports[port_id].param; + + if (!rte_eth_dev_is_valid_port(port_id)) { + printf("Invalid port id %u.\n", port_id); + return; + } + if (gro_ports[port_id].enable) { + printf("GRO type: TCP/IPv4\n"); + if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { + max_pkts_num = param->max_flow_num * + param->max_item_per_flow; + } else + max_pkts_num = MAX_PKT_BURST * GRO_MAX_FLUSH_CYCLES; + printf("Max number of packets to perform GRO: %u\n", + max_pkts_num); + printf("Flushing cycles: %u\n", gro_flush_cycles); + } else + printf("Port %u doesn't enable GRO.\n", port_id); +} + +void +setup_gso(const char *mode, portid_t port_id) +{ + if (!rte_eth_dev_is_valid_port(port_id)) { + printf("invalid port id %u\n", port_id); + return; + } + if (strcmp(mode, "on") == 0) { + if (test_done == 0) { + printf("before enabling GSO," + " please stop forwarding first\n"); + return; + } + gso_ports[port_id].enable = 1; + } else if (strcmp(mode, "off") == 0) { + if (test_done == 0) { + printf("before disabling GSO," + " please stop forwarding first\n"); + return; + } + gso_ports[port_id].enable = 0; + } +} + +char* +list_pkt_forwarding_modes(void) +{ + static char fwd_modes[128] = ""; + const char *separator = "|"; + struct fwd_engine *fwd_eng; + unsigned i = 0; + + if (strlen (fwd_modes) == 0) { + while ((fwd_eng = fwd_engines[i++]) != NULL) { + strncat(fwd_modes, fwd_eng->fwd_mode_name, + sizeof(fwd_modes) - strlen(fwd_modes) - 1); + strncat(fwd_modes, separator, + sizeof(fwd_modes) - strlen(fwd_modes) - 1); + } + fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; + } + + return fwd_modes; +} + +char* +list_pkt_forwarding_retry_modes(void) +{ + static char fwd_modes[128] = ""; + const char *separator = "|"; + struct fwd_engine *fwd_eng; + unsigned i = 0; + + if (strlen(fwd_modes) == 0) { + while ((fwd_eng = fwd_engines[i++]) != NULL) { + if (fwd_eng == &rx_only_engine) + continue; + strncat(fwd_modes, fwd_eng->fwd_mode_name, + sizeof(fwd_modes) - + strlen(fwd_modes) - 1); + strncat(fwd_modes, separator, + sizeof(fwd_modes) - + strlen(fwd_modes) - 1); + } + fwd_modes[strlen(fwd_modes) - strlen(separator)] = '\0'; + } + + return fwd_modes; +} + +void +set_pkt_forwarding_mode(const char *fwd_mode_name) +{ + struct fwd_engine *fwd_eng; + unsigned i; + + i = 0; + while ((fwd_eng = fwd_engines[i]) != NULL) { + if (! 
strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { + printf("Set %s packet forwarding mode%s\n", + fwd_mode_name, + retry_enabled == 0 ? "" : " with retry"); + cur_fwd_eng = fwd_eng; + return; + } + i++; + } + printf("Invalid %s packet forwarding mode\n", fwd_mode_name); +} + +void +add_rx_dump_callbacks(portid_t portid) +{ + struct rte_eth_dev_info dev_info; + uint16_t queue; + int ret; + + if (port_id_is_invalid(portid, ENABLED_WARN)) + return; + + ret = eth_dev_info_get_print_err(portid, &dev_info); + if (ret != 0) + return; + + for (queue = 0; queue < dev_info.nb_rx_queues; queue++) + if (!ports[portid].rx_dump_cb[queue]) + ports[portid].rx_dump_cb[queue] = + rte_eth_add_rx_callback(portid, queue, + dump_rx_pkts, NULL); +} + +void +add_tx_dump_callbacks(portid_t portid) +{ + struct rte_eth_dev_info dev_info; + uint16_t queue; + int ret; + + if (port_id_is_invalid(portid, ENABLED_WARN)) + return; + + ret = eth_dev_info_get_print_err(portid, &dev_info); + if (ret != 0) + return; + + for (queue = 0; queue < dev_info.nb_tx_queues; queue++) + if (!ports[portid].tx_dump_cb[queue]) + ports[portid].tx_dump_cb[queue] = + rte_eth_add_tx_callback(portid, queue, + dump_tx_pkts, NULL); +} + +void +remove_rx_dump_callbacks(portid_t portid) +{ + struct rte_eth_dev_info dev_info; + uint16_t queue; + int ret; + + if (port_id_is_invalid(portid, ENABLED_WARN)) + return; + + ret = eth_dev_info_get_print_err(portid, &dev_info); + if (ret != 0) + return; + + for (queue = 0; queue < dev_info.nb_rx_queues; queue++) + if (ports[portid].rx_dump_cb[queue]) { + rte_eth_remove_rx_callback(portid, queue, + ports[portid].rx_dump_cb[queue]); + ports[portid].rx_dump_cb[queue] = NULL; + } +} + +void +remove_tx_dump_callbacks(portid_t portid) +{ + struct rte_eth_dev_info dev_info; + uint16_t queue; + int ret; + + if (port_id_is_invalid(portid, ENABLED_WARN)) + return; + + ret = eth_dev_info_get_print_err(portid, &dev_info); + if (ret != 0) + return; + + for (queue = 0; queue < dev_info.nb_tx_queues; queue++) + if (ports[portid].tx_dump_cb[queue]) { + rte_eth_remove_tx_callback(portid, queue, + ports[portid].tx_dump_cb[queue]); + ports[portid].tx_dump_cb[queue] = NULL; + } +} + +void +configure_rxtx_dump_callbacks(uint16_t verbose) +{ + portid_t portid; + +#ifndef RTE_ETHDEV_RXTX_CALLBACKS + TESTPMD_LOG(ERR, "setting rxtx callbacks is not enabled\n"); + return; +#endif + + RTE_ETH_FOREACH_DEV(portid) + { + if (verbose == 1 || verbose > 2) + add_rx_dump_callbacks(portid); + else + remove_rx_dump_callbacks(portid); + if (verbose >= 2) + add_tx_dump_callbacks(portid); + else + remove_tx_dump_callbacks(portid); + } +} + +void +set_verbose_level(uint16_t vb_level) +{ + printf("Change verbose level from %u to %u\n", + (unsigned int) verbose_level, (unsigned int) vb_level); + verbose_level = vb_level; + configure_rxtx_dump_callbacks(verbose_level); +} + +void +vlan_extend_set(portid_t port_id, int on) +{ + int diag; + int vlan_offload; + uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + vlan_offload = rte_eth_dev_get_vlan_offload(port_id); + + if (on) { + vlan_offload |= ETH_VLAN_EXTEND_OFFLOAD; + port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; + } else { + vlan_offload &= ~ETH_VLAN_EXTEND_OFFLOAD; + port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; + } + + diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); + if (diag < 0) + printf("rx_vlan_extend_set(port_pi=%d, on=%d) failed " + "diag=%d\n", port_id, on, diag); + 
ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; +} + +void +rx_vlan_strip_set(portid_t port_id, int on) +{ + int diag; + int vlan_offload; + uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + vlan_offload = rte_eth_dev_get_vlan_offload(port_id); + + if (on) { + vlan_offload |= ETH_VLAN_STRIP_OFFLOAD; + port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + } else { + vlan_offload &= ~ETH_VLAN_STRIP_OFFLOAD; + port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; + } + + diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); + if (diag < 0) + printf("rx_vlan_strip_set(port_pi=%d, on=%d) failed " + "diag=%d\n", port_id, on, diag); + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; +} + +void +rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on) +{ + int diag; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + diag = rte_eth_dev_set_vlan_strip_on_queue(port_id, queue_id, on); + if (diag < 0) + printf("rx_vlan_strip_set_on_queue(port_pi=%d, queue_id=%d, on=%d) failed " + "diag=%d\n", port_id, queue_id, on, diag); +} + +void +rx_vlan_filter_set(portid_t port_id, int on) +{ + int diag; + int vlan_offload; + uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + vlan_offload = rte_eth_dev_get_vlan_offload(port_id); + + if (on) { + vlan_offload |= ETH_VLAN_FILTER_OFFLOAD; + port_rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + } else { + vlan_offload &= ~ETH_VLAN_FILTER_OFFLOAD; + port_rx_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; + } + + diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); + if (diag < 0) + printf("rx_vlan_filter_set(port_pi=%d, on=%d) failed " + "diag=%d\n", port_id, on, diag); + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; +} + +void +rx_vlan_qinq_strip_set(portid_t port_id, int on) +{ + int diag; + int vlan_offload; + uint64_t port_rx_offloads = ports[port_id].dev_conf.rxmode.offloads; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + vlan_offload = rte_eth_dev_get_vlan_offload(port_id); + + if (on) { + vlan_offload |= ETH_QINQ_STRIP_OFFLOAD; + port_rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; + } else { + vlan_offload &= ~ETH_QINQ_STRIP_OFFLOAD; + port_rx_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; + } + + diag = rte_eth_dev_set_vlan_offload(port_id, vlan_offload); + if (diag < 0) + printf("%s(port_pi=%d, on=%d) failed " + "diag=%d\n", __func__, port_id, on, diag); + ports[port_id].dev_conf.rxmode.offloads = port_rx_offloads; +} + +int +rx_vft_set(portid_t port_id, uint16_t vlan_id, int on) +{ + int diag; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return 1; + if (vlan_id_is_invalid(vlan_id)) + return 1; + diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); + if (diag == 0) + return 0; + printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " + "diag=%d\n", + port_id, vlan_id, on, diag); + return -1; +} + +void +rx_vlan_all_filter_set(portid_t port_id, int on) +{ + uint16_t vlan_id; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + for (vlan_id = 0; vlan_id < 4096; vlan_id++) { + if (rx_vft_set(port_id, vlan_id, on)) + break; + } +} + +void +vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, uint16_t tp_id) +{ + int diag; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + diag = rte_eth_dev_set_vlan_ether_type(port_id, vlan_type, tp_id); + if (diag == 0) + return; + + 
printf("tx_vlan_tpid_set(port_pi=%d, vlan_type=%d, tpid=%d) failed " + "diag=%d\n", + port_id, vlan_type, tp_id, diag); +} + +void +tx_vlan_set(portid_t port_id, uint16_t vlan_id) +{ + struct rte_eth_dev_info dev_info; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (vlan_id_is_invalid(vlan_id)) + return; + + if (ports[port_id].dev_conf.txmode.offloads & + DEV_TX_OFFLOAD_QINQ_INSERT) { + printf("Error, as QinQ has been enabled.\n"); + return; + } + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_VLAN_INSERT) == 0) { + printf("Error: vlan insert is not supported by port %d\n", + port_id); + return; + } + + tx_vlan_reset(port_id); + ports[port_id].dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_VLAN_INSERT; + ports[port_id].tx_vlan_id = vlan_id; +} + +void +tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer) +{ + struct rte_eth_dev_info dev_info; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + if (vlan_id_is_invalid(vlan_id)) + return; + if (vlan_id_is_invalid(vlan_id_outer)) + return; + + ret = eth_dev_info_get_print_err(port_id, &dev_info); + if (ret != 0) + return; + + if ((dev_info.tx_offload_capa & DEV_TX_OFFLOAD_QINQ_INSERT) == 0) { + printf("Error: qinq insert not supported by port %d\n", + port_id); + return; + } + + tx_vlan_reset(port_id); + ports[port_id].dev_conf.txmode.offloads |= (DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT); + ports[port_id].tx_vlan_id = vlan_id; + ports[port_id].tx_vlan_id_outer = vlan_id_outer; +} + +void +tx_vlan_reset(portid_t port_id) +{ + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + ports[port_id].dev_conf.txmode.offloads &= + ~(DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_QINQ_INSERT); + ports[port_id].tx_vlan_id = 0; + ports[port_id].tx_vlan_id_outer = 0; +} + +void +tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on) +{ + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + rte_eth_dev_set_vlan_pvid(port_id, vlan_id, on); +} + +void +set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value) +{ + uint16_t i; + uint8_t existing_mapping_found = 0; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + if (is_rx ? (rx_queue_id_is_invalid(queue_id)) : (tx_queue_id_is_invalid(queue_id))) + return; + + if (map_value >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { + printf("map_value not in required range 0..%d\n", + RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + return; + } + + if (!is_rx) { /*then tx*/ + for (i = 0; i < nb_tx_queue_stats_mappings; i++) { + if ((tx_queue_stats_mappings[i].port_id == port_id) && + (tx_queue_stats_mappings[i].queue_id == queue_id)) { + tx_queue_stats_mappings[i].stats_counter_id = map_value; + existing_mapping_found = 1; + break; + } + } + if (!existing_mapping_found) { /* A new additional mapping... */ + tx_queue_stats_mappings[nb_tx_queue_stats_mappings].port_id = port_id; + tx_queue_stats_mappings[nb_tx_queue_stats_mappings].queue_id = queue_id; + tx_queue_stats_mappings[nb_tx_queue_stats_mappings].stats_counter_id = map_value; + nb_tx_queue_stats_mappings++; + } + } + else { /*rx*/ + for (i = 0; i < nb_rx_queue_stats_mappings; i++) { + if ((rx_queue_stats_mappings[i].port_id == port_id) && + (rx_queue_stats_mappings[i].queue_id == queue_id)) { + rx_queue_stats_mappings[i].stats_counter_id = map_value; + existing_mapping_found = 1; + break; + } + } + if (!existing_mapping_found) { /* A new additional mapping... 
*/ + rx_queue_stats_mappings[nb_rx_queue_stats_mappings].port_id = port_id; + rx_queue_stats_mappings[nb_rx_queue_stats_mappings].queue_id = queue_id; + rx_queue_stats_mappings[nb_rx_queue_stats_mappings].stats_counter_id = map_value; + nb_rx_queue_stats_mappings++; + } + } +} + +void +set_xstats_hide_zero(uint8_t on_off) +{ + xstats_hide_zero = on_off; +} + +static inline void +print_fdir_mask(struct rte_eth_fdir_masks *mask) +{ + printf("\n vlan_tci: 0x%04x", rte_be_to_cpu_16(mask->vlan_tci_mask)); + + if (fdir_conf.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + printf(", mac_addr: 0x%02x, tunnel_type: 0x%01x," + " tunnel_id: 0x%08x", + mask->mac_addr_byte_mask, mask->tunnel_type_mask, + rte_be_to_cpu_32(mask->tunnel_id_mask)); + else if (fdir_conf.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) { + printf(", src_ipv4: 0x%08x, dst_ipv4: 0x%08x", + rte_be_to_cpu_32(mask->ipv4_mask.src_ip), + rte_be_to_cpu_32(mask->ipv4_mask.dst_ip)); + + printf("\n src_port: 0x%04x, dst_port: 0x%04x", + rte_be_to_cpu_16(mask->src_port_mask), + rte_be_to_cpu_16(mask->dst_port_mask)); + + printf("\n src_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", + rte_be_to_cpu_32(mask->ipv6_mask.src_ip[0]), + rte_be_to_cpu_32(mask->ipv6_mask.src_ip[1]), + rte_be_to_cpu_32(mask->ipv6_mask.src_ip[2]), + rte_be_to_cpu_32(mask->ipv6_mask.src_ip[3])); + + printf("\n dst_ipv6: 0x%08x,0x%08x,0x%08x,0x%08x", + rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[0]), + rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[1]), + rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[2]), + rte_be_to_cpu_32(mask->ipv6_mask.dst_ip[3])); + } + + printf("\n"); +} + +static inline void +print_fdir_flex_payload(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) +{ + struct rte_eth_flex_payload_cfg *cfg; + uint32_t i, j; + + for (i = 0; i < flex_conf->nb_payloads; i++) { + cfg = &flex_conf->flex_set[i]; + if (cfg->type == RTE_ETH_RAW_PAYLOAD) + printf("\n RAW: "); + else if (cfg->type == RTE_ETH_L2_PAYLOAD) + printf("\n L2_PAYLOAD: "); + else if (cfg->type == RTE_ETH_L3_PAYLOAD) + printf("\n L3_PAYLOAD: "); + else if (cfg->type == RTE_ETH_L4_PAYLOAD) + printf("\n L4_PAYLOAD: "); + else + printf("\n UNKNOWN PAYLOAD(%u): ", cfg->type); + for (j = 0; j < num; j++) + printf(" %-5u", cfg->src_offset[j]); + } + printf("\n"); +} + +static char * +flowtype_to_str(uint16_t flow_type) +{ + struct flow_type_info { + char str[32]; + uint16_t ftype; + }; + + uint8_t i; + static struct flow_type_info flowtype_str_table[] = { + {"raw", RTE_ETH_FLOW_RAW}, + {"ipv4", RTE_ETH_FLOW_IPV4}, + {"ipv4-frag", RTE_ETH_FLOW_FRAG_IPV4}, + {"ipv4-tcp", RTE_ETH_FLOW_NONFRAG_IPV4_TCP}, + {"ipv4-udp", RTE_ETH_FLOW_NONFRAG_IPV4_UDP}, + {"ipv4-sctp", RTE_ETH_FLOW_NONFRAG_IPV4_SCTP}, + {"ipv4-other", RTE_ETH_FLOW_NONFRAG_IPV4_OTHER}, + {"ipv6", RTE_ETH_FLOW_IPV6}, + {"ipv6-frag", RTE_ETH_FLOW_FRAG_IPV6}, + {"ipv6-tcp", RTE_ETH_FLOW_NONFRAG_IPV6_TCP}, + {"ipv6-udp", RTE_ETH_FLOW_NONFRAG_IPV6_UDP}, + {"ipv6-sctp", RTE_ETH_FLOW_NONFRAG_IPV6_SCTP}, + {"ipv6-other", RTE_ETH_FLOW_NONFRAG_IPV6_OTHER}, + {"l2_payload", RTE_ETH_FLOW_L2_PAYLOAD}, + {"port", RTE_ETH_FLOW_PORT}, + {"vxlan", RTE_ETH_FLOW_VXLAN}, + {"geneve", RTE_ETH_FLOW_GENEVE}, + {"nvgre", RTE_ETH_FLOW_NVGRE}, + {"vxlan-gpe", RTE_ETH_FLOW_VXLAN_GPE}, + }; + + for (i = 0; i < RTE_DIM(flowtype_str_table); i++) { + if (flowtype_str_table[i].ftype == flow_type) + return flowtype_str_table[i].str; + } + + return NULL; +} + +static inline void +print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num) +{ + struct rte_eth_fdir_flex_mask *mask; + uint32_t i, j; 
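+ /* Printable flow-type name resolved by flowtype_to_str(); it is NULL
+  * (printed as "unknown" below) for flow types that have no entry in
+  * flowtype_str_table.
+  */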
+ char *p; + + for (i = 0; i < flex_conf->nb_flexmasks; i++) { + mask = &flex_conf->flex_mask[i]; + p = flowtype_to_str(mask->flow_type); + printf("\n %s:\t", p ? p : "unknown"); + for (j = 0; j < num; j++) + printf(" %02x", mask->mask[j]); + } + printf("\n"); +} + +static inline void +print_fdir_flow_type(uint32_t flow_types_mask) +{ + int i; + char *p; + + for (i = RTE_ETH_FLOW_UNKNOWN; i < RTE_ETH_FLOW_MAX; i++) { + if (!(flow_types_mask & (1 << i))) + continue; + p = flowtype_to_str(i); + if (p) + printf(" %s", p); + else + printf(" unknown"); + } + printf("\n"); +} + +void +fdir_get_infos(portid_t port_id) +{ + struct rte_eth_fdir_stats fdir_stat; + struct rte_eth_fdir_info fdir_info; + int ret; + + static const char *fdir_stats_border = "########################"; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR); + if (ret < 0) { + printf("\n FDIR is not supported on port %-2d\n", + port_id); + return; + } + + memset(&fdir_info, 0, sizeof(fdir_info)); + rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, + RTE_ETH_FILTER_INFO, &fdir_info); + memset(&fdir_stat, 0, sizeof(fdir_stat)); + rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, + RTE_ETH_FILTER_STATS, &fdir_stat); + printf("\n %s FDIR infos for port %-2d %s\n", + fdir_stats_border, port_id, fdir_stats_border); + printf(" MODE: "); + if (fdir_info.mode == RTE_FDIR_MODE_PERFECT) + printf(" PERFECT\n"); + else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) + printf(" PERFECT-MAC-VLAN\n"); + else if (fdir_info.mode == RTE_FDIR_MODE_PERFECT_TUNNEL) + printf(" PERFECT-TUNNEL\n"); + else if (fdir_info.mode == RTE_FDIR_MODE_SIGNATURE) + printf(" SIGNATURE\n"); + else + printf(" DISABLE\n"); + if (fdir_info.mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN + && fdir_info.mode != RTE_FDIR_MODE_PERFECT_TUNNEL) { + printf(" SUPPORTED FLOW TYPE: "); + print_fdir_flow_type(fdir_info.flow_types_mask[0]); + } + printf(" FLEX PAYLOAD INFO:\n"); + printf(" max_len: %-10"PRIu32" payload_limit: %-10"PRIu32"\n" + " payload_unit: %-10"PRIu32" payload_seg: %-10"PRIu32"\n" + " bitmask_unit: %-10"PRIu32" bitmask_num: %-10"PRIu32"\n", + fdir_info.max_flexpayload, fdir_info.flex_payload_limit, + fdir_info.flex_payload_unit, + fdir_info.max_flex_payload_segment_num, + fdir_info.flex_bitmask_unit, fdir_info.max_flex_bitmask_num); + printf(" MASK: "); + print_fdir_mask(&fdir_info.mask); + if (fdir_info.flex_conf.nb_payloads > 0) { + printf(" FLEX PAYLOAD SRC OFFSET:"); + print_fdir_flex_payload(&fdir_info.flex_conf, fdir_info.max_flexpayload); + } + if (fdir_info.flex_conf.nb_flexmasks > 0) { + printf(" FLEX MASK CFG:"); + print_fdir_flex_mask(&fdir_info.flex_conf, fdir_info.max_flexpayload); + } + printf(" guarant_count: %-10"PRIu32" best_count: %"PRIu32"\n", + fdir_stat.guarant_cnt, fdir_stat.best_cnt); + printf(" guarant_space: %-10"PRIu32" best_space: %"PRIu32"\n", + fdir_info.guarant_spc, fdir_info.best_spc); + printf(" collision: %-10"PRIu32" free: %"PRIu32"\n" + " maxhash: %-10"PRIu32" maxlen: %"PRIu32"\n" + " add: %-10"PRIu64" remove: %"PRIu64"\n" + " f_add: %-10"PRIu64" f_remove: %"PRIu64"\n", + fdir_stat.collision, fdir_stat.free, + fdir_stat.maxhash, fdir_stat.maxlen, + fdir_stat.add, fdir_stat.remove, + fdir_stat.f_add, fdir_stat.f_remove); + printf(" %s############################%s\n", + fdir_stats_border, fdir_stats_border); +} + +void +fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg) +{ + struct rte_port *port; + struct 
rte_eth_fdir_flex_conf *flex_conf; + int i, idx = 0; + + port = &ports[port_id]; + flex_conf = &port->dev_conf.fdir_conf.flex_conf; + for (i = 0; i < RTE_ETH_FLOW_MAX; i++) { + if (cfg->flow_type == flex_conf->flex_mask[i].flow_type) { + idx = i; + break; + } + } + if (i >= RTE_ETH_FLOW_MAX) { + if (flex_conf->nb_flexmasks < RTE_DIM(flex_conf->flex_mask)) { + idx = flex_conf->nb_flexmasks; + flex_conf->nb_flexmasks++; + } else { + printf("The flex mask table is full. Can not set flex" + " mask for flow_type(%u).", cfg->flow_type); + return; + } + } + rte_memcpy(&flex_conf->flex_mask[idx], + cfg, + sizeof(struct rte_eth_fdir_flex_mask)); +} + +void +fdir_set_flex_payload(portid_t port_id, struct rte_eth_flex_payload_cfg *cfg) +{ + struct rte_port *port; + struct rte_eth_fdir_flex_conf *flex_conf; + int i, idx = 0; + + port = &ports[port_id]; + flex_conf = &port->dev_conf.fdir_conf.flex_conf; + for (i = 0; i < RTE_ETH_PAYLOAD_MAX; i++) { + if (cfg->type == flex_conf->flex_set[i].type) { + idx = i; + break; + } + } + if (i >= RTE_ETH_PAYLOAD_MAX) { + if (flex_conf->nb_payloads < RTE_DIM(flex_conf->flex_set)) { + idx = flex_conf->nb_payloads; + flex_conf->nb_payloads++; + } else { + printf("The flex payload table is full. Can not set" + " flex payload for type(%u).", cfg->type); + return; + } + } + rte_memcpy(&flex_conf->flex_set[idx], + cfg, + sizeof(struct rte_eth_flex_payload_cfg)); + +} + +void +set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on) +{ +#ifdef RTE_LIBRTE_IXGBE_PMD + int diag; + + if (is_rx) + diag = rte_pmd_ixgbe_set_vf_rx(port_id, vf, on); + else + diag = rte_pmd_ixgbe_set_vf_tx(port_id, vf, on); + + if (diag == 0) + return; + printf("rte_pmd_ixgbe_set_vf_%s for port_id=%d failed diag=%d\n", + is_rx ? "rx" : "tx", port_id, diag); + return; +#endif + printf("VF %s setting not supported for port %d\n", + is_rx ? "Rx" : "Tx", port_id); + RTE_SET_USED(vf); + RTE_SET_USED(on); +} + +int +set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate) +{ + int diag; + struct rte_eth_link link; + int ret; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return 1; + ret = eth_link_get_nowait_print_err(port_id, &link); + if (ret < 0) + return 1; + if (rate > link.link_speed) { + printf("Invalid rate value:%u bigger than link speed: %u\n", + rate, link.link_speed); + return 1; + } + diag = rte_eth_set_queue_rate_limit(port_id, queue_idx, rate); + if (diag == 0) + return diag; + printf("rte_eth_set_queue_rate_limit for port_id=%d failed diag=%d\n", + port_id, diag); + return diag; +} + +int +set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, uint64_t q_msk) +{ + int diag = -ENOTSUP; + + RTE_SET_USED(vf); + RTE_SET_USED(rate); + RTE_SET_USED(q_msk); + +#ifdef RTE_LIBRTE_IXGBE_PMD + if (diag == -ENOTSUP) + diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate, + q_msk); +#endif +#ifdef RTE_LIBRTE_BNXT_PMD + if (diag == -ENOTSUP) + diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk); +#endif + if (diag == 0) + return diag; + + printf("set_vf_rate_limit for port_id=%d failed diag=%d\n", + port_id, diag); + return diag; +} + +/* + * Functions to manage the set of filtered Multicast MAC addresses. + * + * A pool of filtered multicast MAC addresses is associated with each port. + * The pool is allocated in chunks of MCAST_POOL_INC multicast addresses. 
+ * The address of the pool and the number of valid multicast MAC addresses + * recorded in the pool are stored in the fields "mc_addr_pool" and + * "mc_addr_nb" of the "rte_port" data structure. + * + * The function "rte_eth_dev_set_mc_addr_list" of the PMDs API imposes + * to be supplied a contiguous array of multicast MAC addresses. + * To comply with this constraint, the set of multicast addresses recorded + * into the pool are systematically compacted at the beginning of the pool. + * Hence, when a multicast address is removed from the pool, all following + * addresses, if any, are copied back to keep the set contiguous. + */ +#define MCAST_POOL_INC 32 + +static int +mcast_addr_pool_extend(struct rte_port *port) +{ + struct rte_ether_addr *mc_pool; + size_t mc_pool_size; + + /* + * If a free entry is available at the end of the pool, just + * increment the number of recorded multicast addresses. + */ + if ((port->mc_addr_nb % MCAST_POOL_INC) != 0) { + port->mc_addr_nb++; + return 0; + } + + /* + * [re]allocate a pool with MCAST_POOL_INC more entries. + * The previous test guarantees that port->mc_addr_nb is a multiple + * of MCAST_POOL_INC. + */ + mc_pool_size = sizeof(struct rte_ether_addr) * (port->mc_addr_nb + + MCAST_POOL_INC); + mc_pool = (struct rte_ether_addr *) realloc(port->mc_addr_pool, + mc_pool_size); + if (mc_pool == NULL) { + printf("allocation of pool of %u multicast addresses failed\n", + port->mc_addr_nb + MCAST_POOL_INC); + return -ENOMEM; + } + + port->mc_addr_pool = mc_pool; + port->mc_addr_nb++; + return 0; + +} + +static void +mcast_addr_pool_append(struct rte_port *port, struct rte_ether_addr *mc_addr) +{ + if (mcast_addr_pool_extend(port) != 0) + return; + rte_ether_addr_copy(mc_addr, &port->mc_addr_pool[port->mc_addr_nb - 1]); +} + +static void +mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx) +{ + port->mc_addr_nb--; + if (addr_idx == port->mc_addr_nb) { + /* No need to recompact the set of multicast addressses. */ + if (port->mc_addr_nb == 0) { + /* free the pool of multicast addresses. */ + free(port->mc_addr_pool); + port->mc_addr_pool = NULL; + } + return; + } + memmove(&port->mc_addr_pool[addr_idx], + &port->mc_addr_pool[addr_idx + 1], + sizeof(struct rte_ether_addr) * (port->mc_addr_nb - addr_idx)); +} + +static int +eth_port_multicast_addr_list_set(portid_t port_id) +{ + struct rte_port *port; + int diag; + + port = &ports[port_id]; + diag = rte_eth_dev_set_mc_addr_list(port_id, port->mc_addr_pool, + port->mc_addr_nb); + if (diag < 0) + printf("rte_eth_dev_set_mc_addr_list(port=%d, nb=%u) failed. diag=%d\n", + port_id, port->mc_addr_nb, diag); + + return diag; +} + +void +mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr) +{ + struct rte_port *port; + uint32_t i; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + port = &ports[port_id]; + + /* + * Check that the added multicast MAC address is not already recorded + * in the pool of multicast addresses. 
+ */ + for (i = 0; i < port->mc_addr_nb; i++) { + if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) { + printf("multicast address already filtered by port\n"); + return; + } + } + + mcast_addr_pool_append(port, mc_addr); + if (eth_port_multicast_addr_list_set(port_id) < 0) + /* Rollback on failure, remove the address from the pool */ + mcast_addr_pool_remove(port, i); +} + +void +mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr) +{ + struct rte_port *port; + uint32_t i; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + port = &ports[port_id]; + + /* + * Search the pool of multicast MAC addresses for the removed address. + */ + for (i = 0; i < port->mc_addr_nb; i++) { + if (rte_is_same_ether_addr(mc_addr, &port->mc_addr_pool[i])) + break; + } + if (i == port->mc_addr_nb) { + printf("multicast address not filtered by port %d\n", port_id); + return; + } + + mcast_addr_pool_remove(port, i); + if (eth_port_multicast_addr_list_set(port_id) < 0) + /* Rollback on failure, add the address back into the pool */ + mcast_addr_pool_append(port, mc_addr); +} + +void +port_dcb_info_display(portid_t port_id) +{ + struct rte_eth_dcb_info dcb_info; + uint16_t i; + int ret; + static const char *border = "================"; + + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + ret = rte_eth_dev_get_dcb_info(port_id, &dcb_info); + if (ret) { + printf("\n Failed to get dcb infos on port %-2d\n", + port_id); + return; + } + printf("\n %s DCB infos for port %-2d %s\n", border, port_id, border); + printf(" TC NUMBER: %d\n", dcb_info.nb_tcs); + printf("\n TC : "); + for (i = 0; i < dcb_info.nb_tcs; i++) + printf("\t%4d", i); + printf("\n Priority : "); + for (i = 0; i < dcb_info.nb_tcs; i++) + printf("\t%4d", dcb_info.prio_tc[i]); + printf("\n BW percent :"); + for (i = 0; i < dcb_info.nb_tcs; i++) + printf("\t%4d%%", dcb_info.tc_bws[i]); + printf("\n RXQ base : "); + for (i = 0; i < dcb_info.nb_tcs; i++) + printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].base); + printf("\n RXQ number :"); + for (i = 0; i < dcb_info.nb_tcs; i++) + printf("\t%4d", dcb_info.tc_queue.tc_rxq[0][i].nb_queue); + printf("\n TXQ base : "); + for (i = 0; i < dcb_info.nb_tcs; i++) + printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].base); + printf("\n TXQ number :"); + for (i = 0; i < dcb_info.nb_tcs; i++) + printf("\t%4d", dcb_info.tc_queue.tc_txq[0][i].nb_queue); + printf("\n"); +} + +uint8_t * +open_file(const char *file_path, uint32_t *size) +{ + int fd = open(file_path, O_RDONLY); + off_t pkg_size; + uint8_t *buf = NULL; + int ret = 0; + struct stat st_buf; + + if (size) + *size = 0; + + if (fd == -1) { + printf("%s: Failed to open %s\n", __func__, file_path); + return buf; + } + + if ((fstat(fd, &st_buf) != 0) || (!S_ISREG(st_buf.st_mode))) { + close(fd); + printf("%s: File operations failed\n", __func__); + return buf; + } + + pkg_size = st_buf.st_size; + if (pkg_size < 0) { + close(fd); + printf("%s: File operations failed\n", __func__); + return buf; + } + + buf = (uint8_t *)malloc(pkg_size); + if (!buf) { + close(fd); + printf("%s: Failed to malloc memory\n", __func__); + return buf; + } + + ret = read(fd, buf, pkg_size); + if (ret < 0) { + close(fd); + printf("%s: File read operation failed\n", __func__); + close_file(buf); + return NULL; + } + + if (size) + *size = pkg_size; + + close(fd); + + return buf; +} + +int +save_file(const char *file_path, uint8_t *buf, uint32_t size) +{ + FILE *fh = fopen(file_path, "wb"); + + if (fh == NULL) { + printf("%s: Failed to open %s\n", 
__func__, file_path); + return -1; + } + + if (fwrite(buf, 1, size, fh) != size) { + fclose(fh); + printf("%s: File write operation failed\n", __func__); + return -1; + } + + fclose(fh); + + return 0; +} + +int +close_file(uint8_t *buf) +{ + if (buf) { + free((void *)buf); + return 0; + } + + return -1; +} + +void +port_queue_region_info_display(portid_t port_id, void *buf) +{ +#ifdef RTE_LIBRTE_I40E_PMD + uint16_t i, j; + struct rte_pmd_i40e_queue_regions *info = + (struct rte_pmd_i40e_queue_regions *)buf; + static const char *queue_region_info_stats_border = "-------"; + + if (!info->queue_region_number) + printf("there is no region has been set before"); + + printf("\n %s All queue region info for port=%2d %s", + queue_region_info_stats_border, port_id, + queue_region_info_stats_border); + printf("\n queue_region_number: %-14u \n", + info->queue_region_number); + + for (i = 0; i < info->queue_region_number; i++) { + printf("\n region_id: %-14u queue_number: %-14u " + "queue_start_index: %-14u \n", + info->region[i].region_id, + info->region[i].queue_num, + info->region[i].queue_start_index); + + printf(" user_priority_num is %-14u :", + info->region[i].user_priority_num); + for (j = 0; j < info->region[i].user_priority_num; j++) + printf(" %-14u ", info->region[i].user_priority[j]); + + printf("\n flowtype_num is %-14u :", + info->region[i].flowtype_num); + for (j = 0; j < info->region[i].flowtype_num; j++) + printf(" %-14u ", info->region[i].hw_flowtype[j]); + } +#else + RTE_SET_USED(port_id); + RTE_SET_USED(buf); +#endif + + printf("\n\n"); +} + +void +show_macs(portid_t port_id) +{ + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_eth_dev_info dev_info; + struct rte_ether_addr *addr; + uint32_t i, num_macs = 0; + struct rte_eth_dev *dev; + + dev = &rte_eth_devices[port_id]; + + rte_eth_dev_info_get(port_id, &dev_info); + + for (i = 0; i < dev_info.max_mac_addrs; i++) { + addr = &dev->data->mac_addrs[i]; + + /* skip zero address */ + if (rte_is_zero_ether_addr(addr)) + continue; + + num_macs++; + } + + printf("Number of MAC address added: %d\n", num_macs); + + for (i = 0; i < dev_info.max_mac_addrs; i++) { + addr = &dev->data->mac_addrs[i]; + + /* skip zero address */ + if (rte_is_zero_ether_addr(addr)) + continue; + + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); + printf(" %s\n", buf); + } +} + +void +show_mcast_macs(portid_t port_id) +{ + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + struct rte_ether_addr *addr; + struct rte_port *port; + uint32_t i; + + port = &ports[port_id]; + + printf("Number of Multicast MAC address added: %d\n", port->mc_addr_nb); + + for (i = 0; i < port->mc_addr_nb; i++) { + addr = &port->mc_addr_pool[i]; + + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, addr); + printf(" %s\n", buf); + } +} diff --git a/src/spdk/dpdk/app/test-pmd/csumonly.c b/src/spdk/dpdk/app/test-pmd/csumonly.c new file mode 100644 index 000000000..862622379 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/csumonly.c @@ -0,0 +1,1112 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation. + * Copyright 2014 6WIND S.A. 
+ */ + +#include <stdarg.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_ip.h> +#include <rte_tcp.h> +#include <rte_udp.h> +#include <rte_vxlan.h> +#include <rte_sctp.h> +#include <rte_gtp.h> +#include <rte_prefetch.h> +#include <rte_string_fns.h> +#include <rte_flow.h> +#include <rte_gro.h> +#include <rte_gso.h> + +#include "testpmd.h" + +#define IP_DEFTTL 64 /* from RFC 1340. */ + +#define GRE_CHECKSUM_PRESENT 0x8000 +#define GRE_KEY_PRESENT 0x2000 +#define GRE_SEQUENCE_PRESENT 0x1000 +#define GRE_EXT_LEN 4 +#define GRE_SUPPORTED_FIELDS (GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\ + GRE_SEQUENCE_PRESENT) + +/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */ +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8))) +#else +#define _htons(x) (x) +#endif + +uint16_t vxlan_gpe_udp_port = 4790; + +/* structure that caches offload info for the current packet */ +struct testpmd_offload_info { + uint16_t ethertype; + uint8_t gso_enable; + uint16_t l2_len; + uint16_t l3_len; + uint16_t l4_len; + uint8_t l4_proto; + uint8_t is_tunnel; + uint16_t outer_ethertype; + uint16_t outer_l2_len; + uint16_t outer_l3_len; + uint8_t outer_l4_proto; + uint16_t tso_segsz; + uint16_t tunnel_tso_segsz; + uint32_t pkt_len; +}; + +/* simplified GRE header */ +struct simple_gre_hdr { + uint16_t flags; + uint16_t proto; +} __rte_packed; + +static uint16_t +get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype) +{ + if (ethertype == _htons(RTE_ETHER_TYPE_IPV4)) + return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr); + else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */ + return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr); +} + +/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */ +static void +parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info) +{ + struct rte_tcp_hdr *tcp_hdr; + + info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4; + info->l4_proto = ipv4_hdr->next_proto_id; + + /* only fill l4_len for TCP, it's useful for TSO */ + if (info->l4_proto == IPPROTO_TCP) { + tcp_hdr = (struct rte_tcp_hdr *) + ((char *)ipv4_hdr + info->l3_len); + info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2; + } else if (info->l4_proto == IPPROTO_UDP) + info->l4_len = sizeof(struct rte_udp_hdr); + else + info->l4_len = 0; +} + +/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */ +static void +parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info) +{ + struct rte_tcp_hdr *tcp_hdr; + + info->l3_len = sizeof(struct rte_ipv6_hdr); + info->l4_proto = ipv6_hdr->proto; + + /* only fill l4_len for TCP, it's useful for TSO */ + if (info->l4_proto == IPPROTO_TCP) { + tcp_hdr = (struct rte_tcp_hdr *) + ((char *)ipv6_hdr + info->l3_len); + info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2; + } else if (info->l4_proto == IPPROTO_UDP) + info->l4_len = sizeof(struct rte_udp_hdr); + else + info->l4_len = 0; +} + 
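+/*
+ * Illustrative only: a minimal, self-contained sketch of the header-length
+ * arithmetic used by parse_ipv4() and parse_ipv6() above, applied to
+ * hypothetical field values.  The helper name is made up for the example and
+ * it is not called anywhere in this file.
+ */
+static inline void
+parse_len_example(void)
+{
+	uint8_t version_ihl = 0x45;	/* IPv4, IHL = 5 32-bit words */
+	uint8_t data_off = 0x50;	/* TCP data offset = 5 32-bit words */
+	uint16_t l3_len = (version_ihl & 0x0f) * 4;	/* 20 bytes */
+	uint16_t l4_len = (data_off & 0xf0) >> 2;	/* 20 bytes */
+
+	RTE_SET_USED(l3_len);
+	RTE_SET_USED(l4_len);
+}
+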
+/* + * Parse an ethernet header to fill the ethertype, l2_len, l3_len and + * ipproto. This function is able to recognize IPv4/IPv6 with optional VLAN + * headers. The l4_len argument is only set in case of TCP (useful for TSO). + */ +static void +parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info) +{ + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_vlan_hdr *vlan_hdr; + + info->l2_len = sizeof(struct rte_ether_hdr); + info->ethertype = eth_hdr->ether_type; + + while (info->ethertype == _htons(RTE_ETHER_TYPE_VLAN) || + info->ethertype == _htons(RTE_ETHER_TYPE_QINQ)) { + vlan_hdr = (struct rte_vlan_hdr *) + ((char *)eth_hdr + info->l2_len); + info->l2_len += sizeof(struct rte_vlan_hdr); + info->ethertype = vlan_hdr->eth_proto; + } + + switch (info->ethertype) { + case _htons(RTE_ETHER_TYPE_IPV4): + ipv4_hdr = (struct rte_ipv4_hdr *) + ((char *)eth_hdr + info->l2_len); + parse_ipv4(ipv4_hdr, info); + break; + case _htons(RTE_ETHER_TYPE_IPV6): + ipv6_hdr = (struct rte_ipv6_hdr *) + ((char *)eth_hdr + info->l2_len); + parse_ipv6(ipv6_hdr, info); + break; + default: + info->l4_len = 0; + info->l3_len = 0; + info->l4_proto = 0; + break; + } +} + +/* + * Parse a GTP protocol header. + * No optional fields and next extension header type. + */ +static void +parse_gtp(struct rte_udp_hdr *udp_hdr, + struct testpmd_offload_info *info) +{ + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_gtp_hdr *gtp_hdr; + uint8_t gtp_len = sizeof(*gtp_hdr); + uint8_t ip_ver; + + /* Check udp destination port. */ + if (udp_hdr->dst_port != _htons(RTE_GTPC_UDP_PORT) && + udp_hdr->src_port != _htons(RTE_GTPC_UDP_PORT) && + udp_hdr->dst_port != _htons(RTE_GTPU_UDP_PORT)) + return; + + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + info->l2_len = 0; + + gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr + + sizeof(struct rte_udp_hdr)); + + /* + * Check message type. If message type is 0xff, it is + * a GTP data packet. 
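+ * (a G-PDU: the encapsulated IPv4 or IPv6 packet starts right after the
+ * 8-byte mandatory GTP header, so its IP version nibble can be read directly).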
If not, it is a GTP control packet + */ + if (gtp_hdr->msg_type == 0xff) { + ip_ver = *(uint8_t *)((char *)udp_hdr + + sizeof(struct rte_udp_hdr) + + sizeof(struct rte_gtp_hdr)); + ip_ver = (ip_ver) & 0xf0; + + if (ip_ver == RTE_GTP_TYPE_IPV4) { + ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr + + gtp_len); + info->ethertype = _htons(RTE_ETHER_TYPE_IPV4); + parse_ipv4(ipv4_hdr, info); + } else if (ip_ver == RTE_GTP_TYPE_IPV6) { + ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr + + gtp_len); + info->ethertype = _htons(RTE_ETHER_TYPE_IPV6); + parse_ipv6(ipv6_hdr, info); + } + } else { + info->ethertype = 0; + info->l4_len = 0; + info->l3_len = 0; + info->l4_proto = 0; + } + + info->l2_len += RTE_ETHER_GTP_HLEN; +} + +/* Parse a vxlan header */ +static void +parse_vxlan(struct rte_udp_hdr *udp_hdr, + struct testpmd_offload_info *info, + uint32_t pkt_type) +{ + struct rte_ether_hdr *eth_hdr; + + /* check udp destination port, 4789 is the default vxlan port + * (rfc7348) or that the rx offload flag is set (i40e only + * currently) */ + if (udp_hdr->dst_port != _htons(4789) && + RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0) + return; + + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr + + sizeof(struct rte_udp_hdr) + + sizeof(struct rte_vxlan_hdr)); + + parse_ethernet(eth_hdr, info); + info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */ +} + +/* Parse a vxlan-gpe header */ +static void +parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr, + struct testpmd_offload_info *info) +{ + struct rte_ether_hdr *eth_hdr; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr; + uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr); + + /* Check udp destination port. 
*/ + if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port)) + return; + + vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr + + sizeof(struct rte_udp_hdr)); + + if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto == + RTE_VXLAN_GPE_TYPE_IPV4) { + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr + + vxlan_gpe_len); + + parse_ipv4(ipv4_hdr, info); + info->ethertype = _htons(RTE_ETHER_TYPE_IPV4); + info->l2_len = 0; + + } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) { + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr + + vxlan_gpe_len); + + info->ethertype = _htons(RTE_ETHER_TYPE_IPV6); + parse_ipv6(ipv6_hdr, info); + info->l2_len = 0; + + } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) { + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr + + vxlan_gpe_len); + + parse_ethernet(eth_hdr, info); + } else + return; + + info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN; +} + +/* Parse a gre header */ +static void +parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info) +{ + struct rte_ether_hdr *eth_hdr; + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + uint8_t gre_len = 0; + + gre_len += sizeof(struct simple_gre_hdr); + + if (gre_hdr->flags & _htons(GRE_KEY_PRESENT)) + gre_len += GRE_EXT_LEN; + if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT)) + gre_len += GRE_EXT_LEN; + if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT)) + gre_len += GRE_EXT_LEN; + + if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV4)) { + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len); + + parse_ipv4(ipv4_hdr, info); + info->ethertype = _htons(RTE_ETHER_TYPE_IPV4); + info->l2_len = 0; + + } else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV6)) { + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len); + + info->ethertype = _htons(RTE_ETHER_TYPE_IPV6); + parse_ipv6(ipv6_hdr, info); + info->l2_len = 0; + + } else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_TEB)) { + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + info->outer_l4_proto = info->l4_proto; + + eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len); + + parse_ethernet(eth_hdr, info); + } else + return; + + info->l2_len += gre_len; +} + + +/* Parse an encapsulated ip or ipv6 header */ +static void +parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info) +{ + struct rte_ipv4_hdr *ipv4_hdr = encap_ip; + struct rte_ipv6_hdr *ipv6_hdr = encap_ip; + uint8_t ip_version; + + ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4; + + if (ip_version != 4 && ip_version != 
6) + return; + + info->is_tunnel = 1; + info->outer_ethertype = info->ethertype; + info->outer_l2_len = info->l2_len; + info->outer_l3_len = info->l3_len; + + if (ip_version == 4) { + parse_ipv4(ipv4_hdr, info); + info->ethertype = _htons(RTE_ETHER_TYPE_IPV4); + } else { + parse_ipv6(ipv6_hdr, info); + info->ethertype = _htons(RTE_ETHER_TYPE_IPV6); + } + info->l2_len = 0; +} + +/* if possible, calculate the checksum of a packet in hw or sw, + * depending on the testpmd command line configuration */ +static uint64_t +process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info, + uint64_t tx_offloads) +{ + struct rte_ipv4_hdr *ipv4_hdr = l3_hdr; + struct rte_udp_hdr *udp_hdr; + struct rte_tcp_hdr *tcp_hdr; + struct rte_sctp_hdr *sctp_hdr; + uint64_t ol_flags = 0; + uint32_t max_pkt_len, tso_segsz = 0; + + /* ensure packet is large enough to require tso */ + if (!info->is_tunnel) { + max_pkt_len = info->l2_len + info->l3_len + info->l4_len + + info->tso_segsz; + if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len) + tso_segsz = info->tso_segsz; + } else { + max_pkt_len = info->outer_l2_len + info->outer_l3_len + + info->l2_len + info->l3_len + info->l4_len + + info->tunnel_tso_segsz; + if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len) + tso_segsz = info->tunnel_tso_segsz; + } + + if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV4)) { + ipv4_hdr = l3_hdr; + ipv4_hdr->hdr_checksum = 0; + + ol_flags |= PKT_TX_IPV4; + if (info->l4_proto == IPPROTO_TCP && tso_segsz) { + ol_flags |= PKT_TX_IP_CKSUM; + } else { + if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + ol_flags |= PKT_TX_IP_CKSUM; + else + ipv4_hdr->hdr_checksum = + rte_ipv4_cksum(ipv4_hdr); + } + } else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV6)) + ol_flags |= PKT_TX_IPV6; + else + return 0; /* packet type not supported, nothing to do */ + + if (info->l4_proto == IPPROTO_UDP) { + udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len); + /* do not recalculate udp cksum if it was 0 */ + if (udp_hdr->dgram_cksum != 0) { + udp_hdr->dgram_cksum = 0; + if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) + ol_flags |= PKT_TX_UDP_CKSUM; + else { + udp_hdr->dgram_cksum = + get_udptcp_checksum(l3_hdr, udp_hdr, + info->ethertype); + } + } + if (info->gso_enable) + ol_flags |= PKT_TX_UDP_SEG; + } else if (info->l4_proto == IPPROTO_TCP) { + tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len); + tcp_hdr->cksum = 0; + if (tso_segsz) + ol_flags |= PKT_TX_TCP_SEG; + else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) + ol_flags |= PKT_TX_TCP_CKSUM; + else { + tcp_hdr->cksum = + get_udptcp_checksum(l3_hdr, tcp_hdr, + info->ethertype); + } + if (info->gso_enable) + ol_flags |= PKT_TX_TCP_SEG; + } else if (info->l4_proto == IPPROTO_SCTP) { + sctp_hdr = (struct rte_sctp_hdr *) + ((char *)l3_hdr + info->l3_len); + sctp_hdr->cksum = 0; + /* sctp payload must be a multiple of 4 to be + * offloaded */ + if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) && + ((ipv4_hdr->total_length & 0x3) == 0)) { + ol_flags |= PKT_TX_SCTP_CKSUM; + } else { + /* XXX implement CRC32c, example available in + * RFC3309 */ + } + } + + return ol_flags; +} + +/* Calculate the checksum of outer header */ +static uint64_t +process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info, + uint64_t tx_offloads, int tso_enabled) +{ + struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr; + struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr; + struct rte_udp_hdr *udp_hdr; + uint64_t ol_flags = 0; + + if (info->outer_ethertype == 
_htons(RTE_ETHER_TYPE_IPV4)) { + ipv4_hdr->hdr_checksum = 0; + ol_flags |= PKT_TX_OUTER_IPV4; + + if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) + ol_flags |= PKT_TX_OUTER_IP_CKSUM; + else + ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); + } else + ol_flags |= PKT_TX_OUTER_IPV6; + + if (info->outer_l4_proto != IPPROTO_UDP) + return ol_flags; + + udp_hdr = (struct rte_udp_hdr *) + ((char *)outer_l3_hdr + info->outer_l3_len); + + if (tso_enabled) + ol_flags |= PKT_TX_TCP_SEG; + + /* Skip SW outer UDP checksum generation if HW supports it */ + if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) { + if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) + udp_hdr->dgram_cksum + = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags); + else + udp_hdr->dgram_cksum + = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags); + + ol_flags |= PKT_TX_OUTER_UDP_CKSUM; + return ol_flags; + } + + /* outer UDP checksum is done in software. In the other side, for + * UDP tunneling, like VXLAN or Geneve, outer UDP checksum can be + * set to zero. + * + * If a packet will be TSOed into small packets by NIC, we cannot + * set/calculate a non-zero checksum, because it will be a wrong + * value after the packet be split into several small packets. + */ + if (tso_enabled) + udp_hdr->dgram_cksum = 0; + + /* do not recalculate udp cksum if it was 0 */ + if (udp_hdr->dgram_cksum != 0) { + udp_hdr->dgram_cksum = 0; + if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) + udp_hdr->dgram_cksum = + rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr); + else + udp_hdr->dgram_cksum = + rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr); + } + + return ol_flags; +} + +/* + * Helper function. + * Performs actual copying. + * Returns number of segments in the destination mbuf on success, + * or negative error code on failure. + */ +static int +mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[], + uint16_t seglen[], uint8_t nb_seg) +{ + uint32_t dlen, slen, tlen; + uint32_t i, len; + const struct rte_mbuf *m; + const uint8_t *src; + uint8_t *dst; + + dlen = 0; + slen = 0; + tlen = 0; + + dst = NULL; + src = NULL; + + m = ms; + i = 0; + while (ms != NULL && i != nb_seg) { + + if (slen == 0) { + slen = rte_pktmbuf_data_len(ms); + src = rte_pktmbuf_mtod(ms, const uint8_t *); + } + + if (dlen == 0) { + dlen = RTE_MIN(seglen[i], slen); + md[i]->data_len = dlen; + md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1]; + dst = rte_pktmbuf_mtod(md[i], uint8_t *); + } + + len = RTE_MIN(slen, dlen); + memcpy(dst, src, len); + tlen += len; + slen -= len; + dlen -= len; + src += len; + dst += len; + + if (slen == 0) + ms = ms->next; + if (dlen == 0) + i++; + } + + if (ms != NULL) + return -ENOBUFS; + else if (tlen != m->pkt_len) + return -EINVAL; + + md[0]->nb_segs = nb_seg; + md[0]->pkt_len = tlen; + md[0]->vlan_tci = m->vlan_tci; + md[0]->vlan_tci_outer = m->vlan_tci_outer; + md[0]->ol_flags = m->ol_flags; + md[0]->tx_offload = m->tx_offload; + + return nb_seg; +} + +/* + * Allocate a new mbuf with up to tx_pkt_nb_segs segments. + * Copy packet contents and offload information into the new segmented mbuf. 
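+ * Returns the first mbuf of the new chain, or NULL if any allocation fails;
+ * in that case the caller keeps the original packet and transmits it
+ * unmodified.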
+ */ +static struct rte_mbuf * +pkt_copy_split(const struct rte_mbuf *pkt) +{ + int32_t n, rc; + uint32_t i, len, nb_seg; + struct rte_mempool *mp; + uint16_t seglen[RTE_MAX_SEGS_PER_PKT]; + struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT]; + + mp = current_fwd_lcore()->mbp; + + if (tx_pkt_split == TX_PKT_SPLIT_RND) + nb_seg = random() % tx_pkt_nb_segs + 1; + else + nb_seg = tx_pkt_nb_segs; + + memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0])); + + /* calculate number of segments to use and their length. */ + len = 0; + for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) { + len += seglen[i]; + md[i] = NULL; + } + + n = pkt->pkt_len - len; + + /* update size of the last segment to fit rest of the packet */ + if (n >= 0) { + seglen[i - 1] += n; + len += n; + } + + nb_seg = i; + while (i != 0) { + p = rte_pktmbuf_alloc(mp); + if (p == NULL) { + TESTPMD_LOG(ERR, + "failed to allocate %u-th of %u mbuf " + "from mempool: %s\n", + nb_seg - i, nb_seg, mp->name); + break; + } + + md[--i] = p; + if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) { + TESTPMD_LOG(ERR, "mempool %s, %u-th segment: " + "expected seglen: %u, " + "actual mbuf tailroom: %u\n", + mp->name, i, seglen[i], + rte_pktmbuf_tailroom(md[i])); + break; + } + } + + /* all mbufs successfully allocated, do copy */ + if (i == 0) { + rc = mbuf_copy_split(pkt, md, seglen, nb_seg); + if (rc < 0) + TESTPMD_LOG(ERR, + "mbuf_copy_split for %p(len=%u, nb_seg=%u) " + "into %u segments failed with error code: %d\n", + pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc); + + /* figure out how many mbufs to free. */ + i = RTE_MAX(rc, 0); + } + + /* free unused mbufs */ + for (; i != nb_seg; i++) { + rte_pktmbuf_free_seg(md[i]); + md[i] = NULL; + } + + return md[0]; +} + +/* + * Receive a burst of packets, and for each packet: + * - parse packet, and try to recognize a supported packet type (1) + * - if it's not a supported packet type, don't touch the packet, else: + * - reprocess the checksum of all supported layers. This is done in SW + * or HW, depending on testpmd command line configuration + * - if TSO is enabled in testpmd command line, also flag the mbuf for TCP + * segmentation offload (this implies HW TCP checksum) + * Then transmit packets on the output port. + * + * (1) Supported packets are: + * Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP . + * Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 / + * UDP|TCP|SCTP + * Ether / (vlan) / outer IP / outer UDP / GTP / IP|IP6 / UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP + * Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP + * + * The testpmd command line for this forward engine sets the flags + * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control + * wether a checksum must be calculated in software or in hardware. The + * IP, UDP, TCP and SCTP flags always concern the inner layer. The + * OUTER_IP is only useful for tunnel packets. 
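+ *
+ * A typical interactive session exercising this engine looks like the
+ * following (illustrative only, not an exhaustive list of options):
+ *   testpmd> port stop 0
+ *   testpmd> csum set ip hw 0
+ *   testpmd> csum set udp hw 0
+ *   testpmd> tso set 800 0
+ *   testpmd> port start 0
+ *   testpmd> set fwd csum
+ *   testpmd> start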
+ */ +static void +pkt_burst_checksum_forward(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST]; + struct rte_gso_ctx *gso_ctx; + struct rte_mbuf **tx_pkts_burst; + struct rte_port *txp; + struct rte_mbuf *m, *p; + struct rte_ether_hdr *eth_hdr; + void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */ + void **gro_ctx; + uint16_t gro_pkts_num; + uint8_t gro_enable; + uint16_t nb_rx; + uint16_t nb_tx; + uint16_t nb_prep; + uint16_t i; + uint64_t rx_ol_flags, tx_ol_flags; + uint64_t tx_offloads; + uint32_t retry; + uint32_t rx_bad_ip_csum; + uint32_t rx_bad_l4_csum; + uint32_t rx_bad_outer_l4_csum; + struct testpmd_offload_info info; + uint16_t nb_segments = 0; + int ret; + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* receive a burst of packet */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + fs->rx_packets += nb_rx; + rx_bad_ip_csum = 0; + rx_bad_l4_csum = 0; + rx_bad_outer_l4_csum = 0; + gro_enable = gro_ports[fs->rx_port].enable; + + txp = &ports[fs->tx_port]; + tx_offloads = txp->dev_conf.txmode.offloads; + memset(&info, 0, sizeof(info)); + info.tso_segsz = txp->tso_segsz; + info.tunnel_tso_segsz = txp->tunnel_tso_segsz; + if (gso_ports[fs->tx_port].enable) + info.gso_enable = 1; + + for (i = 0; i < nb_rx; i++) { + if (likely(i < nb_rx - 1)) + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1], + void *)); + + m = pkts_burst[i]; + info.is_tunnel = 0; + info.pkt_len = rte_pktmbuf_pkt_len(m); + tx_ol_flags = m->ol_flags & + (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF); + rx_ol_flags = m->ol_flags; + + /* Update the L3/L4 checksum error packet statistics */ + if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD) + rx_bad_ip_csum += 1; + if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD) + rx_bad_l4_csum += 1; + if (rx_ol_flags & PKT_RX_OUTER_L4_CKSUM_BAD) + rx_bad_outer_l4_csum += 1; + + /* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan + * and inner headers */ + + eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *); + rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], + ð_hdr->d_addr); + rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, + ð_hdr->s_addr); + parse_ethernet(eth_hdr, &info); + l3_hdr = (char *)eth_hdr + info.l2_len; + + /* check if it's a supported tunnel */ + if (txp->parse_tunnel) { + if (info.l4_proto == IPPROTO_UDP) { + struct rte_udp_hdr *udp_hdr; + + udp_hdr = (struct rte_udp_hdr *) + ((char *)l3_hdr + info.l3_len); + parse_gtp(udp_hdr, &info); + if (info.is_tunnel) { + tx_ol_flags |= PKT_TX_TUNNEL_GTP; + goto tunnel_update; + } + parse_vxlan_gpe(udp_hdr, &info); + if (info.is_tunnel) { + tx_ol_flags |= + PKT_TX_TUNNEL_VXLAN_GPE; + goto tunnel_update; + } + parse_vxlan(udp_hdr, &info, + m->packet_type); + if (info.is_tunnel) + tx_ol_flags |= + PKT_TX_TUNNEL_VXLAN; + } else if (info.l4_proto == IPPROTO_GRE) { + struct simple_gre_hdr *gre_hdr; + + gre_hdr = (struct simple_gre_hdr *) + ((char *)l3_hdr + info.l3_len); + parse_gre(gre_hdr, &info); + if (info.is_tunnel) + tx_ol_flags |= PKT_TX_TUNNEL_GRE; + } else if (info.l4_proto == IPPROTO_IPIP) { + void *encap_ip_hdr; + + encap_ip_hdr = (char *)l3_hdr + info.l3_len; + 
parse_encap_ip(encap_ip_hdr, &info); + if (info.is_tunnel) + tx_ol_flags |= PKT_TX_TUNNEL_IPIP; + } + } + +tunnel_update: + /* update l3_hdr and outer_l3_hdr if a tunnel was parsed */ + if (info.is_tunnel) { + outer_l3_hdr = l3_hdr; + l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len; + } + + /* step 2: depending on user command line configuration, + * recompute checksum either in software or flag the + * mbuf to offload the calculation to the NIC. If TSO + * is configured, prepare the mbuf for TCP segmentation. */ + + /* process checksums of inner headers first */ + tx_ol_flags |= process_inner_cksums(l3_hdr, &info, + tx_offloads); + + /* Then process outer headers if any. Note that the software + * checksum will be wrong if one of the inner checksums is + * processed in hardware. */ + if (info.is_tunnel == 1) { + tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info, + tx_offloads, + !!(tx_ol_flags & PKT_TX_TCP_SEG)); + } + + /* step 3: fill the mbuf meta data (flags and header lengths) */ + + m->tx_offload = 0; + if (info.is_tunnel == 1) { + if (info.tunnel_tso_segsz || + (tx_offloads & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) || + (tx_offloads & + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) || + (tx_ol_flags & PKT_TX_OUTER_IPV6)) { + m->outer_l2_len = info.outer_l2_len; + m->outer_l3_len = info.outer_l3_len; + m->l2_len = info.l2_len; + m->l3_len = info.l3_len; + m->l4_len = info.l4_len; + m->tso_segsz = info.tunnel_tso_segsz; + } + else { + /* if there is a outer UDP cksum + processed in sw and the inner in hw, + the outer checksum will be wrong as + the payload will be modified by the + hardware */ + m->l2_len = info.outer_l2_len + + info.outer_l3_len + info.l2_len; + m->l3_len = info.l3_len; + m->l4_len = info.l4_len; + } + } else { + /* this is only useful if an offload flag is + * set, but it does not hurt to fill it in any + * case */ + m->l2_len = info.l2_len; + m->l3_len = info.l3_len; + m->l4_len = info.l4_len; + m->tso_segsz = info.tso_segsz; + } + m->ol_flags = tx_ol_flags; + + /* Do split & copy for the packet. 
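+		 * (only when TX packet splitting has been enabled, e.g. with the
+		 * "set txsplit on" or "set txsplit rand" testpmd command)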
*/ + if (tx_pkt_split != TX_PKT_SPLIT_OFF) { + p = pkt_copy_split(m); + if (p != NULL) { + rte_pktmbuf_free(m); + m = p; + pkts_burst[i] = m; + } + } + + /* if verbose mode is enabled, dump debug info */ + if (verbose_level > 0) { + char buf[256]; + + printf("-----------------\n"); + printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%u:\n", + fs->rx_port, m, m->pkt_len, m->nb_segs); + /* dump rx parsed packet info */ + rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf)); + printf("rx: l2_len=%d ethertype=%x l3_len=%d " + "l4_proto=%d l4_len=%d flags=%s\n", + info.l2_len, rte_be_to_cpu_16(info.ethertype), + info.l3_len, info.l4_proto, info.l4_len, buf); + if (rx_ol_flags & PKT_RX_LRO) + printf("rx: m->lro_segsz=%u\n", m->tso_segsz); + if (info.is_tunnel == 1) + printf("rx: outer_l2_len=%d outer_ethertype=%x " + "outer_l3_len=%d\n", info.outer_l2_len, + rte_be_to_cpu_16(info.outer_ethertype), + info.outer_l3_len); + /* dump tx packet info */ + if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM)) || + info.tso_segsz != 0) + printf("tx: m->l2_len=%d m->l3_len=%d " + "m->l4_len=%d\n", + m->l2_len, m->l3_len, m->l4_len); + if (info.is_tunnel == 1) { + if ((tx_offloads & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) || + (tx_offloads & + DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) || + (tx_ol_flags & PKT_TX_OUTER_IPV6)) + printf("tx: m->outer_l2_len=%d " + "m->outer_l3_len=%d\n", + m->outer_l2_len, + m->outer_l3_len); + if (info.tunnel_tso_segsz != 0 && + (m->ol_flags & PKT_TX_TCP_SEG)) + printf("tx: m->tso_segsz=%d\n", + m->tso_segsz); + } else if (info.tso_segsz != 0 && + (m->ol_flags & PKT_TX_TCP_SEG)) + printf("tx: m->tso_segsz=%d\n", m->tso_segsz); + rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf)); + printf("tx: flags=%s", buf); + printf("\n"); + } + } + + if (unlikely(gro_enable)) { + if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) { + nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx, + &(gro_ports[fs->rx_port].param)); + } else { + gro_ctx = current_fwd_lcore()->gro_ctx; + nb_rx = rte_gro_reassemble(pkts_burst, nb_rx, gro_ctx); + + if (++fs->gro_times >= gro_flush_cycles) { + gro_pkts_num = rte_gro_get_pkt_count(gro_ctx); + if (gro_pkts_num > MAX_PKT_BURST - nb_rx) + gro_pkts_num = MAX_PKT_BURST - nb_rx; + + nb_rx += rte_gro_timeout_flush(gro_ctx, 0, + RTE_GRO_TCP_IPV4, + &pkts_burst[nb_rx], + gro_pkts_num); + fs->gro_times = 0; + } + } + } + + if (gso_ports[fs->tx_port].enable == 0) + tx_pkts_burst = pkts_burst; + else { + gso_ctx = &(current_fwd_lcore()->gso_ctx); + gso_ctx->gso_size = gso_max_segment_size; + for (i = 0; i < nb_rx; i++) { + ret = rte_gso_segment(pkts_burst[i], gso_ctx, + &gso_segments[nb_segments], + GSO_MAX_PKT_BURST - nb_segments); + if (ret >= 0) + nb_segments += ret; + else { + TESTPMD_LOG(DEBUG, "Unable to segment packet"); + rte_pktmbuf_free(pkts_burst[i]); + } + } + + tx_pkts_burst = gso_segments; + nb_rx = nb_segments; + } + + nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue, + tx_pkts_burst, nb_rx); + if (nb_prep != nb_rx) + printf("Preparing packet burst to transmit failed: %s\n", + rte_strerror(rte_errno)); + + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst, + nb_prep); + + /* + * Retry if necessary + */ + if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { + retry = 0; + while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + &tx_pkts_burst[nb_tx], nb_rx - nb_tx); + } + } + 
fs->tx_packets += nb_tx; + fs->rx_bad_ip_csum += rx_bad_ip_csum; + fs->rx_bad_l4_csum += rx_bad_l4_csum; + fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_rx)) { + fs->fwd_dropped += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(tx_pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine csum_fwd_engine = { + .fwd_mode_name = "csum", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_checksum_forward, +}; diff --git a/src/spdk/dpdk/app/test-pmd/flowgen.c b/src/spdk/dpdk/app/test-pmd/flowgen.c new file mode 100644 index 000000000..4bd351e67 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/flowgen.c @@ -0,0 +1,222 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2014-2020 Mellanox Technologies, Ltd + */ + +#include <stdarg.h> +#include <string.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_ip.h> +#include <rte_tcp.h> +#include <rte_udp.h> +#include <rte_string_fns.h> +#include <rte_flow.h> + +#include "testpmd.h" + +/* hardcoded configuration (for now) */ +static unsigned cfg_n_flows = 1024; +static uint32_t cfg_ip_src = RTE_IPV4(10, 254, 0, 0); +static uint32_t cfg_ip_dst = RTE_IPV4(10, 253, 0, 0); +static uint16_t cfg_udp_src = 1000; +static uint16_t cfg_udp_dst = 1001; +static struct rte_ether_addr cfg_ether_src = + {{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x00 }}; +static struct rte_ether_addr cfg_ether_dst = + {{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x01 }}; + +#define IP_DEFTTL 64 /* from RFC 1340. */ + +static inline uint16_t +ip_sum(const unaligned_uint16_t *hdr, int hdr_len) +{ + uint32_t sum = 0; + + while (hdr_len > 1) + { + sum += *hdr++; + if (sum & 0x80000000) + sum = (sum & 0xFFFF) + (sum >> 16); + hdr_len -= 2; + } + + while (sum >> 16) + sum = (sum & 0xFFFF) + (sum >> 16); + + return ~sum; +} + +/* + * Multi-flow generation mode. + * + * We originate a bunch of flows (varying destination IP addresses), and + * terminate receive traffic. Received traffic is simply discarded, but we + * still do so in order to maintain traffic statistics. 
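+ *
+ * With the hardcoded defaults above (cfg_n_flows = 1024 and
+ * cfg_ip_dst = 10.253.0.0), the destination IP address cycles through
+ * 10.253.0.0 .. 10.253.3.255 while every other field of the generated
+ * packets stays constant.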
+ */ +static void +pkt_burst_flow_gen(struct fwd_stream *fs) +{ + unsigned pkt_size = tx_pkt_length - 4; /* Adjust FCS */ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mempool *mbp; + struct rte_mbuf *pkt; + struct rte_ether_hdr *eth_hdr; + struct rte_ipv4_hdr *ip_hdr; + struct rte_udp_hdr *udp_hdr; + uint16_t vlan_tci, vlan_tci_outer; + uint64_t ol_flags = 0; + uint16_t nb_rx; + uint16_t nb_tx; + uint16_t nb_pkt; + uint16_t i; + uint32_t retry; + uint64_t tx_offloads; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + static int next_flow = 0; + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* Receive a burst of packets and discard them. */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + fs->rx_packets += nb_rx; + + for (i = 0; i < nb_rx; i++) + rte_pktmbuf_free(pkts_burst[i]); + + mbp = current_fwd_lcore()->mbp; + vlan_tci = ports[fs->tx_port].tx_vlan_id; + vlan_tci_outer = ports[fs->tx_port].tx_vlan_id_outer; + + tx_offloads = ports[fs->tx_port].dev_conf.txmode.offloads; + if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) + ol_flags |= PKT_TX_VLAN_PKT; + if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT) + ol_flags |= PKT_TX_QINQ_PKT; + if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) + ol_flags |= PKT_TX_MACSEC; + + for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { + pkt = rte_mbuf_raw_alloc(mbp); + if (!pkt) + break; + + pkt->data_len = pkt_size; + pkt->next = NULL; + + /* Initialize Ethernet header. */ + eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); + rte_ether_addr_copy(&cfg_ether_dst, ð_hdr->d_addr); + rte_ether_addr_copy(&cfg_ether_src, ð_hdr->s_addr); + eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + + /* Initialize IP header. */ + ip_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1); + memset(ip_hdr, 0, sizeof(*ip_hdr)); + ip_hdr->version_ihl = RTE_IPV4_VHL_DEF; + ip_hdr->type_of_service = 0; + ip_hdr->fragment_offset = 0; + ip_hdr->time_to_live = IP_DEFTTL; + ip_hdr->next_proto_id = IPPROTO_UDP; + ip_hdr->packet_id = 0; + ip_hdr->src_addr = rte_cpu_to_be_32(cfg_ip_src); + ip_hdr->dst_addr = rte_cpu_to_be_32(cfg_ip_dst + + next_flow); + ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_size - + sizeof(*eth_hdr)); + ip_hdr->hdr_checksum = ip_sum((unaligned_uint16_t *)ip_hdr, + sizeof(*ip_hdr)); + + /* Initialize UDP header. */ + udp_hdr = (struct rte_udp_hdr *)(ip_hdr + 1); + udp_hdr->src_port = rte_cpu_to_be_16(cfg_udp_src); + udp_hdr->dst_port = rte_cpu_to_be_16(cfg_udp_dst); + udp_hdr->dgram_cksum = 0; /* No UDP checksum. 
*/ + udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_size - + sizeof(*eth_hdr) - + sizeof(*ip_hdr)); + pkt->nb_segs = 1; + pkt->pkt_len = pkt_size; + pkt->ol_flags &= EXT_ATTACHED_MBUF; + pkt->ol_flags |= ol_flags; + pkt->vlan_tci = vlan_tci; + pkt->vlan_tci_outer = vlan_tci_outer; + pkt->l2_len = sizeof(struct rte_ether_hdr); + pkt->l3_len = sizeof(struct rte_ipv4_hdr); + pkts_burst[nb_pkt] = pkt; + + next_flow = (next_flow + 1) % cfg_n_flows; + } + + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt); + /* + * Retry if necessary + */ + if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { + retry = 0; + while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + &pkts_burst[nb_tx], nb_rx - nb_tx); + } + } + fs->tx_packets += nb_tx; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_pkt)) { + /* Back out the flow counter. */ + next_flow -= (nb_pkt - nb_tx); + while (next_flow < 0) + next_flow += cfg_n_flows; + + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_pkt); + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine flow_gen_engine = { + .fwd_mode_name = "flowgen", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_flow_gen, +}; diff --git a/src/spdk/dpdk/app/test-pmd/icmpecho.c b/src/spdk/dpdk/app/test-pmd/icmpecho.c new file mode 100644 index 000000000..65aece16c --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/icmpecho.c @@ -0,0 +1,535 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2013 6WIND S.A. 
+ */ + +#include <stdarg.h> +#include <string.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_memory.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_arp.h> +#include <rte_ip.h> +#include <rte_icmp.h> +#include <rte_string_fns.h> +#include <rte_flow.h> + +#include "testpmd.h" + +static const char * +arp_op_name(uint16_t arp_op) +{ + switch (arp_op) { + case RTE_ARP_OP_REQUEST: + return "ARP Request"; + case RTE_ARP_OP_REPLY: + return "ARP Reply"; + case RTE_ARP_OP_REVREQUEST: + return "Reverse ARP Request"; + case RTE_ARP_OP_REVREPLY: + return "Reverse ARP Reply"; + case RTE_ARP_OP_INVREQUEST: + return "Peer Identify Request"; + case RTE_ARP_OP_INVREPLY: + return "Peer Identify Reply"; + default: + break; + } + return "Unkwown ARP op"; +} + +static const char * +ip_proto_name(uint16_t ip_proto) +{ + static const char * ip_proto_names[] = { + "IP6HOPOPTS", /**< IP6 hop-by-hop options */ + "ICMP", /**< control message protocol */ + "IGMP", /**< group mgmt protocol */ + "GGP", /**< gateway^2 (deprecated) */ + "IPv4", /**< IPv4 encapsulation */ + + "UNASSIGNED", + "TCP", /**< transport control protocol */ + "ST", /**< Stream protocol II */ + "EGP", /**< exterior gateway protocol */ + "PIGP", /**< private interior gateway */ + + "RCC_MON", /**< BBN RCC Monitoring */ + "NVPII", /**< network voice protocol*/ + "PUP", /**< pup */ + "ARGUS", /**< Argus */ + "EMCON", /**< EMCON */ + + "XNET", /**< Cross Net Debugger */ + "CHAOS", /**< Chaos*/ + "UDP", /**< user datagram protocol */ + "MUX", /**< Multiplexing */ + "DCN_MEAS", /**< DCN Measurement Subsystems */ + + "HMP", /**< Host Monitoring */ + "PRM", /**< Packet Radio Measurement */ + "XNS_IDP", /**< xns idp */ + "TRUNK1", /**< Trunk-1 */ + "TRUNK2", /**< Trunk-2 */ + + "LEAF1", /**< Leaf-1 */ + "LEAF2", /**< Leaf-2 */ + "RDP", /**< Reliable Data */ + "IRTP", /**< Reliable Transaction */ + "TP4", /**< tp-4 w/ class negotiation */ + + "BLT", /**< Bulk Data Transfer */ + "NSP", /**< Network Services */ + "INP", /**< Merit Internodal */ + "SEP", /**< Sequential Exchange */ + "3PC", /**< Third Party Connect */ + + "IDPR", /**< InterDomain Policy Routing */ + "XTP", /**< XTP */ + "DDP", /**< Datagram Delivery */ + "CMTP", /**< Control Message Transport */ + "TPXX", /**< TP++ Transport */ + + "ILTP", /**< IL transport protocol */ + "IPv6_HDR", /**< IP6 header */ + "SDRP", /**< Source Demand Routing */ + "IPv6_RTG", /**< IP6 routing header */ + "IPv6_FRAG", /**< IP6 fragmentation header */ + + "IDRP", /**< InterDomain Routing*/ + "RSVP", /**< resource reservation */ + "GRE", /**< General Routing Encap. */ + "MHRP", /**< Mobile Host Routing */ + "BHA", /**< BHA */ + + "ESP", /**< IP6 Encap Sec. Payload */ + "AH", /**< IP6 Auth Header */ + "INLSP", /**< Integ. 
Net Layer Security */ + "SWIPE", /**< IP with encryption */ + "NHRP", /**< Next Hop Resolution */ + + "UNASSIGNED", + "UNASSIGNED", + "UNASSIGNED", + "ICMPv6", /**< ICMP6 */ + "IPv6NONEXT", /**< IP6 no next header */ + + "Ipv6DSTOPTS",/**< IP6 destination option */ + "AHIP", /**< any host internal protocol */ + "CFTP", /**< CFTP */ + "HELLO", /**< "hello" routing protocol */ + "SATEXPAK", /**< SATNET/Backroom EXPAK */ + + "KRYPTOLAN", /**< Kryptolan */ + "RVD", /**< Remote Virtual Disk */ + "IPPC", /**< Pluribus Packet Core */ + "ADFS", /**< Any distributed FS */ + "SATMON", /**< Satnet Monitoring */ + + "VISA", /**< VISA Protocol */ + "IPCV", /**< Packet Core Utility */ + "CPNX", /**< Comp. Prot. Net. Executive */ + "CPHB", /**< Comp. Prot. HeartBeat */ + "WSN", /**< Wang Span Network */ + + "PVP", /**< Packet Video Protocol */ + "BRSATMON", /**< BackRoom SATNET Monitoring */ + "ND", /**< Sun net disk proto (temp.) */ + "WBMON", /**< WIDEBAND Monitoring */ + "WBEXPAK", /**< WIDEBAND EXPAK */ + + "EON", /**< ISO cnlp */ + "VMTP", /**< VMTP */ + "SVMTP", /**< Secure VMTP */ + "VINES", /**< Banyon VINES */ + "TTP", /**< TTP */ + + "IGP", /**< NSFNET-IGP */ + "DGP", /**< dissimilar gateway prot. */ + "TCF", /**< TCF */ + "IGRP", /**< Cisco/GXS IGRP */ + "OSPFIGP", /**< OSPFIGP */ + + "SRPC", /**< Strite RPC protocol */ + "LARP", /**< Locus Address Resolution */ + "MTP", /**< Multicast Transport */ + "AX25", /**< AX.25 Frames */ + "4IN4", /**< IP encapsulated in IP */ + + "MICP", /**< Mobile Int.ing control */ + "SCCSP", /**< Semaphore Comm. security */ + "ETHERIP", /**< Ethernet IP encapsulation */ + "ENCAP", /**< encapsulation header */ + "AES", /**< any private encr. scheme */ + + "GMTP", /**< GMTP */ + "IPCOMP", /**< payload compression (IPComp) */ + "UNASSIGNED", + "UNASSIGNED", + "PIM", /**< Protocol Independent Mcast */ + }; + + if (ip_proto < RTE_DIM(ip_proto_names)) + return ip_proto_names[ip_proto]; + switch (ip_proto) { +#ifdef IPPROTO_PGM + case IPPROTO_PGM: /**< PGM */ + return "PGM"; +#endif + case IPPROTO_SCTP: /**< Stream Control Transport Protocol */ + return "SCTP"; +#ifdef IPPROTO_DIVERT + case IPPROTO_DIVERT: /**< divert pseudo-protocol */ + return "DIVERT"; +#endif + case IPPROTO_RAW: /**< raw IP packet */ + return "RAW"; + default: + break; + } + return "UNASSIGNED"; +} + +static void +ipv4_addr_to_dot(uint32_t be_ipv4_addr, char *buf) +{ + uint32_t ipv4_addr; + + ipv4_addr = rte_be_to_cpu_32(be_ipv4_addr); + sprintf(buf, "%d.%d.%d.%d", (ipv4_addr >> 24) & 0xFF, + (ipv4_addr >> 16) & 0xFF, (ipv4_addr >> 8) & 0xFF, + ipv4_addr & 0xFF); +} + +static void +ether_addr_dump(const char *what, const struct rte_ether_addr *ea) +{ + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, ea); + if (what) + printf("%s", what); + printf("%s", buf); +} + +static void +ipv4_addr_dump(const char *what, uint32_t be_ipv4_addr) +{ + char buf[16]; + + ipv4_addr_to_dot(be_ipv4_addr, buf); + if (what) + printf("%s", what); + printf("%s", buf); +} + +static uint16_t +ipv4_hdr_cksum(struct rte_ipv4_hdr *ip_h) +{ + uint16_t *v16_h; + uint32_t ip_cksum; + + /* + * Compute the sum of successive 16-bit words of the IPv4 header, + * skipping the checksum field of the header. 
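+ * The checksum occupies 16-bit word 5 of the header, which is why v16_h[5]
+ * is left out of the sum below.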
+ */ + v16_h = (unaligned_uint16_t *) ip_h; + ip_cksum = v16_h[0] + v16_h[1] + v16_h[2] + v16_h[3] + + v16_h[4] + v16_h[6] + v16_h[7] + v16_h[8] + v16_h[9]; + + /* reduce 32 bit checksum to 16 bits and complement it */ + ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16); + ip_cksum = (ip_cksum & 0xffff) + (ip_cksum >> 16); + ip_cksum = (~ip_cksum) & 0x0000FFFF; + return (ip_cksum == 0) ? 0xFFFF : (uint16_t) ip_cksum; +} + +#define is_multicast_ipv4_addr(ipv4_addr) \ + (((rte_be_to_cpu_32((ipv4_addr)) >> 24) & 0x000000FF) == 0xE0) + +/* + * Receive a burst of packets, lookup for ICMP echo requests, and, if any, + * send back ICMP echo replies. + */ +static void +reply_to_icmp_echo_rqsts(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mbuf *pkt; + struct rte_ether_hdr *eth_h; + struct rte_vlan_hdr *vlan_h; + struct rte_arp_hdr *arp_h; + struct rte_ipv4_hdr *ip_h; + struct rte_icmp_hdr *icmp_h; + struct rte_ether_addr eth_addr; + uint32_t retry; + uint32_t ip_addr; + uint16_t nb_rx; + uint16_t nb_tx; + uint16_t nb_replies; + uint16_t eth_type; + uint16_t vlan_id; + uint16_t arp_op; + uint16_t arp_pro; + uint32_t cksum; + uint8_t i; + int l2_len; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* + * First, receive a burst of packets. + */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + fs->rx_packets += nb_rx; + nb_replies = 0; + for (i = 0; i < nb_rx; i++) { + if (likely(i < nb_rx - 1)) + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1], + void *)); + pkt = pkts_burst[i]; + eth_h = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *); + eth_type = RTE_BE_TO_CPU_16(eth_h->ether_type); + l2_len = sizeof(struct rte_ether_hdr); + if (verbose_level > 0) { + printf("\nPort %d pkt-len=%u nb-segs=%u\n", + fs->rx_port, pkt->pkt_len, pkt->nb_segs); + ether_addr_dump(" ETH: src=", ð_h->s_addr); + ether_addr_dump(" dst=", ð_h->d_addr); + } + if (eth_type == RTE_ETHER_TYPE_VLAN) { + vlan_h = (struct rte_vlan_hdr *) + ((char *)eth_h + sizeof(struct rte_ether_hdr)); + l2_len += sizeof(struct rte_vlan_hdr); + eth_type = rte_be_to_cpu_16(vlan_h->eth_proto); + if (verbose_level > 0) { + vlan_id = rte_be_to_cpu_16(vlan_h->vlan_tci) + & 0xFFF; + printf(" [vlan id=%u]", vlan_id); + } + } + if (verbose_level > 0) { + printf(" type=0x%04x\n", eth_type); + } + + /* Reply to ARP requests */ + if (eth_type == RTE_ETHER_TYPE_ARP) { + arp_h = (struct rte_arp_hdr *) ((char *)eth_h + l2_len); + arp_op = RTE_BE_TO_CPU_16(arp_h->arp_opcode); + arp_pro = RTE_BE_TO_CPU_16(arp_h->arp_protocol); + if (verbose_level > 0) { + printf(" ARP: hrd=%d proto=0x%04x hln=%d " + "pln=%d op=%u (%s)\n", + RTE_BE_TO_CPU_16(arp_h->arp_hardware), + arp_pro, arp_h->arp_hlen, + arp_h->arp_plen, arp_op, + arp_op_name(arp_op)); + } + if ((RTE_BE_TO_CPU_16(arp_h->arp_hardware) != + RTE_ARP_HRD_ETHER) || + (arp_pro != RTE_ETHER_TYPE_IPV4) || + (arp_h->arp_hlen != 6) || + (arp_h->arp_plen != 4) + ) { + rte_pktmbuf_free(pkt); + if (verbose_level > 0) + printf("\n"); + continue; + } + if (verbose_level > 0) { + rte_ether_addr_copy(&arp_h->arp_data.arp_sha, + ð_addr); + ether_addr_dump(" sha=", ð_addr); + ip_addr = arp_h->arp_data.arp_sip; + ipv4_addr_dump(" sip=", ip_addr); + printf("\n"); + 
rte_ether_addr_copy(&arp_h->arp_data.arp_tha, + ð_addr); + ether_addr_dump(" tha=", ð_addr); + ip_addr = arp_h->arp_data.arp_tip; + ipv4_addr_dump(" tip=", ip_addr); + printf("\n"); + } + if (arp_op != RTE_ARP_OP_REQUEST) { + rte_pktmbuf_free(pkt); + continue; + } + + /* + * Build ARP reply. + */ + + /* Use source MAC address as destination MAC address. */ + rte_ether_addr_copy(ð_h->s_addr, ð_h->d_addr); + /* Set source MAC address with MAC address of TX port */ + rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, + ð_h->s_addr); + + arp_h->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY); + rte_ether_addr_copy(&arp_h->arp_data.arp_tha, + ð_addr); + rte_ether_addr_copy(&arp_h->arp_data.arp_sha, + &arp_h->arp_data.arp_tha); + rte_ether_addr_copy(ð_h->s_addr, + &arp_h->arp_data.arp_sha); + + /* Swap IP addresses in ARP payload */ + ip_addr = arp_h->arp_data.arp_sip; + arp_h->arp_data.arp_sip = arp_h->arp_data.arp_tip; + arp_h->arp_data.arp_tip = ip_addr; + pkts_burst[nb_replies++] = pkt; + continue; + } + + if (eth_type != RTE_ETHER_TYPE_IPV4) { + rte_pktmbuf_free(pkt); + continue; + } + ip_h = (struct rte_ipv4_hdr *) ((char *)eth_h + l2_len); + if (verbose_level > 0) { + ipv4_addr_dump(" IPV4: src=", ip_h->src_addr); + ipv4_addr_dump(" dst=", ip_h->dst_addr); + printf(" proto=%d (%s)\n", + ip_h->next_proto_id, + ip_proto_name(ip_h->next_proto_id)); + } + + /* + * Check if packet is a ICMP echo request. + */ + icmp_h = (struct rte_icmp_hdr *) ((char *)ip_h + + sizeof(struct rte_ipv4_hdr)); + if (! ((ip_h->next_proto_id == IPPROTO_ICMP) && + (icmp_h->icmp_type == RTE_IP_ICMP_ECHO_REQUEST) && + (icmp_h->icmp_code == 0))) { + rte_pktmbuf_free(pkt); + continue; + } + + if (verbose_level > 0) + printf(" ICMP: echo request seq id=%d\n", + rte_be_to_cpu_16(icmp_h->icmp_seq_nb)); + + /* + * Prepare ICMP echo reply to be sent back. + * - switch ethernet source and destinations addresses, + * - use the request IP source address as the reply IP + * destination address, + * - if the request IP destination address is a multicast + * address: + * - choose a reply IP source address different from the + * request IP source address, + * - re-compute the IP header checksum. + * Otherwise: + * - switch the request IP source and destination + * addresses in the reply IP header, + * - keep the IP header checksum unchanged. + * - set RTE_IP_ICMP_ECHO_REPLY in ICMP header. + * ICMP checksum is computed by assuming it is valid in the + * echo request and not verified. + */ + rte_ether_addr_copy(ð_h->s_addr, ð_addr); + rte_ether_addr_copy(ð_h->d_addr, ð_h->s_addr); + rte_ether_addr_copy(ð_addr, ð_h->d_addr); + ip_addr = ip_h->src_addr; + if (is_multicast_ipv4_addr(ip_h->dst_addr)) { + uint32_t ip_src; + + ip_src = rte_be_to_cpu_32(ip_addr); + if ((ip_src & 0x00000003) == 1) + ip_src = (ip_src & 0xFFFFFFFC) | 0x00000002; + else + ip_src = (ip_src & 0xFFFFFFFC) | 0x00000001; + ip_h->src_addr = rte_cpu_to_be_32(ip_src); + ip_h->dst_addr = ip_addr; + ip_h->hdr_checksum = ipv4_hdr_cksum(ip_h); + } else { + ip_h->src_addr = ip_h->dst_addr; + ip_h->dst_addr = ip_addr; + } + icmp_h->icmp_type = RTE_IP_ICMP_ECHO_REPLY; + cksum = ~icmp_h->icmp_cksum & 0xffff; + cksum += ~htons(RTE_IP_ICMP_ECHO_REQUEST << 8) & 0xffff; + cksum += htons(RTE_IP_ICMP_ECHO_REPLY << 8); + cksum = (cksum & 0xffff) + (cksum >> 16); + cksum = (cksum & 0xffff) + (cksum >> 16); + icmp_h->icmp_cksum = ~cksum; + pkts_burst[nb_replies++] = pkt; + } + + /* Send back ICMP echo replies, if any. 
*/ + if (nb_replies > 0) { + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, + nb_replies); + /* + * Retry if necessary + */ + if (unlikely(nb_tx < nb_replies) && fs->retry_enabled) { + retry = 0; + while (nb_tx < nb_replies && + retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, + fs->tx_queue, + &pkts_burst[nb_tx], + nb_replies - nb_tx); + } + } + fs->tx_packets += nb_tx; +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_replies)) { + fs->fwd_dropped += (nb_replies - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_replies); + } + } + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine icmp_echo_engine = { + .fwd_mode_name = "icmpecho", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = reply_to_icmp_echo_rqsts, +}; diff --git a/src/spdk/dpdk/app/test-pmd/ieee1588fwd.c b/src/spdk/dpdk/app/test-pmd/ieee1588fwd.c new file mode 100644 index 000000000..e3b98e3e0 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/ieee1588fwd.c @@ -0,0 +1,218 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2015 Intel Corporation + */ + + +#include <rte_cycles.h> +#include <rte_ethdev.h> +#include <rte_flow.h> + +#include "testpmd.h" + +/** + * The structure of a PTP V2 packet. + * + * Only the minimum fields used by the ieee1588 test are represented. + */ +struct ptpv2_msg { + uint8_t msg_id; + uint8_t version; /**< must be 0x02 */ + uint8_t unused[34]; +}; + +#define PTP_SYNC_MESSAGE 0x0 +#define PTP_DELAY_REQ_MESSAGE 0x1 +#define PTP_PATH_DELAY_REQ_MESSAGE 0x2 +#define PTP_PATH_DELAY_RESP_MESSAGE 0x3 +#define PTP_FOLLOWUP_MESSAGE 0x8 +#define PTP_DELAY_RESP_MESSAGE 0x9 +#define PTP_PATH_DELAY_FOLLOWUP_MESSAGE 0xA +#define PTP_ANNOUNCE_MESSAGE 0xB +#define PTP_SIGNALLING_MESSAGE 0xC +#define PTP_MANAGEMENT_MESSAGE 0xD + +/* + * Forwarding of IEEE1588 Precise Time Protocol (PTP) packets. + * + * In this mode, packets are received one by one and are expected to be + * PTP V2 L2 Ethernet frames (with the specific Ethernet type "0x88F7") + * containing PTP "sync" messages (version 2 at offset 1, and message ID + * 0 at offset 0). + * + * Check that each received packet is a IEEE1588 PTP V2 packet of type + * PTP_SYNC_MESSAGE, and that it has been identified and timestamped + * by the hardware. + * Check that the value of the last RX timestamp recorded by the controller + * is greater than the previous one. + * + * If everything is OK, send the received packet back on the same port, + * requesting for it to be timestamped by the hardware. + * Check that the value of the last TX timestamp recorded by the controller + * is greater than the previous one. 
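+ *
+ * This mode is selected with the "set fwd ieee1588" testpmd command;
+ * hardware timestamping is enabled per port in port_ieee1588_fwd_begin()
+ * and disabled again in port_ieee1588_fwd_end() at the end of this file.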
+ */ + +static void +port_ieee1588_rx_timestamp_check(portid_t pi, uint32_t index) +{ + struct timespec timestamp = {0, 0}; + + if (rte_eth_timesync_read_rx_timestamp(pi, &timestamp, index) < 0) { + printf("Port %u RX timestamp registers not valid\n", pi); + return; + } + printf("Port %u RX timestamp value %lu s %lu ns\n", + pi, timestamp.tv_sec, timestamp.tv_nsec); +} + +#define MAX_TX_TMST_WAIT_MICROSECS 1000 /**< 1 milli-second */ + +static void +port_ieee1588_tx_timestamp_check(portid_t pi) +{ + struct timespec timestamp = {0, 0}; + unsigned wait_us = 0; + + while ((rte_eth_timesync_read_tx_timestamp(pi, &timestamp) < 0) && + (wait_us < MAX_TX_TMST_WAIT_MICROSECS)) { + rte_delay_us(1); + wait_us++; + } + if (wait_us >= MAX_TX_TMST_WAIT_MICROSECS) { + printf("Port %u TX timestamp registers not valid after " + "%u micro-seconds\n", + pi, MAX_TX_TMST_WAIT_MICROSECS); + return; + } + printf("Port %u TX timestamp value %lu s %lu ns validated after " + "%u micro-second%s\n", + pi, timestamp.tv_sec, timestamp.tv_nsec, wait_us, + (wait_us == 1) ? "" : "s"); +} + +static void +ieee1588_packet_fwd(struct fwd_stream *fs) +{ + struct rte_mbuf *mb; + struct rte_ether_hdr *eth_hdr; + struct rte_ether_addr addr; + struct ptpv2_msg *ptp_hdr; + uint16_t eth_type; + uint32_t timesync_index; + + /* + * Receive 1 packet at a time. + */ + if (rte_eth_rx_burst(fs->rx_port, fs->rx_queue, &mb, 1) == 0) + return; + + fs->rx_packets += 1; + + /* + * Check that the received packet is a PTP packet that was detected + * by the hardware. + */ + eth_hdr = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *); + eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); + + if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) { + if (eth_type == RTE_ETHER_TYPE_1588) { + printf("Port %u Received PTP packet not filtered" + " by hardware\n", + fs->rx_port); + } else { + printf("Port %u Received non PTP packet type=0x%4x " + "len=%u\n", + fs->rx_port, eth_type, + (unsigned) mb->pkt_len); + } + rte_pktmbuf_free(mb); + return; + } + if (eth_type != RTE_ETHER_TYPE_1588) { + printf("Port %u Received NON PTP packet incorrectly" + " detected by hardware\n", + fs->rx_port); + rte_pktmbuf_free(mb); + return; + } + + /* + * Check that the received PTP packet is a PTP V2 packet of type + * PTP_SYNC_MESSAGE. + */ + ptp_hdr = (struct ptpv2_msg *) (rte_pktmbuf_mtod(mb, char *) + + sizeof(struct rte_ether_hdr)); + if (ptp_hdr->version != 0x02) { + printf("Port %u Received PTP V2 Ethernet frame with wrong PTP" + " protocol version 0x%x (should be 0x02)\n", + fs->rx_port, ptp_hdr->version); + rte_pktmbuf_free(mb); + return; + } + if (ptp_hdr->msg_id != PTP_SYNC_MESSAGE) { + printf("Port %u Received PTP V2 Ethernet frame with unexpected" + " message ID 0x%x (expected 0x0 - PTP_SYNC_MESSAGE)\n", + fs->rx_port, ptp_hdr->msg_id); + rte_pktmbuf_free(mb); + return; + } + printf("Port %u IEEE1588 PTP V2 SYNC Message filtered by hardware\n", + fs->rx_port); + + /* + * Check that the received PTP packet has been timestamped by the + * hardware. + */ + if (! (mb->ol_flags & PKT_RX_IEEE1588_TMST)) { + printf("Port %u Received PTP packet not timestamped" + " by hardware\n", + fs->rx_port); + rte_pktmbuf_free(mb); + return; + } + + /* For i40e we need the timesync register index. It is ignored for the + * other PMDs. */ + timesync_index = mb->timesync & 0x3; + /* Read and check the RX timestamp. */ + port_ieee1588_rx_timestamp_check(fs->rx_port, timesync_index); + + /* Swap dest and src mac addresses.
 */ + rte_ether_addr_copy(&eth_hdr->d_addr, &addr); + rte_ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr); + rte_ether_addr_copy(&addr, &eth_hdr->s_addr); + + /* Forward PTP packet with hardware TX timestamp */ + mb->ol_flags |= PKT_TX_IEEE1588_TMST; + fs->tx_packets += 1; + if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) { + printf("Port %u sent PTP packet dropped\n", fs->rx_port); + fs->fwd_dropped += 1; + rte_pktmbuf_free(mb); + return; + } + + /* + * Check the TX timestamp. + */ + port_ieee1588_tx_timestamp_check(fs->rx_port); +} + +static void +port_ieee1588_fwd_begin(portid_t pi) +{ + rte_eth_timesync_enable(pi); +} + +static void +port_ieee1588_fwd_end(portid_t pi) +{ + rte_eth_timesync_disable(pi); +} + +struct fwd_engine ieee1588_fwd_engine = { + .fwd_mode_name = "ieee1588", + .port_fwd_begin = port_ieee1588_fwd_begin, + .port_fwd_end = port_ieee1588_fwd_end, + .packet_fwd = ieee1588_packet_fwd, +}; diff --git a/src/spdk/dpdk/app/test-pmd/iofwd.c b/src/spdk/dpdk/app/test-pmd/iofwd.c new file mode 100644 index 000000000..9dce76efe --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/iofwd.c @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#include <stdarg.h> +#include <stdio.h> +#include <string.h> +#include <errno.h> +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_memcpy.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_string_fns.h> +#include <rte_flow.h> + +#include "testpmd.h" + +/* + * Forwarding of packets in I/O mode. + * Forward packets "as-is". + * This is the fastest possible forwarding operation, as it does not access + * the packets' data. + */ +static void +pkt_burst_io_forward(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + uint16_t nb_rx; + uint16_t nb_tx; + uint32_t retry; + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* + * Receive a burst of packets and forward them.
+ */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, + pkts_burst, nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; + fs->rx_packets += nb_rx; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + pkts_burst, nb_rx); + /* + * Retry if necessary + */ + if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { + retry = 0; + while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + &pkts_burst[nb_tx], nb_rx - nb_tx); + } + } + fs->tx_packets += nb_tx; +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_rx)) { + fs->fwd_dropped += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine io_fwd_engine = { + .fwd_mode_name = "io", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_io_forward, +}; diff --git a/src/spdk/dpdk/app/test-pmd/macfwd.c b/src/spdk/dpdk/app/test-pmd/macfwd.c new file mode 100644 index 000000000..d2ebb1105 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/macfwd.c @@ -0,0 +1,141 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#include <stdarg.h> +#include <string.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_ip.h> +#include <rte_string_fns.h> +#include <rte_flow.h> + +#include "testpmd.h" + +/* + * Forwarding of packets in MAC mode. + * Change the source and the destination Ethernet addressed of packets + * before forwarding them. + */ +static void +pkt_burst_mac_forward(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_port *txp; + struct rte_mbuf *mb; + struct rte_ether_hdr *eth_hdr; + uint32_t retry; + uint16_t nb_rx; + uint16_t nb_tx; + uint16_t i; + uint64_t ol_flags = 0; + uint64_t tx_offloads; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* + * Receive a burst of packets and forward them. 
+ */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + fs->rx_packets += nb_rx; + txp = &ports[fs->tx_port]; + tx_offloads = txp->dev_conf.txmode.offloads; + if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) + ol_flags = PKT_TX_VLAN_PKT; + if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT) + ol_flags |= PKT_TX_QINQ_PKT; + if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) + ol_flags |= PKT_TX_MACSEC; + for (i = 0; i < nb_rx; i++) { + if (likely(i < nb_rx - 1)) + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1], + void *)); + mb = pkts_burst[i]; + eth_hdr = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *); + rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], + &eth_hdr->d_addr); + rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, + &eth_hdr->s_addr); + mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF; + mb->ol_flags |= ol_flags; + mb->l2_len = sizeof(struct rte_ether_hdr); + mb->l3_len = sizeof(struct rte_ipv4_hdr); + mb->vlan_tci = txp->tx_vlan_id; + mb->vlan_tci_outer = txp->tx_vlan_id_outer; + } + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx); + /* + * Retry if necessary + */ + if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { + retry = 0; + while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + &pkts_burst[nb_tx], nb_rx - nb_tx); + } + } + + fs->tx_packets += nb_tx; +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_rx)) { + fs->fwd_dropped += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine mac_fwd_engine = { + .fwd_mode_name = "mac", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_mac_forward, +}; diff --git a/src/spdk/dpdk/app/test-pmd/macswap.c b/src/spdk/dpdk/app/test-pmd/macswap.c new file mode 100644 index 000000000..8428c26d8 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/macswap.c @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2014-2020 Mellanox Technologies, Ltd + */ + +#include <stdarg.h> +#include <string.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_ip.h> +#include <rte_string_fns.h> +#include <rte_flow.h> + +#include "testpmd.h" +#if defined(RTE_ARCH_X86) +#include "macswap_sse.h" +#elif defined(RTE_MACHINE_CPUFLAG_NEON) +#include "macswap_neon.h" +#else +#include "macswap.h" +#endif + +/* + * MAC swap forwarding mode: Swap the source and the destination Ethernet + * addresses of packets before forwarding them.
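+ *
+ * [Editorial example, made-up addresses] A frame received with
+ * dst=02:00:00:00:00:01 and src=02:00:00:00:00:02 is sent back out with
+ * dst=02:00:00:00:00:02 and src=02:00:00:00:00:01; the swap itself is done
+ * by the per-architecture do_macswap() pulled in from macswap_sse.h,
+ * macswap_neon.h or macswap.h above.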
+ */ +static void +pkt_burst_mac_swap(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_port *txp; + uint16_t nb_rx; + uint16_t nb_tx; + uint32_t retry; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* + * Receive a burst of packets and forward them. + */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + fs->rx_packets += nb_rx; + txp = &ports[fs->tx_port]; + + do_macswap(pkts_burst, nb_rx, txp); + + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx); + /* + * Retry if necessary + */ + if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { + retry = 0; + while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + &pkts_burst[nb_tx], nb_rx - nb_tx); + } + } + fs->tx_packets += nb_tx; +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_rx)) { + fs->fwd_dropped += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine mac_swap_engine = { + .fwd_mode_name = "macswap", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_mac_swap, +}; diff --git a/src/spdk/dpdk/app/test-pmd/macswap.h b/src/spdk/dpdk/app/test-pmd/macswap.h new file mode 100644 index 000000000..013844156 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/macswap.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _MACSWAP_H_ +#define _MACSWAP_H_ + +#include "macswap_common.h" + +static inline void +do_macswap(struct rte_mbuf *pkts[], uint16_t nb, + struct rte_port *txp) +{ + struct rte_ether_hdr *eth_hdr; + struct rte_mbuf *mb; + struct rte_ether_addr addr; + uint64_t ol_flags; + int i; + + ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads); + vlan_qinq_set(pkts, nb, ol_flags, + txp->tx_vlan_id, txp->tx_vlan_id_outer); + + for (i = 0; i < nb; i++) { + if (likely(i < nb - 1)) + rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1], void *)); + mb = pkts[i]; + + eth_hdr = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *); + + /* Swap dest and src mac addresses. */ + rte_ether_addr_copy(&eth_hdr->d_addr, &addr); + rte_ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr); + rte_ether_addr_copy(&addr, &eth_hdr->s_addr); + + mbuf_field_set(mb, ol_flags); + } +} + +#endif /* _MACSWAP_H_ */ diff --git a/src/spdk/dpdk/app/test-pmd/macswap_common.h b/src/spdk/dpdk/app/test-pmd/macswap_common.h new file mode 100644 index 000000000..7e9a3590a --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/macswap_common.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _MACSWAP_COMMON_H_ +#define _MACSWAP_COMMON_H_ + +static inline uint64_t +ol_flags_init(uint64_t tx_offload) +{ + uint64_t ol_flags = 0; + + ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ? + PKT_TX_VLAN : 0; + ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+ PKT_TX_QINQ : 0; + ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ? + PKT_TX_MACSEC : 0; + + return ol_flags; +} + +static inline void +vlan_qinq_set(struct rte_mbuf *pkts[], uint16_t nb, + uint64_t ol_flags, uint16_t vlan, uint16_t outer_vlan) +{ + int i; + + if (ol_flags & PKT_TX_VLAN) + for (i = 0; i < nb; i++) + pkts[i]->vlan_tci = vlan; + if (ol_flags & PKT_TX_QINQ) + for (i = 0; i < nb; i++) + pkts[i]->vlan_tci_outer = outer_vlan; +} + +static inline void +mbuf_field_set(struct rte_mbuf *mb, uint64_t ol_flags) +{ + mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF; + mb->ol_flags |= ol_flags; + mb->l2_len = sizeof(struct rte_ether_hdr); + mb->l3_len = sizeof(struct rte_ipv4_hdr); +} + +#endif /* _MACSWAP_COMMON_H_ */ diff --git a/src/spdk/dpdk/app/test-pmd/macswap_neon.h b/src/spdk/dpdk/app/test-pmd/macswap_neon.h new file mode 100644 index 000000000..df6c260cd --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/macswap_neon.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2019 Arm Limited + * + * Copyright(c) 2019 Intel Corporation + * + * Derived do_macswap implementation from app/test-pmd/macswap_sse.h + */ + +#ifndef _MACSWAP_NEON_H_ +#define _MACSWAP_NEON_H_ + +#include "macswap_common.h" +#include "rte_vect.h" + +static inline void +do_macswap(struct rte_mbuf *pkts[], uint16_t nb, + struct rte_port *txp) +{ + struct rte_ether_hdr *eth_hdr[4]; + struct rte_mbuf *mb[4]; + uint64_t ol_flags; + int i; + int r; + uint8x16_t v0, v1, v2, v3; + /** + * Index map be used to shuffle the 16 bytes. + * byte 0-5 will be swapped with byte 6-11. + * byte 12-15 will keep unchanged. + */ + const uint8x16_t idx_map = {6, 7, 8, 9, 10, 11, 0, 1, 2, 3, 4, 5, + 12, 13, 14, 15}; + + ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads); + vlan_qinq_set(pkts, nb, ol_flags, + txp->tx_vlan_id, txp->tx_vlan_id_outer); + + i = 0; + r = nb; + + while (r >= 4) { + if (r >= 8) { + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 4], void *)); + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 5], void *)); + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 6], void *)); + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 7], void *)); + } + + mb[0] = pkts[i++]; + eth_hdr[0] = rte_pktmbuf_mtod(mb[0], struct rte_ether_hdr *); + + mb[1] = pkts[i++]; + eth_hdr[1] = rte_pktmbuf_mtod(mb[1], struct rte_ether_hdr *); + + mb[2] = pkts[i++]; + eth_hdr[2] = rte_pktmbuf_mtod(mb[2], struct rte_ether_hdr *); + + mb[3] = pkts[i++]; + eth_hdr[3] = rte_pktmbuf_mtod(mb[3], struct rte_ether_hdr *); + + v0 = vld1q_u8((uint8_t const *)eth_hdr[0]); + v1 = vld1q_u8((uint8_t const *)eth_hdr[1]); + v2 = vld1q_u8((uint8_t const *)eth_hdr[2]); + v3 = vld1q_u8((uint8_t const *)eth_hdr[3]); + + v0 = vqtbl1q_u8(v0, idx_map); + v1 = vqtbl1q_u8(v1, idx_map); + v2 = vqtbl1q_u8(v2, idx_map); + v3 = vqtbl1q_u8(v3, idx_map); + + vst1q_u8((uint8_t *)eth_hdr[0], v0); + vst1q_u8((uint8_t *)eth_hdr[1], v1); + vst1q_u8((uint8_t *)eth_hdr[2], v2); + vst1q_u8((uint8_t *)eth_hdr[3], v3); + + mbuf_field_set(mb[0], ol_flags); + mbuf_field_set(mb[1], ol_flags); + mbuf_field_set(mb[2], ol_flags); + mbuf_field_set(mb[3], ol_flags); + r -= 4; + } + + for ( ; i < nb; i++) { + if (i < nb - 1) + rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1], void *)); + mb[0] = pkts[i]; + eth_hdr[0] = rte_pktmbuf_mtod(mb[0], struct rte_ether_hdr *); + + /* Swap dest and src mac addresses. 
*/ + v0 = vld1q_u8((uint8_t const *)eth_hdr[0]); + v0 = vqtbl1q_u8(v0, idx_map); + vst1q_u8((uint8_t *)eth_hdr[0], v0); + + mbuf_field_set(mb[0], ol_flags); + } +} + +#endif /* _MACSWAP_NEON_H_ */ diff --git a/src/spdk/dpdk/app/test-pmd/macswap_sse.h b/src/spdk/dpdk/app/test-pmd/macswap_sse.h new file mode 100644 index 000000000..223f87a53 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/macswap_sse.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#ifndef _MACSWAP_SSE_H_ +#define _MACSWAP_SSE_H_ + +#include "macswap_common.h" + +static inline void +do_macswap(struct rte_mbuf *pkts[], uint16_t nb, + struct rte_port *txp) +{ + struct rte_ether_hdr *eth_hdr[4]; + struct rte_mbuf *mb[4]; + uint64_t ol_flags; + int i; + int r; + __m128i addr0, addr1, addr2, addr3; + /** + * shuffle mask be used to shuffle the 16 bytes. + * byte 0-5 wills be swapped with byte 6-11. + * byte 12-15 will keep unchanged. + */ + __m128i shfl_msk = _mm_set_epi8(15, 14, 13, 12, + 5, 4, 3, 2, + 1, 0, 11, 10, + 9, 8, 7, 6); + + ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads); + vlan_qinq_set(pkts, nb, ol_flags, + txp->tx_vlan_id, txp->tx_vlan_id_outer); + + i = 0; + r = nb; + + while (r >= 4) { + if (r >= 8) { + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 4], void *)); + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 5], void *)); + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 6], void *)); + rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 7], void *)); + } + + mb[0] = pkts[i++]; + eth_hdr[0] = rte_pktmbuf_mtod(mb[0], struct rte_ether_hdr *); + addr0 = _mm_loadu_si128((__m128i *)eth_hdr[0]); + + mb[1] = pkts[i++]; + eth_hdr[1] = rte_pktmbuf_mtod(mb[1], struct rte_ether_hdr *); + addr1 = _mm_loadu_si128((__m128i *)eth_hdr[1]); + + + mb[2] = pkts[i++]; + eth_hdr[2] = rte_pktmbuf_mtod(mb[2], struct rte_ether_hdr *); + addr2 = _mm_loadu_si128((__m128i *)eth_hdr[2]); + + mb[3] = pkts[i++]; + eth_hdr[3] = rte_pktmbuf_mtod(mb[3], struct rte_ether_hdr *); + addr3 = _mm_loadu_si128((__m128i *)eth_hdr[3]); + + addr0 = _mm_shuffle_epi8(addr0, shfl_msk); + addr1 = _mm_shuffle_epi8(addr1, shfl_msk); + addr2 = _mm_shuffle_epi8(addr2, shfl_msk); + addr3 = _mm_shuffle_epi8(addr3, shfl_msk); + + _mm_storeu_si128((__m128i *)eth_hdr[0], addr0); + _mm_storeu_si128((__m128i *)eth_hdr[1], addr1); + _mm_storeu_si128((__m128i *)eth_hdr[2], addr2); + _mm_storeu_si128((__m128i *)eth_hdr[3], addr3); + + mbuf_field_set(mb[0], ol_flags); + mbuf_field_set(mb[1], ol_flags); + mbuf_field_set(mb[2], ol_flags); + mbuf_field_set(mb[3], ol_flags); + r -= 4; + } + + for ( ; i < nb; i++) { + if (i < nb - 1) + rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1], void *)); + mb[0] = pkts[i]; + eth_hdr[0] = rte_pktmbuf_mtod(mb[0], struct rte_ether_hdr *); + + /* Swap dest and src mac addresses. 
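+ *
+ * [Editorial note] _mm_set_epi8() lists bytes from element 15 down to
+ * element 0, so shfl_msk in memory order is 6,7,8,9,10,11, 0,1,2,3,4,5,
+ * 12,13,14,15. _mm_shuffle_epi8() writes result byte i from source byte
+ * shfl_msk[i], which moves the source MAC into the destination slot and
+ * vice versa while leaving bytes 12-15 (EtherType onwards) unchanged,
+ * the same mapping as idx_map in macswap_neon.h.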
*/ + addr0 = _mm_loadu_si128((__m128i *)eth_hdr[0]); + addr0 = _mm_shuffle_epi8(addr0, shfl_msk); + _mm_storeu_si128((__m128i *)eth_hdr[0], addr0); + + mbuf_field_set(mb[0], ol_flags); + } +} + +#endif /* _MACSWAP_SSE_H_ */ diff --git a/src/spdk/dpdk/app/test-pmd/meson.build b/src/spdk/dpdk/app/test-pmd/meson.build new file mode 100644 index 000000000..487288297 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/meson.build @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Intel Corporation + +# override default name to drop the hyphen +name = 'testpmd' +cflags += '-Wno-deprecated-declarations' +sources = files('cmdline.c', + 'cmdline_flow.c', + 'cmdline_mtr.c', + 'cmdline_tm.c', + 'config.c', + 'csumonly.c', + 'flowgen.c', + 'icmpecho.c', + 'ieee1588fwd.c', + 'iofwd.c', + 'macfwd.c', + 'macswap.c', + 'noisy_vnf.c', + 'parameters.c', + 'rxonly.c', + 'testpmd.c', + 'txonly.c', + 'util.c') + +deps += ['ethdev', 'gro', 'gso', 'cmdline', 'metrics', 'meter', 'bus_pci'] +if dpdk_conf.has('RTE_LIBRTE_PDUMP') + deps += 'pdump' +endif +if dpdk_conf.has('RTE_LIBRTE_BNXT_PMD') + deps += 'pmd_bnxt' +endif +if dpdk_conf.has('RTE_LIBRTE_I40E_PMD') + deps += 'pmd_i40e' +endif +if dpdk_conf.has('RTE_LIBRTE_IXGBE_PMD') + deps += 'pmd_ixgbe' +endif +if dpdk_conf.has('RTE_LIBRTE_SOFTNIC_PMD') + sources += files('softnicfwd.c') + deps += 'pmd_softnic' +endif +if dpdk_conf.has('RTE_LIBRTE_DPAA_PMD') + deps += ['bus_dpaa', 'mempool_dpaa', 'pmd_dpaa'] +endif +if dpdk_conf.has('RTE_LIBRTE_BPF') + sources += files('bpf_cmd.c') + deps += 'bpf' +endif diff --git a/src/spdk/dpdk/app/test-pmd/noisy_vnf.c b/src/spdk/dpdk/app/test-pmd/noisy_vnf.c new file mode 100644 index 000000000..58c4ee925 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/noisy_vnf.c @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Red Hat Corp. 
+ */ + +#include <stdarg.h> +#include <stdio.h> +#include <stdbool.h> +#include <string.h> +#include <errno.h> +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <rte_common.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_memcpy.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_flow.h> +#include <rte_malloc.h> + +#include "testpmd.h" + +struct noisy_config { + struct rte_ring *f; + uint64_t prev_time; + char *vnf_mem; + bool do_buffering; + bool do_flush; + bool do_sim; +}; + +struct noisy_config *noisy_cfg[RTE_MAX_ETHPORTS]; + +static inline void +do_write(char *vnf_mem) +{ + uint64_t i = rte_rand(); + uint64_t w = rte_rand(); + + vnf_mem[i % ((noisy_lkup_mem_sz * 1024 * 1024) / + RTE_CACHE_LINE_SIZE)] = w; +} + +static inline void +do_read(char *vnf_mem) +{ + uint64_t i = rte_rand(); + uint64_t r; + + r = vnf_mem[i % ((noisy_lkup_mem_sz * 1024 * 1024) / + RTE_CACHE_LINE_SIZE)]; + r++; +} + +static inline void +do_readwrite(char *vnf_mem) +{ + do_read(vnf_mem); + do_write(vnf_mem); +} + +/* + * Simulate route lookups as defined by commandline parameters + */ +static void +sim_memory_lookups(struct noisy_config *ncf, uint16_t nb_pkts) +{ + uint16_t i, j; + + if (!ncf->do_sim) + return; + + for (i = 0; i < nb_pkts; i++) { + for (j = 0; j < noisy_lkup_num_writes; j++) + do_write(ncf->vnf_mem); + for (j = 0; j < noisy_lkup_num_reads; j++) + do_read(ncf->vnf_mem); + for (j = 0; j < noisy_lkup_num_reads_writes; j++) + do_readwrite(ncf->vnf_mem); + } +} + +static uint16_t +do_retry(uint16_t nb_rx, uint16_t nb_tx, struct rte_mbuf **pkts, + struct fwd_stream *fs) +{ + uint32_t retry = 0; + + while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + &pkts[nb_tx], nb_rx - nb_tx); + } + + return nb_tx; +} + +static uint32_t +drop_pkts(struct rte_mbuf **pkts, uint16_t nb_rx, uint16_t nb_tx) +{ + if (nb_tx < nb_rx) { + do { + rte_pktmbuf_free(pkts[nb_tx]); + } while (++nb_tx < nb_rx); + } + + return nb_rx - nb_tx; +} + +/* + * Forwarding of packets in noisy VNF mode. Forward packets but perform + * memory operations first as specified on cmdline. + * + * Depending on which commandline parameters are specified we have + * different cases to handle: + * + * 1. No FIFO size was given, so we don't do buffering of incoming + * packets. This case is pretty much what iofwd does but in this case + * we also do simulation of memory accesses (depending on which + * parameters were specified for it). + * 2. User wants do buffer packets in a FIFO and sent out overflowing + * packets. + * 3. User wants a FIFO and specifies a time in ms to flush all packets + * out of the FIFO + * 4. 
Cases 2 and 3 combined + */ +static void +pkt_burst_noisy_vnf(struct fwd_stream *fs) +{ + const uint64_t freq_khz = rte_get_timer_hz() / 1000; + struct noisy_config *ncf = noisy_cfg[fs->rx_port]; + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mbuf *tmp_pkts[MAX_PKT_BURST]; + uint16_t nb_deqd = 0; + uint16_t nb_rx = 0; + uint16_t nb_tx = 0; + uint16_t nb_enqd; + unsigned int fifo_free; + uint64_t delta_ms; + bool needs_flush = false; + uint64_t now; + + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, + pkts_burst, nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + goto flush; + fs->rx_packets += nb_rx; + + if (!ncf->do_buffering) { + sim_memory_lookups(ncf, nb_rx); + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + pkts_burst, nb_rx); + if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) + nb_tx += do_retry(nb_rx, nb_tx, pkts_burst, fs); + fs->tx_packets += nb_tx; + fs->fwd_dropped += drop_pkts(pkts_burst, nb_rx, nb_tx); + return; + } + + fifo_free = rte_ring_free_count(ncf->f); + if (fifo_free >= nb_rx) { + nb_enqd = rte_ring_enqueue_burst(ncf->f, + (void **) pkts_burst, nb_rx, NULL); + if (nb_enqd < nb_rx) + fs->fwd_dropped += drop_pkts(pkts_burst, + nb_rx, nb_enqd); + } else { + nb_deqd = rte_ring_dequeue_burst(ncf->f, + (void **) tmp_pkts, nb_rx, NULL); + nb_enqd = rte_ring_enqueue_burst(ncf->f, + (void **) pkts_burst, nb_deqd, NULL); + if (nb_deqd > 0) { + nb_tx = rte_eth_tx_burst(fs->tx_port, + fs->tx_queue, tmp_pkts, + nb_deqd); + if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) + nb_tx += do_retry(nb_rx, nb_tx, tmp_pkts, fs); + fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, nb_tx); + } + } + + sim_memory_lookups(ncf, nb_enqd); + +flush: + if (ncf->do_flush) { + if (!ncf->prev_time) + now = ncf->prev_time = rte_get_timer_cycles(); + else + now = rte_get_timer_cycles(); + delta_ms = (now - ncf->prev_time) / freq_khz; + needs_flush = delta_ms >= noisy_tx_sw_buf_flush_time && + noisy_tx_sw_buf_flush_time > 0 && !nb_tx; + } + while (needs_flush && !rte_ring_empty(ncf->f)) { + unsigned int sent; + nb_deqd = rte_ring_dequeue_burst(ncf->f, (void **)tmp_pkts, + MAX_PKT_BURST, NULL); + sent = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + tmp_pkts, nb_deqd); + if (unlikely(sent < nb_deqd) && fs->retry_enabled) + nb_tx += do_retry(nb_rx, nb_tx, tmp_pkts, fs); + fs->fwd_dropped += drop_pkts(tmp_pkts, nb_deqd, sent); + ncf->prev_time = rte_get_timer_cycles(); + } +} + +#define NOISY_STRSIZE 256 +#define NOISY_RING "noisy_ring_%d\n" + +static void +noisy_fwd_end(portid_t pi) +{ + rte_ring_free(noisy_cfg[pi]->f); + rte_free(noisy_cfg[pi]->vnf_mem); + rte_free(noisy_cfg[pi]); +} + +static void +noisy_fwd_begin(portid_t pi) +{ + struct noisy_config *n; + char name[NOISY_STRSIZE]; + + noisy_cfg[pi] = rte_zmalloc("testpmd noisy fifo and timers", + sizeof(struct noisy_config), + RTE_CACHE_LINE_SIZE); + if (noisy_cfg[pi] == NULL) { + rte_exit(EXIT_FAILURE, + "rte_zmalloc(%d) struct noisy_config) failed\n", + (int) pi); + } + n = noisy_cfg[pi]; + n->do_buffering = noisy_tx_sw_bufsz > 0; + n->do_sim = noisy_lkup_num_writes + noisy_lkup_num_reads + + noisy_lkup_num_reads_writes; + n->do_flush = noisy_tx_sw_buf_flush_time > 0; + + if (n->do_buffering) { + snprintf(name, NOISY_STRSIZE, NOISY_RING, pi); + n->f = rte_ring_create(name, noisy_tx_sw_bufsz, + rte_socket_id(), 0); + if (!n->f) + rte_exit(EXIT_FAILURE, + "rte_ring_create(%d), size %d) failed\n", + (int) pi, + noisy_tx_sw_bufsz); + } + if (noisy_lkup_mem_sz > 0) { + n->vnf_mem = (char *) rte_zmalloc("vnf sim memory", + 
noisy_lkup_mem_sz * 1024 * 1024, + RTE_CACHE_LINE_SIZE); + if (!n->vnf_mem) + rte_exit(EXIT_FAILURE, + "rte_zmalloc(%" PRIu64 ") for vnf memory) failed\n", + noisy_lkup_mem_sz); + } else if (n->do_sim) { + rte_exit(EXIT_FAILURE, + "--noisy-lkup-memory-size must be > 0\n"); + } +} + +struct fwd_engine noisy_vnf_engine = { + .fwd_mode_name = "noisy", + .port_fwd_begin = noisy_fwd_begin, + .port_fwd_end = noisy_fwd_end, + .packet_fwd = pkt_burst_noisy_vnf, +}; diff --git a/src/spdk/dpdk/app/test-pmd/parameters.c b/src/spdk/dpdk/app/test-pmd/parameters.c new file mode 100644 index 000000000..f761e1470 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/parameters.c @@ -0,0 +1,1413 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include <errno.h> +#include <getopt.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <signal.h> +#include <string.h> +#include <time.h> +#include <fcntl.h> +#include <sys/types.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> +#include <arpa/inet.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_string_fns.h> +#ifdef RTE_LIBRTE_PMD_BOND +#include <rte_eth_bond.h> +#endif +#include <rte_flow.h> + +#include "testpmd.h" + +static void +usage(char* progname) +{ + printf("usage: %s [EAL options] -- " +#ifdef RTE_LIBRTE_CMDLINE + "[--interactive|-i] " + "[--cmdline-file=FILENAME] " +#endif + "[--help|-h] | [--auto-start|-a] | [" + "--tx-first | --stats-period=PERIOD | " + "--coremask=COREMASK --portmask=PORTMASK --numa " + "--portlist=PORTLIST " + "--mbuf-size= | --total-num-mbufs= | " + "--nb-cores= | --nb-ports= | " +#ifdef RTE_LIBRTE_CMDLINE + "--eth-peers-configfile= | " + "--eth-peer=X,M:M:M:M:M:M | " + "--tx-ip=SRC,DST | --tx-udp=PORT | " +#endif + "--pkt-filter-mode= |" + "--rss-ip | --rss-udp | " + "--rxpt= | --rxht= | --rxwt= | --rxfreet= | " + "--txpt= | --txht= | --txwt= | --txfreet= | " + "--txrst= | --tx-offloads= | | --rx-offloads= | " + "--vxlan-gpe-port= ]\n", + progname); +#ifdef RTE_LIBRTE_CMDLINE + printf(" --interactive: run in interactive mode.\n"); + printf(" --cmdline-file: execute cli commands before startup.\n"); +#endif + printf(" --auto-start: start forwarding on init " + "[always when non-interactive].\n"); + printf(" --help: display this message and quit.\n"); + printf(" --tx-first: start forwarding sending a burst first " + "(only if interactive is disabled).\n"); + printf(" --stats-period=PERIOD: statistics will be shown " + "every PERIOD seconds (only if interactive is disabled).\n"); + printf(" --nb-cores=N: set the number of forwarding cores " + "(1 <= N <= %d).\n", nb_lcores); + printf(" --nb-ports=N: set the number of forwarding ports " + "(1 <= N <= %d).\n", nb_ports); + printf(" --coremask=COREMASK: hexadecimal bitmask of cores running " + "the packet forwarding test. 
The master lcore is reserved for " + "command line parsing only, and cannot be masked on for " + "packet forwarding.\n"); + printf(" --portmask=PORTMASK: hexadecimal bitmask of ports used " + "by the packet forwarding test.\n"); + printf(" --portlist=PORTLIST: list of forwarding ports\n"); + printf(" --numa: enable NUMA-aware allocation of RX/TX rings and of " + "RX memory buffers (mbufs).\n"); + printf(" --port-numa-config=(port,socket)[,(port,socket)]: " + "specify the socket on which the memory pool " + "used by the port will be allocated.\n"); + printf(" --ring-numa-config=(port,flag,socket)[,(port,flag,socket)]: " + "specify the socket on which the TX/RX rings for " + "the port will be allocated " + "(flag: 1 for RX; 2 for TX; 3 for RX and TX).\n"); + printf(" --socket-num=N: set socket from which all memory is allocated " + "in NUMA mode.\n"); + printf(" --mbuf-size=N: set the data size of mbuf to N bytes.\n"); + printf(" --total-num-mbufs=N: set the number of mbufs to be allocated " + "in mbuf pools.\n"); + printf(" --max-pkt-len=N: set the maximum size of packet to N bytes.\n"); + printf(" --max-lro-pkt-size=N: set the maximum LRO aggregated packet " + "size to N bytes.\n"); +#ifdef RTE_LIBRTE_CMDLINE + printf(" --eth-peers-configfile=name: config file with ethernet addresses " + "of peer ports.\n"); + printf(" --eth-peer=X,M:M:M:M:M:M: set the MAC address of the X peer " + "port (0 <= X < %d).\n", RTE_MAX_ETHPORTS); +#endif + printf(" --pkt-filter-mode=N: set Flow Director mode " + "(N: none (default mode) or signature or perfect).\n"); + printf(" --pkt-filter-report-hash=N: set Flow Director report mode " + "(N: none or match (default) or always).\n"); + printf(" --pkt-filter-size=N: set Flow Director mode " + "(N: 64K (default mode) or 128K or 256K).\n"); + printf(" --pkt-filter-drop-queue=N: set drop-queue. " + "In perfect mode, when you add a rule with queue = -1 " + "the packet will be enqueued into the rx drop-queue. " + "If the drop-queue doesn't exist, the packet is dropped. 
" + "By default drop-queue=127.\n"); +#ifdef RTE_LIBRTE_LATENCY_STATS + printf(" --latencystats=N: enable latency and jitter statistcs " + "monitoring on forwarding lcore id N.\n"); +#endif + printf(" --disable-crc-strip: disable CRC stripping by hardware.\n"); + printf(" --enable-lro: enable large receive offload.\n"); + printf(" --enable-rx-cksum: enable rx hardware checksum offload.\n"); + printf(" --enable-rx-timestamp: enable rx hardware timestamp offload.\n"); + printf(" --enable-hw-vlan: enable hardware vlan.\n"); + printf(" --enable-hw-vlan-filter: enable hardware vlan filter.\n"); + printf(" --enable-hw-vlan-strip: enable hardware vlan strip.\n"); + printf(" --enable-hw-vlan-extend: enable hardware vlan extend.\n"); + printf(" --enable-hw-qinq-strip: enable hardware qinq strip.\n"); + printf(" --enable-drop-en: enable per queue packet drop.\n"); + printf(" --disable-rss: disable rss.\n"); + printf(" --port-topology=<paired|chained|loop>: set port topology (paired " + "is default).\n"); + printf(" --forward-mode=N: set forwarding mode (N: %s).\n", + list_pkt_forwarding_modes()); + printf(" --rss-ip: set RSS functions to IPv4/IPv6 only .\n"); + printf(" --rss-udp: set RSS functions to IPv4/IPv6 + UDP.\n"); + printf(" --rxq=N: set the number of RX queues per port to N.\n"); + printf(" --rxd=N: set the number of descriptors in RX rings to N.\n"); + printf(" --txq=N: set the number of TX queues per port to N.\n"); + printf(" --txd=N: set the number of descriptors in TX rings to N.\n"); + printf(" --hairpinq=N: set the number of hairpin queues per port to " + "N.\n"); + printf(" --burst=N: set the number of packets per burst to N.\n"); + printf(" --mbcache=N: set the cache of mbuf memory pool to N.\n"); + printf(" --rxpt=N: set prefetch threshold register of RX rings to N.\n"); + printf(" --rxht=N: set the host threshold register of RX rings to N.\n"); + printf(" --rxfreet=N: set the free threshold of RX descriptors to N " + "(0 <= N < value of rxd).\n"); + printf(" --rxwt=N: set the write-back threshold register of RX rings to N.\n"); + printf(" --txpt=N: set the prefetch threshold register of TX rings to N.\n"); + printf(" --txht=N: set the nhost threshold register of TX rings to N.\n"); + printf(" --txwt=N: set the write-back threshold register of TX rings to N.\n"); + printf(" --txfreet=N: set the transmit free threshold of TX rings to N " + "(0 <= N <= value of txd).\n"); + printf(" --txrst=N: set the transmit RS bit threshold of TX rings to N " + "(0 <= N <= value of txd).\n"); + printf(" --tx-queue-stats-mapping=(port,queue,mapping)[,(port,queue,mapping]: " + "tx queues statistics counters mapping " + "(0 <= mapping <= %d).\n", RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + printf(" --rx-queue-stats-mapping=(port,queue,mapping)[,(port,queue,mapping]: " + "rx queues statistics counters mapping " + "(0 <= mapping <= %d).\n", RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + printf(" --no-flush-rx: Don't flush RX streams before forwarding." 
+ " Used mainly with PCAP drivers.\n"); + printf(" --txpkts=X[,Y]*: set TX segment sizes" + " or total packet length.\n"); + printf(" --txonly-multi-flow: generate multiple flows in txonly mode\n"); + printf(" --disable-link-check: disable check on link status when " + "starting/stopping ports.\n"); + printf(" --disable-device-start: do not automatically start port\n"); + printf(" --no-lsc-interrupt: disable link status change interrupt.\n"); + printf(" --no-rmv-interrupt: disable device removal interrupt.\n"); + printf(" --bitrate-stats=N: set the logical core N to perform " + "bit-rate calculation.\n"); + printf(" --print-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|flow_aged|all>: " + "enable print of designated event or all of them.\n"); + printf(" --mask-event <unknown|intr_lsc|queue_state|intr_reset|vf_mbox|macsec|intr_rmv|flow_aged|all>: " + "disable print of designated event or all of them.\n"); + printf(" --flow-isolate-all: " + "requests flow API isolated mode on all ports at initialization time.\n"); + printf(" --tx-offloads=0xXXXXXXXX: hexadecimal bitmask of TX queue offloads\n"); + printf(" --rx-offloads=0xXXXXXXXX: hexadecimal bitmask of RX queue offloads\n"); + printf(" --hot-plug: enable hot plug for device.\n"); + printf(" --vxlan-gpe-port=N: UPD port of tunnel VXLAN-GPE\n"); + printf(" --mlockall: lock all memory\n"); + printf(" --no-mlockall: do not lock all memory\n"); + printf(" --mp-alloc <native|anon|xmem|xmemhuge>: mempool allocation method.\n" + " native: use regular DPDK memory to create and populate mempool\n" + " anon: use regular DPDK memory to create and anonymous memory to populate mempool\n" + " xmem: use anonymous memory to create and populate mempool\n" + " xmemhuge: use anonymous hugepage memory to create and populate mempool\n"); + printf(" --noisy-tx-sw-buffer-size=N: size of FIFO buffer\n"); + printf(" --noisy-tx-sw-buffer-flushtime=N: flush FIFO after N ms\n"); + printf(" --noisy-lkup-memory=N: allocate N MB of VNF memory\n"); + printf(" --noisy-lkup-num-writes=N: do N random writes per packet\n"); + printf(" --noisy-lkup-num-reads=N: do N random reads per packet\n"); + printf(" --noisy-lkup-num-writes=N: do N random reads and writes per packet\n"); + printf(" --no-iova-contig: mempool memory can be IOVA non contiguous. " + "valid only with --mp-alloc=anon\n"); + printf(" --rx-mq-mode=0xX: hexadecimal bitmask of RX mq mode can be " + "enabled\n"); +} + +#ifdef RTE_LIBRTE_CMDLINE +static int +init_peer_eth_addrs(char *config_filename) +{ + FILE *config_file; + portid_t i; + char buf[50]; + + config_file = fopen(config_filename, "r"); + if (config_file == NULL) { + perror("Failed to open eth config file\n"); + return -1; + } + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + + if (fgets(buf, sizeof(buf), config_file) == NULL) + break; + + if (rte_ether_unformat_addr(buf, &peer_eth_addrs[i]) < 0) { + printf("Bad MAC address format on line %d\n", i+1); + fclose(config_file); + return -1; + } + } + fclose(config_file); + nb_peer_eth_addrs = (portid_t) i; + return 0; +} +#endif + +/* + * Parse the coremask given as argument (hexadecimal string) and set + * the global configuration of forwarding cores. 
+ */ +static void +parse_fwd_coremask(const char *coremask) +{ + char *end; + unsigned long long int cm; + + /* parse hexadecimal string */ + end = NULL; + cm = strtoull(coremask, &end, 16); + if ((coremask[0] == '\0') || (end == NULL) || (*end != '\0')) + rte_exit(EXIT_FAILURE, "Invalid fwd core mask\n"); + else if (set_fwd_lcores_mask((uint64_t) cm) < 0) + rte_exit(EXIT_FAILURE, "coremask is not valid\n"); +} + +/* + * Parse the coremask given as argument (hexadecimal string) and set + * the global configuration of forwarding cores. + */ +static void +parse_fwd_portmask(const char *portmask) +{ + char *end; + unsigned long long int pm; + + /* parse hexadecimal string */ + end = NULL; + pm = strtoull(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + rte_exit(EXIT_FAILURE, "Invalid fwd port mask\n"); + else + set_fwd_ports_mask((uint64_t) pm); +} + + +static int +parse_queue_stats_mapping_config(const char *q_arg, int is_rx) +{ + char s[256]; + const char *p, *p0 = q_arg; + char *end; + enum fieldnames { + FLD_PORT = 0, + FLD_QUEUE, + FLD_STATS_COUNTER, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + int i; + unsigned size; + + /* reset from value set at definition */ + is_rx ? (nb_rx_queue_stats_mappings = 0) : (nb_tx_queue_stats_mappings = 0); + + while ((p = strchr(p0,'(')) != NULL) { + ++p; + if((p0 = strchr(p,')')) == NULL) + return -1; + + size = p0 - p; + if(size >= sizeof(s)) + return -1; + + snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++){ + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) + return -1; + } + /* Check mapping field is in correct range (0..RTE_ETHDEV_QUEUE_STAT_CNTRS-1) */ + if (int_fld[FLD_STATS_COUNTER] >= RTE_ETHDEV_QUEUE_STAT_CNTRS) { + printf("Stats counter not in the correct range 0..%d\n", + RTE_ETHDEV_QUEUE_STAT_CNTRS - 1); + return -1; + } + + if (!is_rx) { + if ((nb_tx_queue_stats_mappings >= + MAX_TX_QUEUE_STATS_MAPPINGS)) { + printf("exceeded max number of TX queue " + "statistics mappings: %hu\n", + nb_tx_queue_stats_mappings); + return -1; + } + tx_queue_stats_mappings_array[nb_tx_queue_stats_mappings].port_id = + (uint8_t)int_fld[FLD_PORT]; + tx_queue_stats_mappings_array[nb_tx_queue_stats_mappings].queue_id = + (uint8_t)int_fld[FLD_QUEUE]; + tx_queue_stats_mappings_array[nb_tx_queue_stats_mappings].stats_counter_id = + (uint8_t)int_fld[FLD_STATS_COUNTER]; + ++nb_tx_queue_stats_mappings; + } + else { + if ((nb_rx_queue_stats_mappings >= + MAX_RX_QUEUE_STATS_MAPPINGS)) { + printf("exceeded max number of RX queue " + "statistics mappings: %hu\n", + nb_rx_queue_stats_mappings); + return -1; + } + rx_queue_stats_mappings_array[nb_rx_queue_stats_mappings].port_id = + (uint8_t)int_fld[FLD_PORT]; + rx_queue_stats_mappings_array[nb_rx_queue_stats_mappings].queue_id = + (uint8_t)int_fld[FLD_QUEUE]; + rx_queue_stats_mappings_array[nb_rx_queue_stats_mappings].stats_counter_id = + (uint8_t)int_fld[FLD_STATS_COUNTER]; + ++nb_rx_queue_stats_mappings; + } + + } +/* Reassign the rx/tx_queue_stats_mappings pointer to point to this newly populated array rather */ +/* than to the default array (that was set at its definition) */ + is_rx ? 
(rx_queue_stats_mappings = rx_queue_stats_mappings_array) : + (tx_queue_stats_mappings = tx_queue_stats_mappings_array); + return 0; +} + +static void +print_invalid_socket_id_error(void) +{ + unsigned int i = 0; + + printf("Invalid socket id, options are: "); + for (i = 0; i < num_sockets; i++) { + printf("%u%s", socket_ids[i], + (i == num_sockets - 1) ? "\n" : ","); + } +} + +static int +parse_portnuma_config(const char *q_arg) +{ + char s[256]; + const char *p, *p0 = q_arg; + char *end; + uint8_t i, socket_id; + portid_t port_id; + unsigned size; + enum fieldnames { + FLD_PORT = 0, + FLD_SOCKET, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + + /* reset from value set at definition */ + while ((p = strchr(p0,'(')) != NULL) { + ++p; + if((p0 = strchr(p,')')) == NULL) + return -1; + + size = p0 - p; + if(size >= sizeof(s)) + return -1; + + snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++) { + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) + return -1; + } + port_id = (portid_t)int_fld[FLD_PORT]; + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) { + print_valid_ports(); + return -1; + } + socket_id = (uint8_t)int_fld[FLD_SOCKET]; + if (new_socket_id(socket_id)) { + if (num_sockets >= RTE_MAX_NUMA_NODES) { + print_invalid_socket_id_error(); + return -1; + } + socket_ids[num_sockets++] = socket_id; + } + port_numa[port_id] = socket_id; + } + + return 0; +} + +static int +parse_ringnuma_config(const char *q_arg) +{ + char s[256]; + const char *p, *p0 = q_arg; + char *end; + uint8_t i, ring_flag, socket_id; + portid_t port_id; + unsigned size; + enum fieldnames { + FLD_PORT = 0, + FLD_FLAG, + FLD_SOCKET, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + #define RX_RING_ONLY 0x1 + #define TX_RING_ONLY 0x2 + #define RXTX_RING 0x3 + + /* reset from value set at definition */ + while ((p = strchr(p0,'(')) != NULL) { + ++p; + if((p0 = strchr(p,')')) == NULL) + return -1; + + size = p0 - p; + if(size >= sizeof(s)) + return -1; + + snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++) { + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) + return -1; + } + port_id = (portid_t)int_fld[FLD_PORT]; + if (port_id_is_invalid(port_id, ENABLED_WARN) || + port_id == (portid_t)RTE_PORT_ALL) { + print_valid_ports(); + return -1; + } + socket_id = (uint8_t)int_fld[FLD_SOCKET]; + if (new_socket_id(socket_id)) { + if (num_sockets >= RTE_MAX_NUMA_NODES) { + print_invalid_socket_id_error(); + return -1; + } + socket_ids[num_sockets++] = socket_id; + } + ring_flag = (uint8_t)int_fld[FLD_FLAG]; + if ((ring_flag < RX_RING_ONLY) || (ring_flag > RXTX_RING)) { + printf("Invalid ring-flag=%d config for port =%d\n", + ring_flag,port_id); + return -1; + } + + switch (ring_flag & RXTX_RING) { + case RX_RING_ONLY: + rxring_numa[port_id] = socket_id; + break; + case TX_RING_ONLY: + txring_numa[port_id] = socket_id; + break; + case RXTX_RING: + rxring_numa[port_id] = socket_id; + txring_numa[port_id] = socket_id; + break; + default: + printf("Invalid ring-flag=%d config for port=%d\n", + ring_flag,port_id); + break; + } + } + + return 0; +} + +static int +parse_event_printing_config(const char 
*optarg, int enable) +{ + uint32_t mask = 0; + + if (!strcmp(optarg, "unknown")) + mask = UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN; + else if (!strcmp(optarg, "intr_lsc")) + mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC; + else if (!strcmp(optarg, "queue_state")) + mask = UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE; + else if (!strcmp(optarg, "intr_reset")) + mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET; + else if (!strcmp(optarg, "vf_mbox")) + mask = UINT32_C(1) << RTE_ETH_EVENT_VF_MBOX; + else if (!strcmp(optarg, "ipsec")) + mask = UINT32_C(1) << RTE_ETH_EVENT_IPSEC; + else if (!strcmp(optarg, "macsec")) + mask = UINT32_C(1) << RTE_ETH_EVENT_MACSEC; + else if (!strcmp(optarg, "intr_rmv")) + mask = UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV; + else if (!strcmp(optarg, "dev_probed")) + mask = UINT32_C(1) << RTE_ETH_EVENT_NEW; + else if (!strcmp(optarg, "dev_released")) + mask = UINT32_C(1) << RTE_ETH_EVENT_DESTROY; + else if (!strcmp(optarg, "flow_aged")) + mask = UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED; + else if (!strcmp(optarg, "all")) + mask = ~UINT32_C(0); + else { + fprintf(stderr, "Invalid event: %s\n", optarg); + return -1; + } + if (enable) + event_print_mask |= mask; + else + event_print_mask &= ~mask; + return 0; +} + +void +launch_args_parse(int argc, char** argv) +{ + int n, opt; + char **argvopt; + int opt_idx; + portid_t pid; + enum { TX, RX }; + /* Default offloads for all ports. */ + uint64_t rx_offloads = rx_mode.offloads; + uint64_t tx_offloads = tx_mode.offloads; + struct rte_eth_dev_info dev_info; + uint16_t rec_nb_pkts; + int ret; + + static struct option lgopts[] = { + { "help", 0, 0, 0 }, +#ifdef RTE_LIBRTE_CMDLINE + { "interactive", 0, 0, 0 }, + { "cmdline-file", 1, 0, 0 }, + { "auto-start", 0, 0, 0 }, + { "eth-peers-configfile", 1, 0, 0 }, + { "eth-peer", 1, 0, 0 }, +#endif + { "tx-first", 0, 0, 0 }, + { "stats-period", 1, 0, 0 }, + { "ports", 1, 0, 0 }, + { "nb-cores", 1, 0, 0 }, + { "nb-ports", 1, 0, 0 }, + { "coremask", 1, 0, 0 }, + { "portmask", 1, 0, 0 }, + { "portlist", 1, 0, 0 }, + { "numa", 0, 0, 0 }, + { "no-numa", 0, 0, 0 }, + { "mp-anon", 0, 0, 0 }, + { "port-numa-config", 1, 0, 0 }, + { "ring-numa-config", 1, 0, 0 }, + { "socket-num", 1, 0, 0 }, + { "mbuf-size", 1, 0, 0 }, + { "total-num-mbufs", 1, 0, 0 }, + { "max-pkt-len", 1, 0, 0 }, + { "max-lro-pkt-size", 1, 0, 0 }, + { "pkt-filter-mode", 1, 0, 0 }, + { "pkt-filter-report-hash", 1, 0, 0 }, + { "pkt-filter-size", 1, 0, 0 }, + { "pkt-filter-drop-queue", 1, 0, 0 }, +#ifdef RTE_LIBRTE_LATENCY_STATS + { "latencystats", 1, 0, 0 }, +#endif +#ifdef RTE_LIBRTE_BITRATE + { "bitrate-stats", 1, 0, 0 }, +#endif + { "disable-crc-strip", 0, 0, 0 }, + { "enable-lro", 0, 0, 0 }, + { "enable-rx-cksum", 0, 0, 0 }, + { "enable-rx-timestamp", 0, 0, 0 }, + { "enable-scatter", 0, 0, 0 }, + { "enable-hw-vlan", 0, 0, 0 }, + { "enable-hw-vlan-filter", 0, 0, 0 }, + { "enable-hw-vlan-strip", 0, 0, 0 }, + { "enable-hw-vlan-extend", 0, 0, 0 }, + { "enable-hw-qinq-strip", 0, 0, 0 }, + { "enable-drop-en", 0, 0, 0 }, + { "disable-rss", 0, 0, 0 }, + { "port-topology", 1, 0, 0 }, + { "forward-mode", 1, 0, 0 }, + { "rss-ip", 0, 0, 0 }, + { "rss-udp", 0, 0, 0 }, + { "rxq", 1, 0, 0 }, + { "txq", 1, 0, 0 }, + { "rxd", 1, 0, 0 }, + { "txd", 1, 0, 0 }, + { "hairpinq", 1, 0, 0 }, + { "burst", 1, 0, 0 }, + { "mbcache", 1, 0, 0 }, + { "txpt", 1, 0, 0 }, + { "txht", 1, 0, 0 }, + { "txwt", 1, 0, 0 }, + { "txfreet", 1, 0, 0 }, + { "txrst", 1, 0, 0 }, + { "rxpt", 1, 0, 0 }, + { "rxht", 1, 0, 0 }, + { "rxwt", 1, 0, 0 }, + { "rxfreet", 1, 0, 0 }, + { 
"tx-queue-stats-mapping", 1, 0, 0 }, + { "rx-queue-stats-mapping", 1, 0, 0 }, + { "no-flush-rx", 0, 0, 0 }, + { "flow-isolate-all", 0, 0, 0 }, + { "txpkts", 1, 0, 0 }, + { "txonly-multi-flow", 0, 0, 0 }, + { "disable-link-check", 0, 0, 0 }, + { "disable-device-start", 0, 0, 0 }, + { "no-lsc-interrupt", 0, 0, 0 }, + { "no-rmv-interrupt", 0, 0, 0 }, + { "print-event", 1, 0, 0 }, + { "mask-event", 1, 0, 0 }, + { "tx-offloads", 1, 0, 0 }, + { "rx-offloads", 1, 0, 0 }, + { "hot-plug", 0, 0, 0 }, + { "vxlan-gpe-port", 1, 0, 0 }, + { "mlockall", 0, 0, 0 }, + { "no-mlockall", 0, 0, 0 }, + { "mp-alloc", 1, 0, 0 }, + { "tx-ip", 1, 0, 0 }, + { "tx-udp", 1, 0, 0 }, + { "noisy-tx-sw-buffer-size", 1, 0, 0 }, + { "noisy-tx-sw-buffer-flushtime", 1, 0, 0 }, + { "noisy-lkup-memory", 1, 0, 0 }, + { "noisy-lkup-num-writes", 1, 0, 0 }, + { "noisy-lkup-num-reads", 1, 0, 0 }, + { "noisy-lkup-num-reads-writes", 1, 0, 0 }, + { "no-iova-contig", 0, 0, 0 }, + { "rx-mq-mode", 1, 0, 0 }, + { 0, 0, 0, 0 }, + }; + + argvopt = argv; + +#ifdef RTE_LIBRTE_CMDLINE +#define SHORTOPTS "i" +#else +#define SHORTOPTS "" +#endif + while ((opt = getopt_long(argc, argvopt, SHORTOPTS "ah", + lgopts, &opt_idx)) != EOF) { + switch (opt) { +#ifdef RTE_LIBRTE_CMDLINE + case 'i': + printf("Interactive-mode selected\n"); + interactive = 1; + break; +#endif + case 'a': + printf("Auto-start selected\n"); + auto_start = 1; + break; + + case 0: /*long options */ + if (!strcmp(lgopts[opt_idx].name, "help")) { + usage(argv[0]); + rte_exit(EXIT_SUCCESS, "Displayed help\n"); + } +#ifdef RTE_LIBRTE_CMDLINE + if (!strcmp(lgopts[opt_idx].name, "interactive")) { + printf("Interactive-mode selected\n"); + interactive = 1; + } + if (!strcmp(lgopts[opt_idx].name, "cmdline-file")) { + printf("CLI commands to be read from %s\n", + optarg); + strlcpy(cmdline_filename, optarg, + sizeof(cmdline_filename)); + } + if (!strcmp(lgopts[opt_idx].name, "auto-start")) { + printf("Auto-start selected\n"); + auto_start = 1; + } + if (!strcmp(lgopts[opt_idx].name, "tx-first")) { + printf("Ports to start sending a burst of " + "packets first\n"); + tx_first = 1; + } + if (!strcmp(lgopts[opt_idx].name, "stats-period")) { + char *end = NULL; + unsigned int n; + + n = strtoul(optarg, &end, 10); + if ((optarg[0] == '\0') || (end == NULL) || + (*end != '\0')) + break; + + stats_period = n; + break; + } + if (!strcmp(lgopts[opt_idx].name, + "eth-peers-configfile")) { + if (init_peer_eth_addrs(optarg) != 0) + rte_exit(EXIT_FAILURE, + "Cannot open logfile\n"); + } + if (!strcmp(lgopts[opt_idx].name, "eth-peer")) { + char *port_end; + + errno = 0; + n = strtoul(optarg, &port_end, 10); + if (errno != 0 || port_end == optarg || *port_end++ != ',') + rte_exit(EXIT_FAILURE, + "Invalid eth-peer: %s", optarg); + if (n >= RTE_MAX_ETHPORTS) + rte_exit(EXIT_FAILURE, + "eth-peer: port %d >= RTE_MAX_ETHPORTS(%d)\n", + n, RTE_MAX_ETHPORTS); + + if (rte_ether_unformat_addr(port_end, + &peer_eth_addrs[n]) < 0) + rte_exit(EXIT_FAILURE, + "Invalid ethernet address: %s\n", + port_end); + nb_peer_eth_addrs++; + } +#endif + if (!strcmp(lgopts[opt_idx].name, "tx-ip")) { + struct in_addr in; + char *end; + + end = strchr(optarg, ','); + if (end == optarg || !end) + rte_exit(EXIT_FAILURE, + "Invalid tx-ip: %s", optarg); + + *end++ = 0; + if (inet_aton(optarg, &in) == 0) + rte_exit(EXIT_FAILURE, + "Invalid source IP address: %s\n", + optarg); + tx_ip_src_addr = rte_be_to_cpu_32(in.s_addr); + + if (inet_aton(end, &in) == 0) + rte_exit(EXIT_FAILURE, + "Invalid destination IP address: %s\n", + optarg); + 
tx_ip_dst_addr = rte_be_to_cpu_32(in.s_addr); + } + if (!strcmp(lgopts[opt_idx].name, "tx-udp")) { + char *end = NULL; + + errno = 0; + n = strtoul(optarg, &end, 10); + if (errno != 0 || end == optarg || + n > UINT16_MAX || + !(*end == '\0' || *end == ',')) + rte_exit(EXIT_FAILURE, + "Invalid UDP port: %s\n", + optarg); + tx_udp_src_port = n; + if (*end == ',') { + char *dst = end + 1; + + n = strtoul(dst, &end, 10); + if (errno != 0 || end == dst || + n > UINT16_MAX || *end) + rte_exit(EXIT_FAILURE, + "Invalid destination UDP port: %s\n", + dst); + tx_udp_dst_port = n; + } else { + tx_udp_dst_port = n; + } + + } + if (!strcmp(lgopts[opt_idx].name, "nb-ports")) { + n = atoi(optarg); + if (n > 0 && n <= nb_ports) + nb_fwd_ports = n; + else + rte_exit(EXIT_FAILURE, + "Invalid port %d\n", n); + } + if (!strcmp(lgopts[opt_idx].name, "nb-cores")) { + n = atoi(optarg); + if (n > 0 && n <= nb_lcores) + nb_fwd_lcores = (uint8_t) n; + else + rte_exit(EXIT_FAILURE, + "nb-cores should be > 0 and <= %d\n", + nb_lcores); + } + if (!strcmp(lgopts[opt_idx].name, "coremask")) + parse_fwd_coremask(optarg); + if (!strcmp(lgopts[opt_idx].name, "portmask")) + parse_fwd_portmask(optarg); + if (!strcmp(lgopts[opt_idx].name, "portlist")) + parse_fwd_portlist(optarg); + if (!strcmp(lgopts[opt_idx].name, "no-numa")) + numa_support = 0; + if (!strcmp(lgopts[opt_idx].name, "numa")) + numa_support = 1; + if (!strcmp(lgopts[opt_idx].name, "mp-anon")) { + mp_alloc_type = MP_ALLOC_ANON; + } + if (!strcmp(lgopts[opt_idx].name, "mp-alloc")) { + if (!strcmp(optarg, "native")) + mp_alloc_type = MP_ALLOC_NATIVE; + else if (!strcmp(optarg, "anon")) + mp_alloc_type = MP_ALLOC_ANON; + else if (!strcmp(optarg, "xmem")) + mp_alloc_type = MP_ALLOC_XMEM; + else if (!strcmp(optarg, "xmemhuge")) + mp_alloc_type = MP_ALLOC_XMEM_HUGE; + else if (!strcmp(optarg, "xbuf")) + mp_alloc_type = MP_ALLOC_XBUF; + else + rte_exit(EXIT_FAILURE, + "mp-alloc %s invalid - must be: " + "native, anon, xmem or xmemhuge\n", + optarg); + } + if (!strcmp(lgopts[opt_idx].name, "port-numa-config")) { + if (parse_portnuma_config(optarg)) + rte_exit(EXIT_FAILURE, + "invalid port-numa configuration\n"); + } + if (!strcmp(lgopts[opt_idx].name, "ring-numa-config")) + if (parse_ringnuma_config(optarg)) + rte_exit(EXIT_FAILURE, + "invalid ring-numa configuration\n"); + if (!strcmp(lgopts[opt_idx].name, "socket-num")) { + n = atoi(optarg); + if (!new_socket_id((uint8_t)n)) { + socket_num = (uint8_t)n; + } else { + print_invalid_socket_id_error(); + rte_exit(EXIT_FAILURE, + "Invalid socket id"); + } + } + if (!strcmp(lgopts[opt_idx].name, "mbuf-size")) { + n = atoi(optarg); + if (n > 0 && n <= 0xFFFF) + mbuf_data_size = (uint16_t) n; + else + rte_exit(EXIT_FAILURE, + "mbuf-size should be > 0 and < 65536\n"); + } + if (!strcmp(lgopts[opt_idx].name, "total-num-mbufs")) { + n = atoi(optarg); + if (n > 1024) + param_total_num_mbufs = (unsigned)n; + else + rte_exit(EXIT_FAILURE, + "total-num-mbufs should be > 1024\n"); + } + if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) { + n = atoi(optarg); + if (n >= RTE_ETHER_MIN_LEN) { + rx_mode.max_rx_pkt_len = (uint32_t) n; + if (n > RTE_ETHER_MAX_LEN) + rx_offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + } else + rte_exit(EXIT_FAILURE, + "Invalid max-pkt-len=%d - should be > %d\n", + n, RTE_ETHER_MIN_LEN); + } + if (!strcmp(lgopts[opt_idx].name, "max-lro-pkt-size")) { + n = atoi(optarg); + rx_mode.max_lro_pkt_size = (uint32_t) n; + } + if (!strcmp(lgopts[opt_idx].name, "pkt-filter-mode")) { + if (!strcmp(optarg, "signature")) + 
fdir_conf.mode = + RTE_FDIR_MODE_SIGNATURE; + else if (!strcmp(optarg, "perfect")) + fdir_conf.mode = RTE_FDIR_MODE_PERFECT; + else if (!strcmp(optarg, "perfect-mac-vlan")) + fdir_conf.mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN; + else if (!strcmp(optarg, "perfect-tunnel")) + fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL; + else if (!strcmp(optarg, "none")) + fdir_conf.mode = RTE_FDIR_MODE_NONE; + else + rte_exit(EXIT_FAILURE, + "pkt-mode-invalid %s invalid - must be: " + "none, signature, perfect, perfect-mac-vlan" + " or perfect-tunnel\n", + optarg); + } + if (!strcmp(lgopts[opt_idx].name, + "pkt-filter-report-hash")) { + if (!strcmp(optarg, "none")) + fdir_conf.status = + RTE_FDIR_NO_REPORT_STATUS; + else if (!strcmp(optarg, "match")) + fdir_conf.status = + RTE_FDIR_REPORT_STATUS; + else if (!strcmp(optarg, "always")) + fdir_conf.status = + RTE_FDIR_REPORT_STATUS_ALWAYS; + else + rte_exit(EXIT_FAILURE, + "pkt-filter-report-hash %s invalid " + "- must be: none or match or always\n", + optarg); + } + if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) { + if (!strcmp(optarg, "64K")) + fdir_conf.pballoc = + RTE_FDIR_PBALLOC_64K; + else if (!strcmp(optarg, "128K")) + fdir_conf.pballoc = + RTE_FDIR_PBALLOC_128K; + else if (!strcmp(optarg, "256K")) + fdir_conf.pballoc = + RTE_FDIR_PBALLOC_256K; + else + rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -" + " must be: 64K or 128K or 256K\n", + optarg); + } + if (!strcmp(lgopts[opt_idx].name, + "pkt-filter-drop-queue")) { + n = atoi(optarg); + if (n >= 0) + fdir_conf.drop_queue = (uint8_t) n; + else + rte_exit(EXIT_FAILURE, + "drop queue %d invalid - must" + "be >= 0 \n", n); + } +#ifdef RTE_LIBRTE_LATENCY_STATS + if (!strcmp(lgopts[opt_idx].name, + "latencystats")) { + n = atoi(optarg); + if (n >= 0) { + latencystats_lcore_id = (lcoreid_t) n; + latencystats_enabled = 1; + } else + rte_exit(EXIT_FAILURE, + "invalid lcore id %d for latencystats" + " must be >= 0\n", n); + } +#endif +#ifdef RTE_LIBRTE_BITRATE + if (!strcmp(lgopts[opt_idx].name, "bitrate-stats")) { + n = atoi(optarg); + if (n >= 0) { + bitrate_lcore_id = (lcoreid_t) n; + bitrate_enabled = 1; + } else + rte_exit(EXIT_FAILURE, + "invalid lcore id %d for bitrate stats" + " must be >= 0\n", n); + } +#endif + if (!strcmp(lgopts[opt_idx].name, "disable-crc-strip")) + rx_offloads |= DEV_RX_OFFLOAD_KEEP_CRC; + if (!strcmp(lgopts[opt_idx].name, "enable-lro")) + rx_offloads |= DEV_RX_OFFLOAD_TCP_LRO; + if (!strcmp(lgopts[opt_idx].name, "enable-scatter")) + rx_offloads |= DEV_RX_OFFLOAD_SCATTER; + if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum")) + rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM; + if (!strcmp(lgopts[opt_idx].name, + "enable-rx-timestamp")) + rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP; + if (!strcmp(lgopts[opt_idx].name, "enable-hw-vlan")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN; + + if (!strcmp(lgopts[opt_idx].name, + "enable-hw-vlan-filter")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + + if (!strcmp(lgopts[opt_idx].name, + "enable-hw-vlan-strip")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + + if (!strcmp(lgopts[opt_idx].name, + "enable-hw-vlan-extend")) + rx_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; + + if (!strcmp(lgopts[opt_idx].name, + "enable-hw-qinq-strip")) + rx_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; + + if (!strcmp(lgopts[opt_idx].name, "enable-drop-en")) + rx_drop_en = 1; + + if (!strcmp(lgopts[opt_idx].name, "disable-rss")) + rss_hf = 0; + if (!strcmp(lgopts[opt_idx].name, "port-topology")) { + if (!strcmp(optarg, "paired")) + port_topology = PORT_TOPOLOGY_PAIRED; + else 
if (!strcmp(optarg, "chained")) + port_topology = PORT_TOPOLOGY_CHAINED; + else if (!strcmp(optarg, "loop")) + port_topology = PORT_TOPOLOGY_LOOP; + else + rte_exit(EXIT_FAILURE, "port-topology %s invalid -" + " must be: paired, chained or loop\n", + optarg); + } + if (!strcmp(lgopts[opt_idx].name, "forward-mode")) + set_pkt_forwarding_mode(optarg); + if (!strcmp(lgopts[opt_idx].name, "rss-ip")) + rss_hf = ETH_RSS_IP; + if (!strcmp(lgopts[opt_idx].name, "rss-udp")) + rss_hf = ETH_RSS_UDP; + if (!strcmp(lgopts[opt_idx].name, "rxq")) { + n = atoi(optarg); + if (n >= 0 && check_nb_rxq((queueid_t)n) == 0) + nb_rxq = (queueid_t) n; + else + rte_exit(EXIT_FAILURE, "rxq %d invalid - must be" + " >= 0 && <= %u\n", n, + get_allowed_max_nb_rxq(&pid)); + } + if (!strcmp(lgopts[opt_idx].name, "txq")) { + n = atoi(optarg); + if (n >= 0 && check_nb_txq((queueid_t)n) == 0) + nb_txq = (queueid_t) n; + else + rte_exit(EXIT_FAILURE, "txq %d invalid - must be" + " >= 0 && <= %u\n", n, + get_allowed_max_nb_txq(&pid)); + } + if (!strcmp(lgopts[opt_idx].name, "hairpinq")) { + n = atoi(optarg); + if (n >= 0 && + check_nb_hairpinq((queueid_t)n) == 0) + nb_hairpinq = (queueid_t) n; + else + rte_exit(EXIT_FAILURE, "txq %d invalid - must be" + " >= 0 && <= %u\n", n, + get_allowed_max_nb_hairpinq + (&pid)); + if ((n + nb_txq) < 0 || + check_nb_txq((queueid_t)(n + nb_txq)) != 0) + rte_exit(EXIT_FAILURE, "txq + hairpinq " + "%d invalid - must be" + " >= 0 && <= %u\n", + n + nb_txq, + get_allowed_max_nb_txq(&pid)); + if ((n + nb_rxq) < 0 || + check_nb_rxq((queueid_t)(n + nb_rxq)) != 0) + rte_exit(EXIT_FAILURE, "rxq + hairpinq " + "%d invalid - must be" + " >= 0 && <= %u\n", + n + nb_rxq, + get_allowed_max_nb_rxq(&pid)); + } + if (!nb_rxq && !nb_txq) { + rte_exit(EXIT_FAILURE, "Either rx or tx queues should " + "be non-zero\n"); + } + if (!strcmp(lgopts[opt_idx].name, "burst")) { + n = atoi(optarg); + if (n == 0) { + /* A burst size of zero means that the + * PMD should be queried for + * recommended Rx burst size. Since + * testpmd uses a single size for all + * ports, port 0 is queried for the + * value, on the assumption that all + * ports are of the same NIC model. + */ + ret = eth_dev_info_get_print_err( + 0, + &dev_info); + if (ret != 0) + return; + + rec_nb_pkts = dev_info + .default_rxportconf.burst_size; + + if (rec_nb_pkts == 0) + rte_exit(EXIT_FAILURE, + "PMD does not recommend a burst size. 
" + "Provided value must be between " + "1 and %d\n", MAX_PKT_BURST); + else if (rec_nb_pkts > MAX_PKT_BURST) + rte_exit(EXIT_FAILURE, + "PMD recommended burst size of %d" + " exceeds maximum value of %d\n", + rec_nb_pkts, MAX_PKT_BURST); + printf("Using PMD-provided burst value of %d\n", + rec_nb_pkts); + nb_pkt_per_burst = rec_nb_pkts; + } else if (n > MAX_PKT_BURST) + rte_exit(EXIT_FAILURE, + "burst must be between1 and %d\n", + MAX_PKT_BURST); + else + nb_pkt_per_burst = (uint16_t) n; + } + if (!strcmp(lgopts[opt_idx].name, "mbcache")) { + n = atoi(optarg); + if ((n >= 0) && + (n <= RTE_MEMPOOL_CACHE_MAX_SIZE)) + mb_mempool_cache = (uint16_t) n; + else + rte_exit(EXIT_FAILURE, + "mbcache must be >= 0 and <= %d\n", + RTE_MEMPOOL_CACHE_MAX_SIZE); + } + if (!strcmp(lgopts[opt_idx].name, "txfreet")) { + n = atoi(optarg); + if (n >= 0) + tx_free_thresh = (int16_t)n; + else + rte_exit(EXIT_FAILURE, "txfreet must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txrst")) { + n = atoi(optarg); + if (n >= 0) + tx_rs_thresh = (int16_t)n; + else + rte_exit(EXIT_FAILURE, "txrst must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxd")) { + n = atoi(optarg); + if (n > 0) { + if (rx_free_thresh >= n) + rte_exit(EXIT_FAILURE, + "rxd must be > " + "rx_free_thresh(%d)\n", + (int)rx_free_thresh); + else + nb_rxd = (uint16_t) n; + } else + rte_exit(EXIT_FAILURE, + "rxd(%d) invalid - must be > 0\n", + n); + } + if (!strcmp(lgopts[opt_idx].name, "txd")) { + n = atoi(optarg); + if (n > 0) + nb_txd = (uint16_t) n; + else + rte_exit(EXIT_FAILURE, "txd must be in > 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txpt")) { + n = atoi(optarg); + if (n >= 0) + tx_pthresh = (int8_t)n; + else + rte_exit(EXIT_FAILURE, "txpt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txht")) { + n = atoi(optarg); + if (n >= 0) + tx_hthresh = (int8_t)n; + else + rte_exit(EXIT_FAILURE, "txht must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txwt")) { + n = atoi(optarg); + if (n >= 0) + tx_wthresh = (int8_t)n; + else + rte_exit(EXIT_FAILURE, "txwt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxpt")) { + n = atoi(optarg); + if (n >= 0) + rx_pthresh = (int8_t)n; + else + rte_exit(EXIT_FAILURE, "rxpt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxht")) { + n = atoi(optarg); + if (n >= 0) + rx_hthresh = (int8_t)n; + else + rte_exit(EXIT_FAILURE, "rxht must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxwt")) { + n = atoi(optarg); + if (n >= 0) + rx_wthresh = (int8_t)n; + else + rte_exit(EXIT_FAILURE, "rxwt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxfreet")) { + n = atoi(optarg); + if (n >= 0) + rx_free_thresh = (int16_t)n; + else + rte_exit(EXIT_FAILURE, "rxfreet must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "tx-queue-stats-mapping")) { + if (parse_queue_stats_mapping_config(optarg, TX)) { + rte_exit(EXIT_FAILURE, + "invalid TX queue statistics mapping config entered\n"); + } + } + if (!strcmp(lgopts[opt_idx].name, "rx-queue-stats-mapping")) { + if (parse_queue_stats_mapping_config(optarg, RX)) { + rte_exit(EXIT_FAILURE, + "invalid RX queue statistics mapping config entered\n"); + } + } + if (!strcmp(lgopts[opt_idx].name, "txpkts")) { + unsigned seg_lengths[RTE_MAX_SEGS_PER_PKT]; + unsigned int nb_segs; + + nb_segs = parse_item_list(optarg, "txpkt segments", + RTE_MAX_SEGS_PER_PKT, seg_lengths, 0); + if (nb_segs > 0) + set_tx_pkt_segments(seg_lengths, nb_segs); + else + rte_exit(EXIT_FAILURE, "bad txpkts\n"); + } + if 
(!strcmp(lgopts[opt_idx].name, "txonly-multi-flow")) + txonly_multi_flow = 1; + if (!strcmp(lgopts[opt_idx].name, "no-flush-rx")) + no_flush_rx = 1; + if (!strcmp(lgopts[opt_idx].name, "disable-link-check")) + no_link_check = 1; + if (!strcmp(lgopts[opt_idx].name, "disable-device-start")) + no_device_start = 1; + if (!strcmp(lgopts[opt_idx].name, "no-lsc-interrupt")) + lsc_interrupt = 0; + if (!strcmp(lgopts[opt_idx].name, "no-rmv-interrupt")) + rmv_interrupt = 0; + if (!strcmp(lgopts[opt_idx].name, "flow-isolate-all")) + flow_isolate_all = 1; + if (!strcmp(lgopts[opt_idx].name, "tx-offloads")) { + char *end = NULL; + n = strtoull(optarg, &end, 16); + if (n >= 0) + tx_offloads = (uint64_t)n; + else + rte_exit(EXIT_FAILURE, + "tx-offloads must be >= 0\n"); + } + + if (!strcmp(lgopts[opt_idx].name, "rx-offloads")) { + char *end = NULL; + n = strtoull(optarg, &end, 16); + if (n >= 0) + rx_offloads = (uint64_t)n; + else + rte_exit(EXIT_FAILURE, + "rx-offloads must be >= 0\n"); + } + + if (!strcmp(lgopts[opt_idx].name, "vxlan-gpe-port")) { + n = atoi(optarg); + if (n >= 0) + vxlan_gpe_udp_port = (uint16_t)n; + else + rte_exit(EXIT_FAILURE, + "vxlan-gpe-port must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "print-event")) + if (parse_event_printing_config(optarg, 1)) { + rte_exit(EXIT_FAILURE, + "invalid print-event argument\n"); + } + if (!strcmp(lgopts[opt_idx].name, "mask-event")) + if (parse_event_printing_config(optarg, 0)) { + rte_exit(EXIT_FAILURE, + "invalid mask-event argument\n"); + } + if (!strcmp(lgopts[opt_idx].name, "hot-plug")) + hot_plug = 1; + if (!strcmp(lgopts[opt_idx].name, "mlockall")) + do_mlockall = 1; + if (!strcmp(lgopts[opt_idx].name, "no-mlockall")) + do_mlockall = 0; + if (!strcmp(lgopts[opt_idx].name, + "noisy-tx-sw-buffer-size")) { + n = atoi(optarg); + if (n >= 0) + noisy_tx_sw_bufsz = n; + else + rte_exit(EXIT_FAILURE, + "noisy-tx-sw-buffer-size must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, + "noisy-tx-sw-buffer-flushtime")) { + n = atoi(optarg); + if (n >= 0) + noisy_tx_sw_buf_flush_time = n; + else + rte_exit(EXIT_FAILURE, + "noisy-tx-sw-buffer-flushtime must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, + "noisy-lkup-memory")) { + n = atoi(optarg); + if (n >= 0) + noisy_lkup_mem_sz = n; + else + rte_exit(EXIT_FAILURE, + "noisy-lkup-memory must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, + "noisy-lkup-num-writes")) { + n = atoi(optarg); + if (n >= 0) + noisy_lkup_num_writes = n; + else + rte_exit(EXIT_FAILURE, + "noisy-lkup-num-writes must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, + "noisy-lkup-num-reads")) { + n = atoi(optarg); + if (n >= 0) + noisy_lkup_num_reads = n; + else + rte_exit(EXIT_FAILURE, + "noisy-lkup-num-reads must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, + "noisy-lkup-num-reads-writes")) { + n = atoi(optarg); + if (n >= 0) + noisy_lkup_num_reads_writes = n; + else + rte_exit(EXIT_FAILURE, + "noisy-lkup-num-reads-writes must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "no-iova-contig")) + mempool_flags = MEMPOOL_F_NO_IOVA_CONTIG; + + if (!strcmp(lgopts[opt_idx].name, "rx-mq-mode")) { + char *end = NULL; + n = strtoul(optarg, &end, 16); + if (n >= 0 && n <= ETH_MQ_RX_VMDQ_DCB_RSS) + rx_mq_mode = (enum rte_eth_rx_mq_mode)n; + else + rte_exit(EXIT_FAILURE, + "rx-mq-mode must be >= 0 and <= %d\n", + ETH_MQ_RX_VMDQ_DCB_RSS); + } + break; + case 'h': + usage(argv[0]); + rte_exit(EXIT_SUCCESS, "Displayed help\n"); + break; + default: + usage(argv[0]); + printf("Invalid option: %s\n", 
argv[optind]); + rte_exit(EXIT_FAILURE, + "Command line is incomplete or incorrect\n"); + break; + } + } + + if (optind != argc) { + usage(argv[0]); + printf("Invalid parameter: %s\n", argv[optind]); + rte_exit(EXIT_FAILURE, "Command line is incorrect\n"); + } + + /* Set offload configuration from command line parameters. */ + rx_mode.offloads = rx_offloads; + tx_mode.offloads = tx_offloads; + + if (mempool_flags & MEMPOOL_F_NO_IOVA_CONTIG && + mp_alloc_type != MP_ALLOC_ANON) { + TESTPMD_LOG(WARNING, "cannot use no-iova-contig without " + "mp-alloc=anon. mempool no-iova-contig is " + "ignored\n"); + mempool_flags = 0; + } +} diff --git a/src/spdk/dpdk/app/test-pmd/rxonly.c b/src/spdk/dpdk/app/test-pmd/rxonly.c new file mode 100644 index 000000000..5c65fc42d --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/rxonly.c @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#include <stdarg.h> +#include <string.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_string_fns.h> +#include <rte_ip.h> +#include <rte_udp.h> +#include <rte_net.h> +#include <rte_flow.h> + +#include "testpmd.h" + +/* + * Received a burst of packets. + */ +static void +pkt_burst_receive(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + uint16_t nb_rx; + uint16_t i; + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; + + start_tsc = rte_rdtsc(); +#endif + + /* + * Receive a burst of packets. 
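+ * Packets are counted and then freed immediately; nothing is retransmitted,
+ * so this engine acts as a pure receive-only mode.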
+ */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + fs->rx_packets += nb_rx; + for (i = 0; i < nb_rx; i++) + rte_pktmbuf_free(pkts_burst[i]); + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine rx_only_engine = { + .fwd_mode_name = "rxonly", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_receive, +}; diff --git a/src/spdk/dpdk/app/test-pmd/softnicfwd.c b/src/spdk/dpdk/app/test-pmd/softnicfwd.c new file mode 100644 index 000000000..e9d437364 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/softnicfwd.c @@ -0,0 +1,686 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Intel Corporation + */ +#include <stdio.h> +#include <sys/stat.h> + +#include <rte_cycles.h> +#include <rte_mbuf.h> +#include <rte_malloc.h> +#include <rte_ethdev.h> +#include <rte_flow.h> +#include <rte_meter.h> +#include <rte_eth_softnic.h> +#include <rte_tm.h> + +#include "testpmd.h" + +#define SUBPORT_NODES_PER_PORT 1 +#define PIPE_NODES_PER_SUBPORT 4096 +#define TC_NODES_PER_PIPE 4 +#define QUEUE_NODES_PER_TC 4 + +#define NUM_PIPE_NODES \ + (SUBPORT_NODES_PER_PORT * PIPE_NODES_PER_SUBPORT) + +#define NUM_TC_NODES \ + (NUM_PIPE_NODES * TC_NODES_PER_PIPE) + +#define ROOT_NODE_ID 1000000 +#define SUBPORT_NODES_START_ID 900000 +#define PIPE_NODES_START_ID 800000 +#define TC_NODES_START_ID 700000 + +#define STATS_MASK_DEFAULT \ + (RTE_TM_STATS_N_PKTS | \ + RTE_TM_STATS_N_BYTES | \ + RTE_TM_STATS_N_PKTS_GREEN_DROPPED | \ + RTE_TM_STATS_N_BYTES_GREEN_DROPPED) + +#define STATS_MASK_QUEUE \ + (STATS_MASK_DEFAULT | \ + RTE_TM_STATS_N_PKTS_QUEUED) + +#define BYTES_IN_MBPS (1000 * 1000 / 8) +#define TOKEN_BUCKET_SIZE 1000000 + +/* TM Hierarchy Levels */ +enum tm_hierarchy_level { + TM_NODE_LEVEL_PORT = 0, + TM_NODE_LEVEL_SUBPORT, + TM_NODE_LEVEL_PIPE, + TM_NODE_LEVEL_TC, + TM_NODE_LEVEL_QUEUE, + TM_NODE_LEVEL_MAX, +}; + +struct tm_hierarchy { + /* TM Nodes */ + uint32_t root_node_id; + uint32_t subport_node_id[SUBPORT_NODES_PER_PORT]; + uint32_t pipe_node_id[SUBPORT_NODES_PER_PORT][PIPE_NODES_PER_SUBPORT]; + uint32_t tc_node_id[NUM_PIPE_NODES][TC_NODES_PER_PIPE]; + uint32_t queue_node_id[NUM_TC_NODES][QUEUE_NODES_PER_TC]; + + /* TM Hierarchy Nodes Shaper Rates */ + uint32_t root_node_shaper_rate; + uint32_t subport_node_shaper_rate; + uint32_t pipe_node_shaper_rate; + uint32_t tc_node_shaper_rate; + uint32_t tc_node_shared_shaper_rate; + + uint32_t n_shapers; +}; + +static struct fwd_lcore *softnic_fwd_lcore; +static uint16_t softnic_port_id; +struct fwd_engine softnic_fwd_engine; + +/* + * Softnic packet forward + */ +static void +softnic_fwd(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + uint16_t nb_rx; + uint16_t nb_tx; + uint32_t retry; + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* Packets Receive */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, + pkts_burst, nb_pkt_per_burst); + fs->rx_packets += nb_rx; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + pkts_burst, nb_rx); + + 
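+ /*
+ * rte_eth_tx_burst() may accept fewer packets than were received when the
+ * TX ring is full; when fs->retry_enabled is set, the loop below waits
+ * burst_tx_delay_time microseconds and resends the remainder up to
+ * burst_tx_retry_num times before dropping whatever is left.
+ */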
/* Retry if necessary */ + if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) { + retry = 0; + while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + &pkts_burst[nb_tx], nb_rx - nb_tx); + } + } + fs->tx_packets += nb_tx; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + + if (unlikely(nb_tx < nb_rx)) { + fs->fwd_dropped += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +static void +softnic_fwd_run(struct fwd_stream *fs) +{ + rte_pmd_softnic_run(softnic_port_id); + softnic_fwd(fs); +} + +/** + * Softnic init + */ +static int +softnic_begin(void *arg __rte_unused) +{ + for (;;) { + if (!softnic_fwd_lcore->stopped) + break; + } + + do { + /* Run softnic */ + rte_pmd_softnic_run(softnic_port_id); + } while (!softnic_fwd_lcore->stopped); + + return 0; +} + +static int +set_tm_hiearchy_nodes_shaper_rate(portid_t port_id, + struct tm_hierarchy *h) +{ + struct rte_eth_link link_params; + uint64_t tm_port_rate; + int ret; + + memset(&link_params, 0, sizeof(link_params)); + + ret = rte_eth_link_get(port_id, &link_params); + if (ret < 0) { + printf("Error during getting device (port %u) link info: %s\n", + port_id, rte_strerror(-ret)); + return ret; + } + tm_port_rate = (uint64_t)ETH_SPEED_NUM_10G * BYTES_IN_MBPS; + + /* Set tm hierarchy shapers rate */ + h->root_node_shaper_rate = tm_port_rate; + h->subport_node_shaper_rate = + tm_port_rate / SUBPORT_NODES_PER_PORT; + h->pipe_node_shaper_rate + = h->subport_node_shaper_rate / PIPE_NODES_PER_SUBPORT; + h->tc_node_shaper_rate = h->pipe_node_shaper_rate; + h->tc_node_shared_shaper_rate = h->subport_node_shaper_rate; + + return 0; +} + +static int +softport_tm_root_node_add(portid_t port_id, struct tm_hierarchy *h, + struct rte_tm_error *error) +{ + struct rte_tm_node_params rnp; + struct rte_tm_shaper_params rsp; + uint32_t priority, weight, level_id, shaper_profile_id; + + memset(&rsp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&rnp, 0, sizeof(struct rte_tm_node_params)); + + /* Shaper profile Parameters */ + rsp.peak.rate = h->root_node_shaper_rate; + rsp.peak.size = TOKEN_BUCKET_SIZE; + rsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + shaper_profile_id = 0; + + if (rte_tm_shaper_profile_add(port_id, shaper_profile_id, + &rsp, error)) { + printf("%s ERROR(%d)-%s!(shaper_id %u)\n ", + __func__, error->type, error->message, + shaper_profile_id); + return -1; + } + + /* Root Node Parameters */ + h->root_node_id = ROOT_NODE_ID; + weight = 1; + priority = 0; + level_id = TM_NODE_LEVEL_PORT; + rnp.shaper_profile_id = shaper_profile_id; + rnp.nonleaf.n_sp_priorities = 1; + rnp.stats_mask = STATS_MASK_DEFAULT; + + /* Add Node to TM Hierarchy */ + if (rte_tm_node_add(port_id, h->root_node_id, RTE_TM_NODE_ID_NULL, + priority, weight, level_id, &rnp, error)) { + printf("%s ERROR(%d)-%s!(node_id %u, parent_id %u, level %u)\n", + __func__, error->type, error->message, + h->root_node_id, RTE_TM_NODE_ID_NULL, + level_id); + return -1; + } + /* Update */ + h->n_shapers++; + + printf(" Root node added (Start id %u, Count %u, level %u)\n", + h->root_node_id, 1, level_id); + + return 0; +} + +static int +softport_tm_subport_node_add(portid_t port_id, + struct tm_hierarchy *h, + struct 
rte_tm_error *error) +{ + uint32_t subport_parent_node_id, subport_node_id = 0; + struct rte_tm_node_params snp; + struct rte_tm_shaper_params ssp; + uint32_t priority, weight, level_id, shaper_profile_id; + uint32_t i; + + memset(&ssp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&snp, 0, sizeof(struct rte_tm_node_params)); + + shaper_profile_id = h->n_shapers; + + /* Add Shaper Profile to TM Hierarchy */ + for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) { + ssp.peak.rate = h->subport_node_shaper_rate; + ssp.peak.size = TOKEN_BUCKET_SIZE; + ssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + + if (rte_tm_shaper_profile_add(port_id, shaper_profile_id, + &ssp, error)) { + printf("%s ERROR(%d)-%s!(shaper_id %u)\n ", + __func__, error->type, error->message, + shaper_profile_id); + return -1; + } + + /* Node Parameters */ + h->subport_node_id[i] = SUBPORT_NODES_START_ID + i; + subport_parent_node_id = h->root_node_id; + weight = 1; + priority = 0; + level_id = TM_NODE_LEVEL_SUBPORT; + snp.shaper_profile_id = shaper_profile_id; + snp.nonleaf.n_sp_priorities = 1; + snp.stats_mask = STATS_MASK_DEFAULT; + + /* Add Node to TM Hiearchy */ + if (rte_tm_node_add(port_id, + h->subport_node_id[i], + subport_parent_node_id, + priority, weight, + level_id, + &snp, + error)) { + printf("%s ERROR(%d)-%s!(node %u,parent %u,level %u)\n", + __func__, + error->type, + error->message, + h->subport_node_id[i], + subport_parent_node_id, + level_id); + return -1; + } + shaper_profile_id++; + subport_node_id++; + } + /* Update */ + h->n_shapers = shaper_profile_id; + + printf(" Subport nodes added (Start id %u, Count %u, level %u)\n", + h->subport_node_id[0], SUBPORT_NODES_PER_PORT, level_id); + + return 0; +} + +static int +softport_tm_pipe_node_add(portid_t port_id, + struct tm_hierarchy *h, + struct rte_tm_error *error) +{ + uint32_t pipe_parent_node_id; + struct rte_tm_node_params pnp; + struct rte_tm_shaper_params psp; + uint32_t priority, weight, level_id, shaper_profile_id; + uint32_t i, j; + + memset(&psp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&pnp, 0, sizeof(struct rte_tm_node_params)); + + shaper_profile_id = h->n_shapers; + + /* Shaper Profile Parameters */ + psp.peak.rate = h->pipe_node_shaper_rate; + psp.peak.size = TOKEN_BUCKET_SIZE; + psp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + + /* Pipe Node Parameters */ + weight = 1; + priority = 0; + level_id = TM_NODE_LEVEL_PIPE; + pnp.nonleaf.n_sp_priorities = 4; + pnp.stats_mask = STATS_MASK_DEFAULT; + + /* Add Shaper Profiles and Nodes to TM Hierarchy */ + for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) { + for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) { + if (rte_tm_shaper_profile_add(port_id, + shaper_profile_id, &psp, error)) { + printf("%s ERROR(%d)-%s!(shaper_id %u)\n ", + __func__, error->type, error->message, + shaper_profile_id); + return -1; + } + pnp.shaper_profile_id = shaper_profile_id; + pipe_parent_node_id = h->subport_node_id[i]; + h->pipe_node_id[i][j] = PIPE_NODES_START_ID + + (i * PIPE_NODES_PER_SUBPORT) + j; + + if (rte_tm_node_add(port_id, + h->pipe_node_id[i][j], + pipe_parent_node_id, + priority, weight, level_id, + &pnp, + error)) { + printf("%s ERROR(%d)-%s!(node %u,parent %u )\n", + __func__, + error->type, + error->message, + h->pipe_node_id[i][j], + pipe_parent_node_id); + + return -1; + } + shaper_profile_id++; + } + } + /* Update */ + h->n_shapers = shaper_profile_id; + + printf(" Pipe nodes added (Start id %u, Count %u, level %u)\n", + h->pipe_node_id[0][0], NUM_PIPE_NODES, level_id); + + return 0; 
+} + +static int +softport_tm_tc_node_add(portid_t port_id, + struct tm_hierarchy *h, + struct rte_tm_error *error) +{ + uint32_t tc_parent_node_id; + struct rte_tm_node_params tnp; + struct rte_tm_shaper_params tsp, tssp; + uint32_t shared_shaper_profile_id[TC_NODES_PER_PIPE]; + uint32_t priority, weight, level_id, shaper_profile_id; + uint32_t pos, n_tc_nodes, i, j, k; + + memset(&tsp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&tssp, 0, sizeof(struct rte_tm_shaper_params)); + memset(&tnp, 0, sizeof(struct rte_tm_node_params)); + + shaper_profile_id = h->n_shapers; + + /* Private Shaper Profile (TC) Parameters */ + tsp.peak.rate = h->tc_node_shaper_rate; + tsp.peak.size = TOKEN_BUCKET_SIZE; + tsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + + /* Shared Shaper Profile (TC) Parameters */ + tssp.peak.rate = h->tc_node_shared_shaper_rate; + tssp.peak.size = TOKEN_BUCKET_SIZE; + tssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS; + + /* TC Node Parameters */ + weight = 1; + level_id = TM_NODE_LEVEL_TC; + tnp.n_shared_shapers = 1; + tnp.nonleaf.n_sp_priorities = 1; + tnp.stats_mask = STATS_MASK_DEFAULT; + + /* Add Shared Shaper Profiles to TM Hierarchy */ + for (i = 0; i < TC_NODES_PER_PIPE; i++) { + shared_shaper_profile_id[i] = shaper_profile_id; + + if (rte_tm_shaper_profile_add(port_id, + shared_shaper_profile_id[i], &tssp, error)) { + printf("%s ERROR(%d)-%s!(Shared shaper profileid %u)\n", + __func__, error->type, error->message, + shared_shaper_profile_id[i]); + + return -1; + } + if (rte_tm_shared_shaper_add_update(port_id, i, + shared_shaper_profile_id[i], error)) { + printf("%s ERROR(%d)-%s!(Shared shaper id %u)\n", + __func__, error->type, error->message, i); + + return -1; + } + shaper_profile_id++; + } + + /* Add Shaper Profiles and Nodes to TM Hierarchy */ + n_tc_nodes = 0; + for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) { + for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) { + for (k = 0; k < TC_NODES_PER_PIPE ; k++) { + priority = k; + tc_parent_node_id = h->pipe_node_id[i][j]; + tnp.shared_shaper_id = + (uint32_t *)calloc(1, sizeof(uint32_t)); + if (tnp.shared_shaper_id == NULL) { + printf("Shared shaper mem alloc err\n"); + return -1; + } + tnp.shared_shaper_id[0] = k; + pos = j + (i * PIPE_NODES_PER_SUBPORT); + h->tc_node_id[pos][k] = + TC_NODES_START_ID + n_tc_nodes; + + if (rte_tm_shaper_profile_add(port_id, + shaper_profile_id, &tsp, error)) { + printf("%s ERROR(%d)-%s!(shaper %u)\n", + __func__, error->type, + error->message, + shaper_profile_id); + + free(tnp.shared_shaper_id); + return -1; + } + tnp.shaper_profile_id = shaper_profile_id; + if (rte_tm_node_add(port_id, + h->tc_node_id[pos][k], + tc_parent_node_id, + priority, weight, + level_id, + &tnp, error)) { + printf("%s ERROR(%d)-%s!(node id %u)\n", + __func__, + error->type, + error->message, + h->tc_node_id[pos][k]); + + free(tnp.shared_shaper_id); + return -1; + } + shaper_profile_id++; + n_tc_nodes++; + } + } + } + /* Update */ + h->n_shapers = shaper_profile_id; + + printf(" TC nodes added (Start id %u, Count %u, level %u)\n", + h->tc_node_id[0][0], n_tc_nodes, level_id); + + return 0; +} + +static int +softport_tm_queue_node_add(portid_t port_id, struct tm_hierarchy *h, + struct rte_tm_error *error) +{ + uint32_t queue_parent_node_id; + struct rte_tm_node_params qnp; + uint32_t priority, weight, level_id, pos; + uint32_t n_queue_nodes, i, j, k; + + memset(&qnp, 0, sizeof(struct rte_tm_node_params)); + + /* Queue Node Parameters */ + priority = 0; + weight = 1; + level_id = 
TM_NODE_LEVEL_QUEUE; + qnp.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE; + qnp.leaf.cman = RTE_TM_CMAN_TAIL_DROP; + qnp.stats_mask = STATS_MASK_QUEUE; + + /* Add Queue Nodes to TM Hierarchy */ + n_queue_nodes = 0; + for (i = 0; i < NUM_PIPE_NODES; i++) { + for (j = 0; j < TC_NODES_PER_PIPE; j++) { + queue_parent_node_id = h->tc_node_id[i][j]; + for (k = 0; k < QUEUE_NODES_PER_TC; k++) { + pos = j + (i * TC_NODES_PER_PIPE); + h->queue_node_id[pos][k] = n_queue_nodes; + if (rte_tm_node_add(port_id, + h->queue_node_id[pos][k], + queue_parent_node_id, + priority, + weight, + level_id, + &qnp, error)) { + printf("%s ERROR(%d)-%s!(node %u)\n", + __func__, + error->type, + error->message, + h->queue_node_id[pos][k]); + + return -1; + } + n_queue_nodes++; + } + } + } + printf(" Queue nodes added (Start id %u, Count %u, level %u)\n", + h->queue_node_id[0][0], n_queue_nodes, level_id); + + return 0; +} + +static int +softport_tm_hierarchy_specify(portid_t port_id, + struct rte_tm_error *error) +{ + + struct tm_hierarchy h; + int status; + + memset(&h, 0, sizeof(struct tm_hierarchy)); + + /* TM hierarchy shapers rate */ + status = set_tm_hiearchy_nodes_shaper_rate(port_id, &h); + if (status) + return status; + + /* Add root node (level 0) */ + status = softport_tm_root_node_add(port_id, &h, error); + if (status) + return status; + + /* Add subport node (level 1) */ + status = softport_tm_subport_node_add(port_id, &h, error); + if (status) + return status; + + /* Add pipe nodes (level 2) */ + status = softport_tm_pipe_node_add(port_id, &h, error); + if (status) + return status; + + /* Add traffic class nodes (level 3) */ + status = softport_tm_tc_node_add(port_id, &h, error); + if (status) + return status; + + /* Add queue nodes (level 4) */ + status = softport_tm_queue_node_add(port_id, &h, error); + if (status) + return status; + + return 0; +} + +/* + * Softnic TM default configuration + */ +static void +softnic_tm_default_config(portid_t pi) +{ + struct rte_port *port = &ports[pi]; + struct rte_tm_error error; + int status; + + /* Stop port */ + rte_eth_dev_stop(pi); + + /* TM hierarchy specification */ + status = softport_tm_hierarchy_specify(pi, &error); + if (status) { + printf(" TM Hierarchy built error(%d) - %s\n", + error.type, error.message); + return; + } + printf("\n TM Hierarchy Specified!\n"); + + /* TM hierarchy commit */ + status = rte_tm_hierarchy_commit(pi, 0, &error); + if (status) { + printf(" Hierarchy commit error(%d) - %s\n", + error.type, error.message); + return; + } + printf(" Hierarchy Committed (port %u)!\n", pi); + + /* Start port */ + status = rte_eth_dev_start(pi); + if (status) { + printf("\n Port %u start error!\n", pi); + return; + } + + /* Reset the default hierarchy flag */ + port->softport.default_tm_hierarchy_enable = 0; +} + +/* + * Softnic forwarding init + */ +static void +softnic_fwd_begin(portid_t pi) +{ + struct rte_port *port = &ports[pi]; + uint32_t lcore, fwd_core_present = 0, softnic_run_launch = 0; + int status; + + softnic_fwd_lcore = port->softport.fwd_lcore_arg[0]; + softnic_port_id = pi; + + /* Launch softnic_run function on lcores */ + for (lcore = 0; lcore < RTE_MAX_LCORE; lcore++) { + if (!rte_lcore_is_enabled(lcore)) + continue; + + if (lcore == rte_get_master_lcore()) + continue; + + if (fwd_core_present == 0) { + fwd_core_present++; + continue; + } + + status = rte_eal_remote_launch(softnic_begin, NULL, lcore); + if (status) + printf("softnic launch on lcore %u failed (%d)\n", + lcore, status); + + softnic_run_launch = 1; + } + + if 
(!softnic_run_launch) + softnic_fwd_engine.packet_fwd = softnic_fwd_run; + + /* Softnic TM default configuration */ + if (port->softport.default_tm_hierarchy_enable == 1) + softnic_tm_default_config(pi); +} + +struct fwd_engine softnic_fwd_engine = { + .fwd_mode_name = "softnic", + .port_fwd_begin = softnic_fwd_begin, + .port_fwd_end = NULL, + .packet_fwd = softnic_fwd, +}; diff --git a/src/spdk/dpdk/app/test-pmd/testpmd.c b/src/spdk/dpdk/app/test-pmd/testpmd.c new file mode 100644 index 000000000..4989d22ca --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/testpmd.c @@ -0,0 +1,3859 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <signal.h> +#include <string.h> +#include <time.h> +#include <fcntl.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <errno.h> +#include <stdbool.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <rte_common.h> +#include <rte_errno.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_alarm.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_malloc.h> +#include <rte_mbuf.h> +#include <rte_mbuf_pool_ops.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_dev.h> +#include <rte_string_fns.h> +#ifdef RTE_LIBRTE_IXGBE_PMD +#include <rte_pmd_ixgbe.h> +#endif +#ifdef RTE_LIBRTE_PDUMP +#include <rte_pdump.h> +#endif +#include <rte_flow.h> +#include <rte_metrics.h> +#ifdef RTE_LIBRTE_BITRATE +#include <rte_bitrate.h> +#endif +#ifdef RTE_LIBRTE_LATENCY_STATS +#include <rte_latencystats.h> +#endif + +#include "testpmd.h" + +#ifndef MAP_HUGETLB +/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */ +#define HUGE_FLAG (0x40000) +#else +#define HUGE_FLAG MAP_HUGETLB +#endif + +#ifndef MAP_HUGE_SHIFT +/* older kernels (or FreeBSD) will not have this define */ +#define HUGE_SHIFT (26) +#else +#define HUGE_SHIFT MAP_HUGE_SHIFT +#endif + +#define EXTMEM_HEAP_NAME "extmem" +#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M + +uint16_t verbose_level = 0; /**< Silent by default. */ +int testpmd_logtype; /**< Log type for testpmd logs */ + +/* use master core for command line ? */ +uint8_t interactive = 0; +uint8_t auto_start = 0; +uint8_t tx_first; +char cmdline_filename[PATH_MAX] = {0}; + +/* + * NUMA support configuration. + * When set, the NUMA support attempts to dispatch the allocation of the + * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the + * probed ports among the CPU sockets 0 and 1. + * Otherwise, all memory is allocated from CPU socket 0. + */ +uint8_t numa_support = 1; /**< numa enabled by default */ + +/* + * In UMA mode,all memory is allocated from socket 0 if --socket-num is + * not configured. 
+ */ +uint8_t socket_num = UMA_NO_CONFIG; + +/* + * Select mempool allocation type: + * - native: use regular DPDK memory + * - anon: use regular DPDK memory to create mempool, but populate using + * anonymous memory (may not be IOVA-contiguous) + * - xmem: use externally allocated hugepage memory + */ +uint8_t mp_alloc_type = MP_ALLOC_NATIVE; + +/* + * Store specified sockets on which memory pool to be used by ports + * is allocated. + */ +uint8_t port_numa[RTE_MAX_ETHPORTS]; + +/* + * Store specified sockets on which RX ring to be used by ports + * is allocated. + */ +uint8_t rxring_numa[RTE_MAX_ETHPORTS]; + +/* + * Store specified sockets on which TX ring to be used by ports + * is allocated. + */ +uint8_t txring_numa[RTE_MAX_ETHPORTS]; + +/* + * Record the Ethernet address of peer target ports to which packets are + * forwarded. + * Must be instantiated with the ethernet addresses of peer traffic generator + * ports. + */ +struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; +portid_t nb_peer_eth_addrs = 0; + +/* + * Probed Target Environment. + */ +struct rte_port *ports; /**< For all probed ethernet ports. */ +portid_t nb_ports; /**< Number of probed ethernet ports. */ +struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ +lcoreid_t nb_lcores; /**< Number of probed logical cores. */ + +portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */ + +/* + * Test Forwarding Configuration. + * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores + * nb_fwd_ports <= nb_cfg_ports <= nb_ports + */ +lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ +lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ +portid_t nb_cfg_ports; /**< Number of configured ports. */ +portid_t nb_fwd_ports; /**< Number of forwarding ports. */ + +unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ +portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ + +struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ +streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ + +/* + * Forwarding engines. + */ +struct fwd_engine * fwd_engines[] = { + &io_fwd_engine, + &mac_fwd_engine, + &mac_swap_engine, + &flow_gen_engine, + &rx_only_engine, + &tx_only_engine, + &csum_fwd_engine, + &icmp_echo_engine, + &noisy_vnf_engine, +#if defined RTE_LIBRTE_PMD_SOFTNIC + &softnic_fwd_engine, +#endif +#ifdef RTE_LIBRTE_IEEE1588 + &ieee1588_fwd_engine, +#endif + NULL, +}; + +struct rte_mempool *mempools[RTE_MAX_NUMA_NODES]; +uint16_t mempool_flags; + +struct fwd_config cur_fwd_config; +struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ +uint32_t retry_enabled; +uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; +uint32_t burst_tx_retry_num = BURST_TX_RETRIES; + +uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ +uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if + * specified on command-line. */ +uint16_t stats_period; /**< Period to show statistics (disabled by default) */ + +/* + * In container, it cannot terminate the process which running with 'stats-period' + * option. Set flag to exit stats period loop after received SIGINT/SIGTERM. + */ +uint8_t f_quit; + +/* + * Configuration of packet segments used by the "txonly" processing engine. + */ +uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. 
*/ +uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { + TXONLY_DEF_PACKET_LEN, +}; +uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ + +enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; +/**< Split policy for packets to TX. */ + +uint8_t txonly_multi_flow; +/**< Whether multiple flows are generated in TXONLY mode. */ + +uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ +uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ + +/* current configuration is in DCB or not,0 means it is not in DCB mode */ +uint8_t dcb_config = 0; + +/* Whether the dcb is in testing status */ +uint8_t dcb_test = 0; + +/* + * Configurable number of RX/TX queues. + */ +queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */ +queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ +queueid_t nb_txq = 1; /**< Number of TX queues per port. */ + +/* + * Configurable number of RX/TX ring descriptors. + * Defaults are supplied by drivers via ethdev. + */ +#define RTE_TEST_RX_DESC_DEFAULT 0 +#define RTE_TEST_TX_DESC_DEFAULT 0 +uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ +uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ + +#define RTE_PMD_PARAM_UNSET -1 +/* + * Configurable values of RX and TX ring threshold registers. + */ + +int8_t rx_pthresh = RTE_PMD_PARAM_UNSET; +int8_t rx_hthresh = RTE_PMD_PARAM_UNSET; +int8_t rx_wthresh = RTE_PMD_PARAM_UNSET; + +int8_t tx_pthresh = RTE_PMD_PARAM_UNSET; +int8_t tx_hthresh = RTE_PMD_PARAM_UNSET; +int8_t tx_wthresh = RTE_PMD_PARAM_UNSET; + +/* + * Configurable value of RX free threshold. + */ +int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET; + +/* + * Configurable value of RX drop enable. + */ +int8_t rx_drop_en = RTE_PMD_PARAM_UNSET; + +/* + * Configurable value of TX free threshold. + */ +int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; + +/* + * Configurable value of TX RS bit threshold. + */ +int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; + +/* + * Configurable value of buffered packets before sending. + */ +uint16_t noisy_tx_sw_bufsz; + +/* + * Configurable value of packet buffer timeout. + */ +uint16_t noisy_tx_sw_buf_flush_time; + +/* + * Configurable value for size of VNF internal memory area + * used for simulating noisy neighbour behaviour + */ +uint64_t noisy_lkup_mem_sz; + +/* + * Configurable value of number of random writes done in + * VNF simulation memory area. + */ +uint64_t noisy_lkup_num_writes; + +/* + * Configurable value of number of random reads done in + * VNF simulation memory area. + */ +uint64_t noisy_lkup_num_reads; + +/* + * Configurable value of number of random reads/writes done in + * VNF simulation memory area. + */ +uint64_t noisy_lkup_num_reads_writes; + +/* + * Receive Side Scaling (RSS) configuration. + */ +uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */ + +/* + * Port topology configuration + */ +uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ + +/* + * Avoids to flush all the RX streams before starts forwarding. + */ +uint8_t no_flush_rx = 0; /* flush by default */ + +/* + * Flow API isolated mode. + */ +uint8_t flow_isolate_all; + +/* + * Avoids to check link status when starting/stopping a port. + */ +uint8_t no_link_check = 0; /* check by default */ + +/* + * Don't automatically start all ports in interactive mode. 
+ */ +uint8_t no_device_start = 0; + +/* + * Enable link status change notification + */ +uint8_t lsc_interrupt = 1; /* enabled by default */ + +/* + * Enable device removal notification. + */ +uint8_t rmv_interrupt = 1; /* enabled by default */ + +uint8_t hot_plug = 0; /**< hotplug disabled by default. */ + +/* After attach, port setup is called on event or by iterator */ +bool setup_on_probe_event = true; + +/* Clear ptypes on port initialization. */ +uint8_t clear_ptypes = true; + +/* Pretty printing of ethdev events */ +static const char * const eth_event_desc[] = { + [RTE_ETH_EVENT_UNKNOWN] = "unknown", + [RTE_ETH_EVENT_INTR_LSC] = "link state change", + [RTE_ETH_EVENT_QUEUE_STATE] = "queue state", + [RTE_ETH_EVENT_INTR_RESET] = "reset", + [RTE_ETH_EVENT_VF_MBOX] = "VF mbox", + [RTE_ETH_EVENT_IPSEC] = "IPsec", + [RTE_ETH_EVENT_MACSEC] = "MACsec", + [RTE_ETH_EVENT_INTR_RMV] = "device removal", + [RTE_ETH_EVENT_NEW] = "device probed", + [RTE_ETH_EVENT_DESTROY] = "device released", + [RTE_ETH_EVENT_FLOW_AGED] = "flow aged", + [RTE_ETH_EVENT_MAX] = NULL, +}; + +/* + * Display or mask ether events + * Default to all events except VF_MBOX + */ +uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | + (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | + (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | + (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | + (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) | + (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | + (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) | + (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED); +/* + * Decide if all memory are locked for performance. + */ +int do_mlockall = 0; + +/* + * NIC bypass mode configuration options. + */ + +#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS +/* The NIC bypass watchdog timeout. */ +uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; +#endif + + +#ifdef RTE_LIBRTE_LATENCY_STATS + +/* + * Set when latency stats is enabled in the commandline + */ +uint8_t latencystats_enabled; + +/* + * Lcore ID to serive latency statistics. + */ +lcoreid_t latencystats_lcore_id = -1; + +#endif + +/* + * Ethernet device configuration. + */ +struct rte_eth_rxmode rx_mode = { + .max_rx_pkt_len = RTE_ETHER_MAX_LEN, + /**< Default maximum frame length. */ +}; + +struct rte_eth_txmode tx_mode = { + .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE, +}; + +struct rte_fdir_conf fdir_conf = { + .mode = RTE_FDIR_MODE_NONE, + .pballoc = RTE_FDIR_PBALLOC_64K, + .status = RTE_FDIR_REPORT_STATUS, + .mask = { + .vlan_tci_mask = 0xFFEF, + .ipv4_mask = { + .src_ip = 0xFFFFFFFF, + .dst_ip = 0xFFFFFFFF, + }, + .ipv6_mask = { + .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, + }, + .src_port_mask = 0xFFFF, + .dst_port_mask = 0xFFFF, + .mac_addr_byte_mask = 0xFF, + .tunnel_type_mask = 1, + .tunnel_id_mask = 0xFFFFFFFF, + }, + .drop_queue = 127, +}; + +volatile int test_done = 1; /* stop packet forwarding when set to 1. 
*/ + +struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; +struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; + +struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; +struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; + +uint16_t nb_tx_queue_stats_mappings = 0; +uint16_t nb_rx_queue_stats_mappings = 0; + +/* + * Display zero values by default for xstats + */ +uint8_t xstats_hide_zero; + +unsigned int num_sockets = 0; +unsigned int socket_ids[RTE_MAX_NUMA_NODES]; + +#ifdef RTE_LIBRTE_BITRATE +/* Bitrate statistics */ +struct rte_stats_bitrates *bitrate_data; +lcoreid_t bitrate_lcore_id; +uint8_t bitrate_enabled; +#endif + +struct gro_status gro_ports[RTE_MAX_ETHPORTS]; +uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; + +/* + * hexadecimal bitmask of RX mq mode can be enabled. + */ +enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS; + +/* Forward function declarations */ +static void setup_attached_port(portid_t pi); +static void map_port_queue_stats_mapping_registers(portid_t pi, + struct rte_port *port); +static void check_all_ports_link_status(uint32_t port_mask); +static int eth_event_callback(portid_t port_id, + enum rte_eth_event_type type, + void *param, void *ret_param); +static void dev_event_callback(const char *device_name, + enum rte_dev_event_type type, + void *param); + +/* + * Check if all the ports are started. + * If yes, return positive value. If not, return zero. + */ +static int all_ports_started(void); + +struct gso_status gso_ports[RTE_MAX_ETHPORTS]; +uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN; + +/* Holds the registered mbuf dynamic flags names. */ +char dynf_names[64][RTE_MBUF_DYN_NAMESIZE]; + +/* + * Helper function to check if socket is already discovered. + * If yes, return positive value. If not, return zero. + */ +int +new_socket_id(unsigned int socket_id) +{ + unsigned int i; + + for (i = 0; i < num_sockets; i++) { + if (socket_ids[i] == socket_id) + return 0; + } + return 1; +} + +/* + * Setup default configuration. 
+ */ +static void +set_default_fwd_lcores_config(void) +{ + unsigned int i; + unsigned int nb_lc; + unsigned int sock_num; + + nb_lc = 0; + for (i = 0; i < RTE_MAX_LCORE; i++) { + if (!rte_lcore_is_enabled(i)) + continue; + sock_num = rte_lcore_to_socket_id(i); + if (new_socket_id(sock_num)) { + if (num_sockets >= RTE_MAX_NUMA_NODES) { + rte_exit(EXIT_FAILURE, + "Total sockets greater than %u\n", + RTE_MAX_NUMA_NODES); + } + socket_ids[num_sockets++] = sock_num; + } + if (i == rte_get_master_lcore()) + continue; + fwd_lcores_cpuids[nb_lc++] = i; + } + nb_lcores = (lcoreid_t) nb_lc; + nb_cfg_lcores = nb_lcores; + nb_fwd_lcores = 1; +} + +static void +set_def_peer_eth_addrs(void) +{ + portid_t i; + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR; + peer_eth_addrs[i].addr_bytes[5] = i; + } +} + +static void +set_default_fwd_ports_config(void) +{ + portid_t pt_id; + int i = 0; + + RTE_ETH_FOREACH_DEV(pt_id) { + fwd_ports_ids[i++] = pt_id; + + /* Update sockets info according to the attached device */ + int socket_id = rte_eth_dev_socket_id(pt_id); + if (socket_id >= 0 && new_socket_id(socket_id)) { + if (num_sockets >= RTE_MAX_NUMA_NODES) { + rte_exit(EXIT_FAILURE, + "Total sockets greater than %u\n", + RTE_MAX_NUMA_NODES); + } + socket_ids[num_sockets++] = socket_id; + } + } + + nb_cfg_ports = nb_ports; + nb_fwd_ports = nb_ports; +} + +void +set_def_fwd_config(void) +{ + set_default_fwd_lcores_config(); + set_def_peer_eth_addrs(); + set_default_fwd_ports_config(); +} + +/* extremely pessimistic estimation of memory required to create a mempool */ +static int +calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out) +{ + unsigned int n_pages, mbuf_per_pg, leftover; + uint64_t total_mem, mbuf_mem, obj_sz; + + /* there is no good way to predict how much space the mempool will + * occupy because it will allocate chunks on the fly, and some of those + * will come from default DPDK memory while some will come from our + * external memory, so just assume 128MB will be enough for everyone. 
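+ * The calculation below rounds the mbuf count up to whole pages of
+ * obj_sz-sized objects and adds this fixed header allowance on top.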
+ */ + uint64_t hdr_mem = 128 << 20; + + /* account for possible non-contiguousness */ + obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL); + if (obj_sz > pgsz) { + TESTPMD_LOG(ERR, "Object size is bigger than page size\n"); + return -1; + } + + mbuf_per_pg = pgsz / obj_sz; + leftover = (nb_mbufs % mbuf_per_pg) > 0; + n_pages = (nb_mbufs / mbuf_per_pg) + leftover; + + mbuf_mem = n_pages * pgsz; + + total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz); + + if (total_mem > SIZE_MAX) { + TESTPMD_LOG(ERR, "Memory size too big\n"); + return -1; + } + *out = (size_t)total_mem; + + return 0; +} + +static int +pagesz_flags(uint64_t page_sz) +{ + /* as per mmap() manpage, all page sizes are log2 of page size + * shifted by MAP_HUGE_SHIFT + */ + int log2 = rte_log2_u64(page_sz); + + return (log2 << HUGE_SHIFT); +} + +static void * +alloc_mem(size_t memsz, size_t pgsz, bool huge) +{ + void *addr; + int flags; + + /* allocate anonymous hugepages */ + flags = MAP_ANONYMOUS | MAP_PRIVATE; + if (huge) + flags |= HUGE_FLAG | pagesz_flags(pgsz); + + addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0); + if (addr == MAP_FAILED) + return NULL; + + return addr; +} + +struct extmem_param { + void *addr; + size_t len; + size_t pgsz; + rte_iova_t *iova_table; + unsigned int iova_table_len; +}; + +static int +create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param, + bool huge) +{ + uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */ + RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */ + unsigned int cur_page, n_pages, pgsz_idx; + size_t mem_sz, cur_pgsz; + rte_iova_t *iovas = NULL; + void *addr; + int ret; + + for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) { + /* skip anything that is too big */ + if (pgsizes[pgsz_idx] > SIZE_MAX) + continue; + + cur_pgsz = pgsizes[pgsz_idx]; + + /* if we were told not to allocate hugepages, override */ + if (!huge) + cur_pgsz = sysconf(_SC_PAGESIZE); + + ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz); + if (ret < 0) { + TESTPMD_LOG(ERR, "Cannot calculate memory size\n"); + return -1; + } + + /* allocate our memory */ + addr = alloc_mem(mem_sz, cur_pgsz, huge); + + /* if we couldn't allocate memory with a specified page size, + * that doesn't mean we can't do it with other page sizes, so + * try another one. 
+ */ + if (addr == NULL) + continue; + + /* store IOVA addresses for every page in this memory area */ + n_pages = mem_sz / cur_pgsz; + + iovas = malloc(sizeof(*iovas) * n_pages); + + if (iovas == NULL) { + TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n"); + goto fail; + } + /* lock memory if it's not huge pages */ + if (!huge) + mlock(addr, mem_sz); + + /* populate IOVA addresses */ + for (cur_page = 0; cur_page < n_pages; cur_page++) { + rte_iova_t iova; + size_t offset; + void *cur; + + offset = cur_pgsz * cur_page; + cur = RTE_PTR_ADD(addr, offset); + + /* touch the page before getting its IOVA */ + *(volatile char *)cur = 0; + + iova = rte_mem_virt2iova(cur); + + iovas[cur_page] = iova; + } + + break; + } + /* if we couldn't allocate anything */ + if (iovas == NULL) + return -1; + + param->addr = addr; + param->len = mem_sz; + param->pgsz = cur_pgsz; + param->iova_table = iovas; + param->iova_table_len = n_pages; + + return 0; +fail: + if (iovas) + free(iovas); + if (addr) + munmap(addr, mem_sz); + + return -1; +} + +static int +setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge) +{ + struct extmem_param param; + int socket_id, ret; + + memset(¶m, 0, sizeof(param)); + + /* check if our heap exists */ + socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); + if (socket_id < 0) { + /* create our heap */ + ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME); + if (ret < 0) { + TESTPMD_LOG(ERR, "Cannot create heap\n"); + return -1; + } + } + + ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge); + if (ret < 0) { + TESTPMD_LOG(ERR, "Cannot create memory area\n"); + return -1; + } + + /* we now have a valid memory area, so add it to heap */ + ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME, + param.addr, param.len, param.iova_table, + param.iova_table_len, param.pgsz); + + /* when using VFIO, memory is automatically mapped for DMA by EAL */ + + /* not needed any more */ + free(param.iova_table); + + if (ret < 0) { + TESTPMD_LOG(ERR, "Cannot add memory to heap\n"); + munmap(param.addr, param.len); + return -1; + } + + /* success */ + + TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n", + param.len >> 20); + + return 0; +} +static void +dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused, + struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused) +{ + uint16_t pid = 0; + int ret; + + RTE_ETH_FOREACH_DEV(pid) { + struct rte_eth_dev *dev = + &rte_eth_devices[pid]; + + ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0, + memhdr->len); + if (ret) { + TESTPMD_LOG(DEBUG, + "unable to DMA unmap addr 0x%p " + "for device %s\n", + memhdr->addr, dev->data->name); + } + } + ret = rte_extmem_unregister(memhdr->addr, memhdr->len); + if (ret) { + TESTPMD_LOG(DEBUG, + "unable to un-register addr 0x%p\n", memhdr->addr); + } +} + +static void +dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused, + struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused) +{ + uint16_t pid = 0; + size_t page_size = sysconf(_SC_PAGESIZE); + int ret; + + ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0, + page_size); + if (ret) { + TESTPMD_LOG(DEBUG, + "unable to register addr 0x%p\n", memhdr->addr); + return; + } + RTE_ETH_FOREACH_DEV(pid) { + struct rte_eth_dev *dev = + &rte_eth_devices[pid]; + + ret = rte_dev_dma_map(dev->device, memhdr->addr, 0, + memhdr->len); + if (ret) { + TESTPMD_LOG(DEBUG, + "unable to DMA map addr 0x%p " + "for device %s\n", + memhdr->addr, dev->data->name); + } + } +} + +static unsigned int 
+setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id, + char *pool_name, struct rte_pktmbuf_extmem **ext_mem) +{ + struct rte_pktmbuf_extmem *xmem; + unsigned int ext_num, zone_num, elt_num; + uint16_t elt_size; + + elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE); + elt_num = EXTBUF_ZONE_SIZE / elt_size; + zone_num = (nb_mbufs + elt_num - 1) / elt_num; + + xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num); + if (xmem == NULL) { + TESTPMD_LOG(ERR, "Cannot allocate memory for " + "external buffer descriptors\n"); + *ext_mem = NULL; + return 0; + } + for (ext_num = 0; ext_num < zone_num; ext_num++) { + struct rte_pktmbuf_extmem *xseg = xmem + ext_num; + const struct rte_memzone *mz; + char mz_name[RTE_MEMZONE_NAMESIZE]; + int ret; + + ret = snprintf(mz_name, sizeof(mz_name), + RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num); + if (ret < 0 || ret >= (int)sizeof(mz_name)) { + errno = ENAMETOOLONG; + ext_num = 0; + break; + } + mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE, + socket_id, + RTE_MEMZONE_IOVA_CONTIG | + RTE_MEMZONE_1GB | + RTE_MEMZONE_SIZE_HINT_ONLY, + EXTBUF_ZONE_SIZE); + if (mz == NULL) { + /* + * The caller exits on external buffer creation + * error, so there is no need to free memzones. + */ + errno = ENOMEM; + ext_num = 0; + break; + } + xseg->buf_ptr = mz->addr; + xseg->buf_iova = mz->iova; + xseg->buf_len = EXTBUF_ZONE_SIZE; + xseg->elt_size = elt_size; + } + if (ext_num == 0 && xmem != NULL) { + free(xmem); + xmem = NULL; + } + *ext_mem = xmem; + return ext_num; +} + +/* + * Configuration initialisation done once at init time. + */ +static struct rte_mempool * +mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, + unsigned int socket_id) +{ + char pool_name[RTE_MEMPOOL_NAMESIZE]; + struct rte_mempool *rte_mp = NULL; + uint32_t mb_size; + + mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); + + TESTPMD_LOG(INFO, + "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", + pool_name, nb_mbuf, mbuf_seg_size, socket_id); + + switch (mp_alloc_type) { + case MP_ALLOC_NATIVE: + { + /* wrapper to rte_mempool_create() */ + TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", + rte_mbuf_best_mempool_ops()); + rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, + mb_mempool_cache, 0, mbuf_seg_size, socket_id); + break; + } + case MP_ALLOC_ANON: + { + rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, + mb_size, (unsigned int) mb_mempool_cache, + sizeof(struct rte_pktmbuf_pool_private), + socket_id, mempool_flags); + if (rte_mp == NULL) + goto err; + + if (rte_mempool_populate_anon(rte_mp) == 0) { + rte_mempool_free(rte_mp); + rte_mp = NULL; + goto err; + } + rte_pktmbuf_pool_init(rte_mp, NULL); + rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); + rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL); + break; + } + case MP_ALLOC_XMEM: + case MP_ALLOC_XMEM_HUGE: + { + int heap_socket; + bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE; + + if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0) + rte_exit(EXIT_FAILURE, "Could not create external memory\n"); + + heap_socket = + rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); + if (heap_socket < 0) + rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); + + TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", + rte_mbuf_best_mempool_ops()); + rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, + mb_mempool_cache, 0, mbuf_seg_size, + heap_socket); + break; + } + case MP_ALLOC_XBUF: + { + 
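+ /*
+ * Mbuf data buffers are placed in pinned external memzones reserved by
+ * setup_extbuf() below and attached via rte_pktmbuf_pool_create_extbuf().
+ */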
struct rte_pktmbuf_extmem *ext_mem; + unsigned int ext_num; + + ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size, + socket_id, pool_name, &ext_mem); + if (ext_num == 0) + rte_exit(EXIT_FAILURE, + "Can't create pinned data buffers\n"); + + TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", + rte_mbuf_best_mempool_ops()); + rte_mp = rte_pktmbuf_pool_create_extbuf + (pool_name, nb_mbuf, mb_mempool_cache, + 0, mbuf_seg_size, socket_id, + ext_mem, ext_num); + free(ext_mem); + break; + } + default: + { + rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); + } + } + +err: + if (rte_mp == NULL) { + rte_exit(EXIT_FAILURE, + "Creation of mbuf pool for socket %u failed: %s\n", + socket_id, rte_strerror(rte_errno)); + } else if (verbose_level > 0) { + rte_mempool_dump(stdout, rte_mp); + } + return rte_mp; +} + +/* + * Check given socket id is valid or not with NUMA mode, + * if valid, return 0, else return -1 + */ +static int +check_socket_id(const unsigned int socket_id) +{ + static int warning_once = 0; + + if (new_socket_id(socket_id)) { + if (!warning_once && numa_support) + printf("Warning: NUMA should be configured manually by" + " using --port-numa-config and" + " --ring-numa-config parameters along with" + " --numa.\n"); + warning_once = 1; + return -1; + } + return 0; +} + +/* + * Get the allowed maximum number of RX queues. + * *pid return the port id which has minimal value of + * max_rx_queues in all ports. + */ +queueid_t +get_allowed_max_nb_rxq(portid_t *pid) +{ + queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT; + bool max_rxq_valid = false; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + if (eth_dev_info_get_print_err(pi, &dev_info) != 0) + continue; + + max_rxq_valid = true; + if (dev_info.max_rx_queues < allowed_max_rxq) { + allowed_max_rxq = dev_info.max_rx_queues; + *pid = pi; + } + } + return max_rxq_valid ? allowed_max_rxq : 0; +} + +/* + * Check input rxq is valid or not. + * If input rxq is not greater than any of maximum number + * of RX queues of all ports, it is valid. + * if valid, return 0, else return -1 + */ +int +check_nb_rxq(queueid_t rxq) +{ + queueid_t allowed_max_rxq; + portid_t pid = 0; + + allowed_max_rxq = get_allowed_max_nb_rxq(&pid); + if (rxq > allowed_max_rxq) { + printf("Fail: input rxq (%u) can't be greater " + "than max_rx_queues (%u) of port %u\n", + rxq, + allowed_max_rxq, + pid); + return -1; + } + return 0; +} + +/* + * Get the allowed maximum number of TX queues. + * *pid return the port id which has minimal value of + * max_tx_queues in all ports. + */ +queueid_t +get_allowed_max_nb_txq(portid_t *pid) +{ + queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT; + bool max_txq_valid = false; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + if (eth_dev_info_get_print_err(pi, &dev_info) != 0) + continue; + + max_txq_valid = true; + if (dev_info.max_tx_queues < allowed_max_txq) { + allowed_max_txq = dev_info.max_tx_queues; + *pid = pi; + } + } + return max_txq_valid ? allowed_max_txq : 0; +} + +/* + * Check input txq is valid or not. + * If input txq is not greater than any of maximum number + * of TX queues of all ports, it is valid. 
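+ * For example (hypothetical values): if two ports report max_tx_queues of 8 and 4, + * get_allowed_max_nb_txq() returns 4, so a request for 6 Tx queues is rejected.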
+ * if valid, return 0, else return -1 + */ +int +check_nb_txq(queueid_t txq) +{ + queueid_t allowed_max_txq; + portid_t pid = 0; + + allowed_max_txq = get_allowed_max_nb_txq(&pid); + if (txq > allowed_max_txq) { + printf("Fail: input txq (%u) can't be greater " + "than max_tx_queues (%u) of port %u\n", + txq, + allowed_max_txq, + pid); + return -1; + } + return 0; +} + +/* + * Get the allowed maximum number of RXDs of every rx queue. + * *pid return the port id which has minimal value of + * max_rxd in all queues of all ports. + */ +static uint16_t +get_allowed_max_nb_rxd(portid_t *pid) +{ + uint16_t allowed_max_rxd = UINT16_MAX; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + if (eth_dev_info_get_print_err(pi, &dev_info) != 0) + continue; + + if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) { + allowed_max_rxd = dev_info.rx_desc_lim.nb_max; + *pid = pi; + } + } + return allowed_max_rxd; +} + +/* + * Get the allowed minimal number of RXDs of every rx queue. + * *pid return the port id which has minimal value of + * min_rxd in all queues of all ports. + */ +static uint16_t +get_allowed_min_nb_rxd(portid_t *pid) +{ + uint16_t allowed_min_rxd = 0; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + if (eth_dev_info_get_print_err(pi, &dev_info) != 0) + continue; + + if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) { + allowed_min_rxd = dev_info.rx_desc_lim.nb_min; + *pid = pi; + } + } + + return allowed_min_rxd; +} + +/* + * Check input rxd is valid or not. + * If input rxd is not greater than any of maximum number + * of RXDs of every Rx queues and is not less than any of + * minimal number of RXDs of every Rx queues, it is valid. + * if valid, return 0, else return -1 + */ +int +check_nb_rxd(queueid_t rxd) +{ + uint16_t allowed_max_rxd; + uint16_t allowed_min_rxd; + portid_t pid = 0; + + allowed_max_rxd = get_allowed_max_nb_rxd(&pid); + if (rxd > allowed_max_rxd) { + printf("Fail: input rxd (%u) can't be greater " + "than max_rxds (%u) of port %u\n", + rxd, + allowed_max_rxd, + pid); + return -1; + } + + allowed_min_rxd = get_allowed_min_nb_rxd(&pid); + if (rxd < allowed_min_rxd) { + printf("Fail: input rxd (%u) can't be less " + "than min_rxds (%u) of port %u\n", + rxd, + allowed_min_rxd, + pid); + return -1; + } + + return 0; +} + +/* + * Get the allowed maximum number of TXDs of every rx queues. + * *pid return the port id which has minimal value of + * max_txd in every tx queue. + */ +static uint16_t +get_allowed_max_nb_txd(portid_t *pid) +{ + uint16_t allowed_max_txd = UINT16_MAX; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + if (eth_dev_info_get_print_err(pi, &dev_info) != 0) + continue; + + if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) { + allowed_max_txd = dev_info.tx_desc_lim.nb_max; + *pid = pi; + } + } + return allowed_max_txd; +} + +/* + * Get the allowed maximum number of TXDs of every tx queues. + * *pid return the port id which has minimal value of + * min_txd in every tx queue. + */ +static uint16_t +get_allowed_min_nb_txd(portid_t *pid) +{ + uint16_t allowed_min_txd = 0; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + if (eth_dev_info_get_print_err(pi, &dev_info) != 0) + continue; + + if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) { + allowed_min_txd = dev_info.tx_desc_lim.nb_min; + *pid = pi; + } + } + + return allowed_min_txd; +} + +/* + * Check input txd is valid or not. 
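+ * (Both the maximum and the minimum per-queue descriptor limits reported by the drivers are enforced.)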
+ * If input txd is not greater than any of maximum number + * of TXDs of every Rx queues, it is valid. + * if valid, return 0, else return -1 + */ +int +check_nb_txd(queueid_t txd) +{ + uint16_t allowed_max_txd; + uint16_t allowed_min_txd; + portid_t pid = 0; + + allowed_max_txd = get_allowed_max_nb_txd(&pid); + if (txd > allowed_max_txd) { + printf("Fail: input txd (%u) can't be greater " + "than max_txds (%u) of port %u\n", + txd, + allowed_max_txd, + pid); + return -1; + } + + allowed_min_txd = get_allowed_min_nb_txd(&pid); + if (txd < allowed_min_txd) { + printf("Fail: input txd (%u) can't be less " + "than min_txds (%u) of port %u\n", + txd, + allowed_min_txd, + pid); + return -1; + } + return 0; +} + + +/* + * Get the allowed maximum number of hairpin queues. + * *pid return the port id which has minimal value of + * max_hairpin_queues in all ports. + */ +queueid_t +get_allowed_max_nb_hairpinq(portid_t *pid) +{ + queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT; + portid_t pi; + struct rte_eth_hairpin_cap cap; + + RTE_ETH_FOREACH_DEV(pi) { + if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) { + *pid = pi; + return 0; + } + if (cap.max_nb_queues < allowed_max_hairpinq) { + allowed_max_hairpinq = cap.max_nb_queues; + *pid = pi; + } + } + return allowed_max_hairpinq; +} + +/* + * Check input hairpin is valid or not. + * If input hairpin is not greater than any of maximum number + * of hairpin queues of all ports, it is valid. + * if valid, return 0, else return -1 + */ +int +check_nb_hairpinq(queueid_t hairpinq) +{ + queueid_t allowed_max_hairpinq; + portid_t pid = 0; + + allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid); + if (hairpinq > allowed_max_hairpinq) { + printf("Fail: input hairpin (%u) can't be greater " + "than max_hairpin_queues (%u) of port %u\n", + hairpinq, allowed_max_hairpinq, pid); + return -1; + } + return 0; +} + +static void +init_config(void) +{ + portid_t pid; + struct rte_port *port; + struct rte_mempool *mbp; + unsigned int nb_mbuf_per_pool; + lcoreid_t lc_id; + uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; + struct rte_gro_param gro_param; + uint32_t gso_types; + uint16_t data_size; + bool warning = 0; + int k; + int ret; + + memset(port_per_socket,0,RTE_MAX_NUMA_NODES); + + /* Configuration of logical cores. 
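One cache-aligned fwd_lcore structure is allocated per forwarding lcore below.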
*/ + fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", + sizeof(struct fwd_lcore *) * nb_lcores, + RTE_CACHE_LINE_SIZE); + if (fwd_lcores == NULL) { + rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) " + "failed\n", nb_lcores); + } + for (lc_id = 0; lc_id < nb_lcores; lc_id++) { + fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", + sizeof(struct fwd_lcore), + RTE_CACHE_LINE_SIZE); + if (fwd_lcores[lc_id] == NULL) { + rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) " + "failed\n"); + } + fwd_lcores[lc_id]->cpuid_idx = lc_id; + } + + RTE_ETH_FOREACH_DEV(pid) { + port = &ports[pid]; + /* Apply default TxRx configuration for all ports */ + port->dev_conf.txmode = tx_mode; + port->dev_conf.rxmode = rx_mode; + + ret = eth_dev_info_get_print_err(pid, &port->dev_info); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "rte_eth_dev_info_get() failed\n"); + + if (!(port->dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_MBUF_FAST_FREE)) + port->dev_conf.txmode.offloads &= + ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; + if (numa_support) { + if (port_numa[pid] != NUMA_NO_CONFIG) + port_per_socket[port_numa[pid]]++; + else { + uint32_t socket_id = rte_eth_dev_socket_id(pid); + + /* + * if socket_id is invalid, + * set to the first available socket. + */ + if (check_socket_id(socket_id) < 0) + socket_id = socket_ids[0]; + port_per_socket[socket_id]++; + } + } + + /* Apply Rx offloads configuration */ + for (k = 0; k < port->dev_info.max_rx_queues; k++) + port->rx_conf[k].offloads = + port->dev_conf.rxmode.offloads; + /* Apply Tx offloads configuration */ + for (k = 0; k < port->dev_info.max_tx_queues; k++) + port->tx_conf[k].offloads = + port->dev_conf.txmode.offloads; + + /* set flag to initialize port/queue */ + port->need_reconfig = 1; + port->need_reconfig_queues = 1; + port->tx_metadata = 0; + + /* Check for maximum number of segments per MTU. Accordingly + * update the mbuf data size. + */ + if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && + port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { + data_size = rx_mode.max_rx_pkt_len / + port->dev_info.rx_desc_lim.nb_mtu_seg_max; + + if ((data_size + RTE_PKTMBUF_HEADROOM) > + mbuf_data_size) { + mbuf_data_size = data_size + + RTE_PKTMBUF_HEADROOM; + warning = 1; + } + } + } + + if (warning) + TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n", + mbuf_data_size); + + /* + * Create pools of mbuf. + * If NUMA support is disabled, create a single pool of mbuf in + * socket 0 memory by default. + * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. + * + * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and + * nb_txd can be configured at run time. + */ + if (param_total_num_mbufs) + nb_mbuf_per_pool = param_total_num_mbufs; + else { + nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + + (nb_lcores * mb_mempool_cache) + + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST; + nb_mbuf_per_pool *= RTE_MAX_ETHPORTS; + } + + if (numa_support) { + uint8_t i; + + for (i = 0; i < num_sockets; i++) + mempools[i] = mbuf_pool_create(mbuf_data_size, + nb_mbuf_per_pool, + socket_ids[i]); + } else { + if (socket_num == UMA_NO_CONFIG) + mempools[0] = mbuf_pool_create(mbuf_data_size, + nb_mbuf_per_pool, 0); + else + mempools[socket_num] = mbuf_pool_create + (mbuf_data_size, + nb_mbuf_per_pool, + socket_num); + } + + init_port_config(); + + gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | + DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; + /* + * Records which Mbuf pool to use by each logical core, if needed. 
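+ * The selected pool also seeds each lcore's GSO context (direct and indirect pools) below.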
+ */ + for (lc_id = 0; lc_id < nb_lcores; lc_id++) { + mbp = mbuf_pool_find( + rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); + + if (mbp == NULL) + mbp = mbuf_pool_find(0); + fwd_lcores[lc_id]->mbp = mbp; + /* initialize GSO context */ + fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; + fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; + fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types; + fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - + RTE_ETHER_CRC_LEN; + fwd_lcores[lc_id]->gso_ctx.flag = 0; + } + + /* Configuration of packet forwarding streams. */ + if (init_fwd_streams() < 0) + rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n"); + + fwd_config_setup(); + + /* create a gro context for each lcore */ + gro_param.gro_types = RTE_GRO_TCP_IPV4; + gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; + gro_param.max_item_per_flow = MAX_PKT_BURST; + for (lc_id = 0; lc_id < nb_lcores; lc_id++) { + gro_param.socket_id = rte_lcore_to_socket_id( + fwd_lcores_cpuids[lc_id]); + fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param); + if (fwd_lcores[lc_id]->gro_ctx == NULL) { + rte_exit(EXIT_FAILURE, + "rte_gro_ctx_create() failed\n"); + } + } + +#if defined RTE_LIBRTE_PMD_SOFTNIC + if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { + RTE_ETH_FOREACH_DEV(pid) { + port = &ports[pid]; + const char *driver = port->dev_info.driver_name; + + if (strcmp(driver, "net_softnic") == 0) + port->softport.fwd_lcore_arg = fwd_lcores; + } + } +#endif + +} + + +void +reconfig(portid_t new_port_id, unsigned socket_id) +{ + struct rte_port *port; + int ret; + + /* Reconfiguration of Ethernet ports. */ + port = &ports[new_port_id]; + + ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info); + if (ret != 0) + return; + + /* set flag to initialize port/queue */ + port->need_reconfig = 1; + port->need_reconfig_queues = 1; + port->socket_id = socket_id; + + init_port_config(); +} + + +int +init_fwd_streams(void) +{ + portid_t pid; + struct rte_port *port; + streamid_t sm_id, nb_fwd_streams_new; + queueid_t q; + + /* set socket id according to numa or not */ + RTE_ETH_FOREACH_DEV(pid) { + port = &ports[pid]; + if (nb_rxq > port->dev_info.max_rx_queues) { + printf("Fail: nb_rxq(%d) is greater than " + "max_rx_queues(%d)\n", nb_rxq, + port->dev_info.max_rx_queues); + return -1; + } + if (nb_txq > port->dev_info.max_tx_queues) { + printf("Fail: nb_txq(%d) is greater than " + "max_tx_queues(%d)\n", nb_txq, + port->dev_info.max_tx_queues); + return -1; + } + if (numa_support) { + if (port_numa[pid] != NUMA_NO_CONFIG) + port->socket_id = port_numa[pid]; + else { + port->socket_id = rte_eth_dev_socket_id(pid); + + /* + * if socket_id is invalid, + * set to the first available socket. 
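+ * (socket_ids[0] is used, matching the fallback in init_config() above.)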
+ */ + if (check_socket_id(port->socket_id) < 0) + port->socket_id = socket_ids[0]; + } + } + else { + if (socket_num == UMA_NO_CONFIG) + port->socket_id = 0; + else + port->socket_id = socket_num; + } + } + + q = RTE_MAX(nb_rxq, nb_txq); + if (q == 0) { + printf("Fail: Cannot allocate fwd streams as number of queues is 0\n"); + return -1; + } + nb_fwd_streams_new = (streamid_t)(nb_ports * q); + if (nb_fwd_streams_new == nb_fwd_streams) + return 0; + /* clear the old */ + if (fwd_streams != NULL) { + for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { + if (fwd_streams[sm_id] == NULL) + continue; + rte_free(fwd_streams[sm_id]); + fwd_streams[sm_id] = NULL; + } + rte_free(fwd_streams); + fwd_streams = NULL; + } + + /* init new */ + nb_fwd_streams = nb_fwd_streams_new; + if (nb_fwd_streams) { + fwd_streams = rte_zmalloc("testpmd: fwd_streams", + sizeof(struct fwd_stream *) * nb_fwd_streams, + RTE_CACHE_LINE_SIZE); + if (fwd_streams == NULL) + rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" + " (struct fwd_stream *)) failed\n", + nb_fwd_streams); + + for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { + fwd_streams[sm_id] = rte_zmalloc("testpmd:" + " struct fwd_stream", sizeof(struct fwd_stream), + RTE_CACHE_LINE_SIZE); + if (fwd_streams[sm_id] == NULL) + rte_exit(EXIT_FAILURE, "rte_zmalloc" + "(struct fwd_stream) failed\n"); + } + } + + return 0; +} + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS +static void +pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) +{ + unsigned int total_burst; + unsigned int nb_burst; + unsigned int burst_stats[3]; + uint16_t pktnb_stats[3]; + uint16_t nb_pkt; + int burst_percent[3]; + + /* + * First compute the total number of packet bursts and the + * two highest numbers of bursts of the same number of packets. + */ + total_burst = 0; + burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; + pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; + for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { + nb_burst = pbs->pkt_burst_spread[nb_pkt]; + if (nb_burst == 0) + continue; + total_burst += nb_burst; + if (nb_burst > burst_stats[0]) { + burst_stats[1] = burst_stats[0]; + pktnb_stats[1] = pktnb_stats[0]; + burst_stats[0] = nb_burst; + pktnb_stats[0] = nb_pkt; + } else if (nb_burst > burst_stats[1]) { + burst_stats[1] = nb_burst; + pktnb_stats[1] = nb_pkt; + } + } + if (total_burst == 0) + return; + burst_percent[0] = (burst_stats[0] * 100) / total_burst; + printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, + burst_percent[0], (int) pktnb_stats[0]); + if (burst_stats[0] == total_burst) { + printf("]\n"); + return; + } + if (burst_stats[0] + burst_stats[1] == total_burst) { + printf(" + %d%% of %d pkts]\n", + 100 - burst_percent[0], pktnb_stats[1]); + return; + } + burst_percent[1] = (burst_stats[1] * 100) / total_burst; + burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); + if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { + printf(" + %d%% of others]\n", 100 - burst_percent[0]); + return; + } + printf(" + %d%% of %d pkts + %d%% of others]\n", + burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); +} +#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ + +static void +fwd_stream_stats_display(streamid_t stream_id) +{ + struct fwd_stream *fs; + static const char *fwd_top_stats_border = "-------"; + + fs = fwd_streams[stream_id]; + if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && + (fs->fwd_dropped == 0)) + return; + printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " + "TX Port=%2d/Queue=%2d %s\n", + 
fwd_top_stats_border, fs->rx_port, fs->rx_queue, + fs->tx_port, fs->tx_queue, fwd_top_stats_border); + printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 + " TX-dropped: %-14"PRIu64, + fs->rx_packets, fs->tx_packets, fs->fwd_dropped); + + /* if checksum mode */ + if (cur_fwd_eng == &csum_fwd_engine) { + printf(" RX- bad IP checksum: %-14"PRIu64 + " Rx- bad L4 checksum: %-14"PRIu64 + " Rx- bad outer L4 checksum: %-14"PRIu64"\n", + fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, + fs->rx_bad_outer_l4_csum); + } else { + printf("\n"); + } + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + pkt_burst_stats_display("RX", &fs->rx_burst_stats); + pkt_burst_stats_display("TX", &fs->tx_burst_stats); +#endif +} + +void +fwd_stats_display(void) +{ + static const char *fwd_stats_border = "----------------------"; + static const char *acc_stats_border = "+++++++++++++++"; + struct { + struct fwd_stream *rx_stream; + struct fwd_stream *tx_stream; + uint64_t tx_dropped; + uint64_t rx_bad_ip_csum; + uint64_t rx_bad_l4_csum; + uint64_t rx_bad_outer_l4_csum; + } ports_stats[RTE_MAX_ETHPORTS]; + uint64_t total_rx_dropped = 0; + uint64_t total_tx_dropped = 0; + uint64_t total_rx_nombuf = 0; + struct rte_eth_stats stats; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t fwd_cycles = 0; +#endif + uint64_t total_recv = 0; + uint64_t total_xmit = 0; + struct rte_port *port; + streamid_t sm_id; + portid_t pt_id; + int i; + + memset(ports_stats, 0, sizeof(ports_stats)); + + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { + struct fwd_stream *fs = fwd_streams[sm_id]; + + if (cur_fwd_config.nb_fwd_streams > + cur_fwd_config.nb_fwd_ports) { + fwd_stream_stats_display(sm_id); + } else { + ports_stats[fs->tx_port].tx_stream = fs; + ports_stats[fs->rx_port].rx_stream = fs; + } + + ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; + + ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; + ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; + ports_stats[fs->rx_port].rx_bad_outer_l4_csum += + fs->rx_bad_outer_l4_csum; + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + fwd_cycles += fs->core_cycles; +#endif + } + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + uint8_t j; + + pt_id = fwd_ports_ids[i]; + port = &ports[pt_id]; + + rte_eth_stats_get(pt_id, &stats); + stats.ipackets -= port->stats.ipackets; + stats.opackets -= port->stats.opackets; + stats.ibytes -= port->stats.ibytes; + stats.obytes -= port->stats.obytes; + stats.imissed -= port->stats.imissed; + stats.oerrors -= port->stats.oerrors; + stats.rx_nombuf -= port->stats.rx_nombuf; + + total_recv += stats.ipackets; + total_xmit += stats.opackets; + total_rx_dropped += stats.imissed; + total_tx_dropped += ports_stats[pt_id].tx_dropped; + total_tx_dropped += stats.oerrors; + total_rx_nombuf += stats.rx_nombuf; + + printf("\n %s Forward statistics for port %-2d %s\n", + fwd_stats_border, pt_id, fwd_stats_border); + + if (!port->rx_queue_stats_mapping_enabled && + !port->tx_queue_stats_mapping_enabled) { + printf(" RX-packets: %-14"PRIu64 + " RX-dropped: %-14"PRIu64 + "RX-total: %-"PRIu64"\n", + stats.ipackets, stats.imissed, + stats.ipackets + stats.imissed); + + if (cur_fwd_eng == &csum_fwd_engine) + printf(" Bad-ipcsum: %-14"PRIu64 + " Bad-l4csum: %-14"PRIu64 + "Bad-outer-l4csum: %-14"PRIu64"\n", + ports_stats[pt_id].rx_bad_ip_csum, + ports_stats[pt_id].rx_bad_l4_csum, + ports_stats[pt_id].rx_bad_outer_l4_csum); + if (stats.ierrors + stats.rx_nombuf > 0) { + printf(" RX-error: %-"PRIu64"\n", + stats.ierrors); + printf(" RX-nombufs: 
%-14"PRIu64"\n", + stats.rx_nombuf); + } + + printf(" TX-packets: %-14"PRIu64 + " TX-dropped: %-14"PRIu64 + "TX-total: %-"PRIu64"\n", + stats.opackets, ports_stats[pt_id].tx_dropped, + stats.opackets + ports_stats[pt_id].tx_dropped); + } else { + printf(" RX-packets: %14"PRIu64 + " RX-dropped:%14"PRIu64 + " RX-total:%14"PRIu64"\n", + stats.ipackets, stats.imissed, + stats.ipackets + stats.imissed); + + if (cur_fwd_eng == &csum_fwd_engine) + printf(" Bad-ipcsum:%14"PRIu64 + " Bad-l4csum:%14"PRIu64 + " Bad-outer-l4csum: %-14"PRIu64"\n", + ports_stats[pt_id].rx_bad_ip_csum, + ports_stats[pt_id].rx_bad_l4_csum, + ports_stats[pt_id].rx_bad_outer_l4_csum); + if ((stats.ierrors + stats.rx_nombuf) > 0) { + printf(" RX-error:%"PRIu64"\n", stats.ierrors); + printf(" RX-nombufs: %14"PRIu64"\n", + stats.rx_nombuf); + } + + printf(" TX-packets: %14"PRIu64 + " TX-dropped:%14"PRIu64 + " TX-total:%14"PRIu64"\n", + stats.opackets, ports_stats[pt_id].tx_dropped, + stats.opackets + ports_stats[pt_id].tx_dropped); + } + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + if (ports_stats[pt_id].rx_stream) + pkt_burst_stats_display("RX", + &ports_stats[pt_id].rx_stream->rx_burst_stats); + if (ports_stats[pt_id].tx_stream) + pkt_burst_stats_display("TX", + &ports_stats[pt_id].tx_stream->tx_burst_stats); +#endif + + if (port->rx_queue_stats_mapping_enabled) { + printf("\n"); + for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { + printf(" Stats reg %2d RX-packets:%14"PRIu64 + " RX-errors:%14"PRIu64 + " RX-bytes:%14"PRIu64"\n", + j, stats.q_ipackets[j], + stats.q_errors[j], stats.q_ibytes[j]); + } + printf("\n"); + } + if (port->tx_queue_stats_mapping_enabled) { + for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { + printf(" Stats reg %2d TX-packets:%14"PRIu64 + " TX-bytes:%14" + PRIu64"\n", + j, stats.q_opackets[j], + stats.q_obytes[j]); + } + } + + printf(" %s--------------------------------%s\n", + fwd_stats_border, fwd_stats_border); + } + + printf("\n %s Accumulated forward statistics for all ports" + "%s\n", + acc_stats_border, acc_stats_border); + printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " + "%-"PRIu64"\n" + " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " + "%-"PRIu64"\n", + total_recv, total_rx_dropped, total_recv + total_rx_dropped, + total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); + if (total_rx_nombuf > 0) + printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); + printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" + "%s\n", + acc_stats_border, acc_stats_border); +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES +#define CYC_PER_MHZ 1E6 + if (total_recv > 0) + printf("\n CPU cycles/packet=%.2F (total cycles=" + "%"PRIu64" / total RX packets=%"PRIu64") at %"PRIu64 + " MHz Clock\n", + (double) fwd_cycles / total_recv, + fwd_cycles, total_recv, + (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ)); +#endif +} + +void +fwd_stats_reset(void) +{ + streamid_t sm_id; + portid_t pt_id; + int i; + + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + pt_id = fwd_ports_ids[i]; + rte_eth_stats_get(pt_id, &ports[pt_id].stats); + } + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { + struct fwd_stream *fs = fwd_streams[sm_id]; + + fs->rx_packets = 0; + fs->tx_packets = 0; + fs->fwd_dropped = 0; + fs->rx_bad_ip_csum = 0; + fs->rx_bad_l4_csum = 0; + fs->rx_bad_outer_l4_csum = 0; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); + memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); +#endif +#ifdef 
RTE_TEST_PMD_RECORD_CORE_CYCLES + fs->core_cycles = 0; +#endif + } +} + +static void +flush_fwd_rx_queues(void) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + portid_t rxp; + portid_t port_id; + queueid_t rxq; + uint16_t nb_rx; + uint16_t i; + uint8_t j; + uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0; + uint64_t timer_period; + + /* convert to number of cycles */ + timer_period = rte_get_timer_hz(); /* 1 second timeout */ + + for (j = 0; j < 2; j++) { + for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) { + for (rxq = 0; rxq < nb_rxq; rxq++) { + port_id = fwd_ports_ids[rxp]; + /** + * testpmd can stuck in the below do while loop + * if rte_eth_rx_burst() always returns nonzero + * packets. So timer is added to exit this loop + * after 1sec timer expiry. + */ + prev_tsc = rte_rdtsc(); + do { + nb_rx = rte_eth_rx_burst(port_id, rxq, + pkts_burst, MAX_PKT_BURST); + for (i = 0; i < nb_rx; i++) + rte_pktmbuf_free(pkts_burst[i]); + + cur_tsc = rte_rdtsc(); + diff_tsc = cur_tsc - prev_tsc; + timer_tsc += diff_tsc; + } while ((nb_rx > 0) && + (timer_tsc < timer_period)); + timer_tsc = 0; + } + } + rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ + } +} + +static void +run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) +{ + struct fwd_stream **fsm; + streamid_t nb_fs; + streamid_t sm_id; +#ifdef RTE_LIBRTE_BITRATE + uint64_t tics_per_1sec; + uint64_t tics_datum; + uint64_t tics_current; + uint16_t i, cnt_ports; + + cnt_ports = nb_ports; + tics_datum = rte_rdtsc(); + tics_per_1sec = rte_get_timer_hz(); +#endif + fsm = &fwd_streams[fc->stream_idx]; + nb_fs = fc->stream_nb; + do { + for (sm_id = 0; sm_id < nb_fs; sm_id++) + (*pkt_fwd)(fsm[sm_id]); +#ifdef RTE_LIBRTE_BITRATE + if (bitrate_enabled != 0 && + bitrate_lcore_id == rte_lcore_id()) { + tics_current = rte_rdtsc(); + if (tics_current - tics_datum >= tics_per_1sec) { + /* Periodic bitrate calculation */ + for (i = 0; i < cnt_ports; i++) + rte_stats_bitrate_calc(bitrate_data, + ports_ids[i]); + tics_datum = tics_current; + } + } +#endif +#ifdef RTE_LIBRTE_LATENCY_STATS + if (latencystats_enabled != 0 && + latencystats_lcore_id == rte_lcore_id()) + rte_latencystats_update(); +#endif + + } while (! fc->stopped); +} + +static int +start_pkt_forward_on_core(void *fwd_arg) +{ + run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, + cur_fwd_config.fwd_eng->packet_fwd); + return 0; +} + +/* + * Run the TXONLY packet forwarding engine to send a single burst of packets. + * Used to start communication flows in network loopback test configurations. + */ +static int +run_one_txonly_burst_on_core(void *fwd_arg) +{ + struct fwd_lcore *fwd_lc; + struct fwd_lcore tmp_lcore; + + fwd_lc = (struct fwd_lcore *) fwd_arg; + tmp_lcore = *fwd_lc; + tmp_lcore.stopped = 1; + run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); + return 0; +} + +/* + * Launch packet forwarding: + * - Setup per-port forwarding context. + * - launch logical cores with their forwarding configuration. 
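+ * - In interactive mode the lcore running the command line (rte_lcore_id()) is not launched.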
+ */ +static void +launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) +{ + port_fwd_begin_t port_fwd_begin; + unsigned int i; + unsigned int lc_id; + int diag; + + port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; + if (port_fwd_begin != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) + (*port_fwd_begin)(fwd_ports_ids[i]); + } + for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { + lc_id = fwd_lcores_cpuids[i]; + if ((interactive == 0) || (lc_id != rte_lcore_id())) { + fwd_lcores[i]->stopped = 0; + diag = rte_eal_remote_launch(pkt_fwd_on_lcore, + fwd_lcores[i], lc_id); + if (diag != 0) + printf("launch lcore %u failed - diag=%d\n", + lc_id, diag); + } + } +} + +/* + * Launch packet forwarding configuration. + */ +void +start_packet_forwarding(int with_tx_first) +{ + port_fwd_begin_t port_fwd_begin; + port_fwd_end_t port_fwd_end; + struct rte_port *port; + unsigned int i; + portid_t pt_id; + + if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) + rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); + + if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq) + rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n"); + + if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 && + strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) && + (!nb_rxq || !nb_txq)) + rte_exit(EXIT_FAILURE, + "Either rxq or txq are 0, cannot use %s fwd mode\n", + cur_fwd_eng->fwd_mode_name); + + if (all_ports_started() == 0) { + printf("Not all ports were started\n"); + return; + } + if (test_done == 0) { + printf("Packet forwarding already started\n"); + return; + } + + + if(dcb_test) { + for (i = 0; i < nb_fwd_ports; i++) { + pt_id = fwd_ports_ids[i]; + port = &ports[pt_id]; + if (!port->dcb_flag) { + printf("In DCB mode, all forwarding ports must " + "be configured in this mode.\n"); + return; + } + } + if (nb_fwd_lcores == 1) { + printf("In DCB mode,the nb forwarding cores " + "should be larger than 1.\n"); + return; + } + } + test_done = 0; + + fwd_config_setup(); + + if(!no_flush_rx) + flush_fwd_rx_queues(); + + pkt_fwd_config_display(&cur_fwd_config); + rxtx_config_display(); + + fwd_stats_reset(); + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + pt_id = fwd_ports_ids[i]; + port = &ports[pt_id]; + map_port_queue_stats_mapping_registers(pt_id, port); + } + if (with_tx_first) { + port_fwd_begin = tx_only_engine.port_fwd_begin; + if (port_fwd_begin != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) + (*port_fwd_begin)(fwd_ports_ids[i]); + } + while (with_tx_first--) { + launch_packet_forwarding( + run_one_txonly_burst_on_core); + rte_eal_mp_wait_lcore(); + } + port_fwd_end = tx_only_engine.port_fwd_end; + if (port_fwd_end != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) + (*port_fwd_end)(fwd_ports_ids[i]); + } + } + launch_packet_forwarding(start_pkt_forward_on_core); +} + +void +stop_packet_forwarding(void) +{ + port_fwd_end_t port_fwd_end; + lcoreid_t lc_id; + portid_t pt_id; + int i; + + if (test_done) { + printf("Packet forwarding not started\n"); + return; + } + printf("Telling cores to stop..."); + for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) + fwd_lcores[lc_id]->stopped = 1; + printf("\nWaiting for lcores to finish...\n"); + rte_eal_mp_wait_lcore(); + port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; + if (port_fwd_end != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + pt_id = fwd_ports_ids[i]; + (*port_fwd_end)(pt_id); + } + } + + fwd_stats_display(); + + 
printf("\nDone.\n"); + test_done = 1; +} + +void +dev_set_link_up(portid_t pid) +{ + if (rte_eth_dev_set_link_up(pid) < 0) + printf("\nSet link up fail.\n"); +} + +void +dev_set_link_down(portid_t pid) +{ + if (rte_eth_dev_set_link_down(pid) < 0) + printf("\nSet link down fail.\n"); +} + +static int +all_ports_started(void) +{ + portid_t pi; + struct rte_port *port; + + RTE_ETH_FOREACH_DEV(pi) { + port = &ports[pi]; + /* Check if there is a port which is not started */ + if ((port->port_status != RTE_PORT_STARTED) && + (port->slave_flag == 0)) + return 0; + } + + /* No port is not started */ + return 1; +} + +int +port_is_stopped(portid_t port_id) +{ + struct rte_port *port = &ports[port_id]; + + if ((port->port_status != RTE_PORT_STOPPED) && + (port->slave_flag == 0)) + return 0; + return 1; +} + +int +all_ports_stopped(void) +{ + portid_t pi; + + RTE_ETH_FOREACH_DEV(pi) { + if (!port_is_stopped(pi)) + return 0; + } + + return 1; +} + +int +port_is_started(portid_t port_id) +{ + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return 0; + + if (ports[port_id].port_status != RTE_PORT_STARTED) + return 0; + + return 1; +} + +/* Configure the Rx and Tx hairpin queues for the selected port. */ +static int +setup_hairpin_queues(portid_t pi) +{ + queueid_t qi; + struct rte_eth_hairpin_conf hairpin_conf = { + .peer_count = 1, + }; + int i; + int diag; + struct rte_port *port = &ports[pi]; + + for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) { + hairpin_conf.peers[0].port = pi; + hairpin_conf.peers[0].queue = i + nb_rxq; + diag = rte_eth_tx_hairpin_queue_setup + (pi, qi, nb_txd, &hairpin_conf); + i++; + if (diag == 0) + continue; + + /* Fail to setup rx queue, return */ + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_HANDLING, + RTE_PORT_STOPPED) == 0) + printf("Port %d can not be set back " + "to stopped\n", pi); + printf("Fail to configure port %d hairpin " + "queues\n", pi); + /* try to reconfigure queues next time */ + port->need_reconfig_queues = 1; + return -1; + } + for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) { + hairpin_conf.peers[0].port = pi; + hairpin_conf.peers[0].queue = i + nb_txq; + diag = rte_eth_rx_hairpin_queue_setup + (pi, qi, nb_rxd, &hairpin_conf); + i++; + if (diag == 0) + continue; + + /* Fail to setup rx queue, return */ + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_HANDLING, + RTE_PORT_STOPPED) == 0) + printf("Port %d can not be set back " + "to stopped\n", pi); + printf("Fail to configure port %d hairpin " + "queues\n", pi); + /* try to reconfigure queues next time */ + port->need_reconfig_queues = 1; + return -1; + } + return 0; +} + +int +start_port(portid_t pid) +{ + int diag, need_check_link_status = -1; + portid_t pi; + queueid_t qi; + struct rte_port *port; + struct rte_ether_addr mac_addr; + struct rte_eth_hairpin_cap cap; + + if (port_id_is_invalid(pid, ENABLED_WARN)) + return 0; + + if(dcb_config) + dcb_test = 1; + RTE_ETH_FOREACH_DEV(pi) { + if (pid != pi && pid != (portid_t)RTE_PORT_ALL) + continue; + + need_check_link_status = 0; + port = &ports[pi]; + if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, + RTE_PORT_HANDLING) == 0) { + printf("Port %d is now not stopped\n", pi); + continue; + } + + if (port->need_reconfig > 0) { + port->need_reconfig = 0; + + if (flow_isolate_all) { + int ret = port_flow_isolate(pi, 1); + if (ret) { + printf("Failed to apply isolated" + " mode on port %d\n", pi); + return -1; + } + } + configure_rxtx_dump_callbacks(0); + printf("Configuring Port %d (socket %u)\n", pi, + 
port->socket_id); + if (nb_hairpinq > 0 && + rte_eth_dev_hairpin_capability_get(pi, &cap)) { + printf("Port %d doesn't support hairpin " + "queues\n", pi); + return -1; + } + /* configure port */ + diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq, + nb_txq + nb_hairpinq, + &(port->dev_conf)); + if (diag != 0) { + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) + printf("Port %d can not be set back " + "to stopped\n", pi); + printf("Fail to configure port %d\n", pi); + /* try to reconfigure port next time */ + port->need_reconfig = 1; + return -1; + } + } + if (port->need_reconfig_queues > 0) { + port->need_reconfig_queues = 0; + /* setup tx queues */ + for (qi = 0; qi < nb_txq; qi++) { + if ((numa_support) && + (txring_numa[pi] != NUMA_NO_CONFIG)) + diag = rte_eth_tx_queue_setup(pi, qi, + port->nb_tx_desc[qi], + txring_numa[pi], + &(port->tx_conf[qi])); + else + diag = rte_eth_tx_queue_setup(pi, qi, + port->nb_tx_desc[qi], + port->socket_id, + &(port->tx_conf[qi])); + + if (diag == 0) + continue; + + /* Fail to setup tx queue, return */ + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_HANDLING, + RTE_PORT_STOPPED) == 0) + printf("Port %d can not be set back " + "to stopped\n", pi); + printf("Fail to configure port %d tx queues\n", + pi); + /* try to reconfigure queues next time */ + port->need_reconfig_queues = 1; + return -1; + } + for (qi = 0; qi < nb_rxq; qi++) { + /* setup rx queues */ + if ((numa_support) && + (rxring_numa[pi] != NUMA_NO_CONFIG)) { + struct rte_mempool * mp = + mbuf_pool_find(rxring_numa[pi]); + if (mp == NULL) { + printf("Failed to setup RX queue:" + "No mempool allocation" + " on the socket %d\n", + rxring_numa[pi]); + return -1; + } + + diag = rte_eth_rx_queue_setup(pi, qi, + port->nb_rx_desc[qi], + rxring_numa[pi], + &(port->rx_conf[qi]), + mp); + } else { + struct rte_mempool *mp = + mbuf_pool_find(port->socket_id); + if (mp == NULL) { + printf("Failed to setup RX queue:" + "No mempool allocation" + " on the socket %d\n", + port->socket_id); + return -1; + } + diag = rte_eth_rx_queue_setup(pi, qi, + port->nb_rx_desc[qi], + port->socket_id, + &(port->rx_conf[qi]), + mp); + } + if (diag == 0) + continue; + + /* Fail to setup rx queue, return */ + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_HANDLING, + RTE_PORT_STOPPED) == 0) + printf("Port %d can not be set back " + "to stopped\n", pi); + printf("Fail to configure port %d rx queues\n", + pi); + /* try to reconfigure queues next time */ + port->need_reconfig_queues = 1; + return -1; + } + /* setup hairpin queues */ + if (setup_hairpin_queues(pi) != 0) + return -1; + } + configure_rxtx_dump_callbacks(verbose_level); + if (clear_ptypes) { + diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN, + NULL, 0); + if (diag < 0) + printf( + "Port %d: Failed to disable Ptype parsing\n", + pi); + } + + /* start port */ + if (rte_eth_dev_start(pi) < 0) { + printf("Fail to start port %d\n", pi); + + /* Fail to setup rx queue, return */ + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) + printf("Port %d can not be set back to " + "stopped\n", pi); + continue; + } + + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) + printf("Port %d can not be set into started\n", pi); + + if (eth_macaddr_get_print_err(pi, &mac_addr) == 0) + printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi, + mac_addr.addr_bytes[0], mac_addr.addr_bytes[1], + mac_addr.addr_bytes[2], mac_addr.addr_bytes[3], + 
mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]); + + /* at least one port started, need checking link status */ + need_check_link_status = 1; + } + + if (need_check_link_status == 1 && !no_link_check) + check_all_ports_link_status(RTE_PORT_ALL); + else if (need_check_link_status == 0) + printf("Please stop the ports first\n"); + + printf("Done\n"); + return 0; +} + +void +stop_port(portid_t pid) +{ + portid_t pi; + struct rte_port *port; + int need_check_link_status = 0; + + if (dcb_test) { + dcb_test = 0; + dcb_config = 0; + } + + if (port_id_is_invalid(pid, ENABLED_WARN)) + return; + + printf("Stopping ports...\n"); + + RTE_ETH_FOREACH_DEV(pi) { + if (pid != pi && pid != (portid_t)RTE_PORT_ALL) + continue; + + if (port_is_forwarding(pi) != 0 && test_done == 0) { + printf("Please remove port %d from forwarding configuration.\n", pi); + continue; + } + + if (port_is_bonding_slave(pi)) { + printf("Please remove port %d from bonded device.\n", pi); + continue; + } + + port = &ports[pi]; + if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, + RTE_PORT_HANDLING) == 0) + continue; + + rte_eth_dev_stop(pi); + + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) + printf("Port %d can not be set into stopped\n", pi); + need_check_link_status = 1; + } + if (need_check_link_status && !no_link_check) + check_all_ports_link_status(RTE_PORT_ALL); + + printf("Done\n"); +} + +static void +remove_invalid_ports_in(portid_t *array, portid_t *total) +{ + portid_t i; + portid_t new_total = 0; + + for (i = 0; i < *total; i++) + if (!port_id_is_invalid(array[i], DISABLED_WARN)) { + array[new_total] = array[i]; + new_total++; + } + *total = new_total; +} + +static void +remove_invalid_ports(void) +{ + remove_invalid_ports_in(ports_ids, &nb_ports); + remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); + nb_cfg_ports = nb_fwd_ports; +} + +void +close_port(portid_t pid) +{ + portid_t pi; + struct rte_port *port; + + if (port_id_is_invalid(pid, ENABLED_WARN)) + return; + + printf("Closing ports...\n"); + + RTE_ETH_FOREACH_DEV(pi) { + if (pid != pi && pid != (portid_t)RTE_PORT_ALL) + continue; + + if (port_is_forwarding(pi) != 0 && test_done == 0) { + printf("Please remove port %d from forwarding configuration.\n", pi); + continue; + } + + if (port_is_bonding_slave(pi)) { + printf("Please remove port %d from bonded device.\n", pi); + continue; + } + + port = &ports[pi]; + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { + printf("Port %d is already closed\n", pi); + continue; + } + + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) { + printf("Port %d is now not stopped\n", pi); + continue; + } + + if (port->flow_list) + port_flow_flush(pi); + rte_eth_dev_close(pi); + + remove_invalid_ports(); + + if (rte_atomic16_cmpset(&(port->port_status), + RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) + printf("Port %d cannot be set to closed\n", pi); + } + + printf("Done\n"); +} + +void +reset_port(portid_t pid) +{ + int diag; + portid_t pi; + struct rte_port *port; + + if (port_id_is_invalid(pid, ENABLED_WARN)) + return; + + if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) || + (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) { + printf("Can not reset port(s), please stop port(s) first.\n"); + return; + } + + printf("Resetting ports...\n"); + + RTE_ETH_FOREACH_DEV(pi) { + if (pid != pi && pid != (portid_t)RTE_PORT_ALL) + continue; + + if (port_is_forwarding(pi) != 0 && 
test_done == 0) { + printf("Please remove port %d from forwarding " + "configuration.\n", pi); + continue; + } + + if (port_is_bonding_slave(pi)) { + printf("Please remove port %d from bonded device.\n", + pi); + continue; + } + + diag = rte_eth_dev_reset(pi); + if (diag == 0) { + port = &ports[pi]; + port->need_reconfig = 1; + port->need_reconfig_queues = 1; + } else { + printf("Failed to reset port %d. diag=%d\n", pi, diag); + } + } + + printf("Done\n"); +} + +void +attach_port(char *identifier) +{ + portid_t pi; + struct rte_dev_iterator iterator; + + printf("Attaching a new port...\n"); + + if (identifier == NULL) { + printf("Invalid parameters are specified\n"); + return; + } + + if (rte_dev_probe(identifier) < 0) { + TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); + return; + } + + /* first attach mode: event */ + if (setup_on_probe_event) { + /* new ports are detected on RTE_ETH_EVENT_NEW event */ + for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) + if (ports[pi].port_status == RTE_PORT_HANDLING && + ports[pi].need_setup != 0) + setup_attached_port(pi); + return; + } + + /* second attach mode: iterator */ + RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { + /* setup ports matching the devargs used for probing */ + if (port_is_forwarding(pi)) + continue; /* port was already attached before */ + setup_attached_port(pi); + } +} + +static void +setup_attached_port(portid_t pi) +{ + unsigned int socket_id; + int ret; + + socket_id = (unsigned)rte_eth_dev_socket_id(pi); + /* if socket_id is invalid, set to the first available socket. */ + if (check_socket_id(socket_id) < 0) + socket_id = socket_ids[0]; + reconfig(pi, socket_id); + ret = rte_eth_promiscuous_enable(pi); + if (ret != 0) + printf("Error during enabling promiscuous mode for port %u: %s - ignore\n", + pi, rte_strerror(-ret)); + + ports_ids[nb_ports++] = pi; + fwd_ports_ids[nb_fwd_ports++] = pi; + nb_cfg_ports = nb_fwd_ports; + ports[pi].need_setup = 0; + ports[pi].port_status = RTE_PORT_STOPPED; + + printf("Port %d is attached. 
Now total ports is %d\n", pi, nb_ports); + printf("Done\n"); +} + +static void +detach_device(struct rte_device *dev) +{ + portid_t sibling; + + if (dev == NULL) { + printf("Device already removed\n"); + return; + } + + printf("Removing a device...\n"); + + if (rte_dev_remove(dev) < 0) { + TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name); + return; + } + RTE_ETH_FOREACH_DEV_OF(sibling, dev) { + /* reset mapping between old ports and removed device */ + rte_eth_devices[sibling].device = NULL; + if (ports[sibling].port_status != RTE_PORT_CLOSED) { + /* sibling ports are forced to be closed */ + ports[sibling].port_status = RTE_PORT_CLOSED; + printf("Port %u is closed\n", sibling); + } + } + + remove_invalid_ports(); + + printf("Device is detached\n"); + printf("Now total ports is %d\n", nb_ports); + printf("Done\n"); + return; +} + +void +detach_port_device(portid_t port_id) +{ + if (port_id_is_invalid(port_id, ENABLED_WARN)) + return; + + if (ports[port_id].port_status != RTE_PORT_CLOSED) { + if (ports[port_id].port_status != RTE_PORT_STOPPED) { + printf("Port not stopped\n"); + return; + } + printf("Port was not closed\n"); + if (ports[port_id].flow_list) + port_flow_flush(port_id); + } + + detach_device(rte_eth_devices[port_id].device); +} + +void +detach_devargs(char *identifier) +{ + struct rte_dev_iterator iterator; + struct rte_devargs da; + portid_t port_id; + + printf("Removing a device...\n"); + + memset(&da, 0, sizeof(da)); + if (rte_devargs_parsef(&da, "%s", identifier)) { + printf("cannot parse identifier\n"); + if (da.args) + free(da.args); + return; + } + + RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) { + if (ports[port_id].port_status != RTE_PORT_CLOSED) { + if (ports[port_id].port_status != RTE_PORT_STOPPED) { + printf("Port %u not stopped\n", port_id); + rte_eth_iterator_cleanup(&iterator); + return; + } + + /* sibling ports are forced to be closed */ + if (ports[port_id].flow_list) + port_flow_flush(port_id); + ports[port_id].port_status = RTE_PORT_CLOSED; + printf("Port %u is now closed\n", port_id); + } + } + + if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) { + TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n", + da.name, da.bus->name); + return; + } + + remove_invalid_ports(); + + printf("Device %s is detached\n", identifier); + printf("Now total ports is %d\n", nb_ports); + printf("Done\n"); +} + +void +pmd_test_exit(void) +{ + portid_t pt_id; + int ret; + int i; + + if (test_done == 0) + stop_packet_forwarding(); + + for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) { + if (mempools[i]) { + if (mp_alloc_type == MP_ALLOC_ANON) + rte_mempool_mem_iter(mempools[i], dma_unmap_cb, + NULL); + } + } + if (ports != NULL) { + no_link_check = 1; + RTE_ETH_FOREACH_DEV(pt_id) { + printf("\nStopping port %d...\n", pt_id); + fflush(stdout); + stop_port(pt_id); + } + RTE_ETH_FOREACH_DEV(pt_id) { + printf("\nShutting down port %d...\n", pt_id); + fflush(stdout); + close_port(pt_id); + } + } + + if (hot_plug) { + ret = rte_dev_event_monitor_stop(); + if (ret) { + RTE_LOG(ERR, EAL, + "fail to stop device event monitor."); + return; + } + + ret = rte_dev_event_callback_unregister(NULL, + dev_event_callback, NULL); + if (ret < 0) { + RTE_LOG(ERR, EAL, + "fail to unregister device event callback.\n"); + return; + } + + ret = rte_dev_hotplug_handle_disable(); + if (ret) { + RTE_LOG(ERR, EAL, + "fail to disable hotplug handling.\n"); + return; + } + } + for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) { + if (mempools[i]) + rte_mempool_free(mempools[i]); + } + + 
printf("\nBye...\n"); +} + +typedef void (*cmd_func_t)(void); +struct pmd_test_command { + const char *cmd_name; + cmd_func_t cmd_func; +}; + +/* Check the link status of all ports in up to 9s, and print them finally */ +static void +check_all_ports_link_status(uint32_t port_mask) +{ +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ + portid_t portid; + uint8_t count, all_ports_up, print_flag = 0; + struct rte_eth_link link; + int ret; + + printf("Checking link statuses...\n"); + fflush(stdout); + for (count = 0; count <= MAX_CHECK_TIME; count++) { + all_ports_up = 1; + RTE_ETH_FOREACH_DEV(portid) { + if ((port_mask & (1 << portid)) == 0) + continue; + memset(&link, 0, sizeof(link)); + ret = rte_eth_link_get_nowait(portid, &link); + if (ret < 0) { + all_ports_up = 0; + if (print_flag == 1) + printf("Port %u link get failed: %s\n", + portid, rte_strerror(-ret)); + continue; + } + /* print link status if flag set */ + if (print_flag == 1) { + if (link.link_status) + printf( + "Port%d Link Up. speed %u Mbps- %s\n", + portid, link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex")); + else + printf("Port %d Link Down\n", portid); + continue; + } + /* clear all_ports_up flag if any link down */ + if (link.link_status == ETH_LINK_DOWN) { + all_ports_up = 0; + break; + } + } + /* after finally printing all link status, get out */ + if (print_flag == 1) + break; + + if (all_ports_up == 0) { + fflush(stdout); + rte_delay_ms(CHECK_INTERVAL); + } + + /* set the print_flag if all ports up or timeout */ + if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { + print_flag = 1; + } + + if (lsc_interrupt) + break; + } +} + +/* + * This callback is for remove a port for a device. It has limitation because + * it is not for multiple port removal for a device. + * TODO: the device detach invoke will plan to be removed from user side to + * eal. And convert all PMDs to free port resources on ether device closing. 
+ */ +static void +rmv_port_callback(void *arg) +{ + int need_to_start = 0; + int org_no_link_check = no_link_check; + portid_t port_id = (intptr_t)arg; + struct rte_device *dev; + + RTE_ETH_VALID_PORTID_OR_RET(port_id); + + if (!test_done && port_is_forwarding(port_id)) { + need_to_start = 1; + stop_packet_forwarding(); + } + no_link_check = 1; + stop_port(port_id); + no_link_check = org_no_link_check; + + /* Save rte_device pointer before closing ethdev port */ + dev = rte_eth_devices[port_id].device; + close_port(port_id); + detach_device(dev); /* might be already removed or have more ports */ + + if (need_to_start) + start_packet_forwarding(0); +} + +/* This function is used by the interrupt thread */ +static int +eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, + void *ret_param) +{ + RTE_SET_USED(param); + RTE_SET_USED(ret_param); + + if (type >= RTE_ETH_EVENT_MAX) { + fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n", + port_id, __func__, type); + fflush(stderr); + } else if (event_print_mask & (UINT32_C(1) << type)) { + printf("\nPort %" PRIu16 ": %s event\n", port_id, + eth_event_desc[type]); + fflush(stdout); + } + + switch (type) { + case RTE_ETH_EVENT_NEW: + ports[port_id].need_setup = 1; + ports[port_id].port_status = RTE_PORT_HANDLING; + break; + case RTE_ETH_EVENT_INTR_RMV: + if (port_id_is_invalid(port_id, DISABLED_WARN)) + break; + if (rte_eal_alarm_set(100000, + rmv_port_callback, (void *)(intptr_t)port_id)) + fprintf(stderr, "Could not set up deferred device removal\n"); + break; + default: + break; + } + return 0; +} + +static int +register_eth_event_callback(void) +{ + int ret; + enum rte_eth_event_type event; + + for (event = RTE_ETH_EVENT_UNKNOWN; + event < RTE_ETH_EVENT_MAX; event++) { + ret = rte_eth_dev_callback_register(RTE_ETH_ALL, + event, + eth_event_callback, + NULL); + if (ret != 0) { + TESTPMD_LOG(ERR, "Failed to register callback for " + "%s event\n", eth_event_desc[event]); + return -1; + } + } + + return 0; +} + +/* This function is used by the interrupt thread */ +static void +dev_event_callback(const char *device_name, enum rte_dev_event_type type, + __rte_unused void *arg) +{ + uint16_t port_id; + int ret; + + if (type >= RTE_DEV_EVENT_MAX) { + fprintf(stderr, "%s called upon invalid event %d\n", + __func__, type); + fflush(stderr); + } + + switch (type) { + case RTE_DEV_EVENT_REMOVE: + RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n", + device_name); + ret = rte_eth_dev_get_port_by_name(device_name, &port_id); + if (ret) { + RTE_LOG(ERR, EAL, "can not get port by device %s!\n", + device_name); + return; + } + /* + * Because the user's callback is invoked in eal interrupt + * callback, the interrupt callback need to be finished before + * it can be unregistered when detaching device. So finish + * callback soon and use a deferred removal to detach device + * is need. It is a workaround, once the device detaching be + * moved into the eal in the future, the deferred removal could + * be deleted. + */ + if (rte_eal_alarm_set(100000, + rmv_port_callback, (void *)(intptr_t)port_id)) + RTE_LOG(ERR, EAL, + "Could not set up deferred device removal\n"); + break; + case RTE_DEV_EVENT_ADD: + RTE_LOG(ERR, EAL, "The device: %s has been added!\n", + device_name); + /* TODO: After finish kernel driver binding, + * begin to attach port. 
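+ * (For now the RTE_DEV_EVENT_ADD event is only logged and no port setup is performed here.)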
+ */ + break; + default: + break; + } +} + +static int +set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) +{ + uint16_t i; + int diag; + uint8_t mapping_found = 0; + + for (i = 0; i < nb_tx_queue_stats_mappings; i++) { + if ((tx_queue_stats_mappings[i].port_id == port_id) && + (tx_queue_stats_mappings[i].queue_id < nb_txq )) { + diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, + tx_queue_stats_mappings[i].queue_id, + tx_queue_stats_mappings[i].stats_counter_id); + if (diag != 0) + return diag; + mapping_found = 1; + } + } + if (mapping_found) + port->tx_queue_stats_mapping_enabled = 1; + return 0; +} + +static int +set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) +{ + uint16_t i; + int diag; + uint8_t mapping_found = 0; + + for (i = 0; i < nb_rx_queue_stats_mappings; i++) { + if ((rx_queue_stats_mappings[i].port_id == port_id) && + (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { + diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, + rx_queue_stats_mappings[i].queue_id, + rx_queue_stats_mappings[i].stats_counter_id); + if (diag != 0) + return diag; + mapping_found = 1; + } + } + if (mapping_found) + port->rx_queue_stats_mapping_enabled = 1; + return 0; +} + +static void +map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) +{ + int diag = 0; + + diag = set_tx_queue_stats_mapping_registers(pi, port); + if (diag != 0) { + if (diag == -ENOTSUP) { + port->tx_queue_stats_mapping_enabled = 0; + printf("TX queue stats mapping not supported port id=%d\n", pi); + } + else + rte_exit(EXIT_FAILURE, + "set_tx_queue_stats_mapping_registers " + "failed for port id=%d diag=%d\n", + pi, diag); + } + + diag = set_rx_queue_stats_mapping_registers(pi, port); + if (diag != 0) { + if (diag == -ENOTSUP) { + port->rx_queue_stats_mapping_enabled = 0; + printf("RX queue stats mapping not supported port id=%d\n", pi); + } + else + rte_exit(EXIT_FAILURE, + "set_rx_queue_stats_mapping_registers " + "failed for port id=%d diag=%d\n", + pi, diag); + } +} + +static void +rxtx_port_config(struct rte_port *port) +{ + uint16_t qid; + uint64_t offloads; + + for (qid = 0; qid < nb_rxq; qid++) { + offloads = port->rx_conf[qid].offloads; + port->rx_conf[qid] = port->dev_info.default_rxconf; + if (offloads != 0) + port->rx_conf[qid].offloads = offloads; + + /* Check if any Rx parameters have been passed */ + if (rx_pthresh != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh; + + if (rx_hthresh != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh; + + if (rx_wthresh != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh; + + if (rx_free_thresh != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_free_thresh = rx_free_thresh; + + if (rx_drop_en != RTE_PMD_PARAM_UNSET) + port->rx_conf[qid].rx_drop_en = rx_drop_en; + + port->nb_rx_desc[qid] = nb_rxd; + } + + for (qid = 0; qid < nb_txq; qid++) { + offloads = port->tx_conf[qid].offloads; + port->tx_conf[qid] = port->dev_info.default_txconf; + if (offloads != 0) + port->tx_conf[qid].offloads = offloads; + + /* Check if any Tx parameters have been passed */ + if (tx_pthresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh; + + if (tx_hthresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh; + + if (tx_wthresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh; + + if (tx_rs_thresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh; + + 
if (tx_free_thresh != RTE_PMD_PARAM_UNSET) + port->tx_conf[qid].tx_free_thresh = tx_free_thresh; + + port->nb_tx_desc[qid] = nb_txd; + } +} + +void +init_port_config(void) +{ + portid_t pid; + struct rte_port *port; + int ret; + + RTE_ETH_FOREACH_DEV(pid) { + port = &ports[pid]; + port->dev_conf.fdir_conf = fdir_conf; + + ret = eth_dev_info_get_print_err(pid, &port->dev_info); + if (ret != 0) + return; + + if (nb_rxq > 1) { + port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; + port->dev_conf.rx_adv_conf.rss_conf.rss_hf = + rss_hf & port->dev_info.flow_type_rss_offloads; + } else { + port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; + port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0; + } + + if (port->dcb_flag == 0) { + if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) + port->dev_conf.rxmode.mq_mode = + (enum rte_eth_rx_mq_mode) + (rx_mq_mode & ETH_MQ_RX_RSS); + else + port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; + } + + rxtx_port_config(port); + + ret = eth_macaddr_get_print_err(pid, &port->eth_addr); + if (ret != 0) + return; + + map_port_queue_stats_mapping_registers(pid, port); +#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS + rte_pmd_ixgbe_bypass_init(pid); +#endif + + if (lsc_interrupt && + (rte_eth_devices[pid].data->dev_flags & + RTE_ETH_DEV_INTR_LSC)) + port->dev_conf.intr_conf.lsc = 1; + if (rmv_interrupt && + (rte_eth_devices[pid].data->dev_flags & + RTE_ETH_DEV_INTR_RMV)) + port->dev_conf.intr_conf.rmv = 1; + } +} + +void set_port_slave_flag(portid_t slave_pid) +{ + struct rte_port *port; + + port = &ports[slave_pid]; + port->slave_flag = 1; +} + +void clear_port_slave_flag(portid_t slave_pid) +{ + struct rte_port *port; + + port = &ports[slave_pid]; + port->slave_flag = 0; +} + +uint8_t port_is_bonding_slave(portid_t slave_pid) +{ + struct rte_port *port; + + port = &ports[slave_pid]; + if ((rte_eth_devices[slave_pid].data->dev_flags & + RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1)) + return 1; + return 0; +} + +const uint16_t vlan_tags[] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 +}; + +static int +get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, + enum dcb_mode_enable dcb_mode, + enum rte_eth_nb_tcs num_tcs, + uint8_t pfc_en) +{ + uint8_t i; + int32_t rc; + struct rte_eth_rss_conf rss_conf; + + /* + * Builds up the correct configuration for dcb+vt based on the vlan tags array + * given above, and the number of traffic classes available for use. + */ + if (dcb_mode == DCB_VT_ENABLED) { + struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = + ð_conf->rx_adv_conf.vmdq_dcb_conf; + struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = + ð_conf->tx_adv_conf.vmdq_dcb_tx_conf; + + /* VMDQ+DCB RX and TX configurations */ + vmdq_rx_conf->enable_default_pool = 0; + vmdq_rx_conf->default_pool = 0; + vmdq_rx_conf->nb_queue_pools = + (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS); + vmdq_tx_conf->nb_queue_pools = + (num_tcs == ETH_4_TCS ? 
ETH_32_POOLS : ETH_16_POOLS); + + vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools; + for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) { + vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i]; + vmdq_rx_conf->pool_map[i].pools = + 1 << (i % vmdq_rx_conf->nb_queue_pools); + } + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + vmdq_rx_conf->dcb_tc[i] = i % num_tcs; + vmdq_tx_conf->dcb_tc[i] = i % num_tcs; + } + + /* set DCB mode of RX and TX of multiple queues */ + eth_conf->rxmode.mq_mode = + (enum rte_eth_rx_mq_mode) + (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB); + eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB; + } else { + struct rte_eth_dcb_rx_conf *rx_conf = + ð_conf->rx_adv_conf.dcb_rx_conf; + struct rte_eth_dcb_tx_conf *tx_conf = + ð_conf->tx_adv_conf.dcb_tx_conf; + + memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf)); + + rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf); + if (rc != 0) + return rc; + + rx_conf->nb_tcs = num_tcs; + tx_conf->nb_tcs = num_tcs; + + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + rx_conf->dcb_tc[i] = i % num_tcs; + tx_conf->dcb_tc[i] = i % num_tcs; + } + + eth_conf->rxmode.mq_mode = + (enum rte_eth_rx_mq_mode) + (rx_mq_mode & ETH_MQ_RX_DCB_RSS); + eth_conf->rx_adv_conf.rss_conf = rss_conf; + eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB; + } + + if (pfc_en) + eth_conf->dcb_capability_en = + ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT; + else + eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT; + + return 0; +} + +int +init_port_dcb_config(portid_t pid, + enum dcb_mode_enable dcb_mode, + enum rte_eth_nb_tcs num_tcs, + uint8_t pfc_en) +{ + struct rte_eth_conf port_conf; + struct rte_port *rte_port; + int retval; + uint16_t i; + + rte_port = &ports[pid]; + + memset(&port_conf, 0, sizeof(struct rte_eth_conf)); + /* Enter DCB configuration status */ + dcb_config = 1; + + port_conf.rxmode = rte_port->dev_conf.rxmode; + port_conf.txmode = rte_port->dev_conf.txmode; + + /*set configuration of DCB in vt mode and DCB in non-vt mode*/ + retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en); + if (retval < 0) + return retval; + port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + + /* re-configure the device . */ + retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); + if (retval < 0) + return retval; + + retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info); + if (retval != 0) + return retval; + + /* If dev_info.vmdq_pool_base is greater than 0, + * the queue id of vmdq pools is started after pf queues. 
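+ * When that is the case, DCB in VT mode is not meaningful and init_port_dcb_config() rejects the port below.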
+ */ + if (dcb_mode == DCB_VT_ENABLED && + rte_port->dev_info.vmdq_pool_base > 0) { + printf("VMDQ_DCB multi-queue mode is nonsensical" + " for port %d.", pid); + return -1; + } + + /* Assume the ports in testpmd have the same dcb capability + * and has the same number of rxq and txq in dcb mode + */ + if (dcb_mode == DCB_VT_ENABLED) { + if (rte_port->dev_info.max_vfs > 0) { + nb_rxq = rte_port->dev_info.nb_rx_queues; + nb_txq = rte_port->dev_info.nb_tx_queues; + } else { + nb_rxq = rte_port->dev_info.max_rx_queues; + nb_txq = rte_port->dev_info.max_tx_queues; + } + } else { + /*if vt is disabled, use all pf queues */ + if (rte_port->dev_info.vmdq_pool_base == 0) { + nb_rxq = rte_port->dev_info.max_rx_queues; + nb_txq = rte_port->dev_info.max_tx_queues; + } else { + nb_rxq = (queueid_t)num_tcs; + nb_txq = (queueid_t)num_tcs; + + } + } + rx_free_thresh = 64; + + memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); + + rxtx_port_config(rte_port); + /* VLAN filter */ + rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + for (i = 0; i < RTE_DIM(vlan_tags); i++) + rx_vft_set(pid, vlan_tags[i], 1); + + retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr); + if (retval != 0) + return retval; + + map_port_queue_stats_mapping_registers(pid, rte_port); + + rte_port->dcb_flag = 1; + + return 0; +} + +static void +init_port(void) +{ + /* Configuration of Ethernet ports. */ + ports = rte_zmalloc("testpmd: ports", + sizeof(struct rte_port) * RTE_MAX_ETHPORTS, + RTE_CACHE_LINE_SIZE); + if (ports == NULL) { + rte_exit(EXIT_FAILURE, + "rte_zmalloc(%d struct rte_port) failed\n", + RTE_MAX_ETHPORTS); + } + + /* Initialize ports NUMA structures */ + memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); + memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); + memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); +} + +static void +force_quit(void) +{ + pmd_test_exit(); + prompt_exit(); +} + +static void +print_stats(void) +{ + uint8_t i; + const char clr[] = { 27, '[', '2', 'J', '\0' }; + const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' }; + + /* Clear screen and move to top left */ + printf("%s%s", clr, top_left); + + printf("\nPort statistics ===================================="); + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) + nic_stats_display(fwd_ports_ids[i]); + + fflush(stdout); +} + +static void +signal_handler(int signum) +{ + if (signum == SIGINT || signum == SIGTERM) { + printf("\nSignal %d received, preparing to exit...\n", + signum); +#ifdef RTE_LIBRTE_PDUMP + /* uninitialize packet capture framework */ + rte_pdump_uninit(); +#endif +#ifdef RTE_LIBRTE_LATENCY_STATS + if (latencystats_enabled != 0) + rte_latencystats_uninit(); +#endif + force_quit(); + /* Set flag to indicate the force termination. 
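The statistics display loop in main() polls this flag and stops once it is set. 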
*/ + f_quit = 1; + /* exit with the expected status */ + signal(signum, SIG_DFL); + kill(getpid(), signum); + } +} + +int +main(int argc, char** argv) +{ + int diag; + portid_t port_id; + uint16_t count; + int ret; + + signal(SIGINT, signal_handler); + signal(SIGTERM, signal_handler); + + testpmd_logtype = rte_log_register("testpmd"); + if (testpmd_logtype < 0) + rte_exit(EXIT_FAILURE, "Cannot register log type"); + rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG); + + diag = rte_eal_init(argc, argv); + if (diag < 0) + rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n", + rte_strerror(rte_errno)); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + rte_exit(EXIT_FAILURE, + "Secondary process type not supported.\n"); + + ret = register_eth_event_callback(); + if (ret != 0) + rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); + +#ifdef RTE_LIBRTE_PDUMP + /* initialize packet capture framework */ + rte_pdump_init(); +#endif + + count = 0; + RTE_ETH_FOREACH_DEV(port_id) { + ports_ids[count] = port_id; + count++; + } + nb_ports = (portid_t) count; + if (nb_ports == 0) + TESTPMD_LOG(WARNING, "No probed ethernet devices\n"); + + /* allocate port structures, and init them */ + init_port(); + + set_def_fwd_config(); + if (nb_lcores == 0) + rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n" + "Check the core mask argument\n"); + + /* Bitrate/latency stats disabled by default */ +#ifdef RTE_LIBRTE_BITRATE + bitrate_enabled = 0; +#endif +#ifdef RTE_LIBRTE_LATENCY_STATS + latencystats_enabled = 0; +#endif + + /* on FreeBSD, mlockall() is disabled by default */ +#ifdef RTE_EXEC_ENV_FREEBSD + do_mlockall = 0; +#else + do_mlockall = 1; +#endif + + argc -= diag; + argv += diag; + if (argc > 1) + launch_args_parse(argc, argv); + + if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) { + TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n", + strerror(errno)); + } + + if (tx_first && interactive) + rte_exit(EXIT_FAILURE, "--tx-first cannot be used on " + "interactive mode.\n"); + + if (tx_first && lsc_interrupt) { + printf("Warning: lsc_interrupt needs to be off when " + " using tx_first. 
Disabling.\n"); + lsc_interrupt = 0; + } + + if (!nb_rxq && !nb_txq) + printf("Warning: Either rx or tx queues should be non-zero\n"); + + if (nb_rxq > 1 && nb_rxq > nb_txq) + printf("Warning: nb_rxq=%d enables RSS configuration, " + "but nb_txq=%d will prevent to fully test it.\n", + nb_rxq, nb_txq); + + init_config(); + + if (hot_plug) { + ret = rte_dev_hotplug_handle_enable(); + if (ret) { + RTE_LOG(ERR, EAL, + "fail to enable hotplug handling."); + return -1; + } + + ret = rte_dev_event_monitor_start(); + if (ret) { + RTE_LOG(ERR, EAL, + "fail to start device event monitoring."); + return -1; + } + + ret = rte_dev_event_callback_register(NULL, + dev_event_callback, NULL); + if (ret) { + RTE_LOG(ERR, EAL, + "fail to register device event callback\n"); + return -1; + } + } + + if (!no_device_start && start_port(RTE_PORT_ALL) != 0) + rte_exit(EXIT_FAILURE, "Start ports failed\n"); + + /* set all ports to promiscuous mode by default */ + RTE_ETH_FOREACH_DEV(port_id) { + ret = rte_eth_promiscuous_enable(port_id); + if (ret != 0) + printf("Error during enabling promiscuous mode for port %u: %s - ignore\n", + port_id, rte_strerror(-ret)); + } + + /* Init metrics library */ + rte_metrics_init(rte_socket_id()); + +#ifdef RTE_LIBRTE_LATENCY_STATS + if (latencystats_enabled != 0) { + int ret = rte_latencystats_init(1, NULL); + if (ret) + printf("Warning: latencystats init()" + " returned error %d\n", ret); + printf("Latencystats running on lcore %d\n", + latencystats_lcore_id); + } +#endif + + /* Setup bitrate stats */ +#ifdef RTE_LIBRTE_BITRATE + if (bitrate_enabled != 0) { + bitrate_data = rte_stats_bitrate_create(); + if (bitrate_data == NULL) + rte_exit(EXIT_FAILURE, + "Could not allocate bitrate data.\n"); + rte_stats_bitrate_reg(bitrate_data); + } +#endif + +#ifdef RTE_LIBRTE_CMDLINE + if (strlen(cmdline_filename) != 0) + cmdline_read_from_file(cmdline_filename); + + if (interactive == 1) { + if (auto_start) { + printf("Start automatic packet forwarding\n"); + start_packet_forwarding(0); + } + prompt(); + pmd_test_exit(); + } else +#endif + { + char c; + int rc; + + f_quit = 0; + + printf("No commandline core given, start packet forwarding\n"); + start_packet_forwarding(tx_first); + if (stats_period != 0) { + uint64_t prev_time = 0, cur_time, diff_time = 0; + uint64_t timer_period; + + /* Convert to number of cycles */ + timer_period = stats_period * rte_get_timer_hz(); + + while (f_quit == 0) { + cur_time = rte_get_timer_cycles(); + diff_time += cur_time - prev_time; + + if (diff_time >= timer_period) { + print_stats(); + /* Reset the timer */ + diff_time = 0; + } + /* Sleep to avoid unnecessary checks */ + prev_time = cur_time; + sleep(1); + } + } + + printf("Press enter to exit\n"); + rc = read(0, &c, 1); + pmd_test_exit(); + if (rc < 0) + return 1; + } + + ret = rte_eal_cleanup(); + if (ret != 0) + rte_exit(EXIT_FAILURE, + "EAL cleanup failed: %s\n", strerror(-ret)); + + return EXIT_SUCCESS; +} diff --git a/src/spdk/dpdk/app/test-pmd/testpmd.h b/src/spdk/dpdk/app/test-pmd/testpmd.h new file mode 100644 index 000000000..a80375093 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/testpmd.h @@ -0,0 +1,938 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation + */ + +#ifndef _TESTPMD_H_ +#define _TESTPMD_H_ + +#include <stdbool.h> + +#include <rte_pci.h> +#include <rte_bus_pci.h> +#include <rte_gro.h> +#include <rte_gso.h> +#include <cmdline.h> + +#define RTE_PORT_ALL (~(portid_t)0x0) + +#define RTE_TEST_RX_DESC_MAX 2048 +#define RTE_TEST_TX_DESC_MAX 2048 + 
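+/* Run-time state of a port, kept in the port_status field of struct rte_port. */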
+#define RTE_PORT_STOPPED (uint16_t)0 +#define RTE_PORT_STARTED (uint16_t)1 +#define RTE_PORT_CLOSED (uint16_t)2 +#define RTE_PORT_HANDLING (uint16_t)3 + +/* + * It is used to allocate the memory for hash key. + * The hash key size is NIC dependent. + */ +#define RSS_HASH_KEY_LENGTH 64 + +/* + * Default size of the mbuf data buffer to receive standard 1518-byte + * Ethernet frames in a mono-segment memory buffer. + */ +#define DEFAULT_MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE +/**< Default size of mbuf data buffer. */ + +/* + * The maximum number of segments per packet is used when creating + * scattered transmit packets composed of a list of mbufs. + */ +#define RTE_MAX_SEGS_PER_PKT 255 /**< nb_segs is a 8-bit unsigned char. */ + +#define MAX_PKT_BURST 512 +#define DEF_PKT_BURST 32 + +#define DEF_MBUF_CACHE 250 + +#define RTE_CACHE_LINE_SIZE_ROUNDUP(size) \ + (RTE_CACHE_LINE_SIZE * ((size + RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE)) + +#define NUMA_NO_CONFIG 0xFF +#define UMA_NO_CONFIG 0xFF + +typedef uint8_t lcoreid_t; +typedef uint16_t portid_t; +typedef uint16_t queueid_t; +typedef uint16_t streamid_t; + +#if defined RTE_LIBRTE_PMD_SOFTNIC +#define SOFTNIC 1 +#else +#define SOFTNIC 0 +#endif + +enum { + PORT_TOPOLOGY_PAIRED, + PORT_TOPOLOGY_CHAINED, + PORT_TOPOLOGY_LOOP, +}; + +enum { + MP_ALLOC_NATIVE, /**< allocate and populate mempool natively */ + MP_ALLOC_ANON, + /**< allocate mempool natively, but populate using anonymous memory */ + MP_ALLOC_XMEM, + /**< allocate and populate mempool using anonymous memory */ + MP_ALLOC_XMEM_HUGE, + /**< allocate and populate mempool using anonymous hugepage memory */ + MP_ALLOC_XBUF + /**< allocate mempool natively, use rte_pktmbuf_pool_create_extbuf */ +}; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS +/** + * The data structure associated with RX and TX packet burst statistics + * that are recorded for each forwarding stream. + */ +struct pkt_burst_stats { + unsigned int pkt_burst_spread[MAX_PKT_BURST]; +}; +#endif + +/** Information for a given RSS type. */ +struct rss_type_info { + const char *str; /**< Type name. */ + uint64_t rss_type; /**< Type value. */ +}; + +/** + * RSS type information table. + * + * An entry with a NULL type name terminates the list. + */ +extern const struct rss_type_info rss_type_table[]; + +/** + * Dynf name array. + * + * Array that holds the name for each dynf. + */ +extern char dynf_names[64][RTE_MBUF_DYN_NAMESIZE]; + +/** + * The data structure associated with a forwarding stream between a receive + * port/queue and a transmit port/queue. 
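+ * Each forwarding lcore owns a contiguous range of streams (stream_idx and stream_nb in struct fwd_lcore) and repeatedly runs the active engine's packet_fwd handler on each of them.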
+ */ +struct fwd_stream { + /* "read-only" data */ + portid_t rx_port; /**< port to poll for received packets */ + queueid_t rx_queue; /**< RX queue to poll on "rx_port" */ + portid_t tx_port; /**< forwarding port of received packets */ + queueid_t tx_queue; /**< TX queue to send forwarded packets */ + streamid_t peer_addr; /**< index of peer ethernet address of packets */ + + unsigned int retry_enabled; + + /* "read-write" results */ + uint64_t rx_packets; /**< received packets */ + uint64_t tx_packets; /**< received packets transmitted */ + uint64_t fwd_dropped; /**< received packets not forwarded */ + uint64_t rx_bad_ip_csum ; /**< received packets has bad ip checksum */ + uint64_t rx_bad_l4_csum ; /**< received packets has bad l4 checksum */ + uint64_t rx_bad_outer_l4_csum; + /**< received packets has bad outer l4 checksum */ + unsigned int gro_times; /**< GRO operation times */ +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t core_cycles; /**< used for RX and TX processing */ +#endif +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + struct pkt_burst_stats rx_burst_stats; + struct pkt_burst_stats tx_burst_stats; +#endif +}; + +/** Descriptor for a single flow. */ +struct port_flow { + struct port_flow *next; /**< Next flow in list. */ + struct port_flow *tmp; /**< Temporary linking. */ + uint32_t id; /**< Flow rule ID. */ + struct rte_flow *flow; /**< Opaque flow object returned by PMD. */ + struct rte_flow_conv_rule rule; /* Saved flow rule description. */ + uint8_t data[]; /**< Storage for flow rule description */ +}; + +#ifdef SOFTNIC +/** + * The data structure associate with softnic port + */ +struct softnic_port { + uint32_t default_tm_hierarchy_enable; /**< default tm hierarchy */ + struct fwd_lcore **fwd_lcore_arg; /**< softnic fwd core parameters */ +}; +#endif + +/** + * The data structure associated with each port. + */ +struct rte_port { + struct rte_eth_dev_info dev_info; /**< PCI info + driver name */ + struct rte_eth_conf dev_conf; /**< Port configuration. */ + struct rte_ether_addr eth_addr; /**< Port ethernet address */ + struct rte_eth_stats stats; /**< Last port statistics */ + unsigned int socket_id; /**< For NUMA support */ + uint16_t parse_tunnel:1; /**< Parse internal headers */ + uint16_t tso_segsz; /**< Segmentation offload MSS for non-tunneled packets. */ + uint16_t tunnel_tso_segsz; /**< Segmentation offload MSS for tunneled pkts. */ + uint16_t tx_vlan_id;/**< The tag ID */ + uint16_t tx_vlan_id_outer;/**< The outer tag ID */ + uint8_t tx_queue_stats_mapping_enabled; + uint8_t rx_queue_stats_mapping_enabled; + volatile uint16_t port_status; /**< port started or not */ + uint8_t need_setup; /**< port just attached */ + uint8_t need_reconfig; /**< need reconfiguring port or not */ + uint8_t need_reconfig_queues; /**< need reconfiguring queues or not */ + uint8_t rss_flag; /**< enable rss or not */ + uint8_t dcb_flag; /**< enable dcb */ + uint16_t nb_rx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx desc number */ + uint16_t nb_tx_desc[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx desc number */ + struct rte_eth_rxconf rx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue rx configuration */ + struct rte_eth_txconf tx_conf[RTE_MAX_QUEUES_PER_PORT+1]; /**< per queue tx configuration */ + struct rte_ether_addr *mc_addr_pool; /**< pool of multicast addrs */ + uint32_t mc_addr_nb; /**< nb. of addr. in mc_addr_pool */ + uint8_t slave_flag; /**< bonding slave port */ + struct port_flow *flow_list; /**< Associated flows. 
*/ + const struct rte_eth_rxtx_callback *rx_dump_cb[RTE_MAX_QUEUES_PER_PORT+1]; + const struct rte_eth_rxtx_callback *tx_dump_cb[RTE_MAX_QUEUES_PER_PORT+1]; +#ifdef SOFTNIC + struct softnic_port softport; /**< softnic params */ +#endif + /**< metadata value to insert in Tx packets. */ + uint32_t tx_metadata; + const struct rte_eth_rxtx_callback *tx_set_md_cb[RTE_MAX_QUEUES_PER_PORT+1]; + /**< dynamic flags. */ + uint64_t mbuf_dynf; + const struct rte_eth_rxtx_callback *tx_set_dynf_cb[RTE_MAX_QUEUES_PER_PORT+1]; +}; + +/** + * The data structure associated with each forwarding logical core. + * The logical cores are internally numbered by a core index from 0 to + * the maximum number of logical cores - 1. + * The system CPU identifier of all logical cores are setup in a global + * CPU id. configuration table. + */ +struct fwd_lcore { + struct rte_gso_ctx gso_ctx; /**< GSO context */ + struct rte_mempool *mbp; /**< The mbuf pool to use by this core */ + void *gro_ctx; /**< GRO context */ + streamid_t stream_idx; /**< index of 1st stream in "fwd_streams" */ + streamid_t stream_nb; /**< number of streams in "fwd_streams" */ + lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */ + queueid_t tx_queue; /**< TX queue to send forwarded packets */ + volatile char stopped; /**< stop forwarding when set */ +}; + +/* + * Forwarding mode operations: + * - IO forwarding mode (default mode) + * Forwards packets unchanged. + * + * - MAC forwarding mode + * Set the source and the destination Ethernet addresses of packets + * before forwarding them. + * + * - IEEE1588 forwarding mode + * Check that received IEEE1588 Precise Time Protocol (PTP) packets are + * filtered and timestamped by the hardware. + * Forwards packets unchanged on the same port. + * Check that sent IEEE1588 PTP packets are timestamped by the hardware. + */ +typedef void (*port_fwd_begin_t)(portid_t pi); +typedef void (*port_fwd_end_t)(portid_t pi); +typedef void (*packet_fwd_t)(struct fwd_stream *fs); + +struct fwd_engine { + const char *fwd_mode_name; /**< Forwarding mode name. */ + port_fwd_begin_t port_fwd_begin; /**< NULL if nothing special to do. */ + port_fwd_end_t port_fwd_end; /**< NULL if nothing special to do. */ + packet_fwd_t packet_fwd; /**< Mandatory. */ +}; + +#define BURST_TX_WAIT_US 1 +#define BURST_TX_RETRIES 64 + +extern uint32_t burst_tx_delay_time; +extern uint32_t burst_tx_retry_num; + +extern struct fwd_engine io_fwd_engine; +extern struct fwd_engine mac_fwd_engine; +extern struct fwd_engine mac_swap_engine; +extern struct fwd_engine flow_gen_engine; +extern struct fwd_engine rx_only_engine; +extern struct fwd_engine tx_only_engine; +extern struct fwd_engine csum_fwd_engine; +extern struct fwd_engine icmp_echo_engine; +extern struct fwd_engine noisy_vnf_engine; +#ifdef SOFTNIC +extern struct fwd_engine softnic_fwd_engine; +#endif +#ifdef RTE_LIBRTE_IEEE1588 +extern struct fwd_engine ieee1588_fwd_engine; +#endif + +extern struct fwd_engine * fwd_engines[]; /**< NULL terminated array. */ +extern cmdline_parse_inst_t cmd_set_raw; +extern cmdline_parse_inst_t cmd_show_set_raw; +extern cmdline_parse_inst_t cmd_show_set_raw_all; + +extern uint16_t mempool_flags; + +/** + * Forwarding Configuration + * + */ +struct fwd_config { + struct fwd_engine *fwd_eng; /**< Packet forwarding mode. */ + streamid_t nb_fwd_streams; /**< Nb. of forward streams to process. */ + lcoreid_t nb_fwd_lcores; /**< Nb. of logical cores to launch. */ + portid_t nb_fwd_ports; /**< Nb. of ports involved. 
*/ +}; + +/** + * DCB mode enable + */ +enum dcb_mode_enable +{ + DCB_VT_ENABLED, + DCB_ENABLED +}; + +#define MAX_TX_QUEUE_STATS_MAPPINGS 1024 /* MAX_PORT of 32 @ 32 tx_queues/port */ +#define MAX_RX_QUEUE_STATS_MAPPINGS 4096 /* MAX_PORT of 32 @ 128 rx_queues/port */ + +struct queue_stats_mappings { + portid_t port_id; + uint16_t queue_id; + uint8_t stats_counter_id; +} __rte_cache_aligned; + +extern struct queue_stats_mappings tx_queue_stats_mappings_array[]; +extern struct queue_stats_mappings rx_queue_stats_mappings_array[]; + +/* Assign both tx and rx queue stats mappings to the same default values */ +extern struct queue_stats_mappings *tx_queue_stats_mappings; +extern struct queue_stats_mappings *rx_queue_stats_mappings; + +extern uint16_t nb_tx_queue_stats_mappings; +extern uint16_t nb_rx_queue_stats_mappings; + +extern uint8_t xstats_hide_zero; /**< Hide zero values for xstats display */ + +/* globals used for configuration */ +extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */ +extern int testpmd_logtype; /**< Log type for testpmd logs */ +extern uint8_t interactive; +extern uint8_t auto_start; +extern uint8_t tx_first; +extern char cmdline_filename[PATH_MAX]; /**< offline commands file */ +extern uint8_t numa_support; /**< set by "--numa" parameter */ +extern uint16_t port_topology; /**< set by "--port-topology" parameter */ +extern uint8_t no_flush_rx; /**<set by "--no-flush-rx" parameter */ +extern uint8_t flow_isolate_all; /**< set by "--flow-isolate-all */ +extern uint8_t mp_alloc_type; +/**< set by "--mp-anon" or "--mp-alloc" parameter */ +extern uint8_t no_link_check; /**<set by "--disable-link-check" parameter */ +extern uint8_t no_device_start; /**<set by "--disable-device-start" parameter */ +extern volatile int test_done; /* stop packet forwarding when set to 1. */ +extern uint8_t lsc_interrupt; /**< disabled by "--no-lsc-interrupt" parameter */ +extern uint8_t rmv_interrupt; /**< disabled by "--no-rmv-interrupt" parameter */ +extern uint32_t event_print_mask; +/**< set by "--print-event xxxx" and "--mask-event xxxx parameters */ +extern bool setup_on_probe_event; /**< disabled by port setup-on iterator */ +extern uint8_t hot_plug; /**< enable by "--hot-plug" parameter */ +extern int do_mlockall; /**< set by "--mlockall" or "--no-mlockall" parameter */ +extern uint8_t clear_ptypes; /**< disabled by set ptype cmd */ + +#ifdef RTE_LIBRTE_IXGBE_BYPASS +extern uint32_t bypass_timeout; /**< Store the NIC bypass watchdog timeout */ +#endif + +/* + * Store specified sockets on which memory pool to be used by ports + * is allocated. + */ +extern uint8_t port_numa[RTE_MAX_ETHPORTS]; + +/* + * Store specified sockets on which RX ring to be used by ports + * is allocated. + */ +extern uint8_t rxring_numa[RTE_MAX_ETHPORTS]; + +/* + * Store specified sockets on which TX ring to be used by ports + * is allocated. + */ +extern uint8_t txring_numa[RTE_MAX_ETHPORTS]; + +extern uint8_t socket_num; + +/* + * Configuration of logical cores: + * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores + */ +extern lcoreid_t nb_lcores; /**< Number of logical cores probed at init time. */ +extern lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ +extern lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. 
*/ +extern unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; +extern unsigned int num_sockets; +extern unsigned int socket_ids[RTE_MAX_NUMA_NODES]; + +/* + * Configuration of Ethernet ports: + * nb_fwd_ports <= nb_cfg_ports <= nb_ports + */ +extern portid_t nb_ports; /**< Number of ethernet ports probed at init time. */ +extern portid_t nb_cfg_ports; /**< Number of configured ports. */ +extern portid_t nb_fwd_ports; /**< Number of forwarding ports. */ +extern portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; +extern struct rte_port *ports; + +extern struct rte_eth_rxmode rx_mode; +extern struct rte_eth_txmode tx_mode; + +extern uint64_t rss_hf; + +extern queueid_t nb_hairpinq; +extern queueid_t nb_rxq; +extern queueid_t nb_txq; + +extern uint16_t nb_rxd; +extern uint16_t nb_txd; + +extern int16_t rx_free_thresh; +extern int8_t rx_drop_en; +extern int16_t tx_free_thresh; +extern int16_t tx_rs_thresh; + +extern uint16_t noisy_tx_sw_bufsz; +extern uint16_t noisy_tx_sw_buf_flush_time; +extern uint64_t noisy_lkup_mem_sz; +extern uint64_t noisy_lkup_num_writes; +extern uint64_t noisy_lkup_num_reads; +extern uint64_t noisy_lkup_num_reads_writes; + +extern uint8_t dcb_config; +extern uint8_t dcb_test; + +extern uint16_t mbuf_data_size; /**< Mbuf data space size. */ +extern uint32_t param_total_num_mbufs; + +extern uint16_t stats_period; + +#ifdef RTE_LIBRTE_LATENCY_STATS +extern uint8_t latencystats_enabled; +extern lcoreid_t latencystats_lcore_id; +#endif + +#ifdef RTE_LIBRTE_BITRATE +extern lcoreid_t bitrate_lcore_id; +extern uint8_t bitrate_enabled; +#endif + +extern struct rte_fdir_conf fdir_conf; + +/* + * Configuration of packet segments used by the "txonly" processing engine. + */ +#define TXONLY_DEF_PACKET_LEN 64 +extern uint16_t tx_pkt_length; /**< Length of TXONLY packet */ +extern uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */ +extern uint8_t tx_pkt_nb_segs; /**< Number of segments in TX packets */ + +enum tx_pkt_split { + TX_PKT_SPLIT_OFF, + TX_PKT_SPLIT_ON, + TX_PKT_SPLIT_RND, +}; + +extern enum tx_pkt_split tx_pkt_split; + +extern uint8_t txonly_multi_flow; + +extern uint16_t nb_pkt_per_burst; +extern uint16_t mb_mempool_cache; +extern int8_t rx_pthresh; +extern int8_t rx_hthresh; +extern int8_t rx_wthresh; +extern int8_t tx_pthresh; +extern int8_t tx_hthresh; +extern int8_t tx_wthresh; + +extern uint16_t tx_udp_src_port; +extern uint16_t tx_udp_dst_port; + +extern uint32_t tx_ip_src_addr; +extern uint32_t tx_ip_dst_addr; + +extern struct fwd_config cur_fwd_config; +extern struct fwd_engine *cur_fwd_eng; +extern uint32_t retry_enabled; +extern struct fwd_lcore **fwd_lcores; +extern struct fwd_stream **fwd_streams; + +extern uint16_t vxlan_gpe_udp_port; /**< UDP port of tunnel VXLAN-GPE. */ + +extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */ +extern struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; + +extern uint32_t burst_tx_delay_time; /**< Burst tx delay time(us) for mac-retry. */ +extern uint32_t burst_tx_retry_num; /**< Burst tx retry number for mac-retry. 
*/ + +#define GRO_DEFAULT_ITEM_NUM_PER_FLOW 32 +#define GRO_DEFAULT_FLOW_NUM (RTE_GRO_MAX_BURST_ITEM_NUM / \ + GRO_DEFAULT_ITEM_NUM_PER_FLOW) + +#define GRO_DEFAULT_FLUSH_CYCLES 1 +#define GRO_MAX_FLUSH_CYCLES 4 + +struct gro_status { + struct rte_gro_param param; + uint8_t enable; +}; +extern struct gro_status gro_ports[RTE_MAX_ETHPORTS]; +extern uint8_t gro_flush_cycles; + +#define GSO_MAX_PKT_BURST 2048 +struct gso_status { + uint8_t enable; +}; +extern struct gso_status gso_ports[RTE_MAX_ETHPORTS]; +extern uint16_t gso_max_segment_size; + +/* VXLAN encap/decap parameters. */ +struct vxlan_encap_conf { + uint32_t select_ipv4:1; + uint32_t select_vlan:1; + uint32_t select_tos_ttl:1; + uint8_t vni[3]; + rte_be16_t udp_src; + rte_be16_t udp_dst; + rte_be32_t ipv4_src; + rte_be32_t ipv4_dst; + uint8_t ipv6_src[16]; + uint8_t ipv6_dst[16]; + rte_be16_t vlan_tci; + uint8_t ip_tos; + uint8_t ip_ttl; + uint8_t eth_src[RTE_ETHER_ADDR_LEN]; + uint8_t eth_dst[RTE_ETHER_ADDR_LEN]; +}; + +extern struct vxlan_encap_conf vxlan_encap_conf; + +/* NVGRE encap/decap parameters. */ +struct nvgre_encap_conf { + uint32_t select_ipv4:1; + uint32_t select_vlan:1; + uint8_t tni[3]; + rte_be32_t ipv4_src; + rte_be32_t ipv4_dst; + uint8_t ipv6_src[16]; + uint8_t ipv6_dst[16]; + rte_be16_t vlan_tci; + uint8_t eth_src[RTE_ETHER_ADDR_LEN]; + uint8_t eth_dst[RTE_ETHER_ADDR_LEN]; +}; + +extern struct nvgre_encap_conf nvgre_encap_conf; + +/* L2 encap parameters. */ +struct l2_encap_conf { + uint32_t select_ipv4:1; + uint32_t select_vlan:1; + rte_be16_t vlan_tci; + uint8_t eth_src[RTE_ETHER_ADDR_LEN]; + uint8_t eth_dst[RTE_ETHER_ADDR_LEN]; +}; +extern struct l2_encap_conf l2_encap_conf; + +/* L2 decap parameters. */ +struct l2_decap_conf { + uint32_t select_vlan:1; +}; +extern struct l2_decap_conf l2_decap_conf; + +/* MPLSoGRE encap parameters. */ +struct mplsogre_encap_conf { + uint32_t select_ipv4:1; + uint32_t select_vlan:1; + uint8_t label[3]; + rte_be32_t ipv4_src; + rte_be32_t ipv4_dst; + uint8_t ipv6_src[16]; + uint8_t ipv6_dst[16]; + rte_be16_t vlan_tci; + uint8_t eth_src[RTE_ETHER_ADDR_LEN]; + uint8_t eth_dst[RTE_ETHER_ADDR_LEN]; +}; +extern struct mplsogre_encap_conf mplsogre_encap_conf; + +/* MPLSoGRE decap parameters. */ +struct mplsogre_decap_conf { + uint32_t select_ipv4:1; + uint32_t select_vlan:1; +}; +extern struct mplsogre_decap_conf mplsogre_decap_conf; + +/* MPLSoUDP encap parameters. */ +struct mplsoudp_encap_conf { + uint32_t select_ipv4:1; + uint32_t select_vlan:1; + uint8_t label[3]; + rte_be16_t udp_src; + rte_be16_t udp_dst; + rte_be32_t ipv4_src; + rte_be32_t ipv4_dst; + uint8_t ipv6_src[16]; + uint8_t ipv6_dst[16]; + rte_be16_t vlan_tci; + uint8_t eth_src[RTE_ETHER_ADDR_LEN]; + uint8_t eth_dst[RTE_ETHER_ADDR_LEN]; +}; +extern struct mplsoudp_encap_conf mplsoudp_encap_conf; + +/* MPLSoUDP decap parameters. 
*/ +struct mplsoudp_decap_conf { + uint32_t select_ipv4:1; + uint32_t select_vlan:1; +}; +extern struct mplsoudp_decap_conf mplsoudp_decap_conf; + +extern enum rte_eth_rx_mq_mode rx_mq_mode; + +static inline unsigned int +lcore_num(void) +{ + unsigned int i; + + for (i = 0; i < RTE_MAX_LCORE; ++i) + if (fwd_lcores_cpuids[i] == rte_lcore_id()) + return i; + + rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n"); +} + +void +parse_fwd_portlist(const char *port); + +static inline struct fwd_lcore * +current_fwd_lcore(void) +{ + return fwd_lcores[lcore_num()]; +} + +/* Mbuf Pools */ +static inline void +mbuf_poolname_build(unsigned int sock_id, char* mp_name, int name_size) +{ + snprintf(mp_name, name_size, "mbuf_pool_socket_%u", sock_id); +} + +static inline struct rte_mempool * +mbuf_pool_find(unsigned int sock_id) +{ + char pool_name[RTE_MEMPOOL_NAMESIZE]; + + mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name)); + return rte_mempool_lookup((const char *)pool_name); +} + +/** + * Read/Write operations on a PCI register of a port. + */ +static inline uint32_t +port_pci_reg_read(struct rte_port *port, uint32_t reg_off) +{ + const struct rte_pci_device *pci_dev; + const struct rte_bus *bus; + void *reg_addr; + uint32_t reg_v; + + if (!port->dev_info.device) { + printf("Invalid device\n"); + return 0; + } + + bus = rte_bus_find_by_device(port->dev_info.device); + if (bus && !strcmp(bus->name, "pci")) { + pci_dev = RTE_DEV_TO_PCI(port->dev_info.device); + } else { + printf("Not a PCI device\n"); + return 0; + } + + reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off); + reg_v = *((volatile uint32_t *)reg_addr); + return rte_le_to_cpu_32(reg_v); +} + +#define port_id_pci_reg_read(pt_id, reg_off) \ + port_pci_reg_read(&ports[(pt_id)], (reg_off)) + +static inline void +port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v) +{ + const struct rte_pci_device *pci_dev; + const struct rte_bus *bus; + void *reg_addr; + + if (!port->dev_info.device) { + printf("Invalid device\n"); + return; + } + + bus = rte_bus_find_by_device(port->dev_info.device); + if (bus && !strcmp(bus->name, "pci")) { + pci_dev = RTE_DEV_TO_PCI(port->dev_info.device); + } else { + printf("Not a PCI device\n"); + return; + } + + reg_addr = ((char *)pci_dev->mem_resource[0].addr + reg_off); + *((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v); +} + +#define port_id_pci_reg_write(pt_id, reg_off, reg_value) \ + port_pci_reg_write(&ports[(pt_id)], (reg_off), (reg_value)) + +/* Prototypes */ +unsigned int parse_item_list(char* str, const char* item_name, + unsigned int max_items, + unsigned int *parsed_items, int check_unique_values); +void launch_args_parse(int argc, char** argv); +void cmdline_read_from_file(const char *filename); +void prompt(void); +void prompt_exit(void); +void nic_stats_display(portid_t port_id); +void nic_stats_clear(portid_t port_id); +void nic_xstats_display(portid_t port_id); +void nic_xstats_clear(portid_t port_id); +void nic_stats_mapping_display(portid_t port_id); +void device_infos_display(const char *identifier); +void port_infos_display(portid_t port_id); +void port_summary_display(portid_t port_id); +void port_summary_header_display(void); +void port_offload_cap_display(portid_t port_id); +void rx_queue_infos_display(portid_t port_idi, uint16_t queue_id); +void tx_queue_infos_display(portid_t port_idi, uint16_t queue_id); +void fwd_lcores_config_display(void); +void pkt_fwd_config_display(struct fwd_config *cfg); +void 
rxtx_config_display(void); +void fwd_config_setup(void); +void set_def_fwd_config(void); +void reconfig(portid_t new_port_id, unsigned socket_id); +int init_fwd_streams(void); +void update_fwd_ports(portid_t new_pid); + +void set_fwd_eth_peer(portid_t port_id, char *peer_addr); + +void port_mtu_set(portid_t port_id, uint16_t mtu); +void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos); +void port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, + uint8_t bit_v); +void port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, + uint8_t bit1_pos, uint8_t bit2_pos); +void port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, + uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value); +void port_reg_display(portid_t port_id, uint32_t reg_off); +void port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t value); +int port_flow_validate(portid_t port_id, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions); +int port_flow_create(portid_t port_id, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action *actions); +void update_age_action_context(const struct rte_flow_action *actions, + struct port_flow *pf); +int port_flow_destroy(portid_t port_id, uint32_t n, const uint32_t *rule); +int port_flow_flush(portid_t port_id); +int port_flow_dump(portid_t port_id, const char *file_name); +int port_flow_query(portid_t port_id, uint32_t rule, + const struct rte_flow_action *action); +void port_flow_list(portid_t port_id, uint32_t n, const uint32_t *group); +void port_flow_aged(portid_t port_id, uint8_t destroy); +int port_flow_isolate(portid_t port_id, int set); + +void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id); +void tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id); + +int set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc); +int set_fwd_lcores_mask(uint64_t lcoremask); +void set_fwd_lcores_number(uint16_t nb_lc); + +void set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt); +void set_fwd_ports_mask(uint64_t portmask); +void set_fwd_ports_number(uint16_t nb_pt); +int port_is_forwarding(portid_t port_id); + +void rx_vlan_strip_set(portid_t port_id, int on); +void rx_vlan_strip_set_on_queue(portid_t port_id, uint16_t queue_id, int on); + +void rx_vlan_filter_set(portid_t port_id, int on); +void rx_vlan_all_filter_set(portid_t port_id, int on); +void rx_vlan_qinq_strip_set(portid_t port_id, int on); +int rx_vft_set(portid_t port_id, uint16_t vlan_id, int on); +void vlan_extend_set(portid_t port_id, int on); +void vlan_tpid_set(portid_t port_id, enum rte_vlan_type vlan_type, + uint16_t tp_id); +void tx_vlan_set(portid_t port_id, uint16_t vlan_id); +void tx_qinq_set(portid_t port_id, uint16_t vlan_id, uint16_t vlan_id_outer); +void tx_vlan_reset(portid_t port_id); +void tx_vlan_pvid_set(portid_t port_id, uint16_t vlan_id, int on); + +void set_qmap(portid_t port_id, uint8_t is_rx, uint16_t queue_id, uint8_t map_value); + +void set_xstats_hide_zero(uint8_t on_off); + +void set_verbose_level(uint16_t vb_level); +void set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs); +void show_tx_pkt_segments(void); +void set_tx_pkt_split(const char *name); +void set_nb_pkt_per_burst(uint16_t pkt_burst); +char *list_pkt_forwarding_modes(void); +char *list_pkt_forwarding_retry_modes(void); +void set_pkt_forwarding_mode(const char *fwd_mode); +void start_packet_forwarding(int 
with_tx_first); +void fwd_stats_display(void); +void fwd_stats_reset(void); +void stop_packet_forwarding(void); +void dev_set_link_up(portid_t pid); +void dev_set_link_down(portid_t pid); +void init_port_config(void); +void set_port_slave_flag(portid_t slave_pid); +void clear_port_slave_flag(portid_t slave_pid); +uint8_t port_is_bonding_slave(portid_t slave_pid); + +int init_port_dcb_config(portid_t pid, enum dcb_mode_enable dcb_mode, + enum rte_eth_nb_tcs num_tcs, + uint8_t pfc_en); +int start_port(portid_t pid); +void stop_port(portid_t pid); +void close_port(portid_t pid); +void reset_port(portid_t pid); +void attach_port(char *identifier); +void detach_devargs(char *identifier); +void detach_port_device(portid_t port_id); +int all_ports_stopped(void); +int port_is_stopped(portid_t port_id); +int port_is_started(portid_t port_id); +void pmd_test_exit(void); +void fdir_get_infos(portid_t port_id); +void fdir_set_flex_mask(portid_t port_id, + struct rte_eth_fdir_flex_mask *cfg); +void fdir_set_flex_payload(portid_t port_id, + struct rte_eth_flex_payload_cfg *cfg); +void port_rss_reta_info(portid_t port_id, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t nb_entries); + +void set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on); + +int set_queue_rate_limit(portid_t port_id, uint16_t queue_idx, uint16_t rate); +int set_vf_rate_limit(portid_t port_id, uint16_t vf, uint16_t rate, + uint64_t q_msk); + +void port_rss_hash_conf_show(portid_t port_id, int show_rss_key); +void port_rss_hash_key_update(portid_t port_id, char rss_type[], + uint8_t *hash_key, uint hash_key_len); +int rx_queue_id_is_invalid(queueid_t rxq_id); +int tx_queue_id_is_invalid(queueid_t txq_id); +void setup_gro(const char *onoff, portid_t port_id); +void setup_gro_flush_cycles(uint8_t cycles); +void show_gro(portid_t port_id); +void setup_gso(const char *mode, portid_t port_id); +int eth_dev_info_get_print_err(uint16_t port_id, + struct rte_eth_dev_info *dev_info); +void eth_set_promisc_mode(uint16_t port_id, int enable); +void eth_set_allmulticast_mode(uint16_t port, int enable); +int eth_link_get_nowait_print_err(uint16_t port_id, struct rte_eth_link *link); +int eth_macaddr_get_print_err(uint16_t port_id, + struct rte_ether_addr *mac_addr); + +/* Functions to display the set of MAC addresses added to a port*/ +void show_macs(portid_t port_id); +void show_mcast_macs(portid_t port_id); + +/* Functions to manage the set of filtered Multicast MAC addresses */ +void mcast_addr_add(portid_t port_id, struct rte_ether_addr *mc_addr); +void mcast_addr_remove(portid_t port_id, struct rte_ether_addr *mc_addr); +void port_dcb_info_display(portid_t port_id); + +uint8_t *open_file(const char *file_path, uint32_t *size); +int save_file(const char *file_path, uint8_t *buf, uint32_t size); +int close_file(uint8_t *buf); + +void port_queue_region_info_display(portid_t port_id, void *buf); + +enum print_warning { + ENABLED_WARN = 0, + DISABLED_WARN +}; +int port_id_is_invalid(portid_t port_id, enum print_warning warning); +void print_valid_ports(void); +int new_socket_id(unsigned int socket_id); + +queueid_t get_allowed_max_nb_rxq(portid_t *pid); +int check_nb_rxq(queueid_t rxq); +queueid_t get_allowed_max_nb_txq(portid_t *pid); +int check_nb_txq(queueid_t txq); +int check_nb_rxd(queueid_t rxd); +int check_nb_txd(queueid_t txd); +queueid_t get_allowed_max_nb_hairpinq(portid_t *pid); +int check_nb_hairpinq(queueid_t hairpinq); + +uint16_t dump_rx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + 
uint16_t nb_pkts, __rte_unused uint16_t max_pkts, + __rte_unused void *user_param); + +uint16_t dump_tx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + uint16_t nb_pkts, __rte_unused void *user_param); + +void add_rx_dump_callbacks(portid_t portid); +void remove_rx_dump_callbacks(portid_t portid); +void add_tx_dump_callbacks(portid_t portid); +void remove_tx_dump_callbacks(portid_t portid); +void configure_rxtx_dump_callbacks(uint16_t verbose); + +uint16_t tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue, + struct rte_mbuf *pkts[], uint16_t nb_pkts, + __rte_unused void *user_param); +void add_tx_md_callback(portid_t portid); +void remove_tx_md_callback(portid_t portid); + +uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue, + struct rte_mbuf *pkts[], uint16_t nb_pkts, + __rte_unused void *user_param); +void add_tx_dynf_callback(portid_t portid); +void remove_tx_dynf_callback(portid_t portid); + +/* + * Work-around of a compilation error with ICC on invocations of the + * rte_be_to_cpu_16() function. + */ +#ifdef __GCC__ +#define RTE_BE_TO_CPU_16(be_16_v) rte_be_to_cpu_16((be_16_v)) +#define RTE_CPU_TO_BE_16(cpu_16_v) rte_cpu_to_be_16((cpu_16_v)) +#else +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN +#define RTE_BE_TO_CPU_16(be_16_v) (be_16_v) +#define RTE_CPU_TO_BE_16(cpu_16_v) (cpu_16_v) +#else +#define RTE_BE_TO_CPU_16(be_16_v) \ + (uint16_t) ((((be_16_v) & 0xFF) << 8) | ((be_16_v) >> 8)) +#define RTE_CPU_TO_BE_16(cpu_16_v) \ + (uint16_t) ((((cpu_16_v) & 0xFF) << 8) | ((cpu_16_v) >> 8)) +#endif +#endif /* __GCC__ */ + +#define TESTPMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, testpmd_logtype, "testpmd: " fmt, ## args) + +#endif /* _TESTPMD_H_ */ diff --git a/src/spdk/dpdk/app/test-pmd/txonly.c b/src/spdk/dpdk/app/test-pmd/txonly.c new file mode 100644 index 000000000..076ccaf8f --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/txonly.c @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + */ + +#include <stdarg.h> +#include <string.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <unistd.h> +#include <inttypes.h> + +#include <sys/queue.h> +#include <sys/stat.h> + +#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_cycles.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_launch.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_lcore.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_ip.h> +#include <rte_tcp.h> +#include <rte_udp.h> +#include <rte_string_fns.h> +#include <rte_flow.h> + +#include "testpmd.h" + +/* use RFC863 Discard Protocol */ +uint16_t tx_udp_src_port = 9; +uint16_t tx_udp_dst_port = 9; + +/* use RFC5735 / RFC2544 reserved network test addresses */ +uint32_t tx_ip_src_addr = (198U << 24) | (18 << 16) | (0 << 8) | 1; +uint32_t tx_ip_dst_addr = (198U << 24) | (18 << 16) | (0 << 8) | 2; + +#define IP_DEFTTL 64 /* from RFC 1340. */ + +static struct rte_ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */ +RTE_DEFINE_PER_LCORE(uint8_t, _ip_var); /**< IP address variation */ +static struct rte_udp_hdr pkt_udp_hdr; /**< UDP header of tx packets. 
*/ + +static void +copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt, + unsigned offset) +{ + struct rte_mbuf *seg; + void *seg_buf; + unsigned copy_len; + + seg = pkt; + while (offset >= seg->data_len) { + offset -= seg->data_len; + seg = seg->next; + } + copy_len = seg->data_len - offset; + seg_buf = rte_pktmbuf_mtod_offset(seg, char *, offset); + while (len > copy_len) { + rte_memcpy(seg_buf, buf, (size_t) copy_len); + len -= copy_len; + buf = ((char*) buf + copy_len); + seg = seg->next; + seg_buf = rte_pktmbuf_mtod(seg, char *); + copy_len = seg->data_len; + } + rte_memcpy(seg_buf, buf, (size_t) len); +} + +static inline void +copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset) +{ + if (offset + len <= pkt->data_len) { + rte_memcpy(rte_pktmbuf_mtod_offset(pkt, char *, offset), + buf, (size_t) len); + return; + } + copy_buf_to_pkt_segs(buf, len, pkt, offset); +} + +static void +setup_pkt_udp_ip_headers(struct rte_ipv4_hdr *ip_hdr, + struct rte_udp_hdr *udp_hdr, + uint16_t pkt_data_len) +{ + uint16_t *ptr16; + uint32_t ip_cksum; + uint16_t pkt_len; + + /* + * Initialize UDP header. + */ + pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr)); + udp_hdr->src_port = rte_cpu_to_be_16(tx_udp_src_port); + udp_hdr->dst_port = rte_cpu_to_be_16(tx_udp_dst_port); + udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len); + udp_hdr->dgram_cksum = 0; /* No UDP checksum. */ + + /* + * Initialize IP header. + */ + pkt_len = (uint16_t) (pkt_len + sizeof(struct rte_ipv4_hdr)); + ip_hdr->version_ihl = RTE_IPV4_VHL_DEF; + ip_hdr->type_of_service = 0; + ip_hdr->fragment_offset = 0; + ip_hdr->time_to_live = IP_DEFTTL; + ip_hdr->next_proto_id = IPPROTO_UDP; + ip_hdr->packet_id = 0; + ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len); + ip_hdr->src_addr = rte_cpu_to_be_32(tx_ip_src_addr); + ip_hdr->dst_addr = rte_cpu_to_be_32(tx_ip_dst_addr); + + /* + * Compute IP header checksum. + */ + ptr16 = (unaligned_uint16_t*) ip_hdr; + ip_cksum = 0; + ip_cksum += ptr16[0]; ip_cksum += ptr16[1]; + ip_cksum += ptr16[2]; ip_cksum += ptr16[3]; + ip_cksum += ptr16[4]; + ip_cksum += ptr16[6]; ip_cksum += ptr16[7]; + ip_cksum += ptr16[8]; ip_cksum += ptr16[9]; + + /* + * Reduce 32 bit checksum to 16 bits and complement it. 
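+ * (RFC 1071 one's-complement folding: e.g. 0x0003FFFE becomes 0x0003 + 0xFFFE = 0x10001, the end-around carry leaves 0x0002, and the final complement is 0xFFFD.)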
+ */ + ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) + + (ip_cksum & 0x0000FFFF); + if (ip_cksum > 65535) + ip_cksum -= 65535; + ip_cksum = (~ip_cksum) & 0x0000FFFF; + if (ip_cksum == 0) + ip_cksum = 0xFFFF; + ip_hdr->hdr_checksum = (uint16_t) ip_cksum; +} + +static inline bool +pkt_burst_prepare(struct rte_mbuf *pkt, struct rte_mempool *mbp, + struct rte_ether_hdr *eth_hdr, const uint16_t vlan_tci, + const uint16_t vlan_tci_outer, const uint64_t ol_flags) +{ + struct rte_mbuf *pkt_segs[RTE_MAX_SEGS_PER_PKT]; + struct rte_mbuf *pkt_seg; + uint32_t nb_segs, pkt_len; + uint8_t i; + + if (unlikely(tx_pkt_split == TX_PKT_SPLIT_RND)) + nb_segs = rte_rand() % tx_pkt_nb_segs + 1; + else + nb_segs = tx_pkt_nb_segs; + + if (nb_segs > 1) { + if (rte_mempool_get_bulk(mbp, (void **)pkt_segs, nb_segs - 1)) + return false; + } + + rte_pktmbuf_reset_headroom(pkt); + pkt->data_len = tx_pkt_seg_lengths[0]; + pkt->ol_flags &= EXT_ATTACHED_MBUF; + pkt->ol_flags |= ol_flags; + pkt->vlan_tci = vlan_tci; + pkt->vlan_tci_outer = vlan_tci_outer; + pkt->l2_len = sizeof(struct rte_ether_hdr); + pkt->l3_len = sizeof(struct rte_ipv4_hdr); + + pkt_len = pkt->data_len; + pkt_seg = pkt; + for (i = 1; i < nb_segs; i++) { + pkt_seg->next = pkt_segs[i - 1]; + pkt_seg = pkt_seg->next; + pkt_seg->data_len = tx_pkt_seg_lengths[i]; + pkt_len += pkt_seg->data_len; + } + pkt_seg->next = NULL; /* Last segment of packet. */ + /* + * Copy headers in first packet segment(s). + */ + copy_buf_to_pkt(eth_hdr, sizeof(*eth_hdr), pkt, 0); + copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt, + sizeof(struct rte_ether_hdr)); + if (txonly_multi_flow) { + uint8_t ip_var = RTE_PER_LCORE(_ip_var); + struct rte_ipv4_hdr *ip_hdr; + uint32_t addr; + + ip_hdr = rte_pktmbuf_mtod_offset(pkt, + struct rte_ipv4_hdr *, + sizeof(struct rte_ether_hdr)); + /* + * Generate multiple flows by varying IP src addr. This + * enables packets are well distributed by RSS in + * receiver side if any and txonly mode can be a decent + * packet generator for developer's quick performance + * regression test. + */ + addr = (tx_ip_dst_addr | (ip_var++ << 8)) + rte_lcore_id(); + ip_hdr->src_addr = rte_cpu_to_be_32(addr); + RTE_PER_LCORE(_ip_var) = ip_var; + } + copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt, + sizeof(struct rte_ether_hdr) + + sizeof(struct rte_ipv4_hdr)); + /* + * Complete first mbuf of packet and append it to the + * burst of packets to be transmitted. + */ + pkt->nb_segs = nb_segs; + pkt->pkt_len = pkt_len; + + return true; +} + +/* + * Transmit a burst of multi-segments packets. 
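+ * Mbufs are taken from the per-lcore mempool, each packet is filled by pkt_burst_prepare() with the pre-built Ethernet/IP/UDP headers, and any packets left unsent after the optional retry loop are freed.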
+ */ +static void +pkt_burst_transmit(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_port *txp; + struct rte_mbuf *pkt; + struct rte_mempool *mbp; + struct rte_ether_hdr eth_hdr; + uint16_t nb_tx; + uint16_t nb_pkt; + uint16_t vlan_tci, vlan_tci_outer; + uint32_t retry; + uint64_t ol_flags = 0; + uint64_t tx_offloads; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + mbp = current_fwd_lcore()->mbp; + txp = &ports[fs->tx_port]; + tx_offloads = txp->dev_conf.txmode.offloads; + vlan_tci = txp->tx_vlan_id; + vlan_tci_outer = txp->tx_vlan_id_outer; + if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT) + ol_flags = PKT_TX_VLAN_PKT; + if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT) + ol_flags |= PKT_TX_QINQ_PKT; + if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT) + ol_flags |= PKT_TX_MACSEC; + + /* + * Initialize Ethernet header. + */ + rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr], ð_hdr.d_addr); + rte_ether_addr_copy(&ports[fs->tx_port].eth_addr, ð_hdr.s_addr); + eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); + + if (rte_mempool_get_bulk(mbp, (void **)pkts_burst, + nb_pkt_per_burst) == 0) { + for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { + if (unlikely(!pkt_burst_prepare(pkts_burst[nb_pkt], mbp, + ð_hdr, vlan_tci, + vlan_tci_outer, + ol_flags))) { + rte_mempool_put_bulk(mbp, + (void **)&pkts_burst[nb_pkt], + nb_pkt_per_burst - nb_pkt); + break; + } + } + } else { + for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { + pkt = rte_mbuf_raw_alloc(mbp); + if (pkt == NULL) + break; + if (unlikely(!pkt_burst_prepare(pkt, mbp, ð_hdr, + vlan_tci, + vlan_tci_outer, + ol_flags))) { + rte_pktmbuf_free(pkt); + break; + } + pkts_burst[nb_pkt] = pkt; + } + } + + if (nb_pkt == 0) + return; + + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt); + /* + * Retry if necessary + */ + if (unlikely(nb_tx < nb_pkt) && fs->retry_enabled) { + retry = 0; + while (nb_tx < nb_pkt && retry++ < burst_tx_retry_num) { + rte_delay_us(burst_tx_delay_time); + nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue, + &pkts_burst[nb_tx], nb_pkt - nb_tx); + } + } + fs->tx_packets += nb_tx; + + if (txonly_multi_flow) + RTE_PER_LCORE(_ip_var) -= nb_pkt - nb_tx; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_pkt)) { + if (verbose_level > 0 && fs->fwd_dropped == 0) + printf("port %d tx_queue %d - drop " + "(nb_pkt:%u - nb_tx:%u)=%u packets\n", + fs->tx_port, fs->tx_queue, + (unsigned) nb_pkt, (unsigned) nb_tx, + (unsigned) (nb_pkt - nb_tx)); + fs->fwd_dropped += (nb_pkt - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_pkt); + } + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +static void +tx_only_begin(__rte_unused portid_t pi) +{ + uint16_t pkt_data_len; + + pkt_data_len = (uint16_t) (tx_pkt_length - ( + sizeof(struct rte_ether_hdr) + + sizeof(struct rte_ipv4_hdr) + + sizeof(struct rte_udp_hdr))); + setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len); +} + +struct fwd_engine tx_only_engine = { + .fwd_mode_name = "txonly", + .port_fwd_begin = tx_only_begin, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_transmit, +}; diff --git a/src/spdk/dpdk/app/test-pmd/util.c 
b/src/spdk/dpdk/app/test-pmd/util.c new file mode 100644 index 000000000..8488fa1a8 --- /dev/null +++ b/src/spdk/dpdk/app/test-pmd/util.c @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation + * Copyright 2018 Mellanox Technologies, Ltd + */ + +#include <stdio.h> + +#include <rte_net.h> +#include <rte_mbuf.h> +#include <rte_ether.h> +#include <rte_vxlan.h> +#include <rte_ethdev.h> +#include <rte_flow.h> + +#include "testpmd.h" + +static inline void +print_ether_addr(const char *what, const struct rte_ether_addr *eth_addr) +{ + char buf[RTE_ETHER_ADDR_FMT_SIZE]; + rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr); + printf("%s%s", what, buf); +} + +static inline void +dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + uint16_t nb_pkts, int is_rx) +{ + struct rte_mbuf *mb; + const struct rte_ether_hdr *eth_hdr; + struct rte_ether_hdr _eth_hdr; + uint16_t eth_type; + uint64_t ol_flags; + uint16_t i, packet_type; + uint16_t is_encapsulation; + char buf[256]; + struct rte_net_hdr_lens hdr_lens; + uint32_t sw_packet_type; + uint16_t udp_port; + uint32_t vx_vni; + const char *reason; + int dynf_index; + + if (!nb_pkts) + return; + printf("port %u/queue %u: %s %u packets\n", + port_id, queue, + is_rx ? "received" : "sent", + (unsigned int) nb_pkts); + for (i = 0; i < nb_pkts; i++) { + mb = pkts[i]; + eth_hdr = rte_pktmbuf_read(mb, 0, sizeof(_eth_hdr), &_eth_hdr); + eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type); + ol_flags = mb->ol_flags; + packet_type = mb->packet_type; + is_encapsulation = RTE_ETH_IS_TUNNEL_PKT(packet_type); + + print_ether_addr(" src=", ð_hdr->s_addr); + print_ether_addr(" - dst=", ð_hdr->d_addr); + printf(" - type=0x%04x - length=%u - nb_segs=%d", + eth_type, (unsigned int) mb->pkt_len, + (int)mb->nb_segs); + if (ol_flags & PKT_RX_RSS_HASH) { + printf(" - RSS hash=0x%x", (unsigned int) mb->hash.rss); + printf(" - RSS queue=0x%x", (unsigned int) queue); + } + if (ol_flags & PKT_RX_FDIR) { + printf(" - FDIR matched "); + if (ol_flags & PKT_RX_FDIR_ID) + printf("ID=0x%x", + mb->hash.fdir.hi); + else if (ol_flags & PKT_RX_FDIR_FLX) + printf("flex bytes=0x%08x %08x", + mb->hash.fdir.hi, mb->hash.fdir.lo); + else + printf("hash=0x%x ID=0x%x ", + mb->hash.fdir.hash, mb->hash.fdir.id); + } + if (ol_flags & PKT_RX_TIMESTAMP) + printf(" - timestamp %"PRIu64" ", mb->timestamp); + if (ol_flags & PKT_RX_QINQ) + printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x", + mb->vlan_tci, mb->vlan_tci_outer); + else if (ol_flags & PKT_RX_VLAN) + printf(" - VLAN tci=0x%x", mb->vlan_tci); + if (!is_rx && (ol_flags & PKT_TX_DYNF_METADATA)) + printf(" - Tx metadata: 0x%x", + *RTE_FLOW_DYNF_METADATA(mb)); + if (is_rx && (ol_flags & PKT_RX_DYNF_METADATA)) + printf(" - Rx metadata: 0x%x", + *RTE_FLOW_DYNF_METADATA(mb)); + for (dynf_index = 0; dynf_index < 64; dynf_index++) { + if (dynf_names[dynf_index][0] != '\0') + printf(" - dynf %s: %d", + dynf_names[dynf_index], + !!(ol_flags & (1UL << dynf_index))); + } + if (mb->packet_type) { + rte_get_ptype_name(mb->packet_type, buf, sizeof(buf)); + printf(" - hw ptype: %s", buf); + } + sw_packet_type = rte_net_get_ptype(mb, &hdr_lens, + RTE_PTYPE_ALL_MASK); + rte_get_ptype_name(sw_packet_type, buf, sizeof(buf)); + printf(" - sw ptype: %s", buf); + if (sw_packet_type & RTE_PTYPE_L2_MASK) + printf(" - l2_len=%d", hdr_lens.l2_len); + if (sw_packet_type & RTE_PTYPE_L3_MASK) + printf(" - l3_len=%d", hdr_lens.l3_len); + if (sw_packet_type & RTE_PTYPE_L4_MASK) + printf(" - 
l4_len=%d", hdr_lens.l4_len); + if (sw_packet_type & RTE_PTYPE_TUNNEL_MASK) + printf(" - tunnel_len=%d", hdr_lens.tunnel_len); + if (sw_packet_type & RTE_PTYPE_INNER_L2_MASK) + printf(" - inner_l2_len=%d", hdr_lens.inner_l2_len); + if (sw_packet_type & RTE_PTYPE_INNER_L3_MASK) + printf(" - inner_l3_len=%d", hdr_lens.inner_l3_len); + if (sw_packet_type & RTE_PTYPE_INNER_L4_MASK) + printf(" - inner_l4_len=%d", hdr_lens.inner_l4_len); + if (is_encapsulation) { + struct rte_ipv4_hdr *ipv4_hdr; + struct rte_ipv6_hdr *ipv6_hdr; + struct rte_udp_hdr *udp_hdr; + uint8_t l2_len; + uint8_t l3_len; + uint8_t l4_len; + uint8_t l4_proto; + struct rte_vxlan_hdr *vxlan_hdr; + + l2_len = sizeof(struct rte_ether_hdr); + + /* Do not support ipv4 option field */ + if (RTE_ETH_IS_IPV4_HDR(packet_type)) { + l3_len = sizeof(struct rte_ipv4_hdr); + ipv4_hdr = rte_pktmbuf_mtod_offset(mb, + struct rte_ipv4_hdr *, + l2_len); + l4_proto = ipv4_hdr->next_proto_id; + } else { + l3_len = sizeof(struct rte_ipv6_hdr); + ipv6_hdr = rte_pktmbuf_mtod_offset(mb, + struct rte_ipv6_hdr *, + l2_len); + l4_proto = ipv6_hdr->proto; + } + if (l4_proto == IPPROTO_UDP) { + udp_hdr = rte_pktmbuf_mtod_offset(mb, + struct rte_udp_hdr *, + l2_len + l3_len); + l4_len = sizeof(struct rte_udp_hdr); + vxlan_hdr = rte_pktmbuf_mtod_offset(mb, + struct rte_vxlan_hdr *, + l2_len + l3_len + l4_len); + udp_port = RTE_BE_TO_CPU_16(udp_hdr->dst_port); + vx_vni = rte_be_to_cpu_32(vxlan_hdr->vx_vni); + printf(" - VXLAN packet: packet type =%d, " + "Destination UDP port =%d, VNI = %d", + packet_type, udp_port, vx_vni >> 8); + } + } + printf(" - %s queue=0x%x", is_rx ? "Receive" : "Send", + (unsigned int) queue); + printf("\n"); + rte_get_rx_ol_flag_list(mb->ol_flags, buf, sizeof(buf)); + printf(" ol_flags: %s\n", buf); + if (rte_mbuf_check(mb, 1, &reason) < 0) + printf("INVALID mbuf: %s\n", reason); + } +} + +uint16_t +dump_rx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + uint16_t nb_pkts, __rte_unused uint16_t max_pkts, + __rte_unused void *user_param) +{ + dump_pkt_burst(port_id, queue, pkts, nb_pkts, 1); + return nb_pkts; +} + +uint16_t +dump_tx_pkts(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[], + uint16_t nb_pkts, __rte_unused void *user_param) +{ + dump_pkt_burst(port_id, queue, pkts, nb_pkts, 0); + return nb_pkts; +} + +uint16_t +tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue, + struct rte_mbuf *pkts[], uint16_t nb_pkts, + __rte_unused void *user_param) +{ + uint16_t i = 0; + + /* + * Add metadata value to every Tx packet, + * and set ol_flags accordingly. 
+uint16_t
+tx_pkt_set_md(uint16_t port_id, __rte_unused uint16_t queue,
+	      struct rte_mbuf *pkts[], uint16_t nb_pkts,
+	      __rte_unused void *user_param)
+{
+	uint16_t i = 0;
+
+	/*
+	 * Add metadata value to every Tx packet,
+	 * and set ol_flags accordingly.
+	 */
+	if (rte_flow_dynf_metadata_avail())
+		for (i = 0; i < nb_pkts; i++) {
+			*RTE_FLOW_DYNF_METADATA(pkts[i]) =
+				ports[port_id].tx_metadata;
+			pkts[i]->ol_flags |= PKT_TX_DYNF_METADATA;
+		}
+	return nb_pkts;
+}
+
+void
+add_tx_md_callback(portid_t portid)
+{
+	struct rte_eth_dev_info dev_info;
+	uint16_t queue;
+	int ret;
+
+	if (port_id_is_invalid(portid, ENABLED_WARN))
+		return;
+
+	ret = eth_dev_info_get_print_err(portid, &dev_info);
+	if (ret != 0)
+		return;
+
+	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
+		if (!ports[portid].tx_set_md_cb[queue])
+			ports[portid].tx_set_md_cb[queue] =
+				rte_eth_add_tx_callback(portid, queue,
+							tx_pkt_set_md, NULL);
+}
+
+void
+remove_tx_md_callback(portid_t portid)
+{
+	struct rte_eth_dev_info dev_info;
+	uint16_t queue;
+	int ret;
+
+	if (port_id_is_invalid(portid, ENABLED_WARN))
+		return;
+
+	ret = eth_dev_info_get_print_err(portid, &dev_info);
+	if (ret != 0)
+		return;
+
+	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
+		if (ports[portid].tx_set_md_cb[queue]) {
+			rte_eth_remove_tx_callback(portid, queue,
+				ports[portid].tx_set_md_cb[queue]);
+			ports[portid].tx_set_md_cb[queue] = NULL;
+		}
+}
+
+uint16_t
+tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue,
+		struct rte_mbuf *pkts[], uint16_t nb_pkts,
+		__rte_unused void *user_param)
+{
+	uint16_t i = 0;
+
+	if (ports[port_id].mbuf_dynf)
+		for (i = 0; i < nb_pkts; i++)
+			pkts[i]->ol_flags |= ports[port_id].mbuf_dynf;
+	return nb_pkts;
+}
+
+void
+add_tx_dynf_callback(portid_t portid)
+{
+	struct rte_eth_dev_info dev_info;
+	uint16_t queue;
+	int ret;
+
+	if (port_id_is_invalid(portid, ENABLED_WARN))
+		return;
+
+	ret = eth_dev_info_get_print_err(portid, &dev_info);
+	if (ret != 0)
+		return;
+
+	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
+		if (!ports[portid].tx_set_dynf_cb[queue])
+			ports[portid].tx_set_dynf_cb[queue] =
+				rte_eth_add_tx_callback(portid, queue,
+							tx_pkt_set_dynf, NULL);
+}
+
+void
+remove_tx_dynf_callback(portid_t portid)
+{
+	struct rte_eth_dev_info dev_info;
+	uint16_t queue;
+	int ret;
+
+	if (port_id_is_invalid(portid, ENABLED_WARN))
+		return;
+
+	ret = eth_dev_info_get_print_err(portid, &dev_info);
+	if (ret != 0)
+		return;
+
+	for (queue = 0; queue < dev_info.nb_tx_queues; queue++)
+		if (ports[portid].tx_set_dynf_cb[queue]) {
+			rte_eth_remove_tx_callback(portid, queue,
+				ports[portid].tx_set_dynf_cb[queue]);
+			ports[portid].tx_set_dynf_cb[queue] = NULL;
+		}
+}
+
+int
+eth_dev_info_get_print_err(uint16_t port_id,
+			   struct rte_eth_dev_info *dev_info)
+{
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, dev_info);
+	if (ret != 0)
+		printf("Error during getting device (port %u) info: %s\n",
+		       port_id, strerror(-ret));
+
+	return ret;
+}
+
+void
+eth_set_promisc_mode(uint16_t port, int enable)
+{
+	int ret;
+
+	if (enable)
+		ret = rte_eth_promiscuous_enable(port);
+	else
+		ret = rte_eth_promiscuous_disable(port);
+
+	if (ret != 0)
+		printf("Error during %s promiscuous mode for port %u: %s\n",
+		       enable ? "enabling" : "disabling",
+		       port, rte_strerror(-ret));
+}
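[Editor's note] These wrappers exist because rte_eth_promiscuous_enable()/disable() return an int that callers should check. A minimal usage sketch, assuming ports are already probed; demo_all_ports_promisc_on() is a placeholder name:

/* Sketch (not part of the patch): enable promiscuous mode everywhere. */
#include <rte_ethdev.h>
#include "testpmd.h"

static void
demo_all_ports_promisc_on(void)
{
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id)
		eth_set_promisc_mode(port_id, 1);	/* prints on failure */
}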
"enabling" : "disabling", + port, rte_strerror(-ret)); +} + +int +eth_link_get_nowait_print_err(uint16_t port_id, struct rte_eth_link *link) +{ + int ret; + + ret = rte_eth_link_get_nowait(port_id, link); + if (ret < 0) + printf("Device (port %u) link get (without wait) failed: %s\n", + port_id, rte_strerror(-ret)); + + return ret; +} + +int +eth_macaddr_get_print_err(uint16_t port_id, struct rte_ether_addr *mac_addr) +{ + int ret; + + ret = rte_eth_macaddr_get(port_id, mac_addr); + if (ret != 0) + printf("Error getting device (port %u) mac address: %s\n", + port_id, rte_strerror(-ret)); + + return ret; +} |