Diffstat (limited to 'src/seastar/dpdk/drivers/net/dpaa2')
21 files changed, 13997 insertions, 0 deletions
diff --git a/src/seastar/dpdk/drivers/net/dpaa2/Makefile b/src/seastar/dpdk/drivers/net/dpaa2/Makefile new file mode 100644 index 000000000..c7262ffd5 --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/Makefile @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. +# Copyright 2016 NXP +# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_dpaa2.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2 +CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc +CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc +CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include +CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc +CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal +CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2 + +# versioning export map +EXPORT_MAP := rte_pmd_dpaa2_version.map + +# library version +LIBABIVER := 2 + +# depends on fslmc bus which uses experimental API +CFLAGS += -DALLOW_EXPERIMENTAL_API + +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_flow.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_mux.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpkg.c +SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpdmux.c + +LDLIBS += -lrte_bus_fslmc +LDLIBS += -lrte_mempool_dpaa2 +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs +LDLIBS += -lrte_common_dpaax + +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_DPAA2_PMD)-include := rte_pmd_dpaa2.h +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/src/seastar/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/src/seastar/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c new file mode 100644 index 000000000..56e2e56a3 --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c @@ -0,0 +1,385 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
+ * Copyright 2016-2018 NXP + * + */ + +#include <time.h> +#include <net/if.h> + +#include <rte_mbuf.h> +#include <rte_ethdev_driver.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_string_fns.h> +#include <rte_cycles.h> +#include <rte_kvargs.h> +#include <rte_dev.h> + +#include <dpaa2_pmd_logs.h> +#include <dpaa2_hw_pvt.h> +#include <dpaa2_hw_mempool.h> + +#include "../dpaa2_ethdev.h" + +int +dpaa2_distset_to_dpkg_profile_cfg( + uint64_t req_dist_set, + struct dpkg_profile_cfg *kg_cfg); + +int +rte_pmd_dpaa2_set_custom_hash(uint16_t port_id, + uint16_t offset, + uint8_t size) +{ + struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id]; + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_rx_tc_dist_cfg tc_cfg; + struct dpkg_profile_cfg kg_cfg; + void *p_params; + int ret, tc_index = 0; + + p_params = rte_zmalloc( + NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); + if (!p_params) { + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); + return -ENOMEM; + } + + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_DATA; + kg_cfg.extracts[0].extract.from_data.offset = offset; + kg_cfg.extracts[0].extract.from_data.size = size; + kg_cfg.num_extracts = 1; + + ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); + if (ret) { + DPAA2_PMD_ERR("Unable to prepare extract parameters"); + rte_free(p_params); + return ret; + } + + memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); + tc_cfg.key_cfg_iova = (size_t)(DPAA2_VADDR_TO_IOVA(p_params)); + tc_cfg.dist_size = eth_dev->data->nb_rx_queues; + tc_cfg.dist_mode = DPNI_DIST_MODE_HASH; + + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, + &tc_cfg); + rte_free(p_params); + if (ret) { + DPAA2_PMD_ERR( + "Setting distribution for Rx failed with err: %d", + ret); + return ret; + } + + return 0; +} + +int +dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, + uint64_t req_dist_set) +{ + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_rx_tc_dist_cfg tc_cfg; + struct dpkg_profile_cfg kg_cfg; + void *p_params; + int ret, tc_index = 0; + + p_params = rte_malloc( + NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); + if (!p_params) { + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); + return -ENOMEM; + } + memset(p_params, 0, DIST_PARAM_IOVA_SIZE); + memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); + + ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg); + if (ret) { + DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported", + req_dist_set); + rte_free(p_params); + return ret; + } + tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); + tc_cfg.dist_size = eth_dev->data->nb_rx_queues; + tc_cfg.dist_mode = DPNI_DIST_MODE_HASH; + + ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); + if (ret) { + DPAA2_PMD_ERR("Unable to prepare extract parameters"); + rte_free(p_params); + return ret; + } + + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, + &tc_cfg); + rte_free(p_params); + if (ret) { + DPAA2_PMD_ERR( + "Setting distribution for Rx failed with err: %d", + ret); + return ret; + } + + return 0; +} + +int dpaa2_remove_flow_dist( + struct rte_eth_dev *eth_dev, + uint8_t tc_index) +{ + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_rx_tc_dist_cfg tc_cfg; + struct dpkg_profile_cfg kg_cfg; + void *p_params; + int ret; + + p_params = rte_malloc( + NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE); + if 
(!p_params) { + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); + return -ENOMEM; + } + memset(p_params, 0, DIST_PARAM_IOVA_SIZE); + memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); + kg_cfg.num_extracts = 0; + tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params)); + tc_cfg.dist_size = 0; + tc_cfg.dist_mode = DPNI_DIST_MODE_NONE; + + ret = dpkg_prepare_key_cfg(&kg_cfg, p_params); + if (ret) { + DPAA2_PMD_ERR("Unable to prepare extract parameters"); + rte_free(p_params); + return ret; + } + + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index, + &tc_cfg); + rte_free(p_params); + if (ret) + DPAA2_PMD_ERR( + "Setting distribution for Rx failed with err: %d", + ret); + return ret; +} + +int +dpaa2_distset_to_dpkg_profile_cfg( + uint64_t req_dist_set, + struct dpkg_profile_cfg *kg_cfg) +{ + uint32_t loop = 0, i = 0, dist_field = 0; + int l2_configured = 0, l3_configured = 0; + int l4_configured = 0, sctp_configured = 0; + + memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg)); + while (req_dist_set) { + if (req_dist_set % 2 != 0) { + dist_field = 1U << loop; + switch (dist_field) { + case ETH_RSS_L2_PAYLOAD: + + if (l2_configured) + break; + l2_configured = 1; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_ETH; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_ETH_TYPE; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + break; + + case ETH_RSS_IPV4: + case ETH_RSS_FRAG_IPV4: + case ETH_RSS_NONFRAG_IPV4_OTHER: + case ETH_RSS_IPV6: + case ETH_RSS_FRAG_IPV6: + case ETH_RSS_NONFRAG_IPV6_OTHER: + case ETH_RSS_IPV6_EX: + + if (l3_configured) + break; + l3_configured = 1; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_IP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_IP_SRC; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_IP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_IP_DST; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_IP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_IP_PROTO; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + kg_cfg->num_extracts++; + i++; + break; + + case ETH_RSS_NONFRAG_IPV4_TCP: + case ETH_RSS_NONFRAG_IPV6_TCP: + case ETH_RSS_NONFRAG_IPV4_UDP: + case ETH_RSS_NONFRAG_IPV6_UDP: + case ETH_RSS_IPV6_TCP_EX: + case ETH_RSS_IPV6_UDP_EX: + + if (l4_configured) + break; + l4_configured = 1; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_TCP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_TCP_PORT_SRC; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_TCP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_TCP_PORT_SRC; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + break; + + case ETH_RSS_NONFRAG_IPV4_SCTP: + case ETH_RSS_NONFRAG_IPV6_SCTP: + + if (sctp_configured) + break; + sctp_configured = 1; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_SCTP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_SCTP_PORT_SRC; 
+ kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + + kg_cfg->extracts[i].extract.from_hdr.prot = + NET_PROT_SCTP; + kg_cfg->extracts[i].extract.from_hdr.field = + NH_FLD_SCTP_PORT_DST; + kg_cfg->extracts[i].type = + DPKG_EXTRACT_FROM_HDR; + kg_cfg->extracts[i].extract.from_hdr.type = + DPKG_FULL_FIELD; + i++; + break; + + default: + DPAA2_PMD_WARN( + "Unsupported flow dist option %x", + dist_field); + return -EINVAL; + } + } + req_dist_set = req_dist_set >> 1; + loop++; + } + kg_cfg->num_extracts = i; + return 0; +} + +int +dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, + void *blist) +{ + /* Function to attach a DPNI with a buffer pool list. Buffer pool list + * handle is passed in blist. + */ + int32_t retcode; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_pools_cfg bpool_cfg; + struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist; + struct dpni_buffer_layout layout; + int tot_size; + + /* ... rx buffer layout . + * Check alignment for buffer layouts first + */ + + /* ... rx buffer layout ... */ + tot_size = RTE_PKTMBUF_HEADROOM; + tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN); + + memset(&layout, 0, sizeof(struct dpni_buffer_layout)); + layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | + DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | + DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP | + DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; + + layout.pass_timestamp = true; + layout.pass_frame_status = 1; + layout.private_data_size = DPAA2_FD_PTA_SIZE; + layout.pass_parser_result = 1; + layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN; + layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE - + DPAA2_MBUF_HW_ANNOTATION; + retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_RX, &layout); + if (retcode) { + DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)", + retcode); + return retcode; + } + + /*Attach buffer pool to the network interface as described by the user*/ + bpool_cfg.num_dpbp = 1; + bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id; + bpool_cfg.pools[0].backup_pool = 0; + bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size, + DPAA2_PACKET_LAYOUT_ALIGN); + bpool_cfg.pools[0].priority_mask = 0; + + retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg); + if (retcode != 0) { + DPAA2_PMD_ERR("Error configuring buffer pool on interface." + " bpid = %d error code = %d", + bpool_cfg.pools[0].dpbp_id, retcode); + return retcode; + } + + priv->bp_list = bp_list; + return 0; +} diff --git a/src/seastar/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/src/seastar/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h new file mode 100644 index 000000000..adb730b71 --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h @@ -0,0 +1,291 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
+ * Copyright 2016 NXP + * + */ + +/** + * @file + * + * DPNI packet parse results - implementation internal + */ + +#ifndef _DPAA2_HW_DPNI_ANNOT_H_ +#define _DPAA2_HW_DPNI_ANNOT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Annotation valid bits in FD FRC */ +#define DPAA2_FD_FRC_FASV 0x8000 +#define DPAA2_FD_FRC_FAEADV 0x4000 +#define DPAA2_FD_FRC_FAPRV 0x2000 +#define DPAA2_FD_FRC_FAIADV 0x1000 +#define DPAA2_FD_FRC_FASWOV 0x0800 +#define DPAA2_FD_FRC_FAICFDV 0x0400 + +/* Annotation bits in FD CTRL */ +#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ +#define DPAA2_FD_CTRL_PTA 0x00800000 +#define DPAA2_FD_CTRL_PTV1 0x00400000 + +/* Frame annotation status */ +struct dpaa2_fas { + uint8_t reserved; + uint8_t ppid; + __le16 ifpid; + __le32 status; +} __attribute__((__packed__)); + +/** + * HW Packet Annotation Register structures + */ +struct dpaa2_annot_hdr { + /**< word1: Frame Annotation Status (8 bytes)*/ + uint64_t word1; + + /**< word2: Time Stamp (8 bytes)*/ + uint64_t word2; + + /**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/ + uint64_t word3; + + /**< word4: Frame Annotation Flags-FAF (8 bytes) */ + uint64_t word4; + + /**< word5: + * ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset + + * LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n + + * LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes) + */ + uint64_t word5; + + /**< word6: + * PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1 + * + IPOffset_norMInEncapO + GREOffset + L4Offset + + * GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes) + */ + uint64_t word6; + + /**< word7: + * RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset + * + IPv6FragOffset + GrossRunningSum + * + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes) + */ + uint64_t word7; + + /**< word8: + * ParseErrorcode + Soft Parsing Context (1 + 7 bytes) + */ + uint64_t word8; +}; + +/** + * Internal Macros to get/set Packet annotation header + */ + +/** General Macro to define a particular bit position*/ +#define BIT_POS(x) ((uint64_t)1 << ((x))) +/** Set a bit in the variable */ +#define BIT_SET_AT_POS(var, pos) ((var) |= (pos)) +/** Reset the bit in the variable */ +#define BIT_RESET_AT_POS(var, pos) ((var) &= ~(pos)) +/** Check the bit is set in the variable */ +#define BIT_ISSET_AT_POS(var, pos) (((var) & (pos)) ? 
1 : 0) +/** + * Macrso to define bit position in word3 + */ +#define NEXT_HDR(var) ((uint64_t)(var) & 0xFFFF000000000000) +#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16) +#define FAF_EXTN_RESERVED(var) ((uint64_t)(var) & 0x00007FFF00000000) +#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)(var) & 0x00000000FF000000) +#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23) +#define PARSING_ERROR BIT_POS(22) +#define L2_ETH_MAC_PRESENT BIT_POS(21) +#define L2_ETH_MAC_UNICAST BIT_POS(20) +#define L2_ETH_MAC_MULTICAST BIT_POS(19) +#define L2_ETH_MAC_BROADCAST BIT_POS(18) +#define L2_ETH_FRAME_IS_BPDU BIT_POS(17) +#define L2_ETH_FCOE_PRESENT BIT_POS(16) +#define L2_ETH_FIP_PRESENT BIT_POS(15) +#define L2_ETH_PARSING_ERROR BIT_POS(14) +#define L2_LLC_SNAP_PRESENT BIT_POS(13) +#define L2_UNKNOWN_LLC_OUI BIT_POS(12) +#define L2_LLC_SNAP_ERROR BIT_POS(11) +#define L2_VLAN_1_PRESENT BIT_POS(10) +#define L2_VLAN_N_PRESENT BIT_POS(9) +#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8) +#define L2_VLAN_PARSING_ERROR BIT_POS(7) +#define L2_PPPOE_PPP_PRESENT BIT_POS(6) +#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5) +#define L2_MPLS_1_PRESENT BIT_POS(4) +#define L2_MPLS_N_PRESENT BIT_POS(3) +#define L2_MPLS_PARSING_ERROR BIT_POS(2) +#define L2_ARP_PRESENT BIT_POS(1) +#define L2_ARP_PARSING_ERROR BIT_POS(0) +/** + * Macrso to define bit position in word4 + */ +#define L2_UNKNOWN_PROTOCOL BIT_POS(63) +#define L2_SOFT_PARSING_ERROR BIT_POS(62) +#define L3_IPV4_1_PRESENT BIT_POS(61) +#define L3_IPV4_1_UNICAST BIT_POS(60) +#define L3_IPV4_1_MULTICAST BIT_POS(59) +#define L3_IPV4_1_BROADCAST BIT_POS(58) +#define L3_IPV4_N_PRESENT BIT_POS(57) +#define L3_IPV4_N_UNICAST BIT_POS(56) +#define L3_IPV4_N_MULTICAST BIT_POS(55) +#define L3_IPV4_N_BROADCAST BIT_POS(54) +#define L3_IPV6_1_PRESENT BIT_POS(53) +#define L3_IPV6_1_UNICAST BIT_POS(52) +#define L3_IPV6_1_MULTICAST BIT_POS(51) +#define L3_IPV6_N_PRESENT BIT_POS(50) +#define L3_IPV6_N_UNICAST BIT_POS(49) +#define L3_IPV6_N_MULTICAST BIT_POS(48) +#define L3_IP_1_OPT_PRESENT BIT_POS(47) +#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46) +#define L3_IP_1_MORE_FRAGMENT BIT_POS(45) +#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44) +#define L3_IP_1_PARSING_ERROR BIT_POS(43) +#define L3_IP_N_OPT_PRESENT BIT_POS(42) +#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41) +#define L3_IP_N_MORE_FRAGMENT BIT_POS(40) +#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39) +#define L3_PROTO_ICMP_PRESENT BIT_POS(38) +#define L3_PROTO_IGMP_PRESENT BIT_POS(37) +#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36) +#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35) +#define L3_IP_N_PARSING_ERROR BIT_POS(34) +#define L3_MIN_ENCAP_PRESENT BIT_POS(33) +#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32) +#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31) +#define L3_PROTO_GRE_PRESENT BIT_POS(30) +#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29) +#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28) +#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27) +#define L3_SOFT_PARSING_ERROR BIT_POS(26) +#define L3_PROTO_UDP_PRESENT BIT_POS(25) +#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24) +#define L3_PROTO_TCP_PRESENT BIT_POS(23) +#define L3_PROTO_TCP_OPT_PRESENT BIT_POS(22) +#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21) +#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20) +#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19) +#define L3_PROTO_IPSEC_PRESENT BIT_POS(18) +#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17) +#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16) +#define L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15) +#define L3_PROTO_SCTP_PRESENT BIT_POS(14) 
+#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13) +#define L3_PROTO_DCCP_PRESENT BIT_POS(12) +#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11) +#define L4_UNKNOWN_PROTOCOL BIT_POS(10) +#define L4_SOFT_PARSING_ERROR BIT_POS(9) +#define L3_PROTO_GTP_PRESENT BIT_POS(8) +#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7) +#define L3_PROTO_ESP_PRESENT BIT_POS(6) +#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5) +#define L3_PROTO_ISCSI_PRESENT BIT_POS(4) +#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3) +#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2) +#define L5_SOFT_PARSING_ERROR BIT_POS(1) +#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0) + +#define DPAA2_L3_IPv4 (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \ + L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv6 (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \ + L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv4_TCP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \ + L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \ + L4_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv4_UDP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \ + L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv6_TCP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \ + L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \ + L4_UNKNOWN_PROTOCOL) + +#define DPAA2_L3_IPv6_UDP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \ + L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL) + +/** + * Macros to get values in word5 + */ +#define SHIM_OFFSET_1(var) ((uint64_t)(var) & 0xFF00000000000000) +#define SHIM_OFFSET_2(var) ((uint64_t)(var) & 0x00FF000000000000) +#define IP_PID_OFFSET(var) ((uint64_t)(var) & 0x0000FF0000000000) +#define ETH_OFFSET(var) ((uint64_t)(var) & 0x000000FF00000000) +#define LLC_SNAP_OFFSET(var) ((uint64_t)(var) & 0x00000000FF000000) +#define VLAN_TCI_OFFSET_1(var) ((uint64_t)(var) & 0x0000000000FF0000) +#define VLAN_TCI_OFFSET_N(var) ((uint64_t)(var) & 0x000000000000FF00) +#define LAST_ETYPE_OFFSET(var) ((uint64_t)(var) & 0x00000000000000FF) + +/** + * Macros to get values in word6 + */ +#define PPPOE_OFFSET(var) ((uint64_t)(var) & 0xFF00000000000000) +#define MPLS_OFFSET_1(var) ((uint64_t)(var) & 0x00FF000000000000) +#define MPLS_OFFSET_N(var) ((uint64_t)(var) & 0x0000FF0000000000) +#define ARP_OR_IP_OFFSET_1(var) ((uint64_t)(var) & 0x000000FF00000000) +#define IP_N_OR_MIN_ENCAP_OFFSET(var) ((uint64_t)(var) & 0x00000000FF000000) +#define GRE_OFFSET(var) ((uint64_t)(var) & 0x0000000000FF0000) +#define L4_OFFSET(var) ((uint64_t)(var) & 0x000000000000FF00) +#define GTP_OR_ESP_OR_IPSEC_OFFSET(var) ((uint64_t)(var) & 0x00000000000000FF) + +/** + * Macros to get values in word7 + */ +#define IPV6_ROUTING_HDR_OFFSET_1(var) ((uint64_t)(var) & 0xFF00000000000000) +#define IPV6_ROUTING_HDR_OFFSET_2(var) ((uint64_t)(var) & 0x00FF000000000000) +#define NEXT_HDR_OFFSET(var) ((uint64_t)(var) & 0x0000FF0000000000) +#define IPV6_FRAG_OFFSET(var) ((uint64_t)(var) & 0x000000FF00000000) +#define GROSS_RUNNING_SUM(var) ((uint64_t)(var) & 0x00000000FFFF0000) +#define RUNNING_SUM(var) ((uint64_t)(var) & 0x000000000000FFFF) + +/** + * Macros to get values in word8 + */ +#define PARSE_ERROR_CODE(var) ((uint64_t)(var) & 0xFF00000000000000) +#define SOFT_PARSING_CONTEXT(var) ((uint64_t)(var) & 0x00FFFFFFFFFFFFFF) + +/* Debug frame, otherwise supposed to be discarded */ +#define DPAA2_ETH_FAS_DISC 0x80000000 +/* MACSEC frame */ +#define DPAA2_ETH_FAS_MS 0x40000000 +#define DPAA2_ETH_FAS_PTP 0x08000000 +/* Ethernet multicast frame */ +#define DPAA2_ETH_FAS_MC 
0x04000000 +/* Ethernet broadcast frame */ +#define DPAA2_ETH_FAS_BC 0x02000000 +#define DPAA2_ETH_FAS_KSE 0x00040000 +#define DPAA2_ETH_FAS_EOFHE 0x00020000 +#define DPAA2_ETH_FAS_MNLE 0x00010000 +#define DPAA2_ETH_FAS_TIDE 0x00008000 +#define DPAA2_ETH_FAS_PIEE 0x00004000 +/* Frame length error */ +#define DPAA2_ETH_FAS_FLE 0x00002000 +/* Frame physical error; our favourite pastime */ +#define DPAA2_ETH_FAS_FPE 0x00001000 +#define DPAA2_ETH_FAS_PTE 0x00000080 +#define DPAA2_ETH_FAS_ISP 0x00000040 +#define DPAA2_ETH_FAS_PHE 0x00000020 +#define DPAA2_ETH_FAS_BLE 0x00000010 +/* L3 csum validation performed */ +#define DPAA2_ETH_FAS_L3CV 0x00000008 +/* L3 csum error */ +#define DPAA2_ETH_FAS_L3CE 0x00000004 +/* L4 csum validation performed */ +#define DPAA2_ETH_FAS_L4CV 0x00000002 +/* L4 csum error */ +#define DPAA2_ETH_FAS_L4CE 0x00000001 + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c new file mode 100644 index 000000000..900182f66 --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c @@ -0,0 +1,2388 @@ +/* * SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. + * Copyright 2016 NXP + * + */ + +#include <time.h> +#include <net/if.h> + +#include <rte_mbuf.h> +#include <rte_ethdev_driver.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_string_fns.h> +#include <rte_cycles.h> +#include <rte_kvargs.h> +#include <rte_dev.h> +#include <rte_fslmc.h> +#include <rte_flow_driver.h> + +#include "dpaa2_pmd_logs.h" +#include <fslmc_vfio.h> +#include <dpaa2_hw_pvt.h> +#include <dpaa2_hw_mempool.h> +#include <dpaa2_hw_dpio.h> +#include <mc/fsl_dpmng.h> +#include "dpaa2_ethdev.h" +#include <fsl_qbman_debug.h> + +#define DRIVER_LOOPBACK_MODE "drv_loopback" + +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME; + +/* Rx offloads which cannot be disabled */ +static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_SCATTER; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + +/* Tx offloads which cannot be disabled */ +static uint64_t dev_tx_offloads_nodis = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_MT_LOCKFREE | + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + +/* enable timestamp in mbuf */ +enum pmd_dpaa2_ts dpaa2_enable_ts; + +struct rte_dpaa2_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint8_t page_id; /* dpni statistics page id */ + uint8_t stats_id; /* stats id in the given page */ +}; + +static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = { + {"ingress_multicast_frames", 0, 2}, + {"ingress_multicast_bytes", 0, 3}, + {"ingress_broadcast_frames", 0, 4}, + {"ingress_broadcast_bytes", 0, 5}, + {"egress_multicast_frames", 1, 2}, + {"egress_multicast_bytes", 1, 3}, + {"egress_broadcast_frames", 1, 4}, + {"egress_broadcast_bytes", 1, 5}, + {"ingress_filtered_frames", 2, 0}, + {"ingress_discarded_frames", 2, 1}, + {"ingress_nobuffer_discards", 2, 2}, + {"egress_discarded_frames", 2, 3}, + {"egress_confirmed_frames", 2, 4}, +}; + +static const enum 
rte_filter_op dpaa2_supported_filter_ops[] = { + RTE_ETH_FILTER_ADD, + RTE_ETH_FILTER_DELETE, + RTE_ETH_FILTER_UPDATE, + RTE_ETH_FILTER_FLUSH, + RTE_ETH_FILTER_GET +}; + +static struct rte_dpaa2_driver rte_dpaa2_pmd; +static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); +static int dpaa2_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); +static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); +static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +int dpaa2_logtype_pmd; + +__rte_experimental void +rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable) +{ + dpaa2_enable_ts = enable; +} + +static int +dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -1; + } + + if (on) + ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, + priv->token, vlan_id); + else + ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, + priv->token, vlan_id); + + if (ret < 0) + DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d", + ret, vlan_id, priv->hw_id); + + return ret; +} + +static int +dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (mask & ETH_VLAN_FILTER_MASK) { + /* VLAN Filter not avaialble */ + if (!priv->max_vlan_filters) { + DPAA2_PMD_INFO("VLAN filter not available"); + goto next_mask; + } + + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, + priv->token, true); + else + ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, + priv->token, false); + if (ret < 0) + DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret); + } +next_mask: + if (mask & ETH_VLAN_EXTEND_MASK) { + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) + DPAA2_PMD_INFO("VLAN extend offload not supported"); + } + + return 0; +} + +static int +dpaa2_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type __rte_unused, + uint16_t tpid) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + int ret = -ENOTSUP; + + PMD_INIT_FUNC_TRACE(); + + /* nothing to be done for standard vlan tpids */ + if (tpid == 0x8100 || tpid == 0x88A8) + return 0; + + ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, tpid); + if (ret < 0) + DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret); + /* if already configured tpids, remove them first */ + if (ret == -EBUSY) { + struct dpni_custom_tpid_cfg tpid_list = {0}; + + ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, &tpid_list); + if (ret < 0) + goto fail; + ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, tpid_list.tpid1); + if (ret < 0) + goto fail; + ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, tpid); + } +fail: + return ret; +} + +static int +dpaa2_fw_version_get(struct rte_eth_dev *dev, + char *fw_version, + size_t fw_size) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct mc_soc_version mc_plat_info = {0}; + struct mc_version mc_ver_info = {0}; + + PMD_INIT_FUNC_TRACE(); + + if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) + DPAA2_PMD_WARN("\tmc_get_soc_version failed"); + + if 
(mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) + DPAA2_PMD_WARN("\tmc_get_version failed"); + + ret = snprintf(fw_version, fw_size, + "%x-%d.%d.%d", + mc_plat_info.svr, + mc_ver_info.major, + mc_ver_info.minor, + mc_ver_info.revision); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (uint32_t)ret) + return ret; + else + return 0; +} + +static void +dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + dev_info->if_index = priv->hw_id; + + dev_info->max_mac_addrs = priv->max_mac_filters; + dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; + dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; + dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; + dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; + dev_info->rx_offload_capa = dev_rx_offloads_sup | + dev_rx_offloads_nodis; + dev_info->tx_offload_capa = dev_tx_offloads_sup | + dev_tx_offloads_nodis; + dev_info->speed_capa = ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_10G; + + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vfs = 0; + dev_info->max_vmdq_pools = ETH_16_POOLS; + dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL; +} + +static int +dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + uint16_t dist_idx; + uint32_t vq_id; + uint8_t num_rxqueue_per_tc; + struct dpaa2_queue *mc_q, *mcq; + uint32_t tot_queues; + int i; + struct dpaa2_queue *dpaa2_q; + + PMD_INIT_FUNC_TRACE(); + + num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc); + tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; + mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, + RTE_CACHE_LINE_SIZE); + if (!mc_q) { + DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues"); + return -1; + } + + for (i = 0; i < priv->nb_rx_queues; i++) { + mc_q->eth_data = dev->data; + priv->rx_vq[i] = mc_q++; + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_q->q_storage = rte_malloc("dq_storage", + sizeof(struct queue_storage_info_t), + RTE_CACHE_LINE_SIZE); + if (!dpaa2_q->q_storage) + goto fail; + + memset(dpaa2_q->q_storage, 0, + sizeof(struct queue_storage_info_t)); + if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) + goto fail; + } + + for (i = 0; i < priv->nb_tx_queues; i++) { + mc_q->eth_data = dev->data; + mc_q->flow_id = 0xffff; + priv->tx_vq[i] = mc_q++; + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + dpaa2_q->cscn = rte_malloc(NULL, + sizeof(struct qbman_result), 16); + if (!dpaa2_q->cscn) + goto fail_tx; + } + + vq_id = 0; + for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { + mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; + mcq->tc_index = dist_idx / num_rxqueue_per_tc; + mcq->flow_id = dist_idx % num_rxqueue_per_tc; + vq_id++; + } + + return 0; +fail_tx: + i -= 1; + while (i >= 0) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + rte_free(dpaa2_q->cscn); + priv->tx_vq[i--] = NULL; + } + i = priv->nb_rx_queues; +fail: + i -= 1; + mc_q = priv->rx_vq[0]; + while (i >= 0) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_free_dq_storage(dpaa2_q->q_storage); + rte_free(dpaa2_q->q_storage); + priv->rx_vq[i--] = NULL; + } + rte_free(mc_q); + return -1; +} + +static void +dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpaa2_queue *dpaa2_q; + int i; + + PMD_INIT_FUNC_TRACE(); + + /* Queue allocation base */ + if (priv->rx_vq[0]) { + /* 
cleaning up queue storage */ + for (i = 0; i < priv->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + if (dpaa2_q->q_storage) + rte_free(dpaa2_q->q_storage); + } + /* cleanup tx queue cscn */ + for (i = 0; i < priv->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + rte_free(dpaa2_q->cscn); + } + /*free memory for all queues (RX+TX) */ + rte_free(priv->rx_vq[0]); + priv->rx_vq[0] = NULL; + } +} + +static int +dpaa2_eth_dev_configure(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + uint64_t rx_offloads = eth_conf->rxmode.offloads; + uint64_t tx_offloads = eth_conf->txmode.offloads; + int rx_l3_csum_offload = false; + int rx_l4_csum_offload = false; + int tx_l3_csum_offload = false; + int tx_l4_csum_offload = false; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Rx offloads validation */ + if (dev_rx_offloads_nodis & ~rx_offloads) { + DPAA2_PMD_WARN( + "Rx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + rx_offloads, dev_rx_offloads_nodis); + } + + /* Tx offloads validation */ + if (dev_tx_offloads_nodis & ~tx_offloads) { + DPAA2_PMD_WARN( + "Tx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { + ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, + priv->token, eth_conf->rxmode.max_rx_pkt_len); + if (ret) { + DPAA2_PMD_ERR( + "Unable to set mtu. check config"); + return ret; + } + } else { + return -1; + } + } + + if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { + ret = dpaa2_setup_flow_dist(dev, + eth_conf->rx_adv_conf.rss_conf.rss_hf); + if (ret) { + DPAA2_PMD_ERR("Unable to set flow distribution." + "Check queue config"); + return ret; + } + } + + if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rx_l3_csum_offload = true; + + if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || + (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM)) + rx_l4_csum_offload = true; + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); + return ret; + } + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); + return ret; + } + + if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + tx_l3_csum_offload = true; + + if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) + tx_l4_csum_offload = true; + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); + return ret; + } + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); + return ret; + } + + /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in + * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC] + * to 0 for LS2 in the hardware thus disabling data/annotation + * stashing. For LX2 this is fixed in hardware and thus hash result and + * parse results can be received in FD using this option. 
+ */ + if (dpaa2_svr_family == SVR_LX2160A) { + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_FLCTYPE_HASH, true); + if (ret) { + DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret); + return ret; + } + } + + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) + dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); + + /* update the current status */ + dpaa2_dev_link_update(dev, 0); + + return 0; +} + +/* Function to setup RX flow information. It contains traffic class ID, + * flow ID, destination configuration etc. + */ +static int +dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct dpaa2_queue *dpaa2_q; + struct dpni_queue cfg; + uint8_t options = 0; + uint8_t flow_id; + uint32_t bpid; + int ret; + + PMD_INIT_FUNC_TRACE(); + + DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", + dev, rx_queue_id, mb_pool, rx_conf); + + if (!priv->bp_list || priv->bp_list->mp != mb_pool) { + bpid = mempool_to_bpid(mb_pool); + ret = dpaa2_attach_bp_list(priv, + rte_dpaa2_bpid_info[bpid].bp_list); + if (ret) + return ret; + } + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; + dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ + dpaa2_q->bp_array = rte_dpaa2_bpid_info; + + /*Get the flow id from given VQ id*/ + flow_id = rx_queue_id % priv->nb_rx_queues; + memset(&cfg, 0, sizeof(struct dpni_queue)); + + options = options | DPNI_QUEUE_OPT_USER_CTX; + cfg.user_context = (size_t)(dpaa2_q); + + /*if ls2088 or rev2 device, enable the stashing */ + + if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { + options |= DPNI_QUEUE_OPT_FLC; + cfg.flc.stash_control = true; + cfg.flc.value &= 0xFFFFFFFFFFFFFFC0; + /* 00 00 00 - last 6 bit represent annotation, context stashing, + * data stashing setting 01 01 00 (0x14) + * (in following order ->DS AS CS) + * to enable 1 line data, 1 line annotation. + * For LX2, this setting should be 01 00 00 (0x10) + */ + if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A) + cfg.flc.value |= 0x10; + else + cfg.flc.value |= 0x14; + } + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, + dpaa2_q->tc_index, flow_id, options, &cfg); + if (ret) { + DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); + return -1; + } + + if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { + struct dpni_taildrop taildrop; + + taildrop.enable = 1; + /*enabling per rx queue congestion control */ + taildrop.threshold = CONG_THRESHOLD_RX_Q; + taildrop.units = DPNI_CONGESTION_UNIT_BYTES; + taildrop.oal = CONG_RX_OAL; + DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d", + rx_queue_id); + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_QUEUE, DPNI_QUEUE_RX, + dpaa2_q->tc_index, flow_id, &taildrop); + if (ret) { + DPAA2_PMD_ERR("Error in setting taildrop. 
err=(%d)", + ret); + return -1; + } + } + + dev->data->rx_queues[rx_queue_id] = dpaa2_q; + return 0; +} + +static int +dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) + priv->tx_vq[tx_queue_id]; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_queue tx_conf_cfg; + struct dpni_queue tx_flow_cfg; + uint8_t options = 0, flow_id; + uint32_t tc_id; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Return if queue already configured */ + if (dpaa2_q->flow_id != 0xffff) { + dev->data->tx_queues[tx_queue_id] = dpaa2_q; + return 0; + } + + memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); + memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); + + tc_id = tx_queue_id; + flow_id = 0; + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, + tc_id, flow_id, options, &tx_flow_cfg); + if (ret) { + DPAA2_PMD_ERR("Error in setting the tx flow: " + "tc_id=%d, flow=%d err=%d", + tc_id, flow_id, ret); + return -1; + } + + dpaa2_q->flow_id = flow_id; + + if (tx_queue_id == 0) { + /*Set tx-conf and error configuration*/ + ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, + priv->token, + DPNI_CONF_DISABLE); + if (ret) { + DPAA2_PMD_ERR("Error in set tx conf mode settings: " + "err=%d", ret); + return -1; + } + } + dpaa2_q->tc_index = tc_id; + + if (!(priv->flags & DPAA2_TX_CGR_OFF)) { + struct dpni_congestion_notification_cfg cong_notif_cfg; + + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; + cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; + /* Notify that the queue is not congested when the data in + * the queue is below this thershold. 
+ */ + cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; + cong_notif_cfg.message_ctx = 0; + cong_notif_cfg.message_iova = + (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn); + cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; + cong_notif_cfg.notification_mode = + DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | + DPNI_CONG_OPT_COHERENT_WRITE; + cong_notif_cfg.cg_point = DPNI_CP_QUEUE; + + ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, + priv->token, + DPNI_QUEUE_TX, + tc_id, + &cong_notif_cfg); + if (ret) { + DPAA2_PMD_ERR( + "Error in setting tx congestion notification: " + "err=%d", ret); + return -ret; + } + } + dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf; + dev->data->tx_queues[tx_queue_id] = dpaa2_q; + return 0; +} + +static void +dpaa2_dev_rx_queue_release(void *q __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); +} + +static void +dpaa2_dev_tx_queue_release(void *q __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); +} + +static uint32_t +dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + int32_t ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpaa2_queue *dpaa2_q; + struct qbman_swp *swp; + struct qbman_fq_query_np_rslt state; + uint32_t frame_cnt = 0; + + PMD_INIT_FUNC_TRACE(); + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return -EINVAL; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; + + if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) { + frame_cnt = qbman_fq_state_frame_count(&state); + DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u", + rx_queue_id, frame_cnt); + } + return frame_cnt; +} + +static const uint32_t * +dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /*todo -= add more types */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx || + dev->rx_pkt_burst == dpaa2_dev_loopback_rx) + return ptypes; + return NULL; +} + +/** + * Dpaa2 link Interrupt handler + * + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
+ * + * @return + * void + */ +static void +dpaa2_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = param; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int ret; + int irq_index = DPNI_IRQ_INDEX; + unsigned int status = 0, clear = 0; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, + irq_index, &status); + if (unlikely(ret)) { + DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); + clear = 0xffffffff; + goto out; + } + + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { + clear = DPNI_IRQ_EVENT_LINK_CHANGED; + dpaa2_dev_link_update(dev, 0); + /* calling all the apps registered for link status event */ + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } +out: + ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, + irq_index, clear); + if (unlikely(ret)) + DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); +} + +static int +dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) +{ + int err = 0; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int irq_index = DPNI_IRQ_INDEX; + unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; + + PMD_INIT_FUNC_TRACE(); + + err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, + irq_index, mask); + if (err < 0) { + DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, + strerror(-err)); + return err; + } + + err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, + irq_index, enable); + if (err < 0) + DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, + strerror(-err)); + + return err; +} + +static int +dpaa2_dev_start(struct rte_eth_dev *dev) +{ + struct rte_device *rdev = dev->device; + struct rte_dpaa2_device *dpaa2_dev; + struct rte_eth_dev_data *data = dev->data; + struct dpaa2_dev_priv *priv = data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct dpni_queue cfg; + struct dpni_error_cfg err_cfg; + uint16_t qdid; + struct dpni_queue_id qid; + struct dpaa2_queue *dpaa2_q; + int ret, i; + struct rte_intr_handle *intr_handle; + + dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); + intr_handle = &dpaa2_dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", + priv->hw_id, ret); + return ret; + } + + /* Power up the phy. 
Needed to make the link go UP */ + dpaa2_dev_set_link_up(dev); + + ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX, &qdid); + if (ret) { + DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret); + return ret; + } + priv->qdid = qdid; + + for (i = 0; i < data->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_RX, dpaa2_q->tc_index, + dpaa2_q->flow_id, &cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error in getting flow information: " + "err=%d", ret); + return ret; + } + dpaa2_q->fqid = qid.fqid; + } + + /*checksum errors, send them to normal path and set it in annotation */ + err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; + err_cfg.errors |= DPNI_ERROR_PHE; + + err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; + err_cfg.set_frame_annotation = true; + + ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, + priv->token, &err_cfg); + if (ret) { + DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", + ret); + return ret; + } + + /* if the interrupts were configured on this devices*/ + if (intr_handle && (intr_handle->fd) && + (dev->data->dev_conf.intr_conf.lsc != 0)) { + /* Registering LSC interrupt handler */ + rte_intr_callback_register(intr_handle, + dpaa2_interrupt_handler, + (void *)dev); + + /* enable vfio intr/eventfd mapping + * Interrupt index 0 is required, so we can not use + * rte_intr_enable. + */ + rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX); + + /* enable dpni_irqs */ + dpaa2_eth_setup_irqs(dev, 1); + } + + /* Change the tx burst function if ordered queues are used */ + if (priv->en_ordered) + dev->tx_pkt_burst = dpaa2_dev_tx_ordered; + + return 0; +} + +/** + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. 
+ */ +static void +dpaa2_dev_stop(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int ret; + struct rte_eth_link link; + struct rte_intr_handle *intr_handle = dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + /* reset interrupt callback */ + if (intr_handle && (intr_handle->fd) && + (dev->data->dev_conf.intr_conf.lsc != 0)) { + /*disable dpni irqs */ + dpaa2_eth_setup_irqs(dev, 0); + + /* disable vfio intr before callback unregister */ + rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX); + + /* Unregistering LSC interrupt handler */ + rte_intr_callback_unregister(intr_handle, + dpaa2_interrupt_handler, + (void *)dev); + } + + dpaa2_dev_set_link_down(dev); + + ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", + ret, priv->hw_id); + return; + } + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); +} + +static void +dpaa2_dev_close(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int ret; + struct rte_eth_link link; + + PMD_INIT_FUNC_TRACE(); + + dpaa2_flow_clean(dev); + + /* Clean the device first */ + ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); + return; + } + + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); +} + +static void +dpaa2_dev_promiscuous_enable( + struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); + if (ret < 0) + DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); + + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); + if (ret < 0) + DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); +} + +static void +dpaa2_dev_promiscuous_disable( + struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); + if (ret < 0) + DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); + + if (dev->data->all_multicast == 0) { + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, + priv->token, false); + if (ret < 0) + DPAA2_PMD_ERR("Unable to disable M promisc mode %d", + ret); + } +} + +static void +dpaa2_dev_allmulticast_enable( + struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); + if (ret < 0) + DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); +} + +static void +dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + 
return; + } + + /* must remain on for all promiscuous */ + if (dev->data->promiscuous == 1) + return; + + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); + if (ret < 0) + DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); +} + +static int +dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + VLAN_TAG_SIZE; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + /* check that mtu is within the allowed range */ + if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN)) + return -EINVAL; + + if (frame_size > ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads &= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + /* Set the Max Rx frame length as 'mtu' + + * Maximum Ethernet header length + */ + ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, + frame_size); + if (ret) { + DPAA2_PMD_ERR("Setting the max frame length failed"); + return -1; + } + DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); + return 0; +} + +static int +dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -1; + } + + ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, + priv->token, addr->addr_bytes); + if (ret) + DPAA2_PMD_ERR( + "error: Adding the MAC ADDR failed: err = %d", ret); + return 0; +} + +static void +dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, + uint32_t index) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct rte_eth_dev_data *data = dev->data; + struct ether_addr *macaddr; + + PMD_INIT_FUNC_TRACE(); + + macaddr = &data->mac_addrs[index]; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, + priv->token, macaddr->addr_bytes); + if (ret) + DPAA2_PMD_ERR( + "error: Removing the MAC ADDR failed: err = %d", ret); +} + +static int +dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *addr) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, + priv->token, addr->addr_bytes); + + if (ret) + DPAA2_PMD_ERR( + "error: Setting the MAC ADDR failed %d", ret); + + return ret; +} + +static +int dpaa2_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int32_t retcode; + uint8_t page0 = 0, page1 = 1, page2 = 2; + union dpni_statistics value; + int i; + struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq; + + memset(&value, 0, sizeof(union dpni_statistics)); + + PMD_INIT_FUNC_TRACE(); + + if (!dpni) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + if (!stats) { + 
DPAA2_PMD_ERR("stats is NULL"); + return -EINVAL; + } + + /*Get Counters from page_0*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + page0, 0, &value); + if (retcode) + goto err; + + stats->ipackets = value.page_0.ingress_all_frames; + stats->ibytes = value.page_0.ingress_all_bytes; + + /*Get Counters from page_1*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + page1, 0, &value); + if (retcode) + goto err; + + stats->opackets = value.page_1.egress_all_frames; + stats->obytes = value.page_1.egress_all_bytes; + + /*Get Counters from page_2*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + page2, 0, &value); + if (retcode) + goto err; + + /* Ingress drop frame count due to configured rules */ + stats->ierrors = value.page_2.ingress_filtered_frames; + /* Ingress drop frame count due to error */ + stats->ierrors += value.page_2.ingress_discarded_frames; + + stats->oerrors = value.page_2.egress_discarded_frames; + stats->imissed = value.page_2.ingress_nobuffer_discards; + + /* Fill in per queue stats */ + for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && + (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) { + dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i]; + if (dpaa2_rxq) + stats->q_ipackets[i] = dpaa2_rxq->rx_pkts; + if (dpaa2_txq) + stats->q_opackets[i] = dpaa2_txq->tx_pkts; + + /* Byte counting is not implemented */ + stats->q_ibytes[i] = 0; + stats->q_obytes[i] = 0; + } + + return 0; + +err: + DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); + return retcode; +}; + +static int +dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int32_t retcode; + union dpni_statistics value[3] = {}; + unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); + + if (n < num) + return num; + + if (xstats == NULL) + return 0; + + /* Get Counters from page_0*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 0, 0, &value[0]); + if (retcode) + goto err; + + /* Get Counters from page_1*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 1, 0, &value[1]); + if (retcode) + goto err; + + /* Get Counters from page_2*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 2, 0, &value[2]); + if (retcode) + goto err; + + for (i = 0; i < num; i++) { + xstats[i].id = i; + xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 
+ raw.counter[dpaa2_xstats_strings[i].stats_id]; + } + return i; +err: + DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); + return retcode; +} + +static int +dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + + if (limit < stat_cnt) + return stat_cnt; + + if (xstats_names != NULL) + for (i = 0; i < stat_cnt; i++) + strlcpy(xstats_names[i].name, + dpaa2_xstats_strings[i].name, + sizeof(xstats_names[i].name)); + + return stat_cnt; +} + +static int +dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + uint64_t values_copy[stat_cnt]; + + if (!ids) { + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int32_t retcode; + union dpni_statistics value[3] = {}; + + if (n < stat_cnt) + return stat_cnt; + + if (!values) + return 0; + + /* Get Counters from page_0*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 0, 0, &value[0]); + if (retcode) + return 0; + + /* Get Counters from page_1*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 1, 0, &value[1]); + if (retcode) + return 0; + + /* Get Counters from page_2*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 2, 0, &value[2]); + if (retcode) + return 0; + + for (i = 0; i < stat_cnt; i++) { + values[i] = value[dpaa2_xstats_strings[i].page_id]. + raw.counter[dpaa2_xstats_strings[i].stats_id]; + } + return stat_cnt; + } + + dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); + + for (i = 0; i < n; i++) { + if (ids[i] >= stat_cnt) { + DPAA2_PMD_ERR("xstats id value isn't valid"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + return n; +} + +static int +dpaa2_xstats_get_names_by_id( + struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int limit) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; + + if (!ids) + return dpaa2_xstats_get_names(dev, xstats_names, limit); + + dpaa2_xstats_get_names(dev, xstats_names_copy, limit); + + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + DPAA2_PMD_ERR("xstats id value isn't valid"); + return -1; + } + strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); + } + return limit; +} + +static void +dpaa2_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int32_t retcode; + int i; + struct dpaa2_queue *dpaa2_q; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); + if (retcode) + goto error; + + /* Reset the per queue stats in dpaa2_queue structure */ + for (i = 0; i < priv->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + if (dpaa2_q) + dpaa2_q->rx_pkts = 0; + } + + for (i = 0; i < priv->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + if (dpaa2_q) + dpaa2_q->tx_pkts = 0; + } + + return; + +error: + DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); + return; +}; + +/* return 0 means link status changed, -1 means not changed */ +static int +dpaa2_dev_link_update(struct rte_eth_dev *dev, + int 
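/*
 * Illustrative sketch (not part of the driver): dpaa2_dev_link_update()
 * below backs rte_eth_link_get()/rte_eth_link_get_nowait(). A simple polling
 * loop an application might run after rte_eth_dev_start(); the retry count
 * and delay are arbitrary example values.
 */
#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>

static void example_wait_for_link(uint16_t port_id)
{
	struct rte_eth_link link;
	int tries;

	for (tries = 0; tries < 100; tries++) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port_id, &link);
		if (link.link_status == ETH_LINK_UP) {
			printf("port %u up, %u Mbps, %s duplex\n", port_id,
			       link.link_speed,
			       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
			       "full" : "half");
			return;
		}
		rte_delay_ms(100);
	}
	printf("port %u link still down\n", port_id);
}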
wait_to_complete __rte_unused) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct rte_eth_link link; + struct dpni_link_state state = {0}; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return 0; + } + + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret < 0) { + DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); + return -1; + } + + memset(&link, 0, sizeof(struct rte_eth_link)); + link.link_status = state.up; + link.link_speed = state.rate; + + if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) + link.link_duplex = ETH_LINK_HALF_DUPLEX; + else + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == -1) + DPAA2_PMD_DEBUG("No change in status"); + else + DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, + link.link_status ? "Up" : "Down"); + + return ret; +} + +/** + * Toggle the DPNI to enable, if not already enabled. + * This is not strictly PHY up/down - it is more of logical toggling. + */ +static int +dpaa2_dev_set_link_up(struct rte_eth_dev *dev) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + int en = 0; + struct dpni_link_state state = {0}; + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)priv->hw; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return ret; + } + + /* Check if DPNI is currently enabled */ + ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); + if (ret) { + /* Unable to obtain dpni status; Not continuing */ + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); + return -EINVAL; + } + + /* Enable link if not already enabled */ + if (!en) { + ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); + return -EINVAL; + } + } + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret < 0) { + DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret); + return -1; + } + + /* changing tx burst function to start enqueues */ + dev->tx_pkt_burst = dpaa2_dev_tx; + dev->data->dev_link.link_status = state.up; + + if (state.up) + DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); + else + DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); + return ret; +} + +/** + * Toggle the DPNI to disable, if not already disabled. + * This is not strictly PHY up/down - it is more of logical toggling. + */ +static int +dpaa2_dev_set_link_down(struct rte_eth_dev *dev) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + int dpni_enabled = 0; + int retries = 10; + + PMD_INIT_FUNC_TRACE(); + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)priv->hw; + + if (dpni == NULL) { + DPAA2_PMD_ERR("Device has not yet been configured"); + return ret; + } + + /*changing tx burst function to avoid any more enqueues */ + dev->tx_pkt_burst = dummy_dev_tx; + + /* Loop while dpni_disable() attempts to drain the egress FQs + * and confirm them back to us. 
+ */ + do { + ret = dpni_disable(dpni, 0, priv->token); + if (ret) { + DPAA2_PMD_ERR("dpni disable failed (%d)", ret); + return ret; + } + ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); + if (ret) { + DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); + return ret; + } + if (dpni_enabled) + /* Allow the MC some slack */ + rte_delay_us(100 * 1000); + } while (dpni_enabled && --retries); + + if (!retries) { + DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); + /* todo- we may have to manually cleanup queues. + */ + } else { + DPAA2_PMD_INFO("Port %d Link DOWN successful", + dev->data->port_id); + } + + dev->data->dev_link.link_status = 0; + + return ret; +} + +static int +dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + struct dpni_link_state state = {0}; + + PMD_INIT_FUNC_TRACE(); + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)priv->hw; + + if (dpni == NULL || fc_conf == NULL) { + DPAA2_PMD_ERR("device not configured"); + return ret; + } + + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret) { + DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); + return ret; + } + + memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); + if (state.options & DPNI_LINK_OPT_PAUSE) { + /* DPNI_LINK_OPT_PAUSE set + * if ASYM_PAUSE not set, + * RX Side flow control (handle received Pause frame) + * TX side flow control (send Pause frame) + * if ASYM_PAUSE set, + * RX Side flow control (handle received Pause frame) + * No TX side flow control (send Pause frame disabled) + */ + if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) + fc_conf->mode = RTE_FC_FULL; + else + fc_conf->mode = RTE_FC_RX_PAUSE; + } else { + /* DPNI_LINK_OPT_PAUSE not set + * if ASYM_PAUSE set, + * TX side flow control (send Pause frame) + * No RX side flow control (No action on pause frame rx) + * if ASYM_PAUSE not set, + * Flow control disabled + */ + if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + } + + return ret; +} + +static int +dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + struct dpni_link_state state = {0}; + struct dpni_link_cfg cfg = {0}; + + PMD_INIT_FUNC_TRACE(); + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)priv->hw; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return ret; + } + + /* It is necessary to obtain the current state before setting fc_conf + * as MC would return error in case rate, autoneg or duplex values are + * different. 
+ */ + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret) { + DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); + return -1; + } + + /* Disable link before setting configuration */ + dpaa2_dev_set_link_down(dev); + + /* Based on fc_conf, update cfg */ + cfg.rate = state.rate; + cfg.options = state.options; + + /* update cfg with fc_conf */ + switch (fc_conf->mode) { + case RTE_FC_FULL: + /* Full flow control; + * OPT_PAUSE set, ASYM_PAUSE not set + */ + cfg.options |= DPNI_LINK_OPT_PAUSE; + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; + break; + case RTE_FC_TX_PAUSE: + /* Enable RX flow control + * OPT_PAUSE not set; + * ASYM_PAUSE set; + */ + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; + cfg.options &= ~DPNI_LINK_OPT_PAUSE; + break; + case RTE_FC_RX_PAUSE: + /* Enable TX Flow control + * OPT_PAUSE set + * ASYM_PAUSE set + */ + cfg.options |= DPNI_LINK_OPT_PAUSE; + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; + break; + case RTE_FC_NONE: + /* Disable Flow control + * OPT_PAUSE not set + * ASYM_PAUSE not set + */ + cfg.options &= ~DPNI_LINK_OPT_PAUSE; + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; + break; + default: + DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", + fc_conf->mode); + return -1; + } + + ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); + if (ret) + DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", + ret); + + /* Enable link */ + dpaa2_dev_set_link_up(dev); + + return ret; +} + +static int +dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev_data *data = dev->data; + struct rte_eth_conf *eth_conf = &data->dev_conf; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (rss_conf->rss_hf) { + ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); + if (ret) { + DPAA2_PMD_ERR("Unable to set flow dist"); + return ret; + } + } else { + ret = dpaa2_remove_flow_dist(dev, 0); + if (ret) { + DPAA2_PMD_ERR("Unable to remove flow dist"); + return ret; + } + } + eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; + return 0; +} + +static int +dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev_data *data = dev->data; + struct rte_eth_conf *eth_conf = &data->dev_conf; + + /* dpaa2 does not support rss_key, so length should be 0*/ + rss_conf->rss_key_len = 0; + rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; + return 0; +} + +int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, + int eth_rx_queue_id, + uint16_t dpcon_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) +{ + struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; + struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; + uint8_t flow_id = dpaa2_ethq->flow_id; + struct dpni_queue cfg; + uint8_t options; + int ret; + + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) + dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; + else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) + dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; + else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED) + dpaa2_ethq->cb = dpaa2_dev_process_ordered_event; + else + return -EINVAL; + + memset(&cfg, 0, sizeof(struct dpni_queue)); + options = DPNI_QUEUE_OPT_DEST; + cfg.destination.type = DPNI_DEST_DPCON; + cfg.destination.id = dpcon_id; + cfg.destination.priority = queue_conf->ev.priority; + + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { + options |= 
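/*
 * Illustrative sketch (not part of the driver): dpaa2_flow_ctrl_set() above
 * maps the generic mode onto the two DPNI link options (PAUSE set with ASYM
 * clear is full flow control, PAUSE with ASYM is Rx-only, ASYM alone is
 * Tx-only, neither is none), and only fc_conf->mode is consumed. Minimal
 * application-side usage, with the port id assumed for the example:
 */
#include <string.h>
#include <rte_ethdev.h>

static int example_enable_full_flow_control(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	fc_conf.mode = RTE_FC_FULL;	/* honour and emit pause frames */
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}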
DPNI_QUEUE_OPT_HOLD_ACTIVE; + cfg.destination.hold_active = 1; + } + + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED && + !eth_priv->en_ordered) { + struct opr_cfg ocfg; + + /* Restoration window size = 256 frames */ + ocfg.oprrws = 3; + /* Restoration window size = 512 frames for LX2 */ + if (dpaa2_svr_family == SVR_LX2160A) + ocfg.oprrws = 4; + /* Auto advance NESN window enabled */ + ocfg.oa = 1; + /* Late arrival window size disabled */ + ocfg.olws = 0; + /* ORL resource exhaustaion advance NESN disabled */ + ocfg.oeane = 0; + /* Loose ordering enabled */ + ocfg.oloe = 1; + eth_priv->en_loose_ordered = 1; + /* Strict ordering enabled if explicitly set */ + if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) { + ocfg.oloe = 0; + eth_priv->en_loose_ordered = 0; + } + + ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token, + dpaa2_ethq->tc_index, flow_id, + OPR_OPT_CREATE, &ocfg); + if (ret) { + DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret); + return ret; + } + + eth_priv->en_ordered = 1; + } + + options |= DPNI_QUEUE_OPT_USER_CTX; + cfg.user_context = (size_t)(dpaa2_ethq); + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, + dpaa2_ethq->tc_index, flow_id, options, &cfg); + if (ret) { + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); + return ret; + } + + memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); + + return 0; +} + +int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, + int eth_rx_queue_id) +{ + struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; + struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; + uint8_t flow_id = dpaa2_ethq->flow_id; + struct dpni_queue cfg; + uint8_t options; + int ret; + + memset(&cfg, 0, sizeof(struct dpni_queue)); + options = DPNI_QUEUE_OPT_DEST; + cfg.destination.type = DPNI_DEST_NONE; + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, + dpaa2_ethq->tc_index, flow_id, options, &cfg); + if (ret) + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); + + return ret; +} + +static inline int +dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op) +{ + unsigned int i; + + for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) { + if (dpaa2_supported_filter_ops[i] == filter_op) + return 0; + } + return -ENOTSUP; +} + +static int +dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + if (!dev) + return -ENODEV; + + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (dpaa2_dev_verify_filter_ops(filter_op) < 0) { + ret = -ENOTSUP; + break; + } + *(const void **)arg = &dpaa2_flow_ops; + dpaa2_filter_type |= filter_type; + break; + default: + RTE_LOG(ERR, PMD, "Filter type (%d) not supported", + filter_type); + ret = -ENOTSUP; + break; + } + return ret; +} + +static struct eth_dev_ops dpaa2_ethdev_ops = { + .dev_configure = dpaa2_eth_dev_configure, + .dev_start = dpaa2_dev_start, + .dev_stop = dpaa2_dev_stop, + .dev_close = dpaa2_dev_close, + .promiscuous_enable = dpaa2_dev_promiscuous_enable, + .promiscuous_disable = dpaa2_dev_promiscuous_disable, + .allmulticast_enable = dpaa2_dev_allmulticast_enable, + .allmulticast_disable = dpaa2_dev_allmulticast_disable, + .dev_set_link_up = dpaa2_dev_set_link_up, + .dev_set_link_down = dpaa2_dev_set_link_down, + .link_update = dpaa2_dev_link_update, + .stats_get = dpaa2_dev_stats_get, + .xstats_get = dpaa2_dev_xstats_get, + .xstats_get_by_id = 
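/*
 * Illustrative sketch (not part of the driver): dpaa2_eth_eventq_attach()
 * and dpaa2_eth_eventq_detach() above are normally reached through the
 * eventdev Rx adapter rather than called directly. The adapter id, event
 * queue id and Rx queue 0 below are assumptions for the example only.
 */
#include <string.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

static int example_bind_rxq_to_eventdev(uint8_t adapter_id, uint16_t eth_port,
					uint8_t ev_queue_id)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf;

	memset(&qconf, 0, sizeof(qconf));
	qconf.ev.queue_id = ev_queue_id;
	qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;	/* hold-active path above */

	/* Rx queue 0 of the port; -1 would mean all Rx queues */
	return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port, 0,
						  &qconf);
}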
dpaa2_xstats_get_by_id, + .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, + .xstats_get_names = dpaa2_xstats_get_names, + .stats_reset = dpaa2_dev_stats_reset, + .xstats_reset = dpaa2_dev_stats_reset, + .fw_version_get = dpaa2_fw_version_get, + .dev_infos_get = dpaa2_dev_info_get, + .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, + .mtu_set = dpaa2_dev_mtu_set, + .vlan_filter_set = dpaa2_vlan_filter_set, + .vlan_offload_set = dpaa2_vlan_offload_set, + .vlan_tpid_set = dpaa2_vlan_tpid_set, + .rx_queue_setup = dpaa2_dev_rx_queue_setup, + .rx_queue_release = dpaa2_dev_rx_queue_release, + .tx_queue_setup = dpaa2_dev_tx_queue_setup, + .tx_queue_release = dpaa2_dev_tx_queue_release, + .rx_queue_count = dpaa2_dev_rx_queue_count, + .flow_ctrl_get = dpaa2_flow_ctrl_get, + .flow_ctrl_set = dpaa2_flow_ctrl_set, + .mac_addr_add = dpaa2_dev_add_mac_addr, + .mac_addr_remove = dpaa2_dev_remove_mac_addr, + .mac_addr_set = dpaa2_dev_set_mac_addr, + .rss_hash_update = dpaa2_dev_rss_hash_update, + .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, + .filter_ctrl = dpaa2_dev_flow_ctrl, +}; + +/* Populate the mac address from physically available (u-boot/firmware) and/or + * one set by higher layers like MC (restool) etc. + * Returns the table of MAC entries (multiple entries) + */ +static int +populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, + struct ether_addr *mac_entry) +{ + int ret; + struct ether_addr phy_mac, prime_mac; + + memset(&phy_mac, 0, sizeof(struct ether_addr)); + memset(&prime_mac, 0, sizeof(struct ether_addr)); + + /* Get the physical device MAC address */ + ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, + phy_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret); + goto cleanup; + } + + ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, + prime_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret); + goto cleanup; + } + + /* Now that both MAC have been obtained, do: + * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy + * and return phy + * If empty_mac(phy), return prime. 
+ * if both are empty, create random MAC, set as prime and return + */ + if (!is_zero_ether_addr(&phy_mac)) { + /* If the addresses are not same, overwrite prime */ + if (!is_same_ether_addr(&phy_mac, &prime_mac)) { + ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, + priv->token, + phy_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("Unable to set MAC Address: %d", + ret); + goto cleanup; + } + memcpy(&prime_mac, &phy_mac, sizeof(struct ether_addr)); + } + } else if (is_zero_ether_addr(&prime_mac)) { + /* In case phys and prime, both are zero, create random MAC */ + eth_random_addr(prime_mac.addr_bytes); + ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, + priv->token, + prime_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret); + goto cleanup; + } + } + + /* prime_mac the final MAC address */ + memcpy(mac_entry, &prime_mac, sizeof(struct ether_addr)); + return 0; + +cleanup: + return -1; +} + +static int +check_devargs_handler(__rte_unused const char *key, const char *value, + __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +dpaa2_get_devargs(struct rte_devargs *devargs, const char *key) +{ + struct rte_kvargs *kvlist; + + if (!devargs) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (!kvlist) + return 0; + + if (!rte_kvargs_count(kvlist, key)) { + rte_kvargs_free(kvlist); + return 0; + } + + if (rte_kvargs_process(kvlist, key, + check_devargs_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static int +dpaa2_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_device *dev = eth_dev->device; + struct rte_dpaa2_device *dpaa2_dev; + struct fsl_mc_io *dpni_dev; + struct dpni_attr attr; + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct dpni_buffer_layout layout; + int ret, hw_id, i; + + PMD_INIT_FUNC_TRACE(); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + /* In case of secondary, only burst and ops API need to be + * plugged. 
*/ + eth_dev->dev_ops = &dpaa2_ethdev_ops; + if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) + eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; + else + eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; + eth_dev->tx_pkt_burst = dpaa2_dev_tx; + return 0; + } + + dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); + + hw_id = dpaa2_dev->object_id; + + dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); + if (!dpni_dev) { + DPAA2_PMD_ERR("Memory allocation failed for dpni device"); + return -1; + } + + dpni_dev->regs = rte_mcp_ptr_list[0]; + ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); + if (ret) { + DPAA2_PMD_ERR( + "Failure in opening dpni@%d with err code %d", + hw_id, ret); + rte_free(dpni_dev); + return -1; + } + + /* Clean the device first */ + ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", + hw_id, ret); + goto init_err; + } + + ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); + if (ret) { + DPAA2_PMD_ERR( + "Failure in get dpni@%d attribute, err code %d", + hw_id, ret); + goto init_err; + } + + priv->num_rx_tc = attr.num_rx_tcs; + + for (i = 0; i < attr.num_rx_tcs; i++) + priv->nb_rx_queues += attr.num_queues; + + /* Using number of TX queues as number of TX TCs */ + priv->nb_tx_queues = attr.num_tx_tcs; + + DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", + priv->num_rx_tc, priv->nb_rx_queues, + priv->nb_tx_queues); + + priv->hw = dpni_dev; + priv->hw_id = hw_id; + priv->options = attr.options; + priv->max_mac_filters = attr.mac_filter_entries; + priv->max_vlan_filters = attr.vlan_filter_entries; + priv->flags = 0; + + /* Allocate memory for hardware structure for queues */ + ret = dpaa2_alloc_rx_tx_queues(eth_dev); + if (ret) { + DPAA2_PMD_ERR("Queue allocation Failed"); + goto init_err; + } + + /* Allocate memory for storing MAC addresses. + * Table of mac_filter_entries size is allocated so that RTE ether lib + * can add MAC entries when rte_eth_dev_mac_addr_add is called. + */ + eth_dev->data->mac_addrs = rte_zmalloc("dpni", + ETHER_ADDR_LEN * attr.mac_filter_entries, 0); + if (eth_dev->data->mac_addrs == NULL) { + DPAA2_PMD_ERR( + "Failed to allocate %d bytes needed to store MAC addresses", + ETHER_ADDR_LEN * attr.mac_filter_entries); + ret = -ENOMEM; + goto init_err; + } + + ret = populate_mac_addr(dpni_dev, priv, &eth_dev->data->mac_addrs[0]); + if (ret) { + DPAA2_PMD_ERR("Unable to fetch MAC Address for device"); + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + goto init_err; + } + + /* ... tx buffer layout ... */ + memset(&layout, 0, sizeof(struct dpni_buffer_layout)); + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + layout.pass_frame_status = 1; + ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX, &layout); + if (ret) { + DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); + goto init_err; + } + + /* ... tx-conf and error buffer layout ...
*/ + memset(&layout, 0, sizeof(struct dpni_buffer_layout)); + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + layout.pass_frame_status = 1; + ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX_CONFIRM, &layout); + if (ret) { + DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", + ret); + goto init_err; + } + + eth_dev->dev_ops = &dpaa2_ethdev_ops; + + if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) { + eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; + DPAA2_PMD_INFO("Loopback mode"); + } else { + eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; + } + eth_dev->tx_pkt_burst = dpaa2_dev_tx; + + /*Init fields w.r.t. classficaition*/ + memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg)); + priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64); + if (!priv->extract.qos_extract_param) { + DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow " + " classificaiton ", ret); + goto init_err; + } + for (i = 0; i < MAX_TCS; i++) { + memset(&priv->extract.fs_key_cfg[i], 0, + sizeof(struct dpkg_profile_cfg)); + priv->extract.fs_extract_param[i] = + (size_t)rte_malloc(NULL, 256, 64); + if (!priv->extract.fs_extract_param[i]) { + DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton", + ret); + goto init_err; + } + } + + RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); + return 0; +init_err: + dpaa2_dev_uninit(eth_dev); + return ret; +} + +static int +dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int i, ret; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (!dpni) { + DPAA2_PMD_WARN("Already closed or not started"); + return -1; + } + + dpaa2_dev_close(eth_dev); + + dpaa2_free_rx_tx_queues(eth_dev); + + /* Close the device at underlying layer*/ + ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR( + "Failure closing dpni device with err code %d", + ret); + } + + /* Free the allocated memory for ethernet private data and dpni*/ + priv->hw = NULL; + rte_free(dpni); + + for (i = 0; i < MAX_TCS; i++) { + if (priv->extract.fs_extract_param[i]) + rte_free((void *)(size_t)priv->extract.fs_extract_param[i]); + } + + if (priv->extract.qos_extract_param) + rte_free((void *)(size_t)priv->extract.qos_extract_param); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); + return 0; +} + +static int +rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, + struct rte_dpaa2_device *dpaa2_dev) +{ + struct rte_eth_dev *eth_dev; + int diag; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); + if (!eth_dev) + return -ENODEV; + eth_dev->data->dev_private = rte_zmalloc( + "ethdev private structure", + sizeof(struct dpaa2_dev_priv), + RTE_CACHE_LINE_SIZE); + if (eth_dev->data->dev_private == NULL) { + DPAA2_PMD_CRIT( + "Unable to allocate memory for private data"); + rte_eth_dev_release_port(eth_dev); + return -ENOMEM; + } + } else { + eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); + if (!eth_dev) + return -ENODEV; + } + + eth_dev->device = &dpaa2_dev->device; + + dpaa2_dev->eth_dev = eth_dev; + eth_dev->data->rx_mbuf_alloc_failed = 0; + + if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) + eth_dev->data->dev_flags |= 
RTE_ETH_DEV_INTR_LSC; + + /* Invoke PMD device initialization function */ + diag = dpaa2_dev_init(eth_dev); + if (diag == 0) { + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + rte_eth_dev_release_port(eth_dev); + return diag; +} + +static int +rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) +{ + struct rte_eth_dev *eth_dev; + + eth_dev = dpaa2_dev->eth_dev; + dpaa2_dev_uninit(eth_dev); + + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_dpaa2_driver rte_dpaa2_pmd = { + .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, + .drv_type = DPAA2_ETH, + .probe = rte_dpaa2_probe, + .remove = rte_dpaa2_remove, +}; + +RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); +RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2, + DRIVER_LOOPBACK_MODE "=<int>"); +RTE_INIT(dpaa2_pmd_init_log) +{ + dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); + if (dpaa2_logtype_pmd >= 0) + rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); +} diff --git a/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h new file mode 100644 index 000000000..33b1506aa --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved. + * Copyright 2016 NXP + * + */ + +#ifndef _DPAA2_ETHDEV_H +#define _DPAA2_ETHDEV_H + +#include <rte_event_eth_rx_adapter.h> +#include <rte_pmd_dpaa2.h> + +#include <dpaa2_hw_pvt.h> + +#include <mc/fsl_dpni.h> +#include <mc/fsl_mc_sys.h> + +#define DPAA2_MIN_RX_BUF_SIZE 512 +#define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/ + +#define MAX_TCS DPNI_MAX_TC +#define MAX_RX_QUEUES 128 +#define MAX_TX_QUEUES 16 + +/*default tc to be used for ,congestion, distribution etc configuration. */ +#define DPAA2_DEF_TC 0 + +/* Threshold for a Tx queue to *Enter* Congestion state. + */ +#define CONG_ENTER_TX_THRESHOLD 512 + +/* Threshold for a queue to *Exit* Congestion state. 
+ */ +#define CONG_EXIT_TX_THRESHOLD 480 + +#define CONG_RETRY_COUNT 18000 + +/* RX queue tail drop threshold + * currently considering 32 KB packets + */ +#define CONG_THRESHOLD_RX_Q (64 * 1024) +#define CONG_RX_OAL 128 + +/* Size of the input SMMU mapped memory required by MC */ +#define DIST_PARAM_IOVA_SIZE 256 + +/* Enable TX Congestion control support + * default is disable + */ +#define DPAA2_TX_CGR_OFF 0x01 + +/* Disable RX tail drop, default is enable */ +#define DPAA2_RX_TAILDROP_OFF 0x04 + +#define DPAA2_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IP | \ + ETH_RSS_UDP | \ + ETH_RSS_TCP | \ + ETH_RSS_SCTP) + +/* LX2 FRC Parsed values (Little Endian) */ +#define DPAA2_PKT_TYPE_ETHER 0x0060 +#define DPAA2_PKT_TYPE_IPV4 0x0000 +#define DPAA2_PKT_TYPE_IPV6 0x0020 +#define DPAA2_PKT_TYPE_IPV4_EXT \ + (0x0001 | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_EXT \ + (0x0001 | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_TCP \ + (0x000e | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_TCP \ + (0x000e | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_UDP \ + (0x0010 | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_UDP \ + (0x0010 | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_SCTP \ + (0x000f | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_SCTP \ + (0x000f | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_ICMP \ + (0x0003 | DPAA2_PKT_TYPE_IPV4_EXT) +#define DPAA2_PKT_TYPE_IPV6_ICMP \ + (0x0003 | DPAA2_PKT_TYPE_IPV6_EXT) +#define DPAA2_PKT_TYPE_VLAN_1 0x0160 +#define DPAA2_PKT_TYPE_VLAN_2 0x0260 + +/* enable timestamp in mbuf*/ +extern enum pmd_dpaa2_ts dpaa2_enable_ts; + +#define DPAA2_QOS_TABLE_RECONFIGURE 1 +#define DPAA2_FS_TABLE_RECONFIGURE 2 + +/*Externaly defined*/ +extern const struct rte_flow_ops dpaa2_flow_ops; +extern enum rte_filter_type dpaa2_filter_type; + +struct dpaa2_dev_priv { + void *hw; + int32_t hw_id; + int32_t qdid; + uint16_t token; + uint8_t nb_tx_queues; + uint8_t nb_rx_queues; + uint32_t options; + void *rx_vq[MAX_RX_QUEUES]; + void *tx_vq[MAX_TX_QUEUES]; + + struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */ + uint8_t max_mac_filters; + uint8_t max_vlan_filters; + uint8_t num_rx_tc; + uint8_t flags; /*dpaa2 config flags */ + uint8_t en_ordered; + uint8_t en_loose_ordered; + + struct pattern_s { + uint8_t item_count; + uint8_t pattern_type[DPKG_MAX_NUM_OF_EXTRACTS]; + } pattern[MAX_TCS + 1]; + + struct extract_s { + struct dpkg_profile_cfg qos_key_cfg; + struct dpkg_profile_cfg fs_key_cfg[MAX_TCS]; + uint64_t qos_extract_param; + uint64_t fs_extract_param[MAX_TCS]; + } extract; + LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. 
*/ +}; + +int dpaa2_distset_to_dpkg_profile_cfg(uint64_t req_dist_set, + struct dpkg_profile_cfg *kg_cfg); + +int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, + uint64_t req_dist_set); + +int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev, + uint8_t tc_index); + +int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist); + +int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, + int eth_rx_queue_id, + uint16_t dpcon_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf); + +int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, + int eth_rx_queue_id); + +uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts); + +uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts); +void dpaa2_dev_process_parallel_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev); +void dpaa2_dev_process_atomic_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev); +void dpaa2_dev_process_ordered_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev); +uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts); +uint16_t dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts); +uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts); +void dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci); +void dpaa2_flow_clean(struct rte_eth_dev *dev); + +#endif /* _DPAA2_ETHDEV_H */ diff --git a/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_flow.c b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_flow.c new file mode 100644 index 000000000..9ef46d47e --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_flow.c @@ -0,0 +1,2015 @@ +/* * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> + +#include <rte_ethdev.h> +#include <rte_log.h> +#include <rte_malloc.h> +#include <rte_flow_driver.h> +#include <rte_tailq.h> + +#include <fsl_dpni.h> +#include <fsl_dpkg.h> + +#include <dpaa2_ethdev.h> +#include <dpaa2_pmd_logs.h> + +struct rte_flow { + LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ + struct dpni_rule_cfg rule; + uint8_t key_size; + uint8_t tc_id; + uint8_t flow_type; + uint8_t index; + enum rte_flow_action_type action; + uint16_t flow_id; +}; + +/* Layout for rule compositions for supported patterns */ +/* TODO: Current design only supports Ethernet + IPv4 based classification. */ +/* So corresponding offset macros are valid only. Rest are placeholder for */ +/* now. 
Once support for other network headers is added, the */ +/* corresponding macros will be updated with correct values. */ +#define DPAA2_CLS_RULE_OFFSET_ETH 0 /*Start of buffer*/ +#define DPAA2_CLS_RULE_OFFSET_VLAN 14 /* DPAA2_CLS_RULE_OFFSET_ETH */ + /* + Sizeof Eth fields */ +#define DPAA2_CLS_RULE_OFFSET_IPV4 14 /* DPAA2_CLS_RULE_OFFSET_VLAN */ + /* + Sizeof VLAN fields */ +#define DPAA2_CLS_RULE_OFFSET_IPV6 25 /* DPAA2_CLS_RULE_OFFSET_IPV4 */ + /* + Sizeof IPV4 fields */ +#define DPAA2_CLS_RULE_OFFSET_ICMP 58 /* DPAA2_CLS_RULE_OFFSET_IPV6 */ + /* + Sizeof IPV6 fields */ +#define DPAA2_CLS_RULE_OFFSET_UDP 60 /* DPAA2_CLS_RULE_OFFSET_ICMP */ + /* + Sizeof ICMP fields */ +#define DPAA2_CLS_RULE_OFFSET_TCP 64 /* DPAA2_CLS_RULE_OFFSET_UDP */ + /* + Sizeof UDP fields */ +#define DPAA2_CLS_RULE_OFFSET_SCTP 68 /* DPAA2_CLS_RULE_OFFSET_TCP */ + /* + Sizeof TCP fields */ +#define DPAA2_CLS_RULE_OFFSET_GRE 72 /* DPAA2_CLS_RULE_OFFSET_SCTP */ + /* + Sizeof SCTP fields */ + +static const +enum rte_flow_item_type dpaa2_supported_pattern_type[] = { + RTE_FLOW_ITEM_TYPE_END, + RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_VLAN, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_ICMP, + RTE_FLOW_ITEM_TYPE_UDP, + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_SCTP, + RTE_FLOW_ITEM_TYPE_GRE, +}; + +static const +enum rte_flow_action_type dpaa2_supported_action_type[] = { + RTE_FLOW_ACTION_TYPE_END, + RTE_FLOW_ACTION_TYPE_QUEUE, + RTE_FLOW_ACTION_TYPE_RSS +}; + +enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE; +static const void *default_mask; + +static int +dpaa2_configure_flow_eth(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_eth *spec, *mask; + + /* TODO: Currently upper bound of range parameter is not implemented */ + const struct rte_flow_item_eth *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/ + /* TODO: pattern is an array of 9 elements where 9th pattern element */ + /* is for QoS table and 1-8th pattern element is for FS tables. */ + /* It can be changed to macro.
*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH; + 
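/*
 * Illustrative sketch (not part of the driver): the extract programming
 * around this point backs rte_flow rules matching on Ethernet source MAC,
 * destination MAC and EtherType. A matching application-side rule steering
 * IPv4 traffic to Rx queue 1; group, priority and queue index are arbitrary
 * choices for the example.
 */
#include <rte_flow.h>
#include <rte_byteorder.h>

static struct rte_flow *example_ethertype_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .group = 0, .priority = 0, .ingress = 1 };
	struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0800) };	/* IPv4 */
	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}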
priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_eth *)pattern->spec; + last = (const struct rte_flow_item_eth *)pattern->last; + mask = (const struct rte_flow_item_eth *) + (pattern->mask ? pattern->mask : default_mask); + + /* Key rule */ + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ETH; + memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes), + sizeof(struct ether_addr)); + key_iova += sizeof(struct ether_addr); + memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes), + sizeof(struct ether_addr)); + key_iova += sizeof(struct ether_addr); + memcpy((void *)key_iova, (const void *)(&spec->type), + sizeof(rte_be16_t)); + + /* Key mask */ + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ETH; + memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes), + sizeof(struct ether_addr)); + mask_iova += sizeof(struct ether_addr); + memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes), + sizeof(struct ether_addr)); + mask_iova += sizeof(struct ether_addr); + memcpy((void *)mask_iova, (const void *)(&mask->type), + sizeof(rte_be16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ETH + + ((2 * sizeof(struct ether_addr)) + + sizeof(rte_be16_t))); + return device_configured; +} + +static int +dpaa2_configure_flow_vlan(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_vlan *spec, *mask; + + const struct rte_flow_item_vlan *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. 
Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI; + priv->extract.qos_key_cfg.num_extracts++; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI; + priv->extract.fs_key_cfg[group].num_extracts++; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_vlan *)pattern->spec; + last = (const struct rte_flow_item_vlan *)pattern->last; + mask = (const struct rte_flow_item_vlan *) + (pattern->mask ? 
pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_VLAN; + memcpy((void *)key_iova, (const void *)(&spec->tci), + sizeof(rte_be16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_VLAN; + memcpy((void *)mask_iova, (const void *)(&mask->tci), + sizeof(rte_be16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_VLAN + sizeof(rte_be16_t)); + return device_configured; +} + +static int +dpaa2_configure_flow_ipv4(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_ipv4 *spec, *mask; + + const struct rte_flow_item_ipv4 *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if 
(device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_ipv4 *)pattern->spec; + last = (const struct rte_flow_item_ipv4 *)pattern->last; + mask = (const struct rte_flow_item_ipv4 *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4; + memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr, + sizeof(uint32_t)); + key_iova += sizeof(uint32_t); + memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr, + sizeof(uint32_t)); + key_iova += sizeof(uint32_t); + memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id, + sizeof(uint8_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV4; + memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr, + sizeof(uint32_t)); + mask_iova += sizeof(uint32_t); + memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr, + sizeof(uint32_t)); + mask_iova += sizeof(uint32_t); + memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id, + sizeof(uint8_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV4 + + (2 * sizeof(uint32_t)) + sizeof(uint8_t)); + + return device_configured; +} + +static int +dpaa2_configure_flow_ipv6(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_ipv6 *spec, *mask; + + const struct rte_flow_item_ipv6 *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. 
Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_ipv6 *)pattern->spec; + last = (const struct rte_flow_item_ipv6 *)pattern->last; + mask = (const struct rte_flow_item_ipv6 *) + (pattern->mask ? 
pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV6; + memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr), + sizeof(spec->hdr.src_addr)); + key_iova += sizeof(spec->hdr.src_addr); + memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr), + sizeof(spec->hdr.dst_addr)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV6; + memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr), + sizeof(mask->hdr.src_addr)); + mask_iova += sizeof(mask->hdr.src_addr); + memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr), + sizeof(mask->hdr.dst_addr)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV6 + + sizeof(spec->hdr.src_addr) + + sizeof(mask->hdr.dst_addr)); + return device_configured; +} + +static int +dpaa2_configure_flow_icmp(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_icmp *spec, *mask; + + const struct rte_flow_item_icmp *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } 
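/*
 * Illustrative sketch (not part of the driver): rules built through these
 * configure helpers are tracked on the per-port flows list (see
 * dpaa2_ethdev.h) and released either one at a time or all at once. Typical
 * application-side teardown, with the port id assumed for the example:
 */
#include <rte_flow.h>

static void example_release_rules(uint16_t port_id, struct rte_flow *one_rule)
{
	struct rte_flow_error err;

	if (one_rule)
		rte_flow_destroy(port_id, one_rule, &err);	/* drop a single rule */
	rte_flow_flush(port_id, &err);	/* drop every remaining rule on the port */
}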
+ + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_icmp *)pattern->spec; + last = (const struct rte_flow_item_icmp *)pattern->last; + mask = (const struct rte_flow_item_icmp *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ICMP; + memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type, + sizeof(uint8_t)); + key_iova += sizeof(uint8_t); + memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code, + sizeof(uint8_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ICMP; + memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type, + sizeof(uint8_t)); + mask_iova += sizeof(uint8_t); + memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code, + sizeof(uint8_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ICMP + + (2 * sizeof(uint8_t))); + + return device_configured; +} + +static int +dpaa2_configure_flow_udp(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_udp *spec, *mask; + + const struct rte_flow_item_udp *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. 
Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP; + 
priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_udp *)pattern->spec; + last = (const struct rte_flow_item_udp *)pattern->last; + mask = (const struct rte_flow_item_udp *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 + + (2 * sizeof(uint32_t)); + memset((void *)key_iova, 0x11, sizeof(uint8_t)); + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_UDP; + memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port), + sizeof(uint16_t)); + key_iova += sizeof(uint16_t); + memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port), + sizeof(uint16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_UDP; + memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port), + sizeof(uint16_t)); + mask_iova += sizeof(uint16_t); + memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port), + sizeof(uint16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_UDP + + (2 * sizeof(uint16_t))); + + return device_configured; +} + +static int +dpaa2_configure_flow_tcp(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_tcp *spec, *mask; + + const struct rte_flow_item_tcp *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. 
Verify this limitation too.*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP; + 
priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_tcp *)pattern->spec; + last = (const struct rte_flow_item_tcp *)pattern->last; + mask = (const struct rte_flow_item_tcp *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 + + (2 * sizeof(uint32_t)); + memset((void *)key_iova, 0x06, sizeof(uint8_t)); + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_TCP; + memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port), + sizeof(uint16_t)); + key_iova += sizeof(uint16_t); + memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port), + sizeof(uint16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_TCP; + memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port), + sizeof(uint16_t)); + mask_iova += sizeof(uint16_t); + memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port), + sizeof(uint16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_TCP + + (2 * sizeof(uint16_t))); + + return device_configured; +} + +static int +dpaa2_configure_flow_sctp(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_sctp *spec, *mask; + + const struct rte_flow_item_sctp *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. 
*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC; + index++; + + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC; + index++; + + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP; + 
priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_sctp *)pattern->spec; + last = (const struct rte_flow_item_sctp *)pattern->last; + mask = (const struct rte_flow_item_sctp *) + (pattern->mask ? pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 + + (2 * sizeof(uint32_t)); + memset((void *)key_iova, 0x84, sizeof(uint8_t)); + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_SCTP; + memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port), + sizeof(uint16_t)); + key_iova += sizeof(uint16_t); + memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port), + sizeof(uint16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_SCTP; + memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port), + sizeof(uint16_t)); + mask_iova += sizeof(uint16_t); + memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port), + sizeof(uint16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_SCTP + + (2 * sizeof(uint16_t))); + return device_configured; +} + +static int +dpaa2_configure_flow_gre(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item *pattern, + const struct rte_flow_action actions[] __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + int index, j = 0; + size_t key_iova; + size_t mask_iova; + int device_configured = 0, entry_found = 0; + uint32_t group; + const struct rte_flow_item_gre *spec, *mask; + + const struct rte_flow_item_gre *last __rte_unused; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + group = attr->group; + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. 
*/ + if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) { + DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n", + DPKG_MAX_NUM_OF_EXTRACTS); + return -ENOTSUP; + } + + for (j = 0; j < priv->pattern[8].item_count; j++) { + if (priv->pattern[8].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[8].pattern_type[j] = pattern->type; + priv->pattern[8].item_count++; + device_configured |= DPAA2_QOS_TABLE_RECONFIGURE; + } + + entry_found = 0; + for (j = 0; j < priv->pattern[group].item_count; j++) { + if (priv->pattern[group].pattern_type[j] != pattern->type) { + continue; + } else { + entry_found = 1; + break; + } + } + + if (!entry_found) { + priv->pattern[group].pattern_type[j] = pattern->type; + priv->pattern[group].item_count++; + device_configured |= DPAA2_FS_TABLE_RECONFIGURE; + } + + /* Get traffic class index and flow id to be configured */ + flow->tc_id = group; + flow->index = attr->priority; + + if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + index = priv->extract.qos_key_cfg.num_extracts; + priv->extract.qos_key_cfg.extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE; + priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE; + index++; + + priv->extract.qos_key_cfg.num_extracts = index; + } + + if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) { + index = priv->extract.fs_key_cfg[group].num_extracts; + priv->extract.fs_key_cfg[group].extracts[index].type = + DPKG_EXTRACT_FROM_HDR; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE; + priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE; + index++; + + priv->extract.fs_key_cfg[group].num_extracts = index; + } + + /* Parse pattern list to get the matching parameters */ + spec = (const struct rte_flow_item_gre *)pattern->spec; + last = (const struct rte_flow_item_gre *)pattern->last; + mask = (const struct rte_flow_item_gre *) + (pattern->mask ? 
pattern->mask : default_mask); + + key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_GRE; + memcpy((void *)key_iova, (const void *)(&spec->protocol), + sizeof(rte_be16_t)); + + mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_GRE; + memcpy((void *)mask_iova, (const void *)(&mask->protocol), + sizeof(rte_be16_t)); + + flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_GRE + sizeof(rte_be16_t)); + + return device_configured; +} + +static int +dpaa2_generic_flow_set(struct rte_flow *flow, + struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + const struct rte_flow_action_queue *dest_queue; + const struct rte_flow_action_rss *rss_conf; + uint16_t index; + int is_keycfg_configured = 0, end_of_list = 0; + int ret = 0, i = 0, j = 0; + struct dpni_attr nic_attr; + struct dpni_rx_tc_dist_cfg tc_cfg; + struct dpni_qos_tbl_cfg qos_cfg; + struct dpkg_profile_cfg key_cfg; + struct dpni_fs_action_cfg action; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + size_t param; + struct rte_flow *curr = LIST_FIRST(&priv->flows); + + /* Parse pattern list to get the matching parameters */ + while (!end_of_list) { + switch (pattern[i].type) { + case RTE_FLOW_ITEM_TYPE_ETH: + is_keycfg_configured = dpaa2_configure_flow_eth(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + is_keycfg_configured = dpaa2_configure_flow_vlan(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + is_keycfg_configured = dpaa2_configure_flow_ipv4(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + is_keycfg_configured = dpaa2_configure_flow_ipv6(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + is_keycfg_configured = dpaa2_configure_flow_icmp(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_UDP: + is_keycfg_configured = dpaa2_configure_flow_udp(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_TCP: + is_keycfg_configured = dpaa2_configure_flow_tcp(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + is_keycfg_configured = dpaa2_configure_flow_sctp(flow, + dev, attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_GRE: + is_keycfg_configured = dpaa2_configure_flow_gre(flow, + dev, + attr, + &pattern[i], + actions, + error); + break; + case RTE_FLOW_ITEM_TYPE_END: + end_of_list = 1; + break; /*End of List*/ + default: + DPAA2_PMD_ERR("Invalid action type"); + ret = -ENOTSUP; + break; + } + i++; + } + + /* Let's parse action on matching traffic */ + end_of_list = 0; + while (!end_of_list) { + switch (actions[j].type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf); + flow->flow_id = dest_queue->index; + flow->action = RTE_FLOW_ACTION_TYPE_QUEUE; + memset(&action, 0, sizeof(struct dpni_fs_action_cfg)); + action.flow_id = flow->flow_id; + if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) { + if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg, + (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) { + DPAA2_PMD_ERR( + "Unable to prepare extract parameters"); + return -1; + } + + memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg)); + 
qos_cfg.discard_on_miss = true; + qos_cfg.keep_entries = true; + qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param; + ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, + priv->token, &qos_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "Distribution cannot be configured.(%d)" + , ret); + return -1; + } + } + if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) { + if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id], + (uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) { + DPAA2_PMD_ERR( + "Unable to prepare extract parameters"); + return -1; + } + + memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); + tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc; + tc_cfg.dist_mode = DPNI_DIST_MODE_FS; + tc_cfg.key_cfg_iova = + (uint64_t)priv->extract.fs_extract_param[flow->tc_id]; + tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP; + tc_cfg.fs_cfg.keep_entries = true; + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, + priv->token, + flow->tc_id, &tc_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "Distribution cannot be configured.(%d)" + , ret); + return -1; + } + } + /* Configure QoS table first */ + memset(&nic_attr, 0, sizeof(struct dpni_attr)); + ret = dpni_get_attributes(dpni, CMD_PRI_LOW, + priv->token, &nic_attr); + if (ret < 0) { + DPAA2_PMD_ERR( + "Failure to get attribute. dpni@%p err code(%d)\n", + dpni, ret); + return ret; + } + + action.flow_id = action.flow_id % nic_attr.num_rx_tcs; + index = flow->index + (flow->tc_id * nic_attr.fs_entries); + ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, + priv->token, &flow->rule, + flow->tc_id, index); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in addnig entry to QoS table(%d)", ret); + return ret; + } + + /* Then Configure FS table */ + ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token, + flow->tc_id, flow->index, + &flow->rule, &action); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in adding entry to FS table(%d)", ret); + return ret; + } + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = dpni_get_attributes(dpni, CMD_PRI_LOW, + priv->token, &nic_attr); + if (ret < 0) { + DPAA2_PMD_ERR( + "Failure to get attribute. 
dpni@%p err code(%d)\n", + dpni, ret); + return ret; + } + rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf); + for (i = 0; i < (int)rss_conf->queue_num; i++) { + if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) || + rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) { + DPAA2_PMD_ERR( + "Queue/Group combination are not supported\n"); + return -ENOTSUP; + } + } + + flow->action = RTE_FLOW_ACTION_TYPE_RSS; + ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types, + &key_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "unable to set flow distribution.please check queue config\n"); + return ret; + } + + /* Allocate DMA'ble memory to write the rules */ + param = (size_t)rte_malloc(NULL, 256, 64); + if (!param) { + DPAA2_PMD_ERR("Memory allocation failure\n"); + return -1; + } + + if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) { + DPAA2_PMD_ERR( + "Unable to prepare extract parameters"); + rte_free((void *)param); + return -1; + } + + memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg)); + tc_cfg.dist_size = rss_conf->queue_num; + tc_cfg.dist_mode = DPNI_DIST_MODE_HASH; + tc_cfg.key_cfg_iova = (size_t)param; + tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP; + + ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, + priv->token, flow->tc_id, + &tc_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "Distribution cannot be configured: %d\n", ret); + rte_free((void *)param); + return -1; + } + + rte_free((void *)param); + if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) { + if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg, + (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) { + DPAA2_PMD_ERR( + "Unable to prepare extract parameters"); + return -1; + } + memset(&qos_cfg, 0, + sizeof(struct dpni_qos_tbl_cfg)); + qos_cfg.discard_on_miss = true; + qos_cfg.keep_entries = true; + qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param; + ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, + priv->token, &qos_cfg); + if (ret < 0) { + DPAA2_PMD_ERR( + "Distribution can not be configured(%d)\n", + ret); + return -1; + } + } + + /* Add Rule into QoS table */ + index = flow->index + (flow->tc_id * nic_attr.fs_entries); + ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token, + &flow->rule, flow->tc_id, + index); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in entry addition in QoS table(%d)", + ret); + return ret; + } + break; + case RTE_FLOW_ACTION_TYPE_END: + end_of_list = 1; + break; + default: + DPAA2_PMD_ERR("Invalid action type"); + ret = -ENOTSUP; + break; + } + j++; + } + + if (!ret) { + /* New rules are inserted. 
*/ + if (!curr) { + LIST_INSERT_HEAD(&priv->flows, flow, next); + } else { + while (LIST_NEXT(curr, next)) + curr = LIST_NEXT(curr, next); + LIST_INSERT_AFTER(curr, flow, next); + } + } + return ret; +} + +static inline int +dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr, + const struct rte_flow_attr *attr) +{ + int ret = 0; + + if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) { + DPAA2_PMD_ERR("Priority group is out of range\n"); + ret = -ENOTSUP; + } + if (unlikely(attr->priority >= dpni_attr->fs_entries)) { + DPAA2_PMD_ERR("Priority within the group is out of range\n"); + ret = -ENOTSUP; + } + if (unlikely(attr->egress)) { + DPAA2_PMD_ERR( + "Flow configuration is not supported on egress side\n"); + ret = -ENOTSUP; + } + if (unlikely(!attr->ingress)) { + DPAA2_PMD_ERR("Ingress flag must be configured\n"); + ret = -EINVAL; + } + return ret; +} + +static inline void +dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern) +{ + switch (pattern->type) { + case RTE_FLOW_ITEM_TYPE_ETH: + default_mask = (const void *)&rte_flow_item_eth_mask; + break; + case RTE_FLOW_ITEM_TYPE_VLAN: + default_mask = (const void *)&rte_flow_item_vlan_mask; + break; + case RTE_FLOW_ITEM_TYPE_IPV4: + default_mask = (const void *)&rte_flow_item_ipv4_mask; + break; + case RTE_FLOW_ITEM_TYPE_IPV6: + default_mask = (const void *)&rte_flow_item_ipv6_mask; + break; + case RTE_FLOW_ITEM_TYPE_ICMP: + default_mask = (const void *)&rte_flow_item_icmp_mask; + break; + case RTE_FLOW_ITEM_TYPE_UDP: + default_mask = (const void *)&rte_flow_item_udp_mask; + break; + case RTE_FLOW_ITEM_TYPE_TCP: + default_mask = (const void *)&rte_flow_item_tcp_mask; + break; + case RTE_FLOW_ITEM_TYPE_SCTP: + default_mask = (const void *)&rte_flow_item_sctp_mask; + break; + case RTE_FLOW_ITEM_TYPE_GRE: + default_mask = (const void *)&rte_flow_item_gre_mask; + break; + default: + DPAA2_PMD_ERR("Invalid pattern type"); + } +} + +static inline int +dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv, + const struct rte_flow_item pattern[]) +{ + unsigned int i, j, k, is_found = 0; + int ret = 0; + + for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) { + for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) { + if (dpaa2_supported_pattern_type[i] == pattern[j].type) { + is_found = 1; + break; + } + } + if (!is_found) { + ret = -ENOTSUP; + break; + } + } + /* Lets verify other combinations of given pattern rules */ + for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) { + if (!pattern[j].spec) { + ret = -EINVAL; + break; + } + if ((pattern[j].last) && (!pattern[j].mask)) + dpaa2_dev_update_default_mask(&pattern[j]); + } + + /* DPAA2 platform has a limitation that extract parameter can not be */ + /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. 
*/ + for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) { + for (j = 0; j < MAX_TCS + 1; j++) { + for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) { + if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type) + break; + } + if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) + ret = -ENOTSUP; + } + } + return ret; +} + +static inline int +dpaa2_dev_verify_actions(const struct rte_flow_action actions[]) +{ + unsigned int i, j, is_found = 0; + int ret = 0; + + for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) { + for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) { + if (dpaa2_supported_action_type[i] == actions[j].type) { + is_found = 1; + break; + } + } + if (!is_found) { + ret = -ENOTSUP; + break; + } + } + for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) { + if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf)) + ret = -EINVAL; + } + return ret; +} + +static +int dpaa2_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *flow_attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpni_attr dpni_attr; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + uint16_t token = priv->token; + int ret = 0; + + memset(&dpni_attr, 0, sizeof(struct dpni_attr)); + ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr); + if (ret < 0) { + DPAA2_PMD_ERR( + "Failure to get dpni@%p attribute, err code %d\n", + dpni, ret); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ATTR, + flow_attr, "invalid"); + return ret; + } + + /* Verify input attributes */ + ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr); + if (ret < 0) { + DPAA2_PMD_ERR( + "Invalid attributes are given\n"); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ATTR, + flow_attr, "invalid"); + goto not_valid_params; + } + /* Verify input pattern list */ + ret = dpaa2_dev_verify_patterns(priv, pattern); + if (ret < 0) { + DPAA2_PMD_ERR( + "Invalid pattern list is given\n"); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, "invalid"); + goto not_valid_params; + } + /* Verify input action list */ + ret = dpaa2_dev_verify_actions(actions); + if (ret < 0) { + DPAA2_PMD_ERR( + "Invalid action list is given\n"); + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, "invalid"); + goto not_valid_params; + } +not_valid_params: + return ret; +} + +static +struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow *flow = NULL; + size_t key_iova = 0, mask_iova = 0; + int ret; + + flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE); + if (!flow) { + DPAA2_PMD_ERR("Failure to allocate memory for flow"); + goto mem_failure; + } + /* Allocate DMA'ble memory to write the rules */ + key_iova = (size_t)rte_malloc(NULL, 256, 64); + if (!key_iova) { + DPAA2_PMD_ERR( + "Memory allocation failure for rule configration\n"); + goto mem_failure; + } + mask_iova = (size_t)rte_malloc(NULL, 256, 64); + if (!mask_iova) { + DPAA2_PMD_ERR( + "Memory allocation failure for rule configration\n"); + goto mem_failure; + } + + flow->rule.key_iova = key_iova; + flow->rule.mask_iova = mask_iova; + flow->rule.key_size = 0; + + switch (dpaa2_filter_type) { + case RTE_ETH_FILTER_GENERIC: + ret = 
dpaa2_generic_flow_set(flow, dev, attr, pattern, + actions, error); + if (ret < 0) { + if (error->type > RTE_FLOW_ERROR_TYPE_ACTION) + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + attr, "unknown"); + DPAA2_PMD_ERR( + "Failure to create flow, return code (%d)", ret); + goto creation_error; + } + break; + default: + DPAA2_PMD_ERR("Filter type (%d) not supported", + dpaa2_filter_type); + break; + } + + return flow; +mem_failure: + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "memory alloc"); +creation_error: + rte_free((void *)flow); + rte_free((void *)key_iova); + rte_free((void *)mask_iova); + + return NULL; +} + +static +int dpaa2_flow_destroy(struct rte_eth_dev *dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int ret = 0; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + switch (flow->action) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + /* Remove entry from QoS table first */ + ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token, + &flow->rule); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in removing entry from QoS table(%d)", ret); + goto error; + } + + /* Then remove entry from FS table */ + ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token, + flow->tc_id, &flow->rule); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in removing entry from FS table(%d)", ret); + goto error; + } + break; + case RTE_FLOW_ACTION_TYPE_RSS: + ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token, + &flow->rule); + if (ret < 0) { + DPAA2_PMD_ERR( + "Error in removing entry from QoS table(%d)", ret); + goto error; + } + break; + default: + DPAA2_PMD_ERR( + "Action type (%d) is not supported", flow->action); + ret = -ENOTSUP; + break; + } + + LIST_REMOVE(flow, next); + /* Now free the flow */ + rte_free(flow); + +error: + if (ret) + rte_flow_error_set(error, EPERM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, + NULL, "unknown"); + return ret; +} + +/** + * Destroy user-configured flow rules. + * + * This function skips internal flow rules. + * + * @see rte_flow_flush() + * @see rte_flow_ops + */ +static int +dpaa2_flow_flush(struct rte_eth_dev *dev, + struct rte_flow_error *error) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct rte_flow *flow = LIST_FIRST(&priv->flows); + + while (flow) { + struct rte_flow *next = LIST_NEXT(flow, next); + + dpaa2_flow_destroy(dev, flow, error); + flow = next; + } + return 0; +} + +static int +dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + const struct rte_flow_action *actions __rte_unused, + void *data __rte_unused, + struct rte_flow_error *error __rte_unused) +{ + return 0; +} + +/** + * Clean up all flow rules. + * + * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow + * rules regardless of whether they are internal or user-configured. + * + * @param dev + * Pointer to the Ethernet device structure. 
+ */ +void +dpaa2_flow_clean(struct rte_eth_dev *dev) +{ + struct rte_flow *flow; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + while ((flow = LIST_FIRST(&priv->flows))) + dpaa2_flow_destroy(dev, flow, NULL); +} + +const struct rte_flow_ops dpaa2_flow_ops = { + .create = dpaa2_flow_create, + .validate = dpaa2_flow_validate, + .destroy = dpaa2_flow_destroy, + .flush = dpaa2_flow_flush, + .query = dpaa2_flow_query, +}; diff --git a/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_mux.c b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_mux.c new file mode 100644 index 000000000..e487c6b54 --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_mux.c @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2018 NXP + */ + +#include <sys/queue.h> +#include <stdio.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> + +#include <rte_ethdev.h> +#include <rte_log.h> +#include <rte_malloc.h> +#include <rte_flow_driver.h> +#include <rte_tailq.h> + +#include <rte_fslmc.h> +#include <fsl_dpdmux.h> +#include <fsl_dpkg.h> + +#include <dpaa2_ethdev.h> +#include <dpaa2_pmd_logs.h> + +struct dpaa2_dpdmux_dev { + TAILQ_ENTRY(dpaa2_dpdmux_dev) next; + /**< Pointer to Next device instance */ + struct fsl_mc_io dpdmux; /** handle to DPDMUX portal object */ + uint16_t token; + uint32_t dpdmux_id; /*HW ID for DPDMUX object */ + uint8_t num_ifs; /* Number of interfaces in DPDMUX */ +}; + +struct rte_flow { + struct dpdmux_rule_cfg rule; +}; + +TAILQ_HEAD(dpdmux_dev_list, dpaa2_dpdmux_dev); +static struct dpdmux_dev_list dpdmux_dev_list = + TAILQ_HEAD_INITIALIZER(dpdmux_dev_list); /*!< DPDMUX device list */ + +static struct dpaa2_dpdmux_dev *get_dpdmux_from_id(uint32_t dpdmux_id) +{ + struct dpaa2_dpdmux_dev *dpdmux_dev = NULL; + + /* Get DPBP dev handle from list using index */ + TAILQ_FOREACH(dpdmux_dev, &dpdmux_dev_list, next) { + if (dpdmux_dev->dpdmux_id == dpdmux_id) + break; + } + + return dpdmux_dev; +} + +struct rte_flow * +rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id, + struct rte_flow_item *pattern[], + struct rte_flow_action *actions[]) +{ + struct dpaa2_dpdmux_dev *dpdmux_dev; + struct dpkg_profile_cfg kg_cfg; + const struct rte_flow_item_ipv4 *spec; + const struct rte_flow_action_vf *vf_conf; + struct dpdmux_cls_action dpdmux_action; + struct rte_flow *flow = NULL; + void *key_iova, *mask_iova, *key_cfg_iova = NULL; + int ret; + + if (pattern[0]->type != RTE_FLOW_ITEM_TYPE_IPV4) { + DPAA2_PMD_ERR("Not supported pattern type: %d", + pattern[0]->type); + return NULL; + } + + /* Find the DPDMUX from dpdmux_id in our list */ + dpdmux_dev = get_dpdmux_from_id(dpdmux_id); + if (!dpdmux_dev) { + DPAA2_PMD_ERR("Invalid dpdmux_id: %d", dpdmux_id); + return NULL; + } + + key_cfg_iova = rte_zmalloc(NULL, DIST_PARAM_IOVA_SIZE, + RTE_CACHE_LINE_SIZE); + if (!key_cfg_iova) { + DPAA2_PMD_ERR("Unable to allocate flow-dist parameters"); + return NULL; + } + + /* Currently taking only IP protocol as an extract type. + * This can be exended to other fields using pattern->type. 
+ */ + memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg)); + kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP; + kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_PROTO; + kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; + kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; + kg_cfg.num_extracts = 1; + + ret = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_iova); + if (ret) { + DPAA2_PMD_ERR("dpkg_prepare_key_cfg failed: err(%d)", ret); + goto creation_error; + } + + ret = dpdmux_set_custom_key(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, + (uint64_t)(DPAA2_VADDR_TO_IOVA(key_cfg_iova))); + if (ret) { + DPAA2_PMD_ERR("dpdmux_set_custom_key failed: err(%d)", ret); + goto creation_error; + } + + /* As now our key extract parameters are set, let us configure + * the rule. + */ + flow = rte_zmalloc(NULL, sizeof(struct rte_flow) + + (2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE); + if (!flow) { + DPAA2_PMD_ERR( + "Memory allocation failure for rule configration\n"); + goto creation_error; + } + key_iova = (void *)((size_t)flow + sizeof(struct rte_flow)); + mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE); + + spec = (const struct rte_flow_item_ipv4 *)pattern[0]->spec; + memcpy(key_iova, (const void *)&spec->hdr.next_proto_id, + sizeof(uint8_t)); + memcpy(mask_iova, pattern[0]->mask, sizeof(uint8_t)); + + flow->rule.key_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(key_iova)); + flow->rule.mask_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(mask_iova)); + flow->rule.key_size = sizeof(uint8_t); + + vf_conf = (const struct rte_flow_action_vf *)(actions[0]->conf); + if (vf_conf->id == 0 || vf_conf->id > dpdmux_dev->num_ifs) { + DPAA2_PMD_ERR("Invalid destination id\n"); + goto creation_error; + } + dpdmux_action.dest_if = vf_conf->id; + + ret = dpdmux_add_custom_cls_entry(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, &flow->rule, + &dpdmux_action); + if (ret) { + DPAA2_PMD_ERR("dpdmux_add_custom_cls_entry failed: err(%d)", + ret); + goto creation_error; + } + + return flow; + +creation_error: + rte_free((void *)key_cfg_iova); + rte_free((void *)flow); + return NULL; +} + +static int +dpaa2_create_dpdmux_device(int vdev_fd __rte_unused, + struct vfio_device_info *obj_info __rte_unused, + int dpdmux_id) +{ + struct dpaa2_dpdmux_dev *dpdmux_dev; + struct dpdmux_attr attr; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Allocate DPAA2 dpdmux handle */ + dpdmux_dev = rte_malloc(NULL, sizeof(struct dpaa2_dpdmux_dev), 0); + if (!dpdmux_dev) { + DPAA2_PMD_ERR("Memory allocation failed for DPDMUX Device"); + return -1; + } + + /* Open the dpdmux object */ + dpdmux_dev->dpdmux.regs = rte_mcp_ptr_list[MC_PORTAL_INDEX]; + ret = dpdmux_open(&dpdmux_dev->dpdmux, CMD_PRI_LOW, dpdmux_id, + &dpdmux_dev->token); + if (ret) { + DPAA2_PMD_ERR("Unable to open dpdmux object: err(%d)", ret); + goto init_err; + } + + ret = dpdmux_get_attributes(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, &attr); + if (ret) { + DPAA2_PMD_ERR("Unable to get dpdmux attr: err(%d)", ret); + goto init_err; + } + + ret = dpdmux_if_set_default(&dpdmux_dev->dpdmux, CMD_PRI_LOW, + dpdmux_dev->token, 1); + if (ret) { + DPAA2_PMD_ERR("setting default interface failed in %s", + __func__); + goto init_err; + } + + dpdmux_dev->dpdmux_id = dpdmux_id; + dpdmux_dev->num_ifs = attr.num_ifs; + + TAILQ_INSERT_TAIL(&dpdmux_dev_list, dpdmux_dev, next); + + return 0; + +init_err: + if (dpdmux_dev) + rte_free(dpdmux_dev); + + return -1; +} + +static struct rte_dpaa2_object rte_dpaa2_dpdmux_obj = { + .dev_type = DPAA2_MUX, + 
.create = dpaa2_create_dpdmux_device, +}; + +RTE_PMD_REGISTER_DPAA2_OBJECT(dpdmux, rte_dpaa2_dpdmux_obj); diff --git a/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h new file mode 100644 index 000000000..c47ba8e10 --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 NXP + */ + +#ifndef _DPAA2_PMD_LOGS_H_ +#define _DPAA2_PMD_LOGS_H_ + +extern int dpaa2_logtype_pmd; + +#define DPAA2_PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_pmd, "dpaa2_net: " \ + fmt "\n", ##args) + +#define DPAA2_PMD_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_pmd, "dpaa2_net: %s(): "\ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_DEBUG(">>") + +#define DPAA2_PMD_CRIT(fmt, args...) \ + DPAA2_PMD_LOG(CRIT, fmt, ## args) +#define DPAA2_PMD_INFO(fmt, args...) \ + DPAA2_PMD_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_ERR(fmt, args...) \ + DPAA2_PMD_LOG(ERR, fmt, ## args) +#define DPAA2_PMD_WARN(fmt, args...) \ + DPAA2_PMD_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_PMD_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_PMD_DP_DEBUG(fmt, args...) \ + DPAA2_PMD_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_PMD_DP_INFO(fmt, args...) \ + DPAA2_PMD_DP_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_DP_WARN(fmt, args...) \ + DPAA2_PMD_DP_LOG(WARNING, fmt, ## args) + +#endif /* _DPAA2_PMD_LOGS_H_ */ diff --git a/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c new file mode 100644 index 000000000..c6e50123c --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c @@ -0,0 +1,1306 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
+ * Copyright 2016-2018 NXP + * + */ + +#include <time.h> +#include <net/if.h> + +#include <rte_mbuf.h> +#include <rte_ethdev_driver.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_string_fns.h> +#include <rte_dev.h> + +#include <rte_fslmc.h> +#include <fslmc_vfio.h> +#include <dpaa2_hw_pvt.h> +#include <dpaa2_hw_dpio.h> +#include <dpaa2_hw_mempool.h> + +#include "dpaa2_pmd_logs.h" +#include "dpaa2_ethdev.h" +#include "base/dpaa2_hw_dpni_annot.h" + +static inline uint32_t __attribute__((hot)) +dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, + struct dpaa2_annot_hdr *annotation); + +#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \ + DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \ + DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \ + DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \ + DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \ + DPAA2_SET_FD_FRC(_fd, 0); \ + DPAA2_RESET_FD_CTRL(_fd); \ + DPAA2_RESET_FD_FLC(_fd); \ +} while (0) + +static inline void __attribute__((hot)) +dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd) +{ + struct dpaa2_annot_hdr *annotation; + uint16_t frc = DPAA2_GET_FD_FRC_PARSE_SUM(fd); + + m->packet_type = RTE_PTYPE_UNKNOWN; + switch (frc) { + case DPAA2_PKT_TYPE_ETHER: + m->packet_type = RTE_PTYPE_L2_ETHER; + break; + case DPAA2_PKT_TYPE_IPV4: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4; + break; + case DPAA2_PKT_TYPE_IPV6: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6; + break; + case DPAA2_PKT_TYPE_IPV4_EXT: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT; + break; + case DPAA2_PKT_TYPE_IPV6_EXT: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT; + break; + case DPAA2_PKT_TYPE_IPV4_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; + break; + case DPAA2_PKT_TYPE_IPV6_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; + break; + case DPAA2_PKT_TYPE_IPV4_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP; + break; + case DPAA2_PKT_TYPE_IPV6_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; + break; + case DPAA2_PKT_TYPE_IPV4_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP; + break; + case DPAA2_PKT_TYPE_IPV6_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; + break; + case DPAA2_PKT_TYPE_IPV4_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP; + break; + case DPAA2_PKT_TYPE_IPV6_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP; + break; + default: + m->packet_type = dpaa2_dev_rx_parse_slow(m, + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); + } + m->hash.rss = fd->simple.flc_hi; + m->ol_flags |= PKT_RX_RSS_HASH; + + if (dpaa2_enable_ts == PMD_DPAA2_ENABLE_TS) { + annotation = (struct dpaa2_annot_hdr *) + ((size_t)DPAA2_IOVA_TO_VADDR( + DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE); + m->timestamp = annotation->word2; + m->ol_flags |= PKT_RX_TIMESTAMP; + DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp); + } + + DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x " + "ol_flags =0x%" PRIx64 "", + frc, m->packet_type, m->ol_flags); +} + +static inline uint32_t __attribute__((hot)) +dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf, + struct dpaa2_annot_hdr *annotation) +{ + uint32_t pkt_type = RTE_PTYPE_UNKNOWN; + uint16_t *vlan_tci; + + 
DPAA2_PMD_DP_DEBUG("(slow parse)annotation(3)=0x%" PRIx64 "\t" + "(4)=0x%" PRIx64 "\t", + annotation->word3, annotation->word4); + + if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) { + vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *, + (VLAN_TCI_OFFSET_1(annotation->word5) >> 16)); + mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci); + mbuf->ol_flags |= PKT_RX_VLAN; + pkt_type |= RTE_PTYPE_L2_ETHER_VLAN; + } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) { + vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *, + (VLAN_TCI_OFFSET_1(annotation->word5) >> 16)); + mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci); + mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ; + pkt_type |= RTE_PTYPE_L2_ETHER_QINQ; + } + + if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) { + pkt_type |= RTE_PTYPE_L2_ETHER_ARP; + goto parse_done; + } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) { + pkt_type |= RTE_PTYPE_L2_ETHER; + } else { + goto parse_done; + } + + if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT | + L3_IPV4_N_PRESENT)) { + pkt_type |= RTE_PTYPE_L3_IPV4; + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | + L3_IP_N_OPT_PRESENT)) + pkt_type |= RTE_PTYPE_L3_IPV4_EXT; + + } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT | + L3_IPV6_N_PRESENT)) { + pkt_type |= RTE_PTYPE_L3_IPV6; + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | + L3_IP_N_OPT_PRESENT)) + pkt_type |= RTE_PTYPE_L3_IPV6_EXT; + } else { + goto parse_done; + } + + if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT | + L3_IP_1_MORE_FRAGMENT | + L3_IP_N_FIRST_FRAGMENT | + L3_IP_N_MORE_FRAGMENT)) { + pkt_type |= RTE_PTYPE_L4_FRAG; + goto parse_done; + } else { + pkt_type |= RTE_PTYPE_L4_NONFRAG; + } + + if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_UDP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_TCP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_SCTP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_ICMP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL)) + pkt_type |= RTE_PTYPE_UNKNOWN; + +parse_done: + return pkt_type; +} + +static inline uint32_t __attribute__((hot)) +dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr) +{ + struct dpaa2_annot_hdr *annotation = + (struct dpaa2_annot_hdr *)hw_annot_addr; + + DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t", + annotation->word4); + + if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + + mbuf->ol_flags |= PKT_RX_TIMESTAMP; + mbuf->timestamp = annotation->word2; + DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp); + + /* Check detailed parsing requirement */ + if (annotation->word3 & 0x7FFFFC3FFFF) + return dpaa2_dev_rx_parse_slow(mbuf, annotation); + + /* Return some common types from parse processing */ + switch (annotation->word4) { + case DPAA2_L3_IPv4: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; + case DPAA2_L3_IPv6: + return RTE_PTYPE_L2_ETHER | 
RTE_PTYPE_L3_IPV6; + case DPAA2_L3_IPv4_TCP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_TCP; + case DPAA2_L3_IPv4_UDP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_UDP; + case DPAA2_L3_IPv6_TCP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_TCP; + case DPAA2_L3_IPv6_UDP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_UDP; + default: + break; + } + + return dpaa2_dev_rx_parse_slow(mbuf, annotation); +} + +static inline struct rte_mbuf *__attribute__((hot)) +eth_sg_fd_to_mbuf(const struct qbman_fd *fd) +{ + struct qbman_sge *sgt, *sge; + size_t sg_addr, fd_addr; + int i = 0; + struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp; + + fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); + + /* Get Scatter gather table address */ + sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd)); + + sge = &sgt[i++]; + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge)); + + /* First Scatter gather entry */ + first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); + /* Prepare all the metadata for first segment */ + first_seg->buf_addr = (uint8_t *)sg_addr; + first_seg->ol_flags = 0; + first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); + first_seg->data_len = sge->length & 0x1FFFF; + first_seg->pkt_len = DPAA2_GET_FD_LEN(fd); + first_seg->nb_segs = 1; + first_seg->next = NULL; + if (dpaa2_svr_family == SVR_LX2160A) + dpaa2_dev_rx_parse_new(first_seg, fd); + else + first_seg->packet_type = dpaa2_dev_rx_parse(first_seg, + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); + + rte_mbuf_refcnt_set(first_seg, 1); + cur_seg = first_seg; + while (!DPAA2_SG_IS_FINAL(sge)) { + sge = &sgt[i++]; + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR( + DPAA2_GET_FLE_ADDR(sge)); + next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size); + next_seg->buf_addr = (uint8_t *)sg_addr; + next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); + next_seg->data_len = sge->length & 0x1FFFF; + first_seg->nb_segs += 1; + rte_mbuf_refcnt_set(next_seg, 1); + cur_seg->next = next_seg; + next_seg->next = NULL; + cur_seg = next_seg; + } + temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); + rte_mbuf_refcnt_set(temp, 1); + rte_pktmbuf_free_seg(temp); + + return (void *)first_seg; +} + +static inline struct rte_mbuf *__attribute__((hot)) +eth_fd_to_mbuf(const struct qbman_fd *fd) +{ + struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( + DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); + + /* need to repopulated some of the fields, + * as they may have changed in last transmission + */ + mbuf->nb_segs = 1; + mbuf->ol_flags = 0; + mbuf->data_off = DPAA2_GET_FD_OFFSET(fd); + mbuf->data_len = DPAA2_GET_FD_LEN(fd); + mbuf->pkt_len = mbuf->data_len; + mbuf->next = NULL; + rte_mbuf_refcnt_set(mbuf, 1); + + /* Parse the packet */ + /* parse results for LX2 are there in FRC field of FD. 
+ * For other DPAA2 platforms , parse results are after + * the private - sw annotation area + */ + + if (dpaa2_svr_family == SVR_LX2160A) + dpaa2_dev_rx_parse_new(mbuf, fd); + else + mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); + + DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", + mbuf, mbuf->buf_addr, mbuf->data_off, + DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); + + return mbuf; +} + +static int __attribute__ ((noinline)) __attribute__((hot)) +eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) +{ + struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp; + struct qbman_sge *sgt, *sge = NULL; + int i; + + temp = rte_pktmbuf_alloc(mbuf->pool); + if (temp == NULL) { + DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n"); + return -ENOMEM; + } + + DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp)); + DPAA2_SET_FD_LEN(fd, mbuf->pkt_len); + DPAA2_SET_ONLY_FD_BPID(fd, bpid); + DPAA2_SET_FD_OFFSET(fd, temp->data_off); + DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL); + DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg); + /*Set Scatter gather table and Scatter gather entries*/ + sgt = (struct qbman_sge *)( + (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_GET_FD_OFFSET(fd)); + + for (i = 0; i < mbuf->nb_segs; i++) { + sge = &sgt[i]; + /*Resetting the buffer pool id and offset field*/ + sge->fin_bpid_offset = 0; + DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg)); + DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off); + sge->length = cur_seg->data_len; + if (RTE_MBUF_DIRECT(cur_seg)) { + if (rte_mbuf_refcnt_read(cur_seg) > 1) { + /* If refcnt > 1, invalid bpid is set to ensure + * buffer is not freed by HW + */ + DPAA2_SET_FLE_IVP(sge); + rte_mbuf_refcnt_update(cur_seg, -1); + } else + DPAA2_SET_FLE_BPID(sge, + mempool_to_bpid(cur_seg->pool)); + cur_seg = cur_seg->next; + } else { + /* Get owner MBUF from indirect buffer */ + mi = rte_mbuf_from_indirect(cur_seg); + if (rte_mbuf_refcnt_read(mi) > 1) { + /* If refcnt > 1, invalid bpid is set to ensure + * owner buffer is not freed by HW + */ + DPAA2_SET_FLE_IVP(sge); + } else { + DPAA2_SET_FLE_BPID(sge, + mempool_to_bpid(mi->pool)); + rte_mbuf_refcnt_update(mi, 1); + } + prev_seg = cur_seg; + cur_seg = cur_seg->next; + prev_seg->next = NULL; + rte_pktmbuf_free(prev_seg); + } + } + DPAA2_SG_SET_FINAL(sge, true); + return 0; +} + +static void +eth_mbuf_to_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) __attribute__((unused)); + +static void __attribute__ ((noinline)) __attribute__((hot)) +eth_mbuf_to_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) +{ + DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid); + + DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", + mbuf, mbuf->buf_addr, mbuf->data_off, + DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); + if (RTE_MBUF_DIRECT(mbuf)) { + if (rte_mbuf_refcnt_read(mbuf) > 1) { + DPAA2_SET_FD_IVP(fd); + rte_mbuf_refcnt_update(mbuf, -1); + } + } else { + struct rte_mbuf *mi; + + mi = rte_mbuf_from_indirect(mbuf); + if (rte_mbuf_refcnt_read(mi) > 1) + DPAA2_SET_FD_IVP(fd); + else + rte_mbuf_refcnt_update(mi, 1); + 
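+		/* Free only the indirect mbuf wrapper here; the FD either
+		 * carries an invalid bpid or the owner's refcount was bumped
+		 * above, so the data buffer stays valid until the hardware
+		 * releases it.
+		 */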
rte_pktmbuf_free(mbuf); + } +} + +static inline int __attribute__((hot)) +eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) +{ + struct rte_mbuf *m; + void *mb = NULL; + + if (rte_dpaa2_mbuf_alloc_bulk( + rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) { + DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n"); + return -1; + } + m = (struct rte_mbuf *)mb; + memcpy((char *)m->buf_addr + mbuf->data_off, + (void *)((char *)mbuf->buf_addr + mbuf->data_off), + mbuf->pkt_len); + + /* Copy required fields */ + m->data_off = mbuf->data_off; + m->ol_flags = mbuf->ol_flags; + m->packet_type = mbuf->packet_type; + m->tx_offload = mbuf->tx_offload; + + DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid); + + DPAA2_PMD_DP_DEBUG( + "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d," + " meta: %d, off: %d, len: %d\n", + (void *)mbuf, + mbuf->buf_addr, + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); + +return 0; +} + +/* This function assumes that caller will be keep the same value for nb_pkts + * across calls per queue, if that is not the case, better use non-prefetch + * version of rx call. + * It will return the packets as requested in previous call without honoring + * the current nb_pkts or bufs space. + */ +uint16_t +dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + /* Function receive frames for a given device and VQ*/ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_result *dq_storage, *dq_storage1 = NULL; + uint32_t fqid = dpaa2_q->fqid; + int ret, num_rx = 0, pull_size; + uint8_t pending, status; + struct qbman_swp *swp; + const struct qbman_fd *fd, *next_fd; + struct qbman_pull_desc pulldesc; + struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + + if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { + ret = dpaa2_affine_qbman_ethrx_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return 0; + } + } + + if (unlikely(!rte_dpaa2_bpid_info && + rte_eal_process_type() == RTE_PROC_SECONDARY)) + rte_dpaa2_bpid_info = dpaa2_q->bp_array; + + swp = DPAA2_PER_LCORE_ETHRX_PORTAL; + pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts; + if (unlikely(!q_storage->active_dqs)) { + q_storage->toggle = 0; + dq_storage = q_storage->dq_storage[q_storage->toggle]; + q_storage->last_num_pkts = pull_size; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, + q_storage->last_num_pkts); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage, + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs( + DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + " QBMAN is busy (1)\n"); + /* Portal was busy, try again */ + continue; + } + break; + } + q_storage->active_dqs = dq_storage; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, + dq_storage); + } + + dq_storage = q_storage->active_dqs; + rte_prefetch0((void *)(size_t)(dq_storage)); + rte_prefetch0((void *)(size_t)(dq_storage + 1)); + + /* Prepare next pull descriptor. 
This will give space for the + * prefething done on DQRR entries + */ + q_storage->toggle ^= 1; + dq_storage1 = q_storage->dq_storage[q_storage->toggle]; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, pull_size); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage1, + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); + + /* Check if the previous issued command is completed. + * Also seems like the SWP is shared between the Ethernet Driver + * and the SEC driver. + */ + while (!qbman_check_command_complete(dq_storage)) + ; + if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id)) + clear_swp_active_dqs(q_storage->active_dpio_id); + + pending = 1; + + do { + /* Loop until the dq_storage is updated with + * new token by QBMAN + */ + while (!qbman_check_new_result(dq_storage)) + ; + rte_prefetch0((void *)((size_t)(dq_storage + 2))); + /* Check whether Last Pull command is Expired and + * setting Condition for Loop termination + */ + if (qbman_result_DQ_is_pull_complete(dq_storage)) { + pending = 0; + /* Check for valid frame. */ + status = qbman_result_DQ_flags(dq_storage); + if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) + continue; + } + fd = qbman_result_DQ_fd(dq_storage); + + if (dpaa2_svr_family != SVR_LX2160A) { + next_fd = qbman_result_DQ_fd(dq_storage + 1); + /* Prefetch Annotation address for the parse results */ + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR( + next_fd) + DPAA2_FD_PTA_SIZE + 16)); + } + + if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg)) + bufs[num_rx] = eth_sg_fd_to_mbuf(fd); + else + bufs[num_rx] = eth_fd_to_mbuf(fd); + bufs[num_rx]->port = eth_data->port_id; + + if (eth_data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_STRIP) + rte_vlan_strip(bufs[num_rx]); + + dq_storage++; + num_rx++; + } while (pending); + + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + /* issue a volatile dequeue command for next pull */ + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." 
+ "QBMAN is busy (2)\n"); + continue; + } + break; + } + q_storage->active_dqs = dq_storage1; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); + + dpaa2_q->rx_pkts += num_rx; + + return num_rx; +} + +void __attribute__((hot)) +dpaa2_dev_process_parallel_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev) +{ + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + + DPAA2_FD_PTA_SIZE + 16)); + + ev->flow_id = rxq->ev.flow_id; + ev->sub_event_type = rxq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = rxq->ev.sched_type; + ev->queue_id = rxq->ev.queue_id; + ev->priority = rxq->ev.priority; + + ev->mbuf = eth_fd_to_mbuf(fd); + + qbman_swp_dqrr_consume(swp, dq); +} + +void __attribute__((hot)) +dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev) +{ + uint8_t dqrr_index; + + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + + DPAA2_FD_PTA_SIZE + 16)); + + ev->flow_id = rxq->ev.flow_id; + ev->sub_event_type = rxq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = rxq->ev.sched_type; + ev->queue_id = rxq->ev.queue_id; + ev->priority = rxq->ev.priority; + + ev->mbuf = eth_fd_to_mbuf(fd); + + dqrr_index = qbman_get_dqrr_idx(dq); + ev->mbuf->seqn = dqrr_index + 1; + DPAA2_PER_LCORE_DQRR_SIZE++; + DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; + DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf; +} + +void __attribute__((hot)) +dpaa2_dev_process_ordered_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev) +{ + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + + DPAA2_FD_PTA_SIZE + 16)); + + ev->flow_id = rxq->ev.flow_id; + ev->sub_event_type = rxq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = rxq->ev.sched_type; + ev->queue_id = rxq->ev.queue_id; + ev->priority = rxq->ev.priority; + + ev->mbuf = eth_fd_to_mbuf(fd); + + ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP; + ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT; + ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT; + + qbman_swp_dqrr_consume(swp, dq); +} + +/* + * Callback to handle sending packets through WRIOP based interface + */ +uint16_t +dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + /* Function to transmit the frames to given device and VQ*/ + uint32_t loop, retry_count; + int32_t ret; + struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; + struct rte_mbuf *mi; + uint32_t frames_to_send; + struct rte_mempool *mp; + struct qbman_eq_desc eqdesc; + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_swp *swp; + uint16_t num_tx = 0; + uint16_t bpid; + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + uint32_t flags[MAX_TX_RING_SLOTS] = {0}; + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return 0; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n", + eth_data, dpaa2_q->fqid); + + /*Prepare 
enqueue descriptor*/ + qbman_eq_desc_clear(&eqdesc); + qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); + qbman_eq_desc_set_qd(&eqdesc, priv->qdid, + dpaa2_q->flow_id, dpaa2_q->tc_index); + /*Clear the unused FD fields before sending*/ + while (nb_pkts) { + /*Check if the queue is congested*/ + retry_count = 0; + while (qbman_result_SCN_state(dpaa2_q->cscn)) { + retry_count++; + /* Retry for some time before giving up */ + if (retry_count > CONG_RETRY_COUNT) + goto skip_tx; + } + + frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? + dpaa2_eqcr_size : nb_pkts; + + for (loop = 0; loop < frames_to_send; loop++) { + if ((*bufs)->seqn) { + uint8_t dqrr_index = (*bufs)->seqn - 1; + + flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | + dqrr_index; + DPAA2_PER_LCORE_DQRR_SIZE--; + DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); + (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN; + } + + if (likely(RTE_MBUF_DIRECT(*bufs))) { + mp = (*bufs)->pool; + /* Check the basic scenario and set + * the FD appropriately here itself. + */ + if (likely(mp && mp->ops_index == + priv->bp_list->dpaa2_ops_index && + (*bufs)->nb_segs == 1 && + rte_mbuf_refcnt_read((*bufs)) == 1)) { + if (unlikely(((*bufs)->ol_flags + & PKT_TX_VLAN_PKT) || + (eth_data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { + ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } + DPAA2_MBUF_TO_CONTIG_FD((*bufs), + &fd_arr[loop], mempool_to_bpid(mp)); + bufs++; + continue; + } + } else { + mi = rte_mbuf_from_indirect(*bufs); + mp = mi->pool; + } + /* Not a hw_pkt pool allocated frame */ + if (unlikely(!mp || !priv->bp_list)) { + DPAA2_PMD_ERR("Err: No buffer pool attached"); + goto send_n_return; + } + + if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) || + (eth_data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { + int ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } + if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { + DPAA2_PMD_WARN("Non DPAA2 buffer pool"); + /* alloc should be from the default buffer pool + * attached to this interface + */ + bpid = priv->bp_list->buf_pool.bpid; + + if (unlikely((*bufs)->nb_segs > 1)) { + DPAA2_PMD_ERR("S/G support not added" + " for non hw offload buffer"); + goto send_n_return; + } + if (eth_copy_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid)) { + goto send_n_return; + } + /* free the original packet */ + rte_pktmbuf_free(*bufs); + } else { + bpid = mempool_to_bpid(mp); + if (unlikely((*bufs)->nb_segs > 1)) { + if (eth_mbuf_to_sg_fd(*bufs, + &fd_arr[loop], bpid)) + goto send_n_return; + } else { + eth_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid); + } + } + bufs++; + } + loop = 0; + while (loop < frames_to_send) { + loop += qbman_swp_enqueue_multiple(swp, &eqdesc, + &fd_arr[loop], &flags[loop], + frames_to_send - loop); + } + + num_tx += frames_to_send; + nb_pkts -= frames_to_send; + } + dpaa2_q->tx_pkts += num_tx; + return num_tx; + +send_n_return: + /* send any already prepared fd */ + if (loop) { + unsigned int i = 0; + + while (i < loop) { + i += qbman_swp_enqueue_multiple(swp, &eqdesc, + &fd_arr[i], + &flags[loop], + loop - i); + } + num_tx += loop; + } +skip_tx: + dpaa2_q->tx_pkts += num_tx; + return num_tx; +} + +void +dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci) +{ + struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; + struct qbman_fd *fd; + struct rte_mbuf *m; + + fd = qbman_result_eqresp_fd(&dpio_dev->eqresp[eqresp_ci]); + m = eth_fd_to_mbuf(fd); + rte_pktmbuf_free(m); +} + +static void +dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q, + 
struct rte_mbuf *m, + struct qbman_eq_desc *eqdesc) +{ + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO; + struct eqresp_metadata *eqresp_meta; + uint16_t orpid, seqnum; + uint8_t dq_idx; + + qbman_eq_desc_set_qd(eqdesc, priv->qdid, dpaa2_q->flow_id, + dpaa2_q->tc_index); + + if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) { + orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >> + DPAA2_EQCR_OPRID_SHIFT; + seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >> + DPAA2_EQCR_SEQNUM_SHIFT; + + if (!priv->en_loose_ordered) { + qbman_eq_desc_set_orp(eqdesc, 1, orpid, seqnum, 0); + qbman_eq_desc_set_response(eqdesc, (uint64_t) + DPAA2_VADDR_TO_IOVA(&dpio_dev->eqresp[ + dpio_dev->eqresp_pi]), 1); + qbman_eq_desc_set_token(eqdesc, 1); + + eqresp_meta = &dpio_dev->eqresp_meta[ + dpio_dev->eqresp_pi]; + eqresp_meta->dpaa2_q = dpaa2_q; + eqresp_meta->mp = m->pool; + + dpio_dev->eqresp_pi + 1 < MAX_EQ_RESP_ENTRIES ? + dpio_dev->eqresp_pi++ : + (dpio_dev->eqresp_pi = 0); + } else { + qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0); + } + } else { + dq_idx = m->seqn - 1; + qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0); + DPAA2_PER_LCORE_DQRR_SIZE--; + DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx); + } + m->seqn = DPAA2_INVALID_MBUF_SEQN; +} + +/* Callback to handle sending ordered packets through WRIOP based interface */ +uint16_t +dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + /* Function to transmit the frames to given device and VQ*/ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + struct dpaa2_queue *order_sendq = (struct dpaa2_queue *)priv->tx_vq[0]; + struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; + struct rte_mbuf *mi; + struct rte_mempool *mp; + struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS]; + struct qbman_swp *swp; + uint32_t frames_to_send, num_free_eq_desc; + uint32_t loop, retry_count; + int32_t ret; + uint16_t num_tx = 0; + uint16_t bpid; + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return 0; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + DPAA2_PMD_DP_DEBUG("===> eth_data =%p, fqid =%d\n", + eth_data, dpaa2_q->fqid); + + /* This would also handle normal and atomic queues as any type + * of packet can be enqueued when ordered queues are being used. + */ + while (nb_pkts) { + /*Check if the queue is congested*/ + retry_count = 0; + while (qbman_result_SCN_state(dpaa2_q->cscn)) { + retry_count++; + /* Retry for some time before giving up */ + if (retry_count > CONG_RETRY_COUNT) + goto skip_tx; + } + + frames_to_send = (nb_pkts > dpaa2_eqcr_size) ? 
+ dpaa2_eqcr_size : nb_pkts; + + if (!priv->en_loose_ordered) { + if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) { + num_free_eq_desc = dpaa2_free_eq_descriptors(); + if (num_free_eq_desc < frames_to_send) + frames_to_send = num_free_eq_desc; + } + } + + for (loop = 0; loop < frames_to_send; loop++) { + /*Prepare enqueue descriptor*/ + qbman_eq_desc_clear(&eqdesc[loop]); + + if ((*bufs)->seqn) { + /* Use only queue 0 for Tx in case of atomic/ + * ordered packets as packets can get unordered + * when being tranmitted out from the interface + */ + dpaa2_set_enqueue_descriptor(order_sendq, + (*bufs), + &eqdesc[loop]); + } else { + qbman_eq_desc_set_no_orp(&eqdesc[loop], + DPAA2_EQ_RESP_ERR_FQ); + qbman_eq_desc_set_qd(&eqdesc[loop], priv->qdid, + dpaa2_q->flow_id, + dpaa2_q->tc_index); + } + + if (likely(RTE_MBUF_DIRECT(*bufs))) { + mp = (*bufs)->pool; + /* Check the basic scenario and set + * the FD appropriately here itself. + */ + if (likely(mp && mp->ops_index == + priv->bp_list->dpaa2_ops_index && + (*bufs)->nb_segs == 1 && + rte_mbuf_refcnt_read((*bufs)) == 1)) { + if (unlikely((*bufs)->ol_flags + & PKT_TX_VLAN_PKT)) { + ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } + DPAA2_MBUF_TO_CONTIG_FD((*bufs), + &fd_arr[loop], + mempool_to_bpid(mp)); + bufs++; + continue; + } + } else { + mi = rte_mbuf_from_indirect(*bufs); + mp = mi->pool; + } + /* Not a hw_pkt pool allocated frame */ + if (unlikely(!mp || !priv->bp_list)) { + DPAA2_PMD_ERR("Err: No buffer pool attached"); + goto send_n_return; + } + + if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { + DPAA2_PMD_WARN("Non DPAA2 buffer pool"); + /* alloc should be from the default buffer pool + * attached to this interface + */ + bpid = priv->bp_list->buf_pool.bpid; + + if (unlikely((*bufs)->nb_segs > 1)) { + DPAA2_PMD_ERR( + "S/G not supp for non hw offload buffer"); + goto send_n_return; + } + if (eth_copy_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid)) { + goto send_n_return; + } + /* free the original packet */ + rte_pktmbuf_free(*bufs); + } else { + bpid = mempool_to_bpid(mp); + if (unlikely((*bufs)->nb_segs > 1)) { + if (eth_mbuf_to_sg_fd(*bufs, + &fd_arr[loop], + bpid)) + goto send_n_return; + } else { + eth_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid); + } + } + bufs++; + } + loop = 0; + while (loop < frames_to_send) { + loop += qbman_swp_enqueue_multiple_desc(swp, + &eqdesc[loop], &fd_arr[loop], + frames_to_send - loop); + } + + num_tx += frames_to_send; + nb_pkts -= frames_to_send; + } + dpaa2_q->tx_pkts += num_tx; + return num_tx; + +send_n_return: + /* send any already prepared fd */ + if (loop) { + unsigned int i = 0; + + while (i < loop) { + i += qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop], + &fd_arr[i], loop - i); + } + num_tx += loop; + } +skip_tx: + dpaa2_q->tx_pkts += num_tx; + return num_tx; +} + +/** + * Dummy DPDK callback for TX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). 
+ */ +uint16_t +dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + (void)queue; + (void)bufs; + (void)nb_pkts; + return 0; +} + +#if defined(RTE_TOOLCHAIN_GCC) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-qual" +#elif defined(RTE_TOOLCHAIN_CLANG) +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-qual" +#endif + +/* This function loopbacks all the received packets.*/ +uint16_t +dpaa2_dev_loopback_rx(void *queue, + struct rte_mbuf **bufs __rte_unused, + uint16_t nb_pkts) +{ + /* Function receive frames for a given device and VQ*/ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_result *dq_storage, *dq_storage1 = NULL; + uint32_t fqid = dpaa2_q->fqid; + int ret, num_rx = 0, num_tx = 0, pull_size; + uint8_t pending, status; + struct qbman_swp *swp; + struct qbman_fd *fd[DPAA2_LX2_DQRR_RING_SIZE]; + struct qbman_pull_desc pulldesc; + struct qbman_eq_desc eqdesc; + struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; + struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data; + struct dpaa2_dev_priv *priv = eth_data->dev_private; + struct dpaa2_queue *tx_q = priv->tx_vq[0]; + /* todo - currently we are using 1st TX queue only for loopback*/ + + if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { + ret = dpaa2_affine_qbman_ethrx_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return 0; + } + } + swp = DPAA2_PER_LCORE_ETHRX_PORTAL; + pull_size = (nb_pkts > dpaa2_dqrr_size) ? dpaa2_dqrr_size : nb_pkts; + if (unlikely(!q_storage->active_dqs)) { + q_storage->toggle = 0; + dq_storage = q_storage->dq_storage[q_storage->toggle]; + q_storage->last_num_pkts = pull_size; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, + q_storage->last_num_pkts); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage, + (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs( + DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG( + "VDQ command not issued.QBMAN busy\n"); + /* Portal was busy, try again */ + continue; + } + break; + } + q_storage->active_dqs = dq_storage; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, + dq_storage); + } + + dq_storage = q_storage->active_dqs; + rte_prefetch0((void *)(size_t)(dq_storage)); + rte_prefetch0((void *)(size_t)(dq_storage + 1)); + + /* Prepare next pull descriptor. This will give space for the + * prefething done on DQRR entries + */ + q_storage->toggle ^= 1; + dq_storage1 = q_storage->dq_storage[q_storage->toggle]; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, pull_size); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage1, + (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); + + /*Prepare enqueue descriptor*/ + qbman_eq_desc_clear(&eqdesc); + qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); + qbman_eq_desc_set_response(&eqdesc, 0, 0); + qbman_eq_desc_set_fq(&eqdesc, tx_q->fqid); + + /* Check if the previous issued command is completed. + * Also seems like the SWP is shared between the Ethernet Driver + * and the SEC driver. 
+ */ + while (!qbman_check_command_complete(dq_storage)) + ; + if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id)) + clear_swp_active_dqs(q_storage->active_dpio_id); + + pending = 1; + + do { + /* Loop until the dq_storage is updated with + * new token by QBMAN + */ + while (!qbman_check_new_result(dq_storage)) + ; + rte_prefetch0((void *)((size_t)(dq_storage + 2))); + /* Check whether Last Pull command is Expired and + * setting Condition for Loop termination + */ + if (qbman_result_DQ_is_pull_complete(dq_storage)) { + pending = 0; + /* Check for valid frame. */ + status = qbman_result_DQ_flags(dq_storage); + if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) + continue; + } + fd[num_rx] = (struct qbman_fd *)qbman_result_DQ_fd(dq_storage); + + dq_storage++; + num_rx++; + } while (pending); + + while (num_tx < num_rx) { + num_tx += qbman_swp_enqueue_multiple_fd(swp, &eqdesc, + &fd[num_tx], 0, num_rx - num_tx); + } + + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + /* issue a volatile dequeue command for next pull */ + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + "QBMAN is busy (2)\n"); + continue; + } + break; + } + q_storage->active_dqs = dq_storage1; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); + + dpaa2_q->rx_pkts += num_rx; + dpaa2_q->tx_pkts += num_tx; + + return 0; +} +#if defined(RTE_TOOLCHAIN_GCC) +#pragma GCC diagnostic pop +#elif defined(RTE_TOOLCHAIN_CLANG) +#pragma clang diagnostic pop +#endif diff --git a/src/seastar/dpdk/drivers/net/dpaa2/mc/dpdmux.c b/src/seastar/dpdk/drivers/net/dpaa2/mc/dpdmux.c new file mode 100644 index 000000000..7962213b7 --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/mc/dpdmux.c @@ -0,0 +1,929 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2018 NXP + * + */ +#include <fsl_mc_sys.h> +#include <fsl_mc_cmd.h> +#include <fsl_dpdmux.h> +#include <fsl_dpdmux_cmd.h> + +/** @addtogroup dpdmux + * @{ + */ + +/** + * dpdmux_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpdmux_id: DPDMUX unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpdmux_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. 
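+ *
+ * A minimal, illustrative call sequence (cmd_flags of CMD_PRI_LOW assumed,
+ * error handling omitted):
+ *
+ *	uint16_t token;
+ *	int err;
+ *
+ *	err = dpdmux_open(mc_io, CMD_PRI_LOW, dpdmux_id, &token);
+ *	... use 'token' in subsequent dpdmux_* calls ...
+ *	err = dpdmux_close(mc_io, CMD_PRI_LOW, token);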
+ */ +int dpdmux_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpdmux_id, + uint16_t *token) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_open *cmd_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpdmux_cmd_open *)cmd.params; + cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpdmux_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_create() - Create the DPDMUX object + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: returned object id + * + * Create the DPDMUX object, allocate required resources and + * perform required initialization. + * + * The object can be created either by declaring it in the + * DPL file, or by calling this function. + * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpdmux_cfg *cfg, + uint32_t *obj_id) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_create *cmd_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE, + cmd_flags, + dprc_token); + cmd_params = (struct dpdmux_cmd_create *)cmd.params; + cmd_params->method = cfg->method; + cmd_params->manip = cfg->manip; + cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs); + cmd_params->adv_max_dmat_entries = + cpu_to_le16(cfg->adv.max_dmat_entries); + cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups); + cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids); + cmd_params->options = cpu_to_le64(cfg->adv.options); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *obj_id = mc_cmd_read_object_id(&cmd); + + return 0; +} + +/** + * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources. 
+ * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @object_id: The object id; it must be a valid id within the container that + * created this object; + * + * The function accepts the authentication token of the parent container that + * created the object (not the one that currently owns the object). The object + * is searched within parent using the provided 'object_id'. + * All tokens to the object must be closed before calling destroy. + * + * Return: '0' on Success; error code otherwise. + */ +int dpdmux_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_destroy *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY, + cmd_flags, + dprc_token); + cmd_params = (struct dpdmux_cmd_destroy *)cmd.params; + cmd_params->dpdmux_id = cpu_to_le32(object_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_enable() - Enable DPDMUX functionality + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_disable() - Disable DPDMUX functionality + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_is_enabled() - Check if the DPDMUX is enabled. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_rsp_is_enabled *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params; + *en = dpdmux_get_field(rsp_params->en, ENABLE); + + return 0; +} + +/** + * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmux_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_get_attributes() - Retrieve DPDMUX attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @attr: Returned object's attributes + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_rsp_get_attr *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params; + attr->id = le32_to_cpu(rsp_params->id); + attr->options = le64_to_cpu(rsp_params->options); + attr->method = rsp_params->method; + attr->manip = rsp_params->manip; + attr->num_ifs = le16_to_cpu(rsp_params->num_ifs); + attr->mem_size = le16_to_cpu(rsp_params->mem_size); + + return 0; +} + +/** + * dpdmux_if_enable() - Enable Interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Identifier + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpdmux_if_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id) +{ + struct dpdmux_cmd_if *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_disable() - Disable Interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Identifier + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpdmux_if_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id) +{ + struct dpdmux_cmd_if *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @max_frame_length: The required maximum frame length + * + * Update the maximum frame length on all DMUX interfaces. + * In case of VEPA, the maximum frame length on all dmux interfaces + * will be updated with the minimum value of the mfls of the connected + * dpnis and the actual value of dmux mfl. + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_set_max_frame_length *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params; + cmd_params->max_frame_length = cpu_to_le16(max_frame_length); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_ul_reset_counters() - Function resets the uplink counter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_set_accepted_frames() - Set the accepted frame types + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface ID (0 for uplink, or 1-num_ifs); + * @cfg: Frame types configuration + * + * if 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or + * priority-tagged frames are discarded. + * if 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - untagged frames or + * priority-tagged frames are accepted. + * if 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN tagged, + * untagged and priority-tagged frame are accepted; + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_accepted_frames *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_set_accepted_frames *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + dpdmux_set_field(cmd_params->frames_options, + ACCEPTED_FRAMES_TYPE, + cfg->type); + dpdmux_set_field(cmd_params->frames_options, + UNACCEPTED_FRAMES_ACTION, + cfg->unaccept_act); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface ID (0 for uplink, or 1-num_ifs); + * @attr: Interface attributes + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_if_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if *cmd_params; + struct dpdmux_rsp_if_get_attr *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params; + attr->rate = le32_to_cpu(rsp_params->rate); + attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE); + attr->is_default = dpdmux_get_field(rsp_params->enabled, IS_DEFAULT); + attr->accept_frame_type = dpdmux_get_field( + rsp_params->accepted_frames_type, + ACCEPTED_FRAMES_TYPE); + + return 0; +} + +/** + * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Destination interface ID + * @rule: L2 rule + * + * Function removes a L2 rule from DPDMUX table + * or adds an interface to an existing multicast address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_l2_rule *rule) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_l2_rule *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id); + cmd_params->mac_addr5 = rule->mac_addr[5]; + cmd_params->mac_addr4 = rule->mac_addr[4]; + cmd_params->mac_addr3 = rule->mac_addr[3]; + cmd_params->mac_addr2 = rule->mac_addr[2]; + cmd_params->mac_addr1 = rule->mac_addr[1]; + cmd_params->mac_addr0 = rule->mac_addr[0]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Destination interface ID + * @rule: L2 rule + * + * Function adds a L2 rule into DPDMUX table + * or adds an interface to an existing multicast address + * + * Return: '0' on Success; Error code otherwise. 
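+ *
+ * Illustrative example (placeholder values): steer frames matching
+ * MAC 00:04:9f:00:00:01 and VLAN 100 to interface 1:
+ *
+ *	struct dpdmux_l2_rule rule = {
+ *		.mac_addr = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 },
+ *		.vlan_id = 100,
+ *	};
+ *
+ *	err = dpdmux_if_add_l2_rule(mc_io, CMD_PRI_LOW, token, 1, &rule);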
+ */ +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_l2_rule *rule) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_l2_rule *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(rule->vlan_id); + cmd_params->mac_addr5 = rule->mac_addr[5]; + cmd_params->mac_addr4 = rule->mac_addr[4]; + cmd_params->mac_addr3 = rule->mac_addr[3]; + cmd_params->mac_addr2 = rule->mac_addr[2]; + cmd_params->mac_addr1 = rule->mac_addr[1]; + cmd_params->mac_addr0 = rule->mac_addr[0]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_get_counter() - Functions obtains specific counter of an interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPDMUX object + * @if_id: Interface Id + * @counter_type: counter type + * @counter: Returned specific counter information + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + enum dpdmux_counter_type counter_type, + uint64_t *counter) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_get_counter *cmd_params; + struct dpdmux_rsp_if_get_counter *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->counter_type = counter_type; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params; + *counter = le64_to_cpu(rsp_params->counter); + + return 0; +} + +/** + * dpdmux_if_set_link_cfg() - set the link configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: interface id + * @cfg: Link configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_link_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_set_link_cfg *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->rate = cpu_to_le32(cfg->rate); + cmd_params->options = cpu_to_le64(cfg->options); + cmd_params->advertising = cpu_to_le64(cfg->advertising); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_get_link_state - Return the link state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: interface id + * @state: link state + * + * @returns '0' on Success; Error code otherwise. 
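+ *
+ * Illustrative example (error handling shortened):
+ *
+ *	struct dpdmux_link_state state;
+ *
+ *	err = dpdmux_if_get_link_state(mc_io, CMD_PRI_LOW, token, if_id, &state);
+ *	if (!err && state.state_valid && state.up)
+ *		... link is up; state.rate holds the current rate ...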
+ */ +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_link_state *state) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_cmd_if_get_link_state *cmd_params; + struct dpdmux_rsp_if_get_link_state *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params; + state->rate = le32_to_cpu(rsp_params->rate); + state->options = le64_to_cpu(rsp_params->options); + state->up = dpdmux_get_field(rsp_params->up, UP); + state->state_valid = dpdmux_get_field(rsp_params->up, STATE_VALID); + state->supported = le64_to_cpu(rsp_params->supported); + state->advertising = le64_to_cpu(rsp_params->advertising); + + return 0; +} + +/** + * dpdmux_if_set_default - Set default interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: interface id + * + * @returns '0' on Success; Error code otherwise. + */ +int dpdmux_if_set_default(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id) +{ + struct dpdmux_cmd_if *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_DEFAULT, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_if_get_default - Get default interface + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: interface id + * + * @returns '0' on Success; Error code otherwise. + */ +int dpdmux_if_get_default(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *if_id) +{ + struct dpdmux_cmd_if *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_DEFAULT, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpdmux_cmd_if *)cmd.params; + *if_id = le16_to_cpu(rsp_params->if_id); + + return 0; +} + +/** + * dpdmux_set_custom_key - Set a custom classification key. + * + * This API is only available for DPDMUX instance created with + * DPDMUX_METHOD_CUSTOM. This API must be called before populating the + * classification table using dpdmux_add_custom_cls_entry. + * + * Calls to dpdmux_set_custom_key remove all existing classification entries + * that may have been added previously using dpdmux_add_custom_cls_entry. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface id + * @key_cfg_iova: DMA address of a configuration structure set up using + * dpkg_prepare_key_cfg. Maximum key size is 24 bytes + * + * @returns '0' on Success; Error code otherwise. 
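+ *
+ * Typical sequence (illustrative; key buffer, rule and action setup are
+ * placeholders):
+ *
+ *	dpkg_prepare_key_cfg(&kg_cfg, key_cfg_buf);
+ *	dpdmux_set_custom_key(mc_io, CMD_PRI_LOW, token, key_cfg_iova);
+ *	dpdmux_add_custom_cls_entry(mc_io, CMD_PRI_LOW, token, &rule, &action);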
+ */ +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t key_cfg_iova) +{ + struct dpdmux_set_custom_key *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY, + cmd_flags, + token); + cmd_params = (struct dpdmux_set_custom_key *)cmd.params; + cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_add_custom_cls_entry - Adds a custom classification entry. + * + * This API is only available for DPDMUX instances created with + * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key + * composition rule must be set up using dpdmux_set_custom_key. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @rule: Classification rule to insert. Rules cannot be duplicated, if a + * matching rule already exists, the action will be replaced. + * @action: Action to perform for matching traffic. + * + * @returns '0' on Success; Error code otherwise. + */ +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_rule_cfg *rule, + struct dpdmux_cls_action *action) +{ + struct dpdmux_cmd_add_custom_cls_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY, + cmd_flags, + token); + + cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params; + cmd_params->key_size = rule->key_size; + cmd_params->dest_if = cpu_to_le16(action->dest_if); + cmd_params->key_iova = cpu_to_le64(rule->key_iova); + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_remove_custom_cls_entry - Removes a custom classification entry. + * + * This API is only available for DPDMUX instances created with + * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification + * entries previously inserted using dpdmux_add_custom_cls_entry. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @rule: Classification rule to remove + * + * @returns '0' on Success; Error code otherwise. + */ +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_rule_cfg *rule) +{ + struct dpdmux_cmd_remove_custom_cls_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY, + cmd_flags, + token); + cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params; + cmd_params->key_size = rule->key_size; + cmd_params->key_iova = cpu_to_le64(rule->key_iova); + cmd_params->mask_iova = cpu_to_le64(rule->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpdmux_get_api_version() - Get Data Path Demux API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path demux API + * @minor_ver: Minor version of data path demux API + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpdmux_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver) +{ + struct mc_command cmd = { 0 }; + struct dpdmux_rsp_get_api_version *rsp_params; + int err; + + cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION, + cmd_flags, + 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} diff --git a/src/seastar/dpdk/drivers/net/dpaa2/mc/dpkg.c b/src/seastar/dpdk/drivers/net/dpaa2/mc/dpkg.c new file mode 100644 index 000000000..80f94f40e --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/mc/dpkg.c @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2017 NXP + * + */ +#include <fsl_mc_sys.h> +#include <fsl_mc_cmd.h> +#include <fsl_dpkg.h> + +/** + * dpkg_prepare_key_cfg() - function prepare extract parameters + * @cfg: defining a full Key Generation profile (rule) + * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA + * + * This function has to be called before the following functions: + * - dpni_set_rx_tc_dist() + * - dpni_set_qos_table() + * - dpkg_prepare_key_cfg() + */ +int +dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf) +{ + int i, j; + struct dpni_ext_set_rx_tc_dist *dpni_ext; + struct dpni_dist_extract *extr; + + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS) + return -EINVAL; + + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf; + dpni_ext->num_extracts = cfg->num_extracts; + + for (i = 0; i < cfg->num_extracts; i++) { + extr = &dpni_ext->extracts[i]; + + switch (cfg->extracts[i].type) { + case DPKG_EXTRACT_FROM_HDR: + extr->prot = cfg->extracts[i].extract.from_hdr.prot; + dpkg_set_field(extr->efh_type, EFH_TYPE, + cfg->extracts[i].extract.from_hdr.type); + extr->size = cfg->extracts[i].extract.from_hdr.size; + extr->offset = cfg->extracts[i].extract.from_hdr.offset; + extr->field = cpu_to_le32( + cfg->extracts[i].extract.from_hdr.field); + extr->hdr_index = + cfg->extracts[i].extract.from_hdr.hdr_index; + break; + case DPKG_EXTRACT_FROM_DATA: + extr->size = cfg->extracts[i].extract.from_data.size; + extr->offset = + cfg->extracts[i].extract.from_data.offset; + break; + case DPKG_EXTRACT_FROM_PARSE: + extr->size = cfg->extracts[i].extract.from_parse.size; + extr->offset = + cfg->extracts[i].extract.from_parse.offset; + break; + default: + return -EINVAL; + } + + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks; + dpkg_set_field(extr->extract_type, EXTRACT_TYPE, + cfg->extracts[i].type); + + for (j = 0; j < DPKG_NUM_OF_MASKS; j++) { + extr->masks[j].mask = cfg->extracts[i].masks[j].mask; + extr->masks[j].offset = + cfg->extracts[i].masks[j].offset; + } + } + + return 0; +} diff --git a/src/seastar/dpdk/drivers/net/dpaa2/mc/dpni.c b/src/seastar/dpdk/drivers/net/dpaa2/mc/dpni.c new file mode 100644 index 000000000..362cd476f --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/mc/dpni.c @@ -0,0 +1,2486 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016 NXP + * + */ +#include <fsl_mc_sys.h> +#include <fsl_mc_cmd.h> +#include <fsl_dpni.h> +#include <fsl_dpni_cmd.h> + +/** + * dpni_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpni_id: DPNI unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpni_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpni_id, + uint16_t *token) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_open *cmd_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpni_cmd_open *)cmd.params; + cmd_params->dpni_id = cpu_to_le32(dpni_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpni_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_create() - Create the DPNI object + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: Returned object id + * + * Create the DPNI object, allocate required resources and + * perform required initialization. + * + * The object can be created either by declaring it in the + * DPL file, or by calling this function. + * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. 
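+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes a valid MC portal 'mc_io'; a 'dprc_token' of 0 selects the
+ * default container and the configuration values are placeholders only):
+ *
+ *	struct dpni_cfg cfg = { 0 };
+ *	uint32_t obj_id;
+ *	int err;
+ *
+ *	cfg.num_queues = 1;
+ *	cfg.num_tcs = 1;
+ *	err = dpni_create(mc_io, 0, CMD_PRI_LOW, &cfg, &obj_id);
+ *	if (err)
+ *		return err;
+ *
+ * The new object can then be opened with dpni_open(), passing 'obj_id' as
+ * the DPNI id to obtain the token used by all subsequent commands.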
+ */ +int dpni_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpni_cfg *cfg, + uint32_t *obj_id) +{ + struct dpni_cmd_create *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, + cmd_flags, + dprc_token); + cmd_params = (struct dpni_cmd_create *)cmd.params; + cmd_params->options = cpu_to_le32(cfg->options); + cmd_params->num_queues = cfg->num_queues; + cmd_params->num_tcs = cfg->num_tcs; + cmd_params->mac_filter_entries = cfg->mac_filter_entries; + cmd_params->num_rx_tcs = cfg->num_rx_tcs; + cmd_params->vlan_filter_entries = cfg->vlan_filter_entries; + cmd_params->qos_entries = cfg->qos_entries; + cmd_params->fs_entries = cpu_to_le16(cfg->fs_entries); + cmd_params->num_cgs = cfg->num_cgs; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *obj_id = mc_cmd_read_object_id(&cmd); + + return 0; +} + +/** + * dpni_destroy() - Destroy the DPNI object and release all its resources. + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @object_id: The object id; it must be a valid id within the container that + * created this object; + * + * The function accepts the authentication token of the parent container that + * created the object (not the one that currently owns the object). The object + * is searched within parent using the provided 'object_id'. + * All tokens to the object must be closed before calling destroy. + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id) +{ + struct dpni_cmd_destroy *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, + cmd_flags, + dprc_token); + /* set object id to destroy */ + cmd_params = (struct dpni_cmd_destroy *)cmd.params; + cmd_params->dpsw_id = cpu_to_le32(object_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_pools() - Set buffer pools configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Buffer pools configuration + * + * mandatory for DPNI operation + * warning:Allowed only when DPNI is disabled + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_pools(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_pools_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_pools *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_pools *)cmd.params; + cmd_params->num_dpbp = cfg->num_dpbp; + for (i = 0; i < cmd_params->num_dpbp; i++) { + cmd_params->pool[i].dpbp_id = + cpu_to_le16(cfg->pools[i].dpbp_id); + cmd_params->pool[i].priority_mask = + cfg->pools[i].priority_mask; + cmd_params->buffer_size[i] = + cpu_to_le16(cfg->pools[i].buffer_size); + cmd_params->backup_pool_mask |= + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i); + } + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_enable() - Enable the DPNI, allow sending and receiving frames. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_disable() - Disable the DPNI, stop sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_is_enabled() - Check if the DPNI is enabled. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_is_enabled *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_reset() - Reset the DPNI, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_irq_enable() - Set overall interrupt state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Interrupt state: - enable = 1, disable = 0 + * + * Allows GPP software to control when interrupts are generated. + * Each interrupt can have up to 32 causes. The enable/disable control's the + * overall interrupt state. if the interrupt is disabled no causes will cause + * an interrupt. + * + * Return: '0' on Success; Error code otherwise. 
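+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes 'mc_io' and a 'token' obtained from dpni_open(); interrupt
+ * index 0 is used purely as a placeholder):
+ *
+ *	int err;
+ *
+ *	err = dpni_set_irq_enable(mc_io, CMD_PRI_LOW, token, 0, 1);
+ *	if (err)
+ *		return err;
+ *
+ * Individual causes can afterwards be selected with dpni_set_irq_mask().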
+ */ +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_enable *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_enable() - Get overall interrupt state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Returned interrupt state - enable = 1, disable = 0 + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_enable *cmd_params; + struct dpni_rsp_get_irq_enable *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: Event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t mask) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_mask *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_mask() - Get interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: Returned event mask to trigger interrupt + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *mask) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_mask *cmd_params; + struct dpni_rsp_get_irq_mask *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params; + *mask = le32_to_cpu(rsp_params->mask); + + return 0; +} + +/** + * dpni_get_irq_status() - Get the current status of any pending interrupts. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *status) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_status *cmd_params; + struct dpni_rsp_get_irq_status *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(*status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params; + *status = le32_to_cpu(rsp_params->status); + + return 0; +} + +/** + * dpni_clear_irq_status() - Clear a pending interrupt's status + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: bits to clear (W1C) - one bit per cause: + * 0 = don't change + * 1 = clear status bit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t status) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_clear_irq_status *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params; + cmd_params->irq_index = irq_index; + cmd_params->status = cpu_to_le32(status); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_attributes() - Retrieve DPNI attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @attr: Object's attributes + * + * Return: '0' on Success; Error code otherwise. 
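+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes 'mc_io' and a 'token' obtained from dpni_open()):
+ *
+ *	struct dpni_attr attr = { 0 };
+ *	int err;
+ *
+ *	err = dpni_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);
+ *	if (err)
+ *		return err;
+ *
+ * On success, fields such as attr.num_queues and attr.num_rx_tcs can be
+ * used to size the Rx/Tx queue configuration.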
+ */ +int dpni_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_attr *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_attr *)cmd.params; + attr->options = le32_to_cpu(rsp_params->options); + attr->num_queues = rsp_params->num_queues; + attr->num_rx_tcs = rsp_params->num_rx_tcs; + attr->num_tx_tcs = rsp_params->num_tx_tcs; + attr->mac_filter_entries = rsp_params->mac_filter_entries; + attr->vlan_filter_entries = rsp_params->vlan_filter_entries; + attr->qos_entries = rsp_params->qos_entries; + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries); + attr->qos_key_size = rsp_params->qos_key_size; + attr->fs_key_size = rsp_params->fs_key_size; + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version); + attr->num_cgs = rsp_params->num_cgs; + + return 0; +} + +/** + * dpni_set_errors_behavior() - Set errors behavior + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Errors configuration + * + * This function may be called numerous times with different + * error masks + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_error_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_errors_behavior *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params; + cmd_params->errors = cpu_to_le32(cfg->errors); + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action); + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_buffer_layout() - Retrieve buffer layout attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to retrieve configuration for + * @layout: Returns buffer layout attributes + * + * Return: '0' on Success; Error code otherwise. 
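+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes 'mc_io' and a 'token' obtained from dpni_open()):
+ *
+ *	struct dpni_buffer_layout layout = { 0 };
+ *	int err;
+ *
+ *	err = dpni_get_buffer_layout(mc_io, CMD_PRI_LOW, token,
+ *				     DPNI_QUEUE_RX, &layout);
+ *	if (err)
+ *		return err;
+ *
+ * layout.data_head_room and layout.data_align then describe how Rx buffers
+ * must be laid out by the mempool.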
+ */ +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_buffer_layout *cmd_params; + struct dpni_rsp_get_buffer_layout *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params; + layout->pass_timestamp = + (int)dpni_get_field(rsp_params->flags, PASS_TS); + layout->pass_parser_result = + (int)dpni_get_field(rsp_params->flags, PASS_PR); + layout->pass_frame_status = + (int)dpni_get_field(rsp_params->flags, PASS_FS); + layout->pass_sw_opaque = + (int)dpni_get_field(rsp_params->flags, PASS_SWO); + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size); + layout->data_align = le16_to_cpu(rsp_params->data_align); + layout->data_head_room = le16_to_cpu(rsp_params->head_room); + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room); + + return 0; +} + +/** + * dpni_set_buffer_layout() - Set buffer layout configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue this configuration applies to + * @layout: Buffer layout configuration + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Allowed only when DPNI is disabled + */ +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_buffer_layout *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->options = cpu_to_le16((uint16_t)layout->options); + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp); + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result); + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status); + dpni_set_field(cmd_params->flags, PASS_SWO, layout->pass_sw_opaque); + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size); + cmd_params->data_align = cpu_to_le16(layout->data_align); + cmd_params->head_room = cpu_to_le16(layout->data_head_room); + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_offload() - Set DPNI offload configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: Type of DPNI offload + * @config: Offload configuration. + * For checksum offloads, non-zero value enables the offload + * + * Return: '0' on Success; Error code otherwise. 
+ * + * @warning Allowed only when DPNI is disabled + */ + +int dpni_set_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t config) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_offload *cmd_params; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_offload *)cmd.params; + cmd_params->dpni_offload = type; + cmd_params->config = cpu_to_le32(config); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_offload() - Get DPNI offload configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: Type of DPNI offload + * @config: Offload configuration. + * For checksum offloads, a value of 1 indicates that the + * offload is enabled. + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Allowed only when DPNI is disabled + */ +int dpni_get_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t *config) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_offload *cmd_params; + struct dpni_rsp_get_offload *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_offload *)cmd.params; + cmd_params->dpni_offload = type; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_offload *)cmd.params; + *config = le32_to_cpu(rsp_params->config); + + return 0; +} + +/** + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used + * for enqueue operations + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to receive QDID for + * @qdid: Returned virtual QDID value that should be used as an argument + * in all enqueue operations + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_qdid(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint16_t *qdid) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_qdid *cmd_params; + struct dpni_rsp_get_qdid *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params; + *qdid = le16_to_cpu(rsp_params->qdid); + + return 0; +} + +/** + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @data_offset: Tx data offset (from start of buffer) + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *data_offset) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_tx_data_offset *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params; + *data_offset = le16_to_cpu(rsp_params->data_offset); + + return 0; +} + +/** + * dpni_set_link_cfg() - set the link configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Link configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_link_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_link_cfg *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params; + cmd_params->rate = cpu_to_le32(cfg->rate); + cmd_params->options = cpu_to_le64(cfg->options); + cmd_params->advertising = cpu_to_le64(cfg->advertising); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_link_state() - Return the link state (either up or down) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @state: Returned link state; + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_link_state *state) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_link_state *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params; + state->up = dpni_get_field(rsp_params->flags, LINK_STATE); + state->state_valid = dpni_get_field(rsp_params->flags, STATE_VALID); + state->rate = le32_to_cpu(rsp_params->rate); + state->options = le64_to_cpu(rsp_params->options); + state->supported = le64_to_cpu(rsp_params->supported); + state->advertising = le64_to_cpu(rsp_params->advertising); + + return 0; +} + +/** + * dpni_set_max_frame_length() - Set the maximum received frame length. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in bytes); + * frame is discarded if its length exceeds this value + * + * Return: '0' on Success; Error code otherwise. 
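+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes 'mc_io' and a 'token' obtained from dpni_open(); 1522 is a
+ * placeholder covering a 1500-byte MTU plus Ethernet and VLAN overhead):
+ *
+ *	int err;
+ *
+ *	err = dpni_set_max_frame_length(mc_io, CMD_PRI_LOW, token, 1522);
+ *	if (err)
+ *		return err;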
+ */ +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_max_frame_length *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params; + cmd_params->max_frame_length = cpu_to_le16(max_frame_length); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_max_frame_length() - Get the maximum received frame length. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in bytes); + * frame is discarded if its length exceeds this value + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_max_frame_length *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params; + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length); + + return 0; +} + +/** + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_multicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_multicast_promisc() - Get multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_multicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_unicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_unicast_promisc() - Get unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_unicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_primary_mac_addr() - Set the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to set as primary address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_primary_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_primary_mac_addr() - Get the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: Returned MAC address + * + * Return: '0' on Success; Error code otherwise. 
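+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes 'mc_io' and a 'token' obtained from dpni_open(); the
+ * byte-order swap used by the MC is handled inside the API):
+ *
+ *	uint8_t mac[6];
+ *	int err;
+ *
+ *	err = dpni_get_primary_mac_addr(mc_io, CMD_PRI_LOW, token, mac);
+ *	if (err)
+ *		return err;
+ *
+ * mac[0]..mac[5] then hold the address with the first transmitted octet
+ * in mac[0], i.e. the order in which MAC addresses are usually written.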
+ */ +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_primary_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_add_mac_addr() - Add MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to add + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_add_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_mac_addr() - Remove MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_remove_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @unicast: Set to '1' to clear unicast addresses + * @multicast: Set to '1' to clear multicast addresses + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. 
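+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes 'mc_io' and a 'token' obtained from dpni_open()); clearing
+ * both unicast and multicast entries while keeping the primary address:
+ *
+ *	int err;
+ *
+ *	err = dpni_clear_mac_filters(mc_io, CMD_PRI_LOW, token, 1, 1);
+ *	if (err)
+ *		return err;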
+ */ +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int unicast, + int multicast) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_clear_mac_filters *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params; + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast); + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical + * port the DPNI is attached to + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address of the physical port, if any, otherwise 0 + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_port_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en) +{ + struct dpni_cmd_enable_vlan_filter *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params; + dpni_set_field(cmd_params->en, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_vlan_id() - Add VLAN ID filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @vlan_id: VLAN ID to add + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id) +{ + struct dpni_cmd_vlan_id *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_vlan_id *)cmd.params; + cmd_params->vlan_id = cpu_to_le16(vlan_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_vlan_id() - Remove VLAN ID filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @vlan_id: VLAN ID to remove + * + * Return: '0' on Success; Error code otherwise. 
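+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes 'mc_io', a 'token' obtained from dpni_open(), and that VLAN
+ * filtering was turned on with dpni_enable_vlan_filter(); VLAN ID 100 is
+ * a placeholder):
+ *
+ *	int err;
+ *
+ *	err = dpni_add_vlan_id(mc_io, CMD_PRI_LOW, token, 100);
+ *	if (err)
+ *		return err;
+ *	err = dpni_remove_vlan_id(mc_io, CMD_PRI_LOW, token, 100);
+ *	if (err)
+ *		return err;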
+ */ +int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id) +{ + struct dpni_cmd_vlan_id *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_vlan_id *)cmd.params; + cmd_params->vlan_id = cpu_to_le16(vlan_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_vlan_filters() - Clear all VLAN filters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @cfg: Traffic class distribution configuration + * + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpkg_prepare_key_cfg() + * first to prepare the key_cfg_iova parameter + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + const struct dpni_rx_tc_dist_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_rx_tc_dist *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + cmd_params->tc_id = tc_id; + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id); + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + dpni_set_field(cmd_params->flags, + DIST_MODE, + cfg->dist_mode); + dpni_set_field(cmd_params->flags, + MISS_ACTION, + cfg->fs_cfg.miss_action); + dpni_set_field(cmd_params->keep_hash_key, + KEEP_HASH_KEY, + cfg->fs_cfg.keep_hash_key); + dpni_set_field(cmd_params->keep_hash_key, + KEEP_ENTRIES, + cfg->fs_cfg.keep_entries); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_tx_confirmation_mode() - Tx confirmation mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mode: Tx confirmation mode + * + * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not + * selected at DPNI creation. + * Calling this function with 'mode' set to DPNI_CONF_DISABLE disables all + * transmit confirmation (including the private confirmation queues), regardless + * of previous settings; Note that in this case, Tx error frames are still + * enqueued to the general transmit errors queue. + * Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all + * Tx confirmations to a shared Tx conf queue. 'index' field in dpni_get_queue + * command will be ignored. + * + * Return: '0' on Success; Error code otherwise. 
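+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes 'mc_io' and a 'token' obtained from dpni_open()); disabling
+ * all Tx confirmations, with error frames still delivered to the general
+ * Tx error queue as noted above:
+ *
+ *	int err;
+ *
+ *	err = dpni_set_tx_confirmation_mode(mc_io, CMD_PRI_LOW, token,
+ *					    DPNI_CONF_DISABLE);
+ *	if (err)
+ *		return err;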
+ */ +int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_confirmation_mode mode) +{ + struct dpni_tx_confirmation_mode *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONFIRMATION_MODE, + cmd_flags, + token); + cmd_params = (struct dpni_tx_confirmation_mode *)cmd.params; + cmd_params->confirmation_mode = mode; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_qos_table() - Set QoS mapping table + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: QoS table configuration + * + * This function and all QoS-related functions require that + *'max_tcs > 1' was set at DPNI creation. + * + * warning: Before calling this function, call dpkg_prepare_key_cfg() to + * prepare the key_cfg_iova parameter + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_qos_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_qos_tbl_cfg *cfg) +{ + struct dpni_cmd_set_qos_table *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params; + cmd_params->default_tc = cfg->default_tc; + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + dpni_set_field(cmd_params->discard_on_miss, + ENABLE, + cfg->discard_on_miss); + dpni_set_field(cmd_params->discard_on_miss, + KEEP_QOS_ENTRIES, + cfg->keep_entries); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: QoS rule to add + * @tc_id: Traffic class selection (0-7) + * @index: Location in the QoS table where to insert the entry. + * Only relevant if MASKING is enabled for QoS classification on + * this DPNI, it is ignored for exact match. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_qos_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_rule_cfg *cfg, + uint8_t tc_id, + uint16_t index) +{ + struct dpni_cmd_add_qos_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->index = cpu_to_le16(index); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_qos_entry() - Remove QoS mapping entry + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: QoS rule to remove + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_rule_cfg *cfg) +{ + struct dpni_cmd_remove_qos_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params; + cmd_params->key_size = cfg->key_size; + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_qos_table() - Clear all QoS mapping entries + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Following this function call, all frames are directed to + * the default traffic class (0) + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_qos_table(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class + * (to select a flow ID) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @index: Location in the QoS table where to insert the entry. + * Only relevant if MASKING is enabled for QoS classification + * on this DPNI, it is ignored for exact match. + * @cfg: Flow steering rule to add + * @action: Action to be taken as result of a classification hit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_fs_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + uint16_t index, + const struct dpni_rule_cfg *cfg, + const struct dpni_fs_action_cfg *action) +{ + struct dpni_cmd_add_fs_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_fs_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->index = cpu_to_le16(index); + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + cmd_params->options = cpu_to_le16(action->options); + cmd_params->flow_id = cpu_to_le16(action->flow_id); + cmd_params->flc = cpu_to_le64(action->flc); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific + * traffic class + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @cfg: Flow steering rule to remove + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + const struct dpni_rule_cfg *cfg) +{ + struct dpni_cmd_remove_fs_entry *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_fs_entry *)cmd.params; + cmd_params->tc_id = tc_id; + cmd_params->key_size = cfg->key_size; + cmd_params->key_iova = cpu_to_le64(cfg->key_iova); + cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific + * traffic class + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id) +{ + struct dpni_cmd_clear_fs_entries *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_fs_entries *)cmd.params; + cmd_params->tc_id = tc_id; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_congestion_notification() - Set traffic class congestion + * notification configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported + * @tc_id: Traffic class selection (0-7) + * @cfg: congestion notification configuration + * + * Return: '0' on Success; error code otherwise. 
+ */ +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + const struct dpni_congestion_notification_cfg *cfg) +{ + struct dpni_cmd_set_congestion_notification *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_SET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc_id; + cmd_params->congestion_point = cfg->cg_point; + cmd_params->cgid = (uint8_t)cfg->cgid; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); + cmd_params->dest_priority = cfg->dest_cfg.priority; + cmd_params->message_iova = cpu_to_le64(cfg->message_iova); + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit); + dpni_set_field(cmd_params->type_units, + DEST_TYPE, + cfg->dest_cfg.dest_type); + dpni_set_field(cmd_params->type_units, + CONG_UNITS, + cfg->units); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_congestion_notification() - Get traffic class congestion + * notification configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported + * @tc_id: Traffic class selection (0-7) + * @cfg: congestion notification configuration + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_get_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + struct dpni_congestion_notification_cfg *cfg) +{ + struct dpni_rsp_get_congestion_notification *rsp_params; + struct dpni_cmd_get_congestion_notification *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_GET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc_id; + cmd_params->congestion_point = cfg->cg_point; + cmd_params->cgid = cfg->cgid; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params; + cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS); + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); + cfg->message_iova = le64_to_cpu(rsp_params->message_iova); + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); + cfg->dest_cfg.priority = rsp_params->dest_priority; + cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units, + DEST_TYPE); + + return 0; +} + +/** + * dpni_get_api_version() - Get Data Path Network Interface API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path network interface API + * @minor_ver: Minor version of data path network 
interface API + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver) +{ + struct dpni_rsp_get_api_version *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION, + cmd_flags, + 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} + +/** + * dpni_set_queue() - Set queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported, although + * the command is ignored for Tx + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. Value must be in range 0 to NUM_QUEUES - 1 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what + * configuration options are set on the queue + * @queue: Queue structure + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + uint8_t options, + const struct dpni_queue *queue) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_queue *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + cmd_params->options = options; + cmd_params->dest_id = cpu_to_le32(queue->destination.id); + cmd_params->dest_prio = queue->destination.priority; + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type); + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control); + dpni_set_field(cmd_params->flags, HOLD_ACTIVE, + queue->destination.hold_active); + cmd_params->flc = cpu_to_le64(queue->flc.value); + cmd_params->user_context = cpu_to_le64(queue->user_context); + cmd_params->cgid = queue->cgid; + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_queue() - Get queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. Value must be in range 0 to NUM_QUEUES - 1 + * @queue: Queue configuration structure + * @qid: Queue identification + * + * Return: '0' on Success; Error code otherwise. 
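+ *
+ * Example (editor's illustrative sketch, not part of the original file;
+ * it assumes 'mc_io' and a 'token' obtained from dpni_open(); traffic
+ * class 0 and queue index 0 are placeholders):
+ *
+ *	struct dpni_queue queue = { 0 };
+ *	struct dpni_queue_id qid = { 0 };
+ *	int err;
+ *
+ *	err = dpni_get_queue(mc_io, CMD_PRI_LOW, token, DPNI_QUEUE_RX,
+ *			     0, 0, &queue, &qid);
+ *	if (err)
+ *		return err;
+ *
+ * qid.fqid is the frame queue ID to use when dequeuing from this Rx queue,
+ * and qid.qdbin identifies the queuing destination bin for enqueues.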
+ */ +int dpni_get_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_queue *queue, + struct dpni_queue_id *qid) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_queue *cmd_params; + struct dpni_rsp_get_queue *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_queue *)cmd.params; + queue->destination.id = le32_to_cpu(rsp_params->dest_id); + queue->destination.priority = rsp_params->dest_prio; + queue->destination.type = dpni_get_field(rsp_params->flags, + DEST_TYPE); + queue->flc.stash_control = dpni_get_field(rsp_params->flags, + STASH_CTRL); + queue->destination.hold_active = dpni_get_field(rsp_params->flags, + HOLD_ACTIVE); + queue->flc.value = le64_to_cpu(rsp_params->flc); + queue->user_context = le64_to_cpu(rsp_params->user_context); + qid->fqid = le32_to_cpu(rsp_params->fqid); + qid->qdbin = le16_to_cpu(rsp_params->qdbin); + if (dpni_get_field(rsp_params->flags, CGID_VALID)) + queue->cgid = rsp_params->cgid; + else + queue->cgid = -1; + + return 0; +} + +/** + * dpni_get_statistics() - Get DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @page: Selects the statistics page to retrieve, see + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 3. + * @param: Custom parameter for some pages used to select + * a certain statistic source, for example the TC. + * @stat: Structure containing the statistics + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t page, + uint16_t param, + union dpni_statistics *stat) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_statistics *cmd_params; + struct dpni_rsp_get_statistics *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params; + cmd_params->page_number = page; + cmd_params->param = param; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params; + for (i = 0; i < DPNI_STATISTICS_CNT; i++) + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]); + + return 0; +} + +/** + * dpni_reset_statistics() - Clears DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. 
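+ *
+ * Illustrative usage sketch (assumes an open DPNI token and no special
+ * command flags): read page 0 of the counters with dpni_get_statistics()
+ * and then clear all counters; 'rx_frames' is a caller-defined variable:
+ *
+ *	union dpni_statistics stat;
+ *	uint64_t rx_frames;
+ *	int err = dpni_get_statistics(mc_io, 0, token, 0, 0, &stat);
+ *	if (!err) {
+ *		rx_frames = stat.page_0.ingress_all_frames;
+ *		err = dpni_reset_statistics(mc_io, 0, token);
+ *	}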
+ */ +int dpni_reset_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_taildrop() - Set taildrop per queue or TC + * + * Setting a per-TC taildrop (cg_point = DPNI_CP_GROUP) will reset any current + * congestion notification or early drop (WRED) configuration previously applied + * to the same TC. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point, DPNI_CP_QUEUE is only supported in + * combination with DPNI_QUEUE_RX. + * @q_type: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX. + * @tc: Traffic class to apply this taildrop to + * @q_index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. + * Ignored if CONGESTION_POINT is not DPNI_CP_QUEUE. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_taildrop *taildrop) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_taildrop *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + cmd_params->units = taildrop->units; + cmd_params->threshold = cpu_to_le32(taildrop->threshold); + dpni_set_field(cmd_params->enable_oal_lo, ENABLE, taildrop->enable); + dpni_set_field(cmd_params->enable_oal_lo, OAL_LO, taildrop->oal); + dpni_set_field(cmd_params->oal_hi, + OAL_HI, + taildrop->oal >> DPNI_OAL_LO_SIZE); + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_taildrop() - Get taildrop information + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point + * @q_type: Queue type on which the taildrop is configured. + * Only Rx queues are supported for now + * @tc: Traffic class to apply this taildrop to + * @q_index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. Ignored if CONGESTION_POINT is not 0. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise. 
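+ *
+ * Illustrative usage sketch (assumes an open DPNI token; the threshold value
+ * is only an example): cap Rx queue 0 of traffic class 0 at 64 KiB of
+ * buffered data, then read the setting back:
+ *
+ *	struct dpni_taildrop td = {0};
+ *	int err;
+ *	td.enable = 1;
+ *	td.units = DPNI_CONGESTION_UNIT_BYTES;
+ *	td.threshold = 64 * 1024;
+ *	err = dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
+ *				DPNI_QUEUE_RX, 0, 0, &td);
+ *	if (!err)
+ *		err = dpni_get_taildrop(mc_io, 0, token, DPNI_CP_QUEUE,
+ *					DPNI_QUEUE_RX, 0, 0, &td);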
+ */ +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_taildrop *taildrop) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_taildrop *cmd_params; + struct dpni_rsp_get_taildrop *rsp_params; + uint8_t oal_lo, oal_hi; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params; + taildrop->enable = dpni_get_field(rsp_params->enable_oal_lo, ENABLE); + taildrop->units = rsp_params->units; + taildrop->threshold = le32_to_cpu(rsp_params->threshold); + oal_lo = dpni_get_field(rsp_params->enable_oal_lo, OAL_LO); + oal_hi = dpni_get_field(rsp_params->oal_hi, OAL_HI); + taildrop->oal = oal_hi << DPNI_OAL_LO_SIZE | oal_lo; + + /* Fill the first 4 bits, 'oal' is a 2's complement value of 12 bits */ + if (taildrop->oal >= 0x0800) + taildrop->oal |= 0xF000; + + return 0; +} + +/** + * dpni_set_opr() - Set Order Restoration configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated + * for the same TC. Value must be in range 0 to + * NUM_QUEUES - 1 + * @options: Configuration mode options + * can be OPR_OPT_CREATE or OPR_OPT_RETIRE + * @cfg: Configuration options for the OPR + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_opr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc, + uint8_t index, + uint8_t options, + struct opr_cfg *cfg) +{ + struct dpni_cmd_set_opr *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_SET_OPR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_opr *)cmd.params; + cmd_params->tc_id = tc; + cmd_params->index = index; + cmd_params->options = options; + cmd_params->oloe = cfg->oloe; + cmd_params->oeane = cfg->oeane; + cmd_params->olws = cfg->olws; + cmd_params->oa = cfg->oa; + cmd_params->oprrws = cfg->oprrws; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_opr() - Retrieve Order Restoration config and query. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated + * for the same TC. Value must be in range 0 to + * NUM_QUEUES - 1 + * @cfg: Returned OPR configuration + * @qry: Returned OPR query + * + * Return: '0' on Success; Error code otherwise. 
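+ *
+ * Illustrative usage sketch (assumes an open DPNI token; the zeroed opr_cfg
+ * is only a placeholder, real window-size and mode fields depend on the
+ * desired restoration behaviour): create an order restoration point on
+ * queue 0 of TC 0 and then query its state:
+ *
+ *	struct opr_cfg cfg = {0};
+ *	struct opr_qry qry;
+ *	int err = dpni_set_opr(mc_io, 0, token, 0, 0, OPR_OPT_CREATE, &cfg);
+ *	if (!err)
+ *		err = dpni_get_opr(mc_io, 0, token, 0, 0, &cfg, &qry);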
+ */ +int dpni_get_opr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc, + uint8_t index, + struct opr_cfg *cfg, + struct opr_qry *qry) +{ + struct dpni_rsp_get_opr *rsp_params; + struct dpni_cmd_get_opr *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OPR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_opr *)cmd.params; + cmd_params->index = index; + cmd_params->tc_id = tc; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_opr *)cmd.params; + cfg->oloe = rsp_params->oloe; + cfg->oeane = rsp_params->oeane; + cfg->olws = rsp_params->olws; + cfg->oa = rsp_params->oa; + cfg->oprrws = rsp_params->oprrws; + qry->rip = dpni_get_field(rsp_params->flags, RIP); + qry->enable = dpni_get_field(rsp_params->flags, OPR_ENABLE); + qry->nesn = le16_to_cpu(rsp_params->nesn); + qry->ndsn = le16_to_cpu(rsp_params->ndsn); + qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq); + qry->tseq_nlis = dpni_get_field(rsp_params->tseq_nlis, TSEQ_NLIS); + qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq); + qry->hseq_nlis = dpni_get_field(rsp_params->hseq_nlis, HSEQ_NLIS); + qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr); + qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr); + qry->opr_vid = le16_to_cpu(rsp_params->opr_vid); + qry->opr_id = le16_to_cpu(rsp_params->opr_id); + + return 0; +} + +/** + * dpni_set_rx_fs_dist() - Set Rx traffic class FS distribution + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Distribution configuration + * If the FS is already enabled with a previous call the classification + * key will be changed but all the table rules are kept. If the + * existing rules do not match the key the results will not be + * predictable. It is the user responsibility to keep key integrity + * If cfg.enable is set to 1 the command will create a flow steering table + * and will classify packets according to this table. The packets + * that miss all the table rules will be classified according to + * settings made in dpni_set_rx_hash_dist() + * If cfg.enable is set to 0 the command will clear flow steering table. 
The + * packets will be classified according to settings made in + * dpni_set_rx_hash_dist() + */ +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, const struct dpni_rx_dist_cfg *cfg) +{ + struct dpni_cmd_set_rx_fs_dist *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FS_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_fs_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable); + cmd_params->tc = cfg->tc; + cmd_params->miss_flow_id = cpu_to_le16(cfg->fs_miss_flow_id); + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_rx_hash_dist() - Set Rx traffic class HASH distribution + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Distribution configuration + * If cfg.enable is set to 1 the packets will be classified using a hash + * function based on the key received in cfg.key_cfg_iova parameter + * If cfg.enable is set to 0 the packets will be sent to the queue configured in + * dpni_set_rx_dist_default_queue() call + */ +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, const struct dpni_rx_dist_cfg *cfg) +{ + struct dpni_cmd_set_rx_hash_dist *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_HASH_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_hash_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + dpni_set_field(cmd_params->enable, RX_FS_DIST_ENABLE, cfg->enable); + cmd_params->tc_id = cfg->tc; + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_custom_tpid() - Configures a distinct Ethertype value + * (or TPID value) to indicate VLAN tag in addition to the common + * TPID values 0x8100 and 0x88A8 + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tpid: New value for TPID + * + * Only two custom values are accepted. If the function is called for the third + * time it will return error. + * To replace an existing value use dpni_remove_custom_tpid() to remove + * a previous TPID and after that use again the function. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, uint16_t tpid) +{ + struct dpni_cmd_add_custom_tpid *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_CUSTOM_TPID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_custom_tpid *)cmd.params; + cmd_params->tpid = cpu_to_le16(tpid); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_custom_tpid() - Removes a distinct Ethertype value added + * previously with dpni_add_custom_tpid() + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tpid: New value for TPID + * + * Use this function when a TPID value added with dpni_add_custom_tpid() needs + * to be replaced. 
+ * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, uint16_t tpid) +{ + struct dpni_cmd_remove_custom_tpid *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_CUSTOM_TPID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_custom_tpid *)cmd.params; + cmd_params->tpid = cpu_to_le16(tpid); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_custom_tpid() - Returns custom TPID (vlan tags) values configured + * to detect 802.1q frames + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tpid: TPID values. Only nonzero members of the structure are valid. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, struct dpni_custom_tpid_cfg *tpid) +{ + struct dpni_rsp_get_custom_tpid *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_CUSTOM_TPID, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* read command response */ + rsp_params = (struct dpni_rsp_get_custom_tpid *)cmd.params; + tpid->tpid1 = le16_to_cpu(rsp_params->tpid1); + tpid->tpid2 = le16_to_cpu(rsp_params->tpid2); + + return err; +} diff --git a/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux.h b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux.h new file mode 100644 index 000000000..c69cb7aab --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux.h @@ -0,0 +1,410 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2018 NXP
+ *
+ */
+#ifndef __FSL_DPDMUX_H
+#define __FSL_DPDMUX_H
+
+#include <fsl_net.h>
+
+struct fsl_mc_io;
+
+/** @addtogroup dpdmux Data Path Demux API
+ * Contains API for handling DPDMUX topology and functionality
+ * @{
+ */
+
+int dpdmux_open(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		int dpdmux_id,
+		uint16_t *token);
+
+int dpdmux_close(struct fsl_mc_io *mc_io,
+		 uint32_t cmd_flags,
+		 uint16_t token);
+
+/**
+ * DPDMUX general options
+ */
+
+/**
+ * Enable bridging between internal interfaces
+ */
+#define DPDMUX_OPT_BRIDGE_EN	0x0000000000000002ULL
+
+/**
+ * Mask support for classification
+ */
+#define DPDMUX_OPT_CLS_MASK_SUPPORT	0x0000000000000020ULL
+
+#define DPDMUX_IRQ_INDEX_IF	0x0000
+#define DPDMUX_IRQ_INDEX	0x0001
+
+/**
+ * IRQ event - Indicates that the link state changed
+ */
+#define DPDMUX_IRQ_EVENT_LINK_CHANGED	0x0001
+
+/**
+ * enum dpdmux_manip - DPDMUX manipulation operations
+ * @DPDMUX_MANIP_NONE: No manipulation on frames
+ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
+ */
+enum dpdmux_manip {
+	DPDMUX_MANIP_NONE = 0x0,
+	DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
+};
+
+/**
+ * enum dpdmux_method - DPDMUX method options
+ * @DPDMUX_METHOD_NONE: no DPDMUX method
+ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
+ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
+ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
+ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
+ * @DPDMUX_METHOD_CUSTOM: DPDMUX based on a custom key
+ */
+enum dpdmux_method {
+	DPDMUX_METHOD_NONE = 0x0,
+	DPDMUX_METHOD_C_VLAN_MAC = 0x1,
+	DPDMUX_METHOD_MAC = 0x2,
+	DPDMUX_METHOD_C_VLAN = 0x3,
+	DPDMUX_METHOD_S_VLAN = 0x4,
+	DPDMUX_METHOD_CUSTOM = 0x5,
+};
+
+/**
+ * struct dpdmux_cfg - DPDMUX configuration parameters
+ * @method: Defines the operation method for the DPDMUX address table
+ * @manip: Required manipulation operation
+ * @num_ifs: Number of interfaces (excluding the uplink interface)
+ * @adv: Advanced parameters; default is all zeros;
+ *	 use this structure to change default settings
+ * @adv.options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags.
+ * @adv.max_dmat_entries: Maximum entries in DPDMUX address table
+ *	 0 - indicates default: 64 entries per interface.
+ * @adv.max_mc_groups: Number of multicast groups in DPDMUX table
+ *	 0 - indicates default: 32 multicast groups.
+ * @adv.max_vlan_ids: Maximum VLAN IDs allowed in the system -
+ *	 relevant only when working in the mac+vlan method.
+ *	 0 - indicates default: 16 VLAN IDs.
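+ *
+ * Illustrative usage sketch (assumes 'mc_io' and a parent container token
+ * 'dprc_token' are already available): create a MAC-based demux with two
+ * downlink interfaces and default advanced parameters:
+ *
+ *	struct dpdmux_cfg cfg = {0};
+ *	uint32_t obj_id;
+ *	int err;
+ *	cfg.method = DPDMUX_METHOD_MAC;
+ *	cfg.manip = DPDMUX_MANIP_NONE;
+ *	cfg.num_ifs = 2;
+ *	err = dpdmux_create(mc_io, dprc_token, 0, &cfg, &obj_id);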
+ */ +struct dpdmux_cfg { + enum dpdmux_method method; + enum dpdmux_manip manip; + uint16_t num_ifs; + struct { + uint64_t options; + uint16_t max_dmat_entries; + uint16_t max_mc_groups; + uint16_t max_vlan_ids; + } adv; +}; + +int dpdmux_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpdmux_cfg *cfg, + uint32_t *obj_id); + +int dpdmux_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id); + +int dpdmux_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpdmux_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpdmux_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpdmux_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpdmux_attr - Structure representing DPDMUX attributes + * @id: DPDMUX object ID + * @options: Configuration options (bitmap) + * @method: DPDMUX address table method + * @manip: DPDMUX manipulation type + * @num_ifs: Number of interfaces (excluding the uplink interface) + * @mem_size: DPDMUX frame storage memory size + */ +struct dpdmux_attr { + int id; + uint64_t options; + enum dpdmux_method method; + enum dpdmux_manip manip; + uint16_t num_ifs; + uint16_t mem_size; +}; + +int dpdmux_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_attr *attr); + +int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length); + +/** + * enum dpdmux_counter_type - Counter types + * @DPDMUX_CNT_ING_FRAME: Counts ingress frames + * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes + * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames + * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames + * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames + * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes + * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames + * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes + * @DPDMUX_CNT_EGR_FRAME: Counts egress frames + * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes + * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames + */ +enum dpdmux_counter_type { + DPDMUX_CNT_ING_FRAME = 0x0, + DPDMUX_CNT_ING_BYTE = 0x1, + DPDMUX_CNT_ING_FLTR_FRAME = 0x2, + DPDMUX_CNT_ING_FRAME_DISCARD = 0x3, + DPDMUX_CNT_ING_MCAST_FRAME = 0x4, + DPDMUX_CNT_ING_MCAST_BYTE = 0x5, + DPDMUX_CNT_ING_BCAST_FRAME = 0x6, + DPDMUX_CNT_ING_BCAST_BYTES = 0x7, + DPDMUX_CNT_EGR_FRAME = 0x8, + DPDMUX_CNT_EGR_BYTE = 0x9, + DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa +}; + +/** + * enum dpdmux_accepted_frames_type - DPDMUX frame types + * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and + * priority-tagged frames + * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or + * priority-tagged frames that are received on this + * interface + * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames + * received on this interface are accepted + */ +enum dpdmux_accepted_frames_type { + DPDMUX_ADMIT_ALL = 0, + DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1, + DPDMUX_ADMIT_ONLY_UNTAGGED = 2 +}; + +/** + * enum dpdmux_action - DPDMUX action for un-accepted frames + * @DPDMUX_ACTION_DROP: Drop un-accepted frames + * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the + * control interface + */ +enum dpdmux_action { + DPDMUX_ACTION_DROP = 0, + 
DPDMUX_ACTION_REDIRECT_TO_CTRL = 1 +}; + +/** + * struct dpdmux_accepted_frames - Frame types configuration + * @type: Defines ingress accepted frames + * @unaccept_act: Defines action on frames not accepted + */ +struct dpdmux_accepted_frames { + enum dpdmux_accepted_frames_type type; + enum dpdmux_action unaccept_act; +}; + +int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_accepted_frames *cfg); + +/** + * struct dpdmux_if_attr - Structure representing frame types configuration + * @rate: Configured interface rate (in bits per second) + * @enabled: Indicates if interface is enabled + * @accept_frame_type: Indicates type of accepted frames for the interface + */ +struct dpdmux_if_attr { + uint32_t rate; + int enabled; + int is_default; + enum dpdmux_accepted_frames_type accept_frame_type; +}; + +int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_if_attr *attr); + +int dpdmux_if_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id); + +int dpdmux_if_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id); + +/** + * struct dpdmux_l2_rule - Structure representing L2 rule + * @mac_addr: MAC address + * @vlan_id: VLAN ID + */ +struct dpdmux_l2_rule { + uint8_t mac_addr[6]; + uint16_t vlan_id; +}; + +int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_l2_rule *rule); + +int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + const struct dpdmux_l2_rule *rule); + +int dpdmux_if_get_counter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + enum dpdmux_counter_type counter_type, + uint64_t *counter); + +int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * Enable auto-negotiation + */ +#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL +/** + * Enable half-duplex mode + */ +#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +/** + * Enable pause frames + */ +#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL +/** + * Enable a-symmetric pause frames + */ +#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL + +/** + * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration + * @rate: Rate + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values + */ +struct dpdmux_link_cfg { + uint32_t rate; + uint64_t options; + uint64_t advertising; +}; + +int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_link_cfg *cfg); +/** + * struct dpdmux_link_state - Structure representing DPDMUX link state + * @rate: Rate + * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values + * @up: 0 - down, 1 - up + * @state_valid: Ignore/Update the state of the link + * @supported: Speeds capability of the phy (bitmap) + * @advertising: Speeds that are advertised for autoneg (bitmap) + */ +struct dpdmux_link_state { + uint32_t rate; + uint64_t options; + int up; + int state_valid; + uint64_t supported; + uint64_t advertising; +}; + +int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t if_id, + struct dpdmux_link_state *state); + +int dpdmux_if_set_default(struct fsl_mc_io *mc_io, + 
uint32_t cmd_flags, + uint16_t token, + uint16_t if_id); + +int dpdmux_if_get_default(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *if_id); + +int dpdmux_set_custom_key(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint64_t key_cfg_iova); + +/** + * struct dpdmux_rule_cfg - Custom classification rule. + * + * @key_iova: DMA address of buffer storing the look-up value + * @mask_iova: DMA address of the mask used for TCAM classification + * @key_size: size, in bytes, of the look-up value. This must match the size + * of the look-up key defined using dpdmux_set_custom_key, otherwise the + * entry will never be hit + */ +struct dpdmux_rule_cfg { + uint64_t key_iova; + uint64_t mask_iova; + uint8_t key_size; +}; + +/** + * struct dpdmux_cls_action - Action to execute for frames matching the + * classification entry + * + * @dest_if: Interface to forward the frames to. Port numbering is similar to + * the one used to connect interfaces: + * - 0 is the uplink port, + * - all others are downlink ports. + */ +struct dpdmux_cls_action { + uint16_t dest_if; +}; + +int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_rule_cfg *rule, + struct dpdmux_cls_action *action); + +int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpdmux_rule_cfg *rule); + +int dpdmux_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver); + +#endif /* __FSL_DPDMUX_H */ diff --git a/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h new file mode 100644 index 000000000..a36349feb --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpdmux_cmd.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2018 NXP + * + */ +#ifndef _FSL_DPDMUX_CMD_H +#define _FSL_DPDMUX_CMD_H + +/* DPDMUX Version */ +#define DPDMUX_VER_MAJOR 6 +#define DPDMUX_VER_MINOR 3 + +#define DPDMUX_CMD_BASE_VERSION 1 +#define DPDMUX_CMD_VERSION_2 2 +#define DPDMUX_CMD_ID_OFFSET 4 + +#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) |\ + DPDMUX_CMD_BASE_VERSION) +#define DPDMUX_CMD_V2(id) (((id) << DPDMUX_CMD_ID_OFFSET) | \ + DPDMUX_CMD_VERSION_2) + +/* Command IDs */ +#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800) +#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806) +#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906) +#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986) +#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06) + +#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002) +#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003) +#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004) +#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005) +#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006) + +#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1) + +#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3) + +#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7) +#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8) +#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9) +#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa) + +#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0) +#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1) +#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2) +#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD_V2(0x0b3) +#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD_V2(0x0b4) + +#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5) +#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6) +#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7) + +#define DPDMUX_CMDID_IF_SET_DEFAULT DPDMUX_CMD(0x0b8) +#define DPDMUX_CMDID_IF_GET_DEFAULT DPDMUX_CMD(0x0b9) + +#define DPDMUX_MASK(field) \ + GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \ + DPDMUX_##field##_SHIFT) +#define dpdmux_set_field(var, field, val) \ + ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field))) +#define dpdmux_get_field(var, field) \ + (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT) + +#pragma pack(push, 1) +struct dpdmux_cmd_open { + uint32_t dpdmux_id; +}; + +struct dpdmux_cmd_create { + uint8_t method; + uint8_t manip; + uint16_t num_ifs; + uint32_t pad; + + uint16_t adv_max_dmat_entries; + uint16_t adv_max_mc_groups; + uint16_t adv_max_vlan_ids; + uint16_t pad1; + + uint64_t options; +}; + +struct dpdmux_cmd_destroy { + uint32_t dpdmux_id; +}; + +#define DPDMUX_ENABLE_SHIFT 0 +#define DPDMUX_ENABLE_SIZE 1 +#define DPDMUX_IS_DEFAULT_SHIFT 1 +#define DPDMUX_IS_DEFAULT_SIZE 1 + +struct dpdmux_rsp_is_enabled { + uint8_t en; +}; + +struct dpdmux_rsp_get_attr { + uint8_t method; + uint8_t manip; + uint16_t num_ifs; + uint16_t mem_size; + uint16_t pad; + + uint64_t pad1; + + uint32_t id; + uint32_t pad2; + + uint64_t options; +}; + +struct dpdmux_cmd_set_max_frame_length { + uint16_t max_frame_length; +}; + +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0 +#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4 +#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4 + +struct dpdmux_cmd_if_set_accepted_frames { + uint16_t if_id; + uint8_t frames_options; +}; + +struct dpdmux_cmd_if { + uint16_t if_id; +}; + +struct dpdmux_rsp_if_get_attr { + uint8_t pad[3]; + uint8_t enabled; + uint8_t pad1[3]; + uint8_t accepted_frames_type; + uint32_t rate; +}; + +struct dpdmux_cmd_if_l2_rule { + 
uint16_t if_id; + uint8_t mac_addr5; + uint8_t mac_addr4; + uint8_t mac_addr3; + uint8_t mac_addr2; + uint8_t mac_addr1; + uint8_t mac_addr0; + + uint32_t pad; + uint16_t vlan_id; +}; + +struct dpdmux_cmd_if_get_counter { + uint16_t if_id; + uint8_t counter_type; +}; + +struct dpdmux_rsp_if_get_counter { + uint64_t pad; + uint64_t counter; +}; + +struct dpdmux_cmd_if_set_link_cfg { + uint16_t if_id; + uint16_t pad[3]; + + uint32_t rate; + uint32_t pad1; + + uint64_t options; + uint64_t advertising; +}; + +struct dpdmux_cmd_if_get_link_state { + uint16_t if_id; +}; + +#define DPDMUX_UP_SHIFT 0 +#define DPDMUX_UP_SIZE 1 +#define DPDMUX_STATE_VALID_SHIFT 1 +#define DPDMUX_STATE_VALID_SIZE 1 +struct dpdmux_rsp_if_get_link_state { + uint32_t pad; + uint8_t up; + uint8_t pad1[3]; + + uint32_t rate; + uint32_t pad2; + + uint64_t options; + uint64_t supported; + uint64_t advertising; +}; + +struct dpdmux_rsp_get_api_version { + uint16_t major; + uint16_t minor; +}; + +struct dpdmux_set_custom_key { + uint64_t pad[6]; + uint64_t key_cfg_iova; +}; + +struct dpdmux_cmd_add_custom_cls_entry { + uint8_t pad[3]; + uint8_t key_size; + uint16_t pad1; + uint16_t dest_if; + uint64_t key_iova; + uint64_t mask_iova; +}; + +struct dpdmux_cmd_remove_custom_cls_entry { + uint8_t pad[3]; + uint8_t key_size; + uint32_t pad1; + uint64_t key_iova; + uint64_t mask_iova; +}; +#pragma pack(pop) +#endif /* _FSL_DPDMUX_CMD_H */ diff --git a/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h new file mode 100644 index 000000000..02fe8d50e --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright 2013-2015 Freescale Semiconductor Inc. + * Copyright 2016-2017 NXP + * + */ +#ifndef __FSL_DPKG_H_ +#define __FSL_DPKG_H_ + +#include <fsl_net.h> + +/* Data Path Key Generator API + * Contains initialization APIs and runtime APIs for the Key Generator + */ + +/** Key Generator properties */ + +/** + * Number of masks per key extraction + */ +#define DPKG_NUM_OF_MASKS 4 +/** + * Number of extractions per key profile + */ +#define DPKG_MAX_NUM_OF_EXTRACTS 10 + +/** + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field + * @DPKG_FULL_FIELD: Extract a full field + */ +enum dpkg_extract_from_hdr_type { + DPKG_FROM_HDR = 0, + DPKG_FROM_FIELD = 1, + DPKG_FULL_FIELD = 2 +}; + +/** + * enum dpkg_extract_type - Enumeration for selecting extraction type + * @DPKG_EXTRACT_FROM_HDR: Extract from the header + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; + * e.g. 
can be used to extract header existence; + * please refer to 'Parse Result definition' section in the parser BG + */ +enum dpkg_extract_type { + DPKG_EXTRACT_FROM_HDR = 0, + DPKG_EXTRACT_FROM_DATA = 1, + DPKG_EXTRACT_FROM_PARSE = 3 +}; + +/** + * struct dpkg_mask - A structure for defining a single extraction mask + * @mask: Byte mask for the extracted content + * @offset: Offset within the extracted content + */ +struct dpkg_mask { + uint8_t mask; + uint8_t offset; +}; + +/* Macros for accessing command fields smaller than 1byte */ +#define DPKG_MASK(field) \ + GENMASK(DPKG_##field##_SHIFT + DPKG_##field##_SIZE - 1, \ + DPKG_##field##_SHIFT) +#define dpkg_set_field(var, field, val) \ + ((var) |= (((val) << DPKG_##field##_SHIFT) & DPKG_MASK(field))) +#define dpkg_get_field(var, field) \ + (((var) & DPKG_MASK(field)) >> DPKG_##field##_SHIFT) + +/** + * struct dpkg_extract - A structure for defining a single extraction + * @type: Determines how the union below is interpreted: + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; + * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' + * @extract: Selects extraction method + * @extract.from_hdr: Used when 'type = DPKG_EXTRACT_FROM_HDR' + * @extract.from_data: Used when 'type = DPKG_EXTRACT_FROM_DATA' + * @extract.from_parse: Used when 'type = DPKG_EXTRACT_FROM_PARSE' + * @extract.from_hdr.prot: Any of the supported headers + * @extract.from_hdr.type: Defines the type of header extraction: + * DPKG_FROM_HDR: use size & offset below; + * DPKG_FROM_FIELD: use field, size and offset below; + * DPKG_FULL_FIELD: use field below + * @extract.from_hdr.field: One of the supported fields (NH_FLD_) + * @extract.from_hdr.size: Size in bytes + * @extract.from_hdr.offset: Byte offset + * @extract.from_hdr.hdr_index: Clear for cases not listed below; + * Used for protocols that may have more than a single + * header, 0 indicates an outer header; + * Supported protocols (possible values): + * NET_PROT_VLAN (0, HDR_INDEX_LAST); + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); + * NET_PROT_IP(0, HDR_INDEX_LAST); + * NET_PROT_IPv4(0, HDR_INDEX_LAST); + * NET_PROT_IPv6(0, HDR_INDEX_LAST); + * @extract.from_data.size: Size in bytes + * @extract.from_data.offset: Byte offset + * @extract.from_parse.size: Size in bytes + * @extract.from_parse.offset: Byte offset + * @num_of_byte_masks: Defines the number of valid entries in the array below; + * This is also the number of bytes to be used as masks + * @masks: Masks parameters + */ +struct dpkg_extract { + enum dpkg_extract_type type; + union { + struct { + enum net_prot prot; + enum dpkg_extract_from_hdr_type type; + uint32_t field; + uint8_t size; + uint8_t offset; + uint8_t hdr_index; + } from_hdr; + struct { + uint8_t size; + uint8_t offset; + } from_data; + struct { + uint8_t size; + uint8_t offset; + } from_parse; + } extract; + + uint8_t num_of_byte_masks; + struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; +}; + +/** + * struct dpkg_profile_cfg - A structure for defining a full Key Generation + * profile (rule) + * @num_extracts: Defines the number of valid entries in the array below + * @extracts: Array of required extractions + */ +struct dpkg_profile_cfg { + uint8_t num_extracts; + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; +}; + +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at + * key_cfg_iova) + */ +struct dpni_mask_cfg { + uint8_t mask; + uint8_t offset; +}; + +#define DPKG_EFH_TYPE_SHIFT 0 +#define DPKG_EFH_TYPE_SIZE 4 +#define 
DPKG_EXTRACT_TYPE_SHIFT 0 +#define DPKG_EXTRACT_TYPE_SIZE 4 + +struct dpni_dist_extract { + /* word 0 */ + uint8_t prot; + /* EFH type stored in the 4 least significant bits */ + uint8_t efh_type; + uint8_t size; + uint8_t offset; + uint32_t field; + /* word 1 */ + uint8_t hdr_index; + uint8_t constant; + uint8_t num_of_repeats; + uint8_t num_of_byte_masks; + /* Extraction type is stored in the 4 LSBs */ + uint8_t extract_type; + uint8_t pad[3]; + /* word 2 */ + struct dpni_mask_cfg masks[4]; +}; + +struct dpni_ext_set_rx_tc_dist { + /* extension word 0 */ + uint8_t num_extracts; + uint8_t pad[7]; + /* words 1..25 */ + struct dpni_dist_extract extracts[10]; +}; + +int dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, + uint8_t *key_cfg_buf); + +#endif /* __FSL_DPKG_H_ */ diff --git a/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h new file mode 100644 index 000000000..8b1cfbac7 --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h @@ -0,0 +1,1427 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2017 NXP + * + */ +#ifndef __FSL_DPNI_H +#define __FSL_DPNI_H + +#include <fsl_dpkg.h> +#include <fsl_dpopr.h> + +struct fsl_mc_io; + +/** + * Data Path Network Interface API + * Contains initialization APIs and runtime control APIs for DPNI + */ + +/** General DPNI macros */ + +/** + * Maximum number of traffic classes + */ +#define DPNI_MAX_TC 8 +/** + * Maximum number of buffer pools per DPNI + */ +#define DPNI_MAX_DPBP 8 +/** + * Maximum number of storage-profiles per DPNI + */ +#define DPNI_MAX_SP 2 + +/** + * All traffic classes considered; see dpni_set_queue() + */ +#define DPNI_ALL_TCS (uint8_t)(-1) +/** + * All flows within traffic class considered; see dpni_set_queue() + */ +#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) + +/** + * Tx traffic is always released to a buffer pool on transmit, there are no + * resources allocated to have the frames confirmed back to the source after + * transmission. + */ +#define DPNI_OPT_TX_FRM_RELEASE 0x000001 +/** + * Disables support for MAC address filtering for addresses other than primary + * MAC address. This affects both unicast and multicast. Promiscuous mode can + * still be enabled/disabled for both unicast and multicast. If promiscuous mode + * is disabled, only traffic matching the primary MAC address will be accepted. + */ +#define DPNI_OPT_NO_MAC_FILTER 0x000002 +/** + * Allocate policers for this DPNI. They can be used to rate-limit traffic per + * traffic class (TC) basis. + */ +#define DPNI_OPT_HAS_POLICING 0x000004 +/** + * Congestion can be managed in several ways, allowing the buffer pool to + * deplete on ingress, taildrop on each queue or use congestion groups for sets + * of queues. If set, it configures a single congestion groups across all TCs. + * If reset, a congestion group is allocated for each TC. Only relevant if the + * DPNI has multiple traffic classes. + */ +#define DPNI_OPT_SHARED_CONGESTION 0x000008 +/** + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all + * look-ups are exact match. Note that TCAM is not available on LS1088 and its + * variants. Setting this bit on these SoCs will trigger an error. + */ +#define DPNI_OPT_HAS_KEY_MASKING 0x000010 +/** + * Disables the flow steering table. 
+ */ +#define DPNI_OPT_NO_FS 0x000020 + +/** + * Enable the Order Restoration support + */ +#define DPNI_OPT_HAS_OPR 0x000040 + +/** + * Order Point Records are shared for the entire TC + */ +#define DPNI_OPT_OPR_PER_TC 0x000080 +/** + * All Tx traffic classes will use a single sender (ignore num_queueus for tx) + */ +#define DPNI_OPT_SINGLE_SENDER 0x000100 +/** + * Define a custom number of congestion groups + */ +#define DPNI_OPT_CUSTOM_CG 0x000200 + +int dpni_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpni_id, + uint16_t *token); + +int dpni_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpni_cfg - Structure representing DPNI configuration + * @options: Any combination of the following options: + * DPNI_OPT_TX_FRM_RELEASE + * DPNI_OPT_NO_MAC_FILTER + * DPNI_OPT_HAS_POLICING + * DPNI_OPT_SHARED_CONGESTION + * DPNI_OPT_HAS_KEY_MASKING + * DPNI_OPT_NO_FS + * DPNI_OPT_SINGLE_SENDER + * @fs_entries: Number of entries in the flow steering table. + * This table is used to select the ingress queue for + * ingress traffic, targeting a GPP core or another. + * In addition it can be used to discard traffic that + * matches the set rule. It is either an exact match table + * or a TCAM table, depending on DPNI_OPT_ HAS_KEY_MASKING + * bit in OPTIONS field. This field is ignored if + * DPNI_OPT_NO_FS bit is set in OPTIONS field. Otherwise, + * value 0 defaults to 64. Maximum supported value is 1024. + * Note that the total number of entries is limited on the + * SoC to as low as 512 entries if TCAM is used. + * @vlan_filter_entries: Number of entries in the VLAN address filtering + * table. This is an exact match table used to filter + * ingress traffic based on VLAN IDs. Value 0 disables VLAN + * filtering. Maximum supported value is 16. + * @mac_filter_entries: Number of entries in the MAC address filtering + * table. This is an exact match table and allows both + * unicast and multicast entries. The primary MAC address + * of the network interface is not part of this table, + * this contains only entries in addition to it. This + * field is ignored if DPNI_OPT_ NO_MAC_FILTER is set in + * OPTIONS field. Otherwise, value 0 defaults to 80. + * Maximum supported value is 80. + * @num_queues: Number of Tx and Rx queues used for traffic + * distribution. This is orthogonal to QoS and is only + * used to distribute traffic to multiple GPP cores. + * This configuration affects the number of Tx queues + * (logical FQs, all associated with a single CEETM queue), + * Rx queues and Tx confirmation queues, if applicable. + * Value 0 defaults to one queue. Maximum supported value + * is 8. + * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI. + * TCs can have different priority levels for the purpose + * of Tx scheduling (see DPNI_SET_TX_PRIORITIES), different + * BPs (DPNI_ SET_POOLS), policers. There are dedicated QM + * queues for traffic classes (including class queues on + * Tx). Value 0 defaults to one TC. Maximum supported value + * is 16. There are maximum 16 TCs for Tx and 8 TCs for Rx. + * When num_tcs>8 Tx will use this value but Rx will have + * only 8 traffic classes. + * @num_rx_tcs: if set to other value than zero represents number + * of TCs used for Rx. Maximum value is 8. If set to zero the + * number of Rx TCs will be initialized with the value provided + * in num_tcs parameter. + * @qos_entries: Number of entries in the QoS classification table. This + * table is used to select the TC for ingress traffic. 
It + * is either an exact match or a TCAM table, depending on + * DPNI_OPT_ HAS_KEY_MASKING bit in OPTIONS field. This + * field is ignored if the DPNI has a single TC. Otherwise, + * a value of 0 defaults to 64. Maximum supported value + * is 64. + */ +struct dpni_cfg { + uint32_t options; + uint16_t fs_entries; + uint8_t vlan_filter_entries; + uint8_t mac_filter_entries; + uint8_t num_queues; + uint8_t num_tcs; + uint8_t num_rx_tcs; + uint8_t qos_entries; + uint8_t num_cgs; +}; + +int dpni_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpni_cfg *cfg, + uint32_t *obj_id); + +int dpni_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id); + +/** + * struct dpni_pools_cfg - Structure representing buffer pools configuration + * @num_dpbp: Number of DPBPs + * @pools: Array of buffer pools parameters; The number of valid entries + * must match 'num_dpbp' value + * @pools.dpbp_id: DPBP object ID + * @pools.priority: Priority mask that indicates TC's used with this buffer. + * I set to 0x00 MC will assume value 0xff. + * @pools.buffer_size: Buffer size + * @pools.backup_pool: Backup pool + */ +struct dpni_pools_cfg { + uint8_t num_dpbp; + struct { + int dpbp_id; + uint8_t priority_mask; + uint16_t buffer_size; + int backup_pool; + } pools[DPNI_MAX_DPBP]; +}; + +int dpni_set_pools(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_pools_cfg *cfg); + +int dpni_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpni_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpni_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpni_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * DPNI IRQ Index and Events + */ + +/** + * IRQ index + */ +#define DPNI_IRQ_INDEX 0 +/** + * IRQ event - indicates a change in link state + */ +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 + +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t en); + +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t *en); + +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t mask); + +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *mask); + +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *status); + +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t status); + +/** + * struct dpni_attr - Structure representing DPNI attributes + * @options: Any combination of the following options: + * DPNI_OPT_TX_FRM_RELEASE + * DPNI_OPT_NO_MAC_FILTER + * DPNI_OPT_HAS_POLICING + * DPNI_OPT_SHARED_CONGESTION + * DPNI_OPT_HAS_KEY_MASKING + * DPNI_OPT_NO_FS + * @num_queues: Number of Tx and Rx queues used for traffic distribution. + * @num_rx_tcs: Number of RX traffic classes (TCs), reserved for the DPNI. + * @num_tx_tcs: Number of TX traffic classes (TCs), reserved for the DPNI. + * @mac_filter_entries: Number of entries in the MAC address filtering + * table. + * @vlan_filter_entries: Number of entries in the VLAN address filtering + * table. 
+ * @qos_entries: Number of entries in the QoS classification table.
+ * @fs_entries: Number of entries in the flow steering table.
+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
+ *		than this when adding QoS entries will result
+ *		in an error.
+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
+ *		key larger than this when composing the hash + FS key
+ *		will result in an error.
+ * @wriop_version: Version of WRIOP HW block.
+ *		The 3 version values are stored on 6, 5, 5 bits
+ *		respectively.
+ *		Values returned:
+ *		- 0x400 - WRIOP version 1.0.0, used on LS2080 and
+ *		variants,
+ *		- 0x421 - WRIOP version 1.1.1, used on LS2088 and
+ *		variants,
+ *		- 0x422 - WRIOP version 1.1.2, used on LS1088 and
+ *		variants,
+ *		- 0xC00 - WRIOP version 3.0.0, used on LX2160 and
+ *		variants.
+ */
+struct dpni_attr {
+	uint32_t options;
+	uint8_t num_queues;
+	uint8_t num_rx_tcs;
+	uint8_t num_tx_tcs;
+	uint8_t mac_filter_entries;
+	uint8_t vlan_filter_entries;
+	uint8_t qos_entries;
+	uint16_t fs_entries;
+	uint8_t qos_key_size;
+	uint8_t fs_key_size;
+	uint16_t wriop_version;
+	uint8_t num_cgs;
+};
+
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+			uint32_t cmd_flags,
+			uint16_t token,
+			struct dpni_attr *attr);
+
+/**
+ * DPNI errors
+ */
+
+/**
+ * Discard error. When set, all discarded frames in WRIOP will be enqueued to
+ * the error queue. To be used in dpni_set_errors_behavior() only if the
+ * error_action parameter is set to DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE.
+ */
+#define DPNI_ERROR_DISC		0x80000000
+
+/**
+ * Extract out of frame header error
+ */
+#define DPNI_ERROR_EOFHE	0x00020000
+/**
+ * Frame length error
+ */
+#define DPNI_ERROR_FLE		0x00002000
+/**
+ * Frame physical error
+ */
+#define DPNI_ERROR_FPE		0x00001000
+/**
+ * Parsing header error
+ */
+#define DPNI_ERROR_PHE		0x00000020
+/**
+ * Parser L3 checksum error
+ */
+#define DPNI_ERROR_L3CE		0x00000004
+/**
+ * Parser L4 checksum error
+ */
+#define DPNI_ERROR_L4CE		0x00000001
+
+/**
+ * enum dpni_error_action - Defines DPNI behavior for errors
+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
+ */
+enum dpni_error_action {
+	DPNI_ERROR_ACTION_DISCARD = 0,
+	DPNI_ERROR_ACTION_CONTINUE = 1,
+	DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
+/**
+ * struct dpni_error_cfg - Structure representing DPNI error treatment
+ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame
+ *			  annotation status (FAS); relevant only
+ *			  for the non-discard action
+ */
+struct dpni_error_cfg {
+	uint32_t errors;
+	enum dpni_error_action error_action;
+	int set_frame_annotation;
+};
+
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+			     uint32_t cmd_flags,
+			     uint16_t token,
+			     struct dpni_error_cfg *cfg);
+
+/**
+ * DPNI buffer layout modification options
+ */
+
+/**
+ * Select to modify the time-stamp setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP		0x00000001
+/**
+ * Select to modify the parser-result setting; not applicable for Tx
+ */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT	0x00000002
+/**
+ * Select to modify the frame-status setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS	0x00000004
+/**
+ * Select to modify the private-data-size setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE	0x00000008
+/**
+ * Select to
modify the data-alignment setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 +/** + * Select to modify the data-head-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 +/** + * Select to modify the data-tail-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 +/** + * Select to modify the sw-opaque value setting + */ +#define DPNI_BUF_LAYOUT_OPT_SW_OPAQUE 0x00000080 + +/** + * struct dpni_buffer_layout - Structure representing DPNI buffer layout + * @options: Flags representing the suggested modifications to the + * buffer layout; + * Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags + * @pass_timestamp: Pass timestamp value + * @pass_parser_result: Pass parser results + * @pass_frame_status: Pass frame status + * @private_data_size: Size kept for private data (in bytes) + * @data_align: Data alignment + * @data_head_room: Data head room + * @data_tail_room: Data tail room + */ +struct dpni_buffer_layout { + uint32_t options; + int pass_timestamp; + int pass_parser_result; + int pass_frame_status; + int pass_sw_opaque; + uint16_t private_data_size; + uint16_t data_align; + uint16_t data_head_room; + uint16_t data_tail_room; +}; + +/** + * enum dpni_queue_type - Identifies a type of queue targeted by the command + * @DPNI_QUEUE_RX: Rx queue + * @DPNI_QUEUE_TX: Tx queue + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue + * @DPNI_QUEUE_RX_ERR: Rx error queue + */ +enum dpni_queue_type { + DPNI_QUEUE_RX, + DPNI_QUEUE_TX, + DPNI_QUEUE_TX_CONFIRM, + DPNI_QUEUE_RX_ERR, +}; + +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout); + +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout); + +/** + * enum dpni_offload - Identifies a type of offload targeted by the command + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation + * @DPNI_OPT_FLCTYPE_HASH: flow context will be generated by WRIOP for AIOP or + * for CPU + */ +enum dpni_offload { + DPNI_OFF_RX_L3_CSUM, + DPNI_OFF_RX_L4_CSUM, + DPNI_OFF_TX_L3_CSUM, + DPNI_OFF_TX_L4_CSUM, + DPNI_FLCTYPE_HASH, +}; + +int dpni_set_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t config); + +int dpni_get_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t *config); + +int dpni_get_qdid(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint16_t *qdid); + +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *data_offset); + +#define DPNI_STATISTICS_CNT 7 + +/** + * union dpni_statistics - Union describing the DPNI statistics + * @page_0: Page_0 statistics structure + * @page_0.ingress_all_frames: Ingress frame count + * @page_0.ingress_all_bytes: Ingress byte count + * @page_0.ingress_multicast_frames: Ingress multicast frame count + * @page_0.ingress_multicast_bytes: Ingress multicast byte count + * @page_0.ingress_broadcast_frames: Ingress broadcast frame count + * @page_0.ingress_broadcast_bytes: Ingress broadcast byte count + * @page_1: Page_1 statistics structure + * @page_1.egress_all_frames: Egress frame count + * 
@page_1.egress_all_bytes: Egress byte count
+ * @page_1.egress_multicast_frames: Egress multicast frame count
+ * @page_1.egress_multicast_bytes: Egress multicast byte count
+ * @page_1.egress_broadcast_frames: Egress broadcast frame count
+ * @page_1.egress_broadcast_bytes: Egress broadcast byte count
+ * @page_2: Page_2 statistics structure
+ * @page_2.ingress_filtered_frames: Ingress filtered frame count
+ * @page_2.ingress_discarded_frames: Ingress discarded frame count
+ * @page_2.ingress_nobuffer_discards: Ingress discarded frame count due to
+ *					lack of buffers
+ * @page_2.egress_discarded_frames: Egress discarded frame count
+ * @page_2.egress_confirmed_frames: Egress confirmed frame count
+ * @page_3: Page_3 statistics structure with values for the selected TC
+ * @page_3.ceetm_dequeue_bytes: Cumulative count of the number of bytes dequeued
+ * @page_3.ceetm_dequeue_frames: Cumulative count of the number of frames
+ *				 dequeued
+ * @page_3.ceetm_reject_bytes: Cumulative count of the number of bytes in all
+ *				frames whose enqueue was rejected
+ * @page_3.ceetm_reject_frames: Cumulative count of all frame enqueues rejected
+ * @page_4: Congestion point drops for the selected TC
+ * @page_4.cgr_reject_frames: Number of rejected frames due to congestion point
+ * @page_4.cgr_reject_bytes: Number of rejected bytes due to congestion point
+ * @page_5: Policer statistics per TC
+ * @page_5.policer_cnt_red: Number of red colored frames
+ * @page_5.policer_cnt_yellow: Number of yellow colored frames
+ * @page_5.policer_cnt_green: Number of green colored frames
+ * @page_5.policer_cnt_re_red: Number of recolored red frames
+ * @page_5.policer_cnt_re_yellow: Number of recolored yellow frames
+ * @raw: Raw statistics structure, used to index counters
+ */
+union dpni_statistics {
+	struct {
+		uint64_t ingress_all_frames;
+		uint64_t ingress_all_bytes;
+		uint64_t ingress_multicast_frames;
+		uint64_t ingress_multicast_bytes;
+		uint64_t ingress_broadcast_frames;
+		uint64_t ingress_broadcast_bytes;
+	} page_0;
+	struct {
+		uint64_t egress_all_frames;
+		uint64_t egress_all_bytes;
+		uint64_t egress_multicast_frames;
+		uint64_t egress_multicast_bytes;
+		uint64_t egress_broadcast_frames;
+		uint64_t egress_broadcast_bytes;
+	} page_1;
+	struct {
+		uint64_t ingress_filtered_frames;
+		uint64_t ingress_discarded_frames;
+		uint64_t ingress_nobuffer_discards;
+		uint64_t egress_discarded_frames;
+		uint64_t egress_confirmed_frames;
+	} page_2;
+	struct {
+		uint64_t ceetm_dequeue_bytes;
+		uint64_t ceetm_dequeue_frames;
+		uint64_t ceetm_reject_bytes;
+		uint64_t ceetm_reject_frames;
+	} page_3;
+	struct {
+		uint64_t cgr_reject_frames;
+		uint64_t cgr_reject_bytes;
+	} page_4;
+	struct {
+		uint64_t policer_cnt_red;
+		uint64_t policer_cnt_yellow;
+		uint64_t policer_cnt_green;
+		uint64_t policer_cnt_re_red;
+		uint64_t policer_cnt_re_yellow;
+	} page_5;
+	struct {
+		uint64_t counter[DPNI_STATISTICS_CNT];
+	} raw;
+};
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPNI_LINK_OPT_AUTONEG		0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPNI_LINK_OPT_HALF_DUPLEX	0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPNI_LINK_OPT_PAUSE		0x0000000000000004ULL
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPNI_LINK_OPT_ASYM_PAUSE	0x0000000000000008ULL
+/**
+ * Enable priority flow control pause frames
+ */
+#define DPNI_LINK_OPT_PFC_PAUSE	0x0000000000000010ULL
+
+/**
+ * Advertise 10MB full duplex
+ */
+#define DPNI_ADVERTISED_10BASET_FULL		0x0000000000000001ULL
+/**
+ * Advertise 100MB
full duplex + */ +#define DPNI_ADVERTISED_100BASET_FULL 0x0000000000000002ULL +/** + * Advertise 1GB full duplex + */ +#define DPNI_ADVERTISED_1000BASET_FULL 0x0000000000000004ULL +/** + * Advertise auto-negotiation enable + */ +#define DPNI_ADVERTISED_AUTONEG 0x0000000000000008ULL +/** + * Advertise 10GB full duplex + */ +#define DPNI_ADVERTISED_10000BASET_FULL 0x0000000000000010ULL +/** + * Advertise 2.5GB full duplex + */ +#define DPNI_ADVERTISED_2500BASEX_FULL 0x0000000000000020ULL +/** + * Advertise 5GB full duplex + */ +#define DPNI_ADVERTISED_5000BASET_FULL 0x0000000000000040ULL + + +/** + * struct - Structure representing DPNI link configuration + * @rate: Rate + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values + * @advertising: Speeds that are advertised for autoneg (bitmap) + */ +struct dpni_link_cfg { + uint32_t rate; + uint64_t options; + uint64_t advertising; +}; + +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_link_cfg *cfg); + +/** + * struct dpni_link_state - Structure representing DPNI link state + * @rate: Rate + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values + * @up: Link state; '0' for down, '1' for up + * @state_valid: Ignore/Update the state of the link + * @supported: Speeds capability of the phy (bitmap) + * @advertising: Speeds that are advertised for autoneg (bitmap) + */ +struct dpni_link_state { + uint32_t rate; + uint64_t options; + int up; + int state_valid; + uint64_t supported; + uint64_t advertising; +}; + +int dpni_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_link_state *state); + +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length); + +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *max_frame_length); + +int dpni_set_mtu(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t mtu); + +int dpni_get_mtu(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *mtu); + +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en); + +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en); + +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]); + +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]); + +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]); + +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]); + +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int unicast, + int multicast); + +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]); + +int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en); + +int dpni_add_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id); + 
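As a usage sketch only (not part of the patch itself), the MAC-filter, promiscuous-mode and link-state commands declared above are normally issued through an MC portal handle and an object token obtained from dpni_open(); the fragment below assumes dpni_open()/dpni_close() declared earlier in this header and the CMD_PRI_LOW priority flag from the fslmc bus header (fsl_mc_cmd.h), and the MAC address is a placeholder chosen purely for illustration.

#include <fsl_mc_cmd.h>	/* assumed: provides CMD_PRI_LOW and struct fsl_mc_io */
#include <fsl_dpni.h>

/* Minimal sketch: program a primary MAC, accept multicast, read link state. */
static int dpni_example_basic_setup(struct fsl_mc_io *mc_io, int dpni_id)
{
	/* Placeholder unicast MAC used purely for illustration */
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 };
	struct dpni_link_state state = { 0 };
	uint16_t token;
	int err;

	err = dpni_open(mc_io, CMD_PRI_LOW, dpni_id, &token);
	if (err)
		return err;

	/* Each call below is a blocking command sent to the Management Complex */
	err = dpni_set_primary_mac_addr(mc_io, CMD_PRI_LOW, token, mac);
	if (!err)
		err = dpni_set_multicast_promisc(mc_io, CMD_PRI_LOW, token, 1);
	if (!err)
		err = dpni_get_link_state(mc_io, CMD_PRI_LOW, token, &state);

	dpni_close(mc_io, CMD_PRI_LOW, token);
	return err;
}

Each helper returns 0 on success or a negative MC error code, so the sketch simply stops at the first failure; a real driver would keep the token open for the lifetime of the port rather than closing it immediately.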
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id); + +int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * enum dpni_dist_mode - DPNI distribution mode + * @DPNI_DIST_MODE_NONE: No distribution + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation + */ +enum dpni_dist_mode { + DPNI_DIST_MODE_NONE = 0, + DPNI_DIST_MODE_HASH = 1, + DPNI_DIST_MODE_FS = 2 +}; + +/** + * enum dpni_fs_miss_action - DPNI Flow Steering miss action + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash + */ +enum dpni_fs_miss_action { + DPNI_FS_MISS_DROP = 0, + DPNI_FS_MISS_EXPLICIT_FLOWID = 1, + DPNI_FS_MISS_HASH = 2 +}; + +/** + * struct dpni_fs_tbl_cfg - Flow Steering table configuration + * @miss_action: Miss action selection + * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' + * @keep_hash_key: used only when miss_action is set to DPNI_FS_MISS_HASH. When + * set to one unclassified frames will be distributed according to previous + * used hash key. If set to zero hash key will be replaced with the key + * provided for flow steering. + * @keep_entries: if set to one command will not delete the entries that already + * exist into FS table. Use this option with caution: if the table + * entries are not compatible with the distribution key the packets + * will not be classified properly. + */ +struct dpni_fs_tbl_cfg { + enum dpni_fs_miss_action miss_action; + uint16_t default_flow_id; + char keep_hash_key; + uint8_t keep_entries; +}; + +/** + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration + * @dist_size: Set the distribution size; + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, + * 112,128,192,224,256,384,448,512,768,896,1024 + * @dist_mode: Distribution mode + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpkg_prepare_key_cfg() relevant only when + * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' + * @fs_cfg: Flow Steering table configuration; only relevant if + * 'dist_mode = DPNI_DIST_MODE_FS' + */ +struct dpni_rx_tc_dist_cfg { + uint16_t dist_size; + enum dpni_dist_mode dist_mode; + uint64_t key_cfg_iova; + struct dpni_fs_tbl_cfg fs_cfg; +}; + +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + const struct dpni_rx_tc_dist_cfg *cfg); + +/** + * enum dpni_congestion_unit - DPNI congestion units + * @DPNI_CONGESTION_UNIT_BYTES: bytes units + * @DPNI_CONGESTION_UNIT_FRAMES: frames units + */ +enum dpni_congestion_unit { + DPNI_CONGESTION_UNIT_BYTES = 0, + DPNI_CONGESTION_UNIT_FRAMES +}; + +/** + * enum dpni_dest - DPNI destination types + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and + * does not generate FQDAN notifications; user is expected to + * dequeue from the queue based on polling or other user-defined + * method + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN + * notifications to the specified DPIO; user is expected to dequeue + * from the queue only 
after notification is received + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate + * FQDAN notifications, but is connected to the specified DPCON + * object; user is expected to dequeue from the DPCON channel + */ +enum dpni_dest { + DPNI_DEST_NONE = 0, + DPNI_DEST_DPIO = 1, + DPNI_DEST_DPCON = 2 +}; + +/** + * struct dpni_dest_cfg - Structure representing DPNI destination parameters + * @dest_type: Destination type + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type + * @priority: Priority selection within the DPIO or DPCON channel; valid values + * are 0-1 or 0-7, depending on the number of priorities in that + * channel; not relevant for 'DPNI_DEST_NONE' option + */ +struct dpni_dest_cfg { + enum dpni_dest dest_type; + int dest_id; + uint8_t priority; +}; + +/* DPNI congestion options */ + +/** + * CSCN message is written to message_iova once entering a + * congestion state (see 'threshold_entry') + */ +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 +/** + * CSCN message is written to message_iova once exiting a + * congestion state (see 'threshold_exit') + */ +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 +/** + * CSCN write will attempt to allocate into a cache (coherent write); + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected + */ +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 +/** + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to + * DPIO/DPCON's WQ channel once entering a congestion state + * (see 'threshold_entry') + */ +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 +/** + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to + * DPIO/DPCON's WQ channel once exiting a congestion state + * (see 'threshold_exit') + */ +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 +/** + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) + */ +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 +/** + * This congestion will trigger flow control or priority flow control. This + * will have effect only if flow control is enabled with dpni_set_link_cfg() + */ +#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040 + +/** + * enum dpni_congestion_point - Structure representing congestion point + * @DPNI_CP_QUEUE: Set congestion per queue, identified by QUEUE_TYPE, TC + * and QUEUE_INDEX + * @DPNI_CP_GROUP: Set congestion per queue group. Depending on options + * used to define the DPNI this can be either per + * TC (default) or per interface + * (DPNI_OPT_SHARED_CONGESTION set at DPNI create). + * QUEUE_INDEX is ignored if this type is used. + * @DPNI_CP_CONGESTION_GROUP: Set per congestion group id. This will work + * only if the DPNI is created with DPNI_OPT_CUSTOM_CG option + */ + +enum dpni_congestion_point { + DPNI_CP_QUEUE, + DPNI_CP_GROUP, + DPNI_CP_CONGESTION_GROUP, +}; + +/** + * struct dpni_congestion_notification_cfg - congestion notification + * configuration + * @units: units type + * @threshold_entry: above this threshold we enter a congestion state. + * set it to '0' to disable it + * @threshold_exit: below this threshold we exit the congestion state. 
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ *	must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is
+ *	contained in 'options'
+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
+ * @cg_point: Congestion point settings
+ * @cgid: id of the congestion group. The index is relative to dpni.
+ */
+
+struct dpni_congestion_notification_cfg {
+	enum dpni_congestion_unit units;
+	uint32_t threshold_entry;
+	uint32_t threshold_exit;
+	uint64_t message_ctx;
+	uint64_t message_iova;
+	struct dpni_dest_cfg dest_cfg;
+	uint16_t notification_mode;
+	enum dpni_congestion_point cg_point;
+	int cgid;
+};
+
+int dpni_set_congestion_notification(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		enum dpni_queue_type qtype,
+		uint8_t tc_id,
+		const struct dpni_congestion_notification_cfg *cfg);
+
+int dpni_get_congestion_notification(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		enum dpni_queue_type qtype,
+		uint8_t tc_id,
+		struct dpni_congestion_notification_cfg *cfg);
+
+/* DPNI FLC stash options */
+
+/**
+ * stashes the whole annotation area (up to 192 bytes)
+ */
+#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001
+
+/**
+ * struct dpni_queue - Queue structure
+ * @destination - Destination structure
+ * @destination.id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ *	Identifies either a DPIO or a DPCON object.
+ *	Not relevant for Tx queues.
+ * @destination.type: May be one of the following:
+ *	0 - No destination, queue can be manually
+ *	queried, but will not push traffic or
+ *	notifications to a DPIO;
+ *	1 - The destination is a DPIO. When traffic
+ *	becomes available in the queue a FQDAN
+ *	(FQ data available notification) will be
+ *	generated to selected DPIO;
+ *	2 - The destination is a DPCON. The queue is
+ *	associated with a DPCON object for the
+ *	purpose of scheduling between multiple
+ *	queues. The DPCON may be independently
+ *	configured to generate notifications.
+ *	Not relevant for Tx queues.
+ * @destination.hold_active: Hold active, maintains a queue scheduled for longer
+ *	in a DPIO during dequeue to reduce spread of traffic.
+ *	Only relevant if queues are
+ *	not affined to a single DPIO.
+ * @user_context: User data, presented to the user along with any frames
+ *	from this queue. Not relevant for Tx queues.
+ * @flc: FD Flow Context structure
+ * @flc.value: Default FLC value for traffic dequeued from
+ *	this queue. Please check description of FD
+ *	structure for more information.
+ *	Note that FLC values set using dpni_add_fs_entry,
+ *	if any, take precedence over values per queue.
+ * @flc.stash_control: Boolean, indicates whether the 6 lowest
+ *	significant bits are used for stash control. If set, the 6
+ *	least significant bits in value are interpreted as follows:
+ *	- bits 0-1: indicates the number of 64 byte units of context
+ *	that are stashed. FLC value is interpreted as a memory address
+ *	in this case, excluding the 6 LS bits.
+ *	- bits 2-3: indicates the number of 64 byte units of frame
+ *	annotation to be stashed. Annotation is placed at FD[ADDR].
+ *	- bits 4-5: indicates the number of 64 byte units of frame
+ *	data to be stashed. Frame data is placed at FD[ADDR] +
+ *	FD[OFFSET].
+ *	For more details check the Frame Descriptor section in the
+ *	hardware documentation.
+ * @cgid: indicate the cgid to set relative to dpni
+ */
+struct dpni_queue {
+	struct {
+		uint16_t id;
+		enum dpni_dest type;
+		char hold_active;
+		uint8_t priority;
+	} destination;
+	uint64_t user_context;
+	struct {
+		uint64_t value;
+		char stash_control;
+	} flc;
+	int cgid;
+};
+
+/**
+ * struct dpni_queue_id - Queue identification, used for enqueue commands
+ *	or queue control
+ * @fqid: FQID used for enqueueing to and/or configuration of this
+ *	specific FQ
+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI.
+ *	Only relevant for Tx queues.
+ */
+struct dpni_queue_id {
+	uint32_t fqid;
+	uint16_t qdbin;
+};
+
+/**
+ * enum dpni_confirmation_mode - Defines DPNI options supported for Tx
+ *	confirmation
+ * @DPNI_CONF_AFFINE: For each Tx queue set associated with a sender there is
+ *	an affine Tx Confirmation queue
+ * @DPNI_CONF_SINGLE: All Tx queues are associated with a single Tx
+ *	confirmation queue
+ * @DPNI_CONF_DISABLE: Tx frames are not confirmed. This must be associated
+ *	with proper FD set-up to have buffers released to a Buffer Pool,
+ *	otherwise buffers will be leaked
+ */
+enum dpni_confirmation_mode {
+	DPNI_CONF_AFFINE,
+	DPNI_CONF_SINGLE,
+	DPNI_CONF_DISABLE,
+};
+
+int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		enum dpni_confirmation_mode mode);
+
+int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		enum dpni_confirmation_mode *mode);
+
+/**
+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ *	key extractions to be used as the QoS criteria by calling
+ *	dpkg_prepare_key_cfg()
+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
+ *	'0' to use the 'default_tc' in such cases
+ * @keep_entries: if set to one will not delete existing table entries. This
+ *	option will work properly only for dpni objects created with
+ *	DPNI_OPT_HAS_KEY_MASKING option. All previous QoS entries must
+ *	be compatible with new key composition rule.
+ *	It is the caller's job to delete incompatible entries before
+ *	executing this function.
+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
+ */
+struct dpni_qos_tbl_cfg {
+	uint64_t key_cfg_iova;
+	int discard_on_miss;
+	int keep_entries;
+	uint8_t default_tc;
+};
+
+int dpni_set_qos_table(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		const struct dpni_qos_tbl_cfg *cfg);
+
+/**
+ * struct dpni_rule_cfg - Rule configuration for table lookup
+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory)
+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory)
+ * @key_size: key and mask size (in bytes)
+ */
+struct dpni_rule_cfg {
+	uint64_t key_iova;
+	uint64_t mask_iova;
+	uint8_t key_size;
+};
+
+int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		const struct dpni_rule_cfg *cfg,
+		uint8_t tc_id,
+		uint16_t index);
+
+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token);
+
+/**
+ * Discard matching traffic. If set, this takes precedence over any other
+ * configuration and matching traffic is always discarded.
+ */
+#define DPNI_FS_OPT_DISCARD 0x1
+
+/**
+ * Set FLC value. If set, flc member of struct dpni_fs_action_cfg is used to
+ * override the FLC value set per queue.
+ * For more details check the Frame Descriptor section in the hardware
+ * documentation.
+ */
+#define DPNI_FS_OPT_SET_FLC 0x2
+
+/*
+ * Indicates whether the 6 lowest significant bits of FLC are used for stash
+ * control. If set, the 6 least significant bits in value are interpreted as
+ * follows:
+ * - bits 0-1: indicates the number of 64 byte units of context that are
+ *   stashed. FLC value is interpreted as a memory address in this case,
+ *   excluding the 6 LS bits.
+ * - bits 2-3: indicates the number of 64 byte units of frame annotation
+ *   to be stashed. Annotation is placed at FD[ADDR].
+ * - bits 4-5: indicates the number of 64 byte units of frame data to be
+ *   stashed. Frame data is placed at FD[ADDR] + FD[OFFSET].
+ * This flag is ignored if DPNI_FS_OPT_SET_FLC is not specified.
+ */
+#define DPNI_FS_OPT_SET_STASH_CONTROL 0x4
+
+/**
+ * struct dpni_fs_action_cfg - Action configuration for table look-up
+ * @flc: FLC value for traffic matching this rule. Please check the Frame
+ *	Descriptor section in the hardware documentation for more information.
+ * @flow_id: Identifies the Rx queue used for matching traffic. Supported
+ *	values are in range 0 to num_queue-1.
+ * @options: Any combination of DPNI_FS_OPT_ values.
+ */
+struct dpni_fs_action_cfg {
+	uint64_t flc;
+	uint16_t flow_id;
+	uint16_t options;
+};
+
+int dpni_add_fs_entry(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		uint8_t tc_id,
+		uint16_t index,
+		const struct dpni_rule_cfg *cfg,
+		const struct dpni_fs_action_cfg *action);
+
+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		uint8_t tc_id,
+		const struct dpni_rule_cfg *cfg);
+
+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t token,
+		uint8_t tc_id);
+
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+		uint32_t cmd_flags,
+		uint16_t *major_ver,
+		uint16_t *minor_ver);
+
+/**
+ * Set User Context
+ */
+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Set queue destination configuration
+ */
+#define DPNI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Set FD[FLC] configuration for traffic on this queue. Note that FLC values
+ * set with dpni_add_fs_entry, if any, take precedence over values per queue.
+ */
+#define DPNI_QUEUE_OPT_FLC 0x00000004
+
+/**
+ * Set the queue to hold active mode. This prevents the queue from being
+ * rescheduled between DPIOs while it carries traffic and is active on one
+ * DPNI. Can help reduce reordering when servicing one queue on multiple
+ * CPUs, but the queue is also less likely to push data to multiple CPUs
+ * especially when congested.
+ */ +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008 + +#define DPNI_QUEUE_OPT_SET_CGID 0x00000040 +#define DPNI_QUEUE_OPT_CLEAR_CGID 0x00000080 + +int dpni_set_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + uint8_t options, + const struct dpni_queue *queue); + +int dpni_get_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_queue *queue, + struct dpni_queue_id *qid); + +int dpni_get_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t page, + uint16_t param, + union dpni_statistics *stat); + +int dpni_reset_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpni_taildrop - Structure representing the taildrop + * @enable: Indicates whether the taildrop is active or not. + * @units: Indicates the unit of THRESHOLD. Queue taildrop only + * supports byte units, this field is ignored and + * assumed = 0 if CONGESTION_POINT is 0. + * @threshold: Threshold value, in units identified by UNITS field. Value 0 + * cannot be used as a valid taildrop threshold, + * THRESHOLD must be > 0 if the taildrop is + * enabled. + * @oal : Overhead Accounting Length, a 12-bit, 2's complement value + * with range (-2048 to +2047) representing a fixed per-frame + * overhead to be added to the actual length of a frame when + * performing WRED and tail drop calculations and threshold + * comparisons. + */ +struct dpni_taildrop { + char enable; + enum dpni_congestion_unit units; + uint32_t threshold; + int16_t oal; +}; + +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + uint8_t tc, + uint8_t q_index, + struct dpni_taildrop *taildrop); + +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + uint8_t tc, + uint8_t q_index, + struct dpni_taildrop *taildrop); + +int dpni_set_opr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc, + uint8_t index, + uint8_t options, + struct opr_cfg *cfg); + +int dpni_get_opr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc, + uint8_t index, + struct opr_cfg *cfg, + struct opr_qry *qry); + +/** + * When used for queue_idx in function dpni_set_rx_dist_default_queue will + * signal to dpni to drop all unclassified frames + */ +#define DPNI_FS_MISS_DROP ((uint16_t)-1) + +/** + * struct dpni_rx_dist_cfg - distribution configuration + * @dist_size: distribution size; supported values: 1,2,3,4,6,7,8, + * 12,14,16,24,28,32,48,56,64,96,112,128,192,224,256,384,448, + * 512,768,896,1024 + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpkg_prepare_key_cfg() relevant only when enable!=0 otherwise + * it can be '0' + * @enable: enable/disable the distribution. + * @tc: TC id for which distribution is set + * @fs_miss_flow_id: when packet misses all rules from flow steering table and + * hash is disabled it will be put into this queue id; use + * DPNI_FS_MISS_DROP to drop frames. 
The value of this field is + * used only when flow steering distribution is enabled and hash + * distribution is disabled + */ +struct dpni_rx_dist_cfg { + uint16_t dist_size; + uint64_t key_cfg_iova; + uint8_t enable; + uint8_t tc; + uint16_t fs_miss_flow_id; +}; + +int dpni_set_rx_fs_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, const struct dpni_rx_dist_cfg *cfg); + +int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, const struct dpni_rx_dist_cfg *cfg); + +int dpni_add_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, uint16_t tpid); + +int dpni_remove_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, uint16_t tpid); + +/** + * struct dpni_custom_tpid_cfg - custom TPID configuration. Contains custom TPID + * values used in current dpni object to detect 802.1q frames. + * @tpid1: first tag. Not used if zero. + * @tpid2: second tag. Not used if zero. + */ +struct dpni_custom_tpid_cfg { + uint16_t tpid1; + uint16_t tpid2; +}; + +int dpni_get_custom_tpid(struct fsl_mc_io *mc_io, uint32_t cmd_flags, + uint16_t token, struct dpni_custom_tpid_cfg *tpid); + +#endif /* __FSL_DPNI_H */ diff --git a/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h new file mode 100644 index 000000000..5effbb300 --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h @@ -0,0 +1,802 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2017 NXP + * + */ +#ifndef _FSL_DPNI_CMD_H +#define _FSL_DPNI_CMD_H + +/* DPNI Version */ +#define DPNI_VER_MAJOR 7 +#define DPNI_VER_MINOR 9 + +#define DPNI_CMD_BASE_VERSION 1 +#define DPNI_CMD_VERSION_2 2 +#define DPNI_CMD_VERSION_3 3 +#define DPNI_CMD_ID_OFFSET 4 + +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) +#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_2) +#define DPNI_CMD_V3(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_3) + +/* Command IDs */ +#define DPNI_CMDID_OPEN DPNI_CMD(0x801) +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800) +#define DPNI_CMDID_CREATE DPNI_CMD_V3(0x901) +#define DPNI_CMDID_DESTROY DPNI_CMD(0x981) +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01) + +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002) +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003) +#define DPNI_CMDID_GET_ATTR DPNI_CMD_V3(0x004) +#define DPNI_CMDID_RESET DPNI_CMD(0x005) +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006) + +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012) +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013) +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014) +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015) +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) + +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200) +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) + +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) +#define DPNI_CMDID_GET_SP_INFO DPNI_CMD(0x211) +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212) +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD_V2(0x215) +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216) +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217) +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD_V2(0x21A) +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B) + +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220) +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221) +#define 
DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222) +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223) +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224) +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225) +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226) +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227) +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228) + +#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230) +#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD(0x231) +#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232) +#define DPNI_CMDID_CLR_VLAN_FILTERS DPNI_CMD(0x233) + +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD_V3(0x235) + +#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD_V2(0x240) +#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241) +#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242) +#define DPNI_CMDID_CLR_QOS_TBL DPNI_CMD(0x243) +#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244) +#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245) +#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246) + +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V3(0x25D) +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E) +#define DPNI_CMDID_GET_QUEUE DPNI_CMD_V2(0x25F) +#define DPNI_CMDID_SET_QUEUE DPNI_CMD_V2(0x260) +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD_V2(0x261) +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V2(0x262) + +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263) + +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD_V2(0x264) +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD_V2(0x265) + +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x267) +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD_V2(0x268) +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V2(0x269) +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V2(0x26A) +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B) +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C) +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266) +#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D) +#define DPNI_CMDID_SET_OPR DPNI_CMD(0x26e) +#define DPNI_CMDID_GET_OPR DPNI_CMD(0x26f) +#define DPNI_CMDID_SET_RX_FS_DIST DPNI_CMD(0x273) +#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274) +#define DPNI_CMDID_ADD_CUSTOM_TPID DPNI_CMD(0x275) +#define DPNI_CMDID_REMOVE_CUSTOM_TPID DPNI_CMD(0x276) +#define DPNI_CMDID_GET_CUSTOM_TPID DPNI_CMD(0x277) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPNI_MASK(field) \ + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \ + DPNI_##field##_SHIFT) +#define dpni_set_field(var, field, val) \ + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field))) +#define dpni_get_field(var, field) \ + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT) + +#pragma pack(push, 1) +struct dpni_cmd_open { + uint32_t dpni_id; +}; + +struct dpni_cmd_create { + uint32_t options; + uint8_t num_queues; + uint8_t num_tcs; + uint8_t mac_filter_entries; + uint8_t pad1; + uint8_t vlan_filter_entries; + uint8_t pad2; + uint8_t qos_entries; + uint8_t pad3; + uint16_t fs_entries; + uint8_t num_rx_tcs; + uint8_t pad4; + uint8_t num_cgs; +}; + +struct dpni_cmd_destroy { + uint32_t dpsw_id; +}; + +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) + +struct dpni_cmd_pool { + uint16_t dpbp_id; + uint8_t priority_mask; + uint8_t pad; +}; + +struct dpni_cmd_set_pools { + uint8_t num_dpbp; + uint8_t backup_pool_mask; + uint16_t pad; + struct dpni_cmd_pool pool[8]; + uint16_t buffer_size[8]; +}; + +/* The enable indication is always the least significant bit */ +#define DPNI_ENABLE_SHIFT 0 +#define DPNI_ENABLE_SIZE 1 + +struct dpni_rsp_is_enabled { + uint8_t 
enabled; +}; + +struct dpni_cmd_set_irq_enable { + uint8_t enable; + uint8_t pad[3]; + uint8_t irq_index; +}; + +struct dpni_cmd_get_irq_enable { + uint32_t pad; + uint8_t irq_index; +}; + +struct dpni_rsp_get_irq_enable { + uint8_t enabled; +}; + +struct dpni_cmd_set_irq_mask { + uint32_t mask; + uint8_t irq_index; +}; + +struct dpni_cmd_get_irq_mask { + uint32_t pad; + uint8_t irq_index; +}; + +struct dpni_rsp_get_irq_mask { + uint32_t mask; +}; + +struct dpni_cmd_get_irq_status { + uint32_t status; + uint8_t irq_index; +}; + +struct dpni_rsp_get_irq_status { + uint32_t status; +}; + +struct dpni_cmd_clear_irq_status { + uint32_t status; + uint8_t irq_index; +}; + +struct dpni_rsp_get_attr { + /* response word 0 */ + uint32_t options; + uint8_t num_queues; + uint8_t num_rx_tcs; + uint8_t mac_filter_entries; + uint8_t num_tx_tcs; + /* response word 1 */ + uint8_t vlan_filter_entries; + uint8_t pad1; + uint8_t qos_entries; + uint8_t pad2; + uint16_t fs_entries; + uint16_t pad3; + /* response word 2 */ + uint8_t qos_key_size; + uint8_t fs_key_size; + uint16_t wriop_version; + uint8_t num_cgs; +}; + +#define DPNI_ERROR_ACTION_SHIFT 0 +#define DPNI_ERROR_ACTION_SIZE 4 +#define DPNI_FRAME_ANN_SHIFT 4 +#define DPNI_FRAME_ANN_SIZE 1 + +struct dpni_cmd_set_errors_behavior { + uint32_t errors; + /* from least significant bit: error_action:4, set_frame_annotation:1 */ + uint8_t flags; +}; + +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation + * buffer layouts, but they all share the same parameters. + * If one of the functions changes, below structure needs to be split. + */ + +#define DPNI_PASS_TS_SHIFT 0 +#define DPNI_PASS_TS_SIZE 1 +#define DPNI_PASS_PR_SHIFT 1 +#define DPNI_PASS_PR_SIZE 1 +#define DPNI_PASS_FS_SHIFT 2 +#define DPNI_PASS_FS_SIZE 1 +#define DPNI_PASS_SWO_SHIFT 3 +#define DPNI_PASS_SWO_SIZE 1 + +struct dpni_cmd_get_buffer_layout { + uint8_t qtype; +}; + +struct dpni_rsp_get_buffer_layout { + /* response word 0 */ + uint8_t pad0[6]; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + uint8_t flags; + uint8_t pad1; + /* response word 1 */ + uint16_t private_data_size; + uint16_t data_align; + uint16_t head_room; + uint16_t tail_room; +}; + +struct dpni_cmd_set_buffer_layout { + /* cmd word 0 */ + uint8_t qtype; + uint8_t pad0[3]; + uint16_t options; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + uint8_t flags; + uint8_t pad1; + /* cmd word 1 */ + uint16_t private_data_size; + uint16_t data_align; + uint16_t head_room; + uint16_t tail_room; +}; + +struct dpni_cmd_set_offload { + uint8_t pad[3]; + uint8_t dpni_offload; + uint32_t config; +}; + +struct dpni_cmd_get_offload { + uint8_t pad[3]; + uint8_t dpni_offload; +}; + +struct dpni_rsp_get_offload { + uint32_t pad; + uint32_t config; +}; + +struct dpni_cmd_get_qdid { + uint8_t qtype; +}; + +struct dpni_rsp_get_qdid { + uint16_t qdid; +}; + +struct dpni_rsp_get_sp_info { + uint16_t spids[2]; +}; + +struct dpni_rsp_get_tx_data_offset { + uint16_t data_offset; +}; + +struct dpni_cmd_get_statistics { + uint8_t page_number; + uint16_t param; +}; + +struct dpni_rsp_get_statistics { + uint64_t counter[7]; +}; + +struct dpni_cmd_set_link_cfg { + uint64_t pad0; + uint32_t rate; + uint32_t pad1; + uint64_t options; + uint64_t advertising; +}; + +#define DPNI_LINK_STATE_SHIFT 0 +#define DPNI_LINK_STATE_SIZE 1 +#define DPNI_STATE_VALID_SHIFT 1 +#define DPNI_STATE_VALID_SIZE 1 + +struct dpni_rsp_get_link_state { + uint32_t pad0; + /* from LSB: up:1 */ + uint8_t flags; + uint8_t 
pad1[3]; + uint32_t rate; + uint32_t pad2; + uint64_t options; + uint64_t supported; + uint64_t advertising; +}; + +struct dpni_cmd_set_max_frame_length { + uint16_t max_frame_length; +}; + +struct dpni_rsp_get_max_frame_length { + uint16_t max_frame_length; +}; + +struct dpni_cmd_set_multicast_promisc { + uint8_t enable; +}; + +struct dpni_rsp_get_multicast_promisc { + uint8_t enabled; +}; + +struct dpni_cmd_set_unicast_promisc { + uint8_t enable; +}; + +struct dpni_rsp_get_unicast_promisc { + uint8_t enabled; +}; + +struct dpni_cmd_set_primary_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_rsp_get_primary_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_rsp_get_port_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_cmd_add_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_cmd_remove_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +#define DPNI_UNICAST_FILTERS_SHIFT 0 +#define DPNI_UNICAST_FILTERS_SIZE 1 +#define DPNI_MULTICAST_FILTERS_SHIFT 1 +#define DPNI_MULTICAST_FILTERS_SIZE 1 + +struct dpni_cmd_clear_mac_filters { + /* from LSB: unicast:1, multicast:1 */ + uint8_t flags; +}; + +struct dpni_cmd_enable_vlan_filter { + /* only the LSB */ + uint8_t en; +}; + +struct dpni_cmd_vlan_id { + uint32_t pad; + uint16_t vlan_id; +}; + +#define DPNI_SEPARATE_GRP_SHIFT 0 +#define DPNI_SEPARATE_GRP_SIZE 1 +#define DPNI_MODE_1_SHIFT 0 +#define DPNI_MODE_1_SIZE 4 +#define DPNI_MODE_2_SHIFT 4 +#define DPNI_MODE_2_SIZE 4 + +struct dpni_cmd_set_tx_priorities { + uint16_t flags; + uint8_t prio_group_A; + uint8_t prio_group_B; + uint32_t pad0; + uint8_t modes[4]; + uint32_t pad1; + uint64_t pad2; + uint16_t delta_bandwidth[8]; +}; + +#define DPNI_DIST_MODE_SHIFT 0 +#define DPNI_DIST_MODE_SIZE 4 +#define DPNI_MISS_ACTION_SHIFT 4 +#define DPNI_MISS_ACTION_SIZE 4 +#define DPNI_KEEP_HASH_KEY_SHIFT 7 +#define DPNI_KEEP_HASH_KEY_SIZE 1 +#define DPNI_KEEP_ENTRIES_SHIFT 6 +#define DPNI_KEEP_ENTRIES_SIZE 1 + +struct dpni_cmd_set_rx_tc_dist { + uint16_t dist_size; + uint8_t tc_id; + /* from LSB: dist_mode:4, miss_action:4 */ + uint8_t flags; + uint8_t pad0; + /* only the LSB */ + uint8_t keep_hash_key; + uint16_t default_flow_id; + uint64_t pad1[5]; + uint64_t key_cfg_iova; +}; + +struct dpni_cmd_get_queue { + uint8_t qtype; + uint8_t tc; + uint8_t index; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_CGID_VALID_SHIFT 5 +#define DPNI_CGID_VALID_SIZE 1 +#define DPNI_STASH_CTRL_SHIFT 6 +#define DPNI_STASH_CTRL_SIZE 1 +#define DPNI_HOLD_ACTIVE_SHIFT 7 +#define DPNI_HOLD_ACTIVE_SIZE 1 + +struct dpni_rsp_get_queue { + /* response word 0 */ + uint64_t pad0; + /* response word 1 */ + uint32_t dest_id; + uint16_t pad1; + uint8_t dest_prio; + /* From LSB: + * dest_type:4, pad:1, cgid_valid:1, flc_stash_ctrl:1, hold_active:1 + */ + uint8_t flags; + /* response word 2 */ + uint64_t flc; + /* response word 3 */ + uint64_t user_context; + /* response word 4 */ + uint32_t fqid; + uint16_t qdbin; + uint16_t pad2; + /* response word 5*/ + uint8_t cgid; +}; + +struct dpni_cmd_set_queue { + /* cmd word 0 */ + uint8_t qtype; + uint8_t tc; + uint8_t index; + uint8_t options; + uint32_t pad0; + /* cmd word 1 */ + uint32_t dest_id; + uint16_t pad1; + uint8_t dest_prio; + uint8_t flags; + /* cmd word 2 */ + uint64_t flc; + /* cmd word 3 */ + uint64_t user_context; + /* cmd word 4 */ + uint8_t cgid; +}; + +#define DPNI_DISCARD_ON_MISS_SHIFT 0 +#define DPNI_DISCARD_ON_MISS_SIZE 1 +#define DPNI_KEEP_QOS_ENTRIES_SHIFT 1 
+#define DPNI_KEEP_QOS_ENTRIES_SIZE 1 + +struct dpni_cmd_set_qos_table { + uint32_t pad; + uint8_t default_tc; + /* only the LSB */ + uint8_t discard_on_miss; + uint16_t pad1[21]; + uint64_t key_cfg_iova; +}; + +struct dpni_cmd_add_qos_entry { + uint16_t pad; + uint8_t tc_id; + uint8_t key_size; + uint16_t index; + uint16_t pad2; + uint64_t key_iova; + uint64_t mask_iova; +}; + +struct dpni_cmd_remove_qos_entry { + uint8_t pad1[3]; + uint8_t key_size; + uint32_t pad2; + uint64_t key_iova; + uint64_t mask_iova; +}; + +struct dpni_cmd_add_fs_entry { + uint16_t options; + uint8_t tc_id; + uint8_t key_size; + uint16_t index; + uint16_t flow_id; + uint64_t key_iova; + uint64_t mask_iova; + uint64_t flc; +}; + +struct dpni_cmd_remove_fs_entry { + uint16_t pad1; + uint8_t tc_id; + uint8_t key_size; + uint32_t pad2; + uint64_t key_iova; + uint64_t mask_iova; +}; + +struct dpni_cmd_clear_fs_entries { + uint16_t pad; + uint8_t tc_id; +}; + +#define DPNI_DROP_ENABLE_SHIFT 0 +#define DPNI_DROP_ENABLE_SIZE 1 +#define DPNI_DROP_UNITS_SHIFT 2 +#define DPNI_DROP_UNITS_SIZE 2 + +struct dpni_early_drop { + /* from LSB: enable:1 units:2 */ + uint8_t flags; + uint8_t pad0[3]; + uint32_t pad1; + uint8_t green_drop_probability; + uint8_t pad2[7]; + uint64_t green_max_threshold; + uint64_t green_min_threshold; + uint64_t pad3; + uint8_t yellow_drop_probability; + uint8_t pad4[7]; + uint64_t yellow_max_threshold; + uint64_t yellow_min_threshold; + uint64_t pad5; + uint8_t red_drop_probability; + uint8_t pad6[7]; + uint64_t red_max_threshold; + uint64_t red_min_threshold; +}; + +struct dpni_cmd_early_drop { + uint8_t qtype; + uint8_t tc; + uint8_t pad[6]; + uint64_t early_drop_iova; +}; + +struct dpni_rsp_get_api_version { + uint16_t major; + uint16_t minor; +}; + +struct dpni_cmd_get_taildrop { + uint8_t congestion_point; + uint8_t qtype; + uint8_t tc; + uint8_t index; +}; + +struct dpni_rsp_get_taildrop { + /* cmd word 0 */ + uint64_t pad0; + /* cmd word 1 */ + /* from LSB: enable:1 oal_lo:7 */ + uint8_t enable_oal_lo; + /* from LSB: oal_hi:5 */ + uint8_t oal_hi; + uint8_t units; + uint8_t pad2; + uint32_t threshold; +}; + +#define DPNI_OAL_LO_SHIFT 1 +#define DPNI_OAL_LO_SIZE 7 +#define DPNI_OAL_HI_SHIFT 0 +#define DPNI_OAL_HI_SIZE 5 + +struct dpni_cmd_set_taildrop { + /* cmd word 0 */ + uint8_t congestion_point; + uint8_t qtype; + uint8_t tc; + uint8_t index; + uint32_t pad0; + /* cmd word 1 */ + /* from LSB: enable:1 oal_lo:7 */ + uint8_t enable_oal_lo; + /* from LSB: oal_hi:5 */ + uint8_t oal_hi; + uint8_t units; + uint8_t pad2; + uint32_t threshold; +}; + +struct dpni_tx_confirmation_mode { + uint32_t pad; + uint8_t confirmation_mode; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_CONG_UNITS_SHIFT 4 +#define DPNI_CONG_UNITS_SIZE 2 + +struct dpni_cmd_set_congestion_notification { + uint8_t qtype; + uint8_t tc; + uint8_t pad; + uint8_t congestion_point; + uint8_t cgid; + uint8_t pad2[3]; + uint32_t dest_id; + uint16_t notification_mode; + uint8_t dest_priority; + /* from LSB: dest_type: 4 units:2 */ + uint8_t type_units; + uint64_t message_iova; + uint64_t message_ctx; + uint32_t threshold_entry; + uint32_t threshold_exit; +}; + +struct dpni_cmd_get_congestion_notification { + uint8_t qtype; + uint8_t tc; + uint8_t pad; + uint8_t congestion_point; + uint8_t cgid; +}; + +struct dpni_rsp_get_congestion_notification { + uint64_t pad; + uint32_t dest_id; + uint16_t notification_mode; + uint8_t dest_priority; + /* from LSB: dest_type: 4 units:2 */ + uint8_t type_units; + 
uint64_t message_iova; + uint64_t message_ctx; + uint32_t threshold_entry; + uint32_t threshold_exit; +}; + +struct dpni_cmd_set_opr { + uint8_t pad0; + uint8_t tc_id; + uint8_t index; + uint8_t options; + uint8_t pad1[7]; + uint8_t oloe; + uint8_t oeane; + uint8_t olws; + uint8_t oa; + uint8_t oprrws; +}; + +struct dpni_cmd_get_opr { + uint8_t pad; + uint8_t tc_id; + uint8_t index; +}; + +#define DPNI_RIP_SHIFT 0 +#define DPNI_RIP_SIZE 1 +#define DPNI_OPR_ENABLE_SHIFT 1 +#define DPNI_OPR_ENABLE_SIZE 1 +#define DPNI_TSEQ_NLIS_SHIFT 0 +#define DPNI_TSEQ_NLIS_SIZE 1 +#define DPNI_HSEQ_NLIS_SHIFT 0 +#define DPNI_HSEQ_NLIS_SIZE 1 + +struct dpni_rsp_get_opr { + uint64_t pad0; + /* from LSB: rip:1 enable:1 */ + uint8_t flags; + uint16_t pad1; + uint8_t oloe; + uint8_t oeane; + uint8_t olws; + uint8_t oa; + uint8_t oprrws; + uint16_t nesn; + uint16_t pad8; + uint16_t ndsn; + uint16_t pad2; + uint16_t ea_tseq; + /* only the LSB */ + uint8_t tseq_nlis; + uint8_t pad3; + uint16_t ea_hseq; + /* only the LSB */ + uint8_t hseq_nlis; + uint8_t pad4; + uint16_t ea_hptr; + uint16_t pad5; + uint16_t ea_tptr; + uint16_t pad6; + uint16_t opr_vid; + uint16_t pad7; + uint16_t opr_id; +}; + +struct dpni_cmd_add_custom_tpid { + uint16_t pad; + uint16_t tpid; +}; + +struct dpni_cmd_remove_custom_tpid { + uint16_t pad; + uint16_t tpid; +}; + +struct dpni_rsp_get_custom_tpid { + uint16_t tpid1; + uint16_t tpid2; +}; + +#define DPNI_RX_FS_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_FS_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_fs_dist { + uint16_t dist_size; + uint8_t enable; + uint8_t tc; + uint16_t miss_flow_id; + uint16_t pad1; + uint64_t key_cfg_iova; +}; + +#define DPNI_RX_HASH_DIST_ENABLE_SHIFT 0 +#define DPNI_RX_HASH_DIST_ENABLE_SIZE 1 +struct dpni_cmd_set_rx_hash_dist { + uint16_t dist_size; + uint8_t enable; + uint8_t tc_id; + uint32_t pad; + uint64_t key_cfg_iova; +}; + +#pragma pack(pop) +#endif /* _FSL_DPNI_CMD_H */ diff --git a/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_net.h b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_net.h new file mode 100644 index 000000000..0dc0131bb --- /dev/null +++ b/src/seastar/dpdk/drivers/net/dpaa2/mc/fsl_net.h @@ -0,0 +1,454 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2015 Freescale Semiconductor Inc. 
+ * + */ +#ifndef __FSL_NET_H +#define __FSL_NET_H + +#define LAST_HDR_INDEX 0xFFFFFFFF + +/*****************************************************************************/ +/* Protocol fields */ +/*****************************************************************************/ + +/************************* Ethernet fields *********************************/ +#define NH_FLD_ETH_DA (1) +#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) +#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) +#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) +#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) +#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) +#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) + +#define NH_FLD_ETH_ADDR_SIZE 6 + +/*************************** VLAN fields ***********************************/ +#define NH_FLD_VLAN_VPRI (1) +#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) +#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) +#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) +#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) +#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) + +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ + NH_FLD_VLAN_CFI | \ + NH_FLD_VLAN_VID) + +/************************ IP (generic) fields ******************************/ +#define NH_FLD_IP_VER (1) +#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) +#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) +#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) +#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) +#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) +#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) +#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) +#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) + +#define NH_FLD_IP_PROTO_SIZE 1 + +/***************************** IPV4 fields *********************************/ +#define NH_FLD_IPV4_VER (1) +#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) +#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) +#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) +#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4) +#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) +#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) +#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) +#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) +#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) +#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) +#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) +#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) +#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) +#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) +#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) + +#define NH_FLD_IPV4_ADDR_SIZE 4 +#define NH_FLD_IPV4_PROTO_SIZE 1 + +/***************************** IPV6 fields *********************************/ +#define NH_FLD_IPV6_VER (1) +#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) +#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) +#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) +#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) +#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) +#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) +#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) +#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) + +#define NH_FLD_IPV6_ADDR_SIZE 16 +#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 + +/***************************** ICMP fields *********************************/ +#define NH_FLD_ICMP_TYPE (1) +#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) +#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) +#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) +#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) 
+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) + +#define NH_FLD_ICMP_CODE_SIZE 1 +#define NH_FLD_ICMP_TYPE_SIZE 1 + +/***************************** IGMP fields *********************************/ +#define NH_FLD_IGMP_VERSION (1) +#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) +#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) +#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) +#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) + +/***************************** TCP fields **********************************/ +#define NH_FLD_TCP_PORT_SRC (1) +#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) +#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) +#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) +#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) +#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) +#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) +#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) +#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) +#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) +#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) +#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) + +#define NH_FLD_TCP_PORT_SIZE 2 + +/***************************** UDP fields **********************************/ +#define NH_FLD_UDP_PORT_SRC (1) +#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) +#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) +#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) +#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) + +#define NH_FLD_UDP_PORT_SIZE 2 + +/*************************** UDP-lite fields *******************************/ +#define NH_FLD_UDP_LITE_PORT_SRC (1) +#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) +#define NH_FLD_UDP_LITE_ALL_FIELDS \ + ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) + +#define NH_FLD_UDP_LITE_PORT_SIZE 2 + +/*************************** UDP-encap-ESP fields **************************/ +#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) +#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) +#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) +#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) +#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) +#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) +#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ + ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) + +#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 +#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 + +/***************************** SCTP fields *********************************/ +#define NH_FLD_SCTP_PORT_SRC (1) +#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) +#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) +#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) +#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) + +#define NH_FLD_SCTP_PORT_SIZE 2 + +/***************************** DCCP fields *********************************/ +#define NH_FLD_DCCP_PORT_SRC (1) +#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) +#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) + +#define NH_FLD_DCCP_PORT_SIZE 2 + +/***************************** IPHC fields *********************************/ +#define NH_FLD_IPHC_CID (1) +#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) +#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) +#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) +#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) +#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID 
<< 5) - 1)
+
+/***************************** SCTP fields *********************************/
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
+	((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
+
+/*************************** L2TPV2 fields *********************************/
+#define NH_FLD_L2TPV2_TYPE_BIT (1)
+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
+#define NH_FLD_L2TPV2_ALL_FIELDS \
+	((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
+
+/*************************** L2TPV3 fields *********************************/
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
+	((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
+	((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
+
+/**************************** PPP fields ***********************************/
+#define NH_FLD_PPP_PID (1)
+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
+
+/************************** PPPoE fields ***********************************/
+#define NH_FLD_PPPOE_VER (1)
+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
+
+/************************* PPP-Mux fields **********************************/
+#define NH_FLD_PPPMUX_PID (1)
+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
+
+/*********************** PPP-Mux sub-frame fields **************************/
+#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
+	((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
+
+/*************************** LLC fields ************************************/
+#define NH_FLD_LLC_DSAP (1)
+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
+
+/*************************** NLPID fields **********************************/
+#define NH_FLD_NLPID_NLPID (1)
+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
+
+/*************************** SNAP fields ***********************************/
+#define NH_FLD_SNAP_OUI (1)
+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
+
+/*************************** LLC SNAP fields *******************************/
+#define NH_FLD_LLC_SNAP_TYPE (1)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
+
+#define NH_FLD_ARP_HTYPE (1)
+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
+
+/*************************** RFC2684 fields ********************************/
+#define NH_FLD_RFC2684_LLC (1)
+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
+
+/*************************** User defined fields ***************************/
+#define NH_FLD_USER_DEFINED_SRCPORT (1)
+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS \
+	((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
+
+/*************************** Payload fields ********************************/
+#define NH_FLD_PAYLOAD_BUFFER (1)
+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
+
+/*************************** GRE fields ************************************/
+#define NH_FLD_GRE_TYPE (1)
+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
+
+/*************************** MINENCAP fields *******************************/
+#define NH_FLD_MINENCAP_SRC_IP (1)
+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
+#define NH_FLD_MINENCAP_ALL_FIELDS \
+	((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
+
+/*************************** IPSEC AH fields *******************************/
+#define NH_FLD_IPSEC_AH_SPI (1)
+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
+
+/*************************** IPSEC ESP fields ******************************/
+#define NH_FLD_IPSEC_ESP_SPI (1)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
+
+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
+
+/*************************** MPLS fields ***********************************/
+#define NH_FLD_MPLS_LABEL_STACK (1)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
+	((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
+
+/*************************** MACSEC fields *********************************/
+#define NH_FLD_MACSEC_SECTAG (1)
+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
+
+/*************************** GTP fields ************************************/
+#define NH_FLD_GTP_TEID (1)
+
+/* Protocol options */
+
+/* Ethernet options */
+#define NH_OPT_ETH_BROADCAST 1
+#define NH_OPT_ETH_MULTICAST 2
+#define NH_OPT_ETH_UNICAST 3
+#define NH_OPT_ETH_BPDU 4
+
+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
+/* also applicable for broadcast */
+
+/* VLAN options */
+#define NH_OPT_VLAN_CFI 1
+
+/* IPV4 options */
+#define NH_OPT_IPV4_UNICAST 1
+#define NH_OPT_IPV4_MULTICAST 2
+#define NH_OPT_IPV4_BROADCAST 3
+#define NH_OPT_IPV4_OPTION 4
+#define NH_OPT_IPV4_FRAG 5
+#define NH_OPT_IPV4_INITIAL_FRAG 6
+
+/* IPV6 options */
+#define NH_OPT_IPV6_UNICAST 1
+#define NH_OPT_IPV6_MULTICAST 2
+#define NH_OPT_IPV6_OPTION 3
+#define NH_OPT_IPV6_FRAG 4
+#define NH_OPT_IPV6_INITIAL_FRAG 5
+
+/* General IP options (may be used for any version) */
+#define NH_OPT_IP_FRAG 1
+#define NH_OPT_IP_INITIAL_FRAG 2
+#define NH_OPT_IP_OPTION 3
+
+/* Minenc. options */
+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
+
+/* GRE. options */
+#define NH_OPT_GRE_ROUTING_PRESENT 1
+
+/* TCP options */
+#define NH_OPT_TCP_OPTIONS 1
+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
+#define NH_OPT_TCP_CONTROL_LOW_BITS 3
+
+/* CAPWAP options */
+#define NH_OPT_CAPWAP_DTLS 1
+
+enum net_prot {
+	NET_PROT_NONE = 0,
+	NET_PROT_PAYLOAD,
+	NET_PROT_ETH,
+	NET_PROT_VLAN,
+	NET_PROT_IPV4,
+	NET_PROT_IPV6,
+	NET_PROT_IP,
+	NET_PROT_TCP,
+	NET_PROT_UDP,
+	NET_PROT_UDP_LITE,
+	NET_PROT_IPHC,
+	NET_PROT_SCTP,
+	NET_PROT_SCTP_CHUNK_DATA,
+	NET_PROT_PPPOE,
+	NET_PROT_PPP,
+	NET_PROT_PPPMUX,
+	NET_PROT_PPPMUX_SUBFRM,
+	NET_PROT_L2TPV2,
+	NET_PROT_L2TPV3_CTRL,
+	NET_PROT_L2TPV3_SESS,
+	NET_PROT_LLC,
+	NET_PROT_LLC_SNAP,
+	NET_PROT_NLPID,
+	NET_PROT_SNAP,
+	NET_PROT_MPLS,
+	NET_PROT_IPSEC_AH,
+	NET_PROT_IPSEC_ESP,
+	NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+	NET_PROT_MACSEC,
+	NET_PROT_GRE,
+	NET_PROT_MINENCAP,
+	NET_PROT_DCCP,
+	NET_PROT_ICMP,
+	NET_PROT_IGMP,
+	NET_PROT_ARP,
+	NET_PROT_CAPWAP_DATA,
+	NET_PROT_CAPWAP_CTRL,
+	NET_PROT_RFC2684,
+	NET_PROT_ICMPV6,
+	NET_PROT_FCOE,
+	NET_PROT_FIP,
+	NET_PROT_ISCSI,
+	NET_PROT_GTP,
+	NET_PROT_USER_DEFINED_L2,
+	NET_PROT_USER_DEFINED_L3,
+	NET_PROT_USER_DEFINED_L4,
+	NET_PROT_USER_DEFINED_L5,
+	NET_PROT_USER_DEFINED_SHIM1,
+	NET_PROT_USER_DEFINED_SHIM2,
+
+	NET_PROT_DUMMY_LAST
+};
+
+/*! IEEE8021.Q */
+#define NH_IEEE8021Q_ETYPE 0x8100
+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
+	((((uint32_t)(etype & 0xFFFF)) << 16) | \
+	(((uint32_t)(pcp & 0x07)) << 13) | \
+	(((uint32_t)(dei & 0x01)) << 12) | \
+	(((uint32_t)(vlan_id & 0xFFF))))
+
+#endif /* __FSL_NET_H */
diff --git a/src/seastar/dpdk/drivers/net/dpaa2/meson.build b/src/seastar/dpdk/drivers/net/dpaa2/meson.build
new file mode 100644
index 000000000..a0ea99238
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/dpaa2/meson.build
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+version = 2
+
+if not is_linux
+	build = false
+endif
+
+deps += ['mempool_dpaa2']
+sources = files('base/dpaa2_hw_dpni.c',
+		'dpaa2_mux.c',
+		'dpaa2_ethdev.c',
+		'dpaa2_flow.c',
+		'dpaa2_rxtx.c',
+		'mc/dpkg.c',
+		'mc/dpdmux.c',
+		'mc/dpni.c')
+
+includes += include_directories('base', 'mc')
+
+# depends on fslmc bus which uses experimental API
+allow_experimental_apis = true
+
+install_headers('rte_pmd_dpaa2.h')
diff --git a/src/seastar/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2.h b/src/seastar/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2.h
new file mode 100644
index 000000000..7052d9da9
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ */
+
+#ifndef _RTE_PMD_DPAA2_H
+#define _RTE_PMD_DPAA2_H
+
+/**
+ * @file rte_pmd_dpaa2.h
+ *
+ * NXP DPAA2 PMD-specific functions.
+ *
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include <rte_flow.h>
+
+enum pmd_dpaa2_ts {
+	PMD_DPAA2_DISABLE_TS,
+	PMD_DPAA2_ENABLE_TS
+};
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Enable/disable timestamp updates in the mbuf for LX2160-class devices.
+ * For LS2088/LS1088 devices, timestamps are updated in the mbuf without
+ * calling this API.
+ *
+ * @param pmd_dpaa2_ts
+ *	Enum to enable/disable the timestamp update in the mbuf for LX2160 devices.
+ */
+__rte_experimental
+void rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Create a flow rule to demultiplex Ethernet traffic to separate network
+ * interfaces.
+ *
+ * @param dpdmux_id
+ *	ID of the DPDMUX MC object.
+ * @param[in] pattern
+ *	Pattern specification.
+ * @param[in] actions
+ *	Associated actions.
+ *
+ * @return
+ *	A valid handle in case of success, NULL otherwise.
+ */
+__rte_experimental
+struct rte_flow *
+rte_pmd_dpaa2_mux_flow_create(uint32_t dpdmux_id,
+		struct rte_flow_item *pattern[],
+		struct rte_flow_action *actions[]);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Create a custom hash key based on an offset from the start of the packet
+ * and a size. For example, to hash GRE packets (non-VLAN and without any
+ * extra headers) on the basis of the inner IP header, provide the offset as
+ * 14 (eth) + 20 (IP) + 4 (GRE) + 12 (inner source address offset) = 50 and
+ * the size as 8 bytes.
+ *
+ * @param port_id
+ *	The port identifier of the Ethernet device.
+ * @param offset
+ *	Offset from the start of the packet to be included when calculating
+ *	the hash.
+ * @param size
+ *	Size of the hash input key.
+ *
+ * @return
+ *	- 0 if successful.
+ *	- Negative in case of failure.
+ */
+__rte_experimental
+int
+rte_pmd_dpaa2_set_custom_hash(uint16_t port_id,
+		uint16_t offset,
+		uint8_t size);
+
+#endif /* _RTE_PMD_DPAA2_H */
diff --git a/src/seastar/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/src/seastar/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
new file mode 100644
index 000000000..d1b4cdb23
--- /dev/null
+++ b/src/seastar/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
@@ -0,0 +1,20 @@
+DPDK_17.05 {
+
+	local: *;
+};
+
+DPDK_17.11 {
+	global:
+
+	dpaa2_eth_eventq_attach;
+	dpaa2_eth_eventq_detach;
+
+} DPDK_17.05;
+
+EXPERIMENTAL {
+	global:
+
+	rte_pmd_dpaa2_mux_flow_create;
+	rte_pmd_dpaa2_set_custom_hash;
+	rte_pmd_dpaa2_set_timestamp;
+} DPDK_17.11;
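The NH_IEEE8021Q_HDR macro added in fsl_net.h above packs an 802.1Q TPID and TCI into a single 32-bit word. The following is a minimal, self-contained sketch of what it produces; it is illustrative only and not part of the patch, the macro bodies are copied from the header so the snippet compiles on its own, and the PCP/DEI/VLAN values are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

/* Copies of the fsl_net.h macros shown in the diff above, repeated here
 * only so this sketch is self-contained. */
#define NH_IEEE8021Q_ETYPE 0x8100
#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
	((((uint32_t)(etype & 0xFFFF)) << 16) | \
	(((uint32_t)(pcp & 0x07)) << 13) | \
	(((uint32_t)(dei & 0x01)) << 12) | \
	(((uint32_t)(vlan_id & 0xFFF))))

int main(void)
{
	/* TPID 0x8100, PCP 5, DEI 0, VLAN ID 100:
	 * 0x8100 << 16 | 5 << 13 | 0 << 12 | 100 = 0x8100A064 */
	uint32_t tag = NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 5, 0, 100);

	printf("802.1Q tag word: 0x%08X\n", tag);
	return 0;
}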
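The doxygen comment for rte_pmd_dpaa2_set_custom_hash() above works through the GRE case (offset 14 + 20 + 4 + 12 = 50, size 8). Below is a hedged usage sketch based only on that comment: setup_inner_ip_hash is a hypothetical helper name, the port is assumed to be an already-initialized DPAA2 port inside a running DPDK application, and error handling is minimal.

#include <stdint.h>
#include <stdio.h>

#include <rte_pmd_dpaa2.h>

/* Hash plain GRE traffic (non-VLAN, no extra headers) on the inner IPv4
 * source and destination addresses: 14 (Ethernet) + 20 (outer IPv4) +
 * 4 (GRE) + 12 (source address offset within the inner IPv4 header) = 50,
 * and 8 bytes covers the inner source + destination addresses. */
static int setup_inner_ip_hash(uint16_t port_id)
{
	uint16_t offset = 14 + 20 + 4 + 12;	/* = 50 */
	uint8_t size = 8;
	int ret;

	ret = rte_pmd_dpaa2_set_custom_hash(port_id, offset, size);
	if (ret < 0)
		printf("custom hash setup failed on port %u: %d\n",
		       port_id, ret);
	return ret;
}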