author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 18:24:20 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 18:24:20 +0000
commit    | 483eb2f56657e8e7f419ab1a4fab8dce9ade8609
tree      | e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/spdk/dpdk/drivers/net/dpaa2
parent    | Initial commit.
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/spdk/dpdk/drivers/net/dpaa2')
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/Makefile                    |   44
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c        |  333
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h  |  251
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c              | 2061
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h              |  134
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h            |   41
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c                |  842
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c                   |   74
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c                   | 1943
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h               |  203
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h               | 1135
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h           |  605
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h                |  454
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/meson.build                 |   18
-rw-r--r-- | src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map   |   12
15 files changed, 8150 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/Makefile b/src/spdk/dpdk/drivers/net/dpaa2/Makefile
new file mode 100644
index 00000000..9b0b1433
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2016 NXP
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_version.map
+
+# library version
+LIBABIVER := 1
+
+# depends on fslmc bus which uses experimental API
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpkg.c
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
new file mode 100644
index 00000000..713a41bf
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+
+#include <dpaa2_pmd_logs.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+
+#include "../dpaa2_ethdev.h"
+
+static int
+dpaa2_distset_to_dpkg_profile_cfg(
+		uint64_t req_dist_set,
+		struct dpkg_profile_cfg *kg_cfg);
+
+int
+dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+		      uint64_t req_dist_set)
+{
+	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+	struct fsl_mc_io *dpni = priv->hw;
+	struct dpni_rx_tc_dist_cfg tc_cfg;
+	struct dpkg_profile_cfg kg_cfg;
+	void *p_params;
+	int ret, tc_index = 0;
+
+	p_params = rte_malloc(
+		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+	if (!p_params) {
+		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
+		return -ENOMEM;
+	}
+	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+
+	ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
+	if (ret) {
+		DPAA2_PMD_ERR("Given RSS Hash (%" PRIx64 ") not supported",
+			      req_dist_set);
+		rte_free(p_params);
+		return ret;
+	}
+	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+	tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
+	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
+	if (ret) {
+		DPAA2_PMD_ERR("Unable to prepare extract parameters");
+		rte_free(p_params);
+		return ret;
+	}
+
+	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+				  &tc_cfg);
+	rte_free(p_params);
+	if (ret) {
+		DPAA2_PMD_ERR(
+			"Setting distribution for Rx failed with err: %d",
+			ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int dpaa2_remove_flow_dist(
+	struct rte_eth_dev *eth_dev,
+	uint8_t tc_index)
+{
+	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+	struct fsl_mc_io *dpni = priv->hw;
+	struct dpni_rx_tc_dist_cfg tc_cfg;
+	struct dpkg_profile_cfg kg_cfg;
+	void *p_params;
+	int ret;
+
+	p_params = rte_malloc(
+		NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+	if (!p_params) {
+		DPAA2_PMD_ERR("Unable to allocate flow-dist parameters");
+		return -ENOMEM;
+	}
+	memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+	memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+	kg_cfg.num_extracts = 0;
+	tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+	tc_cfg.dist_size = 0;
+	tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
+
+	ret = dpkg_prepare_key_cfg(&kg_cfg, p_params);
+	if (ret) {
+		DPAA2_PMD_ERR("Unable to prepare extract parameters");
+		rte_free(p_params);
+		return ret;
+	}
+
+	ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+				  &tc_cfg);
+	rte_free(p_params);
+	if (ret)
+		DPAA2_PMD_ERR(
+			"Setting distribution for Rx failed with err: %d",
+			ret);
+	return ret;
+}
+
+static int
+dpaa2_distset_to_dpkg_profile_cfg(
+		uint64_t req_dist_set,
+		struct dpkg_profile_cfg *kg_cfg)
+{
+	uint32_t loop = 0, i = 0, dist_field = 0;
+	int l2_configured = 0, l3_configured = 0;
+	int l4_configured = 0, sctp_configured = 0;
+
+	memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
+	while (req_dist_set) {
+		if (req_dist_set % 2 != 0) {
+			dist_field = 1U << loop;
+			switch (dist_field) {
+			case ETH_RSS_L2_PAYLOAD:
+
+				if (l2_configured)
+					break;
+				l2_configured = 1;
+
+				kg_cfg->extracts[i].extract.from_hdr.prot =
+					NET_PROT_ETH;
+				kg_cfg->extracts[i].extract.from_hdr.field =
+					NH_FLD_ETH_TYPE;
+				kg_cfg->extracts[i].type =
+					DPKG_EXTRACT_FROM_HDR;
+				kg_cfg->extracts[i].extract.from_hdr.type =
+					DPKG_FULL_FIELD;
+				i++;
+				break;
+
+			case ETH_RSS_IPV4:
+			case ETH_RSS_FRAG_IPV4:
+			case ETH_RSS_NONFRAG_IPV4_OTHER:
+			case ETH_RSS_IPV6:
+			case ETH_RSS_FRAG_IPV6:
+			case ETH_RSS_NONFRAG_IPV6_OTHER:
+			case ETH_RSS_IPV6_EX:
+
+				if (l3_configured)
+					break;
+				l3_configured = 1;
+
+				kg_cfg->extracts[i].extract.from_hdr.prot =
+					NET_PROT_IP;
+				kg_cfg->extracts[i].extract.from_hdr.field =
+					NH_FLD_IP_SRC;
+				kg_cfg->extracts[i].type =
+					DPKG_EXTRACT_FROM_HDR;
+				kg_cfg->extracts[i].extract.from_hdr.type =
+					DPKG_FULL_FIELD;
+				i++;
+
+				kg_cfg->extracts[i].extract.from_hdr.prot =
+					NET_PROT_IP;
+				kg_cfg->extracts[i].extract.from_hdr.field =
+					NH_FLD_IP_DST;
+				kg_cfg->extracts[i].type =
+					DPKG_EXTRACT_FROM_HDR;
+				kg_cfg->extracts[i].extract.from_hdr.type =
+					DPKG_FULL_FIELD;
+				i++;
+
+				kg_cfg->extracts[i].extract.from_hdr.prot =
+					NET_PROT_IP;
+				kg_cfg->extracts[i].extract.from_hdr.field =
+					NH_FLD_IP_PROTO;
+				kg_cfg->extracts[i].type =
+					DPKG_EXTRACT_FROM_HDR;
+				kg_cfg->extracts[i].extract.from_hdr.type =
+					DPKG_FULL_FIELD;
+				i++;
+				break;
+
+			case ETH_RSS_NONFRAG_IPV4_TCP:
+			case ETH_RSS_NONFRAG_IPV6_TCP:
+			case ETH_RSS_NONFRAG_IPV4_UDP:
+			case ETH_RSS_NONFRAG_IPV6_UDP:
+			case ETH_RSS_IPV6_TCP_EX:
+			case ETH_RSS_IPV6_UDP_EX:
+
+				if (l4_configured)
+					break;
+				l4_configured = 1;
+
+				kg_cfg->extracts[i].extract.from_hdr.prot =
+					NET_PROT_TCP;
+				kg_cfg->extracts[i].extract.from_hdr.field =
+					NH_FLD_TCP_PORT_SRC;
+				kg_cfg->extracts[i].type =
+					DPKG_EXTRACT_FROM_HDR;
+				kg_cfg->extracts[i].extract.from_hdr.type =
+					DPKG_FULL_FIELD;
+				i++;
+
+				kg_cfg->extracts[i].extract.from_hdr.prot =
+					NET_PROT_TCP;
+				kg_cfg->extracts[i].extract.from_hdr.field =
+					NH_FLD_TCP_PORT_DST;
+				kg_cfg->extracts[i].type =
+					DPKG_EXTRACT_FROM_HDR;
+				kg_cfg->extracts[i].extract.from_hdr.type =
+					DPKG_FULL_FIELD;
+				i++;
+				break;
+
+			case ETH_RSS_NONFRAG_IPV4_SCTP:
+			case ETH_RSS_NONFRAG_IPV6_SCTP:
+
+				if (sctp_configured)
+					break;
+				sctp_configured = 1;
+
+				kg_cfg->extracts[i].extract.from_hdr.prot =
+					NET_PROT_SCTP;
+				kg_cfg->extracts[i].extract.from_hdr.field =
+					NH_FLD_SCTP_PORT_SRC;
+				kg_cfg->extracts[i].type =
+					DPKG_EXTRACT_FROM_HDR;
+				kg_cfg->extracts[i].extract.from_hdr.type =
+					DPKG_FULL_FIELD;
+				i++;
+
+				kg_cfg->extracts[i].extract.from_hdr.prot =
+					NET_PROT_SCTP;
+				kg_cfg->extracts[i].extract.from_hdr.field =
+					NH_FLD_SCTP_PORT_DST;
+				kg_cfg->extracts[i].type =
+					DPKG_EXTRACT_FROM_HDR;
+				kg_cfg->extracts[i].extract.from_hdr.type =
+					DPKG_FULL_FIELD;
+				i++;
+				break;
+
+			default:
+				DPAA2_PMD_WARN(
+					"Unsupported flow dist option %x",
+					dist_field);
+				return -EINVAL;
+			}
+		}
+		req_dist_set = req_dist_set >> 1;
+		loop++;
+	}
+	kg_cfg->num_extracts = i;
+	return 0;
+}
+
+int
+dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
+		     void *blist)
+{
+	/* Function to attach a DPNI with a buffer pool list. Buffer pool list
+	 * handle is passed in blist.
+	 */
+	int32_t retcode;
+	struct fsl_mc_io *dpni = priv->hw;
+	struct dpni_pools_cfg bpool_cfg;
+	struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
+	struct dpni_buffer_layout layout;
+	int tot_size;
+
+	/* ... rx buffer layout ...
+	 * Check alignment for buffer layouts first
+	 */
+	tot_size = RTE_PKTMBUF_HEADROOM;
+	tot_size = RTE_ALIGN_CEIL(tot_size, DPAA2_PACKET_LAYOUT_ALIGN);
+
+	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+	layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+			 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+			 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+			 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
+			 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+
+	layout.pass_frame_status = 1;
+	layout.private_data_size = DPAA2_FD_PTA_SIZE;
+	layout.pass_parser_result = 1;
+	layout.data_align = DPAA2_PACKET_LAYOUT_ALIGN;
+	layout.data_head_room = tot_size - DPAA2_FD_PTA_SIZE -
+				DPAA2_MBUF_HW_ANNOTATION;
+	retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
+					 DPNI_QUEUE_RX, &layout);
+	if (retcode) {
+		DPAA2_PMD_ERR("Error configuring buffer pool Rx layout (%d)",
+			      retcode);
+		return retcode;
+	}
+
+	/* Attach buffer pool to the network interface as described by
+	 * the user.
+	 */
+	bpool_cfg.num_dpbp = 1;
+	bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
+	bpool_cfg.pools[0].backup_pool = 0;
+	bpool_cfg.pools[0].buffer_size = RTE_ALIGN_CEIL(bp_list->buf_pool.size,
+						DPAA2_PACKET_LAYOUT_ALIGN);
+	bpool_cfg.pools[0].priority_mask = 0;
+
+	retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
+	if (retcode != 0) {
+		DPAA2_PMD_ERR("Error configuring buffer pool on interface."
+			      " bpid = %d error code = %d",
+			      bpool_cfg.pools[0].dpbp_id, retcode);
+		return retcode;
+	}
+
+	priv->bp_list = bp_list;
+	return 0;
+}
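The extract profile built above is driven entirely by the RSS hash bits an application hands to ethdev at configure time; dpaa2_eth_dev_configure() in dpaa2_ethdev.c (further down in this commit) forwards eth_conf->rx_adv_conf.rss_conf.rss_hf straight into dpaa2_setup_flow_dist(). A minimal caller-side sketch of that path, assuming the DPDK 18.xx ethdev API of this tree; the function name and port_id are illustrative and not part of the commit:

	#include <rte_ethdev.h>

	/* Illustrative sketch: request RSS over IP addresses and TCP/UDP
	 * ports, which makes dpaa2_distset_to_dpkg_profile_cfg() program
	 * the NET_PROT_IP and NET_PROT_TCP extracts shown above. */
	static int
	example_enable_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
	{
		struct rte_eth_conf conf = { 0 };

		conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
		conf.rx_adv_conf.rss_conf.rss_hf =
			ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;

		/* dpaa2_eth_dev_configure() calls dpaa2_setup_flow_dist()
		 * with this rss_hf value; dist_size above then spreads the
		 * flows over nb_rxq queues. */
		return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
	}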
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
new file mode 100644
index 00000000..779cdf2b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP
+ *
+ */
+
+/**
+ * @file
+ *
+ * DPNI packet parse results - implementation internal
+ */
+
+#ifndef _DPAA2_HW_DPNI_ANNOT_H_
+#define _DPAA2_HW_DPNI_ANNOT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Annotation valid bits in FD FRC */
+#define DPAA2_FD_FRC_FASV 0x8000
+#define DPAA2_FD_FRC_FAEADV 0x4000
+#define DPAA2_FD_FRC_FAPRV 0x2000
+#define DPAA2_FD_FRC_FAIADV 0x1000
+#define DPAA2_FD_FRC_FASWOV 0x0800
+#define DPAA2_FD_FRC_FAICFDV 0x0400
+
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL 0x00020000	/* ASAL = 128 */
+#define DPAA2_FD_CTRL_PTA 0x00800000
+#define DPAA2_FD_CTRL_PTV1 0x00400000
+
+/* Frame annotation status */
+struct dpaa2_fas {
+	uint8_t reserved;
+	uint8_t ppid;
+	__le16 ifpid;
+	__le32 status;
+} __attribute__((__packed__));
+
+/**
+ * HW Packet Annotation Register structures
+ */
+struct dpaa2_annot_hdr {
+	/**< word1: Frame Annotation Status (8 bytes)*/
+	uint64_t word1;
+
+	/**< word2: Time Stamp (8 bytes)*/
+	uint64_t word2;
+
+	/**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/
+	uint64_t word3;
+
+	/**< word4: Frame Annotation Flags-FAF (8 bytes) */
+	uint64_t word4;
+
+	/**< word5:
+	 * ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset +
+	 * LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n +
+	 * LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
+	 */
+	uint64_t word5;
+
+	/**< word6:
+	 * PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1
+	 * + IPOffset_norMInEncapO + GREOffset + L4Offset +
+	 * GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
+	 */
+	uint64_t word6;
+
+	/**< word7:
+	 * RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset
+	 * + IPv6FragOffset + GrossRunningSum
+	 * + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes)
+	 */
+	uint64_t word7;
+
+	/**< word8:
+	 * ParseErrorcode + Soft Parsing Context (1 + 7 bytes)
+	 */
+	uint64_t word8;
+};
+
+/**
+ * Internal Macros to get/set Packet annotation header
+ */
+
+/** General Macro to define a particular bit position*/
+#define BIT_POS(x) ((uint64_t)1 << ((x)))
+/** Set a bit in the variable */
+#define BIT_SET_AT_POS(var, pos) ((var) |= (pos))
+/** Reset the bit in the variable */
+#define BIT_RESET_AT_POS(var, pos) ((var) &= ~(pos))
+/** Check the bit is set in the variable */
+#define BIT_ISSET_AT_POS(var, pos) (((var) & (pos)) ? 1 : 0)
+/**
+ * Macros to define bit position in word3
+ */
+#define NEXT_HDR(var) ((uint64_t)(var) & 0xFFFF000000000000)
+#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16)
+#define FAF_EXTN_RESERVED(var) ((uint64_t)(var) & 0x00007FFF00000000)
+#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)(var) & 0x00000000FF000000)
+#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23)
+#define PARSING_ERROR BIT_POS(22)
+#define L2_ETH_MAC_PRESENT BIT_POS(21)
+#define L2_ETH_MAC_UNICAST BIT_POS(20)
+#define L2_ETH_MAC_MULTICAST BIT_POS(19)
+#define L2_ETH_MAC_BROADCAST BIT_POS(18)
+#define L2_ETH_FRAME_IS_BPDU BIT_POS(17)
+#define L2_ETH_FCOE_PRESENT BIT_POS(16)
+#define L2_ETH_FIP_PRESENT BIT_POS(15)
+#define L2_ETH_PARSING_ERROR BIT_POS(14)
+#define L2_LLC_SNAP_PRESENT BIT_POS(13)
+#define L2_UNKNOWN_LLC_OUI BIT_POS(12)
+#define L2_LLC_SNAP_ERROR BIT_POS(11)
+#define L2_VLAN_1_PRESENT BIT_POS(10)
+#define L2_VLAN_N_PRESENT BIT_POS(9)
+#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8)
+#define L2_VLAN_PARSING_ERROR BIT_POS(7)
+#define L2_PPPOE_PPP_PRESENT BIT_POS(6)
+#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5)
+#define L2_MPLS_1_PRESENT BIT_POS(4)
+#define L2_MPLS_N_PRESENT BIT_POS(3)
+#define L2_MPLS_PARSING_ERROR BIT_POS(2)
+#define L2_ARP_PRESENT BIT_POS(1)
+#define L2_ARP_PARSING_ERROR BIT_POS(0)
+/**
+ * Macros to define bit position in word4
+ */
+#define L2_UNKNOWN_PROTOCOL BIT_POS(63)
+#define L2_SOFT_PARSING_ERROR BIT_POS(62)
+#define L3_IPV4_1_PRESENT BIT_POS(61)
+#define L3_IPV4_1_UNICAST BIT_POS(60)
+#define L3_IPV4_1_MULTICAST BIT_POS(59)
+#define L3_IPV4_1_BROADCAST BIT_POS(58)
+#define L3_IPV4_N_PRESENT BIT_POS(57)
+#define L3_IPV4_N_UNICAST BIT_POS(56)
+#define L3_IPV4_N_MULTICAST BIT_POS(55)
+#define L3_IPV4_N_BROADCAST BIT_POS(54)
+#define L3_IPV6_1_PRESENT BIT_POS(53)
+#define L3_IPV6_1_UNICAST BIT_POS(52)
+#define L3_IPV6_1_MULTICAST BIT_POS(51)
+#define L3_IPV6_N_PRESENT BIT_POS(50)
+#define L3_IPV6_N_UNICAST BIT_POS(49)
+#define L3_IPV6_N_MULTICAST BIT_POS(48)
+#define L3_IP_1_OPT_PRESENT BIT_POS(47)
+#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46)
+#define L3_IP_1_MORE_FRAGMENT BIT_POS(45)
+#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44)
+#define L3_IP_1_PARSING_ERROR BIT_POS(43)
+#define L3_IP_N_OPT_PRESENT BIT_POS(42)
+#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41)
+#define L3_IP_N_MORE_FRAGMENT BIT_POS(40)
+#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39)
+#define L3_PROTO_ICMP_PRESENT BIT_POS(38)
+#define L3_PROTO_IGMP_PRESENT BIT_POS(37)
+#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36)
+#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35)
+#define L3_IP_N_PARSING_ERROR BIT_POS(34)
+#define L3_MIN_ENCAP_PRESENT BIT_POS(33)
+#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32)
+#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31)
+#define L3_PROTO_GRE_PRESENT BIT_POS(30)
+#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29)
+#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28)
+#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27)
+#define L3_SOFT_PARSING_ERROR BIT_POS(26)
+#define L3_PROTO_UDP_PRESENT BIT_POS(25)
+#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24)
+#define L3_PROTO_TCP_PRESENT BIT_POS(23)
+#define L3_PROTO_TCP_OPT_PRESENT BIT_POS(22)
+#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21)
+#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20)
+#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19)
+#define L3_PROTO_IPSEC_PRESENT BIT_POS(18)
+#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17)
+#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16)
+#define L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15)
+#define L3_PROTO_SCTP_PRESENT BIT_POS(14)
+#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13)
+#define L3_PROTO_DCCP_PRESENT BIT_POS(12)
+#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11)
+#define L4_UNKNOWN_PROTOCOL BIT_POS(10)
+#define L4_SOFT_PARSING_ERROR BIT_POS(9)
+#define L3_PROTO_GTP_PRESENT BIT_POS(8)
+#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7)
+#define L3_PROTO_ESP_PRESENT BIT_POS(6)
+#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5)
+#define L3_PROTO_ISCSI_PRESENT BIT_POS(4)
+#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3)
+#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2)
+#define L5_SOFT_PARSING_ERROR BIT_POS(1)
+#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0)
+
+#define DPAA2_L3_IPv4 (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \
+	L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv6 (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
+	L3_IP_1_UNKNOWN_PROTOCOL | L3_IP_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv4_TCP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \
+	L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \
+	L4_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv4_UDP (L3_IPV4_1_PRESENT | L3_IPV4_1_UNICAST | \
+	L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv6_TCP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
+	L3_PROTO_TCP_PRESENT | L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT | \
+	L4_UNKNOWN_PROTOCOL)
+
+#define DPAA2_L3_IPv6_UDP (L3_IPV6_1_PRESENT | L3_IPV6_1_UNICAST | \
+	L3_PROTO_UDP_PRESENT | L4_UNKNOWN_PROTOCOL)
+
+/* Debug frame, otherwise supposed to be discarded */
+#define DPAA2_ETH_FAS_DISC 0x80000000
+/* MACSEC frame */
+#define DPAA2_ETH_FAS_MS 0x40000000
+#define DPAA2_ETH_FAS_PTP 0x08000000
+/* Ethernet multicast frame */
+#define DPAA2_ETH_FAS_MC 0x04000000
+/* Ethernet broadcast frame */
+#define DPAA2_ETH_FAS_BC 0x02000000
+#define DPAA2_ETH_FAS_KSE 0x00040000
+#define DPAA2_ETH_FAS_EOFHE 0x00020000
+#define DPAA2_ETH_FAS_MNLE 0x00010000
+#define DPAA2_ETH_FAS_TIDE 0x00008000
+#define DPAA2_ETH_FAS_PIEE 0x00004000
+/* Frame length error */
+#define DPAA2_ETH_FAS_FLE 0x00002000
+/* Frame physical error */
+#define DPAA2_ETH_FAS_FPE 0x00001000
+#define DPAA2_ETH_FAS_PTE 0x00000080
+#define DPAA2_ETH_FAS_ISP 0x00000040
+#define DPAA2_ETH_FAS_PHE 0x00000020
+#define DPAA2_ETH_FAS_BLE 0x00000010
+/* L3 csum validation performed */
+#define DPAA2_ETH_FAS_L3CV 0x00000008
+/* L3 csum error */
+#define DPAA2_ETH_FAS_L3CE 0x00000004
+/* L4 csum validation performed */
+#define DPAA2_ETH_FAS_L4CV 0x00000002
+/* L4 csum error */
+#define DPAA2_ETH_FAS_L4CE 0x00000001
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
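These status words are consumed in the fast path: dpaa2_rxtx.c, also added by this commit, tests word3/word4 against the masks above to derive the mbuf packet type. A minimal sketch of the idiom, assuming <rte_mbuf_ptype.h> is available and that annot points at the hardware annotation area; the helper name is hypothetical, not part of the commit:

	#include <rte_mbuf_ptype.h>

	/* Illustrative only: derive a coarse packet type from the
	 * annotation words (word3 carries L2 parse results, word4
	 * carries L3/L4 results). */
	static inline uint32_t
	example_parse_ptype(const struct dpaa2_annot_hdr *annot)
	{
		if (BIT_ISSET_AT_POS(annot->word3, L2_ARP_PRESENT))
			return RTE_PTYPE_L2_ETHER_ARP;
		if (BIT_ISSET_AT_POS(annot->word4, L3_IPV4_1_PRESENT))
			return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annot->word4, L3_IPV6_1_PRESENT))
			return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		return RTE_PTYPE_UNKNOWN;
	}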
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c
new file mode 100644
index 00000000..c5047367
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -0,0 +1,2061 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2016 NXP + * + */ + +#include <time.h> +#include <net/if.h> + +#include <rte_mbuf.h> +#include <rte_ethdev_driver.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_string_fns.h> +#include <rte_cycles.h> +#include <rte_kvargs.h> +#include <rte_dev.h> +#include <rte_fslmc.h> + +#include "dpaa2_pmd_logs.h" +#include <fslmc_vfio.h> +#include <dpaa2_hw_pvt.h> +#include <dpaa2_hw_mempool.h> +#include <dpaa2_hw_dpio.h> +#include <mc/fsl_dpmng.h> +#include "dpaa2_ethdev.h" +#include <fsl_qbman_debug.h> + +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_JUMBO_FRAME; + +/* Rx offloads which cannot be disabled */ +static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_CRC_STRIP | + DEV_RX_OFFLOAD_SCATTER; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup = + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + +/* Tx offloads which cannot be disabled */ +static uint64_t dev_tx_offloads_nodis = + DEV_TX_OFFLOAD_MULTI_SEGS | + DEV_TX_OFFLOAD_MT_LOCKFREE | + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + +struct rte_dpaa2_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + uint8_t page_id; /* dpni statistics page id */ + uint8_t stats_id; /* stats id in the given page */ +}; + +static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = { + {"ingress_multicast_frames", 0, 2}, + {"ingress_multicast_bytes", 0, 3}, + {"ingress_broadcast_frames", 0, 4}, + {"ingress_broadcast_bytes", 0, 5}, + {"egress_multicast_frames", 1, 2}, + {"egress_multicast_bytes", 1, 3}, + {"egress_broadcast_frames", 1, 4}, + {"egress_broadcast_bytes", 1, 5}, + {"ingress_filtered_frames", 2, 0}, + {"ingress_discarded_frames", 2, 1}, + {"ingress_nobuffer_discards", 2, 2}, + {"egress_discarded_frames", 2, 3}, + {"egress_confirmed_frames", 2, 4}, +}; + +static struct rte_dpaa2_driver rte_dpaa2_pmd; +static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); +static int dpaa2_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); +static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); +static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + +int dpaa2_logtype_pmd; + +static int +dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -1; + } + + if (on) + ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, + priv->token, vlan_id); + else + ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, + priv->token, vlan_id); + + if (ret < 0) + DPAA2_PMD_ERR("ret = %d Unable to add/rem vlan %d hwid =%d", + ret, vlan_id, priv->hw_id); + + return ret; +} + +static int +dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (mask & ETH_VLAN_FILTER_MASK) { + /* VLAN Filter not avaialble */ + if (!priv->max_vlan_filters) { + DPAA2_PMD_INFO("VLAN filter not available"); + goto next_mask; + } + + if 
(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) + ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, + priv->token, true); + else + ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, + priv->token, false); + if (ret < 0) + DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret); + } +next_mask: + if (mask & ETH_VLAN_EXTEND_MASK) { + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) + DPAA2_PMD_INFO("VLAN extend offload not supported"); + } + + return 0; +} + +static int +dpaa2_fw_version_get(struct rte_eth_dev *dev, + char *fw_version, + size_t fw_size) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct mc_soc_version mc_plat_info = {0}; + struct mc_version mc_ver_info = {0}; + + PMD_INIT_FUNC_TRACE(); + + if (mc_get_soc_version(dpni, CMD_PRI_LOW, &mc_plat_info)) + DPAA2_PMD_WARN("\tmc_get_soc_version failed"); + + if (mc_get_version(dpni, CMD_PRI_LOW, &mc_ver_info)) + DPAA2_PMD_WARN("\tmc_get_version failed"); + + ret = snprintf(fw_version, fw_size, + "%x-%d.%d.%d", + mc_plat_info.svr, + mc_ver_info.major, + mc_ver_info.minor, + mc_ver_info.revision); + + ret += 1; /* add the size of '\0' */ + if (fw_size < (uint32_t)ret) + return ret; + else + return 0; +} + +static void +dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + dev_info->if_index = priv->hw_id; + + dev_info->max_mac_addrs = priv->max_mac_filters; + dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; + dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; + dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; + dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; + dev_info->rx_offload_capa = dev_rx_offloads_sup | + dev_rx_offloads_nodis; + dev_info->tx_offload_capa = dev_tx_offloads_sup | + dev_tx_offloads_nodis; + dev_info->speed_capa = ETH_LINK_SPEED_1G | + ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_10G; + + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vfs = 0; + dev_info->max_vmdq_pools = ETH_16_POOLS; + dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL; +} + +static int +dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + uint16_t dist_idx; + uint32_t vq_id; + struct dpaa2_queue *mc_q, *mcq; + uint32_t tot_queues; + int i; + struct dpaa2_queue *dpaa2_q; + + PMD_INIT_FUNC_TRACE(); + + tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; + mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, + RTE_CACHE_LINE_SIZE); + if (!mc_q) { + DPAA2_PMD_ERR("Memory allocation failed for rx/tx queues"); + return -1; + } + + for (i = 0; i < priv->nb_rx_queues; i++) { + mc_q->dev = dev; + priv->rx_vq[i] = mc_q++; + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_q->q_storage = rte_malloc("dq_storage", + sizeof(struct queue_storage_info_t), + RTE_CACHE_LINE_SIZE); + if (!dpaa2_q->q_storage) + goto fail; + + memset(dpaa2_q->q_storage, 0, + sizeof(struct queue_storage_info_t)); + if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) + goto fail; + } + + for (i = 0; i < priv->nb_tx_queues; i++) { + mc_q->dev = dev; + mc_q->flow_id = 0xffff; + priv->tx_vq[i] = mc_q++; + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + dpaa2_q->cscn = rte_malloc(NULL, + sizeof(struct qbman_result), 16); + if (!dpaa2_q->cscn) + goto fail_tx; + } + + vq_id = 0; + for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { + mcq = (struct dpaa2_queue 
*)priv->rx_vq[vq_id]; + mcq->tc_index = DPAA2_DEF_TC; + mcq->flow_id = dist_idx; + vq_id++; + } + + return 0; +fail_tx: + i -= 1; + while (i >= 0) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + rte_free(dpaa2_q->cscn); + priv->tx_vq[i--] = NULL; + } + i = priv->nb_rx_queues; +fail: + i -= 1; + mc_q = priv->rx_vq[0]; + while (i >= 0) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_free_dq_storage(dpaa2_q->q_storage); + rte_free(dpaa2_q->q_storage); + priv->rx_vq[i--] = NULL; + } + rte_free(mc_q); + return -1; +} + +static int +dpaa2_eth_dev_configure(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = priv->hw; + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + uint64_t rx_offloads = eth_conf->rxmode.offloads; + uint64_t tx_offloads = eth_conf->txmode.offloads; + int rx_l3_csum_offload = false; + int rx_l4_csum_offload = false; + int tx_l3_csum_offload = false; + int tx_l4_csum_offload = false; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Rx offloads validation */ + if (dev_rx_offloads_nodis & ~rx_offloads) { + DPAA2_PMD_WARN( + "Rx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + rx_offloads, dev_rx_offloads_nodis); + } + + /* Tx offloads validation */ + if (dev_tx_offloads_nodis & ~tx_offloads) { + DPAA2_PMD_WARN( + "Tx offloads non configurable - requested 0x%" PRIx64 + " ignored 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { + ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, + priv->token, eth_conf->rxmode.max_rx_pkt_len); + if (ret) { + DPAA2_PMD_ERR( + "Unable to set mtu. check config"); + return ret; + } + } else { + return -1; + } + } + + if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { + ret = dpaa2_setup_flow_dist(dev, + eth_conf->rx_adv_conf.rss_conf.rss_hf); + if (ret) { + DPAA2_PMD_ERR("Unable to set flow distribution." + "Check queue config"); + return ret; + } + } + + if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rx_l3_csum_offload = true; + + if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || + (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM)) + rx_l4_csum_offload = true; + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); + return ret; + } + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); + return ret; + } + + if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + tx_l3_csum_offload = true; + + if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) + tx_l4_csum_offload = true; + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); + return ret; + } + + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); + if (ret) { + DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); + return ret; + } + + /* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in + * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC] + * to 0 for LS2 in the hardware thus disabling data/annotation + * stashing. 
For LX2 this is fixed in hardware and thus hash result and + * parse results can be received in FD using this option. + */ + if (dpaa2_svr_family == SVR_LX2160A) { + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, + DPNI_FLCTYPE_HASH, true); + if (ret) { + DPAA2_PMD_ERR("Error setting FLCTYPE: Err = %d", ret); + return ret; + } + } + + dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); + + /* update the current status */ + dpaa2_dev_link_update(dev, 0); + + return 0; +} + +/* Function to setup RX flow information. It contains traffic class ID, + * flow ID, destination configuration etc. + */ +static int +dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_rxconf *rx_conf __rte_unused, + struct rte_mempool *mb_pool) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct dpaa2_queue *dpaa2_q; + struct dpni_queue cfg; + uint8_t options = 0; + uint8_t flow_id; + uint32_t bpid; + int ret; + + PMD_INIT_FUNC_TRACE(); + + DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", + dev, rx_queue_id, mb_pool, rx_conf); + + if (!priv->bp_list || priv->bp_list->mp != mb_pool) { + bpid = mempool_to_bpid(mb_pool); + ret = dpaa2_attach_bp_list(priv, + rte_dpaa2_bpid_info[bpid].bp_list); + if (ret) + return ret; + } + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; + dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ + + /*Get the flow id from given VQ id*/ + flow_id = rx_queue_id % priv->nb_rx_queues; + memset(&cfg, 0, sizeof(struct dpni_queue)); + + options = options | DPNI_QUEUE_OPT_USER_CTX; + cfg.user_context = (size_t)(dpaa2_q); + + /*if ls2088 or rev2 device, enable the stashing */ + + if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { + options |= DPNI_QUEUE_OPT_FLC; + cfg.flc.stash_control = true; + cfg.flc.value &= 0xFFFFFFFFFFFFFFC0; + /* 00 00 00 - last 6 bit represent annotation, context stashing, + * data stashing setting 01 01 00 (0x14) + * (in following order ->DS AS CS) + * to enable 1 line data, 1 line annotation. + * For LX2, this setting should be 01 00 00 (0x10) + */ + if ((dpaa2_svr_family & 0xffff0000) == SVR_LX2160A) + cfg.flc.value |= 0x10; + else + cfg.flc.value |= 0x14; + } + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX, + dpaa2_q->tc_index, flow_id, options, &cfg); + if (ret) { + DPAA2_PMD_ERR("Error in setting the rx flow: = %d", ret); + return -1; + } + + if (!(priv->flags & DPAA2_RX_TAILDROP_OFF)) { + struct dpni_taildrop taildrop; + + taildrop.enable = 1; + /*enabling per rx queue congestion control */ + taildrop.threshold = CONG_THRESHOLD_RX_Q; + taildrop.units = DPNI_CONGESTION_UNIT_BYTES; + taildrop.oal = CONG_RX_OAL; + DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d", + rx_queue_id); + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_QUEUE, DPNI_QUEUE_RX, + dpaa2_q->tc_index, flow_id, &taildrop); + if (ret) { + DPAA2_PMD_ERR("Error in setting taildrop. 
err=(%d)", + ret); + return -1; + } + } + + dev->data->rx_queues[rx_queue_id] = dpaa2_q; + return 0; +} + +static int +dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc __rte_unused, + unsigned int socket_id __rte_unused, + const struct rte_eth_txconf *tx_conf __rte_unused) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) + priv->tx_vq[tx_queue_id]; + struct fsl_mc_io *dpni = priv->hw; + struct dpni_queue tx_conf_cfg; + struct dpni_queue tx_flow_cfg; + uint8_t options = 0, flow_id; + uint32_t tc_id; + int ret; + + PMD_INIT_FUNC_TRACE(); + + /* Return if queue already configured */ + if (dpaa2_q->flow_id != 0xffff) { + dev->data->tx_queues[tx_queue_id] = dpaa2_q; + return 0; + } + + memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue)); + memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue)); + + tc_id = tx_queue_id; + flow_id = 0; + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, + tc_id, flow_id, options, &tx_flow_cfg); + if (ret) { + DPAA2_PMD_ERR("Error in setting the tx flow: " + "tc_id=%d, flow=%d err=%d", + tc_id, flow_id, ret); + return -1; + } + + dpaa2_q->flow_id = flow_id; + + if (tx_queue_id == 0) { + /*Set tx-conf and error configuration*/ + ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, + priv->token, + DPNI_CONF_DISABLE); + if (ret) { + DPAA2_PMD_ERR("Error in set tx conf mode settings: " + "err=%d", ret); + return -1; + } + } + dpaa2_q->tc_index = tc_id; + + if (!(priv->flags & DPAA2_TX_CGR_OFF)) { + struct dpni_congestion_notification_cfg cong_notif_cfg; + + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; + cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; + /* Notify that the queue is not congested when the data in + * the queue is below this thershold. 
+ */ + cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; + cong_notif_cfg.message_ctx = 0; + cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn; + cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; + cong_notif_cfg.notification_mode = + DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | + DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | + DPNI_CONG_OPT_COHERENT_WRITE; + + ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, + priv->token, + DPNI_QUEUE_TX, + tc_id, + &cong_notif_cfg); + if (ret) { + DPAA2_PMD_ERR( + "Error in setting tx congestion notification: " + "err=%d", ret); + return -ret; + } + } + dev->data->tx_queues[tx_queue_id] = dpaa2_q; + return 0; +} + +static void +dpaa2_dev_rx_queue_release(void *q __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); +} + +static void +dpaa2_dev_tx_queue_release(void *q __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); +} + +static uint32_t +dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + int32_t ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpaa2_queue *dpaa2_q; + struct qbman_swp *swp; + struct qbman_fq_query_np_rslt state; + uint32_t frame_cnt = 0; + + PMD_INIT_FUNC_TRACE(); + + if (unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return -EINVAL; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; + + if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) { + frame_cnt = qbman_fq_state_frame_count(&state); + DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u", + rx_queue_id, frame_cnt); + } + return frame_cnt; +} + +static const uint32_t * +dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) +{ + static const uint32_t ptypes[] = { + /*todo -= add more types */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_SCTP, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_UNKNOWN + }; + + if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx) + return ptypes; + return NULL; +} + +/** + * Dpaa2 link Interrupt handler + * + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
+ * + * @return + * void + */ +static void +dpaa2_interrupt_handler(void *param) +{ + struct rte_eth_dev *dev = param; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int ret; + int irq_index = DPNI_IRQ_INDEX; + unsigned int status = 0, clear = 0; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_get_irq_status(dpni, CMD_PRI_LOW, priv->token, + irq_index, &status); + if (unlikely(ret)) { + DPAA2_PMD_ERR("Can't get irq status (err %d)", ret); + clear = 0xffffffff; + goto out; + } + + if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { + clear = DPNI_IRQ_EVENT_LINK_CHANGED; + dpaa2_dev_link_update(dev, 0); + /* calling all the apps registered for link status event */ + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, + NULL); + } +out: + ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, + irq_index, clear); + if (unlikely(ret)) + DPAA2_PMD_ERR("Can't clear irq status (err %d)", ret); +} + +static int +dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) +{ + int err = 0; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int irq_index = DPNI_IRQ_INDEX; + unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; + + PMD_INIT_FUNC_TRACE(); + + err = dpni_set_irq_mask(dpni, CMD_PRI_LOW, priv->token, + irq_index, mask); + if (err < 0) { + DPAA2_PMD_ERR("Error: dpni_set_irq_mask():%d (%s)", err, + strerror(-err)); + return err; + } + + err = dpni_set_irq_enable(dpni, CMD_PRI_LOW, priv->token, + irq_index, enable); + if (err < 0) + DPAA2_PMD_ERR("Error: dpni_set_irq_enable():%d (%s)", err, + strerror(-err)); + + return err; +} + +static int +dpaa2_dev_start(struct rte_eth_dev *dev) +{ + struct rte_device *rdev = dev->device; + struct rte_dpaa2_device *dpaa2_dev; + struct rte_eth_dev_data *data = dev->data; + struct dpaa2_dev_priv *priv = data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct dpni_queue cfg; + struct dpni_error_cfg err_cfg; + uint16_t qdid; + struct dpni_queue_id qid; + struct dpaa2_queue *dpaa2_q; + int ret, i; + struct rte_intr_handle *intr_handle; + + dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); + intr_handle = &dpaa2_dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d", + priv->hw_id, ret); + return ret; + } + + /* Power up the phy. 
Needed to make the link go UP */ + dpaa2_dev_set_link_up(dev); + + ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX, &qdid); + if (ret) { + DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret); + return ret; + } + priv->qdid = qdid; + + for (i = 0; i < data->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i]; + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_RX, dpaa2_q->tc_index, + dpaa2_q->flow_id, &cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error in getting flow information: " + "err=%d", ret); + return ret; + } + dpaa2_q->fqid = qid.fqid; + } + + /*checksum errors, send them to normal path and set it in annotation */ + err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; + + err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; + err_cfg.set_frame_annotation = true; + + ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, + priv->token, &err_cfg); + if (ret) { + DPAA2_PMD_ERR("Error to dpni_set_errors_behavior: code = %d", + ret); + return ret; + } + + /* if the interrupts were configured on this devices*/ + if (intr_handle && (intr_handle->fd) && + (dev->data->dev_conf.intr_conf.lsc != 0)) { + /* Registering LSC interrupt handler */ + rte_intr_callback_register(intr_handle, + dpaa2_interrupt_handler, + (void *)dev); + + /* enable vfio intr/eventfd mapping + * Interrupt index 0 is required, so we can not use + * rte_intr_enable. + */ + rte_dpaa2_intr_enable(intr_handle, DPNI_IRQ_INDEX); + + /* enable dpni_irqs */ + dpaa2_eth_setup_irqs(dev, 1); + } + + return 0; +} + +/** + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. + */ +static void +dpaa2_dev_stop(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int ret; + struct rte_eth_link link; + struct rte_intr_handle *intr_handle = dev->intr_handle; + + PMD_INIT_FUNC_TRACE(); + + /* reset interrupt callback */ + if (intr_handle && (intr_handle->fd) && + (dev->data->dev_conf.intr_conf.lsc != 0)) { + /*disable dpni irqs */ + dpaa2_eth_setup_irqs(dev, 0); + + /* disable vfio intr before callback unregister */ + rte_dpaa2_intr_disable(intr_handle, DPNI_IRQ_INDEX); + + /* Unregistering LSC interrupt handler */ + rte_intr_callback_unregister(intr_handle, + dpaa2_interrupt_handler, + (void *)dev); + } + + dpaa2_dev_set_link_down(dev); + + ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", + ret, priv->hw_id); + return; + } + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); +} + +static void +dpaa2_dev_close(struct rte_eth_dev *dev) +{ + struct rte_eth_dev_data *data = dev->data; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int i, ret; + struct rte_eth_link link; + struct dpaa2_queue *dpaa2_q; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < data->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i]; + if (!dpaa2_q->cscn) { + rte_free(dpaa2_q->cscn); + dpaa2_q->cscn = NULL; + } + } + + /* Clean the device first */ + ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); + return; + } + + memset(&link, 0, sizeof(link)); + rte_eth_linkstatus_set(dev, &link); +} + +static void +dpaa2_dev_promiscuous_enable( + struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv 
*priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); + if (ret < 0) + DPAA2_PMD_ERR("Unable to enable U promisc mode %d", ret); + + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); + if (ret < 0) + DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); +} + +static void +dpaa2_dev_promiscuous_disable( + struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); + if (ret < 0) + DPAA2_PMD_ERR("Unable to disable U promisc mode %d", ret); + + if (dev->data->all_multicast == 0) { + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, + priv->token, false); + if (ret < 0) + DPAA2_PMD_ERR("Unable to disable M promisc mode %d", + ret); + } +} + +static void +dpaa2_dev_allmulticast_enable( + struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); + if (ret < 0) + DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); +} + +static void +dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + /* must remain on for all promiscuous */ + if (dev->data->promiscuous == 1) + return; + + ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); + if (ret < 0) + DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); +} + +static int +dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + + VLAN_TAG_SIZE; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + /* check that mtu is within the allowed range */ + if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN)) + return -EINVAL; + + if (frame_size > ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads &= + DEV_RX_OFFLOAD_JUMBO_FRAME; + else + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; + + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + /* Set the Max Rx frame length as 'mtu' + + * Maximum Ethernet header length + */ + ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, + frame_size); + if (ret) { + DPAA2_PMD_ERR("Setting the max frame length failed"); + return -1; + } + DPAA2_PMD_INFO("MTU configured for the device: %d", mtu); + return 0; +} + +static int +dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *addr, + __rte_unused uint32_t index, + __rte_unused uint32_t pool) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni 
== NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -1; + } + + ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, + priv->token, addr->addr_bytes); + if (ret) + DPAA2_PMD_ERR( + "error: Adding the MAC ADDR failed: err = %d", ret); + return 0; +} + +static void +dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, + uint32_t index) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct rte_eth_dev_data *data = dev->data; + struct ether_addr *macaddr; + + PMD_INIT_FUNC_TRACE(); + + macaddr = &data->mac_addrs[index]; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + ret = dpni_remove_mac_addr(dpni, CMD_PRI_LOW, + priv->token, macaddr->addr_bytes); + if (ret) + DPAA2_PMD_ERR( + "error: Removing the MAC ADDR failed: err = %d", ret); +} + +static int +dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, + struct ether_addr *addr) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, + priv->token, addr->addr_bytes); + + if (ret) + DPAA2_PMD_ERR( + "error: Setting the MAC ADDR failed %d", ret); + + return ret; +} + +static +int dpaa2_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int32_t retcode; + uint8_t page0 = 0, page1 = 1, page2 = 2; + union dpni_statistics value; + + memset(&value, 0, sizeof(union dpni_statistics)); + + PMD_INIT_FUNC_TRACE(); + + if (!dpni) { + DPAA2_PMD_ERR("dpni is NULL"); + return -EINVAL; + } + + if (!stats) { + DPAA2_PMD_ERR("stats is NULL"); + return -EINVAL; + } + + /*Get Counters from page_0*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + page0, 0, &value); + if (retcode) + goto err; + + stats->ipackets = value.page_0.ingress_all_frames; + stats->ibytes = value.page_0.ingress_all_bytes; + + /*Get Counters from page_1*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + page1, 0, &value); + if (retcode) + goto err; + + stats->opackets = value.page_1.egress_all_frames; + stats->obytes = value.page_1.egress_all_bytes; + + /*Get Counters from page_2*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + page2, 0, &value); + if (retcode) + goto err; + + /* Ingress drop frame count due to configured rules */ + stats->ierrors = value.page_2.ingress_filtered_frames; + /* Ingress drop frame count due to error */ + stats->ierrors += value.page_2.ingress_discarded_frames; + + stats->oerrors = value.page_2.egress_discarded_frames; + stats->imissed = value.page_2.ingress_nobuffer_discards; + + return 0; + +err: + DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); + return retcode; +}; + +static int +dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned int n) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int32_t retcode; + union dpni_statistics value[3] = {}; + unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); + + if (n < num) + return num; + + if (xstats == NULL) + return 0; + + /* Get Counters from page_0*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 0, 0, &value[0]); + if (retcode) + goto err; + + /* Get Counters from 
page_1*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 1, 0, &value[1]); + if (retcode) + goto err; + + /* Get Counters from page_2*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 2, 0, &value[2]); + if (retcode) + goto err; + + for (i = 0; i < num; i++) { + xstats[i].id = i; + xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. + raw.counter[dpaa2_xstats_strings[i].stats_id]; + } + return i; +err: + DPAA2_PMD_ERR("Error in obtaining extended stats (%d)", retcode); + return retcode; +} + +static int +dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned int limit) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + + if (limit < stat_cnt) + return stat_cnt; + + if (xstats_names != NULL) + for (i = 0; i < stat_cnt; i++) + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", + dpaa2_xstats_strings[i].name); + + return stat_cnt; +} + +static int +dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, + uint64_t *values, unsigned int n) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + uint64_t values_copy[stat_cnt]; + + if (!ids) { + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int32_t retcode; + union dpni_statistics value[3] = {}; + + if (n < stat_cnt) + return stat_cnt; + + if (!values) + return 0; + + /* Get Counters from page_0*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 0, 0, &value[0]); + if (retcode) + return 0; + + /* Get Counters from page_1*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 1, 0, &value[1]); + if (retcode) + return 0; + + /* Get Counters from page_2*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 2, 0, &value[2]); + if (retcode) + return 0; + + for (i = 0; i < stat_cnt; i++) { + values[i] = value[dpaa2_xstats_strings[i].page_id]. 
+ raw.counter[dpaa2_xstats_strings[i].stats_id]; + } + return stat_cnt; + } + + dpaa2_xstats_get_by_id(dev, NULL, values_copy, stat_cnt); + + for (i = 0; i < n; i++) { + if (ids[i] >= stat_cnt) { + DPAA2_PMD_ERR("xstats id value isn't valid"); + return -1; + } + values[i] = values_copy[ids[i]]; + } + return n; +} + +static int +dpaa2_xstats_get_names_by_id( + struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + const uint64_t *ids, + unsigned int limit) +{ + unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); + struct rte_eth_xstat_name xstats_names_copy[stat_cnt]; + + if (!ids) + return dpaa2_xstats_get_names(dev, xstats_names, limit); + + dpaa2_xstats_get_names(dev, xstats_names_copy, limit); + + for (i = 0; i < limit; i++) { + if (ids[i] >= stat_cnt) { + DPAA2_PMD_ERR("xstats id value isn't valid"); + return -1; + } + strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name); + } + return limit; +} + +static void +dpaa2_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int32_t retcode; + + PMD_INIT_FUNC_TRACE(); + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return; + } + + retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); + if (retcode) + goto error; + + return; + +error: + DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); + return; +}; + +/* return 0 means link status changed, -1 means not changed */ +static int +dpaa2_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + int ret; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct rte_eth_link link; + struct dpni_link_state state = {0}; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return 0; + } + + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret < 0) { + DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); + return -1; + } + + memset(&link, 0, sizeof(struct rte_eth_link)); + link.link_status = state.up; + link.link_speed = state.rate; + + if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) + link.link_duplex = ETH_LINK_HALF_DUPLEX; + else + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + ret = rte_eth_linkstatus_set(dev, &link); + if (ret == -1) + DPAA2_PMD_DEBUG("No change in status"); + else + DPAA2_PMD_INFO("Port %d Link is %s\n", dev->data->port_id, + link.link_status ? "Up" : "Down"); + + return ret; +} + +/** + * Toggle the DPNI to enable, if not already enabled. + * This is not strictly PHY up/down - it is more of logical toggling. 
+ */ +static int +dpaa2_dev_set_link_up(struct rte_eth_dev *dev) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + int en = 0; + struct dpni_link_state state = {0}; + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)priv->hw; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return ret; + } + + /* Check if DPNI is currently enabled */ + ret = dpni_is_enabled(dpni, CMD_PRI_LOW, priv->token, &en); + if (ret) { + /* Unable to obtain dpni status; Not continuing */ + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); + return -EINVAL; + } + + /* Enable link if not already enabled */ + if (!en) { + ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Interface Link UP failed (%d)", ret); + return -EINVAL; + } + } + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret < 0) { + DPAA2_PMD_ERR("Unable to get link state (%d)", ret); + return -1; + } + + /* changing tx burst function to start enqueues */ + dev->tx_pkt_burst = dpaa2_dev_tx; + dev->data->dev_link.link_status = state.up; + + if (state.up) + DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); + else + DPAA2_PMD_INFO("Port %d Link is Down", dev->data->port_id); + return ret; +} + +/** + * Toggle the DPNI to disable, if not already disabled. + * This is not strictly PHY up/down - it is more of logical toggling. + */ +static int +dpaa2_dev_set_link_down(struct rte_eth_dev *dev) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + int dpni_enabled = 0; + int retries = 10; + + PMD_INIT_FUNC_TRACE(); + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)priv->hw; + + if (dpni == NULL) { + DPAA2_PMD_ERR("Device has not yet been configured"); + return ret; + } + + /*changing tx burst function to avoid any more enqueues */ + dev->tx_pkt_burst = dummy_dev_tx; + + /* Loop while dpni_disable() attempts to drain the egress FQs + * and confirm them back to us. + */ + do { + ret = dpni_disable(dpni, 0, priv->token); + if (ret) { + DPAA2_PMD_ERR("dpni disable failed (%d)", ret); + return ret; + } + ret = dpni_is_enabled(dpni, 0, priv->token, &dpni_enabled); + if (ret) { + DPAA2_PMD_ERR("dpni enable check failed (%d)", ret); + return ret; + } + if (dpni_enabled) + /* Allow the MC some slack */ + rte_delay_us(100 * 1000); + } while (dpni_enabled && --retries); + + if (!retries) { + DPAA2_PMD_WARN("Retry count exceeded disabling dpni"); + /* todo- we may have to manually cleanup queues. 
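+	 *
+	 * Note: with retries = 10 and rte_delay_us(100 * 1000) per
+	 * iteration, the MC gets roughly 10 x 100 ms = 1 s to drain and
+	 * confirm the egress FQs before the warning above is raised.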
+ */ + } else { + DPAA2_PMD_INFO("Port %d Link DOWN successful", + dev->data->port_id); + } + + dev->data->dev_link.link_status = 0; + + return ret; +} + +static int +dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + struct dpni_link_state state = {0}; + + PMD_INIT_FUNC_TRACE(); + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)priv->hw; + + if (dpni == NULL || fc_conf == NULL) { + DPAA2_PMD_ERR("device not configured"); + return ret; + } + + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret) { + DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); + return ret; + } + + memset(fc_conf, 0, sizeof(struct rte_eth_fc_conf)); + if (state.options & DPNI_LINK_OPT_PAUSE) { + /* DPNI_LINK_OPT_PAUSE set + * if ASYM_PAUSE not set, + * RX Side flow control (handle received Pause frame) + * TX side flow control (send Pause frame) + * if ASYM_PAUSE set, + * RX Side flow control (handle received Pause frame) + * No TX side flow control (send Pause frame disabled) + */ + if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) + fc_conf->mode = RTE_FC_FULL; + else + fc_conf->mode = RTE_FC_RX_PAUSE; + } else { + /* DPNI_LINK_OPT_PAUSE not set + * if ASYM_PAUSE set, + * TX side flow control (send Pause frame) + * No RX side flow control (No action on pause frame rx) + * if ASYM_PAUSE not set, + * Flow control disabled + */ + if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + } + + return ret; +} + +static int +dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + int ret = -EINVAL; + struct dpaa2_dev_priv *priv; + struct fsl_mc_io *dpni; + struct dpni_link_state state = {0}; + struct dpni_link_cfg cfg = {0}; + + PMD_INIT_FUNC_TRACE(); + + priv = dev->data->dev_private; + dpni = (struct fsl_mc_io *)priv->hw; + + if (dpni == NULL) { + DPAA2_PMD_ERR("dpni is NULL"); + return ret; + } + + /* It is necessary to obtain the current state before setting fc_conf + * as MC would return error in case rate, autoneg or duplex values are + * different. 
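+	 *
+	 * For reference, the (PAUSE, ASYM_PAUSE) link option bits map to
+	 * the ethdev flow control modes as follows, in both the get and
+	 * the set direction:
+	 *
+	 *	PAUSE  ASYM_PAUSE  mode
+	 *	  1        0       RTE_FC_FULL      (Rx and Tx pause)
+	 *	  1        1       RTE_FC_RX_PAUSE  (Rx pause only)
+	 *	  0        1       RTE_FC_TX_PAUSE  (Tx pause only)
+	 *	  0        0       RTE_FC_NONE      (disabled)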
+ */ + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); + if (ret) { + DPAA2_PMD_ERR("Unable to get link state (err=%d)", ret); + return -1; + } + + /* Disable link before setting configuration */ + dpaa2_dev_set_link_down(dev); + + /* Based on fc_conf, update cfg */ + cfg.rate = state.rate; + cfg.options = state.options; + + /* update cfg with fc_conf */ + switch (fc_conf->mode) { + case RTE_FC_FULL: + /* Full flow control; + * OPT_PAUSE set, ASYM_PAUSE not set + */ + cfg.options |= DPNI_LINK_OPT_PAUSE; + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; + break; + case RTE_FC_TX_PAUSE: + /* Enable RX flow control + * OPT_PAUSE not set; + * ASYM_PAUSE set; + */ + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; + cfg.options &= ~DPNI_LINK_OPT_PAUSE; + break; + case RTE_FC_RX_PAUSE: + /* Enable TX Flow control + * OPT_PAUSE set + * ASYM_PAUSE set + */ + cfg.options |= DPNI_LINK_OPT_PAUSE; + cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; + break; + case RTE_FC_NONE: + /* Disable Flow control + * OPT_PAUSE not set + * ASYM_PAUSE not set + */ + cfg.options &= ~DPNI_LINK_OPT_PAUSE; + cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; + break; + default: + DPAA2_PMD_ERR("Incorrect Flow control flag (%d)", + fc_conf->mode); + return -1; + } + + ret = dpni_set_link_cfg(dpni, CMD_PRI_LOW, priv->token, &cfg); + if (ret) + DPAA2_PMD_ERR("Unable to set Link configuration (err=%d)", + ret); + + /* Enable link */ + dpaa2_dev_set_link_up(dev); + + return ret; +} + +static int +dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev_data *data = dev->data; + struct rte_eth_conf *eth_conf = &data->dev_conf; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (rss_conf->rss_hf) { + ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); + if (ret) { + DPAA2_PMD_ERR("Unable to set flow dist"); + return ret; + } + } else { + ret = dpaa2_remove_flow_dist(dev, 0); + if (ret) { + DPAA2_PMD_ERR("Unable to remove flow dist"); + return ret; + } + } + eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; + return 0; +} + +static int +dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct rte_eth_dev_data *data = dev->data; + struct rte_eth_conf *eth_conf = &data->dev_conf; + + /* dpaa2 does not support rss_key, so length should be 0*/ + rss_conf->rss_key_len = 0; + rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf; + return 0; +} + +int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, + int eth_rx_queue_id, + uint16_t dpcon_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) +{ + struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; + struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; + uint8_t flow_id = dpaa2_ethq->flow_id; + struct dpni_queue cfg; + uint8_t options; + int ret; + + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) + dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; + else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) + dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; + else + return -EINVAL; + + memset(&cfg, 0, sizeof(struct dpni_queue)); + options = DPNI_QUEUE_OPT_DEST; + cfg.destination.type = DPNI_DEST_DPCON; + cfg.destination.id = dpcon_id; + cfg.destination.priority = queue_conf->ev.priority; + + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { + options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; + cfg.destination.hold_active = 1; + } + + options |= DPNI_QUEUE_OPT_USER_CTX; + 
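+	/* The queue pointer itself is stored as the FQ user context so
+	 * that the event dequeue path can recover this dpaa2_queue (and
+	 * the cb handler selected above) straight from the dequeue result.
+	 */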
cfg.user_context = (size_t)(dpaa2_ethq); + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, + dpaa2_ethq->tc_index, flow_id, options, &cfg); + if (ret) { + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); + return ret; + } + + memcpy(&dpaa2_ethq->ev, &queue_conf->ev, sizeof(struct rte_event)); + + return 0; +} + +int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, + int eth_rx_queue_id) +{ + struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; + struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; + uint8_t flow_id = dpaa2_ethq->flow_id; + struct dpni_queue cfg; + uint8_t options; + int ret; + + memset(&cfg, 0, sizeof(struct dpni_queue)); + options = DPNI_QUEUE_OPT_DEST; + cfg.destination.type = DPNI_DEST_NONE; + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX, + dpaa2_ethq->tc_index, flow_id, options, &cfg); + if (ret) + DPAA2_PMD_ERR("Error in dpni_set_queue: ret: %d", ret); + + return ret; +} + +static struct eth_dev_ops dpaa2_ethdev_ops = { + .dev_configure = dpaa2_eth_dev_configure, + .dev_start = dpaa2_dev_start, + .dev_stop = dpaa2_dev_stop, + .dev_close = dpaa2_dev_close, + .promiscuous_enable = dpaa2_dev_promiscuous_enable, + .promiscuous_disable = dpaa2_dev_promiscuous_disable, + .allmulticast_enable = dpaa2_dev_allmulticast_enable, + .allmulticast_disable = dpaa2_dev_allmulticast_disable, + .dev_set_link_up = dpaa2_dev_set_link_up, + .dev_set_link_down = dpaa2_dev_set_link_down, + .link_update = dpaa2_dev_link_update, + .stats_get = dpaa2_dev_stats_get, + .xstats_get = dpaa2_dev_xstats_get, + .xstats_get_by_id = dpaa2_xstats_get_by_id, + .xstats_get_names_by_id = dpaa2_xstats_get_names_by_id, + .xstats_get_names = dpaa2_xstats_get_names, + .stats_reset = dpaa2_dev_stats_reset, + .xstats_reset = dpaa2_dev_stats_reset, + .fw_version_get = dpaa2_fw_version_get, + .dev_infos_get = dpaa2_dev_info_get, + .dev_supported_ptypes_get = dpaa2_supported_ptypes_get, + .mtu_set = dpaa2_dev_mtu_set, + .vlan_filter_set = dpaa2_vlan_filter_set, + .vlan_offload_set = dpaa2_vlan_offload_set, + .rx_queue_setup = dpaa2_dev_rx_queue_setup, + .rx_queue_release = dpaa2_dev_rx_queue_release, + .tx_queue_setup = dpaa2_dev_tx_queue_setup, + .tx_queue_release = dpaa2_dev_tx_queue_release, + .rx_queue_count = dpaa2_dev_rx_queue_count, + .flow_ctrl_get = dpaa2_flow_ctrl_get, + .flow_ctrl_set = dpaa2_flow_ctrl_set, + .mac_addr_add = dpaa2_dev_add_mac_addr, + .mac_addr_remove = dpaa2_dev_remove_mac_addr, + .mac_addr_set = dpaa2_dev_set_mac_addr, + .rss_hash_update = dpaa2_dev_rss_hash_update, + .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, +}; + +static int +dpaa2_dev_init(struct rte_eth_dev *eth_dev) +{ + struct rte_device *dev = eth_dev->device; + struct rte_dpaa2_device *dpaa2_dev; + struct fsl_mc_io *dpni_dev; + struct dpni_attr attr; + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct dpni_buffer_layout layout; + int ret, hw_id; + + PMD_INIT_FUNC_TRACE(); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); + + hw_id = dpaa2_dev->object_id; + + dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); + if (!dpni_dev) { + DPAA2_PMD_ERR("Memory allocation failed for dpni device"); + return -1; + } + + dpni_dev->regs = rte_mcp_ptr_list[0]; + ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); 
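+	/* dpni_open() returns an authentication token in priv->token;
+	 * every subsequent MC command for this DPNI must carry it.
+	 */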
+ if (ret) { + DPAA2_PMD_ERR( + "Failure in opening dpni@%d with err code %d", + hw_id, ret); + rte_free(dpni_dev); + return -1; + } + + /* Clean the device first */ + ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure cleaning dpni@%d with err code %d", + hw_id, ret); + goto init_err; + } + + ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr); + if (ret) { + DPAA2_PMD_ERR( + "Failure in get dpni@%d attribute, err code %d", + hw_id, ret); + goto init_err; + } + + priv->num_rx_tc = attr.num_rx_tcs; + + /* Resetting the "num_rx_queues" to equal number of queues in first TC + * as only one TC is supported on Rx Side. Once Multiple TCs will be + * in use for Rx processing then this will be changed or removed. + */ + priv->nb_rx_queues = attr.num_queues; + + /* Using number of TX queues as number of TX TCs */ + priv->nb_tx_queues = attr.num_tx_tcs; + + DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", + priv->num_rx_tc, priv->nb_rx_queues, + priv->nb_tx_queues); + + priv->hw = dpni_dev; + priv->hw_id = hw_id; + priv->options = attr.options; + priv->max_mac_filters = attr.mac_filter_entries; + priv->max_vlan_filters = attr.vlan_filter_entries; + priv->flags = 0; + + /* Allocate memory for hardware structure for queues */ + ret = dpaa2_alloc_rx_tx_queues(eth_dev); + if (ret) { + DPAA2_PMD_ERR("Queue allocation Failed"); + goto init_err; + } + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("dpni", + ETHER_ADDR_LEN * attr.mac_filter_entries, 0); + if (eth_dev->data->mac_addrs == NULL) { + DPAA2_PMD_ERR( + "Failed to allocate %d bytes needed to store MAC addresses", + ETHER_ADDR_LEN * attr.mac_filter_entries); + ret = -ENOMEM; + goto init_err; + } + + ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, + priv->token, + (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes)); + if (ret) { + DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d", + ret); + goto init_err; + } + + /* ... tx buffer layout ... */ + memset(&layout, 0, sizeof(struct dpni_buffer_layout)); + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + layout.pass_frame_status = 1; + ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX, &layout); + if (ret) { + DPAA2_PMD_ERR("Error (%d) in setting tx buffer layout", ret); + goto init_err; + } + + /* ... tx-conf and error buffer layout ... 
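+	 * (same DPNI_BUF_LAYOUT_OPT_FRAME_STATUS request as on the Tx
+	 * queue, so frame status annotation is available on confirmation
+	 * and error frames as well)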
*/ + memset(&layout, 0, sizeof(struct dpni_buffer_layout)); + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + layout.pass_frame_status = 1; + ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX_CONFIRM, &layout); + if (ret) { + DPAA2_PMD_ERR("Error (%d) in setting tx-conf buffer layout", + ret); + goto init_err; + } + + eth_dev->dev_ops = &dpaa2_ethdev_ops; + + eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; + eth_dev->tx_pkt_burst = dpaa2_dev_tx; + + RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); + return 0; +init_err: + dpaa2_dev_uninit(eth_dev); + return ret; +} + +static int +dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) +{ + struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + int i, ret; + struct dpaa2_queue *dpaa2_q; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (!dpni) { + DPAA2_PMD_WARN("Already closed or not started"); + return -1; + } + + dpaa2_dev_close(eth_dev); + + if (priv->rx_vq[0]) { + /* cleaning up queue storage */ + for (i = 0; i < priv->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + if (dpaa2_q->q_storage) + rte_free(dpaa2_q->q_storage); + } + /*free the all queue memory */ + rte_free(priv->rx_vq[0]); + priv->rx_vq[0] = NULL; + } + + /* free memory for storing MAC addresses */ + if (eth_dev->data->mac_addrs) { + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; + } + + /* Close the device at underlying layer*/ + ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR( + "Failure closing dpni device with err code %d", + ret); + } + + /* Free the allocated memory for ethernet private data and dpni*/ + priv->hw = NULL; + rte_free(dpni); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); + return 0; +} + +static int +rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, + struct rte_dpaa2_device *dpaa2_dev) +{ + struct rte_eth_dev *eth_dev; + int diag; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); + if (!eth_dev) + return -ENODEV; + eth_dev->data->dev_private = rte_zmalloc( + "ethdev private structure", + sizeof(struct dpaa2_dev_priv), + RTE_CACHE_LINE_SIZE); + if (eth_dev->data->dev_private == NULL) { + DPAA2_PMD_CRIT( + "Unable to allocate memory for private data"); + rte_eth_dev_release_port(eth_dev); + return -ENOMEM; + } + } else { + eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); + if (!eth_dev) + return -ENODEV; + } + + eth_dev->device = &dpaa2_dev->device; + eth_dev->device->driver = &dpaa2_drv->driver; + + dpaa2_dev->eth_dev = eth_dev; + eth_dev->data->rx_mbuf_alloc_failed = 0; + + if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC) + eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; + + /* Invoke PMD device initialization function */ + diag = dpaa2_dev_init(eth_dev); + if (diag == 0) { + rte_eth_dev_probing_finish(eth_dev); + return 0; + } + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_free(eth_dev->data->dev_private); + rte_eth_dev_release_port(eth_dev); + return diag; +} + +static int +rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) +{ + struct rte_eth_dev *eth_dev; + + eth_dev = dpaa2_dev->eth_dev; + dpaa2_dev_uninit(eth_dev); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_free(eth_dev->data->dev_private); + 
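+	/* The DPNI itself was closed in dpaa2_dev_uninit() above; only
+	 * the primary process owns the private data, so only it frees the
+	 * allocation before the port entry is released.
+	 */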
rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_dpaa2_driver rte_dpaa2_pmd = { + .drv_flags = RTE_DPAA2_DRV_INTR_LSC | RTE_DPAA2_DRV_IOVA_AS_VA, + .drv_type = DPAA2_ETH, + .probe = rte_dpaa2_probe, + .remove = rte_dpaa2_remove, +}; + +RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); + +RTE_INIT(dpaa2_pmd_init_log) +{ + dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); + if (dpaa2_logtype_pmd >= 0) + rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h new file mode 100644 index 00000000..bd69f523 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_ethdev.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved. + * Copyright 2016 NXP + * + */ + +#ifndef _DPAA2_ETHDEV_H +#define _DPAA2_ETHDEV_H + +#include <rte_event_eth_rx_adapter.h> + +#include <mc/fsl_dpni.h> +#include <mc/fsl_mc_sys.h> + +#define DPAA2_MIN_RX_BUF_SIZE 512 +#define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/ + +#define MAX_TCS DPNI_MAX_TC +#define MAX_RX_QUEUES 16 +#define MAX_TX_QUEUES 16 + +/*default tc to be used for ,congestion, distribution etc configuration. */ +#define DPAA2_DEF_TC 0 + +/* Threshold for a Tx queue to *Enter* Congestion state. + */ +#define CONG_ENTER_TX_THRESHOLD 512 + +/* Threshold for a queue to *Exit* Congestion state. + */ +#define CONG_EXIT_TX_THRESHOLD 480 + +#define CONG_RETRY_COUNT 18000 + +/* RX queue tail drop threshold + * currently considering 32 KB packets + */ +#define CONG_THRESHOLD_RX_Q (64 * 1024) +#define CONG_RX_OAL 128 + +/* Size of the input SMMU mapped memory required by MC */ +#define DIST_PARAM_IOVA_SIZE 256 + +/* Enable TX Congestion control support + * default is disable + */ +#define DPAA2_TX_CGR_OFF 0x01 + +/* Disable RX tail drop, default is enable */ +#define DPAA2_RX_TAILDROP_OFF 0x04 + +#define DPAA2_RSS_OFFLOAD_ALL ( \ + ETH_RSS_IP | \ + ETH_RSS_UDP | \ + ETH_RSS_TCP | \ + ETH_RSS_SCTP) + +/* LX2 FRC Parsed values (Little Endian) */ +#define DPAA2_PKT_TYPE_ETHER 0x0060 +#define DPAA2_PKT_TYPE_IPV4 0x0000 +#define DPAA2_PKT_TYPE_IPV6 0x0020 +#define DPAA2_PKT_TYPE_IPV4_EXT \ + (0x0001 | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_EXT \ + (0x0001 | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_TCP \ + (0x000e | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_TCP \ + (0x000e | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_UDP \ + (0x0010 | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_UDP \ + (0x0010 | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_SCTP \ + (0x000f | DPAA2_PKT_TYPE_IPV4) +#define DPAA2_PKT_TYPE_IPV6_SCTP \ + (0x000f | DPAA2_PKT_TYPE_IPV6) +#define DPAA2_PKT_TYPE_IPV4_ICMP \ + (0x0003 | DPAA2_PKT_TYPE_IPV4_EXT) +#define DPAA2_PKT_TYPE_IPV6_ICMP \ + (0x0003 | DPAA2_PKT_TYPE_IPV6_EXT) +#define DPAA2_PKT_TYPE_VLAN_1 0x0160 +#define DPAA2_PKT_TYPE_VLAN_2 0x0260 + +struct dpaa2_dev_priv { + void *hw; + int32_t hw_id; + int32_t qdid; + uint16_t token; + uint8_t nb_tx_queues; + uint8_t nb_rx_queues; + void *rx_vq[MAX_RX_QUEUES]; + void *tx_vq[MAX_TX_QUEUES]; + + struct dpaa2_bp_list *bp_list; /**<Attached buffer pool list */ + uint32_t options; + uint8_t max_mac_filters; + uint8_t max_vlan_filters; + uint8_t num_rx_tc; + uint8_t flags; /*dpaa2 config flags */ +}; + +int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev, + uint64_t req_dist_set); + +int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev, 
+ uint8_t tc_index); + +int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist); + +int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, + int eth_rx_queue_id, + uint16_t dpcon_id, + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf); + +int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, + int eth_rx_queue_id); + +uint16_t dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, + uint16_t nb_pkts); +void dpaa2_dev_process_parallel_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev); +void dpaa2_dev_process_atomic_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev); +uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts); +uint16_t dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts); +#endif /* _DPAA2_ETHDEV_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h new file mode 100644 index 00000000..c04babdb --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_pmd_logs.h @@ -0,0 +1,41 @@ +/*- + * SPDX-License-Identifier: BSD-3-Clause + * Copyright 2017 NXP + */ + +#ifndef _DPAA2_PMD_LOGS_H_ +#define _DPAA2_PMD_LOGS_H_ + +extern int dpaa2_logtype_pmd; + +#define DPAA2_PMD_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_pmd, "dpaa2_net: " \ + fmt "\n", ##args) + +#define DPAA2_PMD_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_pmd, "dpaa2_net: %s(): "\ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() DPAA2_PMD_DEBUG(">>") + +#define DPAA2_PMD_CRIT(fmt, args...) \ + DPAA2_PMD_LOG(CRIT, fmt, ## args) +#define DPAA2_PMD_INFO(fmt, args...) \ + DPAA2_PMD_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_ERR(fmt, args...) \ + DPAA2_PMD_LOG(ERR, fmt, ## args) +#define DPAA2_PMD_WARN(fmt, args...) \ + DPAA2_PMD_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_PMD_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_PMD_DP_DEBUG(fmt, args...) \ + DPAA2_PMD_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_PMD_DP_INFO(fmt, args...) \ + DPAA2_PMD_DP_LOG(INFO, fmt, ## args) +#define DPAA2_PMD_DP_WARN(fmt, args...) \ + DPAA2_PMD_DP_LOG(WARNING, fmt, ## args) + +#endif /* _DPAA2_PMD_LOGS_H_ */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c new file mode 100644 index 00000000..ef109a62 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/dpaa2_rxtx.c @@ -0,0 +1,842 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
+ * Copyright 2016 NXP + * + */ + +#include <time.h> +#include <net/if.h> + +#include <rte_mbuf.h> +#include <rte_ethdev_driver.h> +#include <rte_malloc.h> +#include <rte_memcpy.h> +#include <rte_string_fns.h> +#include <rte_dev.h> + +#include <rte_fslmc.h> +#include <fslmc_vfio.h> +#include <dpaa2_hw_pvt.h> +#include <dpaa2_hw_dpio.h> +#include <dpaa2_hw_mempool.h> + +#include "dpaa2_pmd_logs.h" +#include "dpaa2_ethdev.h" +#include "base/dpaa2_hw_dpni_annot.h" + +#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \ + DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \ + DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \ + DPAA2_SET_ONLY_FD_BPID(_fd, _bpid); \ + DPAA2_SET_FD_OFFSET(_fd, _mbuf->data_off); \ + DPAA2_SET_FD_ASAL(_fd, DPAA2_ASAL_VAL); \ +} while (0) + +static inline void __attribute__((hot)) +dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc) +{ + DPAA2_PMD_DP_DEBUG("frc = 0x%x\t", frc); + + m->packet_type = RTE_PTYPE_UNKNOWN; + switch (frc) { + case DPAA2_PKT_TYPE_ETHER: + m->packet_type = RTE_PTYPE_L2_ETHER; + break; + case DPAA2_PKT_TYPE_IPV4: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4; + break; + case DPAA2_PKT_TYPE_IPV6: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6; + break; + case DPAA2_PKT_TYPE_IPV4_EXT: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT; + break; + case DPAA2_PKT_TYPE_IPV6_EXT: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT; + break; + case DPAA2_PKT_TYPE_IPV4_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP; + break; + case DPAA2_PKT_TYPE_IPV6_TCP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP; + break; + case DPAA2_PKT_TYPE_IPV4_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP; + break; + case DPAA2_PKT_TYPE_IPV6_UDP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP; + break; + case DPAA2_PKT_TYPE_IPV4_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP; + break; + case DPAA2_PKT_TYPE_IPV6_SCTP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP; + break; + case DPAA2_PKT_TYPE_IPV4_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_ICMP; + break; + case DPAA2_PKT_TYPE_IPV6_ICMP: + m->packet_type = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_ICMP; + break; + case DPAA2_PKT_TYPE_VLAN_1: + case DPAA2_PKT_TYPE_VLAN_2: + m->ol_flags |= PKT_RX_VLAN; + break; + /* More switch cases can be added */ + /* TODO: Add handling for checksum error check from FRC */ + default: + m->packet_type = RTE_PTYPE_UNKNOWN; + } +} + +static inline uint32_t __attribute__((hot)) +dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation) +{ + uint32_t pkt_type = RTE_PTYPE_UNKNOWN; + + DPAA2_PMD_DP_DEBUG("(slow parse) Annotation = 0x%" PRIx64 "\t", + annotation->word4); + if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) { + pkt_type = RTE_PTYPE_L2_ETHER_ARP; + goto parse_done; + } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) { + pkt_type = RTE_PTYPE_L2_ETHER; + } else { + goto parse_done; + } + + if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT | + L3_IPV4_N_PRESENT)) { + pkt_type |= RTE_PTYPE_L3_IPV4; + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | + L3_IP_N_OPT_PRESENT)) + pkt_type |= RTE_PTYPE_L3_IPV4_EXT; + + } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT | + L3_IPV6_N_PRESENT)) { + pkt_type |= 
RTE_PTYPE_L3_IPV6; + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT | + L3_IP_N_OPT_PRESENT)) + pkt_type |= RTE_PTYPE_L3_IPV6_EXT; + } else { + goto parse_done; + } + + if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT | + L3_IP_1_MORE_FRAGMENT | + L3_IP_N_FIRST_FRAGMENT | + L3_IP_N_MORE_FRAGMENT)) { + pkt_type |= RTE_PTYPE_L4_FRAG; + goto parse_done; + } else { + pkt_type |= RTE_PTYPE_L4_NONFRAG; + } + + if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_UDP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_TCP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_SCTP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT)) + pkt_type |= RTE_PTYPE_L4_ICMP; + + else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL)) + pkt_type |= RTE_PTYPE_UNKNOWN; + +parse_done: + return pkt_type; +} + +static inline uint32_t __attribute__((hot)) +dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr) +{ + struct dpaa2_annot_hdr *annotation = + (struct dpaa2_annot_hdr *)hw_annot_addr; + + DPAA2_PMD_DP_DEBUG("(fast parse) Annotation = 0x%" PRIx64 "\t", + annotation->word4); + + /* Check offloads first */ + if (BIT_ISSET_AT_POS(annotation->word3, + L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT)) + mbuf->ol_flags |= PKT_RX_VLAN; + + if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE)) + mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE)) + mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; + + /* Return some common types from parse processing */ + switch (annotation->word4) { + case DPAA2_L3_IPv4: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; + case DPAA2_L3_IPv6: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; + case DPAA2_L3_IPv4_TCP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_TCP; + case DPAA2_L3_IPv4_UDP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | + RTE_PTYPE_L4_UDP; + case DPAA2_L3_IPv6_TCP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_TCP; + case DPAA2_L3_IPv6_UDP: + return RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | + RTE_PTYPE_L4_UDP; + default: + break; + } + + return dpaa2_dev_rx_parse_slow(annotation); +} + +static inline struct rte_mbuf *__attribute__((hot)) +eth_sg_fd_to_mbuf(const struct qbman_fd *fd) +{ + struct qbman_sge *sgt, *sge; + size_t sg_addr, fd_addr; + int i = 0; + struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp; + + fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); + + /* Get Scatter gather table address */ + sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd)); + + sge = &sgt[i++]; + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge)); + + /* First Scatter gather entry */ + first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); + /* Prepare all the metadata for first segment */ + first_seg->buf_addr = (uint8_t *)sg_addr; + first_seg->ol_flags = 0; + first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); + first_seg->data_len = sge->length & 0x1FFFF; + first_seg->pkt_len = DPAA2_GET_FD_LEN(fd); + first_seg->nb_segs = 1; + first_seg->next = NULL; + if (dpaa2_svr_family == SVR_LX2160A) + dpaa2_dev_rx_parse_frc(first_seg, + DPAA2_GET_FD_FRC_PARSE_SUM(fd)); + else + first_seg->packet_type = dpaa2_dev_rx_parse(first_seg, + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); + + 
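+	/* Walk the remaining SG entries, turning each into a chained
+	 * in-line mbuf until the FINAL flag is met; the buffer holding
+	 * the SG table itself is freed once the chain is complete.
+	 */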
rte_mbuf_refcnt_set(first_seg, 1); + cur_seg = first_seg; + while (!DPAA2_SG_IS_FINAL(sge)) { + sge = &sgt[i++]; + sg_addr = (size_t)DPAA2_IOVA_TO_VADDR( + DPAA2_GET_FLE_ADDR(sge)); + next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size); + next_seg->buf_addr = (uint8_t *)sg_addr; + next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge); + next_seg->data_len = sge->length & 0x1FFFF; + first_seg->nb_segs += 1; + rte_mbuf_refcnt_set(next_seg, 1); + cur_seg->next = next_seg; + next_seg->next = NULL; + cur_seg = next_seg; + } + temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr, + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); + rte_mbuf_refcnt_set(temp, 1); + rte_pktmbuf_free_seg(temp); + + return (void *)first_seg; +} + +static inline struct rte_mbuf *__attribute__((hot)) +eth_fd_to_mbuf(const struct qbman_fd *fd) +{ + struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF( + DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); + + /* need to repopulated some of the fields, + * as they may have changed in last transmission + */ + mbuf->nb_segs = 1; + mbuf->ol_flags = 0; + mbuf->data_off = DPAA2_GET_FD_OFFSET(fd); + mbuf->data_len = DPAA2_GET_FD_LEN(fd); + mbuf->pkt_len = mbuf->data_len; + mbuf->next = NULL; + rte_mbuf_refcnt_set(mbuf, 1); + + /* Parse the packet */ + /* parse results for LX2 are there in FRC field of FD. + * For other DPAA2 platforms , parse results are after + * the private - sw annotation area + */ + + if (dpaa2_svr_family == SVR_LX2160A) + dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd)); + else + mbuf->packet_type = dpaa2_dev_rx_parse(mbuf, + (void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_FD_PTA_SIZE)); + + DPAA2_PMD_DP_DEBUG("to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", + mbuf, mbuf->buf_addr, mbuf->data_off, + DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); + + return mbuf; +} + +static int __attribute__ ((noinline)) __attribute__((hot)) +eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) +{ + struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp; + struct qbman_sge *sgt, *sge = NULL; + int i; + + temp = rte_pktmbuf_alloc(mbuf->pool); + if (temp == NULL) { + DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n"); + return -ENOMEM; + } + + DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp)); + DPAA2_SET_FD_LEN(fd, mbuf->pkt_len); + DPAA2_SET_ONLY_FD_BPID(fd, bpid); + DPAA2_SET_FD_OFFSET(fd, temp->data_off); + DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL); + DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg); + /*Set Scatter gather table and Scatter gather entries*/ + sgt = (struct qbman_sge *)( + (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) + + DPAA2_GET_FD_OFFSET(fd)); + + for (i = 0; i < mbuf->nb_segs; i++) { + sge = &sgt[i]; + /*Resetting the buffer pool id and offset field*/ + sge->fin_bpid_offset = 0; + DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg)); + DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off); + sge->length = cur_seg->data_len; + if (RTE_MBUF_DIRECT(cur_seg)) { + if (rte_mbuf_refcnt_read(cur_seg) > 1) { + /* If refcnt > 1, invalid bpid is set to ensure + * buffer is not freed by HW + */ + DPAA2_SET_FLE_IVP(sge); + rte_mbuf_refcnt_update(cur_seg, -1); + } else + DPAA2_SET_FLE_BPID(sge, + mempool_to_bpid(cur_seg->pool)); + 
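+			/* Either way the direct segment survives transmit
+			 * accounting: with refcnt > 1 one reference is
+			 * consumed and IVP keeps the hardware from freeing
+			 * the buffer, with refcnt == 1 the hardware releases
+			 * it back into its bpid.
+			 */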
cur_seg = cur_seg->next; + } else { + /* Get owner MBUF from indirect buffer */ + mi = rte_mbuf_from_indirect(cur_seg); + if (rte_mbuf_refcnt_read(mi) > 1) { + /* If refcnt > 1, invalid bpid is set to ensure + * owner buffer is not freed by HW + */ + DPAA2_SET_FLE_IVP(sge); + } else { + DPAA2_SET_FLE_BPID(sge, + mempool_to_bpid(mi->pool)); + rte_mbuf_refcnt_update(mi, 1); + } + prev_seg = cur_seg; + cur_seg = cur_seg->next; + prev_seg->next = NULL; + rte_pktmbuf_free(prev_seg); + } + } + DPAA2_SG_SET_FINAL(sge, true); + return 0; +} + +static void +eth_mbuf_to_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) __attribute__((unused)); + +static void __attribute__ ((noinline)) __attribute__((hot)) +eth_mbuf_to_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) +{ + DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid); + + DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d," + "fd_off=%d fd =%" PRIx64 ", meta = %d bpid =%d, len=%d\n", + mbuf, mbuf->buf_addr, mbuf->data_off, + DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd)); + if (RTE_MBUF_DIRECT(mbuf)) { + if (rte_mbuf_refcnt_read(mbuf) > 1) { + DPAA2_SET_FD_IVP(fd); + rte_mbuf_refcnt_update(mbuf, -1); + } + } else { + struct rte_mbuf *mi; + + mi = rte_mbuf_from_indirect(mbuf); + if (rte_mbuf_refcnt_read(mi) > 1) + DPAA2_SET_FD_IVP(fd); + else + rte_mbuf_refcnt_update(mi, 1); + rte_pktmbuf_free(mbuf); + } +} + +static inline int __attribute__((hot)) +eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf, + struct qbman_fd *fd, uint16_t bpid) +{ + struct rte_mbuf *m; + void *mb = NULL; + + if (rte_dpaa2_mbuf_alloc_bulk( + rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) { + DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n"); + return -1; + } + m = (struct rte_mbuf *)mb; + memcpy((char *)m->buf_addr + mbuf->data_off, + (void *)((char *)mbuf->buf_addr + mbuf->data_off), + mbuf->pkt_len); + + /* Copy required fields */ + m->data_off = mbuf->data_off; + m->ol_flags = mbuf->ol_flags; + m->packet_type = mbuf->packet_type; + m->tx_offload = mbuf->tx_offload; + + DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid); + + DPAA2_PMD_DP_DEBUG( + "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d," + " meta: %d, off: %d, len: %d\n", + (void *)mbuf, + mbuf->buf_addr, + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); + +return 0; +} + +/* This function assumes that caller will be keep the same value for nb_pkts + * across calls per queue, if that is not the case, better use non-prefetch + * version of rx call. + * It will return the packets as requested in previous call without honoring + * the current nb_pkts or bufs space. 
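+ *
+ * Internally every call consumes the volatile dequeue (pull) issued by
+ * the previous call and immediately issues the next one, so a given
+ * burst size takes effect only on the following call; hence the
+ * constant nb_pkts assumption.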
+ */ +uint16_t +dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + /* Function receive frames for a given device and VQ*/ + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_result *dq_storage, *dq_storage1 = NULL; + uint32_t fqid = dpaa2_q->fqid; + int ret, num_rx = 0, pull_size; + uint8_t pending, status; + struct qbman_swp *swp; + const struct qbman_fd *fd, *next_fd; + struct qbman_pull_desc pulldesc; + struct queue_storage_info_t *q_storage = dpaa2_q->q_storage; + struct rte_eth_dev *dev = dpaa2_q->dev; + + if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) { + ret = dpaa2_affine_qbman_ethrx_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return 0; + } + } + swp = DPAA2_PER_LCORE_ETHRX_PORTAL; + pull_size = (nb_pkts > DPAA2_DQRR_RING_SIZE) ? + DPAA2_DQRR_RING_SIZE : nb_pkts; + if (unlikely(!q_storage->active_dqs)) { + q_storage->toggle = 0; + dq_storage = q_storage->dq_storage[q_storage->toggle]; + q_storage->last_num_pkts = pull_size; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, + q_storage->last_num_pkts); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage, + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1); + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs( + DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + " QBMAN is busy (1)\n"); + /* Portal was busy, try again */ + continue; + } + break; + } + q_storage->active_dqs = dq_storage; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, + dq_storage); + } + + dq_storage = q_storage->active_dqs; + rte_prefetch0((void *)(size_t)(dq_storage)); + rte_prefetch0((void *)(size_t)(dq_storage + 1)); + + /* Prepare next pull descriptor. This will give space for the + * prefething done on DQRR entries + */ + q_storage->toggle ^= 1; + dq_storage1 = q_storage->dq_storage[q_storage->toggle]; + qbman_pull_desc_clear(&pulldesc); + qbman_pull_desc_set_numframes(&pulldesc, pull_size); + qbman_pull_desc_set_fq(&pulldesc, fqid); + qbman_pull_desc_set_storage(&pulldesc, dq_storage1, + (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1); + + /* Check if the previous issued command is completed. + * Also seems like the SWP is shared between the Ethernet Driver + * and the SEC driver. + */ + while (!qbman_check_command_complete(dq_storage)) + ; + if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id)) + clear_swp_active_dqs(q_storage->active_dpio_id); + + pending = 1; + + do { + /* Loop until the dq_storage is updated with + * new token by QBMAN + */ + while (!qbman_check_new_result(dq_storage)) + ; + rte_prefetch0((void *)((size_t)(dq_storage + 2))); + /* Check whether Last Pull command is Expired and + * setting Condition for Loop termination + */ + if (qbman_result_DQ_is_pull_complete(dq_storage)) { + pending = 0; + /* Check for valid frame. 
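+			 * A pull-complete result without
+			 * QBMAN_DQ_STAT_VALIDFRAME carries no usable frame
+			 * descriptor and is skipped.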
*/ + status = qbman_result_DQ_flags(dq_storage); + if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) + continue; + } + fd = qbman_result_DQ_fd(dq_storage); + + next_fd = qbman_result_DQ_fd(dq_storage + 1); + /* Prefetch Annotation address for the parse results */ + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd) + + DPAA2_FD_PTA_SIZE + 16)); + + if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg)) + bufs[num_rx] = eth_sg_fd_to_mbuf(fd); + else + bufs[num_rx] = eth_fd_to_mbuf(fd); + bufs[num_rx]->port = dev->data->port_id; + + if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) + rte_vlan_strip(bufs[num_rx]); + + dq_storage++; + num_rx++; + } while (pending); + + if (check_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index)) { + while (!qbman_check_command_complete( + get_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index))) + ; + clear_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index); + } + /* issue a volatile dequeue command for next pull */ + while (1) { + if (qbman_swp_pull(swp, &pulldesc)) { + DPAA2_PMD_DP_DEBUG("VDQ command is not issued." + "QBMAN is busy (2)\n"); + continue; + } + break; + } + q_storage->active_dqs = dq_storage1; + q_storage->active_dpio_id = DPAA2_PER_LCORE_ETHRX_DPIO->index; + set_swp_active_dqs(DPAA2_PER_LCORE_ETHRX_DPIO->index, dq_storage1); + + dpaa2_q->rx_pkts += num_rx; + + return num_rx; +} + +void __attribute__((hot)) +dpaa2_dev_process_parallel_event(struct qbman_swp *swp, + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev) +{ + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + + DPAA2_FD_PTA_SIZE + 16)); + + ev->flow_id = rxq->ev.flow_id; + ev->sub_event_type = rxq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = rxq->ev.sched_type; + ev->queue_id = rxq->ev.queue_id; + ev->priority = rxq->ev.priority; + + ev->mbuf = eth_fd_to_mbuf(fd); + + qbman_swp_dqrr_consume(swp, dq); +} + +void __attribute__((hot)) +dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)), + const struct qbman_fd *fd, + const struct qbman_result *dq, + struct dpaa2_queue *rxq, + struct rte_event *ev) +{ + uint8_t dqrr_index; + + rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) + + DPAA2_FD_PTA_SIZE + 16)); + + ev->flow_id = rxq->ev.flow_id; + ev->sub_event_type = rxq->ev.sub_event_type; + ev->event_type = RTE_EVENT_TYPE_ETHDEV; + ev->op = RTE_EVENT_OP_NEW; + ev->sched_type = rxq->ev.sched_type; + ev->queue_id = rxq->ev.queue_id; + ev->priority = rxq->ev.priority; + + ev->mbuf = eth_fd_to_mbuf(fd); + + dqrr_index = qbman_get_dqrr_idx(dq); + ev->mbuf->seqn = dqrr_index + 1; + DPAA2_PER_LCORE_DQRR_SIZE++; + DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index; + DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf; +} + +/* + * Callback to handle sending packets through WRIOP based interface + */ +uint16_t +dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + /* Function to transmit the frames to given device and VQ*/ + uint32_t loop, retry_count; + int32_t ret; + struct qbman_fd fd_arr[MAX_TX_RING_SLOTS]; + struct rte_mbuf *mi; + uint32_t frames_to_send; + struct rte_mempool *mp; + struct qbman_eq_desc eqdesc; + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue; + struct qbman_swp *swp; + uint16_t num_tx = 0; + uint16_t bpid; + struct rte_eth_dev *dev = dpaa2_q->dev; + struct dpaa2_dev_priv *priv = dev->data->dev_private; + uint32_t flags[MAX_TX_RING_SLOTS] = {0}; + + if 
(unlikely(!DPAA2_PER_LCORE_DPIO)) { + ret = dpaa2_affine_qbman_swp(); + if (ret) { + DPAA2_PMD_ERR("Failure in affining portal"); + return 0; + } + } + swp = DPAA2_PER_LCORE_PORTAL; + + DPAA2_PMD_DP_DEBUG("===> dev =%p, fqid =%d\n", dev, dpaa2_q->fqid); + + /*Prepare enqueue descriptor*/ + qbman_eq_desc_clear(&eqdesc); + qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ); + qbman_eq_desc_set_response(&eqdesc, 0, 0); + qbman_eq_desc_set_qd(&eqdesc, priv->qdid, + dpaa2_q->flow_id, dpaa2_q->tc_index); + /*Clear the unused FD fields before sending*/ + while (nb_pkts) { + /*Check if the queue is congested*/ + retry_count = 0; + while (qbman_result_SCN_state(dpaa2_q->cscn)) { + retry_count++; + /* Retry for some time before giving up */ + if (retry_count > CONG_RETRY_COUNT) + goto skip_tx; + } + + frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts; + + for (loop = 0; loop < frames_to_send; loop++) { + if ((*bufs)->seqn) { + uint8_t dqrr_index = (*bufs)->seqn - 1; + + flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | + dqrr_index; + DPAA2_PER_LCORE_DQRR_SIZE--; + DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index); + (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN; + } + + fd_arr[loop].simple.frc = 0; + DPAA2_RESET_FD_CTRL((&fd_arr[loop])); + DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL); + if (likely(RTE_MBUF_DIRECT(*bufs))) { + mp = (*bufs)->pool; + /* Check the basic scenario and set + * the FD appropriately here itself. + */ + if (likely(mp && mp->ops_index == + priv->bp_list->dpaa2_ops_index && + (*bufs)->nb_segs == 1 && + rte_mbuf_refcnt_read((*bufs)) == 1)) { + if (unlikely(((*bufs)->ol_flags + & PKT_TX_VLAN_PKT) || + (dev->data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { + ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } + DPAA2_MBUF_TO_CONTIG_FD((*bufs), + &fd_arr[loop], mempool_to_bpid(mp)); + bufs++; + continue; + } + } else { + mi = rte_mbuf_from_indirect(*bufs); + mp = mi->pool; + } + /* Not a hw_pkt pool allocated frame */ + if (unlikely(!mp || !priv->bp_list)) { + DPAA2_PMD_ERR("Err: No buffer pool attached"); + goto send_n_return; + } + + if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) || + (dev->data->dev_conf.txmode.offloads + & DEV_TX_OFFLOAD_VLAN_INSERT))) { + int ret = rte_vlan_insert(bufs); + if (ret) + goto send_n_return; + } + if (mp->ops_index != priv->bp_list->dpaa2_ops_index) { + DPAA2_PMD_WARN("Non DPAA2 buffer pool"); + /* alloc should be from the default buffer pool + * attached to this interface + */ + bpid = priv->bp_list->buf_pool.bpid; + + if (unlikely((*bufs)->nb_segs > 1)) { + DPAA2_PMD_ERR("S/G support not added" + " for non hw offload buffer"); + goto send_n_return; + } + if (eth_copy_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid)) { + goto send_n_return; + } + /* free the original packet */ + rte_pktmbuf_free(*bufs); + } else { + bpid = mempool_to_bpid(mp); + if (unlikely((*bufs)->nb_segs > 1)) { + if (eth_mbuf_to_sg_fd(*bufs, + &fd_arr[loop], bpid)) + goto send_n_return; + } else { + eth_mbuf_to_fd(*bufs, + &fd_arr[loop], bpid); + } + } + bufs++; + } + loop = 0; + while (loop < frames_to_send) { + loop += qbman_swp_enqueue_multiple(swp, &eqdesc, + &fd_arr[loop], &flags[loop], + frames_to_send - loop); + } + + num_tx += frames_to_send; + nb_pkts -= frames_to_send; + } + dpaa2_q->tx_pkts += num_tx; + return num_tx; + +send_n_return: + /* send any already prepared fd */ + if (loop) { + unsigned int i = 0; + + while (i < loop) { + i += qbman_swp_enqueue_multiple(swp, &eqdesc, + &fd_arr[i], + &flags[loop], + loop - i); + } + num_tx += 
loop; + } +skip_tx: + dpaa2_q->tx_pkts += num_tx; + return num_tx; +} + +/** + * Dummy DPDK callback for TX. + * + * This function is used to temporarily replace the real callback during + * unsafe control operations on the queue, or in case of error. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +uint16_t +dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) +{ + (void)queue; + (void)bufs; + (void)nb_pkts; + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c new file mode 100644 index 00000000..80f94f40 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpkg.c @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2017 NXP + * + */ +#include <fsl_mc_sys.h> +#include <fsl_mc_cmd.h> +#include <fsl_dpkg.h> + +/** + * dpkg_prepare_key_cfg() - function prepare extract parameters + * @cfg: defining a full Key Generation profile (rule) + * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA + * + * This function has to be called before the following functions: + * - dpni_set_rx_tc_dist() + * - dpni_set_qos_table() + * - dpkg_prepare_key_cfg() + */ +int +dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, uint8_t *key_cfg_buf) +{ + int i, j; + struct dpni_ext_set_rx_tc_dist *dpni_ext; + struct dpni_dist_extract *extr; + + if (cfg->num_extracts > DPKG_MAX_NUM_OF_EXTRACTS) + return -EINVAL; + + dpni_ext = (struct dpni_ext_set_rx_tc_dist *)key_cfg_buf; + dpni_ext->num_extracts = cfg->num_extracts; + + for (i = 0; i < cfg->num_extracts; i++) { + extr = &dpni_ext->extracts[i]; + + switch (cfg->extracts[i].type) { + case DPKG_EXTRACT_FROM_HDR: + extr->prot = cfg->extracts[i].extract.from_hdr.prot; + dpkg_set_field(extr->efh_type, EFH_TYPE, + cfg->extracts[i].extract.from_hdr.type); + extr->size = cfg->extracts[i].extract.from_hdr.size; + extr->offset = cfg->extracts[i].extract.from_hdr.offset; + extr->field = cpu_to_le32( + cfg->extracts[i].extract.from_hdr.field); + extr->hdr_index = + cfg->extracts[i].extract.from_hdr.hdr_index; + break; + case DPKG_EXTRACT_FROM_DATA: + extr->size = cfg->extracts[i].extract.from_data.size; + extr->offset = + cfg->extracts[i].extract.from_data.offset; + break; + case DPKG_EXTRACT_FROM_PARSE: + extr->size = cfg->extracts[i].extract.from_parse.size; + extr->offset = + cfg->extracts[i].extract.from_parse.offset; + break; + default: + return -EINVAL; + } + + extr->num_of_byte_masks = cfg->extracts[i].num_of_byte_masks; + dpkg_set_field(extr->extract_type, EXTRACT_TYPE, + cfg->extracts[i].type); + + for (j = 0; j < DPKG_NUM_OF_MASKS; j++) { + extr->masks[j].mask = cfg->extracts[i].masks[j].mask; + extr->masks[j].offset = + cfg->extracts[i].masks[j].offset; + } + } + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c new file mode 100644 index 00000000..9f228169 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/dpni.c @@ -0,0 +1,1943 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016 NXP + * + */ +#include <fsl_mc_sys.h> +#include <fsl_mc_cmd.h> +#include <fsl_dpni.h> +#include <fsl_dpni_cmd.h> + +/** + * dpni_open() - Open a control session for the specified object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @dpni_id: DPNI unique ID + * @token: Returned token; use in subsequent API calls + * + * This function can be used to open a control session for an + * already created object; an object may have been declared in + * the DPL or by calling the dpni_create() function. + * This function returns a unique authentication token, + * associated with the specific object ID and the specific MC + * portal; this token must be used in all subsequent commands for + * this specific object. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpni_id, + uint16_t *token) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_open *cmd_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, + cmd_flags, + 0); + cmd_params = (struct dpni_cmd_open *)cmd.params; + cmd_params->dpni_id = cpu_to_le32(dpni_id); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *token = mc_cmd_hdr_read_token(&cmd); + + return 0; +} + +/** + * dpni_close() - Close the control session of the object + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * After this function is called, no further operations are + * allowed on the object without opening a new control session. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_create() - Create the DPNI object + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @cfg: Configuration structure + * @obj_id: Returned object id + * + * Create the DPNI object, allocate required resources and + * perform required initialization. + * + * The object can be created either by declaring it in the + * DPL file, or by calling this function. + * + * The function accepts an authentication token of a parent + * container that this object should be assigned to. The token + * can be '0' so the object will be assigned to the default container. + * The newly created object can be opened with the returned + * object id and using the container's associated tokens and MC portals. + * + * Return: '0' on Success; Error code otherwise. 
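+ *
+ * A minimal create/open sketch (error handling elided; "mc_io" is
+ * assumed to be an already-initialised MC portal):
+ *
+ *	struct dpni_cfg cfg = { 0 };
+ *	uint32_t obj_id;
+ *	uint16_t token;
+ *
+ *	dpni_create(mc_io, 0, CMD_PRI_LOW, &cfg, &obj_id);
+ *	dpni_open(mc_io, CMD_PRI_LOW, (int)obj_id, &token);
+ *	... use the token with the other dpni_* calls ...
+ *	dpni_close(mc_io, CMD_PRI_LOW, token);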
+ */ +int dpni_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpni_cfg *cfg, + uint32_t *obj_id) +{ + struct dpni_cmd_create *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, + cmd_flags, + dprc_token); + cmd_params = (struct dpni_cmd_create *)cmd.params; + cmd_params->options = cpu_to_le32(cfg->options); + cmd_params->num_queues = cfg->num_queues; + cmd_params->num_tcs = cfg->num_tcs; + cmd_params->mac_filter_entries = cfg->mac_filter_entries; + cmd_params->vlan_filter_entries = cfg->vlan_filter_entries; + cmd_params->qos_entries = cfg->qos_entries; + cmd_params->fs_entries = cpu_to_le16(cfg->fs_entries); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + *obj_id = mc_cmd_read_object_id(&cmd); + + return 0; +} + +/** + * dpni_destroy() - Destroy the DPNI object and release all its resources. + * @mc_io: Pointer to MC portal's I/O object + * @dprc_token: Parent container token; '0' for default container + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @object_id: The object id; it must be a valid id within the container that + * created this object; + * + * The function accepts the authentication token of the parent container that + * created the object (not the one that currently owns the object). The object + * is searched within parent using the provided 'object_id'. + * All tokens to the object must be closed before calling destroy. + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id) +{ + struct dpni_cmd_destroy *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, + cmd_flags, + dprc_token); + /* set object id to destroy */ + cmd_params = (struct dpni_cmd_destroy *)cmd.params; + cmd_params->dpsw_id = cpu_to_le32(object_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_pools() - Set buffer pools configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Buffer pools configuration + * + * mandatory for DPNI operation + * warning:Allowed only when DPNI is disabled + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_pools(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_pools_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_pools *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_pools *)cmd.params; + cmd_params->num_dpbp = cfg->num_dpbp; + for (i = 0; i < cmd_params->num_dpbp; i++) { + cmd_params->pool[i].dpbp_id = + cpu_to_le16(cfg->pools[i].dpbp_id); + cmd_params->pool[i].priority_mask = + cfg->pools[i].priority_mask; + cmd_params->buffer_size[i] = + cpu_to_le16(cfg->pools[i].buffer_size); + cmd_params->backup_pool_mask |= + DPNI_BACKUP_POOL(cfg->pools[i].backup_pool, i); + } + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_enable() - Enable the DPNI, allow sending and receiving frames. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_disable() - Disable the DPNI, stop sending and receiving frames. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_is_enabled() - Check if the DPNI is enabled. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if object is enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_is_enabled *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_is_enabled *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_reset() - Reset the DPNI, returns the object to initial state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_irq_enable() - Set overall interrupt state. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Interrupt state: - enable = 1, disable = 0 + * + * Allows GPP software to control when interrupts are generated. + * Each interrupt can have up to 32 causes. The enable/disable control's the + * overall interrupt state. if the interrupt is disabled no causes will cause + * an interrupt. + * + * Return: '0' on Success; Error code otherwise. 
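+ *
+ * Example (sketch; assumes an already open 'token' and zero command
+ * flags; the index and event macros are defined in fsl_dpni.h):
+ *
+ *    dpni_set_irq_mask(mc_io, 0, token, DPNI_IRQ_INDEX,
+ *                      DPNI_IRQ_EVENT_LINK_CHANGED);
+ *    dpni_set_irq_enable(mc_io, 0, token, DPNI_IRQ_INDEX, 1);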
+ */ +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_enable *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_enable *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_enable() - Get overall interrupt state + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @en: Returned interrupt state - enable = 1, disable = 0 + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_enable *cmd_params; + struct dpni_rsp_get_irq_enable *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_enable *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_enable *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_irq_mask() - Set interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: Event mask to trigger interrupt; + * each bit: + * 0 = ignore event + * 1 = consider event for asserting IRQ + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t mask) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_irq_mask *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_irq_mask *)cmd.params; + cmd_params->mask = cpu_to_le32(mask); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_irq_mask() - Get interrupt mask. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @mask: Returned event mask to trigger interrupt + * + * Every interrupt can have up to 32 causes and the interrupt model supports + * masking/unmasking each cause independently + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *mask) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_mask *cmd_params; + struct dpni_rsp_get_irq_mask *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_mask *)cmd.params; + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_mask *)cmd.params; + *mask = le32_to_cpu(rsp_params->mask); + + return 0; +} + +/** + * dpni_get_irq_status() - Get the current status of any pending interrupts. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: Returned interrupts status - one bit per cause: + * 0 = no interrupt pending + * 1 = interrupt pending + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *status) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_irq_status *cmd_params; + struct dpni_rsp_get_irq_status *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_irq_status *)cmd.params; + cmd_params->status = cpu_to_le32(*status); + cmd_params->irq_index = irq_index; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_irq_status *)cmd.params; + *status = le32_to_cpu(rsp_params->status); + + return 0; +} + +/** + * dpni_clear_irq_status() - Clear a pending interrupt's status + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @irq_index: The interrupt index to configure + * @status: bits to clear (W1C) - one bit per cause: + * 0 = don't change + * 1 = clear status bit + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t status) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_clear_irq_status *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_irq_status *)cmd.params; + cmd_params->irq_index = irq_index; + cmd_params->status = cpu_to_le32(status); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_attributes() - Retrieve DPNI attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @attr: Object's attributes + * + * Return: '0' on Success; Error code otherwise. 
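+ *
+ * Example (sketch; assumes an open 'token'; error handling omitted):
+ *
+ *    struct dpni_attr attr = { 0 };
+ *    dpni_get_attributes(mc_io, 0, token, &attr);
+ *    if (attr.options & DPNI_OPT_NO_FS)
+ *            ...no flow steering table on this DPNI...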
+ */ +int dpni_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_attr *attr) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_attr *rsp_params; + + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_attr *)cmd.params; + attr->options = le32_to_cpu(rsp_params->options); + attr->num_queues = rsp_params->num_queues; + attr->num_rx_tcs = rsp_params->num_rx_tcs; + attr->num_tx_tcs = rsp_params->num_tx_tcs; + attr->mac_filter_entries = rsp_params->mac_filter_entries; + attr->vlan_filter_entries = rsp_params->vlan_filter_entries; + attr->qos_entries = rsp_params->qos_entries; + attr->fs_entries = le16_to_cpu(rsp_params->fs_entries); + attr->qos_key_size = rsp_params->qos_key_size; + attr->fs_key_size = rsp_params->fs_key_size; + attr->wriop_version = le16_to_cpu(rsp_params->wriop_version); + + return 0; +} + +/** + * dpni_set_errors_behavior() - Set errors behavior + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Errors configuration + * + * This function may be called numerous times with different + * error masks + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_error_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_errors_behavior *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_errors_behavior *)cmd.params; + cmd_params->errors = cpu_to_le32(cfg->errors); + dpni_set_field(cmd_params->flags, ERROR_ACTION, cfg->error_action); + dpni_set_field(cmd_params->flags, FRAME_ANN, cfg->set_frame_annotation); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_buffer_layout() - Retrieve buffer layout attributes. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to retrieve configuration for + * @layout: Returns buffer layout attributes + * + * Return: '0' on Success; Error code otherwise. 
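+ *
+ * Example of a read-modify-write sequence (sketch; assumes an open,
+ * disabled DPNI; the DPNI_BUF_LAYOUT_OPT_* selector named below is
+ * taken from fsl_dpni.h and must match the field being changed):
+ *
+ *    struct dpni_buffer_layout layout = { 0 };
+ *    dpni_get_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);
+ *    layout.options = DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
+ *    layout.data_align = 64;
+ *    dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);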
+ */ +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_buffer_layout *cmd_params; + struct dpni_rsp_get_buffer_layout *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_buffer_layout *)cmd.params; + layout->pass_timestamp = dpni_get_field(rsp_params->flags, PASS_TS); + layout->pass_parser_result = dpni_get_field(rsp_params->flags, PASS_PR); + layout->pass_frame_status = dpni_get_field(rsp_params->flags, PASS_FS); + layout->private_data_size = le16_to_cpu(rsp_params->private_data_size); + layout->data_align = le16_to_cpu(rsp_params->data_align); + layout->data_head_room = le16_to_cpu(rsp_params->head_room); + layout->data_tail_room = le16_to_cpu(rsp_params->tail_room); + + return 0; +} + +/** + * dpni_set_buffer_layout() - Set buffer layout configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue this configuration applies to + * @layout: Buffer layout configuration + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Allowed only when DPNI is disabled + */ +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_buffer_layout *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_buffer_layout *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->options = cpu_to_le16(layout->options); + dpni_set_field(cmd_params->flags, PASS_TS, layout->pass_timestamp); + dpni_set_field(cmd_params->flags, PASS_PR, layout->pass_parser_result); + dpni_set_field(cmd_params->flags, PASS_FS, layout->pass_frame_status); + cmd_params->private_data_size = cpu_to_le16(layout->private_data_size); + cmd_params->data_align = cpu_to_le16(layout->data_align); + cmd_params->head_room = cpu_to_le16(layout->data_head_room); + cmd_params->tail_room = cpu_to_le16(layout->data_tail_room); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_offload() - Set DPNI offload configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: Type of DPNI offload + * @config: Offload configuration. + * For checksum offloads, non-zero value enables the offload + * + * Return: '0' on Success; Error code otherwise. 
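+ *
+ * Example enabling Rx L3 checksum validation (sketch; assumes an open,
+ * disabled DPNI; offload selectors such as DPNI_OFF_RX_L3_CSUM come
+ * from 'enum dpni_offload' in fsl_dpni.h):
+ *
+ *    dpni_set_offload(mc_io, 0, token, DPNI_OFF_RX_L3_CSUM, 1);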
+ * + * @warning Allowed only when DPNI is disabled + */ + +int dpni_set_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t config) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_offload *cmd_params; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_offload *)cmd.params; + cmd_params->dpni_offload = type; + cmd_params->config = cpu_to_le32(config); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_offload() - Get DPNI offload configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @type: Type of DPNI offload + * @config: Offload configuration. + * For checksum offloads, a value of 1 indicates that the + * offload is enabled. + * + * Return: '0' on Success; Error code otherwise. + * + * @warning Allowed only when DPNI is disabled + */ +int dpni_get_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t *config) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_offload *cmd_params; + struct dpni_rsp_get_offload *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_offload *)cmd.params; + cmd_params->dpni_offload = type; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_offload *)cmd.params; + *config = le32_to_cpu(rsp_params->config); + + return 0; +} + +/** + * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used + * for enqueue operations + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue to receive QDID for + * @qdid: Returned virtual QDID value that should be used as an argument + * in all enqueue operations + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_qdid(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint16_t *qdid) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_qdid *cmd_params; + struct dpni_rsp_get_qdid *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_qdid *)cmd.params; + cmd_params->qtype = qtype; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_qdid *)cmd.params; + *qdid = le16_to_cpu(rsp_params->qdid); + + return 0; +} + +/** + * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @data_offset: Tx data offset (from start of buffer) + * + * Return: '0' on Success; Error code otherwise. 
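+ *
+ * Example (sketch; assumes an open 'token'; the returned offset tells
+ * the driver where frame data must start inside each Tx buffer):
+ *
+ *    uint16_t tx_offset = 0;
+ *    dpni_get_tx_data_offset(mc_io, 0, token, &tx_offset);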
+ */ +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *data_offset) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_tx_data_offset *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_tx_data_offset *)cmd.params; + *data_offset = le16_to_cpu(rsp_params->data_offset); + + return 0; +} + +/** + * dpni_set_link_cfg() - set the link configuration. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cfg: Link configuration + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_link_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_link_cfg *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_link_cfg *)cmd.params; + cmd_params->rate = cpu_to_le32(cfg->rate); + cmd_params->options = cpu_to_le64(cfg->options); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_link_state() - Return the link state (either up or down) + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @state: Returned link state; + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_link_state *state) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_link_state *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_link_state *)cmd.params; + state->up = dpni_get_field(rsp_params->flags, LINK_STATE); + state->rate = le32_to_cpu(rsp_params->rate); + state->options = le64_to_cpu(rsp_params->options); + + return 0; +} + +/** + * dpni_set_max_frame_length() - Set the maximum received frame length. + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in bytes); + * frame is discarded if its length exceeds this value + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_max_frame_length *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_max_frame_length *)cmd.params; + cmd_params->max_frame_length = cpu_to_le16(max_frame_length); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_max_frame_length() - Get the maximum received frame length. 
+ * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @max_frame_length: Maximum received frame length (in bytes); + * frame is discarded if its length exceeds this value + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *max_frame_length) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_max_frame_length *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_max_frame_length *)cmd.params; + *max_frame_length = le16_to_cpu(rsp_params->max_frame_length); + + return 0; +} + +/** + * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_multicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_multicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_multicast_promisc() - Get multicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_multicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_multicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. 
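+ *
+ * Example enabling fully promiscuous reception (sketch; assumes an
+ * open 'token'):
+ *
+ *    dpni_set_unicast_promisc(mc_io, 0, token, 1);
+ *    dpni_set_multicast_promisc(mc_io, 0, token, 1);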
+ */ +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_unicast_promisc *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_unicast_promisc *)cmd.params; + dpni_set_field(cmd_params->enable, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_unicast_promisc() - Get unicast promiscuous mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Returns '1' if enabled; '0' otherwise + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_unicast_promisc *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_unicast_promisc *)cmd.params; + *en = dpni_get_field(rsp_params->enabled, ENABLE); + + return 0; +} + +/** + * dpni_set_primary_mac_addr() - Set the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to set as primary address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_primary_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_primary_mac_addr() - Get the primary MAC address + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: Returned MAC address + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_primary_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_primary_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_add_mac_addr() - Add MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to add + * + * Return: '0' on Success; Error code otherwise. 
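+ *
+ * Example (sketch; assumes an open 'token'; the address is passed
+ * most-significant octet first, and the implementation below reverses
+ * it into the little-endian layout the MC expects):
+ *
+ *    const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
+ *    dpni_add_mac_addr(mc_io, 0, token, mac);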
+ */ +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_add_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_add_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_mac_addr() - Remove MAC address filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_remove_mac_addr *cmd_params; + int i; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_remove_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + cmd_params->mac_addr[i] = mac_addr[5 - i]; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @unicast: Set to '1' to clear unicast addresses + * @multicast: Set to '1' to clear multicast addresses + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int unicast, + int multicast) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_clear_mac_filters *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_clear_mac_filters *)cmd.params; + dpni_set_field(cmd_params->flags, UNICAST_FILTERS, unicast); + dpni_set_field(cmd_params->flags, MULTICAST_FILTERS, multicast); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical + * port the DPNI is attached to + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mac_addr: MAC address of the physical port, if any, otherwise 0 + * + * The primary MAC address is not cleared by this operation. + * + * Return: '0' on Success; Error code otherwise. 
+ */ +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]) +{ + struct mc_command cmd = { 0 }; + struct dpni_rsp_get_port_mac_addr *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PORT_MAC_ADDR, + cmd_flags, + token); + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_port_mac_addr *)cmd.params; + for (i = 0; i < 6; i++) + mac_addr[5 - i] = rsp_params->mac_addr[i]; + + return 0; +} + +/** + * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @en: Set to '1' to enable; '0' to disable + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en) +{ + struct dpni_cmd_enable_vlan_filter *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params; + dpni_set_field(cmd_params->en, ENABLE, en); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_add_vlan_id() - Add VLAN ID filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @vlan_id: VLAN ID to add + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_add_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id) +{ + struct dpni_cmd_vlan_id *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_vlan_id *)cmd.params; + cmd_params->vlan_id = cpu_to_le16(vlan_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_remove_vlan_id() - Remove VLAN ID filter + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @vlan_id: VLAN ID to remove + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id) +{ + struct dpni_cmd_vlan_id *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_vlan_id *)cmd.params; + cmd_params->vlan_id = cpu_to_le16(vlan_id); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_clear_vlan_filters() - Clear all VLAN filters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. 
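+ *
+ * Example of a typical VLAN filtering sequence (sketch; assumes an
+ * open 'token'; VLAN ID 100 is arbitrary):
+ *
+ *    dpni_enable_vlan_filter(mc_io, 0, token, 1);
+ *    dpni_add_vlan_id(mc_io, 0, token, 100);
+ *    ...
+ *    dpni_clear_vlan_filters(mc_io, 0, token);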
+ */ +int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @tc_id: Traffic class selection (0-7) + * @cfg: Traffic class distribution configuration + * + * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpkg_prepare_key_cfg() + * first to prepare the key_cfg_iova parameter + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + const struct dpni_rx_tc_dist_cfg *cfg) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_rx_tc_dist *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_rx_tc_dist *)cmd.params; + cmd_params->dist_size = cpu_to_le16(cfg->dist_size); + cmd_params->tc_id = tc_id; + cmd_params->default_flow_id = cpu_to_le16(cfg->fs_cfg.default_flow_id); + cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova); + dpni_set_field(cmd_params->flags, + DIST_MODE, + cfg->dist_mode); + dpni_set_field(cmd_params->flags, + MISS_ACTION, + cfg->fs_cfg.miss_action); + dpni_set_field(cmd_params->keep_hash_key, + KEEP_HASH_KEY, + cfg->fs_cfg.keep_hash_key); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_tx_confirmation_mode() - Tx confirmation mode + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @mode: Tx confirmation mode + * + * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not + * selected at DPNI creation. + * Calling this function with 'mode' set to DPNI_CONF_DISABLE disables all + * transmit confirmation (including the private confirmation queues), regardless + * of previous settings; Note that in this case, Tx error frames are still + * enqueued to the general transmit errors queue. + * Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all + * Tx confirmations to a shared Tx conf queue. 'index' field in dpni_get_queue + * command will be ignored. + * + * Return: '0' on Success; Error code otherwise. 
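+ *
+ * Example disabling all Tx confirmations (sketch; assumes an open
+ * 'token'; DPNI_CONF_DISABLE is one of 'enum dpni_confirmation_mode'):
+ *
+ *    dpni_set_tx_confirmation_mode(mc_io, 0, token, DPNI_CONF_DISABLE);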
+ */ +int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_confirmation_mode mode) +{ + struct dpni_tx_confirmation_mode *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONFIRMATION_MODE, + cmd_flags, + token); + cmd_params = (struct dpni_tx_confirmation_mode *)cmd.params; + cmd_params->confirmation_mode = mode; + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_congestion_notification() - Set traffic class congestion + * notification configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported + * @tc_id: Traffic class selection (0-7) + * @cfg: congestion notification configuration + * + * Return: '0' on Success; error code otherwise. + */ +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + const struct dpni_congestion_notification_cfg *cfg) +{ + struct dpni_cmd_set_congestion_notification *cmd_params; + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_SET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc_id; + cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); + cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); + cmd_params->dest_priority = cfg->dest_cfg.priority; + cmd_params->message_iova = cpu_to_le64(cfg->message_iova); + cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); + cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); + cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit); + dpni_set_field(cmd_params->type_units, + DEST_TYPE, + cfg->dest_cfg.dest_type); + dpni_set_field(cmd_params->type_units, + CONG_UNITS, + cfg->units); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_congestion_notification() - Get traffic class congestion + * notification configuration + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - Rx, Tx and Tx confirm types are supported + * @tc_id: Traffic class selection (0-7) + * @cfg: congestion notification configuration + * + * Return: '0' on Success; error code otherwise. 
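+ *
+ * Example reading back the Rx TC 0 configuration (sketch; assumes an
+ * open 'token'):
+ *
+ *    struct dpni_congestion_notification_cfg cn = { 0 };
+ *    dpni_get_congestion_notification(mc_io, 0, token, DPNI_QUEUE_RX,
+ *                                     0, &cn);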
+ */ +int dpni_get_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + struct dpni_congestion_notification_cfg *cfg) +{ + struct dpni_rsp_get_congestion_notification *rsp_params; + struct dpni_cmd_get_congestion_notification *cmd_params; + struct mc_command cmd = { 0 }; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header( + DPNI_CMDID_GET_CONGESTION_NOTIFICATION, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_congestion_notification *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc_id; + + /* send command to mc*/ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_get_congestion_notification *)cmd.params; + cfg->units = dpni_get_field(rsp_params->type_units, CONG_UNITS); + cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); + cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); + cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); + cfg->message_iova = le64_to_cpu(rsp_params->message_iova); + cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); + cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); + cfg->dest_cfg.priority = rsp_params->dest_priority; + cfg->dest_cfg.dest_type = dpni_get_field(rsp_params->type_units, + DEST_TYPE); + + return 0; +} + +/** + * dpni_get_api_version() - Get Data Path Network Interface API version + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @major_ver: Major version of data path network interface API + * @minor_ver: Minor version of data path network interface API + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver) +{ + struct dpni_rsp_get_api_version *rsp_params; + struct mc_command cmd = { 0 }; + int err; + + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION, + cmd_flags, + 0); + + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + rsp_params = (struct dpni_rsp_get_api_version *)cmd.params; + *major_ver = le16_to_cpu(rsp_params->major); + *minor_ver = le16_to_cpu(rsp_params->minor); + + return 0; +} + +/** + * dpni_set_queue() - Set queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported, although + * the command is ignored for Tx + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. Value must be in range 0 to NUM_QUEUES - 1 + * @options: A combination of DPNI_QUEUE_OPT_ values that control what + * configuration options are set on the queue + * @queue: Queue structure + * + * Return: '0' on Success; Error code otherwise. 
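+ *
+ * Example directing Rx TC 0, queue 0 to a hypothetical DPCON channel
+ * 'dpcon_id' with a caller-chosen user context (sketch; the
+ * DPNI_QUEUE_OPT_* selectors and the DPNI_DEST_DPCON destination type
+ * are taken from fsl_dpni.h; assumes an open 'token'):
+ *
+ *    struct dpni_queue q = { 0 };
+ *    q.destination.id = dpcon_id;
+ *    q.destination.type = DPNI_DEST_DPCON;
+ *    q.destination.priority = 1;
+ *    q.user_context = 0xdeadbeef;
+ *    dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
+ *                   DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX, &q);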
+ */ +int dpni_set_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + uint8_t options, + const struct dpni_queue *queue) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_queue *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + cmd_params->options = options; + cmd_params->dest_id = cpu_to_le32(queue->destination.id); + cmd_params->dest_prio = queue->destination.priority; + dpni_set_field(cmd_params->flags, DEST_TYPE, queue->destination.type); + dpni_set_field(cmd_params->flags, STASH_CTRL, queue->flc.stash_control); + dpni_set_field(cmd_params->flags, HOLD_ACTIVE, + queue->destination.hold_active); + cmd_params->flc = cpu_to_le64(queue->flc.value); + cmd_params->user_context = cpu_to_le64(queue->user_context); + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_queue() - Get queue parameters + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @qtype: Type of queue - all queue types are supported + * @tc: Traffic class, in range 0 to NUM_TCS - 1 + * @index: Selects the specific queue out of the set allocated for the + * same TC. Value must be in range 0 to NUM_QUEUES - 1 + * @queue: Queue configuration structure + * @qid: Queue identification + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_queue *queue, + struct dpni_queue_id *qid) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_queue *cmd_params; + struct dpni_rsp_get_queue *rsp_params; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_queue *)cmd.params; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_queue *)cmd.params; + queue->destination.id = le32_to_cpu(rsp_params->dest_id); + queue->destination.priority = rsp_params->dest_prio; + queue->destination.type = dpni_get_field(rsp_params->flags, + DEST_TYPE); + queue->flc.stash_control = dpni_get_field(rsp_params->flags, + STASH_CTRL); + queue->destination.hold_active = dpni_get_field(rsp_params->flags, + HOLD_ACTIVE); + queue->flc.value = le64_to_cpu(rsp_params->flc); + queue->user_context = le64_to_cpu(rsp_params->user_context); + qid->fqid = le32_to_cpu(rsp_params->fqid); + qid->qdbin = le16_to_cpu(rsp_params->qdbin); + + return 0; +} + +/** + * dpni_get_statistics() - Get DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @page: Selects the statistics page to retrieve, see + * DPNI_GET_STATISTICS output. Pages are numbered 0 to 2. + * @param: Custom parameter for some pages used to select + * a certain statistic source, for example the TC. + * @stat: Structure containing the statistics + * + * Return: '0' on Success; Error code otherwise. 
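+ *
+ * Example reading statistics page 0 via the raw view (sketch; assumes
+ * an open 'token'; named per-page views also exist in the union):
+ *
+ *    union dpni_statistics stats;
+ *    uint64_t first;
+ *    dpni_get_statistics(mc_io, 0, token, 0, 0, &stats);
+ *    first = stats.raw.counter[0];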
+ */ +int dpni_get_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t page, + uint8_t param, + union dpni_statistics *stat) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_statistics *cmd_params; + struct dpni_rsp_get_statistics *rsp_params; + int i, err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_statistics *)cmd.params; + cmd_params->page_number = page; + cmd_params->param = param; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_statistics *)cmd.params; + for (i = 0; i < DPNI_STATISTICS_CNT; i++) + stat->raw.counter[i] = le64_to_cpu(rsp_params->counter[i]); + + return 0; +} + +/** + * dpni_reset_statistics() - Clears DPNI statistics + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_reset_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token) +{ + struct mc_command cmd = { 0 }; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS, + cmd_flags, + token); + + /* send command to mc*/ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_set_taildrop() - Set taildrop per queue or TC + * + * Setting a per-TC taildrop (cg_point = DPNI_CP_GROUP) will reset any current + * congestion notification or early drop (WRED) configuration previously applied + * to the same TC. + * + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point, DPNI_CP_QUEUE is only supported in + * combination with DPNI_QUEUE_RX. + * @q_type: Queue type, can be DPNI_QUEUE_RX or DPNI_QUEUE_TX. + * @tc: Traffic class to apply this taildrop to + * @q_index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. + * Ignored if CONGESTION_POINT is not DPNI_CP_QUEUE. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise. 
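+ *
+ * Example enabling a 64 KiB per-queue taildrop on Rx TC 0, queue 0
+ * (sketch; assumes an open 'token'; DPNI_CONGESTION_UNIT_BYTES is from
+ * 'enum dpni_congestion_unit' in fsl_dpni.h):
+ *
+ *    struct dpni_taildrop td = { 0 };
+ *    td.enable = 1;
+ *    td.units = DPNI_CONGESTION_UNIT_BYTES;
+ *    td.threshold = 64 * 1024;
+ *    dpni_set_taildrop(mc_io, 0, token, DPNI_CP_QUEUE, DPNI_QUEUE_RX,
+ *                      0, 0, &td);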
+ */ +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_taildrop *taildrop) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_set_taildrop *cmd_params; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_set_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + cmd_params->units = taildrop->units; + cmd_params->threshold = cpu_to_le32(taildrop->threshold); + dpni_set_field(cmd_params->enable_oal_lo, ENABLE, taildrop->enable); + dpni_set_field(cmd_params->enable_oal_lo, OAL_LO, taildrop->oal); + dpni_set_field(cmd_params->oal_hi, + OAL_HI, + taildrop->oal >> DPNI_OAL_LO_SIZE); + + /* send command to mc */ + return mc_send_command(mc_io, &cmd); +} + +/** + * dpni_get_taildrop() - Get taildrop information + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPNI object + * @cg_point: Congestion point + * @q_type: Queue type on which the taildrop is configured. + * Only Rx queues are supported for now + * @tc: Traffic class to apply this taildrop to + * @q_index: Index of the queue if the DPNI supports multiple queues for + * traffic distribution. Ignored if CONGESTION_POINT is not 0. + * @taildrop: Taildrop structure + * + * Return: '0' on Success; Error code otherwise. + */ +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_taildrop *taildrop) +{ + struct mc_command cmd = { 0 }; + struct dpni_cmd_get_taildrop *cmd_params; + struct dpni_rsp_get_taildrop *rsp_params; + uint8_t oal_lo, oal_hi; + int err; + + /* prepare command */ + cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TAILDROP, + cmd_flags, + token); + cmd_params = (struct dpni_cmd_get_taildrop *)cmd.params; + cmd_params->congestion_point = cg_point; + cmd_params->qtype = qtype; + cmd_params->tc = tc; + cmd_params->index = index; + + /* send command to mc */ + err = mc_send_command(mc_io, &cmd); + if (err) + return err; + + /* retrieve response parameters */ + rsp_params = (struct dpni_rsp_get_taildrop *)cmd.params; + taildrop->enable = dpni_get_field(rsp_params->enable_oal_lo, ENABLE); + taildrop->units = rsp_params->units; + taildrop->threshold = le32_to_cpu(rsp_params->threshold); + oal_lo = dpni_get_field(rsp_params->enable_oal_lo, OAL_LO); + oal_hi = dpni_get_field(rsp_params->oal_hi, OAL_HI); + taildrop->oal = oal_hi << DPNI_OAL_LO_SIZE | oal_lo; + + /* Fill the first 4 bits, 'oal' is a 2's complement value of 12 bits */ + if (taildrop->oal >= 0x0800) + taildrop->oal |= 0xF000; + + return 0; +} diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h new file mode 100644 index 00000000..4de70f30 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpkg.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright 2013-2015 Freescale Semiconductor Inc. 
+ * Copyright 2016-2017 NXP + * + */ +#ifndef __FSL_DPKG_H_ +#define __FSL_DPKG_H_ + +#include <fsl_net.h> + +/* Data Path Key Generator API + * Contains initialization APIs and runtime APIs for the Key Generator + */ + +/** Key Generator properties */ + +/** + * Number of masks per key extraction + */ +#define DPKG_NUM_OF_MASKS 4 +/** + * Number of extractions per key profile + */ +#define DPKG_MAX_NUM_OF_EXTRACTS 10 + +/** + * enum dpkg_extract_from_hdr_type - Selecting extraction by header types + * @DPKG_FROM_HDR: Extract selected bytes from header, by offset + * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field + * @DPKG_FULL_FIELD: Extract a full field + */ +enum dpkg_extract_from_hdr_type { + DPKG_FROM_HDR = 0, + DPKG_FROM_FIELD = 1, + DPKG_FULL_FIELD = 2 +}; + +/** + * enum dpkg_extract_type - Enumeration for selecting extraction type + * @DPKG_EXTRACT_FROM_HDR: Extract from the header + * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header + * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; + * e.g. can be used to extract header existence; + * please refer to 'Parse Result definition' section in the parser BG + */ +enum dpkg_extract_type { + DPKG_EXTRACT_FROM_HDR = 0, + DPKG_EXTRACT_FROM_DATA = 1, + DPKG_EXTRACT_FROM_PARSE = 3 +}; + +/** + * struct dpkg_mask - A structure for defining a single extraction mask + * @mask: Byte mask for the extracted content + * @offset: Offset within the extracted content + */ +struct dpkg_mask { + uint8_t mask; + uint8_t offset; +}; + +/* Macros for accessing command fields smaller than 1byte */ +#define DPKG_MASK(field) \ + GENMASK(DPKG_##field##_SHIFT + DPKG_##field##_SIZE - 1, \ + DPKG_##field##_SHIFT) +#define dpkg_set_field(var, field, val) \ + ((var) |= (((val) << DPKG_##field##_SHIFT) & DPKG_MASK(field))) +#define dpkg_get_field(var, field) \ + (((var) & DPKG_MASK(field)) >> DPKG_##field##_SHIFT) + +/** + * struct dpkg_extract - A structure for defining a single extraction + * @type: Determines how the union below is interpreted: + * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; + * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; + * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' + * @extract: Selects extraction method + * @num_of_byte_masks: Defines the number of valid entries in the array below; + * This is also the number of bytes to be used as masks + * @masks: Masks parameters + */ +struct dpkg_extract { + enum dpkg_extract_type type; + /** + * union extract - Selects extraction method + * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' + * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' + * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' + */ + union { + /** + * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' + * @prot: Any of the supported headers + * @type: Defines the type of header extraction: + * DPKG_FROM_HDR: use size & offset below; + * DPKG_FROM_FIELD: use field, size and offset below; + * DPKG_FULL_FIELD: use field below + * @field: One of the supported fields (NH_FLD_) + * + * @size: Size in bytes + * @offset: Byte offset + * @hdr_index: Clear for cases not listed below; + * Used for protocols that may have more than a single + * header, 0 indicates an outer header; + * Supported protocols (possible values): + * NET_PROT_VLAN (0, HDR_INDEX_LAST); + * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); + * NET_PROT_IP(0, HDR_INDEX_LAST); + * NET_PROT_IPv4(0, HDR_INDEX_LAST); + * NET_PROT_IPv6(0, HDR_INDEX_LAST); + */ + + struct { + enum net_prot prot; + enum 
dpkg_extract_from_hdr_type type; + uint32_t field; + uint8_t size; + uint8_t offset; + uint8_t hdr_index; + } from_hdr; + /** + * struct from_data + * Used when 'type = DPKG_EXTRACT_FROM_DATA' + * @size: Size in bytes + * @offset: Byte offset + */ + struct { + uint8_t size; + uint8_t offset; + } from_data; + + /** + * struct from_parse + * Used when 'type = DPKG_EXTRACT_FROM_PARSE' + * @size: Size in bytes + * @offset: Byte offset + */ + struct { + uint8_t size; + uint8_t offset; + } from_parse; + } extract; + + uint8_t num_of_byte_masks; + struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; +}; + +/** + * struct dpkg_profile_cfg - A structure for defining a full Key Generation + * profile (rule) + * @num_extracts: Defines the number of valid entries in the array below + * @extracts: Array of required extractions + */ +struct dpkg_profile_cfg { + uint8_t num_extracts; + struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; +}; + +/* dpni_set_rx_tc_dist extension (structure of the DMA-able memory at + * key_cfg_iova) + */ +struct dpni_mask_cfg { + uint8_t mask; + uint8_t offset; +}; + +#define DPKG_EFH_TYPE_SHIFT 0 +#define DPKG_EFH_TYPE_SIZE 4 +#define DPKG_EXTRACT_TYPE_SHIFT 0 +#define DPKG_EXTRACT_TYPE_SIZE 4 + +struct dpni_dist_extract { + /* word 0 */ + uint8_t prot; + /* EFH type stored in the 4 least significant bits */ + uint8_t efh_type; + uint8_t size; + uint8_t offset; + uint32_t field; + /* word 1 */ + uint8_t hdr_index; + uint8_t constant; + uint8_t num_of_repeats; + uint8_t num_of_byte_masks; + /* Extraction type is stored in the 4 LSBs */ + uint8_t extract_type; + uint8_t pad[3]; + /* word 2 */ + struct dpni_mask_cfg masks[4]; +}; + +struct dpni_ext_set_rx_tc_dist { + /* extension word 0 */ + uint8_t num_extracts; + uint8_t pad[7]; + /* words 1..25 */ + struct dpni_dist_extract extracts[10]; +}; + +int dpkg_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, + uint8_t *key_cfg_buf); + +#endif /* __FSL_DPKG_H_ */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h new file mode 100644 index 00000000..f0edcd27 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni.h @@ -0,0 +1,1135 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. + * Copyright 2016-2017 NXP + * + */ +#ifndef __FSL_DPNI_H +#define __FSL_DPNI_H + +#include <fsl_dpkg.h> + +struct fsl_mc_io; + +/** + * Data Path Network Interface API + * Contains initialization APIs and runtime control APIs for DPNI + */ + +/** General DPNI macros */ + +/** + * Maximum number of traffic classes + */ +#define DPNI_MAX_TC 8 +/** + * Maximum number of buffer pools per DPNI + */ +#define DPNI_MAX_DPBP 8 +/** + * Maximum number of storage-profiles per DPNI + */ +#define DPNI_MAX_SP 2 + +/** + * All traffic classes considered; see dpni_set_queue() + */ +#define DPNI_ALL_TCS (uint8_t)(-1) +/** + * All flows within traffic class considered; see dpni_set_queue() + */ +#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) + +/** + * Tx traffic is always released to a buffer pool on transmit, there are no + * resources allocated to have the frames confirmed back to the source after + * transmission. + */ +#define DPNI_OPT_TX_FRM_RELEASE 0x000001 +/** + * Disables support for MAC address filtering for addresses other than primary + * MAC address. This affects both unicast and multicast. Promiscuous mode can + * still be enabled/disabled for both unicast and multicast. 
If promiscuous mode + * is disabled, only traffic matching the primary MAC address will be accepted. + */ +#define DPNI_OPT_NO_MAC_FILTER 0x000002 +/** + * Allocate policers for this DPNI. They can be used to rate-limit traffic on a + * per-traffic-class (TC) basis. + */ +#define DPNI_OPT_HAS_POLICING 0x000004 +/** + * Congestion can be managed in several ways, allowing the buffer pool to + * deplete on ingress, taildrop on each queue or use congestion groups for sets + * of queues. If set, it configures a single congestion group across all TCs. + * If reset, a congestion group is allocated for each TC. Only relevant if the + * DPNI has multiple traffic classes. + */ +#define DPNI_OPT_SHARED_CONGESTION 0x000008 +/** + * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all + * look-ups are exact match. Note that TCAM is not available on LS1088 and its + * variants. Setting this bit on these SoCs will trigger an error. + */ +#define DPNI_OPT_HAS_KEY_MASKING 0x000010 +/** + * Disables the flow steering table. + */ +#define DPNI_OPT_NO_FS 0x000020 + +int dpni_open(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + int dpni_id, + uint16_t *token); + +int dpni_close(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * struct dpni_cfg - Structure representing DPNI configuration; + * default is all zeros, use this structure to change default settings + */ +struct dpni_cfg { + /** + * @options: Any combination of the following options: + * DPNI_OPT_TX_FRM_RELEASE + * DPNI_OPT_NO_MAC_FILTER + * DPNI_OPT_HAS_POLICING + * DPNI_OPT_SHARED_CONGESTION + * DPNI_OPT_HAS_KEY_MASKING + * DPNI_OPT_NO_FS + * @fs_entries: Number of entries in the flow steering table. + * This table is used to select the ingress queue for + * ingress traffic, targeting one GPP core or another. + * In addition it can be used to discard traffic that + * matches the set rule. It is either an exact match table + * or a TCAM table, depending on the DPNI_OPT_HAS_KEY_MASKING + * bit in the OPTIONS field. This field is ignored if the + * DPNI_OPT_NO_FS bit is set in the OPTIONS field. Otherwise, + * value 0 defaults to 64. Maximum supported value is 1024. + * Note that the total number of entries is limited on the + * SoC to as low as 512 entries if TCAM is used. + * @vlan_filter_entries: Number of entries in the VLAN address filtering + * table. This is an exact match table used to filter + * ingress traffic based on VLAN IDs. Value 0 disables VLAN + * filtering. Maximum supported value is 16. + * @mac_filter_entries: Number of entries in the MAC address filtering + * table. This is an exact match table and allows both + * unicast and multicast entries. The primary MAC address + * of the network interface is not part of this table, + * this contains only entries in addition to it. This + * field is ignored if DPNI_OPT_NO_MAC_FILTER is set in + * the OPTIONS field. Otherwise, value 0 defaults to 80. + * Maximum supported value is 80. + * @num_queues: Number of Tx and Rx queues used for traffic + * distribution. This is orthogonal to QoS and is only + * used to distribute traffic to multiple GPP cores. + * This configuration affects the number of Tx queues + * (logical FQs, all associated with a single CEETM queue), + * Rx queues and Tx confirmation queues, if applicable. + * Value 0 defaults to one queue. Maximum supported value + * is 8. + * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * TCs can have different priority levels for the purpose + * of Tx scheduling (see DPNI_SET_TX_SELECTION), different + * BPs (DPNI_SET_POOLS), policers. There are dedicated QM + * queues for traffic classes (including class queues on + * Tx). Value 0 defaults to one TC. Maximum supported value + * is 8. + * @qos_entries: Number of entries in the QoS classification table. This + * table is used to select the TC for ingress traffic. It + * is either an exact match or a TCAM table, depending on the + * DPNI_OPT_HAS_KEY_MASKING bit in the OPTIONS field. This + * field is ignored if the DPNI has a single TC. Otherwise, + * a value of 0 defaults to 64. Maximum supported value + * is 64. + */ + uint32_t options; + uint16_t fs_entries; + uint8_t vlan_filter_entries; + uint8_t mac_filter_entries; + uint8_t num_queues; + uint8_t num_tcs; + uint8_t qos_entries; +}; + +int dpni_create(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + const struct dpni_cfg *cfg, + uint32_t *obj_id); + +int dpni_destroy(struct fsl_mc_io *mc_io, + uint16_t dprc_token, + uint32_t cmd_flags, + uint32_t object_id); + +/** + * struct dpni_pools_cfg - Structure representing buffer pools configuration + * @num_dpbp: Number of DPBPs + * @pools: Array of buffer pool parameters; the number of valid entries + * must match the 'num_dpbp' value + */ +struct dpni_pools_cfg { + uint8_t num_dpbp; + /** + * struct pools - Buffer pools parameters + * @dpbp_id: DPBP object ID + * @priority_mask: Priority mask that indicates the TCs used with this + * buffer. If set to 0x00, MC will assume the value 0xff. + * @buffer_size: Buffer size + * @backup_pool: Backup pool + */ + struct { + int dpbp_id; + uint8_t priority_mask; + uint16_t buffer_size; + int backup_pool; + } pools[DPNI_MAX_DPBP]; +}; + +int dpni_set_pools(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_pools_cfg *cfg); + +int dpni_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpni_disable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +int dpni_is_enabled(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpni_reset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * DPNI IRQ Index and Events + */ + +/** + * IRQ index + */ +#define DPNI_IRQ_INDEX 0 +/** + * IRQ event - indicates a change in link state + */ +#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 + +int dpni_set_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t en); + +int dpni_get_irq_enable(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint8_t *en); + +int dpni_set_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t mask); + +int dpni_get_irq_mask(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *mask); + +int dpni_get_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t *status); + +int dpni_clear_irq_status(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t irq_index, + uint32_t status); + +/** + * struct dpni_attr - Structure representing DPNI attributes + * @options: Any combination of the following options: + * DPNI_OPT_TX_FRM_RELEASE + * DPNI_OPT_NO_MAC_FILTER + * DPNI_OPT_HAS_POLICING + * DPNI_OPT_SHARED_CONGESTION + * DPNI_OPT_HAS_KEY_MASKING + * DPNI_OPT_NO_FS + * @num_queues: Number of Tx
and Rx queues used for traffic distribution. + * @num_rx_tcs: Number of RX traffic classes (TCs), reserved for the DPNI. + * @num_tx_tcs: Number of TX traffic classes (TCs), reserved for the DPNI. + * @mac_filter_entries: Number of entries in the MAC address filtering + * table. + * @vlan_filter_entries: Number of entries in the VLAN address filtering + * table. + * @qos_entries: Number of entries in the QoS classification table. + * @fs_entries: Number of entries in the flow steering table. + * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger + * than this when adding QoS entries will result + * in an error. + * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a + * key larger than this when composing the hash + FS key + * will result in an error. + * @wriop_version: Version of WRIOP HW block. + * The 3 version values are stored on 6, 5, 5 bits + * respectively. + * Values returned: + * - 0x400 - WRIOP version 1.0.0, used on LS2080 and + * variants, + * - 0x421 - WRIOP version 1.1.1, used on LS2088 and + * variants, + * - 0x422 - WRIOP version 1.1.2, used on LS1088 and + * variants. + */ +struct dpni_attr { + uint32_t options; + uint8_t num_queues; + uint8_t num_rx_tcs; + uint8_t num_tx_tcs; + uint8_t mac_filter_entries; + uint8_t vlan_filter_entries; + uint8_t qos_entries; + uint16_t fs_entries; + uint8_t qos_key_size; + uint8_t fs_key_size; + uint16_t wriop_version; +}; + +int dpni_get_attributes(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_attr *attr); + +/** + * DPNI errors + */ + +/** + * Extract out of frame header error + */ +#define DPNI_ERROR_EOFHE 0x00020000 +/** + * Frame length error + */ +#define DPNI_ERROR_FLE 0x00002000 +/** + * Frame physical error + */ +#define DPNI_ERROR_FPE 0x00001000 +/** + * Parsing header error + */ +#define DPNI_ERROR_PHE 0x00000020 +/** + * Parser L3 checksum error + */ +#define DPNI_ERROR_L3CE 0x00000004 +/** + * Parser L4 checksum error + */ +#define DPNI_ERROR_L4CE 0x00000001 + +/** + * enum dpni_error_action - Defines DPNI behavior for errors + * @DPNI_ERROR_ACTION_DISCARD: Discard the frame + * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow + * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue + */ +enum dpni_error_action { + DPNI_ERROR_ACTION_DISCARD = 0, + DPNI_ERROR_ACTION_CONTINUE = 1, + DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 +}; + +/** + * struct dpni_error_cfg - Structure representing DPNI error treatment + * @errors: Errors mask; use 'DPNI_ERROR_<X>' values + * @error_action: The desired action for the errors mask + * @set_frame_annotation: Set to '1' to mark the errors in frame + * annotation status (FAS); relevant only + * for the non-discard action + */ +struct dpni_error_cfg { + uint32_t errors; + enum dpni_error_action error_action; + int set_frame_annotation; +}; + +int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_error_cfg *cfg); + +/** + * DPNI buffer layout modification options + */ + +/** + * Select to modify the time-stamp setting + */ +#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 +/** + * Select to modify the parser-result setting; not applicable for Tx + */ +#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 +/** + * Select to modify the frame-status setting + */ +#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 +/** + * Select to modify the private-data-size setting + */ +#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 +/** + *
Select to modify the data-alignment setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 +/** + * Select to modify the data-head-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 +/** + * Select to modify the data-tail-room setting + */ +#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 + +/** + * struct dpni_buffer_layout - Structure representing DPNI buffer layout + * @options: Flags representing the suggested modifications to the + * buffer layout; + * Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags + * @pass_timestamp: Pass timestamp value + * @pass_parser_result: Pass parser results + * @pass_frame_status: Pass frame status + * @private_data_size: Size kept for private data (in bytes) + * @data_align: Data alignment + * @data_head_room: Data head room + * @data_tail_room: Data tail room + */ +struct dpni_buffer_layout { + uint32_t options; + int pass_timestamp; + int pass_parser_result; + int pass_frame_status; + uint16_t private_data_size; + uint16_t data_align; + uint16_t data_head_room; + uint16_t data_tail_room; +}; + +/** + * enum dpni_queue_type - Identifies a type of queue targeted by the command + * @DPNI_QUEUE_RX: Rx queue + * @DPNI_QUEUE_TX: Tx queue + * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue + * @DPNI_QUEUE_RX_ERR: Rx error queue + */ +enum dpni_queue_type { + DPNI_QUEUE_RX, + DPNI_QUEUE_TX, + DPNI_QUEUE_TX_CONFIRM, + DPNI_QUEUE_RX_ERR, +}; + +int dpni_get_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + struct dpni_buffer_layout *layout); + +int dpni_set_buffer_layout(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + const struct dpni_buffer_layout *layout); + +/** + * enum dpni_offload - Identifies a type of offload targeted by the command + * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation + * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation + * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation + * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation + * @DPNI_FLCTYPE_HASH: Flow context will be generated by WRIOP for AIOP or + * for CPU + */ +enum dpni_offload { + DPNI_OFF_RX_L3_CSUM, + DPNI_OFF_RX_L4_CSUM, + DPNI_OFF_TX_L3_CSUM, + DPNI_OFF_TX_L4_CSUM, + DPNI_FLCTYPE_HASH, +}; + +int dpni_set_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t config); + +int dpni_get_offload(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_offload type, + uint32_t *config); + +int dpni_get_qdid(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint16_t *qdid); + +int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *data_offset); + +#define DPNI_STATISTICS_CNT 7 + +union dpni_statistics { + /** + * struct page_0 - Page_0 statistics structure + * @ingress_all_frames: Ingress frame count + * @ingress_all_bytes: Ingress byte count + * @ingress_multicast_frames: Ingress multicast frame count + * @ingress_multicast_bytes: Ingress multicast byte count + * @ingress_broadcast_frames: Ingress broadcast frame count + * @ingress_broadcast_bytes: Ingress broadcast byte count + */ + struct { + uint64_t ingress_all_frames; + uint64_t ingress_all_bytes; + uint64_t ingress_multicast_frames; + uint64_t ingress_multicast_bytes; + uint64_t ingress_broadcast_frames; + uint64_t ingress_broadcast_bytes; + } page_0; + /** + * struct page_1 - Page_1 statistics structure + *
@egress_all_frames: Egress frame count + * @egress_all_bytes: Egress byte count + * @egress_multicast_frames: Egress multicast frame count + * @egress_multicast_bytes: Egress multicast byte count + * @egress_broadcast_frames: Egress broadcast frame count + * @egress_broadcast_bytes: Egress broadcast byte count + */ + struct { + uint64_t egress_all_frames; + uint64_t egress_all_bytes; + uint64_t egress_multicast_frames; + uint64_t egress_multicast_bytes; + uint64_t egress_broadcast_frames; + uint64_t egress_broadcast_bytes; + } page_1; + /** + * struct page_2 - Page_2 statistics structure + * @ingress_filtered_frames: Ingress filtered frame count + * @ingress_discarded_frames: Ingress discarded frame count + * @ingress_nobuffer_discards: Ingress discarded frame count due to + * lack of buffers + * @egress_discarded_frames: Egress discarded frame count + * @egress_confirmed_frames: Egress confirmed frame count + */ + struct { + uint64_t ingress_filtered_frames; + uint64_t ingress_discarded_frames; + uint64_t ingress_nobuffer_discards; + uint64_t egress_discarded_frames; + uint64_t egress_confirmed_frames; + } page_2; + /** + * struct page_3 - Page_3 statistics structure with values for the + * selected TC + * @ceetm_dequeue_bytes: Cumulative count of the number of bytes + * dequeued + * @ceetm_dequeue_frames: Cumulative count of the number of frames + * dequeued + * @ceetm_reject_bytes: Cumulative count of the number of bytes in all + * frames whose enqueue was rejected + * @ceetm_reject_frames: Cumulative count of all frame enqueues rejected + */ + struct { + uint64_t ceetm_dequeue_bytes; + uint64_t ceetm_dequeue_frames; + uint64_t ceetm_reject_bytes; + uint64_t ceetm_reject_frames; + } page_3; + /** + * struct raw - Raw statistics structure, used to index counters + */ + struct { + uint64_t counter[DPNI_STATISTICS_CNT]; + } raw; +}; + +/** + * Enable auto-negotiation + */ +#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL +/** + * Enable half-duplex mode + */ +#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL +/** + * Enable pause frames + */ +#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL +/** + * Enable asymmetric pause frames + */ +#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL +/** + * Enable priority flow control pause frames + */ +#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL + +/** + * struct dpni_link_cfg - Structure representing DPNI link configuration + * @rate: Link rate, in Mbps + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values + */ +struct dpni_link_cfg { + uint32_t rate; + uint64_t options; +}; + +int dpni_set_link_cfg(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const struct dpni_link_cfg *cfg); + +/** + * struct dpni_link_state - Structure representing DPNI link state + * @rate: Link rate, in Mbps + * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values + * @up: Link state; '0' for down, '1' for up + */ +struct dpni_link_state { + uint32_t rate; + uint64_t options; + int up; +}; + +int dpni_get_link_state(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + struct dpni_link_state *state); + +int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t max_frame_length); + +int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t *max_frame_length); + +int dpni_set_mtu(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t mtu); + +int dpni_get_mtu(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, +
uint16_t token, + uint16_t *mtu); + +int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en); + +int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en); + +int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int *en); + +int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]); + +int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]); + +int dpni_add_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]); + +int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + const uint8_t mac_addr[6]); + +int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int unicast, + int multicast); + +int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t mac_addr[6]); + +int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + int en); + +int dpni_add_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id); + +int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint16_t vlan_id); + +int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * enum dpni_dist_mode - DPNI distribution mode + * @DPNI_DIST_MODE_NONE: No distribution + * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if + * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation + * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if + * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation + */ +enum dpni_dist_mode { + DPNI_DIST_MODE_NONE = 0, + DPNI_DIST_MODE_HASH = 1, + DPNI_DIST_MODE_FS = 2 +}; + +/** + * enum dpni_fs_miss_action - DPNI Flow Steering miss action + * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame + * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id + * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash + */ +enum dpni_fs_miss_action { + DPNI_FS_MISS_DROP = 0, + DPNI_FS_MISS_EXPLICIT_FLOWID = 1, + DPNI_FS_MISS_HASH = 2 +}; + +/** + * struct dpni_fs_tbl_cfg - Flow Steering table configuration + * @miss_action: Miss action selection + * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' + */ +struct dpni_fs_tbl_cfg { + enum dpni_fs_miss_action miss_action; + uint16_t default_flow_id; + char keep_hash_key; +}; + +/** + * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration + * @dist_size: Set the distribution size; + * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, + * 112,128,192,224,256,384,448,512,768,896,1024 + * @dist_mode: Distribution mode + * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with + * the extractions to be used for the distribution key by calling + * dpkg_prepare_key_cfg() relevant only when + * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' + * @fs_cfg: Flow Steering table configuration; only relevant if + * 'dist_mode = DPNI_DIST_MODE_FS' + */ +struct dpni_rx_tc_dist_cfg { + uint16_t dist_size; + enum dpni_dist_mode dist_mode; + uint64_t 
key_cfg_iova; + struct dpni_fs_tbl_cfg fs_cfg; +}; + +int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t tc_id, + const struct dpni_rx_tc_dist_cfg *cfg); + +/** + * enum dpni_congestion_unit - DPNI congestion units + * @DPNI_CONGESTION_UNIT_BYTES: bytes units + * @DPNI_CONGESTION_UNIT_FRAMES: frames units + */ +enum dpni_congestion_unit { + DPNI_CONGESTION_UNIT_BYTES = 0, + DPNI_CONGESTION_UNIT_FRAMES +}; + + +/** + * enum dpni_dest - DPNI destination types + * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and + * does not generate FQDAN notifications; user is expected to + * dequeue from the queue based on polling or other user-defined + * method + * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN + * notifications to the specified DPIO; user is expected to dequeue + * from the queue only after notification is received + * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate + * FQDAN notifications, but is connected to the specified DPCON + * object; user is expected to dequeue from the DPCON channel + */ +enum dpni_dest { + DPNI_DEST_NONE = 0, + DPNI_DEST_DPIO = 1, + DPNI_DEST_DPCON = 2 +}; + +/** + * struct dpni_dest_cfg - Structure representing DPNI destination parameters + * @dest_type: Destination type + * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type + * @priority: Priority selection within the DPIO or DPCON channel; valid values + * are 0-1 or 0-7, depending on the number of priorities in that + * channel; not relevant for 'DPNI_DEST_NONE' option + */ +struct dpni_dest_cfg { + enum dpni_dest dest_type; + int dest_id; + uint8_t priority; +}; + +/* DPNI congestion options */ + +/** + * CSCN message is written to message_iova once entering a + * congestion state (see 'threshold_entry') + */ +#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 +/** + * CSCN message is written to message_iova once exiting a + * congestion state (see 'threshold_exit') + */ +#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 +/** + * CSCN write will attempt to allocate into a cache (coherent write); + * valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is selected + */ +#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 +/** + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to + * DPIO/DPCON's WQ channel once entering a congestion state + * (see 'threshold_entry') + */ +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 +/** + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to + * DPIO/DPCON's WQ channel once exiting a congestion state + * (see 'threshold_exit') + */ +#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 +/** + * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the + * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) + */ +#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 +/** + * This congestion will trigger flow control or priority flow control. This + * will have effect only if flow control is enabled with dpni_set_link_cfg() + */ +#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040 + +/** + * struct dpni_congestion_notification_cfg - congestion notification + * configuration + * @units: units type + * @threshold_entry: above this threshold we enter a congestion state. + * set it to '0' to disable it + * @threshold_exit: below this threshold we exit the congestion state. 
+ * @message_ctx: The context that will be part of the CSCN message + * @message_iova: I/O virtual address (must be in DMA-able memory), + * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>' is + * contained in 'options' + * @dest_cfg: CSCN can be sent to either the DPIO or the DPCON WQ channel + * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values + */ + +struct dpni_congestion_notification_cfg { + enum dpni_congestion_unit units; + uint32_t threshold_entry; + uint32_t threshold_exit; + uint64_t message_ctx; + uint64_t message_iova; + struct dpni_dest_cfg dest_cfg; + uint16_t notification_mode; +}; + +int dpni_set_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + const struct dpni_congestion_notification_cfg *cfg); + +int dpni_get_congestion_notification(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc_id, + struct dpni_congestion_notification_cfg *cfg); + +/* DPNI FLC stash options */ + +/** + * Stashes the whole annotation area (up to 192 bytes) + */ +#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001 + +/** + * struct dpni_queue - Queue structure + * @user_context: User data, presented to the user along with any frames + * from this queue. Not relevant for Tx queues. + */ +struct dpni_queue { + /** + * struct destination - Destination structure + * @id: ID of the destination, only relevant if DEST_TYPE is > 0. + * Identifies either a DPIO or a DPCON object. + * Not relevant for Tx queues. + * @type: May be one of the following: + * 0 - No destination, queue can be manually + * queried, but will not push traffic or + * notifications to a DPIO; + * 1 - The destination is a DPIO. When traffic + * becomes available in the queue a FQDAN + * (FQ data available notification) will be + * generated to selected DPIO; + * 2 - The destination is a DPCON. The queue is + * associated with a DPCON object for the + * purpose of scheduling between multiple + * queues. The DPCON may be independently + * configured to generate notifications. + * Not relevant for Tx queues. + * @hold_active: Hold active, maintains a queue scheduled for longer + * in a DPIO during dequeue to reduce spread of traffic. + * Only relevant if queues are + * not affined to a single DPIO. + */ + struct { + uint16_t id; + enum dpni_dest type; + char hold_active; + uint8_t priority; + } destination; + uint64_t user_context; + /** + * struct flc - FD Flow Context structure + * @value: Default FLC value for traffic dequeued from + * this queue. Please check description of FD + * structure for more information. + * Note that FLC values set using dpni_add_fs_entry, + * if any, take precedence over values per queue. + * @stash_control: Boolean; indicates whether the 6 least + * significant bits are used for stash control. If set, the 6 + * least significant bits in value are interpreted as follows: + * - bits 0-1: indicates the number of 64 byte units of context + * that are stashed. FLC value is interpreted as a memory address + * in this case, excluding the 6 LS bits. + * - bits 2-3: indicates the number of 64 byte units of frame + * annotation to be stashed. Annotation is placed at FD[ADDR]. + * - bits 4-5: indicates the number of 64 byte units of frame + * data to be stashed. Frame data is placed at FD[ADDR] + + * FD[OFFSET]. + * For more details check the Frame Descriptor section in the + * hardware documentation. + */ + struct { + uint64_t value; + char stash_control; + } flc; +}; + +/** + * struct dpni_queue_id - Queue identification, used for enqueue commands + * or queue control + * @fqid: FQID used for enqueueing to and/or configuration of this + * specific FQ + * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI. + * Only relevant for Tx queues. + */ +struct dpni_queue_id { + uint32_t fqid; + uint16_t qdbin; +}; + +/** + * enum dpni_confirmation_mode - Defines DPNI options supported for Tx + * confirmation + * @DPNI_CONF_AFFINE: For each Tx queue set associated with a sender there is + * an affine Tx Confirmation queue + * @DPNI_CONF_SINGLE: All Tx queues are associated with a single Tx + * confirmation queue + * @DPNI_CONF_DISABLE: Tx frames are not confirmed. This must be associated + * with proper FD set-up to have buffers released to a Buffer Pool, otherwise + * buffers will be leaked + */ +enum dpni_confirmation_mode { + DPNI_CONF_AFFINE, + DPNI_CONF_SINGLE, + DPNI_CONF_DISABLE, +}; + +int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_confirmation_mode mode); + +int dpni_get_tx_confirmation_mode(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_confirmation_mode *mode); + +int dpni_get_api_version(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t *major_ver, + uint16_t *minor_ver); + +/** + * Set User Context + */ +#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 + +/** + * Set queue destination configuration + */ +#define DPNI_QUEUE_OPT_DEST 0x00000002 + +/** + * Set FD[FLC] configuration for traffic on this queue. Note that FLC values + * set with dpni_add_fs_entry, if any, take precedence over values per queue. + */ +#define DPNI_QUEUE_OPT_FLC 0x00000004 + +/** + * Set the queue to hold active mode. This prevents the queue from being + * rescheduled between DPIOs while it carries traffic and is active on one + * DPIO. Can help reduce reordering when servicing one queue on multiple + * CPUs, but the queue is also less likely to push data to multiple CPUs + * especially when congested. + */ +#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008 + +int dpni_set_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + uint8_t options, + const struct dpni_queue *queue); + +int dpni_get_queue(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_queue_type qtype, + uint8_t tc, + uint8_t index, + struct dpni_queue *queue, + struct dpni_queue_id *qid); + +int dpni_get_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + uint8_t page, + uint8_t param, + union dpni_statistics *stat); + +int dpni_reset_statistics(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token); + +/** + * enum dpni_congestion_point - Enumeration representing the congestion point + * @DPNI_CP_QUEUE: Set taildrop per queue, identified by QUEUE_TYPE, TC and + * QUEUE_INDEX + * @DPNI_CP_GROUP: Set taildrop per queue group. Depending on options used + * to define the DPNI this can be either per + * TC (default) or per interface + * (DPNI_OPT_SHARED_CONGESTION set at DPNI create). + * QUEUE_INDEX is ignored if this type is used.
+ */ +enum dpni_congestion_point { + DPNI_CP_QUEUE, + DPNI_CP_GROUP, +}; + +/** + * struct dpni_taildrop - Structure representing the taildrop + * @enable: Indicates whether the taildrop is active or not. + * @units: Indicates the unit of THRESHOLD. Queue taildrop only + * supports byte units, this field is ignored and + * assumed = 0 if CONGESTION_POINT is 0. + * @threshold: Threshold value, in units identified by UNITS field. Value 0 + * cannot be used as a valid taildrop threshold, + * THRESHOLD must be > 0 if the taildrop is + * enabled. + * @oal : Overhead Accounting Length, a 12-bit, 2's complement value + * with range (-2048 to +2047) representing a fixed per-frame + * overhead to be added to the actual length of a frame when + * performing WRED and tail drop calculations and threshold + * comparisons. + */ +struct dpni_taildrop { + char enable; + enum dpni_congestion_unit units; + uint32_t threshold; + int16_t oal; +}; + +int dpni_set_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + uint8_t tc, + uint8_t q_index, + struct dpni_taildrop *taildrop); + +int dpni_get_taildrop(struct fsl_mc_io *mc_io, + uint32_t cmd_flags, + uint16_t token, + enum dpni_congestion_point cg_point, + enum dpni_queue_type q_type, + uint8_t tc, + uint8_t q_index, + struct dpni_taildrop *taildrop); +#endif /* __FSL_DPNI_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h new file mode 100644 index 00000000..eb3e9987 --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_dpni_cmd.h @@ -0,0 +1,605 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2016 Freescale Semiconductor Inc. 
+ * Copyright 2016-2017 NXP + * + */ +#ifndef _FSL_DPNI_CMD_H +#define _FSL_DPNI_CMD_H + +/* DPNI Version */ +#define DPNI_VER_MAJOR 7 +#define DPNI_VER_MINOR 3 + +#define DPNI_CMD_BASE_VERSION 1 +#define DPNI_CMD_VERSION_2 2 +#define DPNI_CMD_ID_OFFSET 4 + +#define DPNI_CMD(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_BASE_VERSION) +#define DPNI_CMD_V2(id) (((id) << DPNI_CMD_ID_OFFSET) | DPNI_CMD_VERSION_2) + +/* Command IDs */ +#define DPNI_CMDID_OPEN DPNI_CMD(0x801) +#define DPNI_CMDID_CLOSE DPNI_CMD(0x800) +#define DPNI_CMDID_CREATE DPNI_CMD(0x901) +#define DPNI_CMDID_DESTROY DPNI_CMD(0x981) +#define DPNI_CMDID_GET_API_VERSION DPNI_CMD(0xa01) + +#define DPNI_CMDID_ENABLE DPNI_CMD(0x002) +#define DPNI_CMDID_DISABLE DPNI_CMD(0x003) +#define DPNI_CMDID_GET_ATTR DPNI_CMD_V2(0x004) +#define DPNI_CMDID_RESET DPNI_CMD(0x005) +#define DPNI_CMDID_IS_ENABLED DPNI_CMD(0x006) + +#define DPNI_CMDID_SET_IRQ_ENABLE DPNI_CMD(0x012) +#define DPNI_CMDID_GET_IRQ_ENABLE DPNI_CMD(0x013) +#define DPNI_CMDID_SET_IRQ_MASK DPNI_CMD(0x014) +#define DPNI_CMDID_GET_IRQ_MASK DPNI_CMD(0x015) +#define DPNI_CMDID_GET_IRQ_STATUS DPNI_CMD(0x016) +#define DPNI_CMDID_CLEAR_IRQ_STATUS DPNI_CMD(0x017) + +#define DPNI_CMDID_SET_POOLS DPNI_CMD_V2(0x200) +#define DPNI_CMDID_SET_ERRORS_BEHAVIOR DPNI_CMD(0x20B) + +#define DPNI_CMDID_GET_QDID DPNI_CMD(0x210) +#define DPNI_CMDID_GET_SP_INFO DPNI_CMD(0x211) +#define DPNI_CMDID_GET_TX_DATA_OFFSET DPNI_CMD(0x212) +#define DPNI_CMDID_GET_LINK_STATE DPNI_CMD(0x215) +#define DPNI_CMDID_SET_MAX_FRAME_LENGTH DPNI_CMD(0x216) +#define DPNI_CMDID_GET_MAX_FRAME_LENGTH DPNI_CMD(0x217) +#define DPNI_CMDID_SET_LINK_CFG DPNI_CMD(0x21A) +#define DPNI_CMDID_SET_TX_SHAPING DPNI_CMD_V2(0x21B) + +#define DPNI_CMDID_SET_MCAST_PROMISC DPNI_CMD(0x220) +#define DPNI_CMDID_GET_MCAST_PROMISC DPNI_CMD(0x221) +#define DPNI_CMDID_SET_UNICAST_PROMISC DPNI_CMD(0x222) +#define DPNI_CMDID_GET_UNICAST_PROMISC DPNI_CMD(0x223) +#define DPNI_CMDID_SET_PRIM_MAC DPNI_CMD(0x224) +#define DPNI_CMDID_GET_PRIM_MAC DPNI_CMD(0x225) +#define DPNI_CMDID_ADD_MAC_ADDR DPNI_CMD(0x226) +#define DPNI_CMDID_REMOVE_MAC_ADDR DPNI_CMD(0x227) +#define DPNI_CMDID_CLR_MAC_FILTERS DPNI_CMD(0x228) + +#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230) +#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD(0x231) +#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232) +#define DPNI_CMDID_CLR_VLAN_FILTERS DPNI_CMD(0x233) + +#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD_V2(0x235) + +#define DPNI_CMDID_GET_STATISTICS DPNI_CMD_V2(0x25D) +#define DPNI_CMDID_RESET_STATISTICS DPNI_CMD(0x25E) +#define DPNI_CMDID_GET_QUEUE DPNI_CMD(0x25F) +#define DPNI_CMDID_SET_QUEUE DPNI_CMD(0x260) +#define DPNI_CMDID_GET_TAILDROP DPNI_CMD_V2(0x261) +#define DPNI_CMDID_SET_TAILDROP DPNI_CMD_V2(0x262) + +#define DPNI_CMDID_GET_PORT_MAC_ADDR DPNI_CMD(0x263) + +#define DPNI_CMDID_GET_BUFFER_LAYOUT DPNI_CMD(0x264) +#define DPNI_CMDID_SET_BUFFER_LAYOUT DPNI_CMD(0x265) + +#define DPNI_CMDID_SET_CONGESTION_NOTIFICATION DPNI_CMD(0x267) +#define DPNI_CMDID_GET_CONGESTION_NOTIFICATION DPNI_CMD(0x268) +#define DPNI_CMDID_SET_EARLY_DROP DPNI_CMD_V2(0x269) +#define DPNI_CMDID_GET_EARLY_DROP DPNI_CMD_V2(0x26A) +#define DPNI_CMDID_GET_OFFLOAD DPNI_CMD(0x26B) +#define DPNI_CMDID_SET_OFFLOAD DPNI_CMD(0x26C) +#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE DPNI_CMD(0x266) +#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE DPNI_CMD(0x26D) + +/* Macros for accessing command fields smaller than 1byte */ +#define DPNI_MASK(field) \ + GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \ + DPNI_##field##_SHIFT) 
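
The DPNI_MASK() macro above and the dpni_set_field()/dpni_get_field() helpers that follow use the same GENMASK-based pattern as the dpkg_set_field()/dpkg_get_field() macros in fsl_dpkg.h. As an illustrative sketch (not part of the upstream sources; GENMASK itself is assumed to come from the fslmc bus headers), packing and unpacking the 4-bit EFH_TYPE sub-field of struct dpni_dist_extract::efh_type looks like this:

/* Pack a dpkg_extract_from_hdr_type value into the 4-bit EFH_TYPE
 * sub-field, then read it back; the remaining bits stay untouched.
 */
static inline enum dpkg_extract_from_hdr_type
example_pack_unpack_efh_type(void)
{
	uint8_t efh_type = 0;

	/* bits 3..0 <- DPKG_FULL_FIELD (== 2) */
	dpkg_set_field(efh_type, EFH_TYPE, DPKG_FULL_FIELD);
	/* extract bits 3..0 again: returns DPKG_FULL_FIELD */
	return (enum dpkg_extract_from_hdr_type)
		dpkg_get_field(efh_type, EFH_TYPE);
}
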
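A minimal end-to-end sketch, likewise not part of the upstream sources, of how a caller might combine dpkg_prepare_key_cfg() and dpni_set_rx_tc_dist() from the headers above to hash-distribute the Rx traffic of one traffic class over the IP source/destination pair. The MC portal 'mc_io', the 'cmd_flags', the DPNI 'token' and the 256-byte DMA-able buffer 'key_cfg_buf' (with I/O virtual address 'key_cfg_iova') are assumed to have been set up by the caller:

static int example_hash_dist_on_ip_pair(struct fsl_mc_io *mc_io,
					uint32_t cmd_flags, uint16_t token,
					uint8_t *key_cfg_buf,
					uint64_t key_cfg_iova)
{
	struct dpkg_profile_cfg kg_cfg = {0};
	struct dpni_rx_tc_dist_cfg dist_cfg = {0};
	int err;

	/* Two full-field extractions: IP source and destination address */
	kg_cfg.num_extracts = 2;
	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
	kg_cfg.extracts[1].type = DPKG_EXTRACT_FROM_HDR;
	kg_cfg.extracts[1].extract.from_hdr.prot = NET_PROT_IP;
	kg_cfg.extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
	kg_cfg.extracts[1].extract.from_hdr.field = NH_FLD_IP_DST;

	/* Serialize the profile into the DMA-able buffer... */
	err = dpkg_prepare_key_cfg(&kg_cfg, key_cfg_buf);
	if (err)
		return err;

	/* ...and hash-distribute TC 0 across 8 Rx queues with that key */
	dist_cfg.dist_size = 8;
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
	dist_cfg.key_cfg_iova = key_cfg_iova;
	return dpni_set_rx_tc_dist(mc_io, cmd_flags, token, 0, &dist_cfg);
}

With DPNI_DIST_MODE_HASH the fs_cfg member is ignored, so only the distribution size, mode and key need to be filled in; 8 is one of the dist_size values listed as supported in fsl_dpni.h.
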
+#define dpni_set_field(var, field, val) \ + ((var) |= (((val) << DPNI_##field##_SHIFT) & DPNI_MASK(field))) +#define dpni_get_field(var, field) \ + (((var) & DPNI_MASK(field)) >> DPNI_##field##_SHIFT) + +#pragma pack(push, 1) +struct dpni_cmd_open { + uint32_t dpni_id; +}; + +struct dpni_cmd_create { + uint32_t options; + uint8_t num_queues; + uint8_t num_tcs; + uint8_t mac_filter_entries; + uint8_t pad1; + uint8_t vlan_filter_entries; + uint8_t pad2; + uint8_t qos_entries; + uint8_t pad3; + uint16_t fs_entries; +}; + +struct dpni_cmd_destroy { + uint32_t dpsw_id; +}; + +#define DPNI_BACKUP_POOL(val, order) (((val) & 0x1) << (order)) + +struct dpni_cmd_pool { + uint16_t dpbp_id; + uint8_t priority_mask; + uint8_t pad; +}; + +struct dpni_cmd_set_pools { + uint8_t num_dpbp; + uint8_t backup_pool_mask; + uint16_t pad; + struct dpni_cmd_pool pool[8]; + uint16_t buffer_size[8]; +}; + +/* The enable indication is always the least significant bit */ +#define DPNI_ENABLE_SHIFT 0 +#define DPNI_ENABLE_SIZE 1 + +struct dpni_rsp_is_enabled { + uint8_t enabled; +}; + +struct dpni_cmd_set_irq_enable { + uint8_t enable; + uint8_t pad[3]; + uint8_t irq_index; +}; + +struct dpni_cmd_get_irq_enable { + uint32_t pad; + uint8_t irq_index; +}; + +struct dpni_rsp_get_irq_enable { + uint8_t enabled; +}; + +struct dpni_cmd_set_irq_mask { + uint32_t mask; + uint8_t irq_index; +}; + +struct dpni_cmd_get_irq_mask { + uint32_t pad; + uint8_t irq_index; +}; + +struct dpni_rsp_get_irq_mask { + uint32_t mask; +}; + +struct dpni_cmd_get_irq_status { + uint32_t status; + uint8_t irq_index; +}; + +struct dpni_rsp_get_irq_status { + uint32_t status; +}; + +struct dpni_cmd_clear_irq_status { + uint32_t status; + uint8_t irq_index; +}; + +struct dpni_rsp_get_attr { + /* response word 0 */ + uint32_t options; + uint8_t num_queues; + uint8_t num_rx_tcs; + uint8_t mac_filter_entries; + uint8_t num_tx_tcs; + /* response word 1 */ + uint8_t vlan_filter_entries; + uint8_t pad1; + uint8_t qos_entries; + uint8_t pad2; + uint16_t fs_entries; + uint16_t pad3; + /* response word 2 */ + uint8_t qos_key_size; + uint8_t fs_key_size; + uint16_t wriop_version; +}; + +#define DPNI_ERROR_ACTION_SHIFT 0 +#define DPNI_ERROR_ACTION_SIZE 4 +#define DPNI_FRAME_ANN_SHIFT 4 +#define DPNI_FRAME_ANN_SIZE 1 + +struct dpni_cmd_set_errors_behavior { + uint32_t errors; + /* from least significant bit: error_action:4, set_frame_annotation:1 */ + uint8_t flags; +}; + +/* There are 3 separate commands for configuring Rx, Tx and Tx confirmation + * buffer layouts, but they all share the same parameters. + * If one of the functions changes, below structure needs to be split. 
+ */ + +#define DPNI_PASS_TS_SHIFT 0 +#define DPNI_PASS_TS_SIZE 1 +#define DPNI_PASS_PR_SHIFT 1 +#define DPNI_PASS_PR_SIZE 1 +#define DPNI_PASS_FS_SHIFT 2 +#define DPNI_PASS_FS_SIZE 1 + +struct dpni_cmd_get_buffer_layout { + uint8_t qtype; +}; + +struct dpni_rsp_get_buffer_layout { + /* response word 0 */ + uint8_t pad0[6]; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + uint8_t flags; + uint8_t pad1; + /* response word 1 */ + uint16_t private_data_size; + uint16_t data_align; + uint16_t head_room; + uint16_t tail_room; +}; + +struct dpni_cmd_set_buffer_layout { + /* cmd word 0 */ + uint8_t qtype; + uint8_t pad0[3]; + uint16_t options; + /* from LSB: pass_timestamp:1, parser_result:1, frame_status:1 */ + uint8_t flags; + uint8_t pad1; + /* cmd word 1 */ + uint16_t private_data_size; + uint16_t data_align; + uint16_t head_room; + uint16_t tail_room; +}; + +struct dpni_cmd_set_offload { + uint8_t pad[3]; + uint8_t dpni_offload; + uint32_t config; +}; + +struct dpni_cmd_get_offload { + uint8_t pad[3]; + uint8_t dpni_offload; +}; + +struct dpni_rsp_get_offload { + uint32_t pad; + uint32_t config; +}; + +struct dpni_cmd_get_qdid { + uint8_t qtype; +}; + +struct dpni_rsp_get_qdid { + uint16_t qdid; +}; + +struct dpni_rsp_get_sp_info { + uint16_t spids[2]; +}; + +struct dpni_rsp_get_tx_data_offset { + uint16_t data_offset; +}; + +struct dpni_cmd_get_statistics { + uint8_t page_number; + uint8_t param; +}; + +struct dpni_rsp_get_statistics { + uint64_t counter[7]; +}; + +struct dpni_cmd_set_link_cfg { + uint64_t pad0; + uint32_t rate; + uint32_t pad1; + uint64_t options; +}; + +#define DPNI_LINK_STATE_SHIFT 0 +#define DPNI_LINK_STATE_SIZE 1 + +struct dpni_rsp_get_link_state { + uint32_t pad0; + /* from LSB: up:1 */ + uint8_t flags; + uint8_t pad1[3]; + uint32_t rate; + uint32_t pad2; + uint64_t options; +}; + +struct dpni_cmd_set_max_frame_length { + uint16_t max_frame_length; +}; + +struct dpni_rsp_get_max_frame_length { + uint16_t max_frame_length; +}; + +struct dpni_cmd_set_multicast_promisc { + uint8_t enable; +}; + +struct dpni_rsp_get_multicast_promisc { + uint8_t enabled; +}; + +struct dpni_cmd_set_unicast_promisc { + uint8_t enable; +}; + +struct dpni_rsp_get_unicast_promisc { + uint8_t enabled; +}; + +struct dpni_cmd_set_primary_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_rsp_get_primary_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_rsp_get_port_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_cmd_add_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +struct dpni_cmd_remove_mac_addr { + uint16_t pad; + uint8_t mac_addr[6]; +}; + +#define DPNI_UNICAST_FILTERS_SHIFT 0 +#define DPNI_UNICAST_FILTERS_SIZE 1 +#define DPNI_MULTICAST_FILTERS_SHIFT 1 +#define DPNI_MULTICAST_FILTERS_SIZE 1 + +struct dpni_cmd_clear_mac_filters { + /* from LSB: unicast:1, multicast:1 */ + uint8_t flags; +}; + +struct dpni_cmd_enable_vlan_filter { + /* only the LSB */ + uint8_t en; +}; + +struct dpni_cmd_vlan_id { + uint32_t pad; + uint16_t vlan_id; +}; + +#define DPNI_SEPARATE_GRP_SHIFT 0 +#define DPNI_SEPARATE_GRP_SIZE 1 +#define DPNI_MODE_1_SHIFT 0 +#define DPNI_MODE_1_SIZE 4 +#define DPNI_MODE_2_SHIFT 4 +#define DPNI_MODE_2_SIZE 4 + +struct dpni_cmd_set_tx_priorities { + uint16_t flags; + uint8_t prio_group_A; + uint8_t prio_group_B; + uint32_t pad0; + uint8_t modes[4]; + uint32_t pad1; + uint64_t pad2; + uint16_t delta_bandwidth[8]; +}; + +#define DPNI_DIST_MODE_SHIFT 0 +#define DPNI_DIST_MODE_SIZE 4 +#define 
DPNI_MISS_ACTION_SHIFT 4 +#define DPNI_MISS_ACTION_SIZE 4 +#define DPNI_KEEP_HASH_KEY_SHIFT 7 +#define DPNI_KEEP_HASH_KEY_SIZE 1 + +struct dpni_cmd_set_rx_tc_dist { + uint16_t dist_size; + uint8_t tc_id; + /* from LSB: dist_mode:4, miss_action:4 */ + uint8_t flags; + uint8_t pad0; + /* only the LSB */ + uint8_t keep_hash_key; + uint16_t default_flow_id; + uint64_t pad1[5]; + uint64_t key_cfg_iova; +}; + +struct dpni_cmd_get_queue { + uint8_t qtype; + uint8_t tc; + uint8_t index; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_STASH_CTRL_SHIFT 6 +#define DPNI_STASH_CTRL_SIZE 1 +#define DPNI_HOLD_ACTIVE_SHIFT 7 +#define DPNI_HOLD_ACTIVE_SIZE 1 + +struct dpni_rsp_get_queue { + /* response word 0 */ + uint64_t pad0; + /* response word 1 */ + uint32_t dest_id; + uint16_t pad1; + uint8_t dest_prio; + /* From LSB: dest_type:4, pad:2, flc_stash_ctrl:1, hold_active:1 */ + uint8_t flags; + /* response word 2 */ + uint64_t flc; + /* response word 3 */ + uint64_t user_context; + /* response word 4 */ + uint32_t fqid; + uint16_t qdbin; +}; + +struct dpni_cmd_set_queue { + /* cmd word 0 */ + uint8_t qtype; + uint8_t tc; + uint8_t index; + uint8_t options; + uint32_t pad0; + /* cmd word 1 */ + uint32_t dest_id; + uint16_t pad1; + uint8_t dest_prio; + uint8_t flags; + /* cmd word 2 */ + uint64_t flc; + /* cmd word 3 */ + uint64_t user_context; +}; + +#define DPNI_DROP_ENABLE_SHIFT 0 +#define DPNI_DROP_ENABLE_SIZE 1 +#define DPNI_DROP_UNITS_SHIFT 2 +#define DPNI_DROP_UNITS_SIZE 2 + +struct dpni_early_drop { + /* from LSB: enable:1 units:2 */ + uint8_t flags; + uint8_t pad0[3]; + uint32_t pad1; + uint8_t green_drop_probability; + uint8_t pad2[7]; + uint64_t green_max_threshold; + uint64_t green_min_threshold; + uint64_t pad3; + uint8_t yellow_drop_probability; + uint8_t pad4[7]; + uint64_t yellow_max_threshold; + uint64_t yellow_min_threshold; + uint64_t pad5; + uint8_t red_drop_probability; + uint8_t pad6[7]; + uint64_t red_max_threshold; + uint64_t red_min_threshold; +}; + +struct dpni_cmd_early_drop { + uint8_t qtype; + uint8_t tc; + uint8_t pad[6]; + uint64_t early_drop_iova; +}; + +struct dpni_rsp_get_api_version { + uint16_t major; + uint16_t minor; +}; + +struct dpni_cmd_get_taildrop { + uint8_t congestion_point; + uint8_t qtype; + uint8_t tc; + uint8_t index; +}; + +struct dpni_rsp_get_taildrop { + /* cmd word 0 */ + uint64_t pad0; + /* cmd word 1 */ + /* from LSB: enable:1 oal_lo:7 */ + uint8_t enable_oal_lo; + /* from LSB: oal_hi:5 */ + uint8_t oal_hi; + uint8_t units; + uint8_t pad2; + uint32_t threshold; +}; + +#define DPNI_OAL_LO_SHIFT 1 +#define DPNI_OAL_LO_SIZE 7 +#define DPNI_OAL_HI_SHIFT 0 +#define DPNI_OAL_HI_SIZE 5 + +struct dpni_cmd_set_taildrop { + /* cmd word 0 */ + uint8_t congestion_point; + uint8_t qtype; + uint8_t tc; + uint8_t index; + uint32_t pad0; + /* cmd word 1 */ + /* from LSB: enable:1 oal_lo:7 */ + uint8_t enable_oal_lo; + /* from LSB: oal_hi:5 */ + uint8_t oal_hi; + uint8_t units; + uint8_t pad2; + uint32_t threshold; +}; + +struct dpni_tx_confirmation_mode { + uint32_t pad; + uint8_t confirmation_mode; +}; + +#define DPNI_DEST_TYPE_SHIFT 0 +#define DPNI_DEST_TYPE_SIZE 4 +#define DPNI_CONG_UNITS_SHIFT 4 +#define DPNI_CONG_UNITS_SIZE 2 + +struct dpni_cmd_set_congestion_notification { + uint8_t qtype; + uint8_t tc; + uint8_t pad[6]; + uint32_t dest_id; + uint16_t notification_mode; + uint8_t dest_priority; + /* from LSB: dest_type: 4 units:2 */ + uint8_t type_units; + uint64_t message_iova; + uint64_t message_ctx; + uint32_t 
threshold_entry; + uint32_t threshold_exit; +}; + +struct dpni_cmd_get_congestion_notification { + uint8_t qtype; + uint8_t tc; +}; + +struct dpni_rsp_get_congestion_notification { + uint64_t pad; + uint32_t dest_id; + uint16_t notification_mode; + uint8_t dest_priority; + /* from LSB: dest_type: 4 units:2 */ + uint8_t type_units; + uint64_t message_iova; + uint64_t message_ctx; + uint32_t threshold_entry; + uint32_t threshold_exit; +}; + +#pragma pack(pop) +#endif /* _FSL_DPNI_CMD_H */ diff --git a/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h new file mode 100644 index 00000000..964870ba --- /dev/null +++ b/src/spdk/dpdk/drivers/net/dpaa2/mc/fsl_net.h @@ -0,0 +1,454 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * + * Copyright 2013-2015 Freescale Semiconductor Inc. + * + */ +#ifndef __FSL_NET_H +#define __FSL_NET_H + +#define LAST_HDR_INDEX 0xFFFFFFFF + +/*****************************************************************************/ +/* Protocol fields */ +/*****************************************************************************/ + +/************************* Ethernet fields *********************************/ +#define NH_FLD_ETH_DA (1) +#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) +#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) +#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) +#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) +#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) +#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) + +#define NH_FLD_ETH_ADDR_SIZE 6 + +/*************************** VLAN fields ***********************************/ +#define NH_FLD_VLAN_VPRI (1) +#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) +#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) +#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) +#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) +#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) + +#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ + NH_FLD_VLAN_CFI | \ + NH_FLD_VLAN_VID) + +/************************ IP (generic) fields ******************************/ +#define NH_FLD_IP_VER (1) +#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) +#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) +#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) +#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) +#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) +#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) +#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) +#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) + +#define NH_FLD_IP_PROTO_SIZE 1 + +/***************************** IPV4 fields *********************************/ +#define NH_FLD_IPV4_VER (1) +#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) +#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) +#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3) +#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4) +#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) +#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) +#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) +#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) +#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) +#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) +#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) +#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) +#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) +#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) +#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) + +#define NH_FLD_IPV4_ADDR_SIZE 4 +#define NH_FLD_IPV4_PROTO_SIZE 1 + +/***************************** IPV6 fields 
 *********************************/
+#define NH_FLD_IPV6_VER (1)
+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
+
+#define NH_FLD_IPV6_ADDR_SIZE 16
+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
+
+/***************************** ICMP fields *********************************/
+#define NH_FLD_ICMP_TYPE (1)
+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
+
+#define NH_FLD_ICMP_CODE_SIZE 1
+#define NH_FLD_ICMP_TYPE_SIZE 1
+
+/***************************** IGMP fields *********************************/
+#define NH_FLD_IGMP_VERSION (1)
+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
+
+/***************************** TCP fields **********************************/
+#define NH_FLD_TCP_PORT_SRC (1)
+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
+
+#define NH_FLD_TCP_PORT_SIZE 2
+
+/***************************** UDP fields **********************************/
+#define NH_FLD_UDP_PORT_SRC (1)
+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_UDP_PORT_SIZE 2
+
+/*************************** UDP-lite fields *******************************/
+#define NH_FLD_UDP_LITE_PORT_SRC (1)
+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS \
+	((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
+
+#define NH_FLD_UDP_LITE_PORT_SIZE 2
+
+/*************************** UDP-encap-ESP fields **************************/
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
+	((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
+
+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
+
+/***************************** SCTP fields *********************************/
+#define NH_FLD_SCTP_PORT_SRC (1)
+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_SCTP_PORT_SIZE 2
+
+/***************************** DCCP fields *********************************/
+#define NH_FLD_DCCP_PORT_SRC (1)
+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
+
+#define NH_FLD_DCCP_PORT_SIZE 2
+
+/***************************** IPHC fields *********************************/
+#define NH_FLD_IPHC_CID (1)
+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
+
+/************************ SCTP chunk data fields ***************************/
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGINNING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
+	((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
+
+/*************************** L2TPV2 fields *********************************/
+#define NH_FLD_L2TPV2_TYPE_BIT (1)
+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
+#define NH_FLD_L2TPV2_ALL_FIELDS \
+	((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
+
+/*************************** L2TPV3 fields *********************************/
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
+	((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
+	((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
+
+/**************************** PPP fields ***********************************/
+#define NH_FLD_PPP_PID (1)
+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
+
+/************************** PPPoE fields ***********************************/
+#define NH_FLD_PPPOE_VER (1)
+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
+
+/************************* PPP-Mux fields **********************************/
+#define NH_FLD_PPPMUX_PID (1)
+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
+
+/*********************** PPP-Mux sub-frame fields **************************/
+#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
+	((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
+
+/*************************** LLC fields ************************************/
+#define NH_FLD_LLC_DSAP (1)
+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
+
+/*************************** NLPID fields **********************************/
+#define NH_FLD_NLPID_NLPID (1)
+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
+
+/*************************** SNAP fields ***********************************/
+#define NH_FLD_SNAP_OUI (1)
+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
+
+/*************************** LLC SNAP fields *******************************/
+#define NH_FLD_LLC_SNAP_TYPE (1)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
+
+/***************************** ARP fields **********************************/
+#define NH_FLD_ARP_HTYPE (1)
+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
+
+/*************************** RFC2684 fields ********************************/
+#define NH_FLD_RFC2684_LLC (1)
+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
+
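Every NH_FLD_* group above follows the same one-hot convention: the first field of a protocol is 1, each later field shifts it left by one, and the group's ALL_FIELDS constant is (FIRST << n) - 1, a mask with all n field bits set. A minimal C sketch of how the flags combine; the function and variable names here are illustrative only, not part of fsl_net.h:

	#include <stdint.h>
	#include "fsl_net.h"

	/* Illustrative only: check that a requested set of TCP field flags
	 * is a subset of the fields defined for TCP. NH_FLD_TCP_ALL_FIELDS
	 * expands to (1 << 11) - 1 == 0x7ff, i.e. all eleven one-hot TCP
	 * field bits set.
	 */
	static int tcp_fields_are_valid(uint32_t fields)
	{
		return (fields & ~(uint32_t)NH_FLD_TCP_ALL_FIELDS) == 0;
	}

	/* tcp_fields_are_valid(NH_FLD_TCP_PORT_SRC | NH_FLD_TCP_PORT_DST)
	 * returns 1, since both flags lie inside the ALL_FIELDS mask.
	 */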
+/*************************** User defined fields ***************************/
+#define NH_FLD_USER_DEFINED_SRCPORT (1)
+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS \
+	((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
+
+/*************************** Payload fields ********************************/
+#define NH_FLD_PAYLOAD_BUFFER (1)
+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
+
+/*************************** GRE fields ************************************/
+#define NH_FLD_GRE_TYPE (1)
+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
+
+/*************************** MINENCAP fields *******************************/
+#define NH_FLD_MINENCAP_SRC_IP (1)
+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
+#define NH_FLD_MINENCAP_ALL_FIELDS \
+	((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
+
+/*************************** IPSEC AH fields *******************************/
+#define NH_FLD_IPSEC_AH_SPI (1)
+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
+
+/*************************** IPSEC ESP fields ******************************/
+#define NH_FLD_IPSEC_ESP_SPI (1)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
+
+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
+
+/*************************** MPLS fields ***********************************/
+#define NH_FLD_MPLS_LABEL_STACK (1)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
+	((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
+
+/*************************** MACSEC fields *********************************/
+#define NH_FLD_MACSEC_SECTAG (1)
+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
+
+/*************************** GTP fields ************************************/
+#define NH_FLD_GTP_TEID (1)
+
+/* Protocol options */
+
+/* Ethernet options */
+#define NH_OPT_ETH_BROADCAST 1
+#define NH_OPT_ETH_MULTICAST 2
+#define NH_OPT_ETH_UNICAST 3
+#define NH_OPT_ETH_BPDU 4
+
+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
+/* also applicable for broadcast */
+
+/* VLAN options */
+#define NH_OPT_VLAN_CFI 1
+
+/* IPV4 options */
+#define NH_OPT_IPV4_UNICAST 1
+#define NH_OPT_IPV4_MULTICAST 2
+#define NH_OPT_IPV4_BROADCAST 3
+#define NH_OPT_IPV4_OPTION 4
+#define NH_OPT_IPV4_FRAG 5
+#define NH_OPT_IPV4_INITIAL_FRAG 6
+
+/* IPV6 options */
+#define NH_OPT_IPV6_UNICAST 1
+#define NH_OPT_IPV6_MULTICAST 2
+#define NH_OPT_IPV6_OPTION 3
+#define NH_OPT_IPV6_FRAG 4
+#define NH_OPT_IPV6_INITIAL_FRAG 5
+
+/* General IP options (may be used for any version) */
+#define NH_OPT_IP_FRAG 1
+#define NH_OPT_IP_INITIAL_FRAG 2
+#define NH_OPT_IP_OPTION 3
+
+/* Minenc. options */
+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
+
+/* GRE options */
+#define NH_OPT_GRE_ROUTING_PRESENT 1
+
+/* TCP options */
+#define NH_OPT_TCP_OPTIONS 1
+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
+#define NH_OPT_TCP_CONTROL_LOW_BITS 3
+
+/* CAPWAP options */
+#define NH_OPT_CAPWAP_DTLS 1
+
+enum net_prot {
+	NET_PROT_NONE = 0,
+	NET_PROT_PAYLOAD,
+	NET_PROT_ETH,
+	NET_PROT_VLAN,
+	NET_PROT_IPV4,
+	NET_PROT_IPV6,
+	NET_PROT_IP,
+	NET_PROT_TCP,
+	NET_PROT_UDP,
+	NET_PROT_UDP_LITE,
+	NET_PROT_IPHC,
+	NET_PROT_SCTP,
+	NET_PROT_SCTP_CHUNK_DATA,
+	NET_PROT_PPPOE,
+	NET_PROT_PPP,
+	NET_PROT_PPPMUX,
+	NET_PROT_PPPMUX_SUBFRM,
+	NET_PROT_L2TPV2,
+	NET_PROT_L2TPV3_CTRL,
+	NET_PROT_L2TPV3_SESS,
+	NET_PROT_LLC,
+	NET_PROT_LLC_SNAP,
+	NET_PROT_NLPID,
+	NET_PROT_SNAP,
+	NET_PROT_MPLS,
+	NET_PROT_IPSEC_AH,
+	NET_PROT_IPSEC_ESP,
+	NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+	NET_PROT_MACSEC,
+	NET_PROT_GRE,
+	NET_PROT_MINENCAP,
+	NET_PROT_DCCP,
+	NET_PROT_ICMP,
+	NET_PROT_IGMP,
+	NET_PROT_ARP,
+	NET_PROT_CAPWAP_DATA,
+	NET_PROT_CAPWAP_CTRL,
+	NET_PROT_RFC2684,
+	NET_PROT_ICMPV6,
+	NET_PROT_FCOE,
+	NET_PROT_FIP,
+	NET_PROT_ISCSI,
+	NET_PROT_GTP,
+	NET_PROT_USER_DEFINED_L2,
+	NET_PROT_USER_DEFINED_L3,
+	NET_PROT_USER_DEFINED_L4,
+	NET_PROT_USER_DEFINED_L5,
+	NET_PROT_USER_DEFINED_SHIM1,
+	NET_PROT_USER_DEFINED_SHIM2,
+
+	NET_PROT_DUMMY_LAST
+};
+
+/*! IEEE 802.1Q */
+#define NH_IEEE8021Q_ETYPE 0x8100
+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
+	((((uint32_t)(etype & 0xFFFF)) << 16) | \
+	(((uint32_t)(pcp & 0x07)) << 13) | \
+	(((uint32_t)(dei & 0x01)) << 12) | \
+	(((uint32_t)(vlan_id & 0xFFF))))
+
+#endif /* __FSL_NET_H */
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/meson.build b/src/spdk/dpdk/drivers/net/dpaa2/meson.build
new file mode 100644
index 00000000..213f0d72
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+	build = false
+endif
+
+deps += ['mempool_dpaa2']
+sources = files('base/dpaa2_hw_dpni.c',
+		'dpaa2_ethdev.c',
+		'dpaa2_rxtx.c',
+		'mc/dpkg.c',
+		'mc/dpni.c')
+
+includes += include_directories('base', 'mc')
+
+# depends on fslmc bus which uses experimental API
+allow_experimental_apis = true
diff --git a/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map b/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
new file mode 100644
index 00000000..09f4364b
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
@@ -0,0 +1,12 @@
+DPDK_17.05 {
+
+	local: *;
+};
+
+DPDK_17.11 {
+	global:
+
+	dpaa2_eth_eventq_attach;
+	dpaa2_eth_eventq_detach;
+
+} DPDK_17.05;
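For reference, NH_IEEE8021Q_HDR packs the TPID and the three TCI subfields into a single 32-bit word; for example, NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 3, 0, 100) evaluates to (0x8100 << 16) | (3 << 13) | (0 << 12) | 100 = 0x81006064.

The net_prot enum and the NH_FLD_* masks from fsl_net.h are consumed together by the key-generation code elsewhere in this patch (see mc/dpkg.c and base/dpaa2_hw_dpni.c in the diffstat). Below is a hedged sketch of that pairing, assuming the struct dpkg_profile_cfg layout from fsl_dpkg.h; the function name is hypothetical, and the error handling plus the dpni call that would actually apply the profile are omitted:

	#include "fsl_dpkg.h"
	#include "fsl_net.h"

	/* Sketch: build a key-generation profile that extracts the UDP
	 * source and destination ports, so traffic can be distributed by
	 * the hash of both port fields.
	 */
	static void example_udp_dist(struct dpkg_profile_cfg *kg_cfg)
	{
		kg_cfg->num_extracts = 2;

		/* Extract the full UDP source port field from the header. */
		kg_cfg->extracts[0].type = DPKG_EXTRACT_FROM_HDR;
		kg_cfg->extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg_cfg->extracts[0].extract.from_hdr.prot = NET_PROT_UDP;
		kg_cfg->extracts[0].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;

		/* Extract the full UDP destination port field. */
		kg_cfg->extracts[1].type = DPKG_EXTRACT_FROM_HDR;
		kg_cfg->extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
		kg_cfg->extracts[1].extract.from_hdr.prot = NET_PROT_UDP;
		kg_cfg->extracts[1].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
	}

Each extract names a protocol (net_prot) plus one field flag (NH_FLD_*), which is the same pattern the driver's RSS configuration code follows when translating DPDK hash flags into a dpkg profile.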