Diffstat (limited to 'src/spdk/dpdk/drivers/net/octeontx2')
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/Makefile  63
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/meson.build  44
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.c  2553
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.h  592
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_debug.c  811
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_devargs.c  193
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c  494
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c  629
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.c  842
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.h  139
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec_tx.h  181
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.c  1007
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.h  397
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c  252
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_parse.c  1046
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_utils.c  959
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_link.c  264
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_lookup.c  352
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_mac.c  149
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_mcast.c  339
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_ptp.c  442
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_rss.c  392
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.c  424
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.h  541
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_stats.c  396
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.c  3216
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.h  171
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.c  1060
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.h  744
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/otx2_vlan.c  1040
-rw-r--r--  src/spdk/dpdk/drivers/net/octeontx2/rte_pmd_octeontx2_version.map  3
31 files changed, 19735 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/Makefile b/src/spdk/dpdk/drivers/net/octeontx2/Makefile
new file mode 100644
index 000000000..0de43e36a
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/Makefile
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_octeontx2.a
+
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/cpt
+CFLAGS += -I$(RTE_SDK)/drivers/common/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx2
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx2
+CFLAGS += -O3
+ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -flax-vector-conversions
+endif
+
+ifneq ($(CONFIG_RTE_ARCH_64),y)
+CFLAGS += -Wno-int-to-pointer-cast
+CFLAGS += -Wno-pointer-to-int-cast
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS += -diag-disable 2259
+endif
+endif
+
+EXPORT_MAP := rte_pmd_octeontx2_version.map
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX2_PMD) += \
+ otx2_rx.c \
+ otx2_tx.c \
+ otx2_tm.c \
+ otx2_rss.c \
+ otx2_mac.c \
+ otx2_ptp.c \
+ otx2_flow.c \
+ otx2_link.c \
+ otx2_vlan.c \
+ otx2_stats.c \
+ otx2_mcast.c \
+ otx2_lookup.c \
+ otx2_ethdev.c \
+ otx2_flow_ctrl.c \
+ otx2_flow_parse.c \
+ otx2_flow_utils.c \
+ otx2_ethdev_irq.c \
+ otx2_ethdev_ops.c \
+ otx2_ethdev_sec.c \
+ otx2_ethdev_debug.c \
+ otx2_ethdev_devargs.c
+
+LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2 -lrte_eal -lrte_net
+LDLIBS += -lrte_ethdev -lrte_bus_pci -lrte_kvargs -lrte_mbuf -lrte_mempool -lm
+LDLIBS += -lrte_cryptodev -lrte_eventdev -lrte_security
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/meson.build b/src/spdk/dpdk/drivers/net/octeontx2/meson.build
new file mode 100644
index 000000000..599ade672
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/meson.build
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2019 Marvell International Ltd.
+#
+
+sources = files('otx2_rx.c',
+ 'otx2_tx.c',
+ 'otx2_tm.c',
+ 'otx2_rss.c',
+ 'otx2_mac.c',
+ 'otx2_ptp.c',
+ 'otx2_flow.c',
+ 'otx2_link.c',
+ 'otx2_vlan.c',
+ 'otx2_stats.c',
+ 'otx2_mcast.c',
+ 'otx2_lookup.c',
+ 'otx2_ethdev.c',
+ 'otx2_flow_ctrl.c',
+ 'otx2_flow_parse.c',
+ 'otx2_flow_utils.c',
+ 'otx2_ethdev_irq.c',
+ 'otx2_ethdev_ops.c',
+ 'otx2_ethdev_sec.c',
+ 'otx2_ethdev_debug.c',
+ 'otx2_ethdev_devargs.c'
+ )
+
+deps += ['bus_pci', 'cryptodev', 'eventdev', 'security']
+deps += ['common_octeontx2', 'mempool_octeontx2']
+
+extra_flags = ['-flax-vector-conversions']
+# This integrated controller runs only on a arm64 machine, remove 32bit warnings
+if not dpdk_conf.get('RTE_ARCH_64')
+ extra_flags += ['-Wno-int-to-pointer-cast', '-Wno-pointer-to-int-cast']
+endif
+
+foreach flag: extra_flags
+ if cc.has_argument(flag)
+ cflags += flag
+ endif
+endforeach
+
+includes += include_directories('../../common/cpt')
+includes += include_directories('../../crypto/octeontx2')
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.c
new file mode 100644
index 000000000..3f3f0a693
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.c
@@ -0,0 +1,2553 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <inttypes.h>
+
+#include <rte_ethdev_pci.h>
+#include <rte_io.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_mempool.h>
+
+#include "otx2_ethdev.h"
+#include "otx2_ethdev_sec.h"
+
+static inline uint64_t
+nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
+{
+ uint64_t capa = NIX_RX_OFFLOAD_CAPA;
+
+ if (otx2_dev_is_vf(dev) ||
+ dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
+ capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+
+ return capa;
+}
+
+static inline uint64_t
+nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
+{
+ uint64_t capa = NIX_TX_OFFLOAD_CAPA;
+
+ /* TSO not supported for earlier chip revisions */
+ if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
+ capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
+ return capa;
+}
+
+static const struct otx2_dev_ops otx2_dev_ops = {
+ .link_status_update = otx2_eth_dev_link_status_update,
+ .ptp_info_update = otx2_eth_dev_ptp_info_update
+};
+
+static int
+nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_lf_alloc_req *req;
+ struct nix_lf_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
+ req->rq_cnt = nb_rxq;
+ req->sq_cnt = nb_txq;
+ req->cq_cnt = nb_rxq;
+ /* XQE_SZ should be in Sync with NIX_CQ_ENTRY_SZ */
+ RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
+ req->xqe_sz = NIX_XQESZ_W16;
+ req->rss_sz = dev->rss_info.rss_size;
+ req->rss_grps = NIX_RSS_GRPS;
+ req->npa_func = otx2_npa_pf_func_get();
+ req->sso_func = otx2_sso_pf_func_get();
+ req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM)) {
+ req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
+ req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
+ }
+ req->rx_cfg |= (BIT_ULL(32 /* DROP_RE */) |
+ BIT_ULL(33 /* Outer L2 Length */) |
+ BIT_ULL(38 /* Inner L4 UDP Length */) |
+ BIT_ULL(39 /* Inner L3 Length */) |
+ BIT_ULL(40 /* Outer L4 UDP Length */) |
+ BIT_ULL(41 /* Outer L3 Length */));
+
+ if (dev->rss_tag_as_xor == 0)
+ req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ dev->sqb_size = rsp->sqb_size;
+ dev->tx_chan_base = rsp->tx_chan_base;
+ dev->rx_chan_base = rsp->rx_chan_base;
+ dev->rx_chan_cnt = rsp->rx_chan_cnt;
+ dev->tx_chan_cnt = rsp->tx_chan_cnt;
+ dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
+ dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
+ dev->lf_tx_stats = rsp->lf_tx_stats;
+ dev->lf_rx_stats = rsp->lf_rx_stats;
+ dev->cints = rsp->cints;
+ dev->qints = rsp->qints;
+ dev->npc_flow.channel = dev->rx_chan_base;
+ dev->ptp_en = rsp->hw_rx_tstamp_en;
+
+ return 0;
+}
+
+static int
+nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct npc_set_pkind *req;
+ struct msg_resp *rsp;
+ int rc;
+
+ if (dev->npc_flow.switch_header_type == 0)
+ return 0;
+
+ if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_LEN_90B &&
+ !otx2_dev_is_sdp(dev)) {
+ otx2_err("chlen90b is not supported on non-SDP device");
+ return -EINVAL;
+ }
+
+ /* Notify AF about higig2 config */
+ req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+ req->mode = dev->npc_flow.switch_header_type;
+ if (enable == 0)
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ req->dir = PKIND_RX;
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+ req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
+ req->mode = dev->npc_flow.switch_header_type;
+ if (enable == 0)
+ req->mode = OTX2_PRIV_FLAGS_DEFAULT;
+ req->dir = PKIND_TX;
+ return otx2_mbox_process_msg(mbox, (void *)&rsp);
+}
+
+static int
+nix_lf_free(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_lf_free_req *req;
+ struct ndc_sync_op *ndc_req;
+ int rc;
+
+ /* Sync NDC-NIX for LF */
+ ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
+ ndc_req->nix_lf_tx_sync = 1;
+ ndc_req->nix_lf_rx_sync = 1;
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);
+
+ req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
+ /* Let AF driver free all this nix lf's
+ * NPC entries allocated using NPC MBOX.
+ */
+ req->flags = 0;
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+npc_rx_enable(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+npc_rx_disable(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cgx_start_link_event(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (en && otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
+
+ if (en)
+ otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
+ else
+ otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static inline void
+nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
+{
+ rxq->head = 0;
+ rxq->available = 0;
+}
+
+static inline uint32_t
+nix_qsize_to_val(enum nix_q_size_e qsize)
+{
+ return (16UL << (qsize * 2));
+}
+
+static inline enum nix_q_size_e
+nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
+{
+ int i;
+
+ if (otx2_ethdev_fixup_is_min_4k_q(dev))
+ i = nix_q_size_4K;
+ else
+ i = nix_q_size_16;
+
+ for (; i < nix_q_size_max; i++)
+ if (val <= nix_qsize_to_val(i))
+ break;
+
+ if (i >= nix_q_size_max)
+ i = nix_q_size_max - 1;
+
+ return i;
+}
+
+static int
+nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
+ uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ const struct rte_memzone *rz;
+ uint32_t ring_size, cq_size;
+ struct nix_aq_enq_req *aq;
+ uint16_t first_skip;
+ int rc;
+
+ cq_size = rxq->qlen;
+ ring_size = cq_size * NIX_CQ_ENTRY_SZ;
+ rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
+ NIX_CQ_ALIGN, dev->node);
+ if (rz == NULL) {
+ otx2_err("Failed to allocate mem for cq hw ring");
+ rc = -ENOMEM;
+ goto fail;
+ }
+ memset(rz->addr, 0, rz->len);
+ rxq->desc = (uintptr_t)rz->addr;
+ rxq->qmask = cq_size - 1;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ aq->cq.ena = 1;
+ aq->cq.caching = 1;
+ aq->cq.qsize = rxq->qsize;
+ aq->cq.base = rz->iova;
+ aq->cq.avg_level = 0xff;
+ aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
+ aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
+
+ /* Many to one reduction */
+ aq->cq.qint_idx = qid % dev->qints;
+ /* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
+ aq->cq.cint_idx = qid;
+
+ if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
+ const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
+ uint16_t min_rx_drop;
+
+ min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
+ aq->cq.drop = min_rx_drop;
+ aq->cq.drop_ena = 1;
+ rxq->cq_drop = min_rx_drop;
+ } else {
+ rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
+ aq->cq.drop = rxq->cq_drop;
+ aq->cq.drop_ena = 1;
+ }
+
+ /* TX pause frames enable flowctrl on RX side */
+ if (dev->fc_info.tx_pause) {
+ /* Single bpid is allocated for all rx channels for now */
+ aq->cq.bpid = dev->fc_info.bpid[0];
+ aq->cq.bp = rxq->cq_drop;
+ aq->cq.bp_ena = 1;
+ }
+
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("Failed to init cq context");
+ goto fail;
+ }
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ aq->rq.sso_ena = 0;
+
+ if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
+ aq->rq.ipsech_ena = 1;
+
+ aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
+ aq->rq.spb_ena = 0;
+ aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
+ first_skip = (sizeof(struct rte_mbuf));
+ first_skip += RTE_PKTMBUF_HEADROOM;
+ first_skip += rte_pktmbuf_priv_size(mp);
+ rxq->data_off = first_skip;
+
+ first_skip /= 8; /* Expressed in number of dwords */
+ aq->rq.first_skip = first_skip;
+ aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
+ aq->rq.flow_tagw = 32; /* 32-bits */
+ aq->rq.lpb_sizem1 = mp->elt_size / 8;
+ aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
+ aq->rq.ena = 1;
+ aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+ aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
+ aq->rq.rq_int_ena = 0;
+ /* Many to one reduction */
+ aq->rq.qint_idx = qid % dev->qints;
+
+ aq->rq.xqe_drop_ena = 1;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("Failed to init rq context");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return rc;
+}
+
+static int
+nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
+ struct otx2_eth_rxq *rxq, const bool enb)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *aq;
+
+ /* Pkts will be dropped silently if RQ is disabled */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = rxq->rq;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->rq.ena = enb;
+ aq->rq_mask.ena = ~(aq->rq_mask.ena);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *aq;
+ int rc;
+
+ /* RQ is already disabled */
+ /* Disable CQ */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = rxq->rq;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->cq.ena = 0;
+ aq->cq_mask.ena = ~(aq->cq_mask.ena);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("Failed to disable cq context");
+ return rc;
+ }
+
+ return 0;
+}
+
+static inline int
+nix_get_data_off(struct otx2_eth_dev *dev)
+{
+ return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
+}
+
+uint64_t
+otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
+{
+ struct rte_mbuf mb_def;
+ uint64_t *tmp;
+
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+ offsetof(struct rte_mbuf, data_off) != 2);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+ offsetof(struct rte_mbuf, data_off) != 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+ offsetof(struct rte_mbuf, data_off) != 6);
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
+ mb_def.port = port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* Prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ tmp = (uint64_t *)&mb_def.rearm_data;
+
+ return *tmp;
+}
+
+static void
+otx2_nix_rx_queue_release(void *rx_queue)
+{
+ struct otx2_eth_rxq *rxq = rx_queue;
+
+ if (!rxq)
+ return;
+
+ otx2_nix_dbg("Releasing rxq %u", rxq->rq);
+ nix_cq_rq_uninit(rxq->eth_dev, rxq);
+ rte_free(rx_queue);
+}
+
+static int
+otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
+ uint16_t nb_desc, unsigned int socket,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_mempool_ops *ops;
+ struct otx2_eth_rxq *rxq;
+ const char *platform_ops;
+ enum nix_q_size_e qsize;
+ uint64_t offloads;
+ int rc;
+
+ rc = -EINVAL;
+
+ /* Compile time check to make sure all fast path elements in a CL */
+ RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
+
+ /* Sanity checks */
+ if (rx_conf->rx_deferred_start == 1) {
+ otx2_err("Deferred Rx start is not supported");
+ goto fail;
+ }
+
+ platform_ops = rte_mbuf_platform_mempool_ops();
+ /* This driver needs octeontx2_npa mempool ops to work */
+ ops = rte_mempool_get_ops(mp->ops_index);
+ if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+ otx2_err("mempool ops should be of octeontx2_npa type");
+ goto fail;
+ }
+
+ if (mp->pool_id == 0) {
+ otx2_err("Invalid pool_id");
+ goto fail;
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (eth_dev->data->rx_queues[rq] != NULL) {
+ otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
+ otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
+ eth_dev->data->rx_queues[rq] = NULL;
+ }
+
+ offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
+ dev->rx_offloads |= offloads;
+
+ /* Find the CQ queue size */
+ qsize = nix_qsize_clampup_get(dev, nb_desc);
+ /* Allocate rxq memory */
+ rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
+ if (rxq == NULL) {
+ otx2_err("Failed to allocate rq=%d", rq);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ rxq->eth_dev = eth_dev;
+ rxq->rq = rq;
+ rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
+ rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
+ rxq->wdata = (uint64_t)rq << 32;
+ rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
+ rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
+ eth_dev->data->port_id);
+ rxq->offloads = offloads;
+ rxq->pool = mp;
+ rxq->qlen = nix_qsize_to_val(qsize);
+ rxq->qsize = qsize;
+ rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
+ rxq->tstamp = &dev->tstamp;
+
+ /* Alloc completion queue */
+ rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
+ if (rc) {
+ otx2_err("Failed to allocate rxq=%u", rq);
+ goto free_rxq;
+ }
+
+ rxq->qconf.socket_id = socket;
+ rxq->qconf.nb_desc = nb_desc;
+ rxq->qconf.mempool = mp;
+ memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
+
+ nix_rx_queue_reset(rxq);
+ otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
+ rq, mp->name, qsize, nb_desc, rxq->qlen);
+
+ eth_dev->data->rx_queues[rq] = rxq;
+ eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ /* Calculating delta and freq mult between PTP HI clock and tsc.
+ * These are needed in deriving raw clock value from tsc counter.
+ * read_clock eth op returns raw clock value.
+ */
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+ otx2_ethdev_is_ptp_en(dev)) {
+ rc = otx2_nix_raw_clock_tsc_conv(dev);
+ if (rc) {
+ otx2_err("Failed to calculate delta and freq mult");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+free_rxq:
+ otx2_nix_rx_queue_release(rxq);
+fail:
+ return rc;
+}
+
+static inline uint8_t
+nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
+{
+ /*
+ * Maximum three segments can be supported with W8, Choose
+ * NIX_MAXSQESZ_W16 for multi segment offload.
+ */
+ if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ return NIX_MAXSQESZ_W16;
+ else
+ return NIX_MAXSQESZ_W8;
+}
+
+static uint16_t
+nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ uint16_t flags = 0;
+
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ flags |= NIX_RX_OFFLOAD_RSS_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM))
+ flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ flags |= NIX_RX_MULTI_SEG_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP))
+ flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ flags |= NIX_RX_OFFLOAD_SECURITY_F;
+
+ if (!dev->ptype_disable)
+ flags |= NIX_RX_OFFLOAD_PTYPE_F;
+
+ return flags;
+}
+
+static uint16_t
+nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t conf = dev->tx_offloads;
+ uint16_t flags = 0;
+
+ /* Fastpath is dependent on these enums */
+ RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
+ RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
+ RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
+ RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
+ RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
+ RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
+ RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
+ offsetof(struct rte_mbuf, buf_iova) + 8);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
+ offsetof(struct rte_mbuf, buf_iova) + 16);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
+ offsetof(struct rte_mbuf, ol_flags) + 12);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
+ offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
+
+ if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
+ conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+ flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
+ if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
+
+ if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+ conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
+ conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
+ conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
+
+ if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
+
+ if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+ flags |= NIX_TX_MULTI_SEG_F;
+
+ /* Enable Inner checksum for TSO */
+ if (conf & DEV_TX_OFFLOAD_TCP_TSO)
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+ /* Enable Inner and Outer checksum for Tunnel TSO */
+ if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO))
+ flags |= (NIX_TX_OFFLOAD_TSO_F |
+ NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F);
+
+ if (conf & DEV_TX_OFFLOAD_SECURITY)
+ flags |= NIX_TX_OFFLOAD_SECURITY_F;
+
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
+ flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+ return flags;
+}
+
+static int
+nix_sq_init(struct otx2_eth_txq *txq)
+{
+ struct otx2_eth_dev *dev = txq->dev;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *sq;
+ uint32_t rr_quantum;
+ uint16_t smq;
+ int rc;
+
+ if (txq->sqb_pool->pool_id == 0)
+ return -EINVAL;
+
+ rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
+ if (rc) {
+ otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
+ return rc;
+ }
+
+ sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ sq->qidx = txq->sq;
+ sq->ctype = NIX_AQ_CTYPE_SQ;
+ sq->op = NIX_AQ_INSTOP_INIT;
+ sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
+
+ sq->sq.smq = smq;
+ sq->sq.smq_rr_quantum = rr_quantum;
+ sq->sq.default_chan = dev->tx_chan_base;
+ sq->sq.sqe_stype = NIX_STYPE_STF;
+ sq->sq.ena = 1;
+ if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
+ sq->sq.sqe_stype = NIX_STYPE_STP;
+ sq->sq.sqb_aura =
+ npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
+ sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
+ sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
+ sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
+ sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
+
+ /* Many to one reduction */
+ sq->sq.qint_idx = txq->sq % dev->qints;
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+nix_sq_uninit(struct otx2_eth_txq *txq)
+{
+ struct otx2_eth_dev *dev = txq->dev;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct ndc_sync_op *ndc_req;
+ struct nix_aq_enq_rsp *rsp;
+ struct nix_aq_enq_req *aq;
+ uint16_t sqes_per_sqb;
+ void *sqb_buf;
+ int rc, count;
+
+ otx2_nix_dbg("Cleaning up sq %u", txq->sq);
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = txq->sq;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ /* Check if sq is already cleaned up */
+ if (!rsp->sq.ena)
+ return 0;
+
+ /* Disable sq */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = txq->sq;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->sq_mask.ena = ~aq->sq_mask.ena;
+ aq->sq.ena = 0;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ /* Read SQ and free sqb's */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = txq->sq;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (aq->sq.smq_pend)
+ otx2_err("SQ has pending sqe's");
+
+ count = aq->sq.sqb_count;
+ sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
+ /* Free SQB's that are used */
+ sqb_buf = (void *)rsp->sq.head_sqb;
+ while (count) {
+ void *next_sqb;
+
+ next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
+ ((sqes_per_sqb - 1) *
+ nix_sq_max_sqe_sz(txq)));
+ npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
+ (uint64_t)sqb_buf);
+ sqb_buf = next_sqb;
+ count--;
+ }
+
+ /* Free next to use sqb */
+ if (rsp->sq.next_sqb)
+ npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
+ rsp->sq.next_sqb);
+
+ /* Sync NDC-NIX-TX for LF */
+ ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
+ ndc_req->nix_lf_tx_sync = 1;
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
+
+ return rc;
+}
+
+static int
+nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
+{
+ struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
+ struct npa_aq_enq_req *aura_req;
+
+ aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
+ aura_req->ctype = NPA_AQ_CTYPE_AURA;
+ aura_req->op = NPA_AQ_INSTOP_WRITE;
+
+ aura_req->aura.limit = nb_sqb_bufs;
+ aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
+
+ return otx2_mbox_process(npa_lf->mbox);
+}
+
+static int
+nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
+{
+ struct otx2_eth_dev *dev = txq->dev;
+ uint16_t sqes_per_sqb, nb_sqb_bufs;
+ char name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool_objsz sz;
+ struct npa_aura_s *aura;
+ uint32_t tmp, blk_sz;
+
+ aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
+ snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
+ blk_sz = dev->sqb_size;
+
+ if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
+ sqes_per_sqb = (dev->sqb_size / 8) / 16;
+ else
+ sqes_per_sqb = (dev->sqb_size / 8) / 8;
+
+ nb_sqb_bufs = nb_desc / sqes_per_sqb;
+ /* Clamp up to devarg passed SQB count */
+ nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
+ nb_sqb_bufs + NIX_SQB_LIST_SPACE));
+
+ txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
+ 0, 0, dev->node,
+ MEMPOOL_F_NO_SPREAD);
+ txq->nb_sqb_bufs = nb_sqb_bufs;
+ txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
+ txq->nb_sqb_bufs_adj = nb_sqb_bufs -
+ RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
+ txq->nb_sqb_bufs_adj =
+ (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
+
+ if (txq->sqb_pool == NULL) {
+ otx2_err("Failed to allocate sqe mempool");
+ goto fail;
+ }
+
+ memset(aura, 0, sizeof(*aura));
+ aura->fc_ena = 1;
+ aura->fc_addr = txq->fc_iova;
+ aura->fc_hyst_bits = 0; /* Store count on all updates */
+ if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
+ otx2_err("Failed to set ops for sqe mempool");
+ goto fail;
+ }
+ if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
+ otx2_err("Failed to populate sqe mempool");
+ goto fail;
+ }
+
+ tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
+ if (dev->sqb_size != sz.elt_size) {
+ otx2_err("sqe pool block size is not expected %d != %d",
+ dev->sqb_size, tmp);
+ goto fail;
+ }
+
+ nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
+
+ return 0;
+fail:
+ return -ENOMEM;
+}
+
+void
+otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
+{
+ struct nix_send_ext_s *send_hdr_ext;
+ struct nix_send_hdr_s *send_hdr;
+ struct nix_send_mem_s *send_mem;
+ union nix_send_sg_s *sg;
+
+ /* Initialize the fields based on basic single segment packet */
+ memset(&txq->cmd, 0, sizeof(txq->cmd));
+
+ if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
+ send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+ /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
+ send_hdr->w0.sizem1 = 2;
+
+ send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
+ send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+ if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ /* Default: one seg packet would have:
+ * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
+ * => 8/2 - 1 = 3
+ */
+ send_hdr->w0.sizem1 = 3;
+ send_hdr_ext->w0.tstmp = 1;
+
+ /* To calculate the offset for send_mem,
+ * send_hdr->w0.sizem1 * 2
+ */
+ send_mem = (struct nix_send_mem_s *)(txq->cmd +
+ (send_hdr->w0.sizem1 << 1));
+ send_mem->subdc = NIX_SUBDC_MEM;
+ send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
+ send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
+ }
+ sg = (union nix_send_sg_s *)&txq->cmd[4];
+ } else {
+ send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+ /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
+ send_hdr->w0.sizem1 = 1;
+ sg = (union nix_send_sg_s *)&txq->cmd[2];
+ }
+
+ send_hdr->w0.sq = txq->sq;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 1;
+ sg->ld_type = NIX_SENDLDTYPE_LDD;
+
+ rte_smp_wmb();
+}
+
+static void
+otx2_nix_tx_queue_release(void *_txq)
+{
+ struct otx2_eth_txq *txq = _txq;
+ struct rte_eth_dev *eth_dev;
+
+ if (!txq)
+ return;
+
+ eth_dev = txq->dev->eth_dev;
+
+ otx2_nix_dbg("Releasing txq %u", txq->sq);
+
+ /* Flush and disable tm */
+ otx2_nix_sq_flush_pre(txq, eth_dev->data->dev_started);
+
+ /* Free sqb's and disable sq */
+ nix_sq_uninit(txq);
+
+ if (txq->sqb_pool) {
+ rte_mempool_free(txq->sqb_pool);
+ txq->sqb_pool = NULL;
+ }
+ otx2_nix_sq_flush_post(txq);
+ rte_free(txq);
+}
+
+
+static int
+otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ const struct rte_memzone *fc;
+ struct otx2_eth_txq *txq;
+ uint64_t offloads;
+ int rc;
+
+ rc = -EINVAL;
+
+ /* Compile time check to make sure all fast path elements in a CL */
+ RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
+
+ if (tx_conf->tx_deferred_start) {
+ otx2_err("Tx deferred start is not supported");
+ goto fail;
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (eth_dev->data->tx_queues[sq] != NULL) {
+ otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
+ otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
+ eth_dev->data->tx_queues[sq] = NULL;
+ }
+
+ /* Find the expected offloads for this queue */
+ offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
+
+ /* Allocating tx queue data structure */
+ txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
+ OTX2_ALIGN, socket_id);
+ if (txq == NULL) {
+ otx2_err("Failed to alloc txq=%d", sq);
+ rc = -ENOMEM;
+ goto fail;
+ }
+ txq->sq = sq;
+ txq->dev = dev;
+ txq->sqb_pool = NULL;
+ txq->offloads = offloads;
+ dev->tx_offloads |= offloads;
+
+ /*
+ * Allocate memory for flow control updates from HW.
+ * Alloc one cache line, so that fits all FC_STYPE modes.
+ */
+ fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
+ OTX2_ALIGN + sizeof(struct npa_aura_s),
+ OTX2_ALIGN, dev->node);
+ if (fc == NULL) {
+ otx2_err("Failed to allocate mem for fcmem");
+ rc = -ENOMEM;
+ goto free_txq;
+ }
+ txq->fc_iova = fc->iova;
+ txq->fc_mem = fc->addr;
+
+ /* Initialize the aura sqb pool */
+ rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
+ if (rc) {
+ otx2_err("Failed to alloc sqe pool rc=%d", rc);
+ goto free_txq;
+ }
+
+ /* Initialize the SQ */
+ rc = nix_sq_init(txq);
+ if (rc) {
+ otx2_err("Failed to init sq=%d context", sq);
+ goto free_txq;
+ }
+
+ txq->fc_cache_pkts = 0;
+ txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
+ /* Evenly distribute LMT slot for each sq */
+ txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
+
+ txq->qconf.socket_id = socket_id;
+ txq->qconf.nb_desc = nb_desc;
+ memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
+
+ otx2_nix_form_default_desc(txq);
+
+ otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
+ " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
+ fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
+ txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
+ eth_dev->data->tx_queues[sq] = txq;
+ eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+
+free_txq:
+ otx2_nix_tx_queue_release(txq);
+fail:
+ return rc;
+}
+
+static int
+nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_eth_qconf *tx_qconf = NULL;
+ struct otx2_eth_qconf *rx_qconf = NULL;
+ struct otx2_eth_txq **txq;
+ struct otx2_eth_rxq **rxq;
+ int i, nb_rxq, nb_txq;
+
+ nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
+ nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
+
+ tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
+ if (tx_qconf == NULL) {
+ otx2_err("Failed to allocate memory for tx_qconf");
+ goto fail;
+ }
+
+ rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
+ if (rx_qconf == NULL) {
+ otx2_err("Failed to allocate memory for rx_qconf");
+ goto fail;
+ }
+
+ txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
+ for (i = 0; i < nb_txq; i++) {
+ if (txq[i] == NULL) {
+ tx_qconf[i].valid = false;
+ otx2_info("txq[%d] is already released", i);
+ continue;
+ }
+ memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
+ tx_qconf[i].valid = true;
+ otx2_nix_tx_queue_release(txq[i]);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+
+ rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
+ for (i = 0; i < nb_rxq; i++) {
+ if (rxq[i] == NULL) {
+ rx_qconf[i].valid = false;
+ otx2_info("rxq[%d] is already released", i);
+ continue;
+ }
+ memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
+ rx_qconf[i].valid = true;
+ otx2_nix_rx_queue_release(rxq[i]);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+
+ dev->tx_qconf = tx_qconf;
+ dev->rx_qconf = rx_qconf;
+ return 0;
+
+fail:
+ if (tx_qconf)
+ free(tx_qconf);
+ if (rx_qconf)
+ free(rx_qconf);
+
+ return -ENOMEM;
+}
+
+static int
+nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
+ struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
+ struct otx2_eth_txq **txq;
+ struct otx2_eth_rxq **rxq;
+ int rc, i, nb_rxq, nb_txq;
+
+ nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
+ nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
+
+ rc = -ENOMEM;
+ /* Setup tx & rx queues with previous configuration so
+ * that the queues can be functional in cases like ports
+ * are started without re configuring queues.
+ *
+ * Usual re config sequence is like below:
+ * port_configure() {
+ * if(reconfigure) {
+ * queue_release()
+ * queue_setup()
+ * }
+ * queue_configure() {
+ * queue_release()
+ * queue_setup()
+ * }
+ * }
+ * port_start()
+ *
+ * In some application's control path, queue_configure() would
+ * NOT be invoked for TXQs/RXQs in port_configure().
+ * In such cases, queues can be functional after start as the
+ * queues are already setup in port_configure().
+ */
+ for (i = 0; i < nb_txq; i++) {
+ if (!tx_qconf[i].valid)
+ continue;
+ rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
+ tx_qconf[i].socket_id,
+ &tx_qconf[i].conf.tx);
+ if (rc) {
+ otx2_err("Failed to setup tx queue rc=%d", rc);
+ txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
+ for (i -= 1; i >= 0; i--)
+ otx2_nix_tx_queue_release(txq[i]);
+ goto fail;
+ }
+ }
+
+ free(tx_qconf); tx_qconf = NULL;
+
+ for (i = 0; i < nb_rxq; i++) {
+ if (!rx_qconf[i].valid)
+ continue;
+ rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
+ rx_qconf[i].socket_id,
+ &rx_qconf[i].conf.rx,
+ rx_qconf[i].mempool);
+ if (rc) {
+ otx2_err("Failed to setup rx queue rc=%d", rc);
+ rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
+ for (i -= 1; i >= 0; i--)
+ otx2_nix_rx_queue_release(rxq[i]);
+ goto release_tx_queues;
+ }
+ }
+
+ free(rx_qconf); rx_qconf = NULL;
+
+ return 0;
+
+release_tx_queues:
+ txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_release(txq[i]);
+fail:
+ if (tx_qconf)
+ free(tx_qconf);
+ if (rx_qconf)
+ free(rx_qconf);
+
+ return rc;
+}
+
+static uint16_t
+nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+ RTE_SET_USED(queue);
+ RTE_SET_USED(mbufs);
+ RTE_SET_USED(pkts);
+
+ return 0;
+}
+
+static void
+nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
+{
+ /* These dummy functions are required for supporting
+ * some applications which reconfigure queues without
+ * stopping tx burst and rx burst threads(eg kni app)
+ * When the queues context is saved, txq/rxqs are released
+ * which caused app crash since rx/tx burst is still
+ * on different lcores
+ */
+ eth_dev->tx_pkt_burst = nix_eth_nop_burst;
+ eth_dev->rx_pkt_burst = nix_eth_nop_burst;
+ rte_mb();
+}
+
+static void
+nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
+{
+ volatile struct nix_lso_format *field;
+
+ /* Format works only with TCP packet marked by OL3/OL4 */
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Outer IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
+static void
+nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
+ bool outer_v4, bool inner_v4)
+{
+ volatile struct nix_lso_format *field;
+
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Outer IPv4/IPv6 len */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = outer_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (outer_v4) {
+ /* IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Outer UDP length */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* Inner IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = inner_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (inner_v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
+static void
+nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
+ bool outer_v4, bool inner_v4)
+{
+ volatile struct nix_lso_format *field;
+
+ field = (volatile struct nix_lso_format *)&req->fields[0];
+ req->field_mask = NIX_LSO_FIELD_MASK;
+ /* Outer IPv4/IPv6 len */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = outer_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (outer_v4) {
+ /* IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Inner IPv4/IPv6 */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = inner_v4 ? 2 : 4;
+ field->sizem1 = 1; /* 2B */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+ if (inner_v4) {
+ /* IPID field */
+ field->layer = NIX_TXLAYER_IL3;
+ field->offset = 4;
+ field->sizem1 = 1;
+ /* Incremented linearly per segment */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* TCP sequence number update */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 4;
+ field->sizem1 = 3; /* 4 bytes */
+ field->alg = NIX_LSOALG_ADD_OFFSET;
+ field++;
+
+ /* TCP flags field */
+ field->layer = NIX_TXLAYER_IL4;
+ field->offset = 12;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_TCP_FLAGS;
+ field++;
+}
+
+static int
+nix_setup_lso_formats(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_lso_format_cfg_rsp *rsp;
+ struct nix_lso_format_cfg *req;
+ uint8_t base;
+ int rc;
+
+ /* Skip if TSO was not requested */
+ if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
+ return 0;
+ /*
+ * IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tcp(req, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ base = rsp->lso_format_idx;
+ if (base != NIX_LSO_FORMAT_IDX_TSOV4)
+ return -EFAULT;
+ dev->lso_base_idx = base;
+ otx2_nix_dbg("tcpv4 lso fmt=%u", base);
+
+
+ /*
+ * IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tcp(req, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 1)
+ return -EFAULT;
+ otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
+
+ /*
+ * IPv4/UDP/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, true, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 2)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
+
+ /*
+ * IPv4/UDP/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, true, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 3)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
+
+ /*
+ * IPv6/UDP/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, false, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 4)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
+
+ /*
+ * IPv6/UDP/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_udp_tun_tcp(req, false, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+ if (rsp->lso_format_idx != base + 5)
+ return -EFAULT;
+ otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
+
+ /*
+ * IPv4/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, true, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 6)
+ return -EFAULT;
+ otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
+
+ /*
+ * IPv4/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, true, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 7)
+ return -EFAULT;
+ otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
+
+ /*
+ * IPv6/TUN HDR/IPv4/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, false, true);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (rsp->lso_format_idx != base + 8)
+ return -EFAULT;
+ otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
+
+ /*
+ * IPv6/TUN HDR/IPv6/TCP LSO
+ */
+ req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
+ nix_lso_tun_tcp(req, false, false);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+ if (rsp->lso_format_idx != base + 9)
+ return -EFAULT;
+ otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9);
+ return 0;
+}
+
+static int
+otx2_nix_configure(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ struct rte_eth_txmode *txmode = &conf->txmode;
+ char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
+ struct rte_ether_addr *ea;
+ uint8_t nb_rxq, nb_txq;
+ int rc;
+
+ rc = -EINVAL;
+
+ /* Sanity checks */
+ if (rte_eal_has_hugepages() == 0) {
+ otx2_err("Huge page is not configured");
+ goto fail_configure;
+ }
+
+ if (conf->dcb_capability_en == 1) {
+ otx2_err("dcb enable is not supported");
+ goto fail_configure;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ otx2_err("Flow director is not supported");
+ goto fail_configure;
+ }
+
+ if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+ rxmode->mq_mode != ETH_MQ_RX_RSS) {
+ otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
+ goto fail_configure;
+ }
+
+ if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+ otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
+ goto fail_configure;
+ }
+
+ if (otx2_dev_is_Ax(dev) &&
+ (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+ ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+ (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+ otx2_err("Outer IP and SCTP checksum unsupported");
+ goto fail_configure;
+ }
+
+ /* Free the resources allocated from the previous configure */
+ if (dev->configured == 1) {
+ otx2_eth_sec_fini(eth_dev);
+ otx2_nix_rxchan_bpid_cfg(eth_dev, false);
+ otx2_nix_vlan_fini(eth_dev);
+ otx2_nix_mc_addr_list_uninstall(eth_dev);
+ otx2_flow_free_all_resources(dev);
+ oxt2_nix_unregister_queue_irqs(eth_dev);
+ if (eth_dev->data->dev_conf.intr_conf.rxq)
+ oxt2_nix_unregister_cq_irqs(eth_dev);
+ nix_set_nop_rxtx_function(eth_dev);
+ rc = nix_store_queue_cfg_and_then_release(eth_dev);
+ if (rc)
+ goto fail_configure;
+ otx2_nix_tm_fini(eth_dev);
+ nix_lf_free(dev);
+ }
+
+ dev->rx_offloads = rxmode->offloads;
+ dev->tx_offloads = txmode->offloads;
+ dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
+ dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
+ dev->rss_info.rss_grps = NIX_RSS_GRPS;
+
+ nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
+ nb_txq = RTE_MAX(data->nb_tx_queues, 1);
+
+ /* Alloc a nix lf */
+ rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
+ if (rc) {
+ otx2_err("Failed to init nix_lf rc=%d", rc);
+ goto fail_offloads;
+ }
+
+ otx2_nix_err_intr_enb_dis(eth_dev, true);
+ otx2_nix_ras_intr_enb_dis(eth_dev, true);
+
+ if (dev->ptp_en &&
+ dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
+ otx2_err("Both PTP and switch header enabled");
+ goto free_nix_lf;
+ }
+
+ rc = nix_lf_switch_header_type_enable(dev, true);
+ if (rc) {
+ otx2_err("Failed to enable switch type nix_lf rc=%d", rc);
+ goto free_nix_lf;
+ }
+
+ rc = nix_setup_lso_formats(dev);
+ if (rc) {
+ otx2_err("failed to setup nix lso format fields, rc=%d", rc);
+ goto free_nix_lf;
+ }
+
+ /* Configure RSS */
+ rc = otx2_nix_rss_config(eth_dev);
+ if (rc) {
+ otx2_err("Failed to configure rss rc=%d", rc);
+ goto free_nix_lf;
+ }
+
+ /* Init the default TM scheduler hierarchy */
+ rc = otx2_nix_tm_init_default(eth_dev);
+ if (rc) {
+ otx2_err("Failed to init traffic manager rc=%d", rc);
+ goto free_nix_lf;
+ }
+
+ rc = otx2_nix_vlan_offload_init(eth_dev);
+ if (rc) {
+ otx2_err("Failed to init vlan offload rc=%d", rc);
+ goto tm_fini;
+ }
+
+ /* Register queue IRQs */
+ rc = oxt2_nix_register_queue_irqs(eth_dev);
+ if (rc) {
+ otx2_err("Failed to register queue interrupts rc=%d", rc);
+ goto vlan_fini;
+ }
+
+ /* Register cq IRQs */
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ if (eth_dev->data->nb_rx_queues > dev->cints) {
+ otx2_err("Rx interrupt cannot be enabled, rxq > %d",
+ dev->cints);
+ goto q_irq_fini;
+ }
+ /* Rx interrupt feature cannot work with vector mode because,
+ * vector mode doesn't process packets unless min 4 pkts are
+ * received, while cq interrupts are generated even for 1 pkt
+ * in the CQ.
+ */
+ dev->scalar_ena = true;
+
+ rc = oxt2_nix_register_cq_irqs(eth_dev);
+ if (rc) {
+ otx2_err("Failed to register CQ interrupts rc=%d", rc);
+ goto q_irq_fini;
+ }
+ }
+
+ /* Configure loop back mode */
+ rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
+ if (rc) {
+ otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
+ goto cq_fini;
+ }
+
+ rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
+ if (rc) {
+ otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
+ goto cq_fini;
+ }
+
+ /* Enable security */
+ rc = otx2_eth_sec_init(eth_dev);
+ if (rc)
+ goto cq_fini;
+
+ rc = otx2_nix_flow_ctrl_init(eth_dev);
+ if (rc) {
+ otx2_err("Failed to init flow ctrl mode %d", rc);
+ goto cq_fini;
+ }
+
+ rc = otx2_nix_mc_addr_list_install(eth_dev);
+ if (rc < 0) {
+ otx2_err("Failed to install mc address list rc=%d", rc);
+ goto sec_fini;
+ }
+
+ /*
+ * Restore queue config when reconfigure followed by
+ * reconfigure and no queue configure invoked from application case.
+ */
+ if (dev->configured == 1) {
+ rc = nix_restore_queue_cfg(eth_dev);
+ if (rc)
+ goto uninstall_mc_list;
+ }
+
+ /* Update the mac address */
+ ea = eth_dev->data->mac_addrs;
+ memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
+ if (rte_is_zero_ether_addr(ea))
+ rte_eth_random_addr((uint8_t *)ea);
+
+ rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
+
+ /* Apply new link configurations if changed */
+ rc = otx2_apply_link_speed(eth_dev);
+ if (rc) {
+ otx2_err("Failed to set link configuration");
+ goto uninstall_mc_list;
+ }
+
+ otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
+ " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
+ " rx_flags=0x%x tx_flags=0x%x",
+ eth_dev->data->port_id, ea_fmt, nb_rxq,
+ nb_txq, dev->rx_offloads, dev->tx_offloads,
+ dev->rx_offload_flags, dev->tx_offload_flags);
+
+ /* All good */
+ dev->configured = 1;
+ dev->configured_nb_rx_qs = data->nb_rx_queues;
+ dev->configured_nb_tx_qs = data->nb_tx_queues;
+ return 0;
+
+uninstall_mc_list:
+ otx2_nix_mc_addr_list_uninstall(eth_dev);
+sec_fini:
+ otx2_eth_sec_fini(eth_dev);
+cq_fini:
+ oxt2_nix_unregister_cq_irqs(eth_dev);
+q_irq_fini:
+ oxt2_nix_unregister_queue_irqs(eth_dev);
+vlan_fini:
+ otx2_nix_vlan_fini(eth_dev);
+tm_fini:
+ otx2_nix_tm_fini(eth_dev);
+free_nix_lf:
+ nix_lf_free(dev);
+fail_offloads:
+ dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
+ dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
+fail_configure:
+ dev->configured = 0;
+ return rc;
+}
+
+int
+otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct otx2_eth_txq *txq;
+ int rc = -EINVAL;
+
+ txq = eth_dev->data->tx_queues[qidx];
+
+ if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ rc = otx2_nix_sq_sqb_aura_fc(txq, true);
+ if (rc) {
+ otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
+ qidx, rc);
+ goto done;
+ }
+
+ data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+
+done:
+ return rc;
+}
+
+int
+otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct otx2_eth_txq *txq;
+ int rc;
+
+ txq = eth_dev->data->tx_queues[qidx];
+
+ if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ txq->fc_cache_pkts = 0;
+
+ rc = otx2_nix_sq_sqb_aura_fc(txq, false);
+ if (rc) {
+ otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
+ qidx, rc);
+ goto done;
+ }
+
+ data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+done:
+ return rc;
+}
+
+static int
+otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+ struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
+ struct rte_eth_dev_data *data = eth_dev->data;
+ int rc;
+
+ if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
+ if (rc) {
+ otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
+ goto done;
+ }
+
+ data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
+
+done:
+ return rc;
+}
+
+static int
+otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
+{
+ struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
+ struct rte_eth_dev_data *data = eth_dev->data;
+ int rc;
+
+ if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
+ if (rc) {
+ otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
+ goto done;
+ }
+
+ data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+done:
+ return rc;
+}
+
+static void
+otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_mbuf *rx_pkts[32];
+ struct otx2_eth_rxq *rxq;
+ int count, i, j, rc;
+
+ nix_lf_switch_header_type_enable(dev, false);
+ nix_cgx_stop_link_event(dev);
+ npc_rx_disable(dev);
+
+ /* Stop rx queues and free up pkts pending */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rc = otx2_nix_rx_queue_stop(eth_dev, i);
+ if (rc)
+ continue;
+
+ rxq = eth_dev->data->rx_queues[i];
+ count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+ while (count) {
+ for (j = 0; j < count; j++)
+ rte_pktmbuf_free(rx_pkts[j]);
+ count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+ }
+ }
+
+ /* Stop tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_stop(eth_dev, i);
+}
+
+static int
+otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc, i;
+
+	/* MTU recalculation should be avoided here if PTP is enabled by the
+	 * PF, as otx2_nix_recalc_mtu() would be invoked from the
+	 * otx2_nix_ptp_enable_vf() call below.
+	 */
+ if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) {
+ rc = otx2_nix_recalc_mtu(eth_dev);
+ if (rc)
+ return rc;
+ }
+
+ /* Start rx queues */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rc = otx2_nix_rx_queue_start(eth_dev, i);
+ if (rc)
+ return rc;
+ }
+
+ /* Start tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = otx2_nix_tx_queue_start(eth_dev, i);
+ if (rc)
+ return rc;
+ }
+
+ rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
+ if (rc) {
+ otx2_err("Failed to update flow ctrl mode %d", rc);
+ return rc;
+ }
+
+ /* Enable PTP if it was requested by the app or if it is already
+ * enabled in PF owning this VF
+ */
+ memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
+ otx2_ethdev_is_ptp_en(dev))
+ otx2_nix_timesync_enable(eth_dev);
+ else
+ otx2_nix_timesync_disable(eth_dev);
+
+	/* Notify the VF that the mbuf data offset is shifted by 8 bytes if
+	 * PTP is already enabled in the PF owning this VF
+	 */
+ if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev))
+ otx2_nix_ptp_enable_vf(eth_dev);
+
+ rc = npc_rx_enable(dev);
+ if (rc) {
+ otx2_err("Failed to enable NPC rx %d", rc);
+ return rc;
+ }
+
+ otx2_nix_toggle_flag_link_cfg(dev, true);
+
+ rc = nix_cgx_start_link_event(dev);
+ if (rc) {
+ otx2_err("Failed to start cgx link event %d", rc);
+ goto rx_disable;
+ }
+
+ otx2_nix_toggle_flag_link_cfg(dev, false);
+ otx2_eth_set_tx_function(eth_dev);
+ otx2_eth_set_rx_function(eth_dev);
+
+ return 0;
+
+rx_disable:
+ npc_rx_disable(dev);
+ otx2_nix_toggle_flag_link_cfg(dev, false);
+ return rc;
+}
+
+static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
+static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
+
+/* Ethdev callback ops exposed to the DPDK application */
+static const struct eth_dev_ops otx2_eth_dev_ops = {
+ .dev_infos_get = otx2_nix_info_get,
+ .dev_configure = otx2_nix_configure,
+ .link_update = otx2_nix_link_update,
+ .tx_queue_setup = otx2_nix_tx_queue_setup,
+ .tx_queue_release = otx2_nix_tx_queue_release,
+ .tm_ops_get = otx2_nix_tm_ops_get,
+ .rx_queue_setup = otx2_nix_rx_queue_setup,
+ .rx_queue_release = otx2_nix_rx_queue_release,
+ .dev_start = otx2_nix_dev_start,
+ .dev_stop = otx2_nix_dev_stop,
+ .dev_close = otx2_nix_dev_close,
+ .tx_queue_start = otx2_nix_tx_queue_start,
+ .tx_queue_stop = otx2_nix_tx_queue_stop,
+ .rx_queue_start = otx2_nix_rx_queue_start,
+ .rx_queue_stop = otx2_nix_rx_queue_stop,
+ .dev_set_link_up = otx2_nix_dev_set_link_up,
+ .dev_set_link_down = otx2_nix_dev_set_link_down,
+ .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
+ .dev_ptypes_set = otx2_nix_ptypes_set,
+ .dev_reset = otx2_nix_dev_reset,
+ .stats_get = otx2_nix_dev_stats_get,
+ .stats_reset = otx2_nix_dev_stats_reset,
+ .get_reg = otx2_nix_dev_get_reg,
+ .mtu_set = otx2_nix_mtu_set,
+ .mac_addr_add = otx2_nix_mac_addr_add,
+ .mac_addr_remove = otx2_nix_mac_addr_del,
+ .mac_addr_set = otx2_nix_mac_addr_set,
+ .set_mc_addr_list = otx2_nix_set_mc_addr_list,
+ .promiscuous_enable = otx2_nix_promisc_enable,
+ .promiscuous_disable = otx2_nix_promisc_disable,
+ .allmulticast_enable = otx2_nix_allmulticast_enable,
+ .allmulticast_disable = otx2_nix_allmulticast_disable,
+ .queue_stats_mapping_set = otx2_nix_queue_stats_mapping,
+ .reta_update = otx2_nix_dev_reta_update,
+ .reta_query = otx2_nix_dev_reta_query,
+ .rss_hash_update = otx2_nix_rss_hash_update,
+ .rss_hash_conf_get = otx2_nix_rss_hash_conf_get,
+ .xstats_get = otx2_nix_xstats_get,
+ .xstats_get_names = otx2_nix_xstats_get_names,
+ .xstats_reset = otx2_nix_xstats_reset,
+ .xstats_get_by_id = otx2_nix_xstats_get_by_id,
+ .xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
+ .rxq_info_get = otx2_nix_rxq_info_get,
+ .txq_info_get = otx2_nix_txq_info_get,
+ .rx_burst_mode_get = otx2_rx_burst_mode_get,
+ .tx_burst_mode_get = otx2_tx_burst_mode_get,
+ .rx_queue_count = otx2_nix_rx_queue_count,
+ .rx_descriptor_done = otx2_nix_rx_descriptor_done,
+ .rx_descriptor_status = otx2_nix_rx_descriptor_status,
+ .tx_descriptor_status = otx2_nix_tx_descriptor_status,
+ .tx_done_cleanup = otx2_nix_tx_done_cleanup,
+ .set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
+ .pool_ops_supported = otx2_nix_pool_ops_supported,
+ .filter_ctrl = otx2_nix_dev_filter_ctrl,
+ .get_module_info = otx2_nix_get_module_info,
+ .get_module_eeprom = otx2_nix_get_module_eeprom,
+ .fw_version_get = otx2_nix_fw_version_get,
+ .flow_ctrl_get = otx2_nix_flow_ctrl_get,
+ .flow_ctrl_set = otx2_nix_flow_ctrl_set,
+ .timesync_enable = otx2_nix_timesync_enable,
+ .timesync_disable = otx2_nix_timesync_disable,
+ .timesync_read_rx_timestamp = otx2_nix_timesync_read_rx_timestamp,
+ .timesync_read_tx_timestamp = otx2_nix_timesync_read_tx_timestamp,
+ .timesync_adjust_time = otx2_nix_timesync_adjust_time,
+ .timesync_read_time = otx2_nix_timesync_read_time,
+ .timesync_write_time = otx2_nix_timesync_write_time,
+ .vlan_offload_set = otx2_nix_vlan_offload_set,
+ .vlan_filter_set = otx2_nix_vlan_filter_set,
+ .vlan_strip_queue_set = otx2_nix_vlan_strip_queue_set,
+ .vlan_tpid_set = otx2_nix_vlan_tpid_set,
+ .vlan_pvid_set = otx2_nix_vlan_pvid_set,
+ .rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable,
+ .rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable,
+ .read_clock = otx2_nix_read_clock,
+};
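+
+/* Illustrative sketch (assumption, not part of the driver): the ops table
+ * above is what the generic ethdev layer dispatches into. For example,
+ * rte_eth_dev_start(port_id) resolves the port to its rte_eth_dev and then
+ * calls dev_ops->dev_start(), i.e. otx2_nix_dev_start() for this PMD. A
+ * rough approximation of that dispatch step:
+ */
+static __rte_unused int
+example_dispatch_dev_start(struct rte_eth_dev *eth_dev)
+{
+	if (eth_dev->dev_ops->dev_start == NULL)
+		return -ENOTSUP;
+
+	/* Same indirection the ethdev layer performs on behalf of the app */
+	return (*eth_dev->dev_ops->dev_start)(eth_dev);
+}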
+
+static inline int
+nix_lf_attach(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct rsrc_attach_req *req;
+
+ /* Attach NIX(lf) */
+ req = otx2_mbox_alloc_msg_attach_resources(mbox);
+ req->modify = true;
+ req->nixlf = true;
+
+ return otx2_mbox_process(mbox);
+}
+
+static inline int
+nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct msix_offset_rsp *msix_rsp;
+ int rc;
+
+ /* Get NPA and NIX MSIX vector offsets */
+ otx2_mbox_alloc_msg_msix_offset(mbox);
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
+
+ dev->nix_msixoff = msix_rsp->nix_msixoff;
+
+ return rc;
+}
+
+static inline int
+otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
+{
+ struct rsrc_detach_req *req;
+
+ req = otx2_mbox_alloc_msg_detach_resources(mbox);
+
+ /* Detach all except npa lf */
+ req->partial = true;
+ req->nixlf = true;
+ req->sso = true;
+ req->ssow = true;
+ req->timlfs = true;
+ req->cptlfs = true;
+
+ return otx2_mbox_process(mbox);
+}
+
+static bool
+otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
+{
+ if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF ||
+ pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
+ return true;
+ return false;
+}
+
+static int
+otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_pci_device *pci_dev;
+ int rc, max_entries;
+
+ eth_dev->dev_ops = &otx2_eth_dev_ops;
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /* Setup callbacks for secondary process */
+ otx2_eth_set_tx_function(eth_dev);
+ otx2_eth_set_rx_function(eth_dev);
+ return 0;
+ }
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+
+ /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
+ memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
+ offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));
+
+ /* Parse devargs string */
+ rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
+ if (rc) {
+ otx2_err("Failed to parse devargs rc=%d", rc);
+ goto error;
+ }
+
+ if (!dev->mbox_active) {
+		/* Initialize the base otx2_dev object only if it is
+		 * not already initialized
+		 */
+ rc = otx2_dev_init(pci_dev, dev);
+ if (rc) {
+ otx2_err("Failed to initialize otx2_dev rc=%d", rc);
+ goto error;
+ }
+ }
+ if (otx2_eth_dev_is_sdp(pci_dev))
+ dev->sdp_link = true;
+ else
+ dev->sdp_link = false;
+ /* Device generic callbacks */
+ dev->ops = &otx2_dev_ops;
+ dev->eth_dev = eth_dev;
+
+ /* Grab the NPA LF if required */
+ rc = otx2_npa_lf_init(pci_dev, dev);
+ if (rc)
+ goto otx2_dev_uninit;
+
+ dev->configured = 0;
+ dev->drv_inited = true;
+ dev->ptype_disable = 0;
+ dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
+ dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
+
+ /* Attach NIX LF */
+ rc = nix_lf_attach(dev);
+ if (rc)
+ goto otx2_npa_uninit;
+
+ /* Get NIX MSIX offset */
+ rc = nix_lf_get_msix_offset(dev);
+ if (rc)
+ goto otx2_npa_uninit;
+
+ /* Register LF irq handlers */
+ rc = otx2_nix_register_irqs(eth_dev);
+ if (rc)
+ goto mbox_detach;
+
+ /* Get maximum number of supported MAC entries */
+ max_entries = otx2_cgx_mac_max_entries_get(dev);
+ if (max_entries < 0) {
+ otx2_err("Failed to get max entries for mac addr");
+ rc = -ENOTSUP;
+ goto unregister_irq;
+ }
+
+	/* For VFs, the returned max_entries will be 0, but at least one
+	 * entry is required to hold the default MAC address, so bump it
+	 * up to 1.
+	 */
+ if (max_entries == 0)
+ max_entries = 1;
+
+ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
+ RTE_ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ otx2_err("Failed to allocate memory for mac addr");
+ rc = -ENOMEM;
+ goto unregister_irq;
+ }
+
+ dev->max_mac_entries = max_entries;
+
+ rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
+ if (rc)
+ goto free_mac_addrs;
+
+ /* Update the mac address */
+ memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
+
+ /* Also sync same MAC address to CGX table */
+ otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);
+
+ /* Initialize the tm data structures */
+ otx2_nix_tm_conf_init(eth_dev);
+
+ dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
+ dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
+
+ if (otx2_dev_is_96xx_A0(dev) ||
+ otx2_dev_is_95xx_Ax(dev)) {
+ dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
+ dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
+ }
+
+ /* Create security ctx */
+ rc = otx2_eth_sec_ctx_create(eth_dev);
+ if (rc)
+ goto free_mac_addrs;
+ dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+ dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+
+ /* Initialize rte-flow */
+ rc = otx2_flow_init(dev);
+ if (rc)
+ goto sec_ctx_destroy;
+
+ otx2_nix_mc_filter_init(dev);
+
+ otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
+ " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
+ eth_dev->data->port_id, dev->pf, dev->vf,
+ OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
+ dev->rx_offload_capa, dev->tx_offload_capa);
+ return 0;
+
+sec_ctx_destroy:
+ otx2_eth_sec_ctx_destroy(eth_dev);
+free_mac_addrs:
+ rte_free(eth_dev->data->mac_addrs);
+unregister_irq:
+ otx2_nix_unregister_irqs(eth_dev);
+mbox_detach:
+ otx2_eth_dev_lf_detach(dev->mbox);
+otx2_npa_uninit:
+ otx2_npa_lf_fini();
+otx2_dev_uninit:
+ otx2_dev_fini(pci_dev, dev);
+error:
+ otx2_err("Failed to init nix eth_dev rc=%d", rc);
+ return rc;
+}
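+
+/* Illustrative sketch (assumption, not part of the driver): the partial
+ * memset in otx2_eth_dev_init() relies on the RTE_MARKER placed inside
+ * struct otx2_eth_dev: state inherited from OTX2_DEV is preserved while
+ * everything after the marker is wiped, which is what lets dev_reset()
+ * re-run init on a clean slate. The hypothetical structure below only
+ * demonstrates that RTE_MARKER/offsetof idiom.
+ */
+struct example_reset_priv {
+	uint64_t base_state;		/* preserved across reset */
+	RTE_MARKER reset_start;		/* zero-length marker */
+	uint32_t scratch;		/* wiped on reset */
+};
+
+static __rte_unused void
+example_partial_reset(struct example_reset_priv *priv)
+{
+	/* Zero everything from the marker to the end of the structure */
+	memset(&priv->reset_start, 0, sizeof(*priv) -
+	       offsetof(struct example_reset_priv, reset_start));
+}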
+
+static int
+otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_pci_device *pci_dev;
+ int rc, i;
+
+ /* Nothing to be done for secondary processes */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Clear the flag since we are closing down */
+ dev->configured = 0;
+
+ /* Disable nix bpid config */
+ otx2_nix_rxchan_bpid_cfg(eth_dev, false);
+
+ npc_rx_disable(dev);
+
+ /* Disable vlan offloads */
+ otx2_nix_vlan_fini(eth_dev);
+
+ /* Disable other rte_flow entries */
+ otx2_flow_fini(dev);
+
+ /* Free multicast filter list */
+ otx2_nix_mc_filter_fini(dev);
+
+ /* Disable PTP if already enabled */
+ if (otx2_ethdev_is_ptp_en(dev))
+ otx2_nix_timesync_disable(eth_dev);
+
+ nix_cgx_stop_link_event(dev);
+
+ /* Free up SQs */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ eth_dev->data->nb_tx_queues = 0;
+
+ /* Free up RQ's and CQ's */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ eth_dev->data->nb_rx_queues = 0;
+
+ /* Free tm resources */
+ rc = otx2_nix_tm_fini(eth_dev);
+ if (rc)
+ otx2_err("Failed to cleanup tm, rc=%d", rc);
+
+ /* Unregister queue irqs */
+ oxt2_nix_unregister_queue_irqs(eth_dev);
+
+ /* Unregister cq irqs */
+ if (eth_dev->data->dev_conf.intr_conf.rxq)
+ oxt2_nix_unregister_cq_irqs(eth_dev);
+
+ rc = nix_lf_free(dev);
+ if (rc)
+ otx2_err("Failed to free nix lf, rc=%d", rc);
+
+ rc = otx2_npa_lf_fini();
+ if (rc)
+ otx2_err("Failed to cleanup npa lf, rc=%d", rc);
+
+ /* Disable security */
+ otx2_eth_sec_fini(eth_dev);
+
+ /* Destroy security ctx */
+ otx2_eth_sec_ctx_destroy(eth_dev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ dev->drv_inited = false;
+
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ otx2_nix_unregister_irqs(eth_dev);
+
+ rc = otx2_eth_dev_lf_detach(dev->mbox);
+ if (rc)
+ otx2_err("Failed to detach resources, rc=%d", rc);
+
+ /* Check if mbox close is needed */
+ if (!mbox_close)
+ return 0;
+
+ if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
+ /* Will be freed later by PMD */
+ eth_dev->data->dev_private = NULL;
+ return 0;
+ }
+
+ otx2_dev_fini(pci_dev, dev);
+ return 0;
+}
+
+static void
+otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
+{
+ otx2_eth_dev_uninit(eth_dev, true);
+}
+
+static int
+otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
+{
+ int rc;
+
+ rc = otx2_eth_dev_uninit(eth_dev, false);
+ if (rc)
+ return rc;
+
+ return otx2_eth_dev_init(eth_dev);
+}
+
+static int
+nix_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct otx2_idev_cfg *idev;
+ struct otx2_dev *otx2_dev;
+ int rc;
+
+ eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
+ if (eth_dev) {
+ /* Cleanup eth dev */
+ rc = otx2_eth_dev_uninit(eth_dev, true);
+ if (rc)
+ return rc;
+
+ rte_eth_dev_pci_release(eth_dev);
+ }
+
+ /* Nothing to be done for secondary processes */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Check for common resources */
+ idev = otx2_intra_dev_get_cfg();
+ if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
+ return 0;
+
+ otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);
+
+ if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
+ goto exit;
+
+ /* Safe to cleanup mbox as no more users */
+ otx2_dev_fini(pci_dev, otx2_dev);
+ rte_free(otx2_dev);
+ return 0;
+
+exit:
+ otx2_info("%s: common resource in use by other devices", pci_dev->name);
+ return -EAGAIN;
+}
+
+static int
+nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ int rc;
+
+ RTE_SET_USED(pci_drv);
+
+ rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
+ otx2_eth_dev_init);
+
+	/* On error in a secondary process, recheck whether the port still
+	 * exists in the primary or is in the middle of being detached.
+	 */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
+ if (!rte_eth_dev_allocated(pci_dev->device.name))
+ return 0;
+ return rc;
+}
+
+static const struct rte_pci_id pci_nix_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF_VF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_SDP_PF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_nix = {
+ .id_table = pci_nix_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
+ RTE_PCI_DRV_INTR_LSC,
+ .probe = nix_probe,
+ .remove = nix_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
+RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");
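+
+/* Illustrative usage sketch (assumption, not part of the driver): once the
+ * device is bound to vfio-pci, plain EAL initialization is enough for the
+ * PCI bus to invoke nix_probe() above and expose the port to the app.
+ */
+#include <rte_eal.h>
+
+static __rte_unused int
+example_count_probed_ports(int argc, char **argv)
+{
+	/* EAL scans the PCI bus and matches pci_nix_map against bound devs */
+	if (rte_eal_init(argc, argv) < 0)
+		return -1;
+
+	return rte_eth_dev_count_avail();
+}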
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.h
new file mode 100644
index 000000000..0fbf68b8e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev.h
@@ -0,0 +1,592 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_ETHDEV_H__
+#define __OTX2_ETHDEV_H__
+
+#include <math.h>
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_kvargs.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_string_fns.h>
+#include <rte_time.h>
+
+#include "otx2_common.h"
+#include "otx2_dev.h"
+#include "otx2_flow.h"
+#include "otx2_irq.h"
+#include "otx2_mempool.h"
+#include "otx2_rx.h"
+#include "otx2_tm.h"
+#include "otx2_tx.h"
+
+#define OTX2_ETH_DEV_PMD_VERSION "1.0"
+
+/* Ethdev HWCAP and fixup flags: allocated from the MSB bits down to
+ * avoid conflicts with the base dev flags.
+ */
+
+/* Minimum CQ size should be 4K */
+#define OTX2_FIXUP_F_MIN_4K_Q BIT_ULL(63)
+#define otx2_ethdev_fixup_is_min_4k_q(dev) \
+ ((dev)->hwcap & OTX2_FIXUP_F_MIN_4K_Q)
+/* Limit CQ being full */
+#define OTX2_FIXUP_F_LIMIT_CQ_FULL BIT_ULL(62)
+#define otx2_ethdev_fixup_is_limit_cq_full(dev) \
+ ((dev)->hwcap & OTX2_FIXUP_F_LIMIT_CQ_FULL)
+
+/* Used for struct otx2_eth_dev::flags */
+#define OTX2_LINK_CFG_IN_PROGRESS_F BIT_ULL(0)
+
+/* VLAN tag inserted by NIX_TX_VTAG_ACTION.
+ * On Tx, space for this is always reserved in the FRS.
+ */
+#define NIX_MAX_VTAG_INS 2
+#define NIX_MAX_VTAG_ACT_SIZE (4 * NIX_MAX_VTAG_INS)
+
+/* ETH_HLEN+ETH_FCS+2*VLAN_HLEN */
+#define NIX_L2_OVERHEAD \
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 8)
+
+/* HW config of frame size doesn't include FCS */
+#define NIX_MAX_HW_FRS 9212
+#define NIX_MIN_HW_FRS 60
+
+/* Since the HW FRS includes NPC VTAG insertion space, the FRS exposed to
+ * the user is reduced by that space.
+ */
+#define NIX_MAX_FRS \
+ (NIX_MAX_HW_FRS + RTE_ETHER_CRC_LEN - NIX_MAX_VTAG_ACT_SIZE)
+
+#define NIX_MIN_FRS \
+ (NIX_MIN_HW_FRS + RTE_ETHER_CRC_LEN)
+
+#define NIX_MAX_MTU \
+ (NIX_MAX_FRS - NIX_L2_OVERHEAD)
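+
+/* Worked numbers under the definitions above (illustration only, assuming
+ * RTE_ETHER_HDR_LEN = 14 and RTE_ETHER_CRC_LEN = 4):
+ *   NIX_L2_OVERHEAD = 14 + 4 + 8 (two VLAN tags)           = 26
+ *   NIX_MAX_FRS     = 9212 + 4 - 8 (VTAG insertion space)  = 9208
+ *   NIX_MAX_MTU     = 9208 - 26                            = 9182
+ */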
+
+#define NIX_MAX_SQB 512
+#define NIX_DEF_SQB 16
+#define NIX_MIN_SQB 8
+#define NIX_SQB_LIST_SPACE 2
+#define NIX_RSS_RETA_SIZE_MAX 256
+/* Group 0 is used for RSS; groups 1-7 are used for the rte_flow RSS action */
+#define NIX_RSS_GRPS 8
+#define NIX_HASH_KEY_SIZE 48 /* 352 Bits */
+#define NIX_RSS_RETA_SIZE 64
+#define NIX_RX_MIN_DESC 16
+#define NIX_RX_MIN_DESC_ALIGN 16
+#define NIX_RX_NB_SEG_MAX 6
+#define NIX_CQ_ENTRY_SZ 128
+#define NIX_CQ_ALIGN 512
+#define NIX_SQB_LOWER_THRESH 70
+#define LMT_SLOT_MASK 0x7f
+#define NIX_RX_DEFAULT_RING_SZ 4096
+
+/* If PTP is enabled, an additional SEND MEM descriptor is required, which
+ * takes 2 words; hence a maximum of 7 IOVA addresses is possible
+ */
+#if defined(RTE_LIBRTE_IEEE1588)
+#define NIX_TX_NB_SEG_MAX 7
+#else
+#define NIX_TX_NB_SEG_MAX 9
+#endif
+
+#define NIX_TX_MSEG_SG_DWORDS \
+ ((RTE_ALIGN_MUL_CEIL(NIX_TX_NB_SEG_MAX, 3) / 3) \
+ + NIX_TX_NB_SEG_MAX)
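+
+/* Worked example (illustration only): each SG sub-descriptor appears to
+ * describe up to three segments, so with NIX_TX_NB_SEG_MAX = 9 the macro
+ * evaluates to ceil(9 / 3) + 9 = 3 + 9 = 12 dwords, and with the IEEE1588
+ * build (NIX_TX_NB_SEG_MAX = 7) to ceil(7 / 3) + 7 = 3 + 7 = 10 dwords.
+ */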
+
+/* Apply BP/DROP when CQ is 95% full */
+#define NIX_CQ_THRESH_LEVEL (5 * 256 / 100)
+#define NIX_CQ_FULL_ERRATA_SKID (1024ull * 256)
+
+#define CQ_OP_STAT_OP_ERR 63
+#define CQ_OP_STAT_CQ_ERR 46
+
+#define OP_ERR BIT_ULL(CQ_OP_STAT_OP_ERR)
+#define CQ_ERR BIT_ULL(CQ_OP_STAT_CQ_ERR)
+
+#define CQ_CQE_THRESH_DEFAULT 0x1ULL /* IRQ triggered when
+ * NIX_LF_CINTX_CNT[QCOUNT]
+ * crosses this value
+ */
+#define CQ_TIMER_THRESH_DEFAULT	0xAULL /* ~1 usec, i.e. 0xA * 100 nsec */
+#define CQ_TIMER_THRESH_MAX 255
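+
+/* Worked numbers (illustration only): NIX_CQ_THRESH_LEVEL = 5 * 256 / 100
+ * = 12, which, assuming the CQ drop/BP level is programmed on a 0-255 scale
+ * as the formula suggests, leaves roughly 5% headroom before BP/DROP is
+ * applied (the "95% full" comment above). CQ_TIMER_THRESH_DEFAULT is 0xA
+ * ticks of 100 nsec, i.e. ~1 usec, and CQ_CQE_THRESH_DEFAULT = 1 raises
+ * the completion interrupt as soon as NIX_LF_CINTX_CNT[QCOUNT] crosses 1.
+ */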
+
+#define NIX_RSS_L3_L4_SRC_DST (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY \
+ | ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)
+
+#define NIX_RSS_OFFLOAD (ETH_RSS_PORT | ETH_RSS_IP | ETH_RSS_UDP |\
+ ETH_RSS_TCP | ETH_RSS_SCTP | \
+ ETH_RSS_TUNNEL | ETH_RSS_L2_PAYLOAD | \
+ NIX_RSS_L3_L4_SRC_DST)
+
+#define NIX_TX_OFFLOAD_CAPA ( \
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE | \
+ DEV_TX_OFFLOAD_MT_LOCKFREE | \
+ DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_QINQ_INSERT | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_SCTP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO | \
+ DEV_TX_OFFLOAD_GENEVE_TNL_TSO | \
+ DEV_TX_OFFLOAD_GRE_TNL_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM)
+
+#define NIX_RX_OFFLOAD_CAPA ( \
+ DEV_RX_OFFLOAD_CHECKSUM | \
+ DEV_RX_OFFLOAD_SCTP_CKSUM | \
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_QINQ_STRIP | \
+ DEV_RX_OFFLOAD_TIMESTAMP | \
+ DEV_RX_OFFLOAD_RSS_HASH)
+
+#define NIX_DEFAULT_RSS_CTX_GROUP 0
+#define NIX_DEFAULT_RSS_MCAM_IDX -1
+
+#define otx2_ethdev_is_ptp_en(dev) ((dev)->ptp_en)
+
+#define NIX_TIMESYNC_TX_CMD_LEN 8
+/* Additional timesync values. */
+#define OTX2_CYCLECOUNTER_MASK 0xffffffffffffffffULL
+
+enum nix_q_size_e {
+ nix_q_size_16, /* 16 entries */
+ nix_q_size_64, /* 64 entries */
+ nix_q_size_256,
+ nix_q_size_1K,
+ nix_q_size_4K,
+ nix_q_size_16K,
+ nix_q_size_64K,
+ nix_q_size_256K,
+ nix_q_size_1M, /* Million entries */
+ nix_q_size_max
+};
+
+struct otx2_qint {
+ struct rte_eth_dev *eth_dev;
+ uint8_t qintx;
+};
+
+struct otx2_rss_info {
+ uint64_t nix_rss;
+ uint32_t flowkey_cfg;
+ uint16_t rss_size;
+ uint8_t rss_grps;
+ uint8_t alg_idx; /* Selected algo index */
+ uint16_t ind_tbl[NIX_RSS_RETA_SIZE_MAX];
+ uint8_t key[NIX_HASH_KEY_SIZE];
+};
+
+struct otx2_eth_qconf {
+ union {
+ struct rte_eth_txconf tx;
+ struct rte_eth_rxconf rx;
+ } conf;
+ void *mempool;
+ uint32_t socket_id;
+ uint16_t nb_desc;
+ uint8_t valid;
+};
+
+struct otx2_fc_info {
+ enum rte_eth_fc_mode mode; /**< Link flow control mode */
+ uint8_t rx_pause;
+ uint8_t tx_pause;
+ uint8_t chan_cnt;
+ uint16_t bpid[NIX_MAX_CHAN];
+};
+
+struct vlan_mkex_info {
+ struct npc_xtract_info la_xtract;
+ struct npc_xtract_info lb_xtract;
+ uint64_t lb_lt_offset;
+};
+
+struct mcast_entry {
+ struct rte_ether_addr mcast_mac;
+ uint16_t mcam_index;
+ TAILQ_ENTRY(mcast_entry) next;
+};
+
+TAILQ_HEAD(otx2_nix_mc_filter_tbl, mcast_entry);
+
+struct vlan_entry {
+ uint32_t mcam_idx;
+ uint16_t vlan_id;
+ TAILQ_ENTRY(vlan_entry) next;
+};
+
+TAILQ_HEAD(otx2_vlan_filter_tbl, vlan_entry);
+
+struct otx2_vlan_info {
+ struct otx2_vlan_filter_tbl fltr_tbl;
+ /* MKEX layer info */
+ struct mcam_entry def_tx_mcam_ent;
+ struct mcam_entry def_rx_mcam_ent;
+ struct vlan_mkex_info mkex;
+ /* Default mcam entry that matches vlan packets */
+ uint32_t def_rx_mcam_idx;
+ uint32_t def_tx_mcam_idx;
+ /* MCAM entry that matches double vlan packets */
+ uint32_t qinq_mcam_idx;
+ /* Indices of tx_vtag def registers */
+ uint32_t outer_vlan_idx;
+ uint32_t inner_vlan_idx;
+ uint16_t outer_vlan_tpid;
+ uint16_t inner_vlan_tpid;
+ uint16_t pvid;
+ /* QinQ entry allocated before default one */
+ uint8_t qinq_before_def;
+ uint8_t pvid_insert_on;
+ /* Rx vtag action type */
+ uint8_t vtag_type_idx;
+ uint8_t filter_on;
+ uint8_t strip_on;
+ uint8_t qinq_on;
+ uint8_t promisc_on;
+};
+
+struct otx2_eth_dev {
+ OTX2_DEV; /* Base class */
+ RTE_MARKER otx2_eth_dev_data_start;
+ uint16_t sqb_size;
+ uint16_t rx_chan_base;
+ uint16_t tx_chan_base;
+ uint8_t rx_chan_cnt;
+ uint8_t tx_chan_cnt;
+ uint8_t lso_tsov4_idx;
+ uint8_t lso_tsov6_idx;
+ uint8_t lso_base_idx;
+ uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+ uint8_t mkex_pfl_name[MKEX_NAME_LEN];
+ uint8_t max_mac_entries;
+ uint8_t lf_tx_stats;
+ uint8_t lf_rx_stats;
+ uint16_t flags;
+ uint16_t cints;
+ uint16_t qints;
+ uint8_t configured;
+ uint8_t configured_qints;
+ uint8_t configured_cints;
+ uint8_t configured_nb_rx_qs;
+ uint8_t configured_nb_tx_qs;
+ uint8_t ptype_disable;
+ uint16_t nix_msixoff;
+ uintptr_t base;
+ uintptr_t lmt_addr;
+ uint16_t scalar_ena;
+ uint16_t rss_tag_as_xor;
+ uint16_t max_sqb_count;
+ uint16_t rx_offload_flags; /* Selected Rx offload flags(NIX_RX_*_F) */
+ uint64_t rx_offloads;
+ uint16_t tx_offload_flags; /* Selected Tx offload flags(NIX_TX_*_F) */
+ uint64_t tx_offloads;
+ uint64_t rx_offload_capa;
+ uint64_t tx_offload_capa;
+ struct otx2_qint qints_mem[RTE_MAX_QUEUES_PER_PORT];
+ struct otx2_qint cints_mem[RTE_MAX_QUEUES_PER_PORT];
+ uint16_t txschq[NIX_TXSCH_LVL_CNT];
+ uint16_t txschq_contig[NIX_TXSCH_LVL_CNT];
+ uint16_t txschq_index[NIX_TXSCH_LVL_CNT];
+ uint16_t txschq_contig_index[NIX_TXSCH_LVL_CNT];
+	/* Non-contiguous queues */
+ uint16_t txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ /* Contiguous queues */
+ uint16_t txschq_contig_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ uint16_t otx2_tm_root_lvl;
+ uint16_t link_cfg_lvl;
+ uint16_t tm_flags;
+ uint16_t tm_leaf_cnt;
+ uint64_t tm_rate_min;
+ struct otx2_nix_tm_node_list node_list;
+ struct otx2_nix_tm_shaper_profile_list shaper_profile_list;
+ struct otx2_rss_info rss_info;
+ struct otx2_fc_info fc_info;
+ uint32_t txmap[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ uint32_t rxmap[RTE_ETHDEV_QUEUE_STAT_CNTRS];
+ struct otx2_npc_flow_info npc_flow;
+ struct otx2_vlan_info vlan_info;
+ struct otx2_eth_qconf *tx_qconf;
+ struct otx2_eth_qconf *rx_qconf;
+ struct rte_eth_dev *eth_dev;
+ eth_rx_burst_t rx_pkt_burst_no_offload;
+ /* PTP counters */
+ bool ptp_en;
+ struct otx2_timesync_info tstamp;
+ struct rte_timecounter systime_tc;
+ struct rte_timecounter rx_tstamp_tc;
+ struct rte_timecounter tx_tstamp_tc;
+ double clk_freq_mult;
+ uint64_t clk_delta;
+ bool mc_tbl_set;
+ struct otx2_nix_mc_filter_tbl mc_fltr_tbl;
+ bool sdp_link; /* SDP flag */
+ /* Inline IPsec params */
+ uint16_t ipsec_in_max_spi;
+ uint8_t duplex;
+ uint32_t speed;
+} __rte_cache_aligned;
+
+struct otx2_eth_txq {
+ uint64_t cmd[8];
+ int64_t fc_cache_pkts;
+ uint64_t *fc_mem;
+ void *lmt_addr;
+ rte_iova_t io_addr;
+ rte_iova_t fc_iova;
+ uint16_t sqes_per_sqb_log2;
+ int16_t nb_sqb_bufs_adj;
+ RTE_MARKER slow_path_start;
+ uint16_t nb_sqb_bufs;
+ uint16_t sq;
+ uint64_t offloads;
+ struct otx2_eth_dev *dev;
+ struct rte_mempool *sqb_pool;
+ struct otx2_eth_qconf qconf;
+} __rte_cache_aligned;
+
+struct otx2_eth_rxq {
+ uint64_t mbuf_initializer;
+ uint64_t data_off;
+ uintptr_t desc;
+ void *lookup_mem;
+ uintptr_t cq_door;
+ uint64_t wdata;
+ int64_t *cq_status;
+ uint32_t head;
+ uint32_t qmask;
+ uint32_t available;
+ uint16_t rq;
+ struct otx2_timesync_info *tstamp;
+ RTE_MARKER slow_path_start;
+ uint64_t aura;
+ uint64_t offloads;
+ uint32_t qlen;
+ struct rte_mempool *pool;
+ enum nix_q_size_e qsize;
+ struct rte_eth_dev *eth_dev;
+ struct otx2_eth_qconf qconf;
+ uint16_t cq_drop;
+} __rte_cache_aligned;
+
+static inline struct otx2_eth_dev *
+otx2_eth_pmd_priv(struct rte_eth_dev *eth_dev)
+{
+ return eth_dev->data->dev_private;
+}
+
+/* Ops */
+int otx2_nix_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info);
+int otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg);
+int otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+ size_t fw_size);
+int otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_module_info *modinfo);
+int otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
+ struct rte_dev_eeprom_info *info);
+int otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool);
+void otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
+int otx2_rx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_burst_mode *mode);
+int otx2_tx_burst_mode_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_burst_mode *mode);
+uint32_t otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t qidx);
+int otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
+int otx2_nix_rx_descriptor_done(void *rxq, uint16_t offset);
+int otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
+void otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en);
+int otx2_nix_promisc_enable(struct rte_eth_dev *eth_dev);
+int otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev);
+int otx2_nix_allmulticast_enable(struct rte_eth_dev *eth_dev);
+int otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev);
+int otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx);
+int otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx);
+uint64_t otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id);
+
+/* Multicast filter APIs */
+void otx2_nix_mc_filter_init(struct otx2_eth_dev *dev);
+void otx2_nix_mc_filter_fini(struct otx2_eth_dev *dev);
+int otx2_nix_mc_addr_list_install(struct rte_eth_dev *eth_dev);
+int otx2_nix_mc_addr_list_uninstall(struct rte_eth_dev *eth_dev);
+int otx2_nix_set_mc_addr_list(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr);
+
+/* MTU */
+int otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev);
+
+/* Link */
+void otx2_nix_toggle_flag_link_cfg(struct otx2_eth_dev *dev, bool set);
+int otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
+void otx2_eth_dev_link_status_update(struct otx2_dev *dev,
+ struct cgx_link_user_info *link);
+int otx2_nix_dev_set_link_up(struct rte_eth_dev *eth_dev);
+int otx2_nix_dev_set_link_down(struct rte_eth_dev *eth_dev);
+int otx2_apply_link_speed(struct rte_eth_dev *eth_dev);
+
+/* IRQ */
+int otx2_nix_register_irqs(struct rte_eth_dev *eth_dev);
+int oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev);
+int oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev);
+void otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev);
+void oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev);
+void oxt2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev);
+void otx2_nix_err_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb);
+void otx2_nix_ras_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb);
+
+int otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id);
+int otx2_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id);
+
+/* Debug */
+int otx2_nix_reg_dump(struct otx2_eth_dev *dev, uint64_t *data);
+int otx2_nix_dev_get_reg(struct rte_eth_dev *eth_dev,
+ struct rte_dev_reg_info *regs);
+int otx2_nix_queues_ctx_dump(struct rte_eth_dev *eth_dev);
+void otx2_nix_cqe_dump(const struct nix_cqe_hdr_s *cq);
+void otx2_nix_tm_dump(struct otx2_eth_dev *dev);
+
+/* Stats */
+int otx2_nix_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats);
+int otx2_nix_dev_stats_reset(struct rte_eth_dev *eth_dev);
+
+int otx2_nix_queue_stats_mapping(struct rte_eth_dev *dev,
+ uint16_t queue_id, uint8_t stat_idx,
+ uint8_t is_rx);
+int otx2_nix_xstats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat *xstats, unsigned int n);
+int otx2_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit);
+int otx2_nix_xstats_reset(struct rte_eth_dev *eth_dev);
+
+int otx2_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev,
+ const uint64_t *ids,
+ uint64_t *values, unsigned int n);
+int otx2_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids, unsigned int limit);
+
+/* RSS */
+void otx2_nix_rss_set_key(struct otx2_eth_dev *dev,
+ uint8_t *key, uint32_t key_len);
+uint32_t otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev,
+ uint64_t ethdev_rss, uint8_t rss_level);
+int otx2_rss_set_hf(struct otx2_eth_dev *dev,
+ uint32_t flowkey_cfg, uint8_t *alg_idx,
+ uint8_t group, int mcam_index);
+int otx2_nix_rss_tbl_init(struct otx2_eth_dev *dev, uint8_t group,
+ uint16_t *ind_tbl);
+int otx2_nix_rss_config(struct rte_eth_dev *eth_dev);
+
+int otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+int otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
+int otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+int otx2_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf);
+
+/* CGX */
+int otx2_cgx_rxtx_start(struct otx2_eth_dev *dev);
+int otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev);
+int otx2_cgx_mac_addr_set(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *addr);
+
+/* Flow Control */
+int otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev);
+
+int otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf);
+
+int otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf);
+
+int otx2_nix_rxchan_bpid_cfg(struct rte_eth_dev *eth_dev, bool enb);
+
+int otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev);
+
+/* VLAN */
+int otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev);
+int otx2_nix_vlan_fini(struct rte_eth_dev *eth_dev);
+int otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask);
+void otx2_nix_vlan_update_promisc(struct rte_eth_dev *eth_dev, int enable);
+int otx2_nix_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
+ int on);
+void otx2_nix_vlan_strip_queue_set(struct rte_eth_dev *dev,
+ uint16_t queue, int on);
+int otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
+ enum rte_vlan_type type, uint16_t tpid);
+int otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
+
+/* Lookup configuration */
+void *otx2_nix_fastpath_lookup_mem_get(void);
+
+/* PTYPES */
+const uint32_t *otx2_nix_supported_ptypes_get(struct rte_eth_dev *dev);
+int otx2_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask);
+
+/* Mac address handling */
+int otx2_nix_mac_addr_set(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *addr);
+int otx2_nix_mac_addr_get(struct rte_eth_dev *eth_dev, uint8_t *addr);
+int otx2_nix_mac_addr_add(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *addr,
+ uint32_t index, uint32_t pool);
+void otx2_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index);
+int otx2_cgx_mac_max_entries_get(struct otx2_eth_dev *dev);
+
+/* Devargs */
+int otx2_ethdev_parse_devargs(struct rte_devargs *devargs,
+ struct otx2_eth_dev *dev);
+
+/* Rx and Tx routines */
+void otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev);
+void otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev);
+void otx2_nix_form_default_desc(struct otx2_eth_txq *txq);
+
+/* Timesync - PTP routines */
+int otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev);
+int otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev);
+int otx2_nix_timesync_read_rx_timestamp(struct rte_eth_dev *eth_dev,
+ struct timespec *timestamp,
+ uint32_t flags);
+int otx2_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
+ struct timespec *timestamp);
+int otx2_nix_timesync_adjust_time(struct rte_eth_dev *eth_dev, int64_t delta);
+int otx2_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
+ const struct timespec *ts);
+int otx2_nix_timesync_read_time(struct rte_eth_dev *eth_dev,
+ struct timespec *ts);
+int otx2_eth_dev_ptp_info_update(struct otx2_dev *dev, bool ptp_en);
+int otx2_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *time);
+int otx2_nix_raw_clock_tsc_conv(struct otx2_eth_dev *dev);
+void otx2_nix_ptp_enable_vf(struct rte_eth_dev *eth_dev);
+
+#endif /* __OTX2_ETHDEV_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_debug.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_debug.c
new file mode 100644
index 000000000..6d951bc7e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_debug.c
@@ -0,0 +1,811 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_ethdev.h"
+
+#define nix_dump(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)
+#define NIX_REG_INFO(reg) {reg, #reg}
+#define NIX_REG_NAME_SZ 48
+
+struct nix_lf_reg_info {
+ uint32_t offset;
+ const char *name;
+};
+
+static const struct
+nix_lf_reg_info nix_lf_reg[] = {
+ NIX_REG_INFO(NIX_LF_RX_SECRETX(0)),
+ NIX_REG_INFO(NIX_LF_RX_SECRETX(1)),
+ NIX_REG_INFO(NIX_LF_RX_SECRETX(2)),
+ NIX_REG_INFO(NIX_LF_RX_SECRETX(3)),
+ NIX_REG_INFO(NIX_LF_RX_SECRETX(4)),
+ NIX_REG_INFO(NIX_LF_RX_SECRETX(5)),
+ NIX_REG_INFO(NIX_LF_CFG),
+ NIX_REG_INFO(NIX_LF_GINT),
+ NIX_REG_INFO(NIX_LF_GINT_W1S),
+ NIX_REG_INFO(NIX_LF_GINT_ENA_W1C),
+ NIX_REG_INFO(NIX_LF_GINT_ENA_W1S),
+ NIX_REG_INFO(NIX_LF_ERR_INT),
+ NIX_REG_INFO(NIX_LF_ERR_INT_W1S),
+ NIX_REG_INFO(NIX_LF_ERR_INT_ENA_W1C),
+ NIX_REG_INFO(NIX_LF_ERR_INT_ENA_W1S),
+ NIX_REG_INFO(NIX_LF_RAS),
+ NIX_REG_INFO(NIX_LF_RAS_W1S),
+ NIX_REG_INFO(NIX_LF_RAS_ENA_W1C),
+ NIX_REG_INFO(NIX_LF_RAS_ENA_W1S),
+ NIX_REG_INFO(NIX_LF_SQ_OP_ERR_DBG),
+ NIX_REG_INFO(NIX_LF_MNQ_ERR_DBG),
+ NIX_REG_INFO(NIX_LF_SEND_ERR_DBG),
+};
+
+static int
+nix_lf_get_reg_count(struct otx2_eth_dev *dev)
+{
+ int reg_count = 0;
+
+ reg_count = RTE_DIM(nix_lf_reg);
+ /* NIX_LF_TX_STATX */
+ reg_count += dev->lf_tx_stats;
+ /* NIX_LF_RX_STATX */
+ reg_count += dev->lf_rx_stats;
+	/* NIX_LF_QINTX_CNT */
+ reg_count += dev->qints;
+ /* NIX_LF_QINTX_INT */
+ reg_count += dev->qints;
+ /* NIX_LF_QINTX_ENA_W1S */
+ reg_count += dev->qints;
+ /* NIX_LF_QINTX_ENA_W1C */
+ reg_count += dev->qints;
+ /* NIX_LF_CINTX_CNT */
+ reg_count += dev->cints;
+ /* NIX_LF_CINTX_WAIT */
+ reg_count += dev->cints;
+ /* NIX_LF_CINTX_INT */
+ reg_count += dev->cints;
+ /* NIX_LF_CINTX_INT_W1S */
+ reg_count += dev->cints;
+ /* NIX_LF_CINTX_ENA_W1S */
+ reg_count += dev->cints;
+ /* NIX_LF_CINTX_ENA_W1C */
+ reg_count += dev->cints;
+
+ return reg_count;
+}
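+
+/* Summary of the count computed above (illustration only):
+ *   reg_count = RTE_DIM(nix_lf_reg)
+ *             + lf_tx_stats + lf_rx_stats
+ *             + 4 * qints   (CNT, INT, ENA_W1S, ENA_W1C)
+ *             + 6 * cints   (CNT, WAIT, INT, INT_W1S, ENA_W1S, ENA_W1C)
+ * which matches the number of 64-bit words otx2_nix_reg_dump() writes out.
+ */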
+
+int
+otx2_nix_reg_dump(struct otx2_eth_dev *dev, uint64_t *data)
+{
+ uintptr_t nix_lf_base = dev->base;
+ bool dump_stdout;
+ uint64_t reg;
+ uint32_t i;
+
+ dump_stdout = data ? 0 : 1;
+
+ for (i = 0; i < RTE_DIM(nix_lf_reg); i++) {
+ reg = otx2_read64(nix_lf_base + nix_lf_reg[i].offset);
+ if (dump_stdout && reg)
+ nix_dump("%32s = 0x%" PRIx64,
+ nix_lf_reg[i].name, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_TX_STATX */
+ for (i = 0; i < dev->lf_tx_stats; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_TX_STATX(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_TX_STATX", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_RX_STATX */
+ for (i = 0; i < dev->lf_rx_stats; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_RX_STATX(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_RX_STATX", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+	/* NIX_LF_QINTX_CNT */
+ for (i = 0; i < dev->qints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_QINTX_CNT(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_QINTX_CNT", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_QINTX_INT */
+ for (i = 0; i < dev->qints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_QINTX_INT(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_QINTX_INT", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_QINTX_ENA_W1S */
+ for (i = 0; i < dev->qints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1S(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_QINTX_ENA_W1S", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_QINTX_ENA_W1C */
+ for (i = 0; i < dev->qints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_QINTX_ENA_W1C(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_QINTX_ENA_W1C", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_CINTX_CNT */
+ for (i = 0; i < dev->cints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_CNT(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_CINTX_CNT", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_CINTX_WAIT */
+ for (i = 0; i < dev->cints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_WAIT(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_CINTX_WAIT", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_CINTX_INT */
+ for (i = 0; i < dev->cints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_INT(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_CINTX_INT", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_CINTX_INT_W1S */
+ for (i = 0; i < dev->cints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_INT_W1S(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_CINTX_INT_W1S", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_CINTX_ENA_W1S */
+ for (i = 0; i < dev->cints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1S(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_CINTX_ENA_W1S", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+
+ /* NIX_LF_CINTX_ENA_W1C */
+ for (i = 0; i < dev->cints; i++) {
+ reg = otx2_read64(nix_lf_base + NIX_LF_CINTX_ENA_W1C(i));
+ if (dump_stdout && reg)
+ nix_dump("%32s_%d = 0x%" PRIx64,
+ "NIX_LF_CINTX_ENA_W1C", i, reg);
+ if (data)
+ *data++ = reg;
+ }
+ return 0;
+}
+
+int
+otx2_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t *data = regs->data;
+
+ if (data == NULL) {
+ regs->length = nix_lf_get_reg_count(dev);
+ regs->width = 8;
+ return 0;
+ }
+
+ if (!regs->length ||
+ regs->length == (uint32_t)nix_lf_get_reg_count(dev)) {
+ otx2_nix_reg_dump(dev, data);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static inline void
+nix_lf_sq_dump(__otx2_io struct nix_sq_ctx_s *ctx)
+{
+ nix_dump("W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d",
+ ctx->sqe_way_mask, ctx->cq);
+ nix_dump("W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x",
+ ctx->sdp_mcast, ctx->substream);
+ nix_dump("W0: qint_idx \t\t\t%d\nW0: ena \t\t\t%d\n",
+ ctx->qint_idx, ctx->ena);
+
+ nix_dump("W1: sqb_count \t\t\t%d\nW1: default_chan \t\t%d",
+ ctx->sqb_count, ctx->default_chan);
+ nix_dump("W1: smq_rr_quantum \t\t%d\nW1: sso_ena \t\t\t%d",
+ ctx->smq_rr_quantum, ctx->sso_ena);
+ nix_dump("W1: xoff \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: smq\t\t\t\t%d\n",
+ ctx->xoff, ctx->cq_ena, ctx->smq);
+
+ nix_dump("W2: sqe_stype \t\t\t%d\nW2: sq_int_ena \t\t\t%d",
+ ctx->sqe_stype, ctx->sq_int_ena);
+ nix_dump("W2: sq_int \t\t\t%d\nW2: sqb_aura \t\t\t%d",
+ ctx->sq_int, ctx->sqb_aura);
+ nix_dump("W2: smq_rr_count \t\t%d\n", ctx->smq_rr_count);
+
+ nix_dump("W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d",
+ ctx->smq_next_sq_vld, ctx->smq_pend);
+ nix_dump("W3: smenq_next_sqb_vld \t%d\nW3: head_offset\t\t\t%d",
+ ctx->smenq_next_sqb_vld, ctx->head_offset);
+ nix_dump("W3: smenq_offset\t\t%d\nW3: tail_offset \t\t%d",
+ ctx->smenq_offset, ctx->tail_offset);
+ nix_dump("W3: smq_lso_segnum \t\t%d\nW3: smq_next_sq \t\t%d",
+ ctx->smq_lso_segnum, ctx->smq_next_sq);
+ nix_dump("W3: mnq_dis \t\t\t%d\nW3: lmt_dis \t\t\t%d",
+ ctx->mnq_dis, ctx->lmt_dis);
+ nix_dump("W3: cq_limit\t\t\t%d\nW3: max_sqe_size\t\t%d\n",
+ ctx->cq_limit, ctx->max_sqe_size);
+
+ nix_dump("W4: next_sqb \t\t\t0x%" PRIx64 "", ctx->next_sqb);
+ nix_dump("W5: tail_sqb \t\t\t0x%" PRIx64 "", ctx->tail_sqb);
+ nix_dump("W6: smenq_sqb \t\t\t0x%" PRIx64 "", ctx->smenq_sqb);
+ nix_dump("W7: smenq_next_sqb \t\t0x%" PRIx64 "", ctx->smenq_next_sqb);
+ nix_dump("W8: head_sqb \t\t\t0x%" PRIx64 "", ctx->head_sqb);
+
+ nix_dump("W9: vfi_lso_vld \t\t%d\nW9: vfi_lso_vlan1_ins_ena\t%d",
+ ctx->vfi_lso_vld, ctx->vfi_lso_vlan1_ins_ena);
+ nix_dump("W9: vfi_lso_vlan0_ins_ena\t%d\nW9: vfi_lso_mps\t\t\t%d",
+ ctx->vfi_lso_vlan0_ins_ena, ctx->vfi_lso_mps);
+ nix_dump("W9: vfi_lso_sb \t\t\t%d\nW9: vfi_lso_sizem1\t\t%d",
+ ctx->vfi_lso_sb, ctx->vfi_lso_sizem1);
+ nix_dump("W9: vfi_lso_total\t\t%d", ctx->vfi_lso_total);
+
+ nix_dump("W10: scm_lso_rem \t\t0x%" PRIx64 "",
+ (uint64_t)ctx->scm_lso_rem);
+ nix_dump("W11: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+ nix_dump("W12: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+ nix_dump("W14: dropped_octs \t\t0x%" PRIx64 "",
+ (uint64_t)ctx->drop_octs);
+ nix_dump("W15: dropped_pkts \t\t0x%" PRIx64 "",
+ (uint64_t)ctx->drop_pkts);
+}
+
+static inline void
+nix_lf_rq_dump(__otx2_io struct nix_rq_ctx_s *ctx)
+{
+ nix_dump("W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x",
+ ctx->wqe_aura, ctx->substream);
+ nix_dump("W0: cq \t\t\t\t%d\nW0: ena_wqwd \t\t\t%d",
+ ctx->cq, ctx->ena_wqwd);
+ nix_dump("W0: ipsech_ena \t\t\t%d\nW0: sso_ena \t\t\t%d",
+ ctx->ipsech_ena, ctx->sso_ena);
+ nix_dump("W0: ena \t\t\t%d\n", ctx->ena);
+
+ nix_dump("W1: lpb_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d",
+ ctx->lpb_drop_ena, ctx->spb_drop_ena);
+ nix_dump("W1: xqe_drop_ena \t\t%d\nW1: wqe_caching \t\t%d",
+ ctx->xqe_drop_ena, ctx->wqe_caching);
+ nix_dump("W1: pb_caching \t\t\t%d\nW1: sso_tt \t\t\t%d",
+ ctx->pb_caching, ctx->sso_tt);
+ nix_dump("W1: sso_grp \t\t\t%d\nW1: lpb_aura \t\t\t%d",
+ ctx->sso_grp, ctx->lpb_aura);
+ nix_dump("W1: spb_aura \t\t\t%d\n", ctx->spb_aura);
+
+ nix_dump("W2: xqe_hdr_split \t\t%d\nW2: xqe_imm_copy \t\t%d",
+ ctx->xqe_hdr_split, ctx->xqe_imm_copy);
+ nix_dump("W2: xqe_imm_size \t\t%d\nW2: later_skip \t\t\t%d",
+ ctx->xqe_imm_size, ctx->later_skip);
+ nix_dump("W2: first_skip \t\t\t%d\nW2: lpb_sizem1 \t\t\t%d",
+ ctx->first_skip, ctx->lpb_sizem1);
+ nix_dump("W2: spb_ena \t\t\t%d\nW2: wqe_skip \t\t\t%d",
+ ctx->spb_ena, ctx->wqe_skip);
+ nix_dump("W2: spb_sizem1 \t\t\t%d\n", ctx->spb_sizem1);
+
+ nix_dump("W3: spb_pool_pass \t\t%d\nW3: spb_pool_drop \t\t%d",
+ ctx->spb_pool_pass, ctx->spb_pool_drop);
+ nix_dump("W3: spb_aura_pass \t\t%d\nW3: spb_aura_drop \t\t%d",
+ ctx->spb_aura_pass, ctx->spb_aura_drop);
+ nix_dump("W3: wqe_pool_pass \t\t%d\nW3: wqe_pool_drop \t\t%d",
+ ctx->wqe_pool_pass, ctx->wqe_pool_drop);
+ nix_dump("W3: xqe_pass \t\t\t%d\nW3: xqe_drop \t\t\t%d\n",
+ ctx->xqe_pass, ctx->xqe_drop);
+
+ nix_dump("W4: qint_idx \t\t\t%d\nW4: rq_int_ena \t\t\t%d",
+ ctx->qint_idx, ctx->rq_int_ena);
+ nix_dump("W4: rq_int \t\t\t%d\nW4: lpb_pool_pass \t\t%d",
+ ctx->rq_int, ctx->lpb_pool_pass);
+ nix_dump("W4: lpb_pool_drop \t\t%d\nW4: lpb_aura_pass \t\t%d",
+ ctx->lpb_pool_drop, ctx->lpb_aura_pass);
+ nix_dump("W4: lpb_aura_drop \t\t%d\n", ctx->lpb_aura_drop);
+
+ nix_dump("W5: flow_tagw \t\t\t%d\nW5: bad_utag \t\t\t%d",
+ ctx->flow_tagw, ctx->bad_utag);
+ nix_dump("W5: good_utag \t\t\t%d\nW5: ltag \t\t\t%d\n",
+ ctx->good_utag, ctx->ltag);
+
+ nix_dump("W6: octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->octs);
+ nix_dump("W7: pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->pkts);
+ nix_dump("W8: drop_octs \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_octs);
+ nix_dump("W9: drop_pkts \t\t\t0x%" PRIx64 "", (uint64_t)ctx->drop_pkts);
+ nix_dump("W10: re_pkts \t\t\t0x%" PRIx64 "\n", (uint64_t)ctx->re_pkts);
+}
+
+static inline void
+nix_lf_cq_dump(__otx2_io struct nix_cq_ctx_s *ctx)
+{
+ nix_dump("W0: base \t\t\t0x%" PRIx64 "\n", ctx->base);
+
+ nix_dump("W1: wrptr \t\t\t%" PRIx64 "", (uint64_t)ctx->wrptr);
+ nix_dump("W1: avg_con \t\t\t%d\nW1: cint_idx \t\t\t%d",
+ ctx->avg_con, ctx->cint_idx);
+ nix_dump("W1: cq_err \t\t\t%d\nW1: qint_idx \t\t\t%d",
+ ctx->cq_err, ctx->qint_idx);
+ nix_dump("W1: bpid \t\t\t%d\nW1: bp_ena \t\t\t%d\n",
+ ctx->bpid, ctx->bp_ena);
+
+ nix_dump("W2: update_time \t\t%d\nW2: avg_level \t\t\t%d",
+ ctx->update_time, ctx->avg_level);
+ nix_dump("W2: head \t\t\t%d\nW2: tail \t\t\t%d\n",
+ ctx->head, ctx->tail);
+
+ nix_dump("W3: cq_err_int_ena \t\t%d\nW3: cq_err_int \t\t\t%d",
+ ctx->cq_err_int_ena, ctx->cq_err_int);
+ nix_dump("W3: qsize \t\t\t%d\nW3: caching \t\t\t%d",
+ ctx->qsize, ctx->caching);
+ nix_dump("W3: substream \t\t\t0x%03x\nW3: ena \t\t\t%d",
+ ctx->substream, ctx->ena);
+ nix_dump("W3: drop_ena \t\t\t%d\nW3: drop \t\t\t%d",
+ ctx->drop_ena, ctx->drop);
+ nix_dump("W3: bp \t\t\t\t%d\n", ctx->bp);
+}
+
+int
+otx2_nix_queues_ctx_dump(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+	int rc = 0, q, rq = eth_dev->data->nb_rx_queues;
+ int sq = eth_dev->data->nb_tx_queues;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct npa_aq_enq_rsp *npa_rsp;
+ struct npa_aq_enq_req *npa_aq;
+ struct otx2_npa_lf *npa_lf;
+ struct nix_aq_enq_rsp *rsp;
+ struct nix_aq_enq_req *aq;
+
+ npa_lf = otx2_npa_lf_obj_get();
+
+ for (q = 0; q < rq; q++) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = q;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to get cq context");
+ goto fail;
+ }
+ nix_dump("============== port=%d cq=%d ===============",
+ eth_dev->data->port_id, q);
+ nix_lf_cq_dump(&rsp->cq);
+ }
+
+ for (q = 0; q < rq; q++) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = q;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
+ if (rc) {
+ otx2_err("Failed to get rq context");
+ goto fail;
+ }
+ nix_dump("============== port=%d rq=%d ===============",
+ eth_dev->data->port_id, q);
+ nix_lf_rq_dump(&rsp->rq);
+ }
+ for (q = 0; q < sq; q++) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = q;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to get sq context");
+ goto fail;
+ }
+ nix_dump("============== port=%d sq=%d ===============",
+ eth_dev->data->port_id, q);
+ nix_lf_sq_dump(&rsp->sq);
+
+ if (!npa_lf) {
+ otx2_err("NPA LF doesn't exist");
+ continue;
+ }
+
+ /* Dump SQB Aura minimal info */
+ npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
+ npa_aq->aura_id = rsp->sq.sqb_aura;
+ npa_aq->ctype = NPA_AQ_CTYPE_AURA;
+ npa_aq->op = NPA_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(npa_lf->mbox, (void *)&npa_rsp);
+ if (rc) {
+ otx2_err("Failed to get sq's sqb_aura context");
+ continue;
+ }
+
+ nix_dump("\nSQB Aura W0: Pool addr\t\t0x%"PRIx64"",
+ npa_rsp->aura.pool_addr);
+ nix_dump("SQB Aura W1: ena\t\t\t%d",
+ npa_rsp->aura.ena);
+ nix_dump("SQB Aura W2: count\t\t%"PRIx64"",
+ (uint64_t)npa_rsp->aura.count);
+ nix_dump("SQB Aura W3: limit\t\t%"PRIx64"",
+ (uint64_t)npa_rsp->aura.limit);
+ nix_dump("SQB Aura W3: fc_ena\t\t%d",
+ npa_rsp->aura.fc_ena);
+ nix_dump("SQB Aura W4: fc_addr\t\t0x%"PRIx64"\n",
+ npa_rsp->aura.fc_addr);
+ }
+
+fail:
+ return rc;
+}
+
+/* Dumps struct nix_cqe_hdr_s and struct nix_rx_parse_s */
+void
+otx2_nix_cqe_dump(const struct nix_cqe_hdr_s *cq)
+{
+ const struct nix_rx_parse_s *rx =
+ (const struct nix_rx_parse_s *)((const uint64_t *)cq + 1);
+
+ nix_dump("tag \t\t0x%x\tq \t\t%d\t\tnode \t\t%d\tcqe_type \t%d",
+ cq->tag, cq->q, cq->node, cq->cqe_type);
+
+ nix_dump("W0: chan \t%d\t\tdesc_sizem1 \t%d",
+ rx->chan, rx->desc_sizem1);
+ nix_dump("W0: imm_copy \t%d\t\texpress \t%d",
+ rx->imm_copy, rx->express);
+ nix_dump("W0: wqwd \t%d\t\terrlev \t\t%d\t\terrcode \t%d",
+ rx->wqwd, rx->errlev, rx->errcode);
+ nix_dump("W0: latype \t%d\t\tlbtype \t\t%d\t\tlctype \t\t%d",
+ rx->latype, rx->lbtype, rx->lctype);
+ nix_dump("W0: ldtype \t%d\t\tletype \t\t%d\t\tlftype \t\t%d",
+ rx->ldtype, rx->letype, rx->lftype);
+ nix_dump("W0: lgtype \t%d \t\tlhtype \t\t%d",
+ rx->lgtype, rx->lhtype);
+
+ nix_dump("W1: pkt_lenm1 \t%d", rx->pkt_lenm1);
+ nix_dump("W1: l2m \t%d\t\tl2b \t\t%d\t\tl3m \t\t%d\tl3b \t\t%d",
+ rx->l2m, rx->l2b, rx->l3m, rx->l3b);
+ nix_dump("W1: vtag0_valid %d\t\tvtag0_gone \t%d",
+ rx->vtag0_valid, rx->vtag0_gone);
+ nix_dump("W1: vtag1_valid %d\t\tvtag1_gone \t%d",
+ rx->vtag1_valid, rx->vtag1_gone);
+ nix_dump("W1: pkind \t%d", rx->pkind);
+ nix_dump("W1: vtag0_tci \t%d\t\tvtag1_tci \t%d",
+ rx->vtag0_tci, rx->vtag1_tci);
+
+ nix_dump("W2: laflags \t%d\t\tlbflags\t\t%d\t\tlcflags \t%d",
+ rx->laflags, rx->lbflags, rx->lcflags);
+ nix_dump("W2: ldflags \t%d\t\tleflags\t\t%d\t\tlfflags \t%d",
+ rx->ldflags, rx->leflags, rx->lfflags);
+ nix_dump("W2: lgflags \t%d\t\tlhflags \t%d",
+ rx->lgflags, rx->lhflags);
+
+ nix_dump("W3: eoh_ptr \t%d\t\twqe_aura \t%d\t\tpb_aura \t%d",
+ rx->eoh_ptr, rx->wqe_aura, rx->pb_aura);
+ nix_dump("W3: match_id \t%d", rx->match_id);
+
+ nix_dump("W4: laptr \t%d\t\tlbptr \t\t%d\t\tlcptr \t\t%d",
+ rx->laptr, rx->lbptr, rx->lcptr);
+ nix_dump("W4: ldptr \t%d\t\tleptr \t\t%d\t\tlfptr \t\t%d",
+ rx->ldptr, rx->leptr, rx->lfptr);
+ nix_dump("W4: lgptr \t%d\t\tlhptr \t\t%d", rx->lgptr, rx->lhptr);
+
+ nix_dump("W5: vtag0_ptr \t%d\t\tvtag1_ptr \t%d\t\tflow_key_alg \t%d",
+ rx->vtag0_ptr, rx->vtag1_ptr, rx->flow_key_alg);
+}
+
+static uint8_t
+prepare_nix_tm_reg_dump(uint16_t hw_lvl, uint16_t schq, uint16_t link,
+ uint64_t *reg, char regstr[][NIX_REG_NAME_SZ])
+{
+ uint8_t k = 0;
+
+ switch (hw_lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ reg[k] = NIX_AF_SMQX_CFG(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_SMQ[%u]_CFG", schq);
+
+ reg[k] = NIX_AF_MDQX_PARENT(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_MDQ[%u]_PARENT", schq);
+
+ reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_MDQ[%u]_SCHEDULE", schq);
+
+ reg[k] = NIX_AF_MDQX_PIR(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_MDQ[%u]_PIR", schq);
+
+ reg[k] = NIX_AF_MDQX_CIR(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_MDQ[%u]_CIR", schq);
+
+ reg[k] = NIX_AF_MDQX_SHAPE(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_MDQ[%u]_SHAPE", schq);
+
+ reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_MDQ[%u]_SW_XOFF", schq);
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg[k] = NIX_AF_TL4X_PARENT(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL4[%u]_PARENT", schq);
+
+ reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL4[%u]_TOPOLOGY", schq);
+
+ reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL4[%u]_SDP_LINK_CFG", schq);
+
+ reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL4[%u]_SCHEDULE", schq);
+
+ reg[k] = NIX_AF_TL4X_PIR(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL4[%u]_PIR", schq);
+
+ reg[k] = NIX_AF_TL4X_CIR(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL4[%u]_CIR", schq);
+
+ reg[k] = NIX_AF_TL4X_SHAPE(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL4[%u]_SHAPE", schq);
+
+ reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL4[%u]_SW_XOFF", schq);
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg[k] = NIX_AF_TL3X_PARENT(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL3[%u]_PARENT", schq);
+
+ reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL3[%u]_TOPOLOGY", schq);
+
+ reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL3_TL2[%u]_LINK[%u]_CFG", schq, link);
+
+ reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL3[%u]_SCHEDULE", schq);
+
+ reg[k] = NIX_AF_TL3X_PIR(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL3[%u]_PIR", schq);
+
+ reg[k] = NIX_AF_TL3X_CIR(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL3[%u]_CIR", schq);
+
+ reg[k] = NIX_AF_TL3X_SHAPE(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL3[%u]_SHAPE", schq);
+
+ reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL3[%u]_SW_XOFF", schq);
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg[k] = NIX_AF_TL2X_PARENT(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL2[%u]_PARENT", schq);
+
+ reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL2[%u]_TOPOLOGY", schq);
+
+ reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, link);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL3_TL2[%u]_LINK[%u]_CFG", schq, link);
+
+ reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL2[%u]_SCHEDULE", schq);
+
+ reg[k] = NIX_AF_TL2X_PIR(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL2[%u]_PIR", schq);
+
+ reg[k] = NIX_AF_TL2X_CIR(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL2[%u]_CIR", schq);
+
+ reg[k] = NIX_AF_TL2X_SHAPE(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL2[%u]_SHAPE", schq);
+
+ reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL2[%u]_SW_XOFF", schq);
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL1[%u]_TOPOLOGY", schq);
+
+ reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL1[%u]_SCHEDULE", schq);
+
+ reg[k] = NIX_AF_TL1X_CIR(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL1[%u]_CIR", schq);
+
+ reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL1[%u]_SW_XOFF", schq);
+
+ reg[k] = NIX_AF_TL1X_DROPPED_PACKETS(schq);
+ snprintf(regstr[k++], NIX_REG_NAME_SZ,
+ "NIX_AF_TL1[%u]_DROPPED_PACKETS", schq);
+ break;
+ default:
+ break;
+ }
+
+ if (k > MAX_REGS_PER_MBOX_MSG) {
+ nix_dump("\t!!!NIX TM Registers request overflow!!!");
+ return 0;
+ }
+ return k;
+}
+
+/* Dump TM hierarchy and registers */
+void
+otx2_nix_tm_dump(struct otx2_eth_dev *dev)
+{
+ char regstr[MAX_REGS_PER_MBOX_MSG * 2][NIX_REG_NAME_SZ];
+ struct otx2_nix_tm_node *tm_node, *root_node, *parent;
+ uint64_t reg[MAX_REGS_PER_MBOX_MSG * 2];
+ struct nix_txschq_config *req;
+ const char *lvlstr, *parent_lvlstr;
+ struct nix_txschq_config *rsp;
+ uint32_t schq, parent_schq;
+ int hw_lvl, j, k, rc;
+
+ nix_dump("===TM hierarchy and registers dump of %s===",
+ dev->eth_dev->data->name);
+
+ root_node = NULL;
+
+ for (hw_lvl = 0; hw_lvl <= NIX_TXSCH_LVL_CNT; hw_lvl++) {
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->hw_lvl != hw_lvl)
+ continue;
+
+ parent = tm_node->parent;
+ if (hw_lvl == NIX_TXSCH_LVL_CNT) {
+ lvlstr = "SQ";
+ schq = tm_node->id;
+ } else {
+ lvlstr = nix_hwlvl2str(tm_node->hw_lvl);
+ schq = tm_node->hw_id;
+ }
+
+ if (parent) {
+ parent_schq = parent->hw_id;
+ parent_lvlstr =
+ nix_hwlvl2str(parent->hw_lvl);
+ } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
+ parent_schq = otx2_nix_get_link(dev);
+ parent_lvlstr = "LINK";
+ } else {
+ parent_schq = tm_node->parent_hw_id;
+ parent_lvlstr =
+ nix_hwlvl2str(tm_node->hw_lvl + 1);
+ }
+
+ nix_dump("%s_%d->%s_%d", lvlstr, schq,
+ parent_lvlstr, parent_schq);
+
+ if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ /* Need to dump TL1 when root is TL2 */
+ if (tm_node->hw_lvl == dev->otx2_tm_root_lvl)
+ root_node = tm_node;
+
+ /* Dump registers only when HWRES is present */
+ k = prepare_nix_tm_reg_dump(tm_node->hw_lvl, schq,
+ otx2_nix_get_link(dev), reg,
+ regstr);
+ if (!k)
+ continue;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+ req->read = 1;
+ req->lvl = tm_node->hw_lvl;
+ req->num_regs = k;
+ otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
+ rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
+ if (!rc) {
+ for (j = 0; j < k; j++)
+ nix_dump("\t%s=0x%016"PRIx64,
+ regstr[j], rsp->regval[j]);
+ } else {
+ nix_dump("\t!!!Failed to dump registers!!!");
+ }
+ }
+ nix_dump("\n");
+ }
+
+ /* Dump TL1 node data when root level is TL2 */
+ if (root_node && root_node->hw_lvl == NIX_TXSCH_LVL_TL2) {
+ k = prepare_nix_tm_reg_dump(NIX_TXSCH_LVL_TL1,
+ root_node->parent_hw_id,
+ otx2_nix_get_link(dev),
+ reg, regstr);
+ if (!k)
+ return;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+ req->read = 1;
+ req->lvl = NIX_TXSCH_LVL_TL1;
+ req->num_regs = k;
+ otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
+ rc = otx2_mbox_process_msg(dev->mbox, (void **)&rsp);
+ if (!rc) {
+ for (j = 0; j < k; j++)
+ nix_dump("\t%s=0x%016"PRIx64,
+ regstr[j], rsp->regval[j]);
+ } else {
+ nix_dump("\t!!!Failed to dump registers!!!");
+ }
+ }
+
+ otx2_nix_queues_ctx_dump(dev->eth_dev);
+}
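For orientation, otx2_nix_tm_dump() above emits one "child->parent" line per TM node and, for nodes with hardware resources attached, one line per register read back through the mailbox. A hypothetical fragment of that output follows; the queue IDs and register values are made up for illustration, and the level names assume nix_hwlvl2str() returns strings such as "TL1"/"TL2", in line with the register name formats used above.

===TM hierarchy and registers dump of 0002:02:00.0===
TL2_0->TL1_12
	NIX_AF_TL2[0]_PARENT=0x000000000000c000
	NIX_AF_TL2[0]_SCHEDULE=0x0000000000000008
	NIX_AF_TL2[0]_SW_XOFF=0x0000000000000000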
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_devargs.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_devargs.c
new file mode 100644
index 000000000..e8ddaa69f
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_devargs.c
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <inttypes.h>
+#include <math.h>
+
+#include "otx2_ethdev.h"
+
+static int
+parse_flow_max_priority(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint16_t val;
+
+ val = atoi(value);
+
+ /* Limit the max priority to 32 */
+ if (val < 1 || val > 32)
+ return -EINVAL;
+
+ *(uint16_t *)extra_args = val;
+
+ return 0;
+}
+
+static int
+parse_flow_prealloc_size(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint16_t val;
+
+ val = atoi(value);
+
+ /* Limit the prealloc size to 32 */
+ if (val < 1 || val > 32)
+ return -EINVAL;
+
+ *(uint16_t *)extra_args = val;
+
+ return 0;
+}
+
+static int
+parse_reta_size(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint32_t val;
+
+ val = atoi(value);
+
+ if (val <= ETH_RSS_RETA_SIZE_64)
+ val = ETH_RSS_RETA_SIZE_64;
+ else if (val > ETH_RSS_RETA_SIZE_64 && val <= ETH_RSS_RETA_SIZE_128)
+ val = ETH_RSS_RETA_SIZE_128;
+ else if (val > ETH_RSS_RETA_SIZE_128 && val <= ETH_RSS_RETA_SIZE_256)
+ val = ETH_RSS_RETA_SIZE_256;
+ else
+ val = NIX_RSS_RETA_SIZE;
+
+ *(uint16_t *)extra_args = val;
+
+ return 0;
+}
+
+static int
+parse_ipsec_in_max_spi(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint32_t val;
+
+ val = atoi(value);
+
+ *(uint16_t *)extra_args = val;
+
+ return 0;
+}
+
+static int
+parse_flag(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+
+ *(uint16_t *)extra_args = atoi(value);
+
+ return 0;
+}
+
+static int
+parse_sqb_count(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+ uint32_t val;
+
+ val = atoi(value);
+
+ if (val < NIX_MIN_SQB || val > NIX_MAX_SQB)
+ return -EINVAL;
+
+ *(uint16_t *)extra_args = val;
+
+ return 0;
+}
+
+static int
+parse_switch_header_type(const char *key, const char *value, void *extra_args)
+{
+ RTE_SET_USED(key);
+
+ if (strcmp(value, "higig2") == 0)
+ *(uint16_t *)extra_args = OTX2_PRIV_FLAGS_HIGIG;
+
+ if (strcmp(value, "dsa") == 0)
+ *(uint16_t *)extra_args = OTX2_PRIV_FLAGS_EDSA;
+
+ if (strcmp(value, "chlen90b") == 0)
+ *(uint16_t *)extra_args = OTX2_PRIV_FLAGS_LEN_90B;
+ return 0;
+}
+
+#define OTX2_RSS_RETA_SIZE "reta_size"
+#define OTX2_IPSEC_IN_MAX_SPI "ipsec_in_max_spi"
+#define OTX2_SCL_ENABLE "scalar_enable"
+#define OTX2_MAX_SQB_COUNT "max_sqb_count"
+#define OTX2_FLOW_PREALLOC_SIZE "flow_prealloc_size"
+#define OTX2_FLOW_MAX_PRIORITY "flow_max_priority"
+#define OTX2_SWITCH_HEADER_TYPE "switch_header"
+#define OTX2_RSS_TAG_AS_XOR "tag_as_xor"
+
+int
+otx2_ethdev_parse_devargs(struct rte_devargs *devargs, struct otx2_eth_dev *dev)
+{
+ uint16_t rss_size = NIX_RSS_RETA_SIZE;
+ uint16_t sqb_count = NIX_MAX_SQB;
+ uint16_t flow_prealloc_size = 8;
+ uint16_t switch_header_type = 0;
+ uint16_t flow_max_priority = 3;
+ uint16_t ipsec_in_max_spi = 1;
+ uint16_t scalar_enable = 0;
+ uint16_t rss_tag_as_xor = 0;
+ struct rte_kvargs *kvlist;
+
+ if (devargs == NULL)
+ goto null_devargs;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (kvlist == NULL)
+ goto exit;
+
+ rte_kvargs_process(kvlist, OTX2_RSS_RETA_SIZE,
+ &parse_reta_size, &rss_size);
+ rte_kvargs_process(kvlist, OTX2_IPSEC_IN_MAX_SPI,
+ &parse_ipsec_in_max_spi, &ipsec_in_max_spi);
+ rte_kvargs_process(kvlist, OTX2_SCL_ENABLE,
+ &parse_flag, &scalar_enable);
+ rte_kvargs_process(kvlist, OTX2_MAX_SQB_COUNT,
+ &parse_sqb_count, &sqb_count);
+ rte_kvargs_process(kvlist, OTX2_FLOW_PREALLOC_SIZE,
+ &parse_flow_prealloc_size, &flow_prealloc_size);
+ rte_kvargs_process(kvlist, OTX2_FLOW_MAX_PRIORITY,
+ &parse_flow_max_priority, &flow_max_priority);
+ rte_kvargs_process(kvlist, OTX2_SWITCH_HEADER_TYPE,
+ &parse_switch_header_type, &switch_header_type);
+ rte_kvargs_process(kvlist, OTX2_RSS_TAG_AS_XOR,
+ &parse_flag, &rss_tag_as_xor);
+ otx2_parse_common_devargs(kvlist);
+ rte_kvargs_free(kvlist);
+
+null_devargs:
+ dev->ipsec_in_max_spi = ipsec_in_max_spi;
+ dev->scalar_ena = scalar_enable;
+ dev->rss_tag_as_xor = rss_tag_as_xor;
+ dev->max_sqb_count = sqb_count;
+ dev->rss_info.rss_size = rss_size;
+ dev->npc_flow.flow_prealloc_size = flow_prealloc_size;
+ dev->npc_flow.flow_max_priority = flow_max_priority;
+ dev->npc_flow.switch_header_type = switch_header_type;
+ return 0;
+
+exit:
+ return -EINVAL;
+}
+
+RTE_PMD_REGISTER_PARAM_STRING(net_octeontx2,
+ OTX2_RSS_RETA_SIZE "=<64|128|256>"
+ OTX2_IPSEC_IN_MAX_SPI "=<1-65535>"
+ OTX2_SCL_ENABLE "=1"
+ OTX2_MAX_SQB_COUNT "=<8-512>"
+ OTX2_FLOW_PREALLOC_SIZE "=<1-32>"
+ OTX2_FLOW_MAX_PRIORITY "=<1-32>"
+ OTX2_SWITCH_HEADER_TYPE "=<higig2|dsa|chlen90b>"
+ OTX2_RSS_TAG_AS_XOR "=1"
+ OTX2_NPA_LOCK_MASK "=<1-65535>");
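As a usage sketch (not part of this patch), the devargs registered above travel with the PCI device, either on the EAL command line or via hot-plug. The BDF and values below are placeholders chosen to stay inside the ranges the parsers enforce:

#include <rte_dev.h>

/* Equivalent to passing
 *   -a 0002:02:00.0,reta_size=256,scalar_enable=1,max_sqb_count=64,flow_max_priority=10
 * on the EAL command line; rte_dev_probe() hot-plugs the port instead.
 */
static int
attach_otx2_port(void)
{
	return rte_dev_probe("0002:02:00.0,reta_size=256,scalar_enable=1,"
			     "max_sqb_count=64,flow_max_priority=10");
}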
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c
new file mode 100644
index 000000000..b121488fa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_irq.c
@@ -0,0 +1,494 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <inttypes.h>
+
+#include <rte_bus_pci.h>
+#include <rte_malloc.h>
+
+#include "otx2_ethdev.h"
+
+static void
+nix_lf_err_irq(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t intr;
+
+ intr = otx2_read64(dev->base + NIX_LF_ERR_INT);
+ if (intr == 0)
+ return;
+
+ otx2_err("Err_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
+
+ /* Clear interrupt */
+ otx2_write64(intr, dev->base + NIX_LF_ERR_INT);
+
+ /* Dump registers to std out */
+ otx2_nix_reg_dump(dev, NULL);
+ otx2_nix_queues_ctx_dump(eth_dev);
+}
+
+static int
+nix_lf_register_err_irq(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc, vec;
+
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;
+
+ /* Clear err interrupt */
+ otx2_nix_err_intr_enb_dis(eth_dev, false);
+ /* Set used interrupt vectors */
+ rc = otx2_register_irq(handle, nix_lf_err_irq, eth_dev, vec);
+	/* Enable all dev interrupts except RQ_DISABLED and CQ_DISABLED */
+ otx2_nix_err_intr_enb_dis(eth_dev, true);
+
+ return rc;
+}
+
+static void
+nix_lf_unregister_err_irq(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int vec;
+
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_ERR_INT;
+
+ /* Clear err interrupt */
+ otx2_nix_err_intr_enb_dis(eth_dev, false);
+ otx2_unregister_irq(handle, nix_lf_err_irq, eth_dev, vec);
+}
+
+static void
+nix_lf_ras_irq(void *param)
+{
+ struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t intr;
+
+ intr = otx2_read64(dev->base + NIX_LF_RAS);
+ if (intr == 0)
+ return;
+
+ otx2_err("Ras_intr=0x%" PRIx64 " pf=%d, vf=%d", intr, dev->pf, dev->vf);
+
+ /* Clear interrupt */
+ otx2_write64(intr, dev->base + NIX_LF_RAS);
+
+ /* Dump registers to std out */
+ otx2_nix_reg_dump(dev, NULL);
+ otx2_nix_queues_ctx_dump(eth_dev);
+}
+
+static int
+nix_lf_register_ras_irq(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc, vec;
+
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;
+
+	/* Disable RAS interrupt */
+ otx2_nix_ras_intr_enb_dis(eth_dev, false);
+ /* Set used interrupt vectors */
+ rc = otx2_register_irq(handle, nix_lf_ras_irq, eth_dev, vec);
+ /* Enable dev interrupt */
+ otx2_nix_ras_intr_enb_dis(eth_dev, true);
+
+ return rc;
+}
+
+static void
+nix_lf_unregister_ras_irq(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int vec;
+
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_POISON;
+
+	/* Disable RAS interrupt */
+ otx2_nix_ras_intr_enb_dis(eth_dev, false);
+ otx2_unregister_irq(handle, nix_lf_ras_irq, eth_dev, vec);
+}
+
+static inline uint8_t
+nix_lf_q_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t q,
+ uint32_t off, uint64_t mask)
+{
+ uint64_t reg, wdata;
+ uint8_t qint;
+
+ wdata = (uint64_t)q << 44;
+ reg = otx2_atomic64_add_nosync(wdata, (int64_t *)(dev->base + off));
+
+ if (reg & BIT_ULL(42) /* OP_ERR */) {
+		otx2_err("Failed to execute irq get off=0x%x", off);
+ return 0;
+ }
+
+ qint = reg & 0xff;
+ wdata &= mask;
+ otx2_write64(wdata | qint, dev->base + off);
+
+ return qint;
+}
+
+static inline uint8_t
+nix_lf_rq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t rq)
+{
+ return nix_lf_q_irq_get_and_clear(dev, rq, NIX_LF_RQ_OP_INT, ~0xff00);
+}
+
+static inline uint8_t
+nix_lf_cq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t cq)
+{
+ return nix_lf_q_irq_get_and_clear(dev, cq, NIX_LF_CQ_OP_INT, ~0xff00);
+}
+
+static inline uint8_t
+nix_lf_sq_irq_get_and_clear(struct otx2_eth_dev *dev, uint16_t sq)
+{
+ return nix_lf_q_irq_get_and_clear(dev, sq, NIX_LF_SQ_OP_INT, ~0x1ff00);
+}
+
+static inline void
+nix_lf_sq_debug_reg(struct otx2_eth_dev *dev, uint32_t off)
+{
+ uint64_t reg;
+
+ reg = otx2_read64(dev->base + off);
+ if (reg & BIT_ULL(44))
+ otx2_err("SQ=%d err_code=0x%x",
+ (int)((reg >> 8) & 0xfffff), (uint8_t)(reg & 0xff));
+}
+
+static void
+nix_lf_cq_irq(void *param)
+{
+ struct otx2_qint *cint = (struct otx2_qint *)param;
+ struct rte_eth_dev *eth_dev = cint->eth_dev;
+ struct otx2_eth_dev *dev;
+
+ dev = otx2_eth_pmd_priv(eth_dev);
+ /* Clear interrupt */
+ otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_INT(cint->qintx));
+}
+
+static void
+nix_lf_q_irq(void *param)
+{
+ struct otx2_qint *qint = (struct otx2_qint *)param;
+ struct rte_eth_dev *eth_dev = qint->eth_dev;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint8_t irq, qintx = qint->qintx;
+ int q, cq, rq, sq;
+ uint64_t intr;
+
+ intr = otx2_read64(dev->base + NIX_LF_QINTX_INT(qintx));
+ if (intr == 0)
+ return;
+
+ otx2_err("Queue_intr=0x%" PRIx64 " qintx=%d pf=%d, vf=%d",
+ intr, qintx, dev->pf, dev->vf);
+
+ /* Handle RQ interrupts */
+ for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
+ rq = q % dev->qints;
+ irq = nix_lf_rq_irq_get_and_clear(dev, rq);
+
+ if (irq & BIT_ULL(NIX_RQINT_DROP))
+ otx2_err("RQ=%d NIX_RQINT_DROP", rq);
+
+ if (irq & BIT_ULL(NIX_RQINT_RED))
+ otx2_err("RQ=%d NIX_RQINT_RED", rq);
+ }
+
+ /* Handle CQ interrupts */
+ for (q = 0; q < eth_dev->data->nb_rx_queues; q++) {
+ cq = q % dev->qints;
+ irq = nix_lf_cq_irq_get_and_clear(dev, cq);
+
+ if (irq & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
+ otx2_err("CQ=%d NIX_CQERRINT_DOOR_ERR", cq);
+
+ if (irq & BIT_ULL(NIX_CQERRINT_WR_FULL))
+ otx2_err("CQ=%d NIX_CQERRINT_WR_FULL", cq);
+
+ if (irq & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
+ otx2_err("CQ=%d NIX_CQERRINT_CQE_FAULT", cq);
+ }
+
+ /* Handle SQ interrupts */
+ for (q = 0; q < eth_dev->data->nb_tx_queues; q++) {
+ sq = q % dev->qints;
+ irq = nix_lf_sq_irq_get_and_clear(dev, sq);
+
+ if (irq & BIT_ULL(NIX_SQINT_LMT_ERR)) {
+ otx2_err("SQ=%d NIX_SQINT_LMT_ERR", sq);
+ nix_lf_sq_debug_reg(dev, NIX_LF_SQ_OP_ERR_DBG);
+ }
+ if (irq & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
+ otx2_err("SQ=%d NIX_SQINT_MNQ_ERR", sq);
+ nix_lf_sq_debug_reg(dev, NIX_LF_MNQ_ERR_DBG);
+ }
+ if (irq & BIT_ULL(NIX_SQINT_SEND_ERR)) {
+ otx2_err("SQ=%d NIX_SQINT_SEND_ERR", sq);
+ nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
+ }
+ if (irq & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) {
+ otx2_err("SQ=%d NIX_SQINT_SQB_ALLOC_FAIL", sq);
+ nix_lf_sq_debug_reg(dev, NIX_LF_SEND_ERR_DBG);
+ }
+ }
+
+ /* Clear interrupt */
+ otx2_write64(intr, dev->base + NIX_LF_QINTX_INT(qintx));
+
+ /* Dump registers to std out */
+ otx2_nix_reg_dump(dev, NULL);
+ otx2_nix_queues_ctx_dump(eth_dev);
+}
+
+int
+oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int vec, q, sqs, rqs, qs, rc = 0;
+
+ /* Figure out max qintx required */
+ rqs = RTE_MIN(dev->qints, eth_dev->data->nb_rx_queues);
+ sqs = RTE_MIN(dev->qints, eth_dev->data->nb_tx_queues);
+ qs = RTE_MAX(rqs, sqs);
+
+ dev->configured_qints = qs;
+
+ for (q = 0; q < qs; q++) {
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;
+
+ /* Clear QINT CNT */
+ otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
+
+ /* Clear interrupt */
+ otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));
+
+ dev->qints_mem[q].eth_dev = eth_dev;
+ dev->qints_mem[q].qintx = q;
+
+ /* Sync qints_mem update */
+ rte_smp_wmb();
+
+ /* Register queue irq vector */
+ rc = otx2_register_irq(handle, nix_lf_q_irq,
+ &dev->qints_mem[q], vec);
+ if (rc)
+ break;
+
+ otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
+ otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
+ /* Enable QINT interrupt */
+ otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1S(q));
+ }
+
+ return rc;
+}
+
+void
+oxt2_nix_unregister_queue_irqs(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int vec, q;
+
+ for (q = 0; q < dev->configured_qints; q++) {
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_QINT_START + q;
+
+ /* Clear QINT CNT */
+ otx2_write64(0, dev->base + NIX_LF_QINTX_CNT(q));
+ otx2_write64(0, dev->base + NIX_LF_QINTX_INT(q));
+
+ /* Clear interrupt */
+ otx2_write64(~0ull, dev->base + NIX_LF_QINTX_ENA_W1C(q));
+
+ /* Unregister queue irq vector */
+ otx2_unregister_irq(handle, nix_lf_q_irq,
+ &dev->qints_mem[q], vec);
+ }
+}
+
+int
+oxt2_nix_register_cq_irqs(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint8_t rc = 0, vec, q;
+
+ dev->configured_cints = RTE_MIN(dev->cints,
+ eth_dev->data->nb_rx_queues);
+
+ for (q = 0; q < dev->configured_cints; q++) {
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q;
+
+ /* Clear CINT CNT */
+ otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q));
+
+ /* Clear interrupt */
+ otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q));
+
+ dev->cints_mem[q].eth_dev = eth_dev;
+ dev->cints_mem[q].qintx = q;
+
+ /* Sync cints_mem update */
+ rte_smp_wmb();
+
+ /* Register queue irq vector */
+ rc = otx2_register_irq(handle, nix_lf_cq_irq,
+ &dev->cints_mem[q], vec);
+ if (rc) {
+			otx2_err("Failed to register CQ irq, rc=%d", rc);
+ return rc;
+ }
+
+ if (!handle->intr_vec) {
+ handle->intr_vec = rte_zmalloc("intr_vec",
+ dev->configured_cints *
+ sizeof(int), 0);
+ if (!handle->intr_vec) {
+ otx2_err("Failed to allocate %d rx intr_vec",
+ dev->configured_cints);
+ return -ENOMEM;
+ }
+ }
+		/* VFIO vector zero is reserved for the misc interrupt, so
+		 * do the required adjustment here. (b13bfab4cd)
+		 */
+ handle->intr_vec[q] = RTE_INTR_VEC_RXTX_OFFSET + vec;
+
+ /* Configure CQE interrupt coalescing parameters */
+ otx2_write64(((CQ_CQE_THRESH_DEFAULT) |
+ (CQ_CQE_THRESH_DEFAULT << 32) |
+ (CQ_TIMER_THRESH_DEFAULT << 48)),
+ dev->base + NIX_LF_CINTX_WAIT((q)));
+
+ /* Keeping the CQ interrupt disabled as the rx interrupt
+ * feature needs to be enabled/disabled on demand.
+ */
+ }
+
+ return rc;
+}
+
+void
+oxt2_nix_unregister_cq_irqs(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *handle = &pci_dev->intr_handle;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int vec, q;
+
+ for (q = 0; q < dev->configured_cints; q++) {
+ vec = dev->nix_msixoff + NIX_LF_INT_VEC_CINT_START + q;
+
+ /* Clear CINT CNT */
+ otx2_write64(0, dev->base + NIX_LF_CINTX_CNT(q));
+
+ /* Clear interrupt */
+ otx2_write64(BIT_ULL(0), dev->base + NIX_LF_CINTX_ENA_W1C(q));
+
+ /* Unregister queue irq vector */
+ otx2_unregister_irq(handle, nix_lf_cq_irq,
+ &dev->cints_mem[q], vec);
+ }
+}
+
+int
+otx2_nix_register_irqs(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc;
+
+ if (dev->nix_msixoff == MSIX_VECTOR_INVALID) {
+ otx2_err("Invalid NIXLF MSIX vector offset vector: 0x%x",
+ dev->nix_msixoff);
+ return -EINVAL;
+ }
+
+ /* Register lf err interrupt */
+ rc = nix_lf_register_err_irq(eth_dev);
+ /* Register RAS interrupt */
+ rc |= nix_lf_register_ras_irq(eth_dev);
+
+ return rc;
+}
+
+void
+otx2_nix_unregister_irqs(struct rte_eth_dev *eth_dev)
+{
+ nix_lf_unregister_err_irq(eth_dev);
+ nix_lf_unregister_ras_irq(eth_dev);
+}
+
+int
+otx2_nix_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ /* Enable CINT interrupt */
+ otx2_write64(BIT_ULL(0), dev->base +
+ NIX_LF_CINTX_ENA_W1S(rx_queue_id));
+
+ return 0;
+}
+
+int
+otx2_nix_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ /* Clear and disable CINT interrupt */
+ otx2_write64(BIT_ULL(0), dev->base +
+ NIX_LF_CINTX_ENA_W1C(rx_queue_id));
+
+ return 0;
+}
+
+void
+otx2_nix_err_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ /* Enable all nix lf error interrupts except
+ * RQ_DISABLED and CQ_DISABLED.
+ */
+ if (enb)
+ otx2_write64(~(BIT_ULL(11) | BIT_ULL(24)),
+ dev->base + NIX_LF_ERR_INT_ENA_W1S);
+ else
+ otx2_write64(~0ull, dev->base + NIX_LF_ERR_INT_ENA_W1C);
+}
+
+void
+otx2_nix_ras_intr_enb_dis(struct rte_eth_dev *eth_dev, bool enb)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ if (enb)
+ otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1S);
+ else
+ otx2_write64(~0ull, dev->base + NIX_LF_RAS_ENA_W1C);
+}
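For context, here is a minimal application-side sketch of how the CQ (CINT) handlers above are normally exercised through the generic ethdev Rx-interrupt API. It assumes the port was configured with intr_conf.rxq = 1 so that the intr_vec table allocated in oxt2_nix_register_cq_irqs() is populated; port/queue IDs are placeholders and error handling is trimmed:

#include <rte_ethdev.h>
#include <rte_interrupts.h>

/* Sleep until queue 'qid' of 'port' raises its CQ interrupt. */
static void
wait_for_rx(uint16_t port, uint16_t qid)
{
	struct rte_epoll_event ev;

	/* Map the queue's event fd into this thread's epoll instance;
	 * a real application does this once at setup time.
	 */
	rte_eth_dev_rx_intr_ctl_q(port, qid, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, NULL);

	/* Arms NIX_LF_CINTX_ENA_W1S via otx2_nix_rx_queue_intr_enable() */
	rte_eth_dev_rx_intr_enable(port, qid);

	/* Returns once nix_lf_cq_irq() has fired for this queue */
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);

	/* Disarm (NIX_LF_CINTX_ENA_W1C) before polling the queue again */
	rte_eth_dev_rx_intr_disable(port, qid);
}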
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c
new file mode 100644
index 000000000..80ac2b96e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_ops.c
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_mbuf_pool_ops.h>
+
+#include "otx2_ethdev.h"
+
+int
+otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ uint32_t buffsz, frame_size = mtu + NIX_L2_OVERHEAD;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_frs_cfg *req;
+ int rc;
+
+ frame_size += NIX_TIMESYNC_RX_OFFSET * otx2_ethdev_is_ptp_en(dev);
+
+ /* Check if MTU is within the allowed range */
+ if (frame_size < NIX_MIN_FRS || frame_size > NIX_MAX_FRS)
+ return -EINVAL;
+
+ buffsz = data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
+
+ /* Refuse MTU that requires the support of scattered packets
+ * when this feature has not been enabled before.
+ */
+ if (data->dev_started && frame_size > buffsz &&
+ !(dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER))
+ return -EINVAL;
+
+ /* Check <seg size> * <max_seg> >= max_frame */
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER) &&
+ (frame_size > buffsz * NIX_RX_NB_SEG_MAX))
+ return -EINVAL;
+
+ req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
+ req->update_smq = true;
+ if (otx2_dev_is_sdp(dev))
+ req->sdp_link = true;
+ /* FRS HW config should exclude FCS but include NPC VTAG insert size */
+ req->maxlen = frame_size - RTE_ETHER_CRC_LEN + NIX_MAX_VTAG_ACT_SIZE;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ /* Now just update Rx MAXLEN */
+ req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
+ req->maxlen = frame_size - RTE_ETHER_CRC_LEN;
+ if (otx2_dev_is_sdp(dev))
+ req->sdp_link = true;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ if (frame_size > RTE_ETHER_MAX_LEN)
+ dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ /* Update max_rx_pkt_len */
+ data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return rc;
+}
+
+int
+otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct otx2_eth_rxq *rxq;
+ uint32_t buffsz;
+ uint16_t mtu;
+ int rc;
+
+ /* Get rx buffer size */
+ rxq = data->rx_queues[0];
+ mbp_priv = rte_mempool_get_priv(rxq->pool);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ /* Setup scatter mode if needed by jumbo */
+ if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz)
+ dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+
+ /* Setup MTU based on max_rx_pkt_len */
+ mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD;
+
+ rc = otx2_nix_mtu_set(eth_dev, mtu);
+ if (rc)
+ otx2_err("Failed to set default MTU size %d", rc);
+
+ return rc;
+}
+
+static void
+nix_cgx_promisc_config(struct rte_eth_dev *eth_dev, int en)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return;
+
+ if (en)
+ otx2_mbox_alloc_msg_cgx_promisc_enable(mbox);
+ else
+ otx2_mbox_alloc_msg_cgx_promisc_disable(mbox);
+
+ otx2_mbox_process(mbox);
+}
+
+void
+otx2_nix_promisc_config(struct rte_eth_dev *eth_dev, int en)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_rx_mode *req;
+
+ if (otx2_dev_is_vf(dev))
+ return;
+
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);
+
+ if (en)
+ req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;
+
+ otx2_mbox_process(mbox);
+ eth_dev->data->promiscuous = en;
+ otx2_nix_vlan_update_promisc(eth_dev, en);
+}
+
+int
+otx2_nix_promisc_enable(struct rte_eth_dev *eth_dev)
+{
+ otx2_nix_promisc_config(eth_dev, 1);
+ nix_cgx_promisc_config(eth_dev, 1);
+
+ return 0;
+}
+
+int
+otx2_nix_promisc_disable(struct rte_eth_dev *eth_dev)
+{
+ otx2_nix_promisc_config(eth_dev, 0);
+ nix_cgx_promisc_config(eth_dev, 0);
+
+ return 0;
+}
+
+static void
+nix_allmulticast_config(struct rte_eth_dev *eth_dev, int en)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_rx_mode *req;
+
+ if (otx2_dev_is_vf(dev))
+ return;
+
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(mbox);
+
+ if (en)
+ req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_ALLMULTI;
+ else if (eth_dev->data->promiscuous)
+ req->mode = NIX_RX_MODE_UCAST | NIX_RX_MODE_PROMISC;
+
+ otx2_mbox_process(mbox);
+}
+
+int
+otx2_nix_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ nix_allmulticast_config(eth_dev, 1);
+
+ return 0;
+}
+
+int
+otx2_nix_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ nix_allmulticast_config(eth_dev, 0);
+
+ return 0;
+}
+
+void
+otx2_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct otx2_eth_rxq *rxq;
+
+ rxq = eth_dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->pool;
+ qinfo->scattered_rx = eth_dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->qconf.nb_desc;
+
+ qinfo->conf.rx_free_thresh = 0;
+ qinfo->conf.rx_drop_en = 0;
+ qinfo->conf.rx_deferred_start = 0;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+otx2_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct otx2_eth_txq *txq;
+
+ txq = eth_dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->qconf.nb_desc;
+
+ qinfo->conf.tx_thresh.pthresh = 0;
+ qinfo->conf.tx_thresh.hthresh = 0;
+ qinfo->conf.tx_thresh.wthresh = 0;
+
+ qinfo->conf.tx_free_thresh = 0;
+ qinfo->conf.tx_rs_thresh = 0;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = 0;
+}
+
+int
+otx2_rx_burst_mode_get(struct rte_eth_dev *eth_dev,
+ __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ const struct burst_info {
+ uint16_t flags;
+ const char *output;
+ } rx_offload_map[] = {
+ {NIX_RX_OFFLOAD_RSS_F, "RSS,"},
+ {NIX_RX_OFFLOAD_PTYPE_F, " Ptype,"},
+ {NIX_RX_OFFLOAD_CHECKSUM_F, " Checksum,"},
+ {NIX_RX_OFFLOAD_VLAN_STRIP_F, " VLAN Strip,"},
+ {NIX_RX_OFFLOAD_MARK_UPDATE_F, " Mark Update,"},
+ {NIX_RX_OFFLOAD_TSTAMP_F, " Timestamp,"},
+ {NIX_RX_MULTI_SEG_F, " Scattered,"}
+ };
+ static const char *const burst_mode[] = {"Vector Neon, Rx Offloads:",
+ "Scalar, Rx Offloads:"
+ };
+ uint32_t i;
+
+ /* Update burst mode info */
+ rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
+ str_size - bytes);
+ if (rc < 0)
+ goto done;
+
+ bytes += rc;
+
+ /* Update Rx offload info */
+ for (i = 0; i < RTE_DIM(rx_offload_map); i++) {
+ if (dev->rx_offload_flags & rx_offload_map[i].flags) {
+ rc = rte_strscpy(mode->info + bytes,
+ rx_offload_map[i].output,
+ str_size - bytes);
+ if (rc < 0)
+ goto done;
+
+ bytes += rc;
+ }
+ }
+
+done:
+ return 0;
+}
+
+int
+otx2_tx_burst_mode_get(struct rte_eth_dev *eth_dev,
+ __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ ssize_t bytes = 0, str_size = RTE_ETH_BURST_MODE_INFO_SIZE, rc;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ const struct burst_info {
+ uint16_t flags;
+ const char *output;
+ } tx_offload_map[] = {
+ {NIX_TX_OFFLOAD_L3_L4_CSUM_F, " Inner L3/L4 csum,"},
+ {NIX_TX_OFFLOAD_OL3_OL4_CSUM_F, " Outer L3/L4 csum,"},
+ {NIX_TX_OFFLOAD_VLAN_QINQ_F, " VLAN Insertion,"},
+ {NIX_TX_OFFLOAD_MBUF_NOFF_F, " MBUF free disable,"},
+ {NIX_TX_OFFLOAD_TSTAMP_F, " Timestamp,"},
+ {NIX_TX_OFFLOAD_TSO_F, " TSO,"},
+ {NIX_TX_MULTI_SEG_F, " Scattered,"}
+ };
+ static const char *const burst_mode[] = {"Vector Neon, Tx Offloads:",
+ "Scalar, Tx Offloads:"
+ };
+ uint32_t i;
+
+ /* Update burst mode info */
+ rc = rte_strscpy(mode->info + bytes, burst_mode[dev->scalar_ena],
+ str_size - bytes);
+ if (rc < 0)
+ goto done;
+
+ bytes += rc;
+
+ /* Update Tx offload info */
+ for (i = 0; i < RTE_DIM(tx_offload_map); i++) {
+ if (dev->tx_offload_flags & tx_offload_map[i].flags) {
+ rc = rte_strscpy(mode->info + bytes,
+ tx_offload_map[i].output,
+ str_size - bytes);
+ if (rc < 0)
+ goto done;
+
+ bytes += rc;
+ }
+ }
+
+done:
+ return 0;
+}
+
+static void
+nix_rx_head_tail_get(struct otx2_eth_dev *dev,
+ uint32_t *head, uint32_t *tail, uint16_t queue_idx)
+{
+ uint64_t reg, val;
+
+ if (head == NULL || tail == NULL)
+ return;
+
+ reg = (((uint64_t)queue_idx) << 32);
+ val = otx2_atomic64_add_nosync(reg, (int64_t *)
+ (dev->base + NIX_LF_CQ_OP_STATUS));
+ if (val & (OP_ERR | CQ_ERR))
+ val = 0;
+
+ *tail = (uint32_t)(val & 0xFFFFF);
+ *head = (uint32_t)((val >> 20) & 0xFFFFF);
+}
+
+uint32_t
+otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t queue_idx)
+{
+ struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[queue_idx];
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint32_t head, tail;
+
+ nix_rx_head_tail_get(dev, &head, &tail, queue_idx);
+ return (tail - head) % rxq->qlen;
+}
+
+static inline int
+nix_offset_has_packet(uint32_t head, uint32_t tail, uint16_t offset)
+{
+	/* Check if the given offset (queue index) has a packet filled by HW */
+ if (tail > head && offset <= tail && offset >= head)
+ return 1;
+ /* Wrap around case */
+ if (head > tail && (offset >= head || offset <= tail))
+ return 1;
+
+ return 0;
+}
+
+int
+otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset)
+{
+ struct otx2_eth_rxq *rxq = rx_queue;
+ uint32_t head, tail;
+
+ nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
+ &head, &tail, rxq->rq);
+
+ return nix_offset_has_packet(head, tail, offset);
+}
+
+int
+otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct otx2_eth_rxq *rxq = rx_queue;
+ uint32_t head, tail;
+
+ if (rxq->qlen <= offset)
+ return -EINVAL;
+
+ nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
+ &head, &tail, rxq->rq);
+
+ if (nix_offset_has_packet(head, tail, offset))
+ return RTE_ETH_RX_DESC_DONE;
+ else
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+static void
+nix_tx_head_tail_get(struct otx2_eth_dev *dev,
+ uint32_t *head, uint32_t *tail, uint16_t queue_idx)
+{
+ uint64_t reg, val;
+
+ if (head == NULL || tail == NULL)
+ return;
+
+ reg = (((uint64_t)queue_idx) << 32);
+ val = otx2_atomic64_add_nosync(reg, (int64_t *)
+ (dev->base + NIX_LF_SQ_OP_STATUS));
+ if (val & OP_ERR)
+ val = 0;
+
+ *tail = (uint32_t)((val >> 28) & 0x3F);
+ *head = (uint32_t)((val >> 20) & 0x3F);
+}
+
+int
+otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct otx2_eth_txq *txq = tx_queue;
+ uint32_t head, tail;
+
+ if (txq->qconf.nb_desc <= offset)
+ return -EINVAL;
+
+ nix_tx_head_tail_get(txq->dev, &head, &tail, txq->sq);
+
+ if (nix_offset_has_packet(head, tail, offset))
+ return RTE_ETH_TX_DESC_DONE;
+ else
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+/* It is a NOP for octeontx2 as HW frees the buffer on xmit */
+int
+otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ RTE_SET_USED(txq);
+ RTE_SET_USED(free_cnt);
+
+ return 0;
+}
+
+int
+otx2_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+ size_t fw_size)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc = (int)fw_size;
+
+ if (fw_size > sizeof(dev->mkex_pfl_name))
+ rc = sizeof(dev->mkex_pfl_name);
+
+ rc = strlcpy(fw_version, (char *)dev->mkex_pfl_name, rc);
+
+ rc += 1; /* Add the size of '\0' */
+ if (fw_size < (uint32_t)rc)
+ return rc;
+
+ return 0;
+}
+
+int
+otx2_nix_pool_ops_supported(struct rte_eth_dev *eth_dev, const char *pool)
+{
+ RTE_SET_USED(eth_dev);
+
+ if (!strcmp(pool, rte_mbuf_platform_mempool_ops()))
+ return 0;
+
+ return -ENOTSUP;
+}
+
+int
+otx2_nix_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op, void *arg)
+{
+ RTE_SET_USED(eth_dev);
+
+ if (filter_type != RTE_ETH_FILTER_GENERIC) {
+ otx2_err("Unsupported filter type %d", filter_type);
+ return -ENOTSUP;
+ }
+
+ if (filter_op == RTE_ETH_FILTER_GET) {
+ *(const void **)arg = &otx2_flow_ops;
+ return 0;
+ }
+
+ otx2_err("Invalid filter_op %d", filter_op);
+ return -EINVAL;
+}
+
+static struct cgx_fw_data *
+nix_get_fwdata(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_fw_data *rsp = NULL;
+ int rc;
+
+ otx2_mbox_alloc_msg_cgx_get_aux_link_info(mbox);
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to get fw data: %d", rc);
+ return NULL;
+ }
+
+ return rsp;
+}
+
+int
+otx2_nix_get_module_info(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_module_info *modinfo)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct cgx_fw_data *rsp;
+
+ rsp = nix_get_fwdata(dev);
+ if (rsp == NULL)
+ return -EIO;
+
+ modinfo->type = rsp->fwdata.sfp_eeprom.sff_id;
+ modinfo->eeprom_len = SFP_EEPROM_SIZE;
+
+ return 0;
+}
+
+int
+otx2_nix_get_module_eeprom(struct rte_eth_dev *eth_dev,
+ struct rte_dev_eeprom_info *info)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct cgx_fw_data *rsp;
+
+ if (!info->data || !info->length ||
+ (info->offset + info->length > SFP_EEPROM_SIZE))
+ return -EINVAL;
+
+ rsp = nix_get_fwdata(dev);
+ if (rsp == NULL)
+ return -EIO;
+
+ otx2_mbox_memcpy(info->data, rsp->fwdata.sfp_eeprom.buf + info->offset,
+ info->length);
+
+ return 0;
+}
+
+int
+otx2_nix_info_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *devinfo)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ devinfo->min_rx_bufsize = NIX_MIN_FRS;
+ devinfo->max_rx_pktlen = NIX_MAX_FRS;
+ devinfo->max_rx_queues = RTE_MAX_QUEUES_PER_PORT;
+ devinfo->max_tx_queues = RTE_MAX_QUEUES_PER_PORT;
+ devinfo->max_mac_addrs = dev->max_mac_entries;
+ devinfo->max_vfs = pci_dev->max_vfs;
+ devinfo->max_mtu = devinfo->max_rx_pktlen - NIX_L2_OVERHEAD;
+ devinfo->min_mtu = devinfo->min_rx_bufsize - NIX_L2_OVERHEAD;
+
+ devinfo->rx_offload_capa = dev->rx_offload_capa;
+ devinfo->tx_offload_capa = dev->tx_offload_capa;
+ devinfo->rx_queue_offload_capa = 0;
+ devinfo->tx_queue_offload_capa = 0;
+
+ devinfo->reta_size = dev->rss_info.rss_size;
+ devinfo->hash_key_size = NIX_HASH_KEY_SIZE;
+ devinfo->flow_type_rss_offloads = NIX_RSS_OFFLOAD;
+
+ devinfo->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_drop_en = 0,
+ .offloads = 0,
+ };
+
+ devinfo->default_txconf = (struct rte_eth_txconf) {
+ .offloads = 0,
+ };
+
+ devinfo->default_rxportconf = (struct rte_eth_dev_portconf) {
+ .ring_size = NIX_RX_DEFAULT_RING_SZ,
+ };
+
+ devinfo->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = UINT16_MAX,
+ .nb_min = NIX_RX_MIN_DESC,
+ .nb_align = NIX_RX_MIN_DESC_ALIGN,
+ .nb_seg_max = NIX_RX_NB_SEG_MAX,
+ .nb_mtu_seg_max = NIX_RX_NB_SEG_MAX,
+ };
+ devinfo->rx_desc_lim.nb_max =
+ RTE_ALIGN_MUL_FLOOR(devinfo->rx_desc_lim.nb_max,
+ NIX_RX_MIN_DESC_ALIGN);
+
+ devinfo->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = UINT16_MAX,
+ .nb_min = 1,
+ .nb_align = 1,
+ .nb_seg_max = NIX_TX_NB_SEG_MAX,
+ .nb_mtu_seg_max = NIX_TX_NB_SEG_MAX,
+ };
+
+ /* Auto negotiation disabled */
+ devinfo->speed_capa = ETH_LINK_SPEED_FIXED;
+ if (!otx2_dev_is_vf_or_sdp(dev) && !otx2_dev_is_lbk(dev)) {
+ devinfo->speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G;
+
+		/* 50G and 100G are supported only on board version C0
+		 * and above.
+ */
+ if (!otx2_dev_is_Ax(dev))
+ devinfo->speed_capa |= ETH_LINK_SPEED_50G |
+ ETH_LINK_SPEED_100G;
+ }
+
+ devinfo->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
+
+ return 0;
+}
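A small caller-side sketch of the module info/EEPROM path implemented above, going through the generic ethdev API; the buffer is caller-provided and error handling beyond the first call is trimmed:

#include <rte_ethdev.h>

static int
read_sfp_eeprom(uint16_t port_id, uint8_t *buf, uint32_t buf_len)
{
	struct rte_eth_dev_module_info minfo;
	struct rte_dev_eeprom_info einfo = { 0 };
	int rc;

	/* Ends up in otx2_nix_get_module_info() */
	rc = rte_eth_dev_get_module_info(port_id, &minfo);
	if (rc)
		return rc;

	einfo.data = buf;
	einfo.offset = 0;
	einfo.length = RTE_MIN(minfo.eeprom_len, buf_len);

	/* Ends up in otx2_nix_get_module_eeprom() */
	return rte_eth_dev_get_module_eeprom(port_id, &einfo);
}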
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.c
new file mode 100644
index 000000000..5f6140f70
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.c
@@ -0,0 +1,842 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_esp.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_ip.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_security.h>
+#include <rte_security_driver.h>
+#include <rte_udp.h>
+
+#include "otx2_common.h"
+#include "otx2_cryptodev_qp.h"
+#include "otx2_ethdev.h"
+#include "otx2_ethdev_sec.h"
+#include "otx2_ipsec_fp.h"
+#include "otx2_sec_idev.h"
+
+#define AH_HDR_LEN 12
+#define AES_GCM_IV_LEN 8
+#define AES_GCM_MAC_LEN 16
+#define AES_CBC_IV_LEN 16
+#define SHA1_HMAC_LEN 12
+
+#define AES_GCM_ROUNDUP_BYTE_LEN 4
+#define AES_CBC_ROUNDUP_BYTE_LEN 16
+
+struct eth_sec_tag_const {
+ RTE_STD_C11
+ union {
+ struct {
+ uint32_t rsvd_11_0 : 12;
+ uint32_t port : 8;
+ uint32_t event_type : 4;
+ uint32_t rsvd_31_24 : 8;
+ };
+ uint32_t u32;
+ };
+};
+
+static struct rte_cryptodev_capabilities otx2_eth_sec_crypto_caps[] = {
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 20,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability otx2_eth_sec_capabilities[] = {
+ { /* IPsec Inline Protocol ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = otx2_eth_sec_crypto_caps,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ { /* IPsec Inline Protocol ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = otx2_eth_sec_crypto_caps,
+ .ol_flags = RTE_SECURITY_TX_OLOAD_NEED_MDATA
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+static void
+lookup_mem_sa_tbl_clear(struct rte_eth_dev *eth_dev)
+{
+ static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
+ uint16_t port = eth_dev->data->port_id;
+ const struct rte_memzone *mz;
+ uint64_t **sa_tbl;
+ uint8_t *mem;
+
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL)
+ return;
+
+ mem = mz->addr;
+
+ sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
+ if (sa_tbl[port] == NULL)
+ return;
+
+ rte_free(sa_tbl[port]);
+ sa_tbl[port] = NULL;
+}
+
+static int
+lookup_mem_sa_index_update(struct rte_eth_dev *eth_dev, int spi, void *sa)
+{
+ static const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ const struct rte_memzone *mz;
+ uint64_t **sa_tbl;
+ uint8_t *mem;
+
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ otx2_err("Could not find fastpath lookup table");
+ return -EINVAL;
+ }
+
+ mem = mz->addr;
+
+ sa_tbl = (uint64_t **)RTE_PTR_ADD(mem, OTX2_NIX_SA_TBL_START);
+
+ if (sa_tbl[port] == NULL) {
+ sa_tbl[port] = rte_malloc(NULL, dev->ipsec_in_max_spi *
+ sizeof(uint64_t), 0);
+ }
+
+ sa_tbl[port][spi] = (uint64_t)sa;
+
+ return 0;
+}
+
+static inline void
+in_sa_mz_name_get(char *name, int size, uint16_t port)
+{
+ snprintf(name, size, "otx2_ipsec_in_sadb_%u", port);
+}
+
+static struct otx2_ipsec_fp_in_sa *
+in_sa_get(uint16_t port, int sa_index)
+{
+ char name[RTE_MEMZONE_NAMESIZE];
+ struct otx2_ipsec_fp_in_sa *sa;
+ const struct rte_memzone *mz;
+
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL) {
+ otx2_err("Could not get the memzone reserved for IN SA DB");
+ return NULL;
+ }
+
+ sa = mz->addr;
+
+ return sa + sa_index;
+}
+
+static int
+ipsec_sa_const_set(struct rte_security_ipsec_xform *ipsec,
+ struct rte_crypto_sym_xform *xform,
+ struct otx2_sec_session_ipsec_ip *sess)
+{
+ struct rte_crypto_sym_xform *cipher_xform, *auth_xform;
+
+ sess->partial_len = sizeof(struct rte_ipv4_hdr);
+
+ if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_ESP) {
+ sess->partial_len += sizeof(struct rte_esp_hdr);
+ sess->roundup_len = sizeof(struct rte_esp_tail);
+ } else if (ipsec->proto == RTE_SECURITY_IPSEC_SA_PROTO_AH) {
+ sess->partial_len += AH_HDR_LEN;
+ } else {
+ return -EINVAL;
+ }
+
+ if (ipsec->options.udp_encap)
+ sess->partial_len += sizeof(struct rte_udp_hdr);
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ sess->partial_len += AES_GCM_IV_LEN;
+ sess->partial_len += AES_GCM_MAC_LEN;
+ sess->roundup_byte = AES_GCM_ROUNDUP_BYTE_LEN;
+ }
+ return 0;
+ }
+
+ if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ } else if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ } else {
+ return -EINVAL;
+ }
+ if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC) {
+ sess->partial_len += AES_CBC_IV_LEN;
+ sess->roundup_byte = AES_CBC_ROUNDUP_BYTE_LEN;
+ } else {
+ return -EINVAL;
+ }
+
+ if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
+ sess->partial_len += SHA1_HMAC_LEN;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+hmac_init(struct otx2_ipsec_fp_sa_ctl *ctl, struct otx2_cpt_qp *qp,
+ const uint8_t *auth_key, int len, uint8_t *hmac_key)
+{
+ struct inst_data {
+ struct otx2_cpt_res cpt_res;
+ uint8_t buffer[64];
+ } *md;
+
+ volatile struct otx2_cpt_res *res;
+ uint64_t timeout, lmt_status;
+ struct otx2_cpt_inst_s inst;
+ rte_iova_t md_iova;
+ int ret;
+
+ memset(&inst, 0, sizeof(struct otx2_cpt_inst_s));
+
+ md = rte_zmalloc(NULL, sizeof(struct inst_data), OTX2_CPT_RES_ALIGN);
+ if (md == NULL)
+ return -ENOMEM;
+
+ memcpy(md->buffer, auth_key, len);
+
+ md_iova = rte_malloc_virt2iova(md);
+ if (md_iova == RTE_BAD_IOVA) {
+ ret = -EINVAL;
+ goto free_md;
+ }
+
+ inst.res_addr = md_iova + offsetof(struct inst_data, cpt_res);
+ inst.opcode = OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD;
+ inst.param2 = ctl->auth_type;
+ inst.dlen = len;
+ inst.dptr = md_iova + offsetof(struct inst_data, buffer);
+ inst.rptr = inst.dptr;
+ inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
+
+ md->cpt_res.compcode = 0;
+ md->cpt_res.uc_compcode = 0xff;
+
+ timeout = rte_get_timer_cycles() + 5 * rte_get_timer_hz();
+
+ rte_cio_wmb();
+
+ do {
+ otx2_lmt_mov(qp->lmtline, &inst, 2);
+ lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
+ } while (lmt_status == 0);
+
+ res = (volatile struct otx2_cpt_res *)&md->cpt_res;
+
+ /* Wait until instruction completes or times out */
+ while (res->uc_compcode == 0xff) {
+ if (rte_get_timer_cycles() > timeout)
+ break;
+ }
+
+ if (res->u16[0] != OTX2_SEC_COMP_GOOD) {
+ ret = -EIO;
+ goto free_md;
+ }
+
+ /* Retrieve the ipad and opad from rptr */
+ memcpy(hmac_key, md->buffer, 48);
+
+ ret = 0;
+
+free_md:
+ rte_free(md);
+ return ret;
+}
+
+static int
+eth_sec_ipsec_out_sess_create(struct rte_eth_dev *eth_dev,
+ struct rte_security_ipsec_xform *ipsec,
+ struct rte_crypto_sym_xform *crypto_xform,
+ struct rte_security_session *sec_sess)
+{
+ struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
+ struct otx2_sec_session_ipsec_ip *sess;
+ uint16_t port = eth_dev->data->port_id;
+ int cipher_key_len, auth_key_len, ret;
+ const uint8_t *cipher_key, *auth_key;
+ struct otx2_ipsec_fp_sa_ctl *ctl;
+ struct otx2_ipsec_fp_out_sa *sa;
+ struct otx2_sec_session *priv;
+ struct otx2_cpt_inst_s inst;
+ struct otx2_cpt_qp *qp;
+
+ priv = get_sec_session_private_data(sec_sess);
+ sess = &priv->ipsec.ip;
+
+ sa = &sess->out_sa;
+ ctl = &sa->ctl;
+ if (ctl->valid) {
+ otx2_err("SA already registered");
+ return -EINVAL;
+ }
+
+ memset(sess, 0, sizeof(struct otx2_sec_session_ipsec_ip));
+
+ sess->seq = 1;
+
+ ret = ipsec_sa_const_set(ipsec, crypto_xform, sess);
+ if (ret < 0)
+ return ret;
+
+ if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ memcpy(sa->nonce, &ipsec->salt, 4);
+
+ if (ipsec->options.udp_encap == 1) {
+ sa->udp_src = 4500;
+ sa->udp_dst = 4500;
+ }
+
+ if (ipsec->mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+ /* Start ip id from 1 */
+ sess->ip_id = 1;
+
+ if (ipsec->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
+ memcpy(&sa->ip_src, &ipsec->tunnel.ipv4.src_ip,
+ sizeof(struct in_addr));
+ memcpy(&sa->ip_dst, &ipsec->tunnel.ipv4.dst_ip,
+ sizeof(struct in_addr));
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ cipher_xform = crypto_xform;
+ auth_xform = crypto_xform->next;
+
+ cipher_key_len = 0;
+ auth_key_len = 0;
+ auth_key = NULL;
+
+ if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ cipher_key = crypto_xform->aead.key.data;
+ cipher_key_len = crypto_xform->aead.key.length;
+ } else {
+ cipher_key = cipher_xform->cipher.key.data;
+ cipher_key_len = cipher_xform->cipher.key.length;
+ auth_key = auth_xform->auth.key.data;
+ auth_key_len = auth_xform->auth.key.length;
+ }
+
+ if (cipher_key_len != 0)
+ memcpy(sa->cipher_key, cipher_key, cipher_key_len);
+ else
+ return -EINVAL;
+
+ /* Determine word 7 of CPT instruction */
+ inst.u64[7] = 0;
+ inst.egrp = OTX2_CPT_EGRP_INLINE_IPSEC;
+ inst.cptr = rte_mempool_virt2iova(sa);
+ sess->inst_w7 = inst.u64[7];
+
+ /* Get CPT QP to be used for this SA */
+ ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
+ if (ret)
+ return ret;
+
+ sess->qp = qp;
+
+ sess->cpt_lmtline = qp->lmtline;
+ sess->cpt_nq_reg = qp->lf_nq_reg;
+
+ /* Populate control word */
+ ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
+ if (ret)
+ goto cpt_put;
+
+ if (auth_key_len && auth_key) {
+ ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
+ if (ret)
+ goto cpt_put;
+ }
+
+ return 0;
+cpt_put:
+ otx2_sec_idev_tx_cpt_qp_put(sess->qp);
+ return ret;
+}
+
+static int
+eth_sec_ipsec_in_sess_create(struct rte_eth_dev *eth_dev,
+ struct rte_security_ipsec_xform *ipsec,
+ struct rte_crypto_sym_xform *crypto_xform,
+ struct rte_security_session *sec_sess)
+{
+ struct rte_crypto_sym_xform *auth_xform, *cipher_xform;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_sec_session_ipsec_ip *sess;
+ uint16_t port = eth_dev->data->port_id;
+ int cipher_key_len, auth_key_len, ret;
+ const uint8_t *cipher_key, *auth_key;
+ struct otx2_ipsec_fp_sa_ctl *ctl;
+ struct otx2_ipsec_fp_in_sa *sa;
+ struct otx2_sec_session *priv;
+ struct otx2_cpt_qp *qp;
+
+ if (ipsec->spi >= dev->ipsec_in_max_spi) {
+ otx2_err("SPI exceeds max supported");
+ return -EINVAL;
+ }
+
+ sa = in_sa_get(port, ipsec->spi);
+ ctl = &sa->ctl;
+
+ priv = get_sec_session_private_data(sec_sess);
+ sess = &priv->ipsec.ip;
+
+ if (ctl->valid) {
+ otx2_err("SA already registered");
+ return -EINVAL;
+ }
+
+ memset(sa, 0, sizeof(struct otx2_ipsec_fp_in_sa));
+
+ auth_xform = crypto_xform;
+ cipher_xform = crypto_xform->next;
+
+ cipher_key_len = 0;
+ auth_key_len = 0;
+ auth_key = NULL;
+
+ if (crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (crypto_xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ memcpy(sa->nonce, &ipsec->salt, 4);
+ cipher_key = crypto_xform->aead.key.data;
+ cipher_key_len = crypto_xform->aead.key.length;
+ } else {
+ cipher_key = cipher_xform->cipher.key.data;
+ cipher_key_len = cipher_xform->cipher.key.length;
+ auth_key = auth_xform->auth.key.data;
+ auth_key_len = auth_xform->auth.key.length;
+ }
+
+ if (cipher_key_len != 0)
+ memcpy(sa->cipher_key, cipher_key, cipher_key_len);
+ else
+ return -EINVAL;
+
+ sess->in_sa = sa;
+
+ sa->userdata = priv->userdata;
+
+ if (lookup_mem_sa_index_update(eth_dev, ipsec->spi, sa))
+ return -EINVAL;
+
+ ret = ipsec_fp_sa_ctl_set(ipsec, crypto_xform, ctl);
+ if (ret)
+ return ret;
+
+ if (auth_key_len && auth_key) {
+ /* Get a queue pair for HMAC init */
+ ret = otx2_sec_idev_tx_cpt_qp_get(port, &qp);
+ if (ret)
+ return ret;
+ ret = hmac_init(ctl, qp, auth_key, auth_key_len, sa->hmac_key);
+ otx2_sec_idev_tx_cpt_qp_put(qp);
+ }
+ return ret;
+}
+
+static int
+eth_sec_ipsec_sess_create(struct rte_eth_dev *eth_dev,
+ struct rte_security_ipsec_xform *ipsec,
+ struct rte_crypto_sym_xform *crypto_xform,
+ struct rte_security_session *sess)
+{
+ int ret;
+
+ ret = ipsec_fp_xform_verify(ipsec, crypto_xform);
+ if (ret)
+ return ret;
+
+ if (ipsec->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS)
+ return eth_sec_ipsec_in_sess_create(eth_dev, ipsec,
+ crypto_xform, sess);
+ else
+ return eth_sec_ipsec_out_sess_create(eth_dev, ipsec,
+ crypto_xform, sess);
+}
+
+static int
+otx2_eth_sec_session_create(void *device,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ struct otx2_sec_session *priv;
+ int ret;
+
+ if (conf->action_type != RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL)
+ return -ENOTSUP;
+
+ if (rte_mempool_get(mempool, (void **)&priv)) {
+ otx2_err("Could not allocate security session private data");
+ return -ENOMEM;
+ }
+
+ set_sec_session_private_data(sess, priv);
+
+ /*
+ * Save userdata provided by the application. For ingress packets, this
+ * could be used to identify the SA.
+ */
+ priv->userdata = conf->userdata;
+
+ if (conf->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
+ ret = eth_sec_ipsec_sess_create(device, &conf->ipsec,
+ conf->crypto_xform,
+ sess);
+ else
+ ret = -ENOTSUP;
+
+ if (ret)
+ goto mempool_put;
+
+ return 0;
+
+mempool_put:
+ rte_mempool_put(mempool, priv);
+ set_sec_session_private_data(sess, NULL);
+ return ret;
+}
+
+static int
+otx2_eth_sec_session_destroy(void *device __rte_unused,
+ struct rte_security_session *sess)
+{
+ struct otx2_sec_session_ipsec_ip *sess_ip;
+ struct otx2_sec_session *priv;
+ struct rte_mempool *sess_mp;
+ int ret;
+
+ priv = get_sec_session_private_data(sess);
+ if (priv == NULL)
+ return -EINVAL;
+
+ sess_ip = &priv->ipsec.ip;
+
+ /* Release CPT LF used for this session */
+ if (sess_ip->qp != NULL) {
+ ret = otx2_sec_idev_tx_cpt_qp_put(sess_ip->qp);
+ if (ret)
+ return ret;
+ }
+
+ sess_mp = rte_mempool_from_obj(priv);
+
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, priv);
+
+ return 0;
+}
+
+static unsigned int
+otx2_eth_sec_session_get_size(void *device __rte_unused)
+{
+ return sizeof(struct otx2_sec_session);
+}
+
+static int
+otx2_eth_sec_set_pkt_mdata(void *device __rte_unused,
+ struct rte_security_session *session,
+ struct rte_mbuf *m, void *params __rte_unused)
+{
+ /* Set security session as the pkt metadata */
+ m->udata64 = (uint64_t)session;
+
+ return 0;
+}
+
+static int
+otx2_eth_sec_get_userdata(void *device __rte_unused, uint64_t md,
+ void **userdata)
+{
+ /* Retrieve userdata */
+ *userdata = (void *)md;
+
+ return 0;
+}
+
+static const struct rte_security_capability *
+otx2_eth_sec_capabilities_get(void *device __rte_unused)
+{
+ return otx2_eth_sec_capabilities;
+}
+
+static struct rte_security_ops otx2_eth_sec_ops = {
+ .session_create = otx2_eth_sec_session_create,
+ .session_destroy = otx2_eth_sec_session_destroy,
+ .session_get_size = otx2_eth_sec_session_get_size,
+ .set_pkt_metadata = otx2_eth_sec_set_pkt_mdata,
+ .get_userdata = otx2_eth_sec_get_userdata,
+ .capabilities_get = otx2_eth_sec_capabilities_get
+};
+
+int
+otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev)
+{
+ struct rte_security_ctx *ctx;
+ int ret;
+
+ ctx = rte_malloc("otx2_eth_sec_ctx",
+ sizeof(struct rte_security_ctx), 0);
+ if (ctx == NULL)
+ return -ENOMEM;
+
+ ret = otx2_sec_idev_cfg_init(eth_dev->data->port_id);
+ if (ret) {
+ rte_free(ctx);
+ return ret;
+ }
+
+ /* Populate ctx */
+
+ ctx->device = eth_dev;
+ ctx->ops = &otx2_eth_sec_ops;
+ ctx->sess_cnt = 0;
+
+ eth_dev->security_ctx = ctx;
+
+ return 0;
+}
+
+void
+otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev)
+{
+ rte_free(eth_dev->security_ctx);
+}
+
+static int
+eth_sec_ipsec_cfg(struct rte_eth_dev *eth_dev, uint8_t tt)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ struct nix_inline_ipsec_lf_cfg *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct eth_sec_tag_const tag_const;
+ char name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ mz = rte_memzone_lookup(name);
+ if (mz == NULL)
+ return -EINVAL;
+
+ req = otx2_mbox_alloc_msg_nix_inline_ipsec_lf_cfg(mbox);
+ req->enable = 1;
+ req->sa_base_addr = mz->iova;
+
+ req->ipsec_cfg0.tt = tt;
+
+ tag_const.u32 = 0;
+ tag_const.event_type = RTE_EVENT_TYPE_ETHDEV;
+ tag_const.port = port;
+ req->ipsec_cfg0.tag_const = tag_const.u32;
+
+ req->ipsec_cfg0.sa_pow2_size =
+ rte_log2_u32(sizeof(struct otx2_ipsec_fp_in_sa));
+ req->ipsec_cfg0.lenm1_max = NIX_MAX_FRS - 1;
+
+ req->ipsec_cfg1.sa_idx_w = rte_log2_u32(dev->ipsec_in_max_spi);
+ req->ipsec_cfg1.sa_idx_max = dev->ipsec_in_max_spi - 1;
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_rsp *rsp;
+ struct nix_aq_enq_req *aq;
+ int ret;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = 0; /* Read RQ:0 context */
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+
+ ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (ret < 0) {
+ otx2_err("Could not read RQ context");
+ return ret;
+ }
+
+ /* Update tag type */
+ ret = eth_sec_ipsec_cfg(eth_dev, rsp->rq.sso_tt);
+ if (ret < 0)
+ otx2_err("Could not update sec eth tag type");
+
+ return ret;
+}
+
+int
+otx2_eth_sec_init(struct rte_eth_dev *eth_dev)
+{
+ const size_t sa_width = sizeof(struct otx2_ipsec_fp_in_sa);
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ char name[RTE_MEMZONE_NAMESIZE];
+ const struct rte_memzone *mz;
+ int mz_sz, ret;
+ uint16_t nb_sa;
+
+ RTE_BUILD_BUG_ON(sa_width < 32 || sa_width > 512 ||
+ !RTE_IS_POWER_OF_2(sa_width));
+
+ if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
+ !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ return 0;
+
+ nb_sa = dev->ipsec_in_max_spi;
+ mz_sz = nb_sa * sa_width;
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ mz = rte_memzone_reserve_aligned(name, mz_sz, rte_socket_id(),
+ RTE_MEMZONE_IOVA_CONTIG, OTX2_ALIGN);
+
+ if (mz == NULL) {
+ otx2_err("Could not allocate inbound SA DB");
+ return -ENOMEM;
+ }
+
+ memset(mz->addr, 0, mz_sz);
+
+ ret = eth_sec_ipsec_cfg(eth_dev, SSO_TT_ORDERED);
+ if (ret < 0) {
+ otx2_err("Could not configure inline IPsec");
+ goto sec_fini;
+ }
+
+ return 0;
+
+sec_fini:
+ otx2_err("Could not configure device for security");
+ otx2_eth_sec_fini(eth_dev);
+ return ret;
+}
+
+void
+otx2_eth_sec_fini(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t port = eth_dev->data->port_id;
+ char name[RTE_MEMZONE_NAMESIZE];
+
+ if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY) &&
+ !(dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY))
+ return;
+
+ lookup_mem_sa_tbl_clear(eth_dev);
+
+ in_sa_mz_name_get(name, RTE_MEMZONE_NAMESIZE, port);
+ rte_memzone_free(rte_memzone_lookup(name));
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.h
new file mode 100644
index 000000000..e24358a05
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_ETHDEV_SEC_H__
+#define __OTX2_ETHDEV_SEC_H__
+
+#include <rte_ethdev.h>
+
+#include "otx2_ipsec_fp.h"
+
+#define OTX2_CPT_RES_ALIGN 16
+#define OTX2_NIX_SEND_DESC_ALIGN 16
+#define OTX2_CPT_INST_SIZE 64
+
+#define OTX2_CPT_EGRP_INLINE_IPSEC 1
+
+#define OTX2_CPT_OP_INLINE_IPSEC_OUTB (0x40 | 0x25)
+#define OTX2_CPT_OP_INLINE_IPSEC_INB (0x40 | 0x26)
+#define OTX2_CPT_OP_WRITE_HMAC_IPAD_OPAD (0x40 | 0x27)
+
+#define OTX2_SEC_CPT_COMP_GOOD 0x1
+#define OTX2_SEC_UC_COMP_GOOD 0x0
+#define OTX2_SEC_COMP_GOOD (OTX2_SEC_UC_COMP_GOOD << 8 | \
+ OTX2_SEC_CPT_COMP_GOOD)
+
+/* CPT Result */
+struct otx2_cpt_res {
+ union {
+ struct {
+ uint64_t compcode:8;
+ uint64_t uc_compcode:8;
+ uint64_t doneint:1;
+ uint64_t reserved_17_63:47;
+ uint64_t reserved_64_127;
+ };
+ uint16_t u16[8];
+ };
+};
+
+struct otx2_cpt_inst_s {
+ union {
+ struct {
+ /* W0 */
+ uint64_t nixtxl : 3;
+ uint64_t doneint : 1;
+ uint64_t nixtx_addr : 60;
+ /* W1 */
+ uint64_t res_addr : 64;
+ /* W2 */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t grp : 10;
+ uint64_t rsvd_175_172 : 4;
+ uint64_t rvu_pf_func : 16;
+ /* W3 */
+ uint64_t qord : 1;
+ uint64_t rsvd_194_193 : 2;
+ uint64_t wqe_ptr : 61;
+ /* W4 */
+ uint64_t dlen : 16;
+ uint64_t param2 : 16;
+ uint64_t param1 : 16;
+ uint64_t opcode : 16;
+ /* W5 */
+ uint64_t dptr : 64;
+ /* W6 */
+ uint64_t rptr : 64;
+ /* W7 */
+ uint64_t cptr : 61;
+ uint64_t egrp : 3;
+ };
+ uint64_t u64[8];
+ };
+};
+
+/*
+ * Security session for inline IPsec protocol offload. This is private data of
+ * inline capable PMD.
+ */
+struct otx2_sec_session_ipsec_ip {
+ RTE_STD_C11
+ union {
+ /*
+		 * Inbound SA is accessed by the crypto block, so its memory
+		 * is allocated separately and shared with the h/w. Only a
+		 * pointer to that memory is held in the session private
+		 * space.
+ */
+ void *in_sa;
+ /* Outbound SA */
+ struct otx2_ipsec_fp_out_sa out_sa;
+ };
+
+ /* Address of CPT LMTLINE */
+ void *cpt_lmtline;
+ /* CPT LF enqueue register address */
+ rte_iova_t cpt_nq_reg;
+
+ /* Pre calculated lengths and data for a session */
+ uint8_t partial_len;
+ uint8_t roundup_len;
+ uint8_t roundup_byte;
+ uint16_t ip_id;
+ union {
+ uint64_t esn;
+ struct {
+ uint32_t seq;
+ uint32_t esn_hi;
+ };
+ };
+
+ uint64_t inst_w7;
+
+ /* CPT QP used by SA */
+ struct otx2_cpt_qp *qp;
+};
+
+struct otx2_sec_session_ipsec {
+ struct otx2_sec_session_ipsec_ip ip;
+};
+
+struct otx2_sec_session {
+ struct otx2_sec_session_ipsec ipsec;
+ void *userdata;
+ /**< Userdata registered by the application */
+} __rte_cache_aligned;
+
+int otx2_eth_sec_ctx_create(struct rte_eth_dev *eth_dev);
+
+void otx2_eth_sec_ctx_destroy(struct rte_eth_dev *eth_dev);
+
+int otx2_eth_sec_update_tag_type(struct rte_eth_dev *eth_dev);
+
+int otx2_eth_sec_init(struct rte_eth_dev *eth_dev);
+
+void otx2_eth_sec_fini(struct rte_eth_dev *eth_dev);
+
+#endif /* __OTX2_ETHDEV_SEC_H__ */
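
On the completion path, the 16-bit word at u16[0] of struct otx2_cpt_res packs the hardware completion code in bits [7:0] and the microcode completion code in bits [15:8], which is what OTX2_SEC_COMP_GOOD above encodes. A small standalone sketch of that check, assuming a little-endian host so the bitfields map onto u16[0] as shown:

#include <stdint.h>
#include <stdio.h>

#define CPT_COMP_GOOD 0x1                       /* mirrors OTX2_SEC_CPT_COMP_GOOD */
#define UC_COMP_GOOD  0x0                       /* mirrors OTX2_SEC_UC_COMP_GOOD  */
#define COMP_GOOD     ((UC_COMP_GOOD << 8) | CPT_COMP_GOOD)

int main(void)
{
	/* u16[0] of the result: [7:0] HW completion, [15:8] microcode code */
	uint16_t res = (UC_COMP_GOOD << 8) | CPT_COMP_GOOD;

	printf("completion 0x%04x is %s\n", res,
	       res == COMP_GOOD ? "good" : "bad");
	return 0;
}
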
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec_tx.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec_tx.h
new file mode 100644
index 000000000..2e35a8c77
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ethdev_sec_tx.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2020 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_ETHDEV_SEC_TX_H__
+#define __OTX2_ETHDEV_SEC_TX_H__
+
+#include <rte_security.h>
+#include <rte_mbuf.h>
+
+#include "otx2_ethdev_sec.h"
+
+struct otx2_ipsec_fp_out_hdr {
+ uint32_t ip_id;
+ uint32_t seq;
+ uint8_t iv[16];
+};
+
+static __rte_always_inline int32_t
+otx2_ipsec_fp_out_rlen_get(struct otx2_sec_session_ipsec_ip *sess,
+ uint32_t plen)
+{
+ uint32_t enc_payload_len;
+
+ enc_payload_len = RTE_ALIGN_CEIL(plen + sess->roundup_len,
+ sess->roundup_byte);
+
+ return sess->partial_len + enc_payload_len;
+}
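
A worked example of the length calculation above, as a standalone sketch: the encrypted payload is the plaintext plus the session's fixed round-up bytes, aligned up to the cipher block size, and the total output adds the session's precomputed partial (header/trailer) length. The per-session values below are illustrative only and assume a power-of-two alignment, as RTE_ALIGN_CEIL does:

#include <stdint.h>
#include <stdio.h>

/* RTE_ALIGN_CEIL equivalent for power-of-two alignments */
static uint32_t align_ceil(uint32_t v, uint32_t align)
{
	return (v + align - 1) & ~(align - 1);
}

int main(void)
{
	/* Illustrative per-session constants (e.g. AES-CBC + SHA1 tunnel) */
	uint8_t partial_len  = 40; /* outer IP + ESP hdr + IV + ICV       */
	uint8_t roundup_len  = 2;  /* pad-length + next-header bytes      */
	uint8_t roundup_byte = 16; /* cipher block size                   */
	uint32_t plen = 100;       /* inner (plaintext) length            */

	uint32_t enc = align_ceil(plen + roundup_len, roundup_byte); /* 112 */

	printf("rlen = %u\n", partial_len + enc);                    /* 152 */
	return 0;
}
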
+
+static __rte_always_inline void
+otx2_ssogws_head_wait(struct otx2_ssogws *ws);
+
+static __rte_always_inline int
+otx2_sec_event_tx(struct otx2_ssogws *ws, struct rte_event *ev,
+ struct rte_mbuf *m, const struct otx2_eth_txq *txq,
+ const uint32_t offload_flags)
+{
+ uint32_t dlen, rlen, desc_headroom, extend_head, extend_tail;
+ struct otx2_sec_session_ipsec_ip *sess;
+ struct otx2_ipsec_fp_out_hdr *hdr;
+ struct otx2_ipsec_fp_out_sa *sa;
+ uint64_t data_addr, desc_addr;
+ struct otx2_sec_session *priv;
+ struct otx2_cpt_inst_s inst;
+ uint64_t lmt_status;
+ char *data;
+
+ struct desc {
+ struct otx2_cpt_res cpt_res __rte_aligned(OTX2_CPT_RES_ALIGN);
+ struct nix_send_hdr_s nix_hdr
+ __rte_aligned(OTX2_NIX_SEND_DESC_ALIGN);
+ union nix_send_sg_s nix_sg;
+ struct nix_iova_s nix_iova;
+ } *sd;
+
+ priv = get_sec_session_private_data((void *)(m->udata64));
+ sess = &priv->ipsec.ip;
+ sa = &sess->out_sa;
+
+ RTE_ASSERT(sess->cpt_lmtline != NULL);
+ RTE_ASSERT(!(offload_flags & (NIX_TX_OFFLOAD_MBUF_NOFF_F |
+ NIX_TX_OFFLOAD_VLAN_QINQ_F)));
+
+ dlen = rte_pktmbuf_pkt_len(m) + sizeof(*hdr) - RTE_ETHER_HDR_LEN;
+ rlen = otx2_ipsec_fp_out_rlen_get(sess, dlen - sizeof(*hdr));
+
+ RTE_BUILD_BUG_ON(OTX2_CPT_RES_ALIGN % OTX2_NIX_SEND_DESC_ALIGN);
+ RTE_BUILD_BUG_ON(sizeof(sd->cpt_res) % OTX2_NIX_SEND_DESC_ALIGN);
+
+ extend_head = sizeof(*hdr);
+ extend_tail = rlen - dlen;
+
+ desc_headroom = (OTX2_CPT_RES_ALIGN - 1) + sizeof(*sd);
+
+ if (unlikely(!rte_pktmbuf_is_contiguous(m)) ||
+ unlikely(rte_pktmbuf_headroom(m) < extend_head + desc_headroom) ||
+ unlikely(rte_pktmbuf_tailroom(m) < extend_tail)) {
+ goto drop;
+ }
+
+ /*
+ * Extend mbuf data to point to the expected packet buffer for NIX.
+ * This includes the Ethernet header followed by the encrypted IPsec
+ * payload
+ */
+ rte_pktmbuf_append(m, extend_tail);
+ data = rte_pktmbuf_prepend(m, extend_head);
+ data_addr = rte_pktmbuf_mtophys(m);
+
+ /*
+	 * Move the Ethernet header to make room for otx2_ipsec_fp_out_hdr
+	 * just before the IP header.
+ */
+ memcpy(data, data + sizeof(*hdr), RTE_ETHER_HDR_LEN);
+
+ hdr = (struct otx2_ipsec_fp_out_hdr *)(data + RTE_ETHER_HDR_LEN);
+
+ if (sa->ctl.enc_type == OTX2_IPSEC_FP_SA_ENC_AES_GCM) {
+ /* AES-128-GCM */
+ memcpy(hdr->iv, &sa->nonce, 4);
+ memset(hdr->iv + 4, 0, 12); //TODO: make it random
+ } else {
+ /* AES-128-[CBC] + [SHA1] */
+ memset(hdr->iv, 0, 16); //TODO: make it random
+ }
+
+ /* Keep CPT result and NIX send descriptors in headroom */
+ sd = (void *)RTE_PTR_ALIGN(data - desc_headroom, OTX2_CPT_RES_ALIGN);
+ desc_addr = data_addr - RTE_PTR_DIFF(data, sd);
+
+ /* Prepare CPT instruction */
+
+ inst.nixtx_addr = (desc_addr + offsetof(struct desc, nix_hdr)) >> 4;
+ inst.doneint = 0;
+ inst.nixtxl = 1;
+ inst.res_addr = desc_addr + offsetof(struct desc, cpt_res);
+ inst.u64[2] = 0;
+ inst.u64[3] = 0;
+ inst.wqe_ptr = desc_addr >> 3; /* FIXME: Handle errors */
+ inst.qord = 1;
+ inst.opcode = OTX2_CPT_OP_INLINE_IPSEC_OUTB;
+ inst.dlen = dlen;
+ inst.dptr = data_addr + RTE_ETHER_HDR_LEN;
+ inst.u64[7] = sess->inst_w7;
+
+ /* First word contains 8 bit completion code & 8 bit uc comp code */
+ sd->cpt_res.u16[0] = 0;
+
+ /* Prepare NIX send descriptors for output expected from CPT */
+
+ sd->nix_hdr.w0.u = 0;
+ sd->nix_hdr.w1.u = 0;
+ sd->nix_hdr.w0.sq = txq->sq;
+ sd->nix_hdr.w0.sizem1 = 1;
+ sd->nix_hdr.w0.total = rte_pktmbuf_data_len(m);
+ sd->nix_hdr.w0.aura = npa_lf_aura_handle_to_aura(m->pool->pool_id);
+
+ sd->nix_sg.u = 0;
+ sd->nix_sg.subdc = NIX_SUBDC_SG;
+ sd->nix_sg.ld_type = NIX_SENDLDTYPE_LDD;
+ sd->nix_sg.segs = 1;
+ sd->nix_sg.seg1_size = rte_pktmbuf_data_len(m);
+
+ sd->nix_iova.addr = rte_mbuf_data_iova(m);
+
+ /* Mark mempool object as "put" since it is freed by NIX */
+ __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+
+ if (!ev->sched_type)
+ otx2_ssogws_head_wait(ws);
+
+ inst.param1 = sess->esn_hi >> 16;
+ inst.param2 = sess->esn_hi & 0xffff;
+
+ hdr->seq = rte_cpu_to_be_32(sess->seq);
+ hdr->ip_id = rte_cpu_to_be_32(sess->ip_id);
+
+ sess->ip_id++;
+ sess->esn++;
+
+ rte_cio_wmb();
+
+ do {
+ otx2_lmt_mov(sess->cpt_lmtline, &inst, 2);
+ lmt_status = otx2_lmt_submit(sess->cpt_nq_reg);
+ } while (lmt_status == 0);
+
+ return 1;
+
+drop:
+ if (offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+ /* Don't free if reference count > 1 */
+ if (rte_pktmbuf_prefree_seg(m) == NULL)
+ return 0;
+ }
+ rte_pktmbuf_free(m);
+ return 0;
+}
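
The descriptor placement above reserves (OTX2_CPT_RES_ALIGN - 1) + sizeof(struct desc) bytes of headroom before the packet data and then aligns up, so the CPT result and NIX send descriptors are naturally aligned and still end before the data. A standalone sketch of that pointer arithmetic with illustrative numbers (RTE_PTR_ALIGN aligns up, as the helper below does):

#include <stdint.h>
#include <stdio.h>

#define RES_ALIGN 16u

/* Align-up on an integer address; equivalent to RTE_PTR_ALIGN on a pointer */
static uintptr_t ptr_align_up(uintptr_t p, uintptr_t a)
{
	return (p + a - 1) & ~(a - 1);
}

int main(void)
{
	uintptr_t data = 0x100012a;  /* hypothetical start of mbuf data    */
	size_t desc_sz = 48;         /* hypothetical sizeof(struct desc)   */
	size_t headroom = (RES_ALIGN - 1) + desc_sz;

	uintptr_t sd = ptr_align_up(data - headroom, RES_ALIGN);

	/* sd is RES_ALIGN aligned and the whole descriptor ends before data */
	printf("sd=0x%lx end=0x%lx data=0x%lx aligned=%d\n",
	       (unsigned long)sd, (unsigned long)(sd + desc_sz),
	       (unsigned long)data, (sd % RES_ALIGN) == 0);
	return 0;
}
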
+
+#endif /* __OTX2_ETHDEV_SEC_TX_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.c
new file mode 100644
index 000000000..13a76e441
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.c
@@ -0,0 +1,1007 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_ethdev.h"
+#include "otx2_ethdev_sec.h"
+#include "otx2_flow.h"
+
+int
+otx2_flow_free_all_resources(struct otx2_eth_dev *hw)
+{
+ struct otx2_npc_flow_info *npc = &hw->npc_flow;
+ struct otx2_mbox *mbox = hw->mbox;
+ struct otx2_mcam_ents_info *info;
+ struct rte_bitmap *bmap;
+ struct rte_flow *flow;
+ int entry_count = 0;
+ int rc, idx;
+
+ for (idx = 0; idx < npc->flow_max_priority; idx++) {
+ info = &npc->flow_entry_info[idx];
+ entry_count += info->live_ent;
+ }
+
+ if (entry_count == 0)
+ return 0;
+
+ /* Free all MCAM entries allocated */
+ rc = otx2_flow_mcam_free_all_entries(mbox);
+
+ /* Free any MCAM counters and delete flow list */
+ for (idx = 0; idx < npc->flow_max_priority; idx++) {
+ while ((flow = TAILQ_FIRST(&npc->flow_list[idx])) != NULL) {
+ if (flow->ctr_id != NPC_COUNTER_NONE)
+ rc |= otx2_flow_mcam_free_counter(mbox,
+ flow->ctr_id);
+
+ TAILQ_REMOVE(&npc->flow_list[idx], flow, next);
+ rte_free(flow);
+ bmap = npc->live_entries[flow->priority];
+ rte_bitmap_clear(bmap, flow->mcam_id);
+ }
+ info = &npc->flow_entry_info[idx];
+ info->free_ent = 0;
+ info->live_ent = 0;
+ }
+ return rc;
+}
+
+
+static int
+flow_program_npc(struct otx2_parse_state *pst, struct otx2_mbox *mbox,
+ struct otx2_npc_flow_info *flow_info)
+{
+ /* This is non-LDATA part in search key */
+ uint64_t key_data[2] = {0ULL, 0ULL};
+ uint64_t key_mask[2] = {0ULL, 0ULL};
+ int intf = pst->flow->nix_intf;
+ int key_len, bit = 0, index;
+ int off, idx, data_off = 0;
+ uint8_t lid, mask, data;
+ uint16_t layer_info;
+ uint64_t lt, flags;
+
+
+ /* Skip till Layer A data start */
+ while (bit < NPC_PARSE_KEX_S_LA_OFFSET) {
+ if (flow_info->keyx_supp_nmask[intf] & (1 << bit))
+ data_off++;
+ bit++;
+ }
+
+ /* Each bit represents 1 nibble */
+ data_off *= 4;
+
+ index = 0;
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ /* Offset in key */
+ off = NPC_PARSE_KEX_S_LID_OFFSET(lid);
+ lt = pst->lt[lid] & 0xf;
+ flags = pst->flags[lid] & 0xff;
+
+ /* NPC_LAYER_KEX_S */
+ layer_info = ((flow_info->keyx_supp_nmask[intf] >> off) & 0x7);
+
+ if (layer_info) {
+ for (idx = 0; idx <= 2 ; idx++) {
+ if (layer_info & (1 << idx)) {
+ if (idx == 2)
+ data = lt;
+ else if (idx == 1)
+ data = ((flags >> 4) & 0xf);
+ else
+ data = (flags & 0xf);
+
+ if (data_off >= 64) {
+ data_off = 0;
+ index++;
+ }
+ key_data[index] |= ((uint64_t)data <<
+ data_off);
+ mask = 0xf;
+ if (lt == 0)
+ mask = 0;
+ key_mask[index] |= ((uint64_t)mask <<
+ data_off);
+ data_off += 4;
+ }
+ }
+ }
+ }
+
+ otx2_npc_dbg("Npc prog key data0: 0x%" PRIx64 ", data1: 0x%" PRIx64,
+ key_data[0], key_data[1]);
+
+ /* Copy this into mcam string */
+ key_len = (pst->npc->keyx_len[intf] + 7) / 8;
+ otx2_npc_dbg("Key_len = %d", key_len);
+ memcpy(pst->flow->mcam_data, key_data, key_len);
+ memcpy(pst->flow->mcam_mask, key_mask, key_len);
+
+ otx2_npc_dbg("Final flow data");
+ for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
+ otx2_npc_dbg("data[%d]: 0x%" PRIx64 ", mask[%d]: 0x%" PRIx64,
+ idx, pst->flow->mcam_data[idx],
+ idx, pst->flow->mcam_mask[idx]);
+ }
+
+ /*
+	 * Now we have the MCAM data and mask formatted as
+	 * [key_len/4 nibbles][0 or 1 nibble hole][data];
+	 * the hole is present if key_len is an odd number of nibbles.
+	 * The MCAM data must be split into 64-bit + 48-bit segments
+	 * for each bank word W0, W1.
+ */
+
+ return otx2_flow_mcam_alloc_and_write(pst->flow, mbox, pst, flow_info);
+}
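
flow_program_npc() above packs one 4-bit value per enabled key nibble, spilling from key_data[0] into key_data[1] once the bit offset reaches 64. A minimal standalone sketch of that packing step with an arbitrary nibble value and starting offset:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t key_data[2] = {0, 0};
	int data_off = 60;  /* bit offset of the next free nibble (illustrative) */
	int index = 0;
	uint8_t data = 0xA; /* 4-bit value, e.g. a layer type                    */

	/* Same wrap rule as above: roll over into the next 64-bit word */
	if (data_off >= 64) {
		data_off = 0;
		index++;
	}
	key_data[index] |= (uint64_t)data << data_off;
	data_off += 4;
	/* The next nibble would wrap and land in key_data[1] at bit 0 */

	printf("key_data[0]=0x%016llx key_data[1]=0x%016llx next_off=%d\n",
	       (unsigned long long)key_data[0],
	       (unsigned long long)key_data[1], data_off);
	return 0;
}
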
+
+static int
+flow_parse_attr(struct rte_eth_dev *eth_dev,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ struct otx2_eth_dev *dev = eth_dev->data->dev_private;
+ const char *errmsg = NULL;
+
+ if (attr == NULL)
+ errmsg = "Attribute can't be empty";
+ else if (attr->group)
+ errmsg = "Groups are not supported";
+ else if (attr->priority >= dev->npc_flow.flow_max_priority)
+		errmsg = "Priority should be within the specified range";
+ else if ((!attr->egress && !attr->ingress) ||
+ (attr->egress && attr->ingress))
+ errmsg = "Exactly one of ingress or egress must be set";
+
+ if (errmsg != NULL) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, errmsg);
+ return -ENOTSUP;
+ }
+
+ if (attr->ingress)
+ flow->nix_intf = OTX2_INTF_RX;
+ else
+ flow->nix_intf = OTX2_INTF_TX;
+
+ flow->priority = attr->priority;
+ return 0;
+}
+
+static inline int
+flow_get_free_rss_grp(struct rte_bitmap *bmap,
+ uint32_t size, uint32_t *pos)
+{
+ for (*pos = 0; *pos < size; ++*pos) {
+ if (!rte_bitmap_get(bmap, *pos))
+ break;
+ }
+
+ return *pos < size ? 0 : -1;
+}
+
+static int
+flow_configure_rss_action(struct otx2_eth_dev *dev,
+ const struct rte_flow_action_rss *rss,
+ uint8_t *alg_idx, uint32_t *rss_grp,
+ int mcam_index)
+{
+ struct otx2_npc_flow_info *flow_info = &dev->npc_flow;
+ uint16_t reta[NIX_RSS_RETA_SIZE_MAX];
+ uint32_t flowkey_cfg, grp_aval, i;
+ uint16_t *ind_tbl = NULL;
+ uint8_t flowkey_algx;
+ int rc;
+
+ rc = flow_get_free_rss_grp(flow_info->rss_grp_entries,
+ flow_info->rss_grps, &grp_aval);
+	/* RSS group 0 is not usable for the flow RSS action */
+ if (rc < 0 || grp_aval == 0)
+ return -ENOSPC;
+
+ *rss_grp = grp_aval;
+
+ otx2_nix_rss_set_key(dev, (uint8_t *)(uintptr_t)rss->key,
+ rss->key_len);
+
+	/* If the queue count passed in the RSS action is less than
+	 * the HW-configured RETA size, replicate the RSS action
+	 * queue list across the HW RETA table.
+ */
+ if (dev->rss_info.rss_size > rss->queue_num) {
+ ind_tbl = reta;
+
+ for (i = 0; i < (dev->rss_info.rss_size / rss->queue_num); i++)
+ memcpy(reta + i * rss->queue_num, rss->queue,
+ sizeof(uint16_t) * rss->queue_num);
+
+ i = dev->rss_info.rss_size % rss->queue_num;
+ if (i)
+ memcpy(&reta[dev->rss_info.rss_size] - i,
+ rss->queue, i * sizeof(uint16_t));
+ } else {
+ ind_tbl = (uint16_t *)(uintptr_t)rss->queue;
+ }
+
+ rc = otx2_nix_rss_tbl_init(dev, *rss_grp, ind_tbl);
+ if (rc) {
+ otx2_err("Failed to init rss table rc = %d", rc);
+ return rc;
+ }
+
+ flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss->types, rss->level);
+
+ rc = otx2_rss_set_hf(dev, flowkey_cfg, &flowkey_algx,
+ *rss_grp, mcam_index);
+ if (rc) {
+ otx2_err("Failed to set rss hash function rc = %d", rc);
+ return rc;
+ }
+
+ *alg_idx = flowkey_algx;
+
+ rte_bitmap_set(flow_info->rss_grp_entries, *rss_grp);
+
+ return 0;
+}
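
When the RSS action supplies fewer queues than the hardware RETA size, the code above tiles the queue list across the table and fills the remainder from the start of the list. A standalone sketch of that replication with illustrative sizes (a 64-entry RETA and 5 queues):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t reta[64];                         /* hypothetical HW RETA size     */
	const uint16_t queues[] = {0, 1, 2, 3, 4}; /* queues from the RSS action    */
	unsigned int rss_size = 64, qn = 5, i;

	/* Replicate the queue list across the table, as the driver does above */
	for (i = 0; i < rss_size / qn; i++)
		memcpy(reta + i * qn, queues, sizeof(uint16_t) * qn);
	i = rss_size % qn;
	if (i)
		memcpy(&reta[rss_size] - i, queues, i * sizeof(uint16_t));

	/* reta = 0 1 2 3 4 0 1 2 3 4 ... with the last 4 slots being 0 1 2 3 */
	for (i = 0; i < rss_size; i++)
		printf("%u%c", reta[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}
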
+
+
+static int
+flow_program_rss_action(struct rte_eth_dev *eth_dev,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow)
+{
+ struct otx2_eth_dev *dev = eth_dev->data->dev_private;
+ const struct rte_flow_action_rss *rss;
+ uint32_t rss_grp;
+ uint8_t alg_idx;
+ int rc;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ rss = (const struct rte_flow_action_rss *)actions->conf;
+
+ rc = flow_configure_rss_action(dev,
+ rss, &alg_idx, &rss_grp,
+ flow->mcam_id);
+ if (rc)
+ return rc;
+
+ flow->npc_action |=
+ ((uint64_t)(alg_idx & NIX_RSS_ACT_ALG_MASK) <<
+ NIX_RSS_ACT_ALG_OFFSET) |
+ ((uint64_t)(rss_grp & NIX_RSS_ACT_GRP_MASK) <<
+ NIX_RSS_ACT_GRP_OFFSET);
+ }
+ }
+ return 0;
+}
+
+static int
+flow_free_rss_action(struct rte_eth_dev *eth_dev,
+ struct rte_flow *flow)
+{
+ struct otx2_eth_dev *dev = eth_dev->data->dev_private;
+ struct otx2_npc_flow_info *npc = &dev->npc_flow;
+ uint32_t rss_grp;
+
+ if (flow->npc_action & NIX_RX_ACTIONOP_RSS) {
+ rss_grp = (flow->npc_action >> NIX_RSS_ACT_GRP_OFFSET) &
+ NIX_RSS_ACT_GRP_MASK;
+ if (rss_grp == 0 || rss_grp >= npc->rss_grps)
+ return -EINVAL;
+
+ rte_bitmap_clear(npc->rss_grp_entries, rss_grp);
+ }
+
+ return 0;
+}
+
+static int
+flow_update_sec_tt(struct rte_eth_dev *eth_dev,
+ const struct rte_flow_action actions[])
+{
+ int rc = 0;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+ rc = otx2_eth_sec_update_tag_type(eth_dev);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int
+flow_parse_meta_items(__rte_unused struct otx2_parse_state *pst)
+{
+ otx2_npc_dbg("Meta Item");
+ return 0;
+}
+
+/*
+ * Parse function of each layer:
+ * - Consume one or more relevant pattern items.
+ * - Update parse_state.
+ * - Set parse_state.pattern to the last item consumed.
+ * - Set an appropriate error code/message when returning an error.
+ */
+typedef int (*flow_parse_stage_func_t)(struct otx2_parse_state *pst);
+
+static int
+flow_parse_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow,
+ struct otx2_parse_state *pst)
+{
+ flow_parse_stage_func_t parse_stage_funcs[] = {
+ flow_parse_meta_items,
+ otx2_flow_parse_higig2_hdr,
+ otx2_flow_parse_la,
+ otx2_flow_parse_lb,
+ otx2_flow_parse_lc,
+ otx2_flow_parse_ld,
+ otx2_flow_parse_le,
+ otx2_flow_parse_lf,
+ otx2_flow_parse_lg,
+ otx2_flow_parse_lh,
+ };
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ uint8_t layer = 0;
+ int key_offset;
+ int rc;
+
+ if (pattern == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ "pattern is NULL");
+ return -EINVAL;
+ }
+
+ memset(pst, 0, sizeof(*pst));
+ pst->npc = &hw->npc_flow;
+ pst->error = error;
+ pst->flow = flow;
+
+ /* Use integral byte offset */
+ key_offset = pst->npc->keyx_len[flow->nix_intf];
+ key_offset = (key_offset + 7) / 8;
+
+ /* Location where LDATA would begin */
+ pst->mcam_data = (uint8_t *)flow->mcam_data;
+ pst->mcam_mask = (uint8_t *)flow->mcam_mask;
+
+ while (pattern->type != RTE_FLOW_ITEM_TYPE_END &&
+ layer < RTE_DIM(parse_stage_funcs)) {
+ otx2_npc_dbg("Pattern type = %d", pattern->type);
+
+ /* Skip place-holders */
+ pattern = otx2_flow_skip_void_and_any_items(pattern);
+
+ pst->pattern = pattern;
+ otx2_npc_dbg("Is tunnel = %d, layer = %d", pst->tunnel, layer);
+ rc = parse_stage_funcs[layer](pst);
+ if (rc != 0)
+ return -rte_errno;
+
+ layer++;
+
+ /*
+		 * The parse stage function sets pst->pattern to
+		 * one past the last item it consumed.
+ */
+ pattern = pst->pattern;
+
+ if (pst->terminate)
+ break;
+ }
+
+ /* Skip trailing place-holders */
+ pattern = otx2_flow_skip_void_and_any_items(pattern);
+
+ /* Are there more items than what we can handle? */
+ if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "unsupported item in the sequence");
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+static int
+flow_parse_rule(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow,
+ struct otx2_parse_state *pst)
+{
+ int err;
+
+ /* Check attributes */
+ err = flow_parse_attr(dev, attr, error, flow);
+ if (err)
+ return err;
+
+ /* Check actions */
+ err = otx2_flow_parse_actions(dev, attr, actions, error, flow);
+ if (err)
+ return err;
+
+ /* Check pattern */
+ err = flow_parse_pattern(dev, pattern, error, flow, pst);
+ if (err)
+ return err;
+
+ /* Check for overlaps? */
+ return 0;
+}
+
+static int
+otx2_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct otx2_parse_state parse_state;
+ struct rte_flow flow;
+
+ memset(&flow, 0, sizeof(flow));
+ return flow_parse_rule(dev, attr, pattern, actions, error, &flow,
+ &parse_state);
+}
+
+static struct rte_flow *
+otx2_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ struct otx2_parse_state parse_state;
+ struct otx2_mbox *mbox = hw->mbox;
+ struct rte_flow *flow, *flow_iter;
+ struct otx2_flow_list *list;
+ int rc;
+
+ flow = rte_zmalloc("otx2_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Memory allocation failed");
+ return NULL;
+ }
+ memset(flow, 0, sizeof(*flow));
+
+ rc = flow_parse_rule(dev, attr, pattern, actions, error, flow,
+ &parse_state);
+ if (rc != 0)
+ goto err_exit;
+
+ rc = flow_program_npc(&parse_state, mbox, &hw->npc_flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to insert filter");
+ goto err_exit;
+ }
+
+ rc = flow_program_rss_action(dev, actions, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to program rss action");
+ goto err_exit;
+ }
+
+ if (hw->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ rc = flow_update_sec_tt(dev, actions);
+ if (rc != 0) {
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to update tt with sec act");
+ goto err_exit;
+ }
+ }
+
+ list = &hw->npc_flow.flow_list[flow->priority];
+ /* List in ascending order of mcam entries */
+ TAILQ_FOREACH(flow_iter, list, next) {
+ if (flow_iter->mcam_id > flow->mcam_id) {
+ TAILQ_INSERT_BEFORE(flow_iter, flow, next);
+ return flow;
+ }
+ }
+
+ TAILQ_INSERT_TAIL(list, flow, next);
+ return flow;
+
+err_exit:
+ rte_free(flow);
+ return NULL;
+}
+
+static int
+otx2_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ struct otx2_npc_flow_info *npc = &hw->npc_flow;
+ struct otx2_mbox *mbox = hw->mbox;
+ struct rte_bitmap *bmap;
+ uint16_t match_id;
+ int rc;
+
+ match_id = (flow->npc_action >> NIX_RX_ACT_MATCH_OFFSET) &
+ NIX_RX_ACT_MATCH_MASK;
+
+ if (match_id && match_id < OTX2_FLOW_ACTION_FLAG_DEFAULT) {
+ if (rte_atomic32_read(&npc->mark_actions) == 0)
+ return -EINVAL;
+
+ /* Clear mark offload flag if there are no more mark actions */
+ if (rte_atomic32_sub_return(&npc->mark_actions, 1) == 0) {
+ hw->rx_offload_flags &= ~NIX_RX_OFFLOAD_MARK_UPDATE_F;
+ otx2_eth_set_rx_function(dev);
+ }
+ }
+
+ rc = flow_free_rss_action(dev, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to free rss action");
+ }
+
+ rc = otx2_flow_mcam_free_entry(mbox, flow->mcam_id);
+ if (rc != 0) {
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to destroy filter");
+ }
+
+ TAILQ_REMOVE(&npc->flow_list[flow->priority], flow, next);
+
+ bmap = npc->live_entries[flow->priority];
+ rte_bitmap_clear(bmap, flow->mcam_id);
+
+ rte_free(flow);
+ return 0;
+}
+
+static int
+otx2_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ int rc;
+
+ rc = otx2_flow_free_all_resources(hw);
+ if (rc) {
+		otx2_err("Error when deleting NPC MCAM entries, counters");
+ rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Failed to flush filter");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+otx2_flow_isolate(struct rte_eth_dev *dev __rte_unused,
+ int enable __rte_unused,
+ struct rte_flow_error *error)
+{
+ /*
+	 * If isolation were supported, the default MCAM entry for
+	 * this port would need to be uninstalled here.
+ */
+
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Flow isolation not supported");
+
+ return -rte_errno;
+}
+
+static int
+otx2_flow_query(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ const struct rte_flow_action *action,
+ void *data,
+ struct rte_flow_error *error)
+{
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ struct rte_flow_query_count *query = data;
+ struct otx2_mbox *mbox = hw->mbox;
+ const char *errmsg = NULL;
+ int errcode = ENOTSUP;
+ int rc;
+
+ if (action->type != RTE_FLOW_ACTION_TYPE_COUNT) {
+ errmsg = "Only COUNT is supported in query";
+ goto err_exit;
+ }
+
+ if (flow->ctr_id == NPC_COUNTER_NONE) {
+ errmsg = "Counter is not available";
+ goto err_exit;
+ }
+
+ rc = otx2_flow_mcam_read_counter(mbox, flow->ctr_id, &query->hits);
+ if (rc != 0) {
+ errcode = EIO;
+ errmsg = "Error reading flow counter";
+ goto err_exit;
+ }
+ query->hits_set = 1;
+ query->bytes_set = 0;
+
+ if (query->reset)
+ rc = otx2_flow_mcam_clear_counter(mbox, flow->ctr_id);
+ if (rc != 0) {
+ errcode = EIO;
+ errmsg = "Error clearing flow counter";
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ rte_flow_error_set(error, errcode,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ errmsg);
+ return -rte_errno;
+}
+
+const struct rte_flow_ops otx2_flow_ops = {
+ .validate = otx2_flow_validate,
+ .create = otx2_flow_create,
+ .destroy = otx2_flow_destroy,
+ .flush = otx2_flow_flush,
+ .query = otx2_flow_query,
+ .isolate = otx2_flow_isolate,
+};
+
+static int
+flow_supp_key_len(uint32_t supp_mask)
+{
+ int nib_count = 0;
+ while (supp_mask) {
+ nib_count++;
+ supp_mask &= (supp_mask - 1);
+ }
+ return nib_count * 4;
+}
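
flow_supp_key_len() above is a population count over the supported-nibble mask; each set bit is one extracted nibble, i.e. 4 bits of MCAM key. A standalone sketch showing the same loop on an illustrative mask:

#include <stdint.h>
#include <stdio.h>

/* Same population-count loop as flow_supp_key_len() above */
static int supp_key_len(uint32_t supp_mask)
{
	int nib_count = 0;

	while (supp_mask) {
		nib_count++;
		supp_mask &= (supp_mask - 1); /* clear the lowest set bit */
	}
	return nib_count * 4;                 /* 4 key bits per enabled nibble */
}

int main(void)
{
	/* Hypothetical nibble mask with 9 bits set -> 36-bit key */
	printf("%d\n", supp_key_len(0x000001ffu));
	return 0;
}
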
+
+/* Refer to the HRM registers:
+ * NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG
+ * and
+ * NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG
+ */
+#define BYTESM1_SHIFT 16
+#define HDR_OFF_SHIFT 8
+static void
+flow_update_kex_info(struct npc_xtract_info *xtract_info,
+ uint64_t val)
+{
+ xtract_info->len = ((val >> BYTESM1_SHIFT) & 0xf) + 1;
+ xtract_info->hdr_off = (val >> HDR_OFF_SHIFT) & 0xff;
+ xtract_info->key_off = val & 0x3f;
+ xtract_info->enable = ((val >> 7) & 0x1);
+ xtract_info->flags_enable = ((val >> 6) & 0x1);
+}
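
flow_update_kex_info() above unpacks one such CFG word: BYTESM1 in bits [19:16] (extract length is the field plus one), the header offset in bits [15:8], the key offset in bits [5:0], and the enable/flags-enable bits at positions 7 and 6. A standalone sketch decoding a hypothetical register value with the same shifts and masks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical NPC_AF_INTF()_LID()_LT()_LD()_CFG value */
	uint64_t val = (3ULL << 16) | /* BYTESM1 = 3 -> extract 4 bytes    */
	               (12ULL << 8) | /* HDR_OFFSET  -> byte 12 of header  */
	               (1ULL << 7)  | /* ENA                               */
	               0x20;          /* KEY_OFFSET  -> byte 32 of the key */

	printf("len=%u hdr_off=%u key_off=%u ena=%u flags_ena=%u\n",
	       (unsigned)(((val >> 16) & 0xf) + 1),
	       (unsigned)((val >> 8) & 0xff),
	       (unsigned)(val & 0x3f),
	       (unsigned)((val >> 7) & 0x1),
	       (unsigned)((val >> 6) & 0x1));
	return 0;
}
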
+
+static void
+flow_process_mkex_cfg(struct otx2_npc_flow_info *npc,
+ struct npc_get_kex_cfg_rsp *kex_rsp)
+{
+ volatile uint64_t (*q)[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT]
+ [NPC_MAX_LD];
+ struct npc_xtract_info *x_info = NULL;
+ int lid, lt, ld, fl, ix;
+ otx2_dxcfg_t *p;
+ uint64_t keyw;
+ uint64_t val;
+
+ npc->keyx_supp_nmask[NPC_MCAM_RX] =
+ kex_rsp->rx_keyx_cfg & 0x7fffffffULL;
+ npc->keyx_supp_nmask[NPC_MCAM_TX] =
+ kex_rsp->tx_keyx_cfg & 0x7fffffffULL;
+ npc->keyx_len[NPC_MCAM_RX] =
+ flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_RX]);
+ npc->keyx_len[NPC_MCAM_TX] =
+ flow_supp_key_len(npc->keyx_supp_nmask[NPC_MCAM_TX]);
+
+ keyw = (kex_rsp->rx_keyx_cfg >> 32) & 0x7ULL;
+ npc->keyw[NPC_MCAM_RX] = keyw;
+ keyw = (kex_rsp->tx_keyx_cfg >> 32) & 0x7ULL;
+ npc->keyw[NPC_MCAM_TX] = keyw;
+
+ /* Update KEX_LD_FLAG */
+ for (ix = 0; ix < NPC_MAX_INTF; ix++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ for (fl = 0; fl < NPC_MAX_LFL; fl++) {
+ x_info =
+ &npc->prx_fxcfg[ix][ld][fl].xtract[0];
+ val = kex_rsp->intf_ld_flags[ix][ld][fl];
+ flow_update_kex_info(x_info, val);
+ }
+ }
+ }
+
+ /* Update LID, LT and LDATA cfg */
+ p = &npc->prx_dxcfg;
+ q = (volatile uint64_t (*)[][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD])
+ (&kex_rsp->intf_lid_lt_ld);
+ for (ix = 0; ix < NPC_MAX_INTF; ix++) {
+ for (lid = 0; lid < NPC_MAX_LID; lid++) {
+ for (lt = 0; lt < NPC_MAX_LT; lt++) {
+ for (ld = 0; ld < NPC_MAX_LD; ld++) {
+ x_info = &(*p)[ix][lid][lt].xtract[ld];
+ val = (*q)[ix][lid][lt][ld];
+ flow_update_kex_info(x_info, val);
+ }
+ }
+ }
+ }
+ /* Update LDATA Flags cfg */
+ npc->prx_lfcfg[0].i = kex_rsp->kex_ld_flags[0];
+ npc->prx_lfcfg[1].i = kex_rsp->kex_ld_flags[1];
+}
+
+static struct otx2_idev_kex_cfg *
+flow_intra_dev_kex_cfg(void)
+{
+ static const char name[] = "octeontx2_intra_device_kex_conf";
+ struct otx2_idev_kex_cfg *idev;
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(name);
+ if (mz)
+ return mz->addr;
+
+ /* Request for the first time */
+ mz = rte_memzone_reserve_aligned(name, sizeof(struct otx2_idev_kex_cfg),
+ SOCKET_ID_ANY, 0, OTX2_ALIGN);
+ if (mz) {
+ idev = mz->addr;
+ rte_atomic16_set(&idev->kex_refcnt, 0);
+ return idev;
+ }
+ return NULL;
+}
+
+static int
+flow_fetch_kex_cfg(struct otx2_eth_dev *dev)
+{
+ struct otx2_npc_flow_info *npc = &dev->npc_flow;
+ struct npc_get_kex_cfg_rsp *kex_rsp;
+ struct otx2_mbox *mbox = dev->mbox;
+ char mkex_pfl_name[MKEX_NAME_LEN];
+ struct otx2_idev_kex_cfg *idev;
+ int rc = 0;
+
+ idev = flow_intra_dev_kex_cfg();
+ if (!idev)
+ return -ENOMEM;
+
+	/* Has the kex_cfg already been fetched by another driver? */
+ if (rte_atomic16_add_return(&idev->kex_refcnt, 1) == 1) {
+ /* Call mailbox to get key & data size */
+ (void)otx2_mbox_alloc_msg_npc_get_kex_cfg(mbox);
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&kex_rsp);
+ if (rc) {
+ otx2_err("Failed to fetch NPC keyx config");
+ goto done;
+ }
+ memcpy(&idev->kex_cfg, kex_rsp,
+ sizeof(struct npc_get_kex_cfg_rsp));
+ }
+
+ otx2_mbox_memcpy(mkex_pfl_name,
+ idev->kex_cfg.mkex_pfl_name, MKEX_NAME_LEN);
+
+ strlcpy((char *)dev->mkex_pfl_name,
+ mkex_pfl_name, sizeof(dev->mkex_pfl_name));
+
+ flow_process_mkex_cfg(npc, &idev->kex_cfg);
+
+done:
+ return rc;
+}
+
+int
+otx2_flow_init(struct otx2_eth_dev *hw)
+{
+ uint8_t *mem = NULL, *nix_mem = NULL, *npc_mem = NULL;
+ struct otx2_npc_flow_info *npc = &hw->npc_flow;
+ uint32_t bmap_sz;
+ int rc = 0, idx;
+
+ rc = flow_fetch_kex_cfg(hw);
+ if (rc) {
+ otx2_err("Failed to fetch NPC keyx config from idev");
+ return rc;
+ }
+
+ rte_atomic32_init(&npc->mark_actions);
+
+ npc->mcam_entries = NPC_MCAM_TOT_ENTRIES >> npc->keyw[NPC_MCAM_RX];
+ /* Free, free_rev, live and live_rev entries */
+ bmap_sz = rte_bitmap_get_memory_footprint(npc->mcam_entries);
+ mem = rte_zmalloc(NULL, 4 * bmap_sz * npc->flow_max_priority,
+ RTE_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ otx2_err("Bmap alloc failed");
+ rc = -ENOMEM;
+ return rc;
+ }
+
+ npc->flow_entry_info = rte_zmalloc(NULL, npc->flow_max_priority
+ * sizeof(struct otx2_mcam_ents_info),
+ 0);
+ if (npc->flow_entry_info == NULL) {
+ otx2_err("flow_entry_info alloc failed");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ npc->free_entries = rte_zmalloc(NULL, npc->flow_max_priority
+ * sizeof(struct rte_bitmap *),
+ 0);
+ if (npc->free_entries == NULL) {
+ otx2_err("free_entries alloc failed");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ npc->free_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
+ * sizeof(struct rte_bitmap *),
+ 0);
+ if (npc->free_entries_rev == NULL) {
+ otx2_err("free_entries_rev alloc failed");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ npc->live_entries = rte_zmalloc(NULL, npc->flow_max_priority
+ * sizeof(struct rte_bitmap *),
+ 0);
+ if (npc->live_entries == NULL) {
+ otx2_err("live_entries alloc failed");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ npc->live_entries_rev = rte_zmalloc(NULL, npc->flow_max_priority
+ * sizeof(struct rte_bitmap *),
+ 0);
+ if (npc->live_entries_rev == NULL) {
+ otx2_err("live_entries_rev alloc failed");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ npc->flow_list = rte_zmalloc(NULL, npc->flow_max_priority
+ * sizeof(struct otx2_flow_list),
+ 0);
+ if (npc->flow_list == NULL) {
+ otx2_err("flow_list alloc failed");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ npc_mem = mem;
+ for (idx = 0; idx < npc->flow_max_priority; idx++) {
+ TAILQ_INIT(&npc->flow_list[idx]);
+
+ npc->free_entries[idx] =
+ rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
+ mem += bmap_sz;
+
+ npc->free_entries_rev[idx] =
+ rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
+ mem += bmap_sz;
+
+ npc->live_entries[idx] =
+ rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
+ mem += bmap_sz;
+
+ npc->live_entries_rev[idx] =
+ rte_bitmap_init(npc->mcam_entries, mem, bmap_sz);
+ mem += bmap_sz;
+
+ npc->flow_entry_info[idx].free_ent = 0;
+ npc->flow_entry_info[idx].live_ent = 0;
+ npc->flow_entry_info[idx].max_id = 0;
+ npc->flow_entry_info[idx].min_id = ~(0);
+ }
+
+ npc->rss_grps = NIX_RSS_GRPS;
+
+ bmap_sz = rte_bitmap_get_memory_footprint(npc->rss_grps);
+ nix_mem = rte_zmalloc(NULL, bmap_sz, RTE_CACHE_LINE_SIZE);
+ if (nix_mem == NULL) {
+ otx2_err("Bmap alloc failed");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ npc->rss_grp_entries = rte_bitmap_init(npc->rss_grps, nix_mem, bmap_sz);
+
+	/* Group 0 will be used for default RSS;
+	 * groups 1-7 will be used for the rte_flow RSS action.
+ */
+ rte_bitmap_set(npc->rss_grp_entries, 0);
+
+ return 0;
+
+err:
+ if (npc->flow_list)
+ rte_free(npc->flow_list);
+ if (npc->live_entries_rev)
+ rte_free(npc->live_entries_rev);
+ if (npc->live_entries)
+ rte_free(npc->live_entries);
+ if (npc->free_entries_rev)
+ rte_free(npc->free_entries_rev);
+ if (npc->free_entries)
+ rte_free(npc->free_entries);
+ if (npc->flow_entry_info)
+ rte_free(npc->flow_entry_info);
+ if (npc_mem)
+ rte_free(npc_mem);
+ return rc;
+}
+
+int
+otx2_flow_fini(struct otx2_eth_dev *hw)
+{
+ struct otx2_npc_flow_info *npc = &hw->npc_flow;
+ int rc;
+
+ rc = otx2_flow_free_all_resources(hw);
+ if (rc) {
+ otx2_err("Error when deleting NPC MCAM entries, counters");
+ return rc;
+ }
+
+ if (npc->flow_list)
+ rte_free(npc->flow_list);
+ if (npc->live_entries_rev)
+ rte_free(npc->live_entries_rev);
+ if (npc->live_entries)
+ rte_free(npc->live_entries);
+ if (npc->free_entries_rev)
+ rte_free(npc->free_entries_rev);
+ if (npc->free_entries)
+ rte_free(npc->free_entries);
+ if (npc->flow_entry_info)
+ rte_free(npc->flow_entry_info);
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.h
new file mode 100644
index 000000000..df78f41d3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow.h
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_FLOW_H__
+#define __OTX2_FLOW_H__
+
+#include <stdint.h>
+
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "otx2_common.h"
+#include "otx2_ethdev.h"
+#include "otx2_mbox.h"
+
+struct otx2_eth_dev;
+
+int otx2_flow_init(struct otx2_eth_dev *hw);
+int otx2_flow_fini(struct otx2_eth_dev *hw);
+extern const struct rte_flow_ops otx2_flow_ops;
+
+enum {
+ OTX2_INTF_RX = 0,
+ OTX2_INTF_TX = 1,
+ OTX2_INTF_MAX = 2,
+};
+
+#define NPC_IH_LENGTH 8
+#define NPC_TPID_LENGTH 2
+#define NPC_HIGIG2_LENGTH 16
+#define NPC_COUNTER_NONE (-1)
+/* 32 bytes from LDATA_CFG & 32 bytes from FLAGS_CFG */
+#define NPC_MAX_EXTRACT_DATA_LEN (64)
+#define NPC_LDATA_LFLAG_LEN (16)
+#define NPC_MCAM_TOT_ENTRIES (4096)
+#define NPC_MAX_KEY_NIBBLES (31)
+/* Nibble offsets */
+#define NPC_LAYER_KEYX_SZ (3)
+#define NPC_PARSE_KEX_S_LA_OFFSET (7)
+#define NPC_PARSE_KEX_S_LID_OFFSET(lid) \
+ ((((lid) - NPC_LID_LA) * NPC_LAYER_KEYX_SZ) \
+ + NPC_PARSE_KEX_S_LA_OFFSET)
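
The macros above place layer A's key nibbles at offset 7 and give each subsequent layer NPC_LAYER_KEYX_SZ (3) nibbles: two flag nibbles plus the layer type. A standalone sketch of the offset calculation, assuming NPC_LID_LA equals 0 as in the common NPC definitions:

#include <stdio.h>

#define LAYER_KEYX_SZ      3 /* nibbles per layer: 2 flag nibbles + ltype */
#define PARSE_KEX_S_LA_OFF 7 /* first layer-A nibble in the key           */
#define LID_LA             0 /* assumed value of NPC_LID_LA               */

/* Mirrors NPC_PARSE_KEX_S_LID_OFFSET() above */
#define LID_OFFSET(lid) ((((lid) - LID_LA) * LAYER_KEYX_SZ) + PARSE_KEX_S_LA_OFF)

int main(void)
{
	int lid;

	/* Layers A..H occupy nibble offsets 7, 10, 13, ..., 28 */
	for (lid = 0; lid < 8; lid++)
		printf("LID %d -> nibble offset %d\n", lid, LID_OFFSET(lid));
	return 0;
}
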
+
+
+/* supported flow actions flags */
+#define OTX2_FLOW_ACT_MARK (1 << 0)
+#define OTX2_FLOW_ACT_FLAG (1 << 1)
+#define OTX2_FLOW_ACT_DROP (1 << 2)
+#define OTX2_FLOW_ACT_QUEUE (1 << 3)
+#define OTX2_FLOW_ACT_RSS (1 << 4)
+#define OTX2_FLOW_ACT_DUP (1 << 5)
+#define OTX2_FLOW_ACT_SEC (1 << 6)
+#define OTX2_FLOW_ACT_COUNT (1 << 7)
+#define OTX2_FLOW_ACT_PF (1 << 8)
+#define OTX2_FLOW_ACT_VF (1 << 9)
+
+/* terminating actions */
+#define OTX2_FLOW_ACT_TERM (OTX2_FLOW_ACT_DROP | \
+ OTX2_FLOW_ACT_QUEUE | \
+ OTX2_FLOW_ACT_RSS | \
+ OTX2_FLOW_ACT_DUP | \
+ OTX2_FLOW_ACT_SEC)
+
+/* This mark value indicates flag action */
+#define OTX2_FLOW_FLAG_VAL (0xffff)
+
+#define NIX_RX_ACT_MATCH_OFFSET (40)
+#define NIX_RX_ACT_MATCH_MASK (0xFFFF)
+
+#define NIX_RSS_ACT_GRP_OFFSET (20)
+#define NIX_RSS_ACT_ALG_OFFSET (56)
+#define NIX_RSS_ACT_GRP_MASK (0xFFFFF)
+#define NIX_RSS_ACT_ALG_MASK (0x1F)
+
+/* PMD-specific definition of the opaque struct rte_flow */
+#define OTX2_MAX_MCAM_WIDTH_DWORDS 7
+
+enum npc_mcam_intf {
+ NPC_MCAM_RX,
+ NPC_MCAM_TX
+};
+
+struct npc_xtract_info {
+ /* Length in bytes of pkt data extracted. len = 0
+ * indicates that extraction is disabled.
+ */
+ uint8_t len;
+ uint8_t hdr_off; /* Byte offset of proto hdr: extract_src */
+ uint8_t key_off; /* Byte offset in MCAM key where data is placed */
+ uint8_t enable; /* Extraction enabled or disabled */
+ uint8_t flags_enable; /* Flags extraction enabled */
+};
+
+/* Information for a given {LAYER, LTYPE} */
+struct npc_lid_lt_xtract_info {
+ /* Info derived from parser configuration */
+ uint16_t npc_proto; /* Network protocol identified */
+ uint8_t valid_flags_mask; /* Flags applicable */
+ uint8_t is_terminating:1; /* No more parsing */
+ struct npc_xtract_info xtract[NPC_MAX_LD];
+};
+
+union npc_kex_ldata_flags_cfg {
+ struct {
+ #if defined(__BIG_ENDIAN_BITFIELD)
+ uint64_t rvsd_62_1 : 61;
+ uint64_t lid : 3;
+ #else
+ uint64_t lid : 3;
+ uint64_t rvsd_62_1 : 61;
+ #endif
+ } s;
+
+ uint64_t i;
+};
+
+typedef struct npc_lid_lt_xtract_info
+ otx2_dxcfg_t[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT];
+typedef struct npc_lid_lt_xtract_info
+ otx2_fxcfg_t[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
+typedef union npc_kex_ldata_flags_cfg otx2_ld_flags_t[NPC_MAX_LD];
+
+
+/* MBOX_MSG_NPC_GET_DATAX_CFG Response */
+struct npc_get_datax_cfg {
+ /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */
+ union npc_kex_ldata_flags_cfg ld_flags[NPC_MAX_LD];
+ /* Extract information indexed with [LID][LTYPE] */
+ struct npc_lid_lt_xtract_info lid_lt_xtract[NPC_MAX_LID][NPC_MAX_LT];
+ /* Flags based extract indexed with [LDATA][FLAGS_LOWER_NIBBLE]
+ * Fields flags_ena_ld0, flags_ena_ld1 in
+ * struct npc_lid_lt_xtract_info indicate if this is applicable
+ * for a given {LAYER, LTYPE}
+ */
+ struct npc_xtract_info flag_xtract[NPC_MAX_LD][NPC_MAX_LT];
+};
+
+struct otx2_mcam_ents_info {
+ /* Current max & min values of mcam index */
+ uint32_t max_id;
+ uint32_t min_id;
+ uint32_t free_ent;
+ uint32_t live_ent;
+};
+
+struct rte_flow {
+ uint8_t nix_intf;
+ uint32_t mcam_id;
+ int32_t ctr_id;
+ uint32_t priority;
+ /* Contiguous match string */
+ uint64_t mcam_data[OTX2_MAX_MCAM_WIDTH_DWORDS];
+ uint64_t mcam_mask[OTX2_MAX_MCAM_WIDTH_DWORDS];
+ uint64_t npc_action;
+ TAILQ_ENTRY(rte_flow) next;
+};
+
+TAILQ_HEAD(otx2_flow_list, rte_flow);
+
+/* Accessed from ethdev private - otx2_eth_dev */
+struct otx2_npc_flow_info {
+ rte_atomic32_t mark_actions;
+ uint32_t keyx_supp_nmask[NPC_MAX_INTF];/* nibble mask */
+ uint32_t keyx_len[NPC_MAX_INTF]; /* per intf key len in bits */
+ uint32_t datax_len[NPC_MAX_INTF]; /* per intf data len in bits */
+ uint32_t keyw[NPC_MAX_INTF]; /* max key + data len bits */
+ uint32_t mcam_entries; /* mcam entries supported */
+ otx2_dxcfg_t prx_dxcfg; /* intf, lid, lt, extract */
+ otx2_fxcfg_t prx_fxcfg; /* Flag extract */
+ otx2_ld_flags_t prx_lfcfg; /* KEX LD_Flags CFG */
+ /* mcam entry info per priority level: both free & in-use */
+ struct otx2_mcam_ents_info *flow_entry_info;
+ /* Bitmap of free preallocated entries in ascending index &
+ * descending priority
+ */
+ struct rte_bitmap **free_entries;
+ /* Bitmap of free preallocated entries in descending index &
+ * ascending priority
+ */
+ struct rte_bitmap **free_entries_rev;
+ /* Bitmap of live entries in ascending index & descending priority */
+ struct rte_bitmap **live_entries;
+ /* Bitmap of live entries in descending index & ascending priority */
+ struct rte_bitmap **live_entries_rev;
+ /* Priority bucket wise tail queue of all rte_flow resources */
+ struct otx2_flow_list *flow_list;
+ uint32_t rss_grps; /* rss groups supported */
+ struct rte_bitmap *rss_grp_entries;
+	uint16_t channel; /* Rx channel */
+ uint16_t flow_prealloc_size;
+ uint16_t flow_max_priority;
+ uint16_t switch_header_type;
+};
+
+struct otx2_parse_state {
+ struct otx2_npc_flow_info *npc;
+ const struct rte_flow_item *pattern;
+ const struct rte_flow_item *last_pattern; /* Temp usage */
+ struct rte_flow_error *error;
+ struct rte_flow *flow;
+ uint8_t tunnel;
+ uint8_t terminate;
+ uint8_t layer_mask;
+ uint8_t lt[NPC_MAX_LID];
+ uint8_t flags[NPC_MAX_LID];
+ uint8_t *mcam_data; /* point to flow->mcam_data + key_len */
+ uint8_t *mcam_mask; /* point to flow->mcam_mask + key_len */
+};
+
+struct otx2_flow_item_info {
+ const void *def_mask; /* rte_flow default mask */
+ void *hw_mask; /* hardware supported mask */
+ int len; /* length of item */
+ const void *spec; /* spec to use, NULL implies match any */
+ const void *mask; /* mask to use */
+ uint8_t hw_hdr_len; /* Extra data len at each layer*/
+};
+
+struct otx2_idev_kex_cfg {
+ struct npc_get_kex_cfg_rsp kex_cfg;
+ rte_atomic16_t kex_refcnt;
+};
+
+enum npc_kpu_parser_flag {
+ NPC_F_NA = 0,
+ NPC_F_PKI,
+ NPC_F_PKI_VLAN,
+ NPC_F_PKI_ETAG,
+ NPC_F_PKI_ITAG,
+ NPC_F_PKI_MPLS,
+ NPC_F_PKI_NSH,
+ NPC_F_ETYPE_UNK,
+ NPC_F_ETHER_VLAN,
+ NPC_F_ETHER_ETAG,
+ NPC_F_ETHER_ITAG,
+ NPC_F_ETHER_MPLS,
+ NPC_F_ETHER_NSH,
+ NPC_F_STAG_CTAG,
+ NPC_F_STAG_CTAG_UNK,
+ NPC_F_STAG_STAG_CTAG,
+ NPC_F_STAG_STAG_STAG,
+ NPC_F_QINQ_CTAG,
+ NPC_F_QINQ_CTAG_UNK,
+ NPC_F_QINQ_QINQ_CTAG,
+ NPC_F_QINQ_QINQ_QINQ,
+ NPC_F_BTAG_ITAG,
+ NPC_F_BTAG_ITAG_STAG,
+ NPC_F_BTAG_ITAG_CTAG,
+ NPC_F_BTAG_ITAG_UNK,
+ NPC_F_ETAG_CTAG,
+ NPC_F_ETAG_BTAG_ITAG,
+ NPC_F_ETAG_STAG,
+ NPC_F_ETAG_QINQ,
+ NPC_F_ETAG_ITAG,
+ NPC_F_ETAG_ITAG_STAG,
+ NPC_F_ETAG_ITAG_CTAG,
+ NPC_F_ETAG_ITAG_UNK,
+ NPC_F_ITAG_STAG_CTAG,
+ NPC_F_ITAG_STAG,
+ NPC_F_ITAG_CTAG,
+ NPC_F_MPLS_4_LABELS,
+ NPC_F_MPLS_3_LABELS,
+ NPC_F_MPLS_2_LABELS,
+ NPC_F_IP_HAS_OPTIONS,
+ NPC_F_IP_IP_IN_IP,
+ NPC_F_IP_6TO4,
+ NPC_F_IP_MPLS_IN_IP,
+ NPC_F_IP_UNK_PROTO,
+ NPC_F_IP_IP_IN_IP_HAS_OPTIONS,
+ NPC_F_IP_6TO4_HAS_OPTIONS,
+ NPC_F_IP_MPLS_IN_IP_HAS_OPTIONS,
+ NPC_F_IP_UNK_PROTO_HAS_OPTIONS,
+ NPC_F_IP6_HAS_EXT,
+ NPC_F_IP6_TUN_IP6,
+ NPC_F_IP6_MPLS_IN_IP,
+ NPC_F_TCP_HAS_OPTIONS,
+ NPC_F_TCP_HTTP,
+ NPC_F_TCP_HTTPS,
+ NPC_F_TCP_PPTP,
+ NPC_F_TCP_UNK_PORT,
+ NPC_F_TCP_HTTP_HAS_OPTIONS,
+ NPC_F_TCP_HTTPS_HAS_OPTIONS,
+ NPC_F_TCP_PPTP_HAS_OPTIONS,
+ NPC_F_TCP_UNK_PORT_HAS_OPTIONS,
+ NPC_F_UDP_VXLAN,
+ NPC_F_UDP_VXLAN_NOVNI,
+ NPC_F_UDP_VXLAN_NOVNI_NSH,
+ NPC_F_UDP_VXLANGPE,
+ NPC_F_UDP_VXLANGPE_NSH,
+ NPC_F_UDP_VXLANGPE_MPLS,
+ NPC_F_UDP_VXLANGPE_NOVNI,
+ NPC_F_UDP_VXLANGPE_NOVNI_NSH,
+ NPC_F_UDP_VXLANGPE_NOVNI_MPLS,
+ NPC_F_UDP_VXLANGPE_UNK,
+ NPC_F_UDP_VXLANGPE_NONP,
+ NPC_F_UDP_GTP_GTPC,
+ NPC_F_UDP_GTP_GTPU_G_PDU,
+ NPC_F_UDP_GTP_GTPU_UNK,
+ NPC_F_UDP_UNK_PORT,
+ NPC_F_UDP_GENEVE,
+ NPC_F_UDP_GENEVE_OAM,
+ NPC_F_UDP_GENEVE_CRI_OPT,
+ NPC_F_UDP_GENEVE_OAM_CRI_OPT,
+ NPC_F_GRE_NVGRE,
+ NPC_F_GRE_HAS_SRE,
+ NPC_F_GRE_HAS_CSUM,
+ NPC_F_GRE_HAS_KEY,
+ NPC_F_GRE_HAS_SEQ,
+ NPC_F_GRE_HAS_CSUM_KEY,
+ NPC_F_GRE_HAS_CSUM_SEQ,
+ NPC_F_GRE_HAS_KEY_SEQ,
+ NPC_F_GRE_HAS_CSUM_KEY_SEQ,
+ NPC_F_GRE_HAS_ROUTE,
+ NPC_F_GRE_UNK_PROTO,
+ NPC_F_GRE_VER1,
+ NPC_F_GRE_VER1_HAS_SEQ,
+ NPC_F_GRE_VER1_HAS_ACK,
+ NPC_F_GRE_VER1_HAS_SEQ_ACK,
+ NPC_F_GRE_VER1_UNK_PROTO,
+ NPC_F_TU_ETHER_UNK,
+ NPC_F_TU_ETHER_CTAG,
+ NPC_F_TU_ETHER_CTAG_UNK,
+ NPC_F_TU_ETHER_STAG_CTAG,
+ NPC_F_TU_ETHER_STAG_CTAG_UNK,
+ NPC_F_TU_ETHER_STAG,
+ NPC_F_TU_ETHER_STAG_UNK,
+ NPC_F_TU_ETHER_QINQ_CTAG,
+ NPC_F_TU_ETHER_QINQ_CTAG_UNK,
+ NPC_F_TU_ETHER_QINQ,
+ NPC_F_TU_ETHER_QINQ_UNK,
+ NPC_F_LAST /* has to be the last item */
+};
+
+
+int otx2_flow_mcam_free_counter(struct otx2_mbox *mbox, uint16_t ctr_id);
+
+int otx2_flow_mcam_read_counter(struct otx2_mbox *mbox, uint32_t ctr_id,
+ uint64_t *count);
+
+int otx2_flow_mcam_clear_counter(struct otx2_mbox *mbox, uint32_t ctr_id);
+
+int otx2_flow_mcam_free_entry(struct otx2_mbox *mbox, uint32_t entry);
+
+int otx2_flow_mcam_free_all_entries(struct otx2_mbox *mbox);
+
+int otx2_flow_update_parse_state(struct otx2_parse_state *pst,
+ struct otx2_flow_item_info *info,
+ int lid, int lt, uint8_t flags);
+
+int otx2_flow_parse_item_basic(const struct rte_flow_item *item,
+ struct otx2_flow_item_info *info,
+ struct rte_flow_error *error);
+
+void otx2_flow_keyx_compress(uint64_t *data, uint32_t nibble_mask);
+
+int otx2_flow_mcam_alloc_and_write(struct rte_flow *flow,
+ struct otx2_mbox *mbox,
+ struct otx2_parse_state *pst,
+ struct otx2_npc_flow_info *flow_info);
+
+void otx2_flow_get_hw_supp_mask(struct otx2_parse_state *pst,
+ struct otx2_flow_item_info *info,
+ int lid, int lt);
+
+const struct rte_flow_item *
+otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern);
+
+int otx2_flow_parse_lh(struct otx2_parse_state *pst);
+
+int otx2_flow_parse_lg(struct otx2_parse_state *pst);
+
+int otx2_flow_parse_lf(struct otx2_parse_state *pst);
+
+int otx2_flow_parse_le(struct otx2_parse_state *pst);
+
+int otx2_flow_parse_ld(struct otx2_parse_state *pst);
+
+int otx2_flow_parse_lc(struct otx2_parse_state *pst);
+
+int otx2_flow_parse_lb(struct otx2_parse_state *pst);
+
+int otx2_flow_parse_la(struct otx2_parse_state *pst);
+
+int otx2_flow_parse_higig2_hdr(struct otx2_parse_state *pst);
+
+int otx2_flow_parse_actions(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow);
+
+int otx2_flow_free_all_resources(struct otx2_eth_dev *hw);
+
+int otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid);
+#endif /* __OTX2_FLOW_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c
new file mode 100644
index 000000000..76bf48100
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_ctrl.c
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_ethdev.h"
+
+int
+otx2_nix_rxchan_bpid_cfg(struct rte_eth_dev *eth_dev, bool enb)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_fc_info *fc = &dev->fc_info;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_bp_cfg_req *req;
+ struct nix_bp_cfg_rsp *rsp;
+ int rc;
+
+ if (otx2_dev_is_sdp(dev))
+ return 0;
+
+ if (enb) {
+ req = otx2_mbox_alloc_msg_nix_bp_enable(mbox);
+ req->chan_base = 0;
+ req->chan_cnt = 1;
+ req->bpid_per_chan = 0;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc || req->chan_cnt != rsp->chan_cnt) {
+ otx2_err("Insufficient BPIDs, alloc=%u < req=%u rc=%d",
+ rsp->chan_cnt, req->chan_cnt, rc);
+ return rc;
+ }
+
+ fc->bpid[0] = rsp->chan_bpid[0];
+ } else {
+ req = otx2_mbox_alloc_msg_nix_bp_disable(mbox);
+ req->chan_base = 0;
+ req->chan_cnt = 1;
+
+ rc = otx2_mbox_process(mbox);
+
+ memset(fc->bpid, 0, sizeof(uint16_t) * NIX_MAX_CHAN);
+ }
+
+ return rc;
+}
+
+int
+otx2_nix_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct cgx_pause_frm_cfg *req, *rsp;
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc;
+
+ if (otx2_dev_is_lbk(dev)) {
+ fc_conf->mode = RTE_FC_NONE;
+ return 0;
+ }
+
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
+ req->set = 0;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto done;
+
+ if (rsp->rx_pause && rsp->tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rsp->rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (rsp->tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+
+done:
+ return rc;
+}
+
+static int
+otx2_nix_cq_bp_cfg(struct rte_eth_dev *eth_dev, bool enb)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_fc_info *fc = &dev->fc_info;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *aq;
+ struct otx2_eth_rxq *rxq;
+ int i, rc;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq) {
+ /* The shared memory buffer can be full.
+			 * Flush it and retry.
+ */
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ return rc;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!aq)
+ return -ENOMEM;
+ }
+ aq->qidx = rxq->rq;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ if (enb) {
+ aq->cq.bpid = fc->bpid[0];
+ aq->cq_mask.bpid = ~(aq->cq_mask.bpid);
+ aq->cq.bp = rxq->cq_drop;
+ aq->cq_mask.bp = ~(aq->cq_mask.bp);
+ }
+
+ aq->cq.bp_ena = !!enb;
+ aq->cq_mask.bp_ena = ~(aq->cq_mask.bp_ena);
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int
+otx2_nix_rx_fc_cfg(struct rte_eth_dev *eth_dev, bool enb)
+{
+ return otx2_nix_cq_bp_cfg(eth_dev, enb);
+}
+
+int
+otx2_nix_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_fc_info *fc = &dev->fc_info;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_pause_frm_cfg *req;
+ uint8_t tx_pause, rx_pause;
+ int rc = 0;
+
+ if (otx2_dev_is_lbk(dev)) {
+ otx2_info("No flow control support for LBK bound ethports");
+ return -ENOTSUP;
+ }
+
+ if (fc_conf->high_water || fc_conf->low_water || fc_conf->pause_time ||
+ fc_conf->mac_ctrl_frame_fwd || fc_conf->autoneg) {
+		otx2_info("Flow control parameters other than mode are not supported");
+ return -EINVAL;
+ }
+
+ if (fc_conf->mode == fc->mode)
+ return 0;
+
+ rx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+ (fc_conf->mode == RTE_FC_RX_PAUSE);
+ tx_pause = (fc_conf->mode == RTE_FC_FULL) ||
+ (fc_conf->mode == RTE_FC_TX_PAUSE);
+
+ /* Check if TX pause frame is already enabled or not */
+ if (fc->tx_pause ^ tx_pause) {
+ if (otx2_dev_is_Ax(dev) && eth_dev->data->dev_started) {
+ /* on Ax, CQ should be in disabled state
+ * while setting flow control configuration.
+ */
+ otx2_info("Stop the port=%d for setting flow control\n",
+ eth_dev->data->port_id);
+ return 0;
+ }
+ /* TX pause frames, enable/disable flowctrl on RX side. */
+ rc = otx2_nix_rx_fc_cfg(eth_dev, tx_pause);
+ if (rc)
+ return rc;
+ }
+
+ req = otx2_mbox_alloc_msg_cgx_cfg_pause_frm(mbox);
+ req->set = 1;
+ req->rx_pause = rx_pause;
+ req->tx_pause = tx_pause;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ fc->tx_pause = tx_pause;
+ fc->rx_pause = rx_pause;
+ fc->mode = fc_conf->mode;
+
+ return rc;
+}
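
The rx_pause/tx_pause derivation above maps the four rte_eth_fc_mode values onto the two pause directions; note that enabling Tx pause frames also configures backpressure on the Rx queues via otx2_nix_rx_fc_cfg(). A tiny standalone sketch of that mode mapping, with a local enum assumed to mirror the RTE_FC_* ordering:

#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL }; /* mirrors RTE_FC_* */

int main(void)
{
	int mode;

	/* Same derivation as in otx2_nix_flow_ctrl_set() above */
	for (mode = FC_NONE; mode <= FC_FULL; mode++) {
		int rx_pause = (mode == FC_FULL) || (mode == FC_RX_PAUSE);
		int tx_pause = (mode == FC_FULL) || (mode == FC_TX_PAUSE);

		printf("mode=%d rx_pause=%d tx_pause=%d\n",
		       mode, rx_pause, tx_pause);
	}
	return 0;
}
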
+
+int
+otx2_nix_update_flow_ctrl_mode(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_fc_info *fc = &dev->fc_info;
+ struct rte_eth_fc_conf fc_conf;
+
+ if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev))
+ return 0;
+
+ memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+ fc_conf.mode = fc->mode;
+
+ /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
+ if (otx2_dev_is_Ax(dev) &&
+ (dev->npc_flow.switch_header_type != OTX2_PRIV_FLAGS_HIGIG) &&
+ (fc_conf.mode == RTE_FC_FULL || fc_conf.mode == RTE_FC_RX_PAUSE)) {
+ fc_conf.mode =
+ (fc_conf.mode == RTE_FC_FULL ||
+ fc_conf.mode == RTE_FC_TX_PAUSE) ?
+ RTE_FC_TX_PAUSE : RTE_FC_NONE;
+ }
+
+ return otx2_nix_flow_ctrl_set(eth_dev, &fc_conf);
+}
+
+int
+otx2_nix_flow_ctrl_init(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_fc_info *fc = &dev->fc_info;
+ struct rte_eth_fc_conf fc_conf;
+ int rc;
+
+ if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev))
+ return 0;
+
+ memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+	/* Both Rx & Tx flow control are enabled (RTE_FC_FULL) in HW
+	 * by the AF driver; update that info in the PMD structure.
+ */
+ rc = otx2_nix_flow_ctrl_get(eth_dev, &fc_conf);
+ if (rc)
+ goto exit;
+
+ fc->mode = fc_conf.mode;
+ fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_RX_PAUSE);
+ fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_TX_PAUSE);
+
+exit:
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_parse.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_parse.c
new file mode 100644
index 000000000..2d9a5857c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_parse.c
@@ -0,0 +1,1046 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_ethdev.h"
+#include "otx2_flow.h"
+
+const struct rte_flow_item *
+otx2_flow_skip_void_and_any_items(const struct rte_flow_item *pattern)
+{
+ while ((pattern->type == RTE_FLOW_ITEM_TYPE_VOID) ||
+ (pattern->type == RTE_FLOW_ITEM_TYPE_ANY))
+ pattern++;
+
+ return pattern;
+}
+
+/*
+ * Tunnel+ESP, Tunnel+ICMP4/6, Tunnel+TCP, Tunnel+UDP,
+ * Tunnel+SCTP
+ */
+int
+otx2_flow_parse_lh(struct otx2_parse_state *pst)
+{
+ struct otx2_flow_item_info info;
+ char hw_mask[64];
+ int lid, lt;
+ int rc;
+
+ if (!pst->tunnel)
+ return 0;
+
+ info.hw_mask = &hw_mask;
+ info.spec = NULL;
+ info.mask = NULL;
+ info.hw_hdr_len = 0;
+ lid = NPC_LID_LH;
+
+ switch (pst->pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ lt = NPC_LT_LH_TU_UDP;
+ info.def_mask = &rte_flow_item_udp_mask;
+ info.len = sizeof(struct rte_flow_item_udp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ lt = NPC_LT_LH_TU_TCP;
+ info.def_mask = &rte_flow_item_tcp_mask;
+ info.len = sizeof(struct rte_flow_item_tcp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ lt = NPC_LT_LH_TU_SCTP;
+ info.def_mask = &rte_flow_item_sctp_mask;
+ info.len = sizeof(struct rte_flow_item_sctp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ lt = NPC_LT_LH_TU_ESP;
+ info.def_mask = &rte_flow_item_esp_mask;
+ info.len = sizeof(struct rte_flow_item_esp);
+ break;
+ default:
+ return 0;
+ }
+
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc != 0)
+ return rc;
+
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
+}
+
+/* Tunnel+IPv4, Tunnel+IPv6 */
+int
+otx2_flow_parse_lg(struct otx2_parse_state *pst)
+{
+ struct otx2_flow_item_info info;
+ char hw_mask[64];
+ int lid, lt;
+ int rc;
+
+ if (!pst->tunnel)
+ return 0;
+
+ info.hw_mask = &hw_mask;
+ info.spec = NULL;
+ info.mask = NULL;
+ info.hw_hdr_len = 0;
+ lid = NPC_LID_LG;
+
+ if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ lt = NPC_LT_LG_TU_IP;
+ info.def_mask = &rte_flow_item_ipv4_mask;
+ info.len = sizeof(struct rte_flow_item_ipv4);
+ } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ lt = NPC_LT_LG_TU_IP6;
+ info.def_mask = &rte_flow_item_ipv6_mask;
+ info.len = sizeof(struct rte_flow_item_ipv6);
+ } else {
+ /* There is no tunneled IP header */
+ return 0;
+ }
+
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc != 0)
+ return rc;
+
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
+}
+
+/* Tunnel+Ether */
+int
+otx2_flow_parse_lf(struct otx2_parse_state *pst)
+{
+ const struct rte_flow_item *pattern, *last_pattern;
+ struct rte_flow_item_eth hw_mask;
+ struct otx2_flow_item_info info;
+ int lid, lt, lflags;
+ int nr_vlans = 0;
+ int rc;
+
+ /* We hit this layer if there is a tunneling protocol */
+ if (!pst->tunnel)
+ return 0;
+
+ if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
+ return 0;
+
+ lid = NPC_LID_LF;
+ lt = NPC_LT_LF_TU_ETHER;
+ lflags = 0;
+
+ info.def_mask = &rte_flow_item_vlan_mask;
+ /* No match support for vlan tags */
+ info.hw_mask = NULL;
+ info.len = sizeof(struct rte_flow_item_vlan);
+ info.spec = NULL;
+ info.mask = NULL;
+ info.hw_hdr_len = 0;
+
+	/* Look ahead for any VLAN tags. These can be
+	 * detected, but no data matching is available.
+ */
+ last_pattern = pst->pattern;
+ pattern = pst->pattern + 1;
+ pattern = otx2_flow_skip_void_and_any_items(pattern);
+ while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ nr_vlans++;
+ rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
+ if (rc != 0)
+ return rc;
+ last_pattern = pattern;
+ pattern++;
+ pattern = otx2_flow_skip_void_and_any_items(pattern);
+ }
+ otx2_npc_dbg("Nr_vlans = %d", nr_vlans);
+ switch (nr_vlans) {
+ case 0:
+ break;
+ case 1:
+ lflags = NPC_F_TU_ETHER_CTAG;
+ break;
+ case 2:
+ lflags = NPC_F_TU_ETHER_STAG_CTAG;
+ break;
+ default:
+ rte_flow_error_set(pst->error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ last_pattern,
+ "more than 2 vlans with tunneled Ethernet "
+ "not supported");
+ return -rte_errno;
+ }
+
+ info.def_mask = &rte_flow_item_eth_mask;
+ info.hw_mask = &hw_mask;
+ info.len = sizeof(struct rte_flow_item_eth);
+ info.hw_hdr_len = 0;
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ info.spec = NULL;
+ info.mask = NULL;
+
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc != 0)
+ return rc;
+
+ pst->pattern = last_pattern;
+
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
+}
+
+int
+otx2_flow_parse_le(struct otx2_parse_state *pst)
+{
+ /*
+ * We are positioned at UDP. Scan ahead and look for
+ * UDP-encapsulated tunnel protocols. If one is found,
+ * parse it, keeping in mind that:
+ * - the RTE spec assumes we point to the tunnel header;
+ * - the NPC parser provides the offset from the UDP header.
+ */
+
+ /*
+ * Note: Add support for GENEVE and VXLAN_GPE when
+ * DPDK is upgraded.
+ *
+ * Note: Better to split flags into two nibbles:
+ * - higher nibble for flags
+ * - lower nibble to further enumerate protocols,
+ * with flags-based extraction
+ */
+ const struct rte_flow_item *pattern = pst->pattern;
+ struct otx2_flow_item_info info;
+ int lid, lt, lflags;
+ char hw_mask[64];
+ int rc;
+
+ if (pst->tunnel)
+ return 0;
+
+ if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
+ return otx2_flow_parse_mpls(pst, NPC_LID_LE);
+
+ info.spec = NULL;
+ info.mask = NULL;
+ info.hw_mask = NULL;
+ info.def_mask = NULL;
+ info.len = 0;
+ info.hw_hdr_len = 0;
+ lid = NPC_LID_LE;
+ lflags = 0;
+
+ /* Ensure we are not matching anything in UDP */
+ rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
+ if (rc)
+ return rc;
+
+ info.hw_mask = &hw_mask;
+ pattern = otx2_flow_skip_void_and_any_items(pattern);
+ otx2_npc_dbg("Pattern->type = %d", pattern->type);
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ lflags = NPC_F_UDP_VXLAN;
+ info.def_mask = &rte_flow_item_vxlan_mask;
+ info.len = sizeof(struct rte_flow_item_vxlan);
+ lt = NPC_LT_LE_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTPC:
+ lflags = NPC_F_UDP_GTP_GTPC;
+ info.def_mask = &rte_flow_item_gtp_mask;
+ info.len = sizeof(struct rte_flow_item_gtp);
+ lt = NPC_LT_LE_GTPC;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ lflags = NPC_F_UDP_GTP_GTPU_G_PDU;
+ info.def_mask = &rte_flow_item_gtp_mask;
+ info.len = sizeof(struct rte_flow_item_gtp);
+ lt = NPC_LT_LE_GTPU;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ lflags = NPC_F_UDP_GENEVE;
+ info.def_mask = &rte_flow_item_geneve_mask;
+ info.len = sizeof(struct rte_flow_item_geneve);
+ lt = NPC_LT_LE_GENEVE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ lflags = NPC_F_UDP_VXLANGPE;
+ info.def_mask = &rte_flow_item_vxlan_gpe_mask;
+ info.len = sizeof(struct rte_flow_item_vxlan_gpe);
+ lt = NPC_LT_LE_VXLANGPE;
+ break;
+ default:
+ return 0;
+ }
+
+ pst->tunnel = 1;
+
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ rc = otx2_flow_parse_item_basic(pattern, &info, pst->error);
+ if (rc != 0)
+ return rc;
+
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
+}
+
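+/*
+ * Walk the stack of MPLS items starting at pst->pattern, validate that
+ * labels after the first do not request any field match, and translate
+ * the label count (up to four) into an NPC_F_MPLS_*_LABELS flag.
+ * pst->last_pattern is left on the last MPLS item seen.
+ */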
+static int
+flow_parse_mpls_label_stack(struct otx2_parse_state *pst, int *flag)
+{
+ int nr_labels = 0;
+ const struct rte_flow_item *pattern = pst->pattern;
+ struct otx2_flow_item_info info;
+ int rc;
+ uint8_t flag_list[] = {0, NPC_F_MPLS_2_LABELS,
+ NPC_F_MPLS_3_LABELS, NPC_F_MPLS_4_LABELS};
+
+ /*
+ * pst->pattern points to first MPLS label. We only check
+ * that subsequent labels do not have anything to match.
+ */
+ info.def_mask = &rte_flow_item_mpls_mask;
+ info.hw_mask = NULL;
+ info.len = sizeof(struct rte_flow_item_mpls);
+ info.spec = NULL;
+ info.mask = NULL;
+ info.hw_hdr_len = 0;
+
+ while (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS) {
+ nr_labels++;
+
+ /* Basic validation of 2nd/3rd/4th mpls item */
+ if (nr_labels > 1) {
+ rc = otx2_flow_parse_item_basic(pattern, &info,
+ pst->error);
+ if (rc != 0)
+ return rc;
+ }
+ pst->last_pattern = pattern;
+ pattern++;
+ pattern = otx2_flow_skip_void_and_any_items(pattern);
+ }
+
+ if (nr_labels > 4) {
+ rte_flow_error_set(pst->error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pst->last_pattern,
+ "more than 4 mpls labels not supported");
+ return -rte_errno;
+ }
+
+ *flag = flag_list[nr_labels - 1];
+ return 0;
+}
+
+int
+otx2_flow_parse_mpls(struct otx2_parse_state *pst, int lid)
+{
+ /* Find number of MPLS labels */
+ struct rte_flow_item_mpls hw_mask;
+ struct otx2_flow_item_info info;
+ int lt, lflags;
+ int rc;
+
+ lflags = 0;
+
+ if (lid == NPC_LID_LC)
+ lt = NPC_LT_LC_MPLS;
+ else if (lid == NPC_LID_LD)
+ lt = NPC_LT_LD_TU_MPLS_IN_IP;
+ else
+ lt = NPC_LT_LE_TU_MPLS_IN_UDP;
+
+ /* Prepare for parsing the first item */
+ info.def_mask = &rte_flow_item_mpls_mask;
+ info.hw_mask = &hw_mask;
+ info.len = sizeof(struct rte_flow_item_mpls);
+ info.spec = NULL;
+ info.mask = NULL;
+ info.hw_hdr_len = 0;
+
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Parse for more labels.
+ * This sets lflags and pst->last_pattern correctly.
+ */
+ rc = flow_parse_mpls_label_stack(pst, &lflags);
+ if (rc != 0)
+ return rc;
+
+ pst->tunnel = 1;
+ pst->pattern = pst->last_pattern;
+
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
+}
+
+/*
+ * ICMP, ICMP6, UDP, TCP, SCTP, VXLAN, GRE, NVGRE,
+ * GTP, GTPC, GTPU, ESP
+ *
+ * Note: UDP tunnel protocols are identified by flags.
+ * LPTR for these protocol still points to UDP
+ * header. Need flag based extraction to support
+ * this.
+ */
+int
+otx2_flow_parse_ld(struct otx2_parse_state *pst)
+{
+ char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
+ uint32_t gre_key_mask = 0xffffffff;
+ struct otx2_flow_item_info info;
+ int lid, lt, lflags;
+ int rc;
+
+ if (pst->tunnel) {
+ /* We have already parsed MPLS or IPv4/v6 followed
+ * by MPLS or IPv4/v6. Subsequent TCP/UDP etc
+ * would be parsed as tunneled versions. Skip
+ * this layer, except for tunneled MPLS. If LC is
+ * MPLS, we have anyway skipped all stacked MPLS
+ * labels.
+ */
+ if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
+ return otx2_flow_parse_mpls(pst, NPC_LID_LD);
+ return 0;
+ }
+ info.hw_mask = &hw_mask;
+ info.spec = NULL;
+ info.mask = NULL;
+ info.def_mask = NULL;
+ info.len = 0;
+ info.hw_hdr_len = 0;
+
+ lid = NPC_LID_LD;
+ lflags = 0;
+
+ otx2_npc_dbg("Pst->pattern->type = %d", pst->pattern->type);
+ switch (pst->pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_ICMP:
+ if (pst->lt[NPC_LID_LC] == NPC_LT_LC_IP6)
+ lt = NPC_LT_LD_ICMP6;
+ else
+ lt = NPC_LT_LD_ICMP;
+ info.def_mask = &rte_flow_item_icmp_mask;
+ info.len = sizeof(struct rte_flow_item_icmp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ lt = NPC_LT_LD_UDP;
+ info.def_mask = &rte_flow_item_udp_mask;
+ info.len = sizeof(struct rte_flow_item_udp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ lt = NPC_LT_LD_TCP;
+ info.def_mask = &rte_flow_item_tcp_mask;
+ info.len = sizeof(struct rte_flow_item_tcp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ lt = NPC_LT_LD_SCTP;
+ info.def_mask = &rte_flow_item_sctp_mask;
+ info.len = sizeof(struct rte_flow_item_sctp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ lt = NPC_LT_LD_ESP;
+ info.def_mask = &rte_flow_item_esp_mask;
+ info.len = sizeof(struct rte_flow_item_esp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ lt = NPC_LT_LD_GRE;
+ info.def_mask = &rte_flow_item_gre_mask;
+ info.len = sizeof(struct rte_flow_item_gre);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+ lt = NPC_LT_LD_GRE;
+ info.def_mask = &gre_key_mask;
+ info.len = sizeof(gre_key_mask);
+ info.hw_hdr_len = 4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ lt = NPC_LT_LD_NVGRE;
+ lflags = NPC_F_GRE_NVGRE;
+ info.def_mask = &rte_flow_item_nvgre_mask;
+ info.len = sizeof(struct rte_flow_item_nvgre);
+ /* Further IP/Ethernet are parsed as tunneled */
+ pst->tunnel = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc != 0)
+ return rc;
+
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
+}
+
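+/*
+ * Peek at the item following the outer IP item; if it is MPLS, IPv4 or
+ * IPv6, the remainder of the pattern is parsed as tunneled.
+ */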
+static inline void
+flow_check_lc_ip_tunnel(struct otx2_parse_state *pst)
+{
+ const struct rte_flow_item *pattern = pst->pattern + 1;
+
+ pattern = otx2_flow_skip_void_and_any_items(pattern);
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_MPLS ||
+ pattern->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ pattern->type == RTE_FLOW_ITEM_TYPE_IPV6)
+ pst->tunnel = 1;
+}
+
+/* Outer IPv4, Outer IPv6, MPLS, ARP */
+int
+otx2_flow_parse_lc(struct otx2_parse_state *pst)
+{
+ uint8_t hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
+ struct otx2_flow_item_info info;
+ int lid, lt;
+ int rc;
+
+ if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_MPLS)
+ return otx2_flow_parse_mpls(pst, NPC_LID_LC);
+
+ info.hw_mask = &hw_mask;
+ info.spec = NULL;
+ info.mask = NULL;
+ info.hw_hdr_len = 0;
+ lid = NPC_LID_LC;
+
+ switch (pst->pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ lt = NPC_LT_LC_IP;
+ info.def_mask = &rte_flow_item_ipv4_mask;
+ info.len = sizeof(struct rte_flow_item_ipv4);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ lid = NPC_LID_LC;
+ lt = NPC_LT_LC_IP6;
+ info.def_mask = &rte_flow_item_ipv6_mask;
+ info.len = sizeof(struct rte_flow_item_ipv6);
+ break;
+ case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
+ lt = NPC_LT_LC_ARP;
+ info.def_mask = &rte_flow_item_arp_eth_ipv4_mask;
+ info.len = sizeof(struct rte_flow_item_arp_eth_ipv4);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
+ lid = NPC_LID_LC;
+ lt = NPC_LT_LC_IP6_EXT;
+ info.def_mask = &rte_flow_item_ipv6_ext_mask;
+ info.len = sizeof(struct rte_flow_item_ipv6_ext);
+ info.hw_hdr_len = 40;
+ break;
+ default:
+ /* No match at this layer */
+ return 0;
+ }
+
+ /* Identify if IP tunnels MPLS or IPv4/v6 */
+ flow_check_lc_ip_tunnel(pst);
+
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc != 0)
+ return rc;
+
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
+}
+
+/* VLAN, ETAG */
+int
+otx2_flow_parse_lb(struct otx2_parse_state *pst)
+{
+ const struct rte_flow_item *pattern = pst->pattern;
+ const struct rte_flow_item *last_pattern;
+ char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
+ struct otx2_flow_item_info info;
+ int lid, lt, lflags;
+ int nr_vlans = 0;
+ int rc;
+
+ info.spec = NULL;
+ info.mask = NULL;
+ info.hw_hdr_len = NPC_TPID_LENGTH;
+
+ lid = NPC_LID_LB;
+ lflags = 0;
+ last_pattern = pattern;
+
+ if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ /* An RTE VLAN item is either 802.1q or 802.1ad, which
+ * maps to CTAG or STAG; decide based on the number of
+ * VLANs present. Matching is supported on the first
+ * tag only.
+ */
+ info.def_mask = &rte_flow_item_vlan_mask;
+ info.hw_mask = NULL;
+ info.len = sizeof(struct rte_flow_item_vlan);
+
+ pattern = pst->pattern;
+ while (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ nr_vlans++;
+
+ /* Basic validation of 2nd/3rd vlan item */
+ if (nr_vlans > 1) {
+ otx2_npc_dbg("Vlans = %d", nr_vlans);
+ rc = otx2_flow_parse_item_basic(pattern, &info,
+ pst->error);
+ if (rc != 0)
+ return rc;
+ }
+ last_pattern = pattern;
+ pattern++;
+ pattern = otx2_flow_skip_void_and_any_items(pattern);
+ }
+
+ switch (nr_vlans) {
+ case 1:
+ lt = NPC_LT_LB_CTAG;
+ break;
+ case 2:
+ lt = NPC_LT_LB_STAG_QINQ;
+ lflags = NPC_F_STAG_CTAG;
+ break;
+ case 3:
+ lt = NPC_LT_LB_STAG_QINQ;
+ lflags = NPC_F_STAG_STAG_CTAG;
+ break;
+ default:
+ rte_flow_error_set(pst->error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ last_pattern,
+ "more than 3 vlans not supported");
+ return -rte_errno;
+ }
+ } else if (pst->pattern->type == RTE_FLOW_ITEM_TYPE_E_TAG) {
+ /* ETAG is supported; a subsequent CTAG can be detected,
+ * but matching on its fields is not supported.
+ */
+ lt = NPC_LT_LB_ETAG;
+ lflags = 0;
+
+ last_pattern = pst->pattern;
+ pattern = otx2_flow_skip_void_and_any_items(pst->pattern + 1);
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ info.def_mask = &rte_flow_item_vlan_mask;
+ /* set supported mask to NULL for vlan tag */
+ info.hw_mask = NULL;
+ info.len = sizeof(struct rte_flow_item_vlan);
+ rc = otx2_flow_parse_item_basic(pattern, &info,
+ pst->error);
+ if (rc != 0)
+ return rc;
+
+ lflags = NPC_F_ETAG_CTAG;
+ last_pattern = pattern;
+ }
+
+ info.def_mask = &rte_flow_item_e_tag_mask;
+ info.len = sizeof(struct rte_flow_item_e_tag);
+ } else {
+ return 0;
+ }
+
+ info.hw_mask = &hw_mask;
+ info.spec = NULL;
+ info.mask = NULL;
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc != 0)
+ return rc;
+
+ /* Point pattern to last item consumed */
+ pst->pattern = last_pattern;
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, lflags);
+}
+
+int
+otx2_flow_parse_la(struct otx2_parse_state *pst)
+{
+ struct rte_flow_item_eth hw_mask;
+ struct otx2_flow_item_info info;
+ int lid, lt;
+ int rc;
+
+ /* Identify the pattern type into lid, lt */
+ if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_ETH)
+ return 0;
+
+ lid = NPC_LID_LA;
+ lt = NPC_LT_LA_ETHER;
+ info.hw_hdr_len = 0;
+
+ if (pst->flow->nix_intf == NIX_INTF_TX) {
+ lt = NPC_LT_LA_IH_NIX_ETHER;
+ info.hw_hdr_len = NPC_IH_LENGTH;
+ if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
+ lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
+ info.hw_hdr_len += NPC_HIGIG2_LENGTH;
+ }
+ } else {
+ if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
+ lt = NPC_LT_LA_HIGIG2_ETHER;
+ info.hw_hdr_len = NPC_HIGIG2_LENGTH;
+ }
+ }
+
+ /* Prepare for parsing the item */
+ info.def_mask = &rte_flow_item_eth_mask;
+ info.hw_mask = &hw_mask;
+ info.len = sizeof(struct rte_flow_item_eth);
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ info.spec = NULL;
+ info.mask = NULL;
+
+ /* Basic validation of item parameters */
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc)
+ return rc;
+
+ /* Update pst if not validate only? clash check? */
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
+}
+
+int
+otx2_flow_parse_higig2_hdr(struct otx2_parse_state *pst)
+{
+ struct rte_flow_item_higig2_hdr hw_mask;
+ struct otx2_flow_item_info info;
+ int lid, lt;
+ int rc;
+
+ /* Identify the pattern type into lid, lt */
+ if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_HIGIG2)
+ return 0;
+
+ lid = NPC_LID_LA;
+ lt = NPC_LT_LA_HIGIG2_ETHER;
+ info.hw_hdr_len = 0;
+
+ if (pst->flow->nix_intf == NIX_INTF_TX) {
+ lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
+ info.hw_hdr_len = NPC_IH_LENGTH;
+ }
+
+ /* Prepare for parsing the item */
+ info.def_mask = &rte_flow_item_higig2_hdr_mask;
+ info.hw_mask = &hw_mask;
+ info.len = sizeof(struct rte_flow_item_higig2_hdr);
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ info.spec = NULL;
+ info.mask = NULL;
+
+ /* Basic validation of item parameters */
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc)
+ return rc;
+
+ /* Update pst if not validate only? clash check? */
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
+}
+
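+/*
+ * Validate the RSS action: ingress only, RSS mq_mode enabled, default
+ * hash function, key length and queue count within limits, and every
+ * queue id below the number of configured Rx queues.
+ */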
+static int
+parse_rss_action(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action *act,
+ struct rte_flow_error *error)
+{
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ struct otx2_rss_info *rss_info = &hw->rss_info;
+ const struct rte_flow_action_rss *rss;
+ uint32_t i;
+
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ /* Not supported */
+ if (attr->egress) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "No support of RSS in egress");
+ }
+
+ if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "multi-queue mode is disabled");
+
+ /* Parse RSS related parameters from configuration */
+ if (!rss || !rss->queue_num)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "no valid queues");
+
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions"
+ " are not supported");
+
+ if (rss->key_len && rss->key_len > RTE_DIM(rss_info->key))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key too large");
+
+ if (rss->queue_num > rss_info->rss_size)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+
+ for (i = 0; i < rss->queue_num; i++) {
+ if (rss->queue[i] >= dev->data->nb_rx_queues)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number"
+ " of queues");
+ }
+
+ return 0;
+}
+
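+/*
+ * Translate the rte_flow action list into flow->npc_action. Egress
+ * supports only DROP (optionally with COUNT); ingress allows exactly one
+ * terminating action plus optional COUNT and at most one of MARK/FLAG.
+ * For example, an ingress rule with QUEUE, COUNT and MARK actions yields
+ * NIX_RX_ACTIONOP_UCAST with the queue index at bit 20 and the mark
+ * value at bit 40; the pf_func is always encoded starting at bit 4.
+ */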
+int
+otx2_flow_parse_actions(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow)
+{
+ struct otx2_eth_dev *hw = dev->data->dev_private;
+ struct otx2_npc_flow_info *npc = &hw->npc_flow;
+ const struct rte_flow_action_count *act_count;
+ const struct rte_flow_action_mark *act_mark;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *vf_act;
+ const char *errmsg = NULL;
+ int sel_act, req_act = 0;
+ uint16_t pf_func, vf_id;
+ int errcode = 0;
+ int mark = 0;
+ int rq = 0;
+
+ /* Initialize actions */
+ flow->ctr_id = NPC_COUNTER_NONE;
+ pf_func = otx2_pfvf_func(hw->pf, hw->vf);
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ otx2_npc_dbg("Action type = %d", actions->type);
+
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ act_mark =
+ (const struct rte_flow_action_mark *)actions->conf;
+
+ /* We have only 16 bits. Use highest val for flag */
+ if (act_mark->id > (OTX2_FLOW_FLAG_VAL - 2)) {
+ errmsg = "mark value must be < 0xfffe";
+ errcode = ENOTSUP;
+ goto err_exit;
+ }
+ mark = act_mark->id + 1;
+ req_act |= OTX2_FLOW_ACT_MARK;
+ rte_atomic32_inc(&npc->mark_actions);
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ mark = OTX2_FLOW_FLAG_VAL;
+ req_act |= OTX2_FLOW_ACT_FLAG;
+ rte_atomic32_inc(&npc->mark_actions);
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ act_count =
+ (const struct rte_flow_action_count *)
+ actions->conf;
+
+ if (act_count->shared == 1) {
+ errmsg = "Shared Counters not supported";
+ errcode = ENOTSUP;
+ goto err_exit;
+ }
+ /* Indicates that a counter is needed */
+ flow->ctr_id = 1;
+ req_act |= OTX2_FLOW_ACT_COUNT;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ req_act |= OTX2_FLOW_ACT_DROP;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_PF:
+ req_act |= OTX2_FLOW_ACT_PF;
+ pf_func &= (0xfc00);
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_VF:
+ vf_act = (const struct rte_flow_action_vf *)
+ actions->conf;
+ req_act |= OTX2_FLOW_ACT_VF;
+ if (vf_act->original == 0) {
+ vf_id = vf_act->id & RVU_PFVF_FUNC_MASK;
+ if (vf_id >= hw->maxvf) {
+ errmsg = "invalid vf specified";
+ errcode = EINVAL;
+ goto err_exit;
+ }
+ pf_func &= (0xfc00);
+ pf_func = (pf_func | (vf_id + 1));
+ }
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ /* Applicable only to ingress flow */
+ act_q = (const struct rte_flow_action_queue *)
+ actions->conf;
+ rq = act_q->index;
+ if (rq >= dev->data->nb_rx_queues) {
+ errmsg = "invalid queue index";
+ errcode = EINVAL;
+ goto err_exit;
+ }
+ req_act |= OTX2_FLOW_ACT_QUEUE;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ errcode = parse_rss_action(dev, attr, actions, error);
+ if (errcode)
+ return -rte_errno;
+
+ req_act |= OTX2_FLOW_ACT_RSS;
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_SECURITY:
+ /* Assumes user has already configured security
+ * session for this flow. Associated conf is
+ * opaque. When RTE security is implemented for otx2,
+ * we need to verify that for specified security
+ * session:
+ * action_type ==
+ * RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL &&
+ * session_protocol ==
+ * RTE_SECURITY_PROTOCOL_IPSEC
+ *
+ * RSS is not supported with inline ipsec. Get the
+ * rq from associated conf, or make
+ * RTE_FLOW_ACTION_TYPE_QUEUE compulsory with this
+ * action.
+ * Currently, rq = 0 is assumed.
+ */
+ req_act |= OTX2_FLOW_ACT_SEC;
+ rq = 0;
+ break;
+ default:
+ errmsg = "Unsupported action specified";
+ errcode = ENOTSUP;
+ goto err_exit;
+ }
+ }
+
+ /* Check if actions specified are compatible */
+ if (attr->egress) {
+ /* Only DROP/COUNT is supported */
+ if (!(req_act & OTX2_FLOW_ACT_DROP)) {
+ errmsg = "DROP is required action for egress";
+ errcode = EINVAL;
+ goto err_exit;
+ } else if (req_act & ~(OTX2_FLOW_ACT_DROP |
+ OTX2_FLOW_ACT_COUNT)) {
+ errmsg = "Unsupported action specified";
+ errcode = ENOTSUP;
+ goto err_exit;
+ }
+ flow->npc_action = NIX_TX_ACTIONOP_DROP;
+ goto set_pf_func;
+ }
+
+ /* We have already verified the attr; this is ingress.
+ * - Exactly one terminating action is supported.
+ * - At most one of MARK or FLAG is supported.
+ * - If the terminating action is DROP, only COUNT is valid
+ * with it.
+ */
+ sel_act = req_act & OTX2_FLOW_ACT_TERM;
+ if ((sel_act & (sel_act - 1)) != 0) {
+ errmsg = "Only one terminating action supported";
+ errcode = EINVAL;
+ goto err_exit;
+ }
+
+ if (req_act & OTX2_FLOW_ACT_DROP) {
+ sel_act = req_act & ~OTX2_FLOW_ACT_COUNT;
+ if ((sel_act & (sel_act - 1)) != 0) {
+ errmsg = "Only COUNT action is supported "
+ "with DROP ingress action";
+ errcode = ENOTSUP;
+ goto err_exit;
+ }
+ }
+
+ if ((req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK))
+ == (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
+ errmsg = "Only one of FLAG or MARK action is supported";
+ errcode = ENOTSUP;
+ goto err_exit;
+ }
+
+ /* Set NIX_RX_ACTIONOP */
+ if (req_act & (OTX2_FLOW_ACT_PF | OTX2_FLOW_ACT_VF)) {
+ flow->npc_action = NIX_RX_ACTIONOP_UCAST;
+ if (req_act & OTX2_FLOW_ACT_QUEUE)
+ flow->npc_action |= (uint64_t)rq << 20;
+ } else if (req_act & OTX2_FLOW_ACT_DROP) {
+ flow->npc_action = NIX_RX_ACTIONOP_DROP;
+ } else if (req_act & OTX2_FLOW_ACT_QUEUE) {
+ flow->npc_action = NIX_RX_ACTIONOP_UCAST;
+ flow->npc_action |= (uint64_t)rq << 20;
+ } else if (req_act & OTX2_FLOW_ACT_RSS) {
+ /* When user added a rule for rss, first we will add the
+ *rule in MCAM and then update the action, once if we have
+ *FLOW_KEY_ALG index. So, till we update the action with
+ *flow_key_alg index, set the action to drop.
+ */
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ flow->npc_action = NIX_RX_ACTIONOP_DROP;
+ else
+ flow->npc_action = NIX_RX_ACTIONOP_UCAST;
+ } else if (req_act & OTX2_FLOW_ACT_SEC) {
+ flow->npc_action = NIX_RX_ACTIONOP_UCAST_IPSEC;
+ flow->npc_action |= (uint64_t)rq << 20;
+ } else if (req_act & (OTX2_FLOW_ACT_FLAG | OTX2_FLOW_ACT_MARK)) {
+ flow->npc_action = NIX_RX_ACTIONOP_UCAST;
+ } else if (req_act & OTX2_FLOW_ACT_COUNT) {
+ /* Keep OTX2_FLOW_ACT_COUNT always at the end.
+ * This is the default action when the user specifies
+ * only the COUNT action.
+ */
+ flow->npc_action = NIX_RX_ACTIONOP_UCAST;
+ } else {
+ /* Should never reach here */
+ errmsg = "Invalid action specified";
+ errcode = EINVAL;
+ goto err_exit;
+ }
+
+ if (mark)
+ flow->npc_action |= (uint64_t)mark << 40;
+
+ if (rte_atomic32_read(&npc->mark_actions) == 1) {
+ hw->rx_offload_flags |=
+ NIX_RX_OFFLOAD_MARK_UPDATE_F;
+ otx2_eth_set_rx_function(dev);
+ }
+
+set_pf_func:
+ /* Ideally AF must ensure that correct pf_func is set */
+ flow->npc_action |= (uint64_t)pf_func << 4;
+
+ return 0;
+
+err_exit:
+ rte_flow_error_set(error, errcode,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+ errmsg);
+ return -rte_errno;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_utils.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_utils.c
new file mode 100644
index 000000000..14625c9ad
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_flow_utils.c
@@ -0,0 +1,959 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_ethdev.h"
+#include "otx2_flow.h"
+
+static int
+flow_mcam_alloc_counter(struct otx2_mbox *mbox, uint16_t *ctr)
+{
+ struct npc_mcam_alloc_counter_req *req;
+ struct npc_mcam_alloc_counter_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_counter(mbox);
+ req->count = 1;
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
+
+ *ctr = rsp->cntr_list[0];
+ return rc;
+}
+
+int
+otx2_flow_mcam_free_counter(struct otx2_mbox *mbox, uint16_t ctr_id)
+{
+ struct npc_mcam_oper_counter_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_free_counter(mbox);
+ req->cntr = ctr_id;
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, NULL);
+
+ return rc;
+}
+
+int
+otx2_flow_mcam_read_counter(struct otx2_mbox *mbox, uint32_t ctr_id,
+ uint64_t *count)
+{
+ struct npc_mcam_oper_counter_req *req;
+ struct npc_mcam_oper_counter_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_counter_stats(mbox);
+ req->cntr = ctr_id;
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
+
+ *count = rsp->stat;
+ return rc;
+}
+
+int
+otx2_flow_mcam_clear_counter(struct otx2_mbox *mbox, uint32_t ctr_id)
+{
+ struct npc_mcam_oper_counter_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_clear_counter(mbox);
+ req->cntr = ctr_id;
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, NULL);
+
+ return rc;
+}
+
+int
+otx2_flow_mcam_free_entry(struct otx2_mbox *mbox, uint32_t entry)
+{
+ struct npc_mcam_free_entry_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
+ req->entry = entry;
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, NULL);
+
+ return rc;
+}
+
+int
+otx2_flow_mcam_free_all_entries(struct otx2_mbox *mbox)
+{
+ struct npc_mcam_free_entry_req *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
+ req->all = 1;
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, NULL);
+
+ return rc;
+}
+
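+/* Byte-reverse 'len' bytes of 'data' into 'ptr' for MCAM key programming */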
+static void
+flow_prep_mcam_ldata(uint8_t *ptr, const uint8_t *data, int len)
+{
+ int idx;
+
+ for (idx = 0; idx < len; idx++)
+ ptr[idx] = data[len - 1 - idx];
+}
+
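+/* Return 'len' if it fits within 'size', otherwise -1 */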
+static int
+flow_check_copysz(size_t size, size_t len)
+{
+ if (len <= size)
+ return len;
+ return -1;
+}
+
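+/* Return 1 if the 'len' bytes at 'mem' are all zero, 0 otherwise */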
+static inline int
+flow_mem_is_zero(const void *mem, int len)
+{
+ const char *m = mem;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ if (m[i] != 0)
+ return 0;
+ }
+ return 1;
+}
+
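+/*
+ * For a single layer-data extractor, mark as matchable (0xff) the bytes
+ * of 'hw_mask' that the extractor covers within the protocol header,
+ * after discounting info->hw_hdr_len.
+ */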
+static void
+flow_set_hw_mask(struct otx2_flow_item_info *info,
+ struct npc_xtract_info *xinfo,
+ char *hw_mask)
+{
+ int max_off, offset;
+ int j;
+
+ if (xinfo->enable == 0)
+ return;
+
+ if (xinfo->hdr_off < info->hw_hdr_len)
+ return;
+
+ max_off = xinfo->hdr_off + xinfo->len - info->hw_hdr_len;
+
+ if (max_off > info->len)
+ max_off = info->len;
+
+ offset = xinfo->hdr_off - info->hw_hdr_len;
+ for (j = offset; j < max_off; j++)
+ hw_mask[j] = 0xff;
+}
+
+void
+otx2_flow_get_hw_supp_mask(struct otx2_parse_state *pst,
+ struct otx2_flow_item_info *info, int lid, int lt)
+{
+ struct npc_xtract_info *xinfo, *lfinfo;
+ char *hw_mask = info->hw_mask;
+ int lf_cfg;
+ int i, j;
+ int intf;
+
+ intf = pst->flow->nix_intf;
+ xinfo = pst->npc->prx_dxcfg[intf][lid][lt].xtract;
+ memset(hw_mask, 0, info->len);
+
+ for (i = 0; i < NPC_MAX_LD; i++) {
+ flow_set_hw_mask(info, &xinfo[i], hw_mask);
+ }
+
+ for (i = 0; i < NPC_MAX_LD; i++) {
+
+ if (xinfo[i].flags_enable == 0)
+ continue;
+
+ lf_cfg = pst->npc->prx_lfcfg[i].i;
+ if (lf_cfg == lid) {
+ for (j = 0; j < NPC_MAX_LFL; j++) {
+ lfinfo = pst->npc->prx_fxcfg[intf]
+ [i][j].xtract;
+ flow_set_hw_mask(info, &lfinfo[0], hw_mask);
+ }
+ }
+ }
+}
+
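+/*
+ * Write the byte-reversed spec and mask covered by one extractor into
+ * pst->mcam_data/pst->mcam_mask at the extractor's key offset, failing
+ * if that key region was already written by a previous layer.
+ */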
+static int
+flow_update_extraction_data(struct otx2_parse_state *pst,
+ struct otx2_flow_item_info *info,
+ struct npc_xtract_info *xinfo)
+{
+ uint8_t int_info_mask[NPC_MAX_EXTRACT_DATA_LEN];
+ uint8_t int_info[NPC_MAX_EXTRACT_DATA_LEN];
+ struct npc_xtract_info *x;
+ int k, idx, hdr_off;
+ int len = 0;
+
+ x = xinfo;
+ len = x->len;
+ hdr_off = x->hdr_off;
+
+ if (hdr_off < info->hw_hdr_len)
+ return 0;
+
+ if (x->enable == 0)
+ return 0;
+
+ otx2_npc_dbg("x->hdr_off = %d, len = %d, info->len = %d,"
+ "x->key_off = %d", x->hdr_off, len, info->len,
+ x->key_off);
+
+ hdr_off -= info->hw_hdr_len;
+
+ if (hdr_off + len > info->len)
+ len = info->len - hdr_off;
+
+ /* Check for over-write of previous layer */
+ if (!flow_mem_is_zero(pst->mcam_mask + x->key_off,
+ len)) {
+ /* Cannot support this data match */
+ rte_flow_error_set(pst->error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pst->pattern,
+ "Extraction unsupported");
+ return -rte_errno;
+ }
+
+ len = flow_check_copysz((OTX2_MAX_MCAM_WIDTH_DWORDS * 8)
+ - x->key_off,
+ len);
+ if (len < 0) {
+ rte_flow_error_set(pst->error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pst->pattern,
+ "Internal Error");
+ return -rte_errno;
+ }
+
+ /* Reverse the complete structure so that the destination address
+ * ends up at the MSB, as required to program the MCAM via the
+ * mcam_data & mcam_mask arrays.
+ */
+ flow_prep_mcam_ldata(int_info,
+ (const uint8_t *)info->spec + hdr_off,
+ x->len);
+ flow_prep_mcam_ldata(int_info_mask,
+ (const uint8_t *)info->mask + hdr_off,
+ x->len);
+
+ otx2_npc_dbg("Spec: ");
+ for (k = 0; k < info->len; k++)
+ otx2_npc_dbg("0x%.2x ",
+ ((const uint8_t *)info->spec)[k]);
+
+ otx2_npc_dbg("Int_info: ");
+ for (k = 0; k < info->len; k++)
+ otx2_npc_dbg("0x%.2x ", int_info[k]);
+
+ memcpy(pst->mcam_mask + x->key_off, int_info_mask, len);
+ memcpy(pst->mcam_data + x->key_off, int_info, len);
+
+ otx2_npc_dbg("Parse state mcam data & mask");
+ for (idx = 0; idx < len ; idx++)
+ otx2_npc_dbg("data[%d]: 0x%x, mask[%d]: 0x%x", idx,
+ *(pst->mcam_data + idx + x->key_off), idx,
+ *(pst->mcam_mask + idx + x->key_off));
+ return 0;
+}
+
+int
+otx2_flow_update_parse_state(struct otx2_parse_state *pst,
+ struct otx2_flow_item_info *info, int lid, int lt,
+ uint8_t flags)
+{
+ struct npc_lid_lt_xtract_info *xinfo;
+ struct npc_xtract_info *lfinfo;
+ int intf, lf_cfg;
+ int i, j, rc = 0;
+
+ otx2_npc_dbg("Parse state function info mask total %s",
+ (const uint8_t *)info->mask);
+
+ pst->layer_mask |= lid;
+ pst->lt[lid] = lt;
+ pst->flags[lid] = flags;
+
+ intf = pst->flow->nix_intf;
+ xinfo = &pst->npc->prx_dxcfg[intf][lid][lt];
+ otx2_npc_dbg("Is_terminating = %d", xinfo->is_terminating);
+ if (xinfo->is_terminating)
+ pst->terminate = 1;
+
+ if (info->spec == NULL) {
+ otx2_npc_dbg("Info spec NULL");
+ goto done;
+ }
+
+ for (i = 0; i < NPC_MAX_LD; i++) {
+ rc = flow_update_extraction_data(pst, info, &xinfo->xtract[i]);
+ if (rc != 0)
+ return rc;
+ }
+
+ for (i = 0; i < NPC_MAX_LD; i++) {
+ if (xinfo->xtract[i].flags_enable == 0)
+ continue;
+
+ lf_cfg = pst->npc->prx_lfcfg[i].i;
+ if (lf_cfg == lid) {
+ for (j = 0; j < NPC_MAX_LFL; j++) {
+ lfinfo = pst->npc->prx_fxcfg[intf]
+ [i][j].xtract;
+ rc = flow_update_extraction_data(pst, info,
+ &lfinfo[0]);
+ if (rc != 0)
+ return rc;
+
+ if (lfinfo[0].enable)
+ pst->flags[lid] = j;
+ }
+ }
+ }
+
+done:
+ /* Next pattern to parse by subsequent layers */
+ pst->pattern++;
+ return 0;
+}
+
+static inline int
+flow_range_is_valid(const char *spec, const char *last, const char *mask,
+ int len)
+{
+ /* Mask must be zero or equal to spec as we do not support
+ * non-contiguous ranges.
+ */
+ while (len--) {
+ if (last[len] &&
+ (spec[len] & mask[len]) != (last[len] & mask[len]))
+ return 0; /* False */
+ }
+ return 1;
+}
+
+
+static inline int
+flow_mask_is_supported(const char *mask, const char *hw_mask, int len)
+{
+ /*
+ * If no hw_mask, assume nothing is supported.
+ * mask is never NULL
+ */
+ if (hw_mask == NULL)
+ return flow_mem_is_zero(mask, len);
+
+ while (len--) {
+ if ((mask[len] | hw_mask[len]) != hw_mask[len])
+ return 0; /* False */
+ }
+ return 1;
+}
+
+int
+otx2_flow_parse_item_basic(const struct rte_flow_item *item,
+ struct otx2_flow_item_info *info,
+ struct rte_flow_error *error)
+{
+ /* Item must not be NULL */
+ if (item == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Item is NULL");
+ return -rte_errno;
+ }
+ /* If spec is NULL, both mask and last must also be NULL; this
+ * makes the item match ANY value (equivalent to mask = 0).
+ * Setting either mask or last without spec is an error.
+ */
+ if (item->spec == NULL) {
+ if (item->last == NULL && item->mask == NULL) {
+ info->spec = NULL;
+ return 0;
+ }
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "mask or last set without spec");
+ return -rte_errno;
+ }
+
+ /* We have valid spec */
+ info->spec = item->spec;
+
+ /* If mask is not set, use default mask, err if default mask is
+ * also NULL.
+ */
+ if (item->mask == NULL) {
+ otx2_npc_dbg("Item mask null, using default mask");
+ if (info->def_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "No mask or default mask given");
+ return -rte_errno;
+ }
+ info->mask = info->def_mask;
+ } else {
+ info->mask = item->mask;
+ }
+
+ /* The specified mask must be a subset of the HW-supported mask,
+ * i.e. mask | hw_mask == hw_mask
+ */
+ if (!flow_mask_is_supported(info->mask, info->hw_mask, info->len)) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Unsupported field in the mask");
+ return -rte_errno;
+ }
+
+ /* Now we have spec and mask. OTX2 does not support non-contiguous
+ * range. We should have either:
+ * - spec & mask == last & mask or,
+ * - last == 0 or,
+ * - last == NULL
+ */
+ if (item->last != NULL && !flow_mem_is_zero(item->last, info->len)) {
+ if (!flow_range_is_valid(item->spec, item->last, info->mask,
+ info->len)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported range for match");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+void
+otx2_flow_keyx_compress(uint64_t *data, uint32_t nibble_mask)
+{
+ uint64_t cdata[2] = {0ULL, 0ULL}, nibble;
+ int i, j = 0;
+
+ for (i = 0; i < NPC_MAX_KEY_NIBBLES; i++) {
+ if (nibble_mask & (1 << i)) {
+ nibble = (data[i / 16] >> ((i & 0xf) * 4)) & 0xf;
+ cdata[j / 16] |= (nibble << ((j & 0xf) * 4));
+ j += 1;
+ }
+ }
+
+ data[0] = cdata[0];
+ data[1] = cdata[1];
+}
+
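+/* Return the position of the least significant set bit in a non-zero slab */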
+static int
+flow_first_set_bit(uint64_t slab)
+{
+ int num = 0;
+
+ if ((slab & 0xffffffff) == 0) {
+ num += 32;
+ slab >>= 32;
+ }
+ if ((slab & 0xffff) == 0) {
+ num += 16;
+ slab >>= 16;
+ }
+ if ((slab & 0xff) == 0) {
+ num += 8;
+ slab >>= 8;
+ }
+ if ((slab & 0xf) == 0) {
+ num += 4;
+ slab >>= 4;
+ }
+ if ((slab & 0x3) == 0) {
+ num += 2;
+ slab >>= 2;
+ }
+ if ((slab & 0x1) == 0)
+ num += 1;
+
+ return num;
+}
+
+static int
+flow_shift_lv_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
+ struct otx2_npc_flow_info *flow_info,
+ uint32_t old_ent, uint32_t new_ent)
+{
+ struct npc_mcam_shift_entry_req *req;
+ struct npc_mcam_shift_entry_rsp *rsp;
+ struct otx2_flow_list *list;
+ struct rte_flow *flow_iter;
+ int rc = 0;
+
+ otx2_npc_dbg("Old ent:%u new ent:%u priority:%u", old_ent, new_ent,
+ flow->priority);
+
+ list = &flow_info->flow_list[flow->priority];
+
+ /* The old entry is disabled and its contents are moved to
+ * new_entry; finally, the new entry is enabled.
+ */
+ req = otx2_mbox_alloc_msg_npc_mcam_shift_entry(mbox);
+ req->curr_entry[0] = old_ent;
+ req->new_entry[0] = new_ent;
+ req->shift_count = 1;
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ /* Remove old node from list */
+ TAILQ_FOREACH(flow_iter, list, next) {
+ if (flow_iter->mcam_id == old_ent)
+ TAILQ_REMOVE(list, flow_iter, next);
+ }
+
+ /* Insert node with new mcam id at right place */
+ TAILQ_FOREACH(flow_iter, list, next) {
+ if (flow_iter->mcam_id > new_ent)
+ TAILQ_INSERT_BEFORE(flow_iter, flow, next);
+ }
+ return rc;
+}
+
+/* Exchange all required entries with a given priority level */
+static int
+flow_shift_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
+ struct otx2_npc_flow_info *flow_info,
+ struct npc_mcam_alloc_entry_rsp *rsp, int dir, int prio_lvl)
+{
+ struct rte_bitmap *fr_bmp, *fr_bmp_rev, *lv_bmp, *lv_bmp_rev, *bmp;
+ uint32_t e_fr = 0, e_lv = 0, e, e_id = 0, mcam_entries;
+ /* Bit position within the slab */
+ uint64_t fr_bit_pos = 0, lv_bit_pos = 0, bit_pos = 0;
+ /* Overall bit position of the start of the slab */
+ uint32_t sl_fr_bit_off = 0, sl_lv_bit_off = 0;
+ /* Results of the free & live bitmap scans */
+ int rc_fr = 0, rc_lv = 0, rc = 0, idx = 0;
+ struct otx2_mcam_ents_info *ent_info;
+ /* free & live bitmap slab */
+ uint64_t sl_fr = 0, sl_lv = 0, *sl;
+
+ fr_bmp = flow_info->free_entries[prio_lvl];
+ fr_bmp_rev = flow_info->free_entries_rev[prio_lvl];
+ lv_bmp = flow_info->live_entries[prio_lvl];
+ lv_bmp_rev = flow_info->live_entries_rev[prio_lvl];
+ ent_info = &flow_info->flow_entry_info[prio_lvl];
+ mcam_entries = flow_info->mcam_entries;
+
+
+ /* Newly allocated entries are always contiguous, but older entries
+ * already in the free/live bitmaps can be non-contiguous, so the
+ * shifted entries are returned in non-contiguous format.
+ */
+ while (idx <= rsp->count) {
+ if (!sl_fr && !sl_lv) {
+ /* Lower index elements to be exchanged */
+ if (dir < 0) {
+ rc_fr = rte_bitmap_scan(fr_bmp, &e_fr, &sl_fr);
+ rc_lv = rte_bitmap_scan(lv_bmp, &e_lv, &sl_lv);
+ otx2_npc_dbg("Fwd slab rc fr %u rc lv %u "
+ "e_fr %u e_lv %u", rc_fr, rc_lv,
+ e_fr, e_lv);
+ } else {
+ rc_fr = rte_bitmap_scan(fr_bmp_rev,
+ &sl_fr_bit_off,
+ &sl_fr);
+ rc_lv = rte_bitmap_scan(lv_bmp_rev,
+ &sl_lv_bit_off,
+ &sl_lv);
+
+ otx2_npc_dbg("Rev slab rc fr %u rc lv %u "
+ "e_fr %u e_lv %u", rc_fr, rc_lv,
+ e_fr, e_lv);
+ }
+ }
+
+ if (rc_fr) {
+ fr_bit_pos = flow_first_set_bit(sl_fr);
+ e_fr = sl_fr_bit_off + fr_bit_pos;
+ otx2_npc_dbg("Fr_bit_pos 0x%" PRIx64, fr_bit_pos);
+ } else {
+ e_fr = ~(0);
+ }
+
+ if (rc_lv) {
+ lv_bit_pos = flow_first_set_bit(sl_lv);
+ e_lv = sl_lv_bit_off + lv_bit_pos;
+ otx2_npc_dbg("Lv_bit_pos 0x%" PRIx64, lv_bit_pos);
+ } else {
+ e_lv = ~(0);
+ }
+
+ /* First entry is from free_bmap */
+ if (e_fr < e_lv) {
+ bmp = fr_bmp;
+ e = e_fr;
+ sl = &sl_fr;
+ bit_pos = fr_bit_pos;
+ if (dir > 0)
+ e_id = mcam_entries - e - 1;
+ else
+ e_id = e;
+ otx2_npc_dbg("Fr e %u e_id %u", e, e_id);
+ } else {
+ bmp = lv_bmp;
+ e = e_lv;
+ sl = &sl_lv;
+ bit_pos = lv_bit_pos;
+ if (dir > 0)
+ e_id = mcam_entries - e - 1;
+ else
+ e_id = e;
+
+ otx2_npc_dbg("Lv e %u e_id %u", e, e_id);
+ if (idx < rsp->count)
+ rc =
+ flow_shift_lv_ent(mbox, flow,
+ flow_info, e_id,
+ rsp->entry + idx);
+ }
+
+ rte_bitmap_clear(bmp, e);
+ rte_bitmap_set(bmp, rsp->entry + idx);
+ /* Update entry list, use non-contiguous
+ * list now.
+ */
+ rsp->entry_list[idx] = e_id;
+ *sl &= ~(1 << bit_pos);
+
+ /* Update min & max entry identifiers in current
+ * priority level.
+ */
+ if (dir < 0) {
+ ent_info->max_id = rsp->entry + idx;
+ ent_info->min_id = e_id;
+ } else {
+ ent_info->max_id = e_id;
+ ent_info->min_id = rsp->entry;
+ }
+
+ idx++;
+ }
+ return rc;
+}
+
+/* Validate that newly allocated entries lie in the correct priority zone,
+ * since NPC_MCAM_LOWER_PRIO & NPC_MCAM_HIGHER_PRIO don't guarantee zone
+ * accuracy. If they are not properly placed, shift entries until they are.
+ */
+static int
+flow_validate_and_shift_prio_ent(struct otx2_mbox *mbox, struct rte_flow *flow,
+ struct otx2_npc_flow_info *flow_info,
+ struct npc_mcam_alloc_entry_rsp *rsp,
+ int req_prio)
+{
+ int prio_idx = 0, rc = 0, needs_shift = 0, idx, prio = flow->priority;
+ struct otx2_mcam_ents_info *info = flow_info->flow_entry_info;
+ int dir = (req_prio == NPC_MCAM_HIGHER_PRIO) ? 1 : -1;
+ uint32_t tot_ent = 0;
+
+ otx2_npc_dbg("Dir %d, priority = %d", dir, prio);
+
+ if (dir < 0)
+ prio_idx = flow_info->flow_max_priority - 1;
+
+ /* Only live entries need to be shifted; free entries can simply be
+ * moved by bit manipulation.
+ */
+
+ /* For dir = -1 (NPC_MCAM_LOWER_PRIO), when shifting,
+ * NPC_MAX_PREALLOC_ENT entries are exchanged with the adjoining
+ * higher priority level entries (lower indexes).
+ *
+ * For dir = +1 (NPC_MCAM_HIGHER_PRIO), during the shift,
+ * NPC_MAX_PREALLOC_ENT entries are exchanged with the adjoining
+ * lower priority level entries (higher indexes).
+ */
+ do {
+ tot_ent = info[prio_idx].free_ent + info[prio_idx].live_ent;
+
+ if (dir < 0 && prio_idx != prio &&
+ rsp->entry > info[prio_idx].max_id && tot_ent) {
+ otx2_npc_dbg("Rsp entry %u prio idx %u "
+ "max id %u", rsp->entry, prio_idx,
+ info[prio_idx].max_id);
+
+ needs_shift = 1;
+ } else if ((dir > 0) && (prio_idx != prio) &&
+ (rsp->entry < info[prio_idx].min_id) && tot_ent) {
+ otx2_npc_dbg("Rsp entry %u prio idx %u "
+ "min id %u", rsp->entry, prio_idx,
+ info[prio_idx].min_id);
+ needs_shift = 1;
+ }
+
+ otx2_npc_dbg("Needs_shift = %d", needs_shift);
+ if (needs_shift) {
+ needs_shift = 0;
+ rc = flow_shift_ent(mbox, flow, flow_info, rsp, dir,
+ prio_idx);
+ } else {
+ for (idx = 0; idx < rsp->count; idx++)
+ rsp->entry_list[idx] = rsp->entry + idx;
+ }
+ } while ((prio_idx != prio) && (prio_idx += dir));
+
+ return rc;
+}
+
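+/*
+ * Search nearby priority levels for one that already has live entries and
+ * return its min or max entry id as the reference, setting *prio to
+ * NPC_MCAM_HIGHER_PRIO or NPC_MCAM_LOWER_PRIO accordingly. Fall back to
+ * NPC_MCAM_ANY_PRIO when nothing has been allocated yet.
+ */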
+static int
+flow_find_ref_entry(struct otx2_npc_flow_info *flow_info, int *prio,
+ int prio_lvl)
+{
+ struct otx2_mcam_ents_info *info = flow_info->flow_entry_info;
+ int step = 1;
+
+ while (step < flow_info->flow_max_priority) {
+ if (((prio_lvl + step) < flow_info->flow_max_priority) &&
+ info[prio_lvl + step].live_ent) {
+ *prio = NPC_MCAM_HIGHER_PRIO;
+ return info[prio_lvl + step].min_id;
+ }
+
+ if (((prio_lvl - step) >= 0) &&
+ info[prio_lvl - step].live_ent) {
+ otx2_npc_dbg("Prio_lvl %u live %u", prio_lvl - step,
+ info[prio_lvl - step].live_ent);
+ *prio = NPC_MCAM_LOWER_PRIO;
+ return info[prio_lvl - step].max_id;
+ }
+ step++;
+ }
+ *prio = NPC_MCAM_ANY_PRIO;
+ return 0;
+}
+
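+/*
+ * Request a contiguous batch of flow_prealloc_size MCAM entries from AF
+ * relative to the reference entry, shift them into the correct priority
+ * zone if required, return one entry via 'free_ent' and park the rest in
+ * the free-entry bitmaps.
+ */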
+static int
+flow_fill_entry_cache(struct otx2_mbox *mbox, struct rte_flow *flow,
+ struct otx2_npc_flow_info *flow_info, uint32_t *free_ent)
+{
+ struct rte_bitmap *free_bmp, *free_bmp_rev, *live_bmp, *live_bmp_rev;
+ struct npc_mcam_alloc_entry_rsp rsp_local;
+ struct npc_mcam_alloc_entry_rsp *rsp_cmd;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ struct otx2_mcam_ents_info *info;
+ uint16_t ref_ent, idx;
+ int rc, prio;
+
+ info = &flow_info->flow_entry_info[flow->priority];
+ free_bmp = flow_info->free_entries[flow->priority];
+ free_bmp_rev = flow_info->free_entries_rev[flow->priority];
+ live_bmp = flow_info->live_entries[flow->priority];
+ live_bmp_rev = flow_info->live_entries_rev[flow->priority];
+
+ ref_ent = flow_find_ref_entry(flow_info, &prio, flow->priority);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
+ req->contig = 1;
+ req->count = flow_info->flow_prealloc_size;
+ req->priority = prio;
+ req->ref_entry = ref_ent;
+
+ otx2_npc_dbg("Fill cache ref entry %u prio %u", ref_ent, prio);
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp_cmd);
+ if (rc)
+ return rc;
+
+ rsp = &rsp_local;
+ memcpy(rsp, rsp_cmd, sizeof(*rsp));
+
+ otx2_npc_dbg("Alloc entry %u count %u , prio = %d", rsp->entry,
+ rsp->count, prio);
+
+ /* Non-first ent cache fill */
+ if (prio != NPC_MCAM_ANY_PRIO) {
+ flow_validate_and_shift_prio_ent(mbox, flow, flow_info, rsp,
+ prio);
+ } else {
+ /* Copy into response entry list */
+ for (idx = 0; idx < rsp->count; idx++)
+ rsp->entry_list[idx] = rsp->entry + idx;
+ }
+
+ otx2_npc_dbg("Fill entry cache rsp count %u", rsp->count);
+ /* Update free entries, reverse free entries list,
+ * min & max entry ids.
+ */
+ for (idx = 0; idx < rsp->count; idx++) {
+ if (unlikely(rsp->entry_list[idx] < info->min_id))
+ info->min_id = rsp->entry_list[idx];
+
+ if (unlikely(rsp->entry_list[idx] > info->max_id))
+ info->max_id = rsp->entry_list[idx];
+
+ /* Skip the entry to be returned; it must not be part of the
+ * free list.
+ */
+ if (prio == NPC_MCAM_HIGHER_PRIO) {
+ if (unlikely(idx == (rsp->count - 1))) {
+ *free_ent = rsp->entry_list[idx];
+ continue;
+ }
+ } else {
+ if (unlikely(!idx)) {
+ *free_ent = rsp->entry_list[idx];
+ continue;
+ }
+ }
+ info->free_ent++;
+ rte_bitmap_set(free_bmp, rsp->entry_list[idx]);
+ rte_bitmap_set(free_bmp_rev, flow_info->mcam_entries -
+ rsp->entry_list[idx] - 1);
+
+ otx2_npc_dbg("Final rsp entry %u rsp entry rev %u",
+ rsp->entry_list[idx],
+ flow_info->mcam_entries - rsp->entry_list[idx] - 1);
+ }
+
+ otx2_npc_dbg("Cache free entry %u, rev = %u", *free_ent,
+ flow_info->mcam_entries - *free_ent - 1);
+ info->live_ent++;
+ rte_bitmap_set(live_bmp, *free_ent);
+ rte_bitmap_set(live_bmp_rev, flow_info->mcam_entries - *free_ent - 1);
+
+ return 0;
+}
+
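+/*
+ * Return a free MCAM entry for this flow's priority: take one from the
+ * preallocated cache if available, otherwise refill the cache via mbox.
+ */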
+static int
+flow_check_preallocated_entry_cache(struct otx2_mbox *mbox,
+ struct rte_flow *flow,
+ struct otx2_npc_flow_info *flow_info)
+{
+ struct rte_bitmap *free, *free_rev, *live, *live_rev;
+ uint32_t pos = 0, free_ent = 0, mcam_entries;
+ struct otx2_mcam_ents_info *info;
+ uint64_t slab = 0;
+ int rc;
+
+ otx2_npc_dbg("Flow priority %u", flow->priority);
+
+ info = &flow_info->flow_entry_info[flow->priority];
+
+ free_rev = flow_info->free_entries_rev[flow->priority];
+ free = flow_info->free_entries[flow->priority];
+ live_rev = flow_info->live_entries_rev[flow->priority];
+ live = flow_info->live_entries[flow->priority];
+ mcam_entries = flow_info->mcam_entries;
+
+ if (info->free_ent) {
+ rc = rte_bitmap_scan(free, &pos, &slab);
+ if (rc) {
+ /* Get free_ent from free entry bitmap */
+ free_ent = pos + __builtin_ctzll(slab);
+ otx2_npc_dbg("Allocated from cache entry %u", free_ent);
+ /* Remove from free bitmaps and add to live ones */
+ rte_bitmap_clear(free, free_ent);
+ rte_bitmap_set(live, free_ent);
+ rte_bitmap_clear(free_rev,
+ mcam_entries - free_ent - 1);
+ rte_bitmap_set(live_rev,
+ mcam_entries - free_ent - 1);
+
+ info->free_ent--;
+ info->live_ent++;
+ return free_ent;
+ }
+
+ otx2_npc_dbg("No free entry:its a mess");
+ return -1;
+ }
+
+ rc = flow_fill_entry_cache(mbox, flow, flow_info, &free_ent);
+ if (rc)
+ return rc;
+
+ return free_ent;
+}
+
+int
+otx2_flow_mcam_alloc_and_write(struct rte_flow *flow, struct otx2_mbox *mbox,
+ __rte_unused struct otx2_parse_state *pst,
+ struct otx2_npc_flow_info *flow_info)
+{
+ int use_ctr = (flow->ctr_id == NPC_COUNTER_NONE ? 0 : 1);
+ struct npc_mcam_write_entry_req *req;
+ struct mbox_msghdr *rsp;
+ uint16_t ctr = ~(0);
+ int rc, idx;
+ int entry;
+
+ if (use_ctr) {
+ rc = flow_mcam_alloc_counter(mbox, &ctr);
+ if (rc)
+ return rc;
+ }
+
+ entry = flow_check_preallocated_entry_cache(mbox, flow, flow_info);
+ if (entry < 0) {
+ otx2_err("Prealloc failed");
+ otx2_flow_mcam_free_counter(mbox, ctr);
+ return NPC_MCAM_ALLOC_FAILED;
+ }
+ req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
+ req->set_cntr = use_ctr;
+ req->cntr = ctr;
+ req->entry = entry;
+ otx2_npc_dbg("Alloc & write entry %u", entry);
+
+ req->intf =
+ (flow->nix_intf == OTX2_INTF_RX) ? NPC_MCAM_RX : NPC_MCAM_TX;
+ req->enable_entry = 1;
+ req->entry_data.action = flow->npc_action;
+
+ /*
+ * DPDK sets vtag action on per interface basis, not
+ * per flow basis. It is a matter of how we decide to support
+ * this pmd specific behavior. There are two ways:
+ * 1. Inherit the vtag action from the one configured
+ * for this interface. This can be read from the
+ * vtag_action configured for default mcam entry of
+ * this pf_func.
+ * 2. Do not support vtag action with rte_flow.
+ *
+ * Second approach is used now.
+ */
+ req->entry_data.vtag_action = 0ULL;
+
+ for (idx = 0; idx < OTX2_MAX_MCAM_WIDTH_DWORDS; idx++) {
+ req->entry_data.kw[idx] = flow->mcam_data[idx];
+ req->entry_data.kw_mask[idx] = flow->mcam_mask[idx];
+ }
+
+ if (flow->nix_intf == OTX2_INTF_RX) {
+ req->entry_data.kw[0] |= flow_info->channel;
+ req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1);
+ } else {
+ uint16_t pf_func = (flow->npc_action >> 4) & 0xffff;
+
+ pf_func = htons(pf_func);
+ req->entry_data.kw[0] |= ((uint64_t)pf_func << 32);
+ req->entry_data.kw_mask[0] |= ((uint64_t)0xffff << 32);
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
+ if (rc != 0)
+ return rc;
+
+ flow->mcam_id = entry;
+ if (use_ctr)
+ flow->ctr_id = ctr;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_link.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_link.c
new file mode 100644
index 000000000..12bf6c323
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_link.c
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+#include <rte_ethdev_pci.h>
+
+#include "otx2_ethdev.h"
+
+void
+otx2_nix_toggle_flag_link_cfg(struct otx2_eth_dev *dev, bool set)
+{
+ if (set)
+ dev->flags |= OTX2_LINK_CFG_IN_PROGRESS_F;
+ else
+ dev->flags &= ~OTX2_LINK_CFG_IN_PROGRESS_F;
+
+ rte_wmb();
+}
+
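+/* Poll for up to ~1000 ms for any in-progress link configuration to finish */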
+static inline int
+nix_wait_for_link_cfg(struct otx2_eth_dev *dev)
+{
+ uint16_t wait = 1000;
+
+ do {
+ rte_rmb();
+ if (!(dev->flags & OTX2_LINK_CFG_IN_PROGRESS_F))
+ break;
+ wait--;
+ rte_delay_ms(1);
+ } while (wait);
+
+ return wait ? 0 : -1;
+}
+
+static void
+nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
+{
+ if (link && link->link_status)
+ otx2_info("Port %d: Link Up - speed %u Mbps - %s",
+ (int)(eth_dev->data->port_id),
+ (uint32_t)link->link_speed,
+ link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ else
+ otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id));
+}
+
+void
+otx2_eth_dev_link_status_update(struct otx2_dev *dev,
+ struct cgx_link_user_info *link)
+{
+ struct otx2_eth_dev *otx2_dev = (struct otx2_eth_dev *)dev;
+ struct rte_eth_link eth_link;
+ struct rte_eth_dev *eth_dev;
+
+ if (!link || !dev)
+ return;
+
+ eth_dev = otx2_dev->eth_dev;
+ if (!eth_dev || !eth_dev->data->dev_conf.intr_conf.lsc)
+ return;
+
+ if (nix_wait_for_link_cfg(otx2_dev)) {
+ otx2_err("Timeout waiting for link_cfg to complete");
+ return;
+ }
+
+ eth_link.link_status = link->link_up;
+ eth_link.link_speed = link->speed;
+ eth_link.link_autoneg = ETH_LINK_AUTONEG;
+ eth_link.link_duplex = link->full_duplex;
+
+ otx2_dev->speed = link->speed;
+ otx2_dev->duplex = link->full_duplex;
+
+ /* Print link info */
+ nix_link_status_print(eth_dev, &eth_link);
+
+ /* Update link info */
+ rte_eth_linkstatus_set(eth_dev, &eth_link);
+
+ /* Set the flag and execute application callbacks */
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
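+/* LBK ports have no CGX link; report a fixed 100G full-duplex link as up */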
+static int
+lbk_link_update(struct rte_eth_link *link)
+{
+ link->link_status = ETH_LINK_UP;
+ link->link_speed = ETH_SPEED_NUM_100G;
+ link->link_autoneg = ETH_LINK_FIXED;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ return 0;
+}
+
+static int
+cgx_link_update(struct otx2_eth_dev *dev, struct rte_eth_link *link)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_link_info_msg *rsp;
+ int rc;
+
+ otx2_mbox_alloc_msg_cgx_get_linkinfo(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ link->link_status = rsp->link_info.link_up;
+ link->link_speed = rsp->link_info.speed;
+ link->link_autoneg = ETH_LINK_AUTONEG;
+
+ if (rsp->link_info.full_duplex)
+ link->link_duplex = rsp->link_info.full_duplex;
+ return 0;
+}
+
+int
+otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_eth_link link;
+ int rc;
+
+ RTE_SET_USED(wait_to_complete);
+ memset(&link, 0, sizeof(struct rte_eth_link));
+
+ if (otx2_dev_is_sdp(dev))
+ return 0;
+
+ if (otx2_dev_is_lbk(dev))
+ rc = lbk_link_update(&link);
+ else
+ rc = cgx_link_update(dev, &link);
+
+ if (rc)
+ return rc;
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}
+
+static int
+nix_dev_set_link_state(struct rte_eth_dev *eth_dev, uint8_t enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_set_link_state_msg *req;
+
+ req = otx2_mbox_alloc_msg_cgx_set_link_state(mbox);
+ req->enable = enable;
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_nix_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc, i;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
+
+ rc = nix_dev_set_link_state(eth_dev, 1);
+ if (rc)
+ goto done;
+
+ /* Start tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_start(eth_dev, i);
+
+done:
+ return rc;
+}
+
+int
+otx2_nix_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int i;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
+
+ /* Stop tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_stop(eth_dev, i);
+
+ return nix_dev_set_link_state(eth_dev, 0);
+}
+
+static int
+cgx_change_mode(struct otx2_eth_dev *dev, struct cgx_set_link_mode_args *cfg)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_set_link_mode_req *req;
+
+ req = otx2_mbox_alloc_msg_cgx_set_link_mode(mbox);
+ req->args.speed = cfg->speed;
+ req->args.duplex = cfg->duplex;
+ req->args.an = cfg->an;
+
+ return otx2_mbox_process(mbox);
+}
+
+#define SPEED_NONE 0
+static inline uint32_t
+nix_parse_link_speeds(struct otx2_eth_dev *dev, uint32_t link_speeds)
+{
+ uint32_t link_speed = SPEED_NONE;
+
+ /* 50G and 100G to be supported for board version C0 and above */
+ if (!otx2_dev_is_Ax(dev)) {
+ if (link_speeds & ETH_LINK_SPEED_100G)
+ link_speed = 100000;
+ if (link_speeds & ETH_LINK_SPEED_50G)
+ link_speed = 50000;
+ }
+ if (link_speeds & ETH_LINK_SPEED_40G)
+ link_speed = 40000;
+ if (link_speeds & ETH_LINK_SPEED_25G)
+ link_speed = 25000;
+ if (link_speeds & ETH_LINK_SPEED_20G)
+ link_speed = 20000;
+ if (link_speeds & ETH_LINK_SPEED_10G)
+ link_speed = 10000;
+ if (link_speeds & ETH_LINK_SPEED_5G)
+ link_speed = 5000;
+ if (link_speeds & ETH_LINK_SPEED_1G)
+ link_speed = 1000;
+
+ return link_speed;
+}
+
+static inline uint8_t
+nix_parse_eth_link_duplex(uint32_t link_speeds)
+{
+ if ((link_speeds & ETH_LINK_SPEED_10M_HD) ||
+ (link_speeds & ETH_LINK_SPEED_100M_HD))
+ return ETH_LINK_HALF_DUPLEX;
+ else
+ return ETH_LINK_FULL_DUPLEX;
+}
+
+int
+otx2_apply_link_speed(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_eth_conf *conf = &eth_dev->data->dev_conf;
+ struct cgx_set_link_mode_args cfg;
+
+ /* If VF/SDP/LBK, link attributes cannot be changed */
+ if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
+ return 0;
+
+ memset(&cfg, 0, sizeof(struct cgx_set_link_mode_args));
+ cfg.speed = nix_parse_link_speeds(dev, conf->link_speeds);
+ if (cfg.speed != SPEED_NONE && cfg.speed != dev->speed) {
+ cfg.duplex = nix_parse_eth_link_duplex(conf->link_speeds);
+ cfg.an = (conf->link_speeds & ETH_LINK_SPEED_FIXED) == 0;
+
+ return cgx_change_mode(dev, &cfg);
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_lookup.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_lookup.c
new file mode 100644
index 000000000..10944bc17
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_lookup.c
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+#include <rte_memzone.h>
+
+#include "otx2_common.h"
+#include "otx2_ethdev.h"
+
+/* NIX_RX_PARSE_S's ERRCODE + ERRLEV (12 bits) */
+#define ERRCODE_ERRLEN_WIDTH 12
+#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) *\
+ sizeof(uint32_t))
+
+#define SA_TBL_SZ (RTE_MAX_ETHPORTS * sizeof(uint64_t))
+#define LOOKUP_ARRAY_SZ (PTYPE_ARRAY_SZ + ERR_ARRAY_SZ +\
+ SA_TBL_SZ)
+
+const uint32_t *
+otx2_nix_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+{
+ RTE_SET_USED(eth_dev);
+
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER_QINQ, /* LB */
+ RTE_PTYPE_L2_ETHER_VLAN, /* LB */
+ RTE_PTYPE_L2_ETHER_TIMESYNC, /* LB */
+ RTE_PTYPE_L2_ETHER_ARP, /* LC */
+ RTE_PTYPE_L2_ETHER_NSH, /* LC */
+ RTE_PTYPE_L2_ETHER_FCOE, /* LC */
+ RTE_PTYPE_L2_ETHER_MPLS, /* LC */
+ RTE_PTYPE_L3_IPV4, /* LC */
+ RTE_PTYPE_L3_IPV4_EXT, /* LC */
+ RTE_PTYPE_L3_IPV6, /* LC */
+ RTE_PTYPE_L3_IPV6_EXT, /* LC */
+ RTE_PTYPE_L4_TCP, /* LD */
+ RTE_PTYPE_L4_UDP, /* LD */
+ RTE_PTYPE_L4_SCTP, /* LD */
+ RTE_PTYPE_L4_ICMP, /* LD */
+ RTE_PTYPE_L4_IGMP, /* LD */
+ RTE_PTYPE_TUNNEL_GRE, /* LD */
+ RTE_PTYPE_TUNNEL_ESP, /* LD */
+ RTE_PTYPE_TUNNEL_NVGRE, /* LD */
+ RTE_PTYPE_TUNNEL_VXLAN, /* LE */
+ RTE_PTYPE_TUNNEL_GENEVE, /* LE */
+ RTE_PTYPE_TUNNEL_GTPC, /* LE */
+ RTE_PTYPE_TUNNEL_GTPU, /* LE */
+ RTE_PTYPE_TUNNEL_VXLAN_GPE, /* LE */
+ RTE_PTYPE_TUNNEL_MPLS_IN_GRE, /* LE */
+ RTE_PTYPE_TUNNEL_MPLS_IN_UDP, /* LE */
+ RTE_PTYPE_INNER_L2_ETHER,/* LF */
+ RTE_PTYPE_INNER_L3_IPV4, /* LG */
+ RTE_PTYPE_INNER_L3_IPV6, /* LG */
+ RTE_PTYPE_INNER_L4_TCP, /* LH */
+ RTE_PTYPE_INNER_L4_UDP, /* LH */
+ RTE_PTYPE_INNER_L4_SCTP, /* LH */
+ RTE_PTYPE_INNER_L4_ICMP, /* LH */
+ RTE_PTYPE_UNKNOWN,
+ };
+
+ return ptypes;
+}
+
+int
+otx2_nix_ptypes_set(struct rte_eth_dev *eth_dev, uint32_t ptype_mask)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ if (ptype_mask) {
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_PTYPE_F;
+ dev->ptype_disable = 0;
+ } else {
+ dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_PTYPE_F;
+ dev->ptype_disable = 1;
+ }
+
+ otx2_eth_set_rx_function(eth_dev);
+
+ return 0;
+}
+
+/*
+ * +-------------------+-------------------+
+ * |   | IL4 | IL3 | IL2 | TU | L4 | L3 | L2 |
+ * +-------------------+-------------------+
+ *
+ * +-------------------+-------------------+
+ * |   | LH  | LG  | LF  | LE | LD | LC | LB |
+ * +-------------------+-------------------+
+ *
+ * ptype       [LE - LD - LC - LB] = TU  - L4  - L3  - L2
+ * ptype_tunnel[LH - LG - LF]      = IL4 - IL3 - IL2 - TU
+ *
+ */
+static void
+nix_create_non_tunnel_ptype_array(uint16_t *ptype)
+{
+ uint8_t lb, lc, ld, le;
+ uint16_t val;
+ uint32_t idx;
+
+ for (idx = 0; idx < PTYPE_NON_TUNNEL_ARRAY_SZ; idx++) {
+ lb = idx & 0xF;
+ lc = (idx & 0xF0) >> 4;
+ ld = (idx & 0xF00) >> 8;
+ le = (idx & 0xF000) >> 12;
+ val = RTE_PTYPE_UNKNOWN;
+
+ switch (lb) {
+ case NPC_LT_LB_STAG_QINQ:
+ val |= RTE_PTYPE_L2_ETHER_QINQ;
+ break;
+ case NPC_LT_LB_CTAG:
+ val |= RTE_PTYPE_L2_ETHER_VLAN;
+ break;
+ }
+
+ switch (lc) {
+ case NPC_LT_LC_ARP:
+ val |= RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ case NPC_LT_LC_NSH:
+ val |= RTE_PTYPE_L2_ETHER_NSH;
+ break;
+ case NPC_LT_LC_FCOE:
+ val |= RTE_PTYPE_L2_ETHER_FCOE;
+ break;
+ case NPC_LT_LC_MPLS:
+ val |= RTE_PTYPE_L2_ETHER_MPLS;
+ break;
+ case NPC_LT_LC_IP:
+ val |= RTE_PTYPE_L3_IPV4;
+ break;
+ case NPC_LT_LC_IP_OPT:
+ val |= RTE_PTYPE_L3_IPV4_EXT;
+ break;
+ case NPC_LT_LC_IP6:
+ val |= RTE_PTYPE_L3_IPV6;
+ break;
+ case NPC_LT_LC_IP6_EXT:
+ val |= RTE_PTYPE_L3_IPV6_EXT;
+ break;
+ case NPC_LT_LC_PTP:
+ val |= RTE_PTYPE_L2_ETHER_TIMESYNC;
+ break;
+ }
+
+ switch (ld) {
+ case NPC_LT_LD_TCP:
+ val |= RTE_PTYPE_L4_TCP;
+ break;
+ case NPC_LT_LD_UDP:
+ val |= RTE_PTYPE_L4_UDP;
+ break;
+ case NPC_LT_LD_SCTP:
+ val |= RTE_PTYPE_L4_SCTP;
+ break;
+ case NPC_LT_LD_ICMP:
+ case NPC_LT_LD_ICMP6:
+ val |= RTE_PTYPE_L4_ICMP;
+ break;
+ case NPC_LT_LD_IGMP:
+ val |= RTE_PTYPE_L4_IGMP;
+ break;
+ case NPC_LT_LD_GRE:
+ val |= RTE_PTYPE_TUNNEL_GRE;
+ break;
+ case NPC_LT_LD_NVGRE:
+ val |= RTE_PTYPE_TUNNEL_NVGRE;
+ break;
+ case NPC_LT_LD_ESP:
+ val |= RTE_PTYPE_TUNNEL_ESP;
+ break;
+ }
+
+ switch (le) {
+ case NPC_LT_LE_VXLAN:
+ val |= RTE_PTYPE_TUNNEL_VXLAN;
+ break;
+ case NPC_LT_LE_VXLANGPE:
+ val |= RTE_PTYPE_TUNNEL_VXLAN_GPE;
+ break;
+ case NPC_LT_LE_GENEVE:
+ val |= RTE_PTYPE_TUNNEL_GENEVE;
+ break;
+ case NPC_LT_LE_GTPC:
+ val |= RTE_PTYPE_TUNNEL_GTPC;
+ break;
+ case NPC_LT_LE_GTPU:
+ val |= RTE_PTYPE_TUNNEL_GTPU;
+ break;
+ case NPC_LT_LE_TU_MPLS_IN_GRE:
+ val |= RTE_PTYPE_TUNNEL_MPLS_IN_GRE;
+ break;
+ case NPC_LT_LE_TU_MPLS_IN_UDP:
+ val |= RTE_PTYPE_TUNNEL_MPLS_IN_UDP;
+ break;
+ }
+ ptype[idx] = val;
+ }
+}
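+/*
+ * Illustrative note (not part of the upstream driver): per the loop above,
+ * a non-tunnel lookup index packs the LB/LC/LD/LE layer types from
+ * NIX_RX_PARSE_S, 4 bits per layer:
+ *
+ *   idx         = lb | (lc << 4) | (ld << 8) | (le << 12);
+ *   packet_type = ptype[idx];
+ *
+ * For example, LB = NPC_LT_LB_CTAG, LC = NPC_LT_LC_IP, LD = NPC_LT_LD_TCP
+ * (LE = 0) yields RTE_PTYPE_L2_ETHER_VLAN | RTE_PTYPE_L3_IPV4 |
+ * RTE_PTYPE_L4_TCP.
+ */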
+
+#define TU_SHIFT(x) ((x) >> PTYPE_NON_TUNNEL_WIDTH)
+static void
+nix_create_tunnel_ptype_array(uint16_t *ptype)
+{
+ uint8_t lf, lg, lh;
+ uint16_t val;
+ uint32_t idx;
+
+ /* Skip non tunnel ptype array memory */
+ ptype = ptype + PTYPE_NON_TUNNEL_ARRAY_SZ;
+
+ for (idx = 0; idx < PTYPE_TUNNEL_ARRAY_SZ; idx++) {
+ lf = idx & 0xF;
+ lg = (idx & 0xF0) >> 4;
+ lh = (idx & 0xF00) >> 8;
+ val = RTE_PTYPE_UNKNOWN;
+
+ switch (lf) {
+ case NPC_LT_LF_TU_ETHER:
+ val |= TU_SHIFT(RTE_PTYPE_INNER_L2_ETHER);
+ break;
+ }
+ switch (lg) {
+ case NPC_LT_LG_TU_IP:
+ val |= TU_SHIFT(RTE_PTYPE_INNER_L3_IPV4);
+ break;
+ case NPC_LT_LG_TU_IP6:
+ val |= TU_SHIFT(RTE_PTYPE_INNER_L3_IPV6);
+ break;
+ }
+ switch (lh) {
+ case NPC_LT_LH_TU_TCP:
+ val |= TU_SHIFT(RTE_PTYPE_INNER_L4_TCP);
+ break;
+ case NPC_LT_LH_TU_UDP:
+ val |= TU_SHIFT(RTE_PTYPE_INNER_L4_UDP);
+ break;
+ case NPC_LT_LH_TU_SCTP:
+ val |= TU_SHIFT(RTE_PTYPE_INNER_L4_SCTP);
+ break;
+ case NPC_LT_LH_TU_ICMP:
+ case NPC_LT_LH_TU_ICMP6:
+ val |= TU_SHIFT(RTE_PTYPE_INNER_L4_ICMP);
+ break;
+ }
+
+ ptype[idx] = val;
+ }
+}
+
+static void
+nix_create_rx_ol_flags_array(void *mem)
+{
+ uint16_t idx, errcode, errlev;
+ uint32_t val, *ol_flags;
+
+ /* Skip ptype array memory */
+ ol_flags = (uint32_t *)((uint8_t *)mem + PTYPE_ARRAY_SZ);
+
+ for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
+ errlev = idx & 0xf;
+ errcode = (idx & 0xff0) >> 4;
+
+ val = PKT_RX_IP_CKSUM_UNKNOWN;
+ val |= PKT_RX_L4_CKSUM_UNKNOWN;
+ val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+
+ switch (errlev) {
+ case NPC_ERRLEV_RE:
+ /* Mark all errors as BAD checksum errors
+ * including Outer L2 length mismatch error
+ */
+ if (errcode) {
+ val |= PKT_RX_IP_CKSUM_BAD;
+ val |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ break;
+ case NPC_ERRLEV_LC:
+ if (errcode == NPC_EC_OIP4_CSUM ||
+ errcode == NPC_EC_IP_FRAG_OFFSET_1) {
+ val |= PKT_RX_IP_CKSUM_BAD;
+ val |= PKT_RX_EIP_CKSUM_BAD;
+ } else {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ }
+ break;
+ case NPC_ERRLEV_LG:
+ if (errcode == NPC_EC_IIP4_CSUM)
+ val |= PKT_RX_IP_CKSUM_BAD;
+ else
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ break;
+ case NPC_ERRLEV_NIX:
+ if (errcode == NIX_RX_PERRCODE_OL4_CHK ||
+ errcode == NIX_RX_PERRCODE_OL4_LEN ||
+ errcode == NIX_RX_PERRCODE_OL4_PORT) {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= PKT_RX_L4_CKSUM_BAD;
+ val |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ } else if (errcode == NIX_RX_PERRCODE_IL4_CHK ||
+ errcode == NIX_RX_PERRCODE_IL4_LEN ||
+ errcode == NIX_RX_PERRCODE_IL4_PORT) {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= PKT_RX_L4_CKSUM_BAD;
+ } else if (errcode == NIX_RX_PERRCODE_IL3_LEN ||
+ errcode == NIX_RX_PERRCODE_OL3_LEN) {
+ val |= PKT_RX_IP_CKSUM_BAD;
+ } else {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ break;
+ }
+ ol_flags[idx] = val;
+ }
+}
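+/*
+ * Illustrative note (not part of the upstream driver): each rx ol_flags
+ * entry built above is indexed by the 12-bit ERRLEV + ERRCODE field of
+ * NIX_RX_PARSE_S:
+ *
+ *   idx      = errlev | (errcode << 4);   (errlev: low 4 bits, errcode: 8 bits)
+ *   ol_flags = ol_flags_array[idx];
+ *
+ * which matches the (w1 & 0xfff00000) >> 20 extraction performed by
+ * nix_rx_olflags_get() in otx2_rx.h.
+ */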
+
+void *
+otx2_nix_fastpath_lookup_mem_get(void)
+{
+ const char name[] = OTX2_NIX_FASTPATH_LOOKUP_MEM;
+ const struct rte_memzone *mz;
+ void *mem;
+
+ /* SA_TBL starts after PTYPE_ARRAY & ERR_ARRAY */
+ RTE_BUILD_BUG_ON(OTX2_NIX_SA_TBL_START != (PTYPE_ARRAY_SZ +
+ ERR_ARRAY_SZ));
+
+ mz = rte_memzone_lookup(name);
+ if (mz != NULL)
+ return mz->addr;
+
+ /* Request for the first time */
+ mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
+ SOCKET_ID_ANY, 0, OTX2_ALIGN);
+ if (mz != NULL) {
+ mem = mz->addr;
+ /* Form the ptype array lookup memory */
+ nix_create_non_tunnel_ptype_array(mem);
+ nix_create_tunnel_ptype_array(mem);
+ /* Form the rx ol_flags based on errcode */
+ nix_create_rx_ol_flags_array(mem);
+ return mem;
+ }
+ return NULL;
+}
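+/*
+ * Illustrative summary (not part of the upstream driver) of the shared
+ * fastpath lookup memzone reserved above:
+ *
+ *   [0, PTYPE_ARRAY_SZ)                   ptype arrays: non-tunnel entries
+ *                                         followed by tunnel entries
+ *                                         (uint16_t each)
+ *   [PTYPE_ARRAY_SZ, +ERR_ARRAY_SZ)       rx ol_flags per ERRLEV/ERRCODE
+ *                                         (uint32_t each)
+ *   [OTX2_NIX_SA_TBL_START, +SA_TBL_SZ)   per-port SA table base addresses
+ *                                         (uint64_t per ethdev port)
+ */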
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_mac.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_mac.c
new file mode 100644
index 000000000..262d185e5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_mac.c
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+
+#include "otx2_dev.h"
+#include "otx2_ethdev.h"
+
+int
+otx2_cgx_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct cgx_mac_addr_set_or_get *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
+
+ if (otx2_dev_active_vfs(dev))
+ return -ENOTSUP;
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_set(mbox);
+ otx2_mbox_memcpy(req->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ otx2_err("Failed to set mac address in CGX, rc=%d", rc);
+
+	return rc;
+}
+
+int
+otx2_cgx_mac_max_entries_get(struct otx2_eth_dev *dev)
+{
+ struct cgx_max_dmac_entries_get_rsp *rsp;
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_mac_max_entries_get(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ return rsp->max_dmac_filters;
+}
+
+int
+otx2_nix_mac_addr_add(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr,
+ uint32_t index __rte_unused, uint32_t pool __rte_unused)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_mac_addr_add_req *req;
+ struct cgx_mac_addr_add_rsp *rsp;
+ int rc;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
+
+ if (otx2_dev_active_vfs(dev))
+ return -ENOTSUP;
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_add(mbox);
+ otx2_mbox_memcpy(req->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to add mac address, rc=%d", rc);
+ goto done;
+ }
+
+ /* Enable promiscuous mode at NIX level */
+ otx2_nix_promisc_config(eth_dev, 1);
+
+done:
+ return rc;
+}
+
+void
+otx2_nix_mac_addr_del(struct rte_eth_dev *eth_dev, uint32_t index)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_mac_addr_del_req *req;
+ int rc;
+
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return;
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_del(mbox);
+ req->index = index;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ otx2_err("Failed to delete mac address, rc=%d", rc);
+}
+
+int
+otx2_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_set_mac_addr *req;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_set_mac_addr(mbox);
+ otx2_mbox_memcpy(req->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("Failed to set mac address, rc=%d", rc);
+ goto done;
+ }
+
+ otx2_mbox_memcpy(dev->mac_addr, addr->addr_bytes, RTE_ETHER_ADDR_LEN);
+
+ /* Install the same entry into CGX DMAC filter table too. */
+ otx2_cgx_mac_addr_set(eth_dev, addr);
+
+done:
+ return rc;
+}
+
+int
+otx2_nix_mac_addr_get(struct rte_eth_dev *eth_dev, uint8_t *addr)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_get_mac_addr_rsp *rsp;
+ int rc;
+
+ otx2_mbox_alloc_msg_nix_get_mac_addr(mbox);
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_get_rsp(mbox, 0, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to get mac address, rc=%d", rc);
+ goto done;
+ }
+
+ otx2_mbox_memcpy(addr, rsp->mac_addr, RTE_ETHER_ADDR_LEN);
+
+done:
+ return rc;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_mcast.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_mcast.c
new file mode 100644
index 000000000..f84aa1bf5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_mcast.c
@@ -0,0 +1,339 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_ethdev.h"
+
+static int
+nix_mc_addr_list_free(struct otx2_eth_dev *dev, uint32_t entry_count)
+{
+ struct npc_mcam_free_entry_req *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct mcast_entry *entry;
+ int rc = 0;
+
+ if (entry_count == 0)
+ goto exit;
+
+ TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) {
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
+ req->entry = entry->mcam_index;
+
+ rc = otx2_mbox_process_msg(mbox, NULL);
+ if (rc < 0)
+ goto exit;
+
+ TAILQ_REMOVE(&dev->mc_fltr_tbl, entry, next);
+ rte_free(entry);
+ entry_count--;
+
+ if (entry_count == 0)
+ break;
+ }
+
+ if (entry == NULL)
+ dev->mc_tbl_set = false;
+
+exit:
+ return rc;
+}
+
+static int
+nix_hw_update_mc_addr_list(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_npc_flow_info *npc = &dev->npc_flow;
+ volatile uint8_t *key_data, *key_mask;
+ struct npc_mcam_write_entry_req *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct npc_xtract_info *x_info;
+ uint64_t mcam_data, mcam_mask;
+ struct mcast_entry *entry;
+ otx2_dxcfg_t *ld_cfg;
+ uint8_t *mac_addr;
+ uint64_t action;
+ int idx, rc = 0;
+
+ ld_cfg = &npc->prx_dxcfg;
+ /* Get ETH layer profile info for populating mcam entries */
+ x_info = &(*ld_cfg)[NPC_MCAM_RX][NPC_LID_LA][NPC_LT_LA_ETHER].xtract[0];
+
+ TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) {
+ req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
+ if (req == NULL) {
+ /* The mbox memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ goto exit;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
+ if (req == NULL) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ }
+ req->entry = entry->mcam_index;
+ req->intf = NPC_MCAM_RX;
+ req->enable_entry = 1;
+
+ /* Channel base extracted to KW0[11:0] */
+ req->entry_data.kw[0] = dev->rx_chan_base;
+ req->entry_data.kw_mask[0] = RTE_LEN2MASK(12, uint64_t);
+
+ /* Update mcam address */
+ key_data = (volatile uint8_t *)req->entry_data.kw;
+ key_mask = (volatile uint8_t *)req->entry_data.kw_mask;
+
+ mcam_data = 0ull;
+ mcam_mask = RTE_LEN2MASK(48, uint64_t);
+ mac_addr = &entry->mcast_mac.addr_bytes[0];
+ for (idx = RTE_ETHER_ADDR_LEN - 1; idx >= 0; idx--)
+ mcam_data |= ((uint64_t)*mac_addr++) << (8 * idx);
+
+ otx2_mbox_memcpy(key_data + x_info->key_off,
+ &mcam_data, x_info->len);
+ otx2_mbox_memcpy(key_mask + x_info->key_off,
+ &mcam_mask, x_info->len);
+
+ action = NIX_RX_ACTIONOP_UCAST;
+
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ action = NIX_RX_ACTIONOP_RSS;
+ action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
+ }
+
+ action |= ((uint64_t)otx2_pfvf_func(dev->pf, dev->vf)) << 4;
+ req->entry_data.action = action;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+
+exit:
+ return rc;
+}
+
+int
+otx2_nix_mc_addr_list_install(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ struct otx2_mbox *mbox = dev->mbox;
+ uint32_t entry_count = 0, idx = 0;
+ struct mcast_entry *entry;
+ int rc = 0;
+
+ if (!dev->mc_tbl_set)
+ return 0;
+
+ TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next)
+ entry_count++;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
+ req->priority = NPC_MCAM_ANY_PRIO;
+ req->count = entry_count;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc || rsp->count < entry_count) {
+ otx2_err("Failed to allocate required mcam entries");
+ goto exit;
+ }
+
+	TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next)
+		entry->mcam_index = rsp->entry_list[idx++];
+
+ rc = nix_hw_update_mc_addr_list(eth_dev);
+
+exit:
+ return rc;
+}
+
+int
+otx2_nix_mc_addr_list_uninstall(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct npc_mcam_free_entry_req *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct mcast_entry *entry;
+ int rc = 0;
+
+ if (!dev->mc_tbl_set)
+ return 0;
+
+ TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) {
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
+ if (req == NULL) {
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ goto exit;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
+ if (req == NULL) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ }
+ req->entry = entry->mcam_index;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+
+exit:
+ return rc;
+}
+
+static int
+nix_setup_mc_addr_list(struct otx2_eth_dev *dev,
+ struct rte_ether_addr *mc_addr_set)
+{
+ struct npc_mcam_ena_dis_entry_req *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct mcast_entry *entry;
+ uint32_t idx = 0;
+ int rc = 0;
+
+ /* Populate PMD's mcast list with given mcast mac addresses and
+ * disable all mcam entries pertaining to the mcast list.
+ */
+ TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next) {
+ rte_memcpy(&entry->mcast_mac, &mc_addr_set[idx++],
+ RTE_ETHER_ADDR_LEN);
+
+ req = otx2_mbox_alloc_msg_npc_mcam_dis_entry(mbox);
+ if (req == NULL) {
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ goto exit;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_dis_entry(mbox);
+ if (req == NULL) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ }
+ req->entry = entry->mcam_index;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+
+exit:
+ return rc;
+}
+
+int
+otx2_nix_set_mc_addr_list(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ struct otx2_mbox *mbox = dev->mbox;
+ uint32_t idx, priv_count = 0;
+ struct mcast_entry *entry;
+ int rc = 0;
+
+ if (otx2_dev_is_vf(dev))
+ return -ENOTSUP;
+
+ TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next)
+ priv_count++;
+
+ if (nb_mc_addr == 0 || mc_addr_set == NULL) {
+ /* Free existing list if new list is null */
+ nb_mc_addr = priv_count;
+ goto exit;
+ }
+
+ for (idx = 0; idx < nb_mc_addr; idx++) {
+ if (!rte_is_multicast_ether_addr(&mc_addr_set[idx]))
+ return -EINVAL;
+ }
+
+ /* New list is bigger than the existing list,
+ * allocate mcam entries for the extra entries.
+ */
+ if (nb_mc_addr > priv_count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(mbox);
+ req->priority = NPC_MCAM_ANY_PRIO;
+ req->count = nb_mc_addr - priv_count;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc || (rsp->count + priv_count < nb_mc_addr)) {
+ otx2_err("Failed to allocate required entries");
+ nb_mc_addr = priv_count;
+ goto exit;
+ }
+
+ /* Append new mcam entries to the existing mc list */
+ for (idx = 0; idx < rsp->count; idx++) {
+ entry = rte_zmalloc("otx2_nix_mc_entry",
+ sizeof(struct mcast_entry), 0);
+ if (!entry) {
+ otx2_err("Failed to allocate memory");
+ nb_mc_addr = priv_count;
+ rc = -ENOMEM;
+ goto exit;
+ }
+ entry->mcam_index = rsp->entry_list[idx];
+ TAILQ_INSERT_HEAD(&dev->mc_fltr_tbl, entry, next);
+ }
+ } else {
+		/* Free the extra mcam entries if the new list is smaller
+		 * than the existing list.
+		 */
+ nix_mc_addr_list_free(dev, priv_count - nb_mc_addr);
+ }
+
+
+	/* Now that mc_fltr_tbl has the required number of mcam entries,
+	 * traverse it and add the new multicast filter table entries.
+	 */
+ rc = nix_setup_mc_addr_list(dev, mc_addr_set);
+ if (rc < 0)
+ goto exit;
+
+ rc = nix_hw_update_mc_addr_list(eth_dev);
+ if (rc < 0)
+ goto exit;
+
+ dev->mc_tbl_set = true;
+
+ return 0;
+
+exit:
+ nix_mc_addr_list_free(dev, nb_mc_addr);
+ return rc;
+}
+
+void
+otx2_nix_mc_filter_init(struct otx2_eth_dev *dev)
+{
+ if (otx2_dev_is_vf(dev))
+ return;
+
+ TAILQ_INIT(&dev->mc_fltr_tbl);
+}
+
+void
+otx2_nix_mc_filter_fini(struct otx2_eth_dev *dev)
+{
+ struct mcast_entry *entry;
+ uint32_t count = 0;
+
+ if (otx2_dev_is_vf(dev))
+ return;
+
+ TAILQ_FOREACH(entry, &dev->mc_fltr_tbl, next)
+ count++;
+
+ nix_mc_addr_list_free(dev, count);
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_ptp.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ptp.c
new file mode 100644
index 000000000..ae5a2b7cd
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_ptp.c
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_ethdev_driver.h>
+
+#include "otx2_ethdev.h"
+
+#define PTP_FREQ_ADJUST (1 << 9)
+
+/* Function to enable ptp config for VFs */
+void
+otx2_nix_ptp_enable_vf(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ if (otx2_nix_recalc_mtu(eth_dev))
+ otx2_err("Failed to set MTU size for ptp");
+
+ dev->scalar_ena = true;
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+ /* Setting up the function pointers as per new offload flags */
+ otx2_eth_set_rx_function(eth_dev);
+ otx2_eth_set_tx_function(eth_dev);
+}
+
+static uint16_t
+nix_eth_ptp_vf_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+ struct otx2_eth_rxq *rxq = queue;
+ struct rte_eth_dev *eth_dev;
+
+ RTE_SET_USED(mbufs);
+ RTE_SET_USED(pkts);
+
+ eth_dev = rxq->eth_dev;
+ otx2_nix_ptp_enable_vf(eth_dev);
+
+ return 0;
+}
+
+static int
+nix_read_raw_clock(struct otx2_eth_dev *dev, uint64_t *clock, uint64_t *tsc,
+ uint8_t is_pmu)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_ptp_op(mbox);
+ req->op = PTP_OP_GET_CLOCK;
+ req->is_pmu = is_pmu;
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ goto fail;
+
+ if (clock)
+ *clock = rsp->clk;
+ if (tsc)
+ *tsc = rsp->tsc;
+
+fail:
+ return rc;
+}
+
+/* This function calculates the two parameters "clk_freq_mult" and
+ * "clk_delta", which are used to derive the PTP HI clock from the
+ * timestamp counter (tsc) value.
+ */
+int
+otx2_nix_raw_clock_tsc_conv(struct otx2_eth_dev *dev)
+{
+ uint64_t ticks_base = 0, ticks = 0, tsc = 0, t_freq;
+ int rc, val;
+
+ /* Calculating the frequency at which PTP HI clock is running */
+ rc = nix_read_raw_clock(dev, &ticks_base, &tsc, false);
+ if (rc) {
+ otx2_err("Failed to read the raw clock value: %d", rc);
+ goto fail;
+ }
+
+ rte_delay_ms(100);
+
+ rc = nix_read_raw_clock(dev, &ticks, &tsc, false);
+ if (rc) {
+ otx2_err("Failed to read the raw clock value: %d", rc);
+ goto fail;
+ }
+
+ t_freq = (ticks - ticks_base) * 10;
+
+	/* Calculate the frequency multiplier, i.e. the ratio between the
+	 * frequency at which the PTP HI clock runs and the tsc frequency.
+	 */
+ dev->clk_freq_mult =
+ (double)pow(10, floor(log10(t_freq))) / rte_get_timer_hz();
+
+ val = false;
+#ifdef RTE_ARM_EAL_RDTSC_USE_PMU
+ val = true;
+#endif
+ rc = nix_read_raw_clock(dev, &ticks, &tsc, val);
+ if (rc) {
+ otx2_err("Failed to read the raw clock value: %d", rc);
+ goto fail;
+ }
+
+ /* Calculating delta between PTP HI clock and tsc */
+ dev->clk_delta = ((uint64_t)(ticks / dev->clk_freq_mult) - tsc);
+
+fail:
+ return rc;
+}
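+/*
+ * Worked example (illustrative only, with assumed numbers): if the two raw
+ * clock reads taken 100 ms apart differ by roughly 1e8 ticks, then
+ * t_freq ~= 1e9, which pow/floor/log10 rounds to 1e9. Assuming a tsc
+ * running at 2.5e9 Hz, clk_freq_mult = 1e9 / 2.5e9 = 0.4 and
+ * clk_delta = ticks / 0.4 - tsc. otx2_nix_read_clock() can then
+ * reconstruct the PTP HI clock as (tsc + clk_delta) * clk_freq_mult
+ * without an mbox round trip.
+ */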
+
+static void
+nix_start_timecounters(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ memset(&dev->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&dev->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&dev->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ dev->systime_tc.cc_mask = OTX2_CYCLECOUNTER_MASK;
+ dev->rx_tstamp_tc.cc_mask = OTX2_CYCLECOUNTER_MASK;
+ dev->tx_tstamp_tc.cc_mask = OTX2_CYCLECOUNTER_MASK;
+}
+
+static int
+nix_ptp_config(struct rte_eth_dev *eth_dev, int en)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+	int rc = -EINVAL;
+
+ if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
+ return rc;
+
+ if (en) {
+ /* Enable time stamping of sent PTP packets. */
+ otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(mbox);
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("MBOX ptp tx conf enable failed: err %d", rc);
+ return rc;
+ }
+ /* Enable time stamping of received PTP packets. */
+ otx2_mbox_alloc_msg_cgx_ptp_rx_enable(mbox);
+ } else {
+ /* Disable time stamping of sent PTP packets. */
+ otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(mbox);
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("MBOX ptp tx conf disable failed: err %d", rc);
+ return rc;
+ }
+ /* Disable time stamping of received PTP packets. */
+ otx2_mbox_alloc_msg_cgx_ptp_rx_disable(mbox);
+ }
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_eth_dev_ptp_info_update(struct otx2_dev *dev, bool ptp_en)
+{
+ struct otx2_eth_dev *otx2_dev = (struct otx2_eth_dev *)dev;
+ struct rte_eth_dev *eth_dev;
+ int i;
+
+ if (!dev)
+ return -EINVAL;
+
+ eth_dev = otx2_dev->eth_dev;
+ if (!eth_dev)
+ return -EINVAL;
+
+ otx2_dev->ptp_en = ptp_en;
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[i];
+ rxq->mbuf_initializer =
+ otx2_nix_rxq_mbuf_setup(otx2_dev,
+ eth_dev->data->port_id);
+ }
+ if (otx2_dev_is_vf(otx2_dev) && !(otx2_dev_is_sdp(otx2_dev)) &&
+ !(otx2_dev_is_lbk(otx2_dev))) {
+		/* In case of a VF, the MTU cannot be set directly in this
+		 * function, as it runs in the context of an MBOX request
+		 * (PF->VF) and setting the MTU itself requires an MBOX
+		 * message to be sent (VF->PF).
+		 */
+ eth_dev->rx_pkt_burst = nix_eth_ptp_vf_burst;
+ rte_mb();
+ }
+
+ return 0;
+}
+
+int
+otx2_nix_timesync_enable(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int i, rc = 0;
+
+	/* If we are VF/SDP/LBK, PTP cannot be enabled */
+ if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev)) {
+ otx2_info("PTP cannot be enabled in case of VF/SDP/LBK");
+ return -EINVAL;
+ }
+
+ if (otx2_ethdev_is_ptp_en(dev)) {
+ otx2_info("PTP mode is already enabled");
+ return -EINVAL;
+ }
+
+ if (!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)) {
+ otx2_err("Ptype offload is disabled, it should be enabled");
+ return -EINVAL;
+ }
+
+ if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
+ otx2_err("Both PTP and switch header enabled");
+ return -EINVAL;
+ }
+
+	/* Allocate an IOVA address for the tx timestamp */
+ const struct rte_memzone *ts;
+ ts = rte_eth_dma_zone_reserve(eth_dev, "otx2_ts",
+ 0, OTX2_ALIGN, OTX2_ALIGN,
+ dev->node);
+ if (ts == NULL) {
+ otx2_err("Failed to allocate mem for tx tstamp addr");
+ return -ENOMEM;
+ }
+
+ dev->tstamp.tx_tstamp_iova = ts->iova;
+ dev->tstamp.tx_tstamp = ts->addr;
+
+ /* System time should be already on by default */
+ nix_start_timecounters(eth_dev);
+
+ dev->rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+ dev->tx_offload_flags |= NIX_TX_OFFLOAD_TSTAMP_F;
+
+ rc = nix_ptp_config(eth_dev, 1);
+ if (!rc) {
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ struct otx2_eth_txq *txq = eth_dev->data->tx_queues[i];
+ otx2_nix_form_default_desc(txq);
+ }
+
+ /* Setting up the function pointers as per new offload flags */
+ otx2_eth_set_rx_function(eth_dev);
+ otx2_eth_set_tx_function(eth_dev);
+ }
+
+ rc = otx2_nix_recalc_mtu(eth_dev);
+ if (rc)
+ otx2_err("Failed to set MTU size for ptp");
+
+ return rc;
+}
+
+int
+otx2_nix_timesync_disable(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int i, rc = 0;
+
+ if (!otx2_ethdev_is_ptp_en(dev)) {
+ otx2_nix_dbg("PTP mode is disabled");
+ return -EINVAL;
+ }
+
+ if (otx2_dev_is_vf_or_sdp(dev) || otx2_dev_is_lbk(dev))
+ return -EINVAL;
+
+ dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
+ dev->rx_offload_flags &= ~NIX_RX_OFFLOAD_TSTAMP_F;
+ dev->tx_offload_flags &= ~NIX_TX_OFFLOAD_TSTAMP_F;
+
+ rc = nix_ptp_config(eth_dev, 0);
+ if (!rc) {
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ struct otx2_eth_txq *txq = eth_dev->data->tx_queues[i];
+ otx2_nix_form_default_desc(txq);
+ }
+
+ /* Setting up the function pointers as per new offload flags */
+ otx2_eth_set_rx_function(eth_dev);
+ otx2_eth_set_tx_function(eth_dev);
+ }
+
+ rc = otx2_nix_recalc_mtu(eth_dev);
+ if (rc)
+ otx2_err("Failed to set MTU size for ptp");
+
+ return rc;
+}
+
+int
+otx2_nix_timesync_read_rx_timestamp(struct rte_eth_dev *eth_dev,
+ struct timespec *timestamp,
+ uint32_t __rte_unused flags)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_timesync_info *tstamp = &dev->tstamp;
+ uint64_t ns;
+
+ if (!tstamp->rx_ready)
+ return -EINVAL;
+
+ ns = rte_timecounter_update(&dev->rx_tstamp_tc, tstamp->rx_tstamp);
+ *timestamp = rte_ns_to_timespec(ns);
+ tstamp->rx_ready = 0;
+
+ otx2_nix_dbg("rx timestamp: %"PRIu64" sec: %"PRIu64" nsec %"PRIu64"",
+ (uint64_t)tstamp->rx_tstamp, (uint64_t)timestamp->tv_sec,
+ (uint64_t)timestamp->tv_nsec);
+
+ return 0;
+}
+
+int
+otx2_nix_timesync_read_tx_timestamp(struct rte_eth_dev *eth_dev,
+ struct timespec *timestamp)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_timesync_info *tstamp = &dev->tstamp;
+ uint64_t ns;
+
+ if (*tstamp->tx_tstamp == 0)
+ return -EINVAL;
+
+ ns = rte_timecounter_update(&dev->tx_tstamp_tc, *tstamp->tx_tstamp);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ otx2_nix_dbg("tx timestamp: %"PRIu64" sec: %"PRIu64" nsec %"PRIu64"",
+ *tstamp->tx_tstamp, (uint64_t)timestamp->tv_sec,
+ (uint64_t)timestamp->tv_nsec);
+
+ *tstamp->tx_tstamp = 0;
+ rte_wmb();
+
+ return 0;
+}
+
+int
+otx2_nix_timesync_adjust_time(struct rte_eth_dev *eth_dev, int64_t delta)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int rc;
+
+	/* Adjust the frequency to keep the tick increment at 10^9 ticks/sec */
+ if (delta < PTP_FREQ_ADJUST && delta > -PTP_FREQ_ADJUST) {
+ req = otx2_mbox_alloc_msg_ptp_op(mbox);
+ req->op = PTP_OP_ADJFINE;
+ req->scaled_ppm = delta;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+ /* Since the frequency of PTP comp register is tuned, delta and
+ * freq mult calculation for deriving PTP_HI from timestamp
+ * counter should be done again.
+ */
+ rc = otx2_nix_raw_clock_tsc_conv(dev);
+ if (rc)
+ otx2_err("Failed to calculate delta and freq mult");
+ }
+ dev->systime_tc.nsec += delta;
+ dev->rx_tstamp_tc.nsec += delta;
+ dev->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+int
+otx2_nix_timesync_write_time(struct rte_eth_dev *eth_dev,
+ const struct timespec *ts)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t ns;
+
+ ns = rte_timespec_to_ns(ts);
+ /* Set the time counters to a new value. */
+ dev->systime_tc.nsec = ns;
+ dev->rx_tstamp_tc.nsec = ns;
+ dev->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+int
+otx2_nix_timesync_read_time(struct rte_eth_dev *eth_dev, struct timespec *ts)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ uint64_t ns;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_ptp_op(mbox);
+ req->op = PTP_OP_GET_CLOCK;
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ ns = rte_timecounter_update(&dev->systime_tc, rsp->clk);
+ *ts = rte_ns_to_timespec(ns);
+
+ otx2_nix_dbg("PTP time read: %"PRIu64" .%09"PRIu64"",
+ (uint64_t)ts->tv_sec, (uint64_t)ts->tv_nsec);
+
+ return 0;
+}
+
+
+int
+otx2_nix_read_clock(struct rte_eth_dev *eth_dev, uint64_t *clock)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+	/* This API returns the raw PTP HI clock value. LFs do not have
+	 * direct access to the PTP registers, so obtaining this value
+	 * requires an mbox message to the AF. Reading it for every packet
+	 * in the fastpath (which would involve an mbox call) would be very
+	 * expensive, hence the PTP HI clock value is derived from the tsc
+	 * using clk_freq_mult and clk_delta calculated during the
+	 * configure stage.
+	 */
+ *clock = (rte_get_tsc_cycles() + dev->clk_delta) * dev->clk_freq_mult;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_rss.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rss.c
new file mode 100644
index 000000000..5e3f86681
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rss.c
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_ethdev.h"
+
+int
+otx2_nix_rss_tbl_init(struct otx2_eth_dev *dev,
+ uint8_t group, uint16_t *ind_tbl)
+{
+ struct otx2_rss_info *rss = &dev->rss_info;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *req;
+ int rc, idx;
+
+ for (idx = 0; idx < rss->rss_size; idx++) {
+ req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!req) {
+ /* The shared memory buffer can be full.
+ * Flush it and retry
+ */
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ return rc;
+
+ req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ if (!req)
+ return -ENOMEM;
+ }
+ req->rss.rq = ind_tbl[idx];
+ /* Fill AQ info */
+ req->qidx = (group * rss->rss_size) + idx;
+ req->ctype = NIX_AQ_CTYPE_RSS;
+ req->op = NIX_AQ_INSTOP_INIT;
+ }
+
+ otx2_mbox_msg_send(mbox, 0);
+ rc = otx2_mbox_wait_for_rsp(mbox, 0);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int
+otx2_nix_dev_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_rss_info *rss = &dev->rss_info;
+ int rc, i, j;
+ int idx = 0;
+
+ rc = -EINVAL;
+ if (reta_size != dev->rss_info.rss_size) {
+ otx2_err("Size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, dev->rss_info.rss_size);
+ goto fail;
+ }
+
+ /* Copy RETA table */
+ for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ if ((reta_conf[i].mask >> j) & 0x01)
+ rss->ind_tbl[idx] = reta_conf[i].reta[j];
+ idx++;
+ }
+ }
+
+ return otx2_nix_rss_tbl_init(dev, 0, dev->rss_info.ind_tbl);
+
+fail:
+ return rc;
+}
+
+int
+otx2_nix_dev_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_rss_info *rss = &dev->rss_info;
+ int rc, i, j;
+
+ rc = -EINVAL;
+
+ if (reta_size != dev->rss_info.rss_size) {
+ otx2_err("Size of hash lookup table configured "
+ "(%d) doesn't match the number hardware can supported "
+ "(%d)", reta_size, dev->rss_info.rss_size);
+ goto fail;
+ }
+
+ /* Copy RETA table */
+ for (i = 0; i < (dev->rss_info.rss_size / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+ if ((reta_conf[i].mask >> j) & 0x01)
+ reta_conf[i].reta[j] = rss->ind_tbl[j];
+ }
+
+ return 0;
+
+fail:
+ return rc;
+}
+
+void
+otx2_nix_rss_set_key(struct otx2_eth_dev *dev, uint8_t *key,
+ uint32_t key_len)
+{
+ const uint8_t default_key[NIX_HASH_KEY_SIZE] = {
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
+ 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
+ };
+ struct otx2_rss_info *rss = &dev->rss_info;
+ uint64_t *keyptr;
+ uint64_t val;
+ uint32_t idx;
+
+ if (key == NULL || key == 0) {
+ keyptr = (uint64_t *)(uintptr_t)default_key;
+ key_len = NIX_HASH_KEY_SIZE;
+ memset(rss->key, 0, key_len);
+ } else {
+ memcpy(rss->key, key, key_len);
+ keyptr = (uint64_t *)rss->key;
+ }
+
+ for (idx = 0; idx < (key_len >> 3); idx++) {
+ val = rte_cpu_to_be_64(*keyptr);
+ otx2_write64(val, dev->base + NIX_LF_RX_SECRETX(idx));
+ keyptr++;
+ }
+}
+
+static void
+rss_get_key(struct otx2_eth_dev *dev, uint8_t *key)
+{
+ uint64_t *keyptr = (uint64_t *)key;
+ uint64_t val;
+ int idx;
+
+ for (idx = 0; idx < (NIX_HASH_KEY_SIZE >> 3); idx++) {
+ val = otx2_read64(dev->base + NIX_LF_RX_SECRETX(idx));
+ *keyptr = rte_be_to_cpu_64(val);
+ keyptr++;
+ }
+}
+
+#define RSS_IPV4_ENABLE ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP)
+
+#define RSS_IPV6_ENABLE ( \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP)
+
+#define RSS_IPV6_EX_ENABLE ( \
+ ETH_RSS_IPV6_EX | \
+ ETH_RSS_IPV6_TCP_EX | \
+ ETH_RSS_IPV6_UDP_EX)
+
+#define RSS_MAX_LEVELS 3
+
+#define RSS_IPV4_INDEX 0
+#define RSS_IPV6_INDEX 1
+#define RSS_TCP_INDEX 2
+#define RSS_UDP_INDEX 3
+#define RSS_SCTP_INDEX 4
+#define RSS_DMAC_INDEX 5
+
+uint32_t
+otx2_rss_ethdev_to_nix(struct otx2_eth_dev *dev, uint64_t ethdev_rss,
+ uint8_t rss_level)
+{
+ uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
+ {
+ FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6,
+ FLOW_KEY_TYPE_TCP, FLOW_KEY_TYPE_UDP,
+ FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC
+ },
+ {
+ FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
+ FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
+ FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC
+ },
+ {
+ FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
+ FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
+ FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
+ FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
+ FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
+ FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC
+ }
+ };
+ uint32_t flowkey_cfg = 0;
+
+ dev->rss_info.nix_rss = ethdev_rss;
+
+ if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
+ dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_LEN_90B) {
+ flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
+ }
+
+ if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+ flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
+
+ if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+ flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
+
+ if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+ flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
+
+ if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+ flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
+
+ if (ethdev_rss & RSS_IPV4_ENABLE)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
+
+ if (ethdev_rss & RSS_IPV6_ENABLE)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
+
+ if (ethdev_rss & ETH_RSS_TCP)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
+
+ if (ethdev_rss & ETH_RSS_UDP)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
+
+ if (ethdev_rss & ETH_RSS_SCTP)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
+
+ if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
+
+ if (ethdev_rss & RSS_IPV6_EX_ENABLE)
+ flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
+
+ if (ethdev_rss & ETH_RSS_PORT)
+ flowkey_cfg |= FLOW_KEY_TYPE_PORT;
+
+ if (ethdev_rss & ETH_RSS_NVGRE)
+ flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
+
+ if (ethdev_rss & ETH_RSS_VXLAN)
+ flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
+
+ if (ethdev_rss & ETH_RSS_GENEVE)
+ flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
+
+ if (ethdev_rss & ETH_RSS_GTPU)
+ flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
+
+ return flowkey_cfg;
+}
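+/*
+ * Illustrative example (not part of the upstream driver): an ethdev RSS
+ * configuration of ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP at rss_level 0
+ * selects FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_TCP, while the same hash
+ * functions at rss_level 1 select the FLOW_KEY_TYPE_INNR_* variants so
+ * that RSS is computed on the inner headers of tunnelled traffic, and
+ * rss_level 2 selects both outer and inner key types.
+ */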
+
+int
+otx2_rss_set_hf(struct otx2_eth_dev *dev, uint32_t flowkey_cfg,
+ uint8_t *alg_idx, uint8_t group, int mcam_index)
+{
+ struct nix_rss_flowkey_cfg_rsp *rss_rsp;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_rss_flowkey_cfg *cfg;
+ int rc;
+
+ rc = -EINVAL;
+
+ dev->rss_info.flowkey_cfg = flowkey_cfg;
+
+ cfg = otx2_mbox_alloc_msg_nix_rss_flowkey_cfg(mbox);
+
+ cfg->flowkey_cfg = flowkey_cfg;
+ cfg->mcam_index = mcam_index; /* -1 indicates default group */
+ cfg->group = group; /* 0 is default group */
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rss_rsp);
+ if (rc)
+ return rc;
+
+ if (alg_idx)
+ *alg_idx = rss_rsp->alg_idx;
+
+ return rc;
+}
+
+int
+otx2_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint32_t flowkey_cfg;
+ uint8_t alg_idx;
+ int rc;
+
+ rc = -EINVAL;
+
+ if (rss_conf->rss_key && rss_conf->rss_key_len != NIX_HASH_KEY_SIZE) {
+ otx2_err("Hash key size mismatch %d vs %d",
+ rss_conf->rss_key_len, NIX_HASH_KEY_SIZE);
+ goto fail;
+ }
+
+ if (rss_conf->rss_key)
+ otx2_nix_rss_set_key(dev, rss_conf->rss_key,
+ (uint32_t)rss_conf->rss_key_len);
+
+ flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_conf->rss_hf, 0);
+
+ rc = otx2_rss_set_hf(dev, flowkey_cfg, &alg_idx,
+ NIX_DEFAULT_RSS_CTX_GROUP,
+ NIX_DEFAULT_RSS_MCAM_IDX);
+ if (rc) {
+ otx2_err("Failed to set RSS hash function rc=%d", rc);
+ return rc;
+ }
+
+ dev->rss_info.alg_idx = alg_idx;
+
+fail:
+ return rc;
+}
+
+int
+otx2_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ if (rss_conf->rss_key)
+ rss_get_key(dev, rss_conf->rss_key);
+
+ rss_conf->rss_key_len = NIX_HASH_KEY_SIZE;
+ rss_conf->rss_hf = dev->rss_info.nix_rss;
+
+ return 0;
+}
+
+int
+otx2_nix_rss_config(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint32_t idx, qcnt = eth_dev->data->nb_rx_queues;
+ uint32_t flowkey_cfg;
+ uint64_t rss_hf;
+ uint8_t alg_idx;
+ int rc;
+
+ /* Skip further configuration if selected mode is not RSS */
+ if (eth_dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS || !qcnt)
+ return 0;
+
+ /* Update default RSS key and cfg */
+ otx2_nix_rss_set_key(dev, NULL, 0);
+
+ /* Update default RSS RETA */
+ for (idx = 0; idx < dev->rss_info.rss_size; idx++)
+ dev->rss_info.ind_tbl[idx] = idx % qcnt;
+
+ /* Init RSS table context */
+ rc = otx2_nix_rss_tbl_init(dev, 0, dev->rss_info.ind_tbl);
+ if (rc) {
+ otx2_err("Failed to init RSS table rc=%d", rc);
+ return rc;
+ }
+
+ rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+ flowkey_cfg = otx2_rss_ethdev_to_nix(dev, rss_hf, 0);
+
+ rc = otx2_rss_set_hf(dev, flowkey_cfg, &alg_idx,
+ NIX_DEFAULT_RSS_CTX_GROUP,
+ NIX_DEFAULT_RSS_MCAM_IDX);
+ if (rc) {
+ otx2_err("Failed to set RSS hash function rc=%d", rc);
+ return rc;
+ }
+
+ dev->rss_info.alg_idx = alg_idx;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.c
new file mode 100644
index 000000000..ac40704b6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.c
@@ -0,0 +1,424 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_vect.h>
+
+#include "otx2_ethdev.h"
+#include "otx2_rx.h"
+
+#define NIX_DESCS_PER_LOOP 4
+#define CQE_CAST(x) ((struct nix_cqe_hdr_s *)(x))
+#define CQE_SZ(x) ((x) * NIX_CQ_ENTRY_SZ)
+
+static inline uint16_t
+nix_rx_nb_pkts(struct otx2_eth_rxq *rxq, const uint64_t wdata,
+ const uint16_t pkts, const uint32_t qmask)
+{
+ uint32_t available = rxq->available;
+
+ /* Update the available count if cached value is not enough */
+ if (unlikely(available < pkts)) {
+ uint64_t reg, head, tail;
+
+ /* Use LDADDA version to avoid reorder */
+ reg = otx2_atomic64_add_sync(wdata, rxq->cq_status);
+ /* CQ_OP_STATUS operation error */
+ if (reg & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
+ reg & BIT_ULL(CQ_OP_STAT_CQ_ERR))
+ return 0;
+
+ tail = reg & 0xFFFFF;
+ head = (reg >> 20) & 0xFFFFF;
+ if (tail < head)
+ available = tail - head + qmask + 1;
+ else
+ available = tail - head;
+
+ rxq->available = available;
+ }
+
+ return RTE_MIN(pkts, available);
+}
+
+static __rte_always_inline uint16_t
+nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t pkts, const uint16_t flags)
+{
+ struct otx2_eth_rxq *rxq = rx_queue;
+ const uint64_t mbuf_init = rxq->mbuf_initializer;
+ const void *lookup_mem = rxq->lookup_mem;
+ const uint64_t data_off = rxq->data_off;
+ const uintptr_t desc = rxq->desc;
+ const uint64_t wdata = rxq->wdata;
+ const uint32_t qmask = rxq->qmask;
+ uint16_t packets = 0, nb_pkts;
+ uint32_t head = rxq->head;
+ struct nix_cqe_hdr_s *cq;
+ struct rte_mbuf *mbuf;
+
+ nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
+
+ while (packets < nb_pkts) {
+ /* Prefetch N desc ahead */
+ rte_prefetch_non_temporal((void *)(desc +
+ (CQE_SZ((head + 2) & qmask))));
+ cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));
+
+ mbuf = nix_get_mbuf_from_cqe(cq, data_off);
+
+ otx2_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
+ flags);
+ otx2_nix_mbuf_to_tstamp(mbuf, rxq->tstamp, flags,
+ (uint64_t *)((uint8_t *)mbuf + data_off));
+ rx_pkts[packets++] = mbuf;
+ otx2_prefetch_store_keep(mbuf);
+ head++;
+ head &= qmask;
+ }
+
+ rxq->head = head;
+ rxq->available -= nb_pkts;
+
+ /* Free all the CQs that we've processed */
+ otx2_write64((wdata | nb_pkts), rxq->cq_door);
+
+ return nb_pkts;
+}
+
+#if defined(RTE_ARCH_ARM64)
+
+static __rte_always_inline uint64_t
+nix_vlan_update(const uint64_t w2, uint64_t ol_flags, uint8x16_t *f)
+{
+ if (w2 & BIT_ULL(21) /* vtag0_gone */) {
+ ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ *f = vsetq_lane_u16((uint16_t)(w2 >> 32), *f, 5);
+ }
+
+ return ol_flags;
+}
+
+static __rte_always_inline uint64_t
+nix_qinq_update(const uint64_t w2, uint64_t ol_flags, struct rte_mbuf *mbuf)
+{
+ if (w2 & BIT_ULL(23) /* vtag1_gone */) {
+ ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ mbuf->vlan_tci_outer = (uint16_t)(w2 >> 48);
+ }
+
+ return ol_flags;
+}
+
+static __rte_always_inline uint16_t
+nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t pkts, const uint16_t flags)
+{
+ struct otx2_eth_rxq *rxq = rx_queue; uint16_t packets = 0;
+ uint64x2_t cq0_w8, cq1_w8, cq2_w8, cq3_w8, mbuf01, mbuf23;
+ const uint64_t mbuf_initializer = rxq->mbuf_initializer;
+ const uint64x2_t data_off = vdupq_n_u64(rxq->data_off);
+ uint64_t ol_flags0, ol_flags1, ol_flags2, ol_flags3;
+ uint64x2_t rearm0 = vdupq_n_u64(mbuf_initializer);
+ uint64x2_t rearm1 = vdupq_n_u64(mbuf_initializer);
+ uint64x2_t rearm2 = vdupq_n_u64(mbuf_initializer);
+ uint64x2_t rearm3 = vdupq_n_u64(mbuf_initializer);
+ struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
+ const uint16_t *lookup_mem = rxq->lookup_mem;
+ const uint32_t qmask = rxq->qmask;
+ const uint64_t wdata = rxq->wdata;
+ const uintptr_t desc = rxq->desc;
+ uint8x16_t f0, f1, f2, f3;
+ uint32_t head = rxq->head;
+ uint16_t pkts_left;
+
+ pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);
+ pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
+
+	/* Packets have to be floor-aligned to NIX_DESCS_PER_LOOP */
+ pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
+
+ while (packets < pkts) {
+ /* Exit loop if head is about to wrap and become unaligned */
+ if (((head + NIX_DESCS_PER_LOOP - 1) & qmask) <
+ NIX_DESCS_PER_LOOP) {
+ pkts_left += (pkts - packets);
+ break;
+ }
+
+ const uintptr_t cq0 = desc + CQE_SZ(head);
+
+ /* Prefetch N desc ahead */
+ rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(8)));
+ rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(9)));
+ rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(10)));
+ rte_prefetch_non_temporal((void *)(cq0 + CQE_SZ(11)));
+
+ /* Get NIX_RX_SG_S for size and buffer pointer */
+ cq0_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(0) + 64));
+ cq1_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(1) + 64));
+ cq2_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(2) + 64));
+ cq3_w8 = vld1q_u64((uint64_t *)(cq0 + CQE_SZ(3) + 64));
+
+ /* Extract mbuf from NIX_RX_SG_S */
+ mbuf01 = vzip2q_u64(cq0_w8, cq1_w8);
+ mbuf23 = vzip2q_u64(cq2_w8, cq3_w8);
+ mbuf01 = vqsubq_u64(mbuf01, data_off);
+ mbuf23 = vqsubq_u64(mbuf23, data_off);
+
+ /* Move mbufs to scalar registers for future use */
+ mbuf0 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 0);
+ mbuf1 = (struct rte_mbuf *)vgetq_lane_u64(mbuf01, 1);
+ mbuf2 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 0);
+ mbuf3 = (struct rte_mbuf *)vgetq_lane_u64(mbuf23, 1);
+
+ /* Mask to get packet len from NIX_RX_SG_S */
+ const uint8x16_t shuf_msk = {
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0, 1, /* octet 1~0, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 0, 1, /* octet 1~0, 16 bits data_len */
+ 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF
+ };
+
+ /* Form the rx_descriptor_fields1 with pkt_len and data_len */
+ f0 = vqtbl1q_u8(cq0_w8, shuf_msk);
+ f1 = vqtbl1q_u8(cq1_w8, shuf_msk);
+ f2 = vqtbl1q_u8(cq2_w8, shuf_msk);
+ f3 = vqtbl1q_u8(cq3_w8, shuf_msk);
+
+ /* Load CQE word0 and word 1 */
+ uint64_t cq0_w0 = ((uint64_t *)(cq0 + CQE_SZ(0)))[0];
+ uint64_t cq0_w1 = ((uint64_t *)(cq0 + CQE_SZ(0)))[1];
+ uint64_t cq1_w0 = ((uint64_t *)(cq0 + CQE_SZ(1)))[0];
+ uint64_t cq1_w1 = ((uint64_t *)(cq0 + CQE_SZ(1)))[1];
+ uint64_t cq2_w0 = ((uint64_t *)(cq0 + CQE_SZ(2)))[0];
+ uint64_t cq2_w1 = ((uint64_t *)(cq0 + CQE_SZ(2)))[1];
+ uint64_t cq3_w0 = ((uint64_t *)(cq0 + CQE_SZ(3)))[0];
+ uint64_t cq3_w1 = ((uint64_t *)(cq0 + CQE_SZ(3)))[1];
+
+ if (flags & NIX_RX_OFFLOAD_RSS_F) {
+ /* Fill rss in the rx_descriptor_fields1 */
+ f0 = vsetq_lane_u32(cq0_w0, f0, 3);
+ f1 = vsetq_lane_u32(cq1_w0, f1, 3);
+ f2 = vsetq_lane_u32(cq2_w0, f2, 3);
+ f3 = vsetq_lane_u32(cq3_w0, f3, 3);
+ ol_flags0 = PKT_RX_RSS_HASH;
+ ol_flags1 = PKT_RX_RSS_HASH;
+ ol_flags2 = PKT_RX_RSS_HASH;
+ ol_flags3 = PKT_RX_RSS_HASH;
+ } else {
+ ol_flags0 = 0; ol_flags1 = 0;
+ ol_flags2 = 0; ol_flags3 = 0;
+ }
+
+ if (flags & NIX_RX_OFFLOAD_PTYPE_F) {
+ /* Fill packet_type in the rx_descriptor_fields1 */
+ f0 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq0_w1),
+ f0, 0);
+ f1 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq1_w1),
+ f1, 0);
+ f2 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq2_w1),
+ f2, 0);
+ f3 = vsetq_lane_u32(nix_ptype_get(lookup_mem, cq3_w1),
+ f3, 0);
+ }
+
+ if (flags & NIX_RX_OFFLOAD_CHECKSUM_F) {
+ ol_flags0 |= nix_rx_olflags_get(lookup_mem, cq0_w1);
+ ol_flags1 |= nix_rx_olflags_get(lookup_mem, cq1_w1);
+ ol_flags2 |= nix_rx_olflags_get(lookup_mem, cq2_w1);
+ ol_flags3 |= nix_rx_olflags_get(lookup_mem, cq3_w1);
+ }
+
+ if (flags & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
+ uint64_t cq0_w2 = *(uint64_t *)(cq0 + CQE_SZ(0) + 16);
+ uint64_t cq1_w2 = *(uint64_t *)(cq0 + CQE_SZ(1) + 16);
+ uint64_t cq2_w2 = *(uint64_t *)(cq0 + CQE_SZ(2) + 16);
+ uint64_t cq3_w2 = *(uint64_t *)(cq0 + CQE_SZ(3) + 16);
+
+ ol_flags0 = nix_vlan_update(cq0_w2, ol_flags0, &f0);
+ ol_flags1 = nix_vlan_update(cq1_w2, ol_flags1, &f1);
+ ol_flags2 = nix_vlan_update(cq2_w2, ol_flags2, &f2);
+ ol_flags3 = nix_vlan_update(cq3_w2, ol_flags3, &f3);
+
+ ol_flags0 = nix_qinq_update(cq0_w2, ol_flags0, mbuf0);
+ ol_flags1 = nix_qinq_update(cq1_w2, ol_flags1, mbuf1);
+ ol_flags2 = nix_qinq_update(cq2_w2, ol_flags2, mbuf2);
+ ol_flags3 = nix_qinq_update(cq3_w2, ol_flags3, mbuf3);
+ }
+
+ if (flags & NIX_RX_OFFLOAD_MARK_UPDATE_F) {
+ ol_flags0 = nix_update_match_id(*(uint16_t *)
+ (cq0 + CQE_SZ(0) + 38), ol_flags0, mbuf0);
+ ol_flags1 = nix_update_match_id(*(uint16_t *)
+ (cq0 + CQE_SZ(1) + 38), ol_flags1, mbuf1);
+ ol_flags2 = nix_update_match_id(*(uint16_t *)
+ (cq0 + CQE_SZ(2) + 38), ol_flags2, mbuf2);
+ ol_flags3 = nix_update_match_id(*(uint16_t *)
+ (cq0 + CQE_SZ(3) + 38), ol_flags3, mbuf3);
+ }
+
+ /* Form rearm_data with ol_flags */
+ rearm0 = vsetq_lane_u64(ol_flags0, rearm0, 1);
+ rearm1 = vsetq_lane_u64(ol_flags1, rearm1, 1);
+ rearm2 = vsetq_lane_u64(ol_flags2, rearm2, 1);
+ rearm3 = vsetq_lane_u64(ol_flags3, rearm3, 1);
+
+ /* Update rx_descriptor_fields1 */
+ vst1q_u64((uint64_t *)mbuf0->rx_descriptor_fields1, f0);
+ vst1q_u64((uint64_t *)mbuf1->rx_descriptor_fields1, f1);
+ vst1q_u64((uint64_t *)mbuf2->rx_descriptor_fields1, f2);
+ vst1q_u64((uint64_t *)mbuf3->rx_descriptor_fields1, f3);
+
+ /* Update rearm_data */
+ vst1q_u64((uint64_t *)mbuf0->rearm_data, rearm0);
+ vst1q_u64((uint64_t *)mbuf1->rearm_data, rearm1);
+ vst1q_u64((uint64_t *)mbuf2->rearm_data, rearm2);
+ vst1q_u64((uint64_t *)mbuf3->rearm_data, rearm3);
+
+ /* Store the mbufs to rx_pkts */
+ vst1q_u64((uint64_t *)&rx_pkts[packets], mbuf01);
+ vst1q_u64((uint64_t *)&rx_pkts[packets + 2], mbuf23);
+
+ /* Prefetch mbufs */
+ otx2_prefetch_store_keep(mbuf0);
+ otx2_prefetch_store_keep(mbuf1);
+ otx2_prefetch_store_keep(mbuf2);
+ otx2_prefetch_store_keep(mbuf3);
+
+ /* Mark mempool obj as "get" as it is alloc'ed by NIX */
+ __mempool_check_cookies(mbuf0->pool, (void **)&mbuf0, 1, 1);
+ __mempool_check_cookies(mbuf1->pool, (void **)&mbuf1, 1, 1);
+ __mempool_check_cookies(mbuf2->pool, (void **)&mbuf2, 1, 1);
+ __mempool_check_cookies(mbuf3->pool, (void **)&mbuf3, 1, 1);
+
+ /* Advance head pointer and packets */
+ head += NIX_DESCS_PER_LOOP; head &= qmask;
+ packets += NIX_DESCS_PER_LOOP;
+ }
+
+ rxq->head = head;
+ rxq->available -= packets;
+
+ rte_cio_wmb();
+ /* Free all the CQs that we've processed */
+ otx2_write64((rxq->wdata | packets), rxq->cq_door);
+
+ if (unlikely(pkts_left))
+ packets += nix_recv_pkts(rx_queue, &rx_pkts[packets],
+ pkts_left, flags);
+
+ return packets;
+}
+
+#else
+
+static inline uint16_t
+nix_recv_pkts_vector(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t pkts, const uint16_t flags)
+{
+ RTE_SET_USED(rx_queue);
+ RTE_SET_USED(rx_pkts);
+ RTE_SET_USED(pkts);
+ RTE_SET_USED(flags);
+
+ return 0;
+}
+
+#endif
+
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+static uint16_t __rte_noinline __rte_hot \
+otx2_nix_recv_pkts_ ## name(void *rx_queue, \
+ struct rte_mbuf **rx_pkts, uint16_t pkts) \
+{ \
+ return nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags)); \
+} \
+ \
+static uint16_t __rte_noinline __rte_hot \
+otx2_nix_recv_pkts_mseg_ ## name(void *rx_queue, \
+ struct rte_mbuf **rx_pkts, uint16_t pkts) \
+{ \
+ return nix_recv_pkts(rx_queue, rx_pkts, pkts, \
+ (flags) | NIX_RX_MULTI_SEG_F); \
+} \
+ \
+static uint16_t __rte_noinline __rte_hot \
+otx2_nix_recv_pkts_vec_ ## name(void *rx_queue, \
+ struct rte_mbuf **rx_pkts, uint16_t pkts) \
+{ \
+ /* TSTMP is not supported by vector */ \
+ if ((flags) & NIX_RX_OFFLOAD_TSTAMP_F) \
+ return 0; \
+ return nix_recv_pkts_vector(rx_queue, rx_pkts, pkts, (flags)); \
+} \
+
+NIX_RX_FASTPATH_MODES
+#undef R
+
+static inline void
+pick_rx_func(struct rte_eth_dev *eth_dev,
+ const eth_rx_burst_t rx_burst[2][2][2][2][2][2][2])
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ /* [SEC] [TSTMP] [MARK] [VLAN] [CKSUM] [PTYPE] [RSS] */
+ eth_dev->rx_pkt_burst = rx_burst
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_SECURITY_F)]
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_CHECKSUM_F)]
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)]
+ [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_RSS_F)];
+}
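+/*
+ * Illustrative example (not part of the upstream driver): a device
+ * configured with only RSS and checksum rx offloads
+ * (NIX_RX_OFFLOAD_RSS_F | NIX_RX_OFFLOAD_CHECKSUM_F) resolves to
+ * rx_burst[0][0][0][0][1][0][1], i.e. the otx2_nix_recv_pkts_* variant
+ * generated for that flag combination by the NIX_RX_FASTPATH_MODES macro.
+ */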
+
+void
+otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_ ## name,
+
+NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_mseg_ ## name,
+
+NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+ const eth_rx_burst_t nix_eth_rx_vec_burst[2][2][2][2][2][2][2] = {
+#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_vec_ ## name,
+
+NIX_RX_FASTPATH_MODES
+#undef R
+ };
+
+	/* When PTP is enabled, the scalar rx function is chosen, as most
+	 * PTP applications receive one packet per rx burst.
+	 */
+ if (dev->scalar_ena || dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ pick_rx_func(eth_dev, nix_eth_rx_burst);
+ else
+ pick_rx_func(eth_dev, nix_eth_rx_vec_burst);
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);
+
+ /* Copy multi seg version with no offload for tear down sequence */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ dev->rx_pkt_burst_no_offload =
+ nix_eth_rx_burst_mseg[0][0][0][0][0][0][0];
+ rte_mb();
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.h
new file mode 100644
index 000000000..d8648b692
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_rx.h
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_RX_H__
+#define __OTX2_RX_H__
+
+#include <rte_ether.h>
+
+#include "otx2_common.h"
+#include "otx2_ethdev_sec.h"
+#include "otx2_ipsec_fp.h"
+
+/* Default mark value used when none is provided. */
+#define OTX2_FLOW_ACTION_FLAG_DEFAULT 0xffff
+
+#define PTYPE_NON_TUNNEL_WIDTH 16
+#define PTYPE_TUNNEL_WIDTH 12
+#define PTYPE_NON_TUNNEL_ARRAY_SZ BIT(PTYPE_NON_TUNNEL_WIDTH)
+#define PTYPE_TUNNEL_ARRAY_SZ BIT(PTYPE_TUNNEL_WIDTH)
+#define PTYPE_ARRAY_SZ ((PTYPE_NON_TUNNEL_ARRAY_SZ +\
+ PTYPE_TUNNEL_ARRAY_SZ) *\
+ sizeof(uint16_t))
+
+#define NIX_RX_OFFLOAD_NONE (0)
+#define NIX_RX_OFFLOAD_RSS_F BIT(0)
+#define NIX_RX_OFFLOAD_PTYPE_F BIT(1)
+#define NIX_RX_OFFLOAD_CHECKSUM_F BIT(2)
+#define NIX_RX_OFFLOAD_VLAN_STRIP_F BIT(3)
+#define NIX_RX_OFFLOAD_MARK_UPDATE_F BIT(4)
+#define NIX_RX_OFFLOAD_TSTAMP_F BIT(5)
+#define NIX_RX_OFFLOAD_SECURITY_F BIT(6)
+
+/* Flags to control the cqe_to_mbuf conversion function.
+ * Defined from the MSB end to denote that they are not used as
+ * offload flags when picking the rx function.
+ */
+#define NIX_RX_MULTI_SEG_F BIT(15)
+#define NIX_TIMESYNC_RX_OFFSET 8
+
+/* Inline IPsec offsets */
+
+#define INLINE_INB_RPTR_HDR 16
+/* nix_cqe_hdr_s + nix_rx_parse_s + nix_rx_sg_s + nix_iova_s */
+#define INLINE_CPT_RESULT_OFFSET 80
+
+struct otx2_timesync_info {
+ uint64_t rx_tstamp;
+ rte_iova_t tx_tstamp_iova;
+ uint64_t *tx_tstamp;
+ uint8_t tx_ready;
+ uint8_t rx_ready;
+} __rte_cache_aligned;
+
+union mbuf_initializer {
+ struct {
+ uint16_t data_off;
+ uint16_t refcnt;
+ uint16_t nb_segs;
+ uint16_t port;
+ } fields;
+ uint64_t value;
+};
+
+static __rte_always_inline void
+otx2_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
+ struct otx2_timesync_info *tstamp, const uint16_t flag,
+ uint64_t *tstamp_ptr)
+{
+ if ((flag & NIX_RX_OFFLOAD_TSTAMP_F) &&
+ (mbuf->data_off == RTE_PKTMBUF_HEADROOM +
+ NIX_TIMESYNC_RX_OFFSET)) {
+
+ mbuf->pkt_len -= NIX_TIMESYNC_RX_OFFSET;
+
+		/* Read the Rx timestamp inserted by CGX at the
+		 * start of the packet data.
+		 */
+ mbuf->timestamp = rte_be_to_cpu_64(*tstamp_ptr);
+		/* The PKT_RX_IEEE1588_TMST flag needs to be set only
+		 * when PTP packets are received.
+		 */
+ if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
+ tstamp->rx_tstamp = mbuf->timestamp;
+ tstamp->rx_ready = 1;
+ mbuf->ol_flags |= PKT_RX_IEEE1588_PTP |
+ PKT_RX_IEEE1588_TMST | PKT_RX_TIMESTAMP;
+ }
+ }
+}
+
+static __rte_always_inline uint64_t
+nix_clear_data_off(uint64_t oldval)
+{
+ union mbuf_initializer mbuf_init = { .value = oldval };
+
+ mbuf_init.fields.data_off = 0;
+ return mbuf_init.value;
+}
+
+static __rte_always_inline struct rte_mbuf *
+nix_get_mbuf_from_cqe(void *cq, const uint64_t data_off)
+{
+ rte_iova_t buff;
+
+ /* Skip CQE, NIX_RX_PARSE_S and SG HDR(9 DWORDs) and peek buff addr */
+ buff = *((rte_iova_t *)((uint64_t *)cq + 9));
+ return (struct rte_mbuf *)(buff - data_off);
+}
+
+
+static __rte_always_inline uint32_t
+nix_ptype_get(const void * const lookup_mem, const uint64_t in)
+{
+ const uint16_t * const ptype = lookup_mem;
+ const uint16_t lh_lg_lf = (in & 0xFFF0000000000000) >> 52;
+ const uint16_t tu_l2 = ptype[(in & 0x000FFFF000000000) >> 36];
+ const uint16_t il4_tu = ptype[PTYPE_NON_TUNNEL_ARRAY_SZ + lh_lg_lf];
+
+ return (il4_tu << PTYPE_NON_TUNNEL_WIDTH) | tu_l2;
+}
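+/* Note: the ptype lookup above is two-level: bits [51:36] of the CQE
+ * parse word index the non-tunnel table, bits [63:52] index the tunnel
+ * table placed right after it, and the two 16-bit results are combined
+ * into the final packet type.
+ */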
+
+static __rte_always_inline uint32_t
+nix_rx_olflags_get(const void * const lookup_mem, const uint64_t in)
+{
+ const uint32_t * const ol_flags = (const uint32_t *)
+ ((const uint8_t *)lookup_mem + PTYPE_ARRAY_SZ);
+
+ return ol_flags[(in & 0xfff00000) >> 20];
+}
+
+static inline uint64_t
+nix_update_match_id(const uint16_t match_id, uint64_t ol_flags,
+ struct rte_mbuf *mbuf)
+{
+	/* There is no separate bit to report whether match_id
+	 * is valid, and no flag to distinguish an
+	 * RTE_FLOW_ACTION_TYPE_FLAG action from an
+	 * RTE_FLOW_ACTION_TYPE_MARK action. The former case is
+	 * addressed by treating 0 as an invalid value and by
+	 * incrementing/decrementing the match_id pair when MARK
+	 * is activated. The latter case is addressed by defining
+	 * OTX2_FLOW_MARK_DEFAULT as the value used for the
+	 * RTE_FLOW_ACTION_TYPE_FLAG action.
+	 * This translates to not using
+	 * OTX2_FLOW_ACTION_FLAG_DEFAULT - 1 and
+	 * OTX2_FLOW_ACTION_FLAG_DEFAULT for match_id,
+	 * i.e. valid mark_ids range from
+	 * 0 to OTX2_FLOW_ACTION_FLAG_DEFAULT - 2.
+	 */
+ if (likely(match_id)) {
+ ol_flags |= PKT_RX_FDIR;
+ if (match_id != OTX2_FLOW_ACTION_FLAG_DEFAULT) {
+ ol_flags |= PKT_RX_FDIR_ID;
+ mbuf->hash.fdir.hi = match_id - 1;
+ }
+ }
+
+ return ol_flags;
+}
+
+static __rte_always_inline void
+nix_cqe_xtract_mseg(const struct nix_rx_parse_s *rx,
+ struct rte_mbuf *mbuf, uint64_t rearm)
+{
+ const rte_iova_t *iova_list;
+ struct rte_mbuf *head;
+ const rte_iova_t *eol;
+ uint8_t nb_segs;
+ uint64_t sg;
+
+ sg = *(const uint64_t *)(rx + 1);
+ nb_segs = (sg >> 48) & 0x3;
+ mbuf->nb_segs = nb_segs;
+ mbuf->data_len = sg & 0xFFFF;
+ sg = sg >> 16;
+
+ eol = ((const rte_iova_t *)(rx + 1) + ((rx->desc_sizem1 + 1) << 1));
+ /* Skip SG_S and first IOVA*/
+ iova_list = ((const rte_iova_t *)(rx + 1)) + 2;
+ nb_segs--;
+
+ rearm = rearm & ~0xFFFF;
+
+ head = mbuf;
+ while (nb_segs) {
+ mbuf->next = ((struct rte_mbuf *)*iova_list) - 1;
+ mbuf = mbuf->next;
+
+ __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+
+ mbuf->data_len = sg & 0xFFFF;
+ sg = sg >> 16;
+ *(uint64_t *)(&mbuf->rearm_data) = rearm;
+ nb_segs--;
+ iova_list++;
+
+ if (!nb_segs && (iova_list + 1 < eol)) {
+ sg = *(const uint64_t *)(iova_list);
+ nb_segs = (sg >> 48) & 0x3;
+ head->nb_segs += nb_segs;
+ iova_list = (const rte_iova_t *)(iova_list + 1);
+ }
+ }
+}
+
+static __rte_always_inline uint16_t
+nix_rx_sec_cptres_get(const void *cq)
+{
+ volatile const struct otx2_cpt_res *res;
+
+ res = (volatile const struct otx2_cpt_res *)((const char *)cq +
+ INLINE_CPT_RESULT_OFFSET);
+
+ return res->u16[0];
+}
+
+static __rte_always_inline void *
+nix_rx_sec_sa_get(const void * const lookup_mem, int spi, uint16_t port)
+{
+ const uint64_t *const *sa_tbl = (const uint64_t * const *)
+ ((const uint8_t *)lookup_mem + OTX2_NIX_SA_TBL_START);
+
+ return (void *)sa_tbl[port][spi];
+}
+
+static __rte_always_inline uint64_t
+nix_rx_sec_mbuf_update(const struct nix_cqe_hdr_s *cq, struct rte_mbuf *m,
+ const void * const lookup_mem)
+{
+ struct otx2_ipsec_fp_in_sa *sa;
+ struct rte_ipv4_hdr *ipv4;
+ uint16_t m_len;
+ uint32_t spi;
+ char *data;
+
+ if (unlikely(nix_rx_sec_cptres_get(cq) != OTX2_SEC_COMP_GOOD))
+ return PKT_RX_SEC_OFFLOAD | PKT_RX_SEC_OFFLOAD_FAILED;
+
+	/* The lower 20 bits of the tag carry the SPI */
+ spi = cq->tag & 0xFFFFF;
+
+ sa = nix_rx_sec_sa_get(lookup_mem, spi, m->port);
+ m->udata64 = (uint64_t)sa->userdata;
+
+ data = rte_pktmbuf_mtod(m, char *);
+ memcpy(data + INLINE_INB_RPTR_HDR, data, RTE_ETHER_HDR_LEN);
+
+ m->data_off += INLINE_INB_RPTR_HDR;
+
+ ipv4 = (struct rte_ipv4_hdr *)(data + INLINE_INB_RPTR_HDR +
+ RTE_ETHER_HDR_LEN);
+
+ m_len = rte_be_to_cpu_16(ipv4->total_length) + RTE_ETHER_HDR_LEN;
+
+ m->data_len = m_len;
+ m->pkt_len = m_len;
+ return PKT_RX_SEC_OFFLOAD;
+}
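+/* Note: on a successful inline IPsec Rx, the routine above relocates the
+ * Ethernet header forward by INLINE_INB_RPTR_HDR bytes (just past the
+ * hardware-inserted header), advances data_off accordingly and rebuilds
+ * the mbuf lengths from the decrypted inner IPv4 total length; inner
+ * IPv6 does not appear to be handled here.
+ */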
+
+static __rte_always_inline void
+otx2_nix_cqe_to_mbuf(const struct nix_cqe_hdr_s *cq, const uint32_t tag,
+ struct rte_mbuf *mbuf, const void *lookup_mem,
+ const uint64_t val, const uint16_t flag)
+{
+ const struct nix_rx_parse_s *rx =
+ (const struct nix_rx_parse_s *)((const uint64_t *)cq + 1);
+ const uint64_t w1 = *(const uint64_t *)rx;
+ const uint16_t len = rx->pkt_lenm1 + 1;
+ uint64_t ol_flags = 0;
+
+ /* Mark mempool obj as "get" as it is alloc'ed by NIX */
+ __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
+
+ if (flag & NIX_RX_OFFLOAD_PTYPE_F)
+ mbuf->packet_type = nix_ptype_get(lookup_mem, w1);
+ else
+ mbuf->packet_type = 0;
+
+ if (flag & NIX_RX_OFFLOAD_RSS_F) {
+ mbuf->hash.rss = tag;
+ ol_flags |= PKT_RX_RSS_HASH;
+ }
+
+ if (flag & NIX_RX_OFFLOAD_CHECKSUM_F)
+ ol_flags |= nix_rx_olflags_get(lookup_mem, w1);
+
+ if (flag & NIX_RX_OFFLOAD_VLAN_STRIP_F) {
+ if (rx->vtag0_gone) {
+ ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+ mbuf->vlan_tci = rx->vtag0_tci;
+ }
+ if (rx->vtag1_gone) {
+ ol_flags |= PKT_RX_QINQ | PKT_RX_QINQ_STRIPPED;
+ mbuf->vlan_tci_outer = rx->vtag1_tci;
+ }
+ }
+
+ if (flag & NIX_RX_OFFLOAD_MARK_UPDATE_F)
+ ol_flags = nix_update_match_id(rx->match_id, ol_flags, mbuf);
+
+ if ((flag & NIX_RX_OFFLOAD_SECURITY_F) &&
+ cq->cqe_type == NIX_XQE_TYPE_RX_IPSECH) {
+ *(uint64_t *)(&mbuf->rearm_data) = val;
+ ol_flags |= nix_rx_sec_mbuf_update(cq, mbuf, lookup_mem);
+ mbuf->ol_flags = ol_flags;
+ return;
+ }
+
+ mbuf->ol_flags = ol_flags;
+ *(uint64_t *)(&mbuf->rearm_data) = val;
+ mbuf->pkt_len = len;
+
+ if (flag & NIX_RX_MULTI_SEG_F)
+ nix_cqe_xtract_mseg(rx, mbuf, val);
+ else
+ mbuf->data_len = len;
+}
+
+#define CKSUM_F NIX_RX_OFFLOAD_CHECKSUM_F
+#define PTYPE_F NIX_RX_OFFLOAD_PTYPE_F
+#define RSS_F NIX_RX_OFFLOAD_RSS_F
+#define RX_VLAN_F NIX_RX_OFFLOAD_VLAN_STRIP_F
+#define MARK_F NIX_RX_OFFLOAD_MARK_UPDATE_F
+#define TS_F NIX_RX_OFFLOAD_TSTAMP_F
+#define RX_SEC_F NIX_RX_OFFLOAD_SECURITY_F
+
+/* [SEC] [TSMP] [MARK] [VLAN] [CKSUM] [PTYPE] [RSS] */
+#define NIX_RX_FASTPATH_MODES \
+R(no_offload, 0, 0, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE) \
+R(rss, 0, 0, 0, 0, 0, 0, 1, RSS_F) \
+R(ptype, 0, 0, 0, 0, 0, 1, 0, PTYPE_F) \
+R(ptype_rss, 0, 0, 0, 0, 0, 1, 1, PTYPE_F | RSS_F) \
+R(cksum, 0, 0, 0, 0, 1, 0, 0, CKSUM_F) \
+R(cksum_rss, 0, 0, 0, 0, 1, 0, 1, CKSUM_F | RSS_F) \
+R(cksum_ptype, 0, 0, 0, 0, 1, 1, 0, CKSUM_F | PTYPE_F) \
+R(cksum_ptype_rss, 0, 0, 0, 0, 1, 1, 1, CKSUM_F | PTYPE_F | RSS_F)\
+R(vlan, 0, 0, 0, 1, 0, 0, 0, RX_VLAN_F) \
+R(vlan_rss, 0, 0, 0, 1, 0, 0, 1, RX_VLAN_F | RSS_F) \
+R(vlan_ptype, 0, 0, 0, 1, 0, 1, 0, RX_VLAN_F | PTYPE_F) \
+R(vlan_ptype_rss, 0, 0, 0, 1, 0, 1, 1, \
+ RX_VLAN_F | PTYPE_F | RSS_F) \
+R(vlan_cksum, 0, 0, 0, 1, 1, 0, 0, RX_VLAN_F | CKSUM_F) \
+R(vlan_cksum_rss, 0, 0, 0, 1, 1, 0, 1, \
+ RX_VLAN_F | CKSUM_F | RSS_F) \
+R(vlan_cksum_ptype, 0, 0, 0, 1, 1, 1, 0, \
+ RX_VLAN_F | CKSUM_F | PTYPE_F) \
+R(vlan_cksum_ptype_rss, 0, 0, 0, 1, 1, 1, 1, \
+ RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(mark, 0, 0, 1, 0, 0, 0, 0, MARK_F) \
+R(mark_rss, 0, 0, 1, 0, 0, 0, 1, MARK_F | RSS_F) \
+R(mark_ptype, 0, 0, 1, 0, 0, 1, 0, MARK_F | PTYPE_F) \
+R(mark_ptype_rss, 0, 0, 1, 0, 0, 1, 1, MARK_F | PTYPE_F | RSS_F) \
+R(mark_cksum, 0, 0, 1, 0, 1, 0, 0, MARK_F | CKSUM_F) \
+R(mark_cksum_rss, 0, 0, 1, 0, 1, 0, 1, MARK_F | CKSUM_F | RSS_F) \
+R(mark_cksum_ptype, 0, 0, 1, 0, 1, 1, 0, \
+ MARK_F | CKSUM_F | PTYPE_F) \
+R(mark_cksum_ptype_rss, 0, 0, 1, 0, 1, 1, 1, \
+ MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(mark_vlan, 0, 0, 1, 1, 0, 0, 0, MARK_F | RX_VLAN_F) \
+R(mark_vlan_rss, 0, 0, 1, 1, 0, 0, 1, \
+ MARK_F | RX_VLAN_F | RSS_F) \
+R(mark_vlan_ptype, 0, 0, 1, 1, 0, 1, 0, \
+ MARK_F | RX_VLAN_F | PTYPE_F) \
+R(mark_vlan_ptype_rss, 0, 0, 1, 1, 0, 1, 1, \
+ MARK_F | RX_VLAN_F | PTYPE_F | RSS_F) \
+R(mark_vlan_cksum, 0, 0, 1, 1, 1, 0, 0, \
+ MARK_F | RX_VLAN_F | CKSUM_F) \
+R(mark_vlan_cksum_rss, 0, 0, 1, 1, 1, 0, 1, \
+ MARK_F | RX_VLAN_F | CKSUM_F | RSS_F) \
+R(mark_vlan_cksum_ptype, 0, 0, 1, 1, 1, 1, 0, \
+ MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \
+R(mark_vlan_cksum_ptype_rss, 0, 0, 1, 1, 1, 1, 1, \
+ MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(ts, 0, 1, 0, 0, 0, 0, 0, TS_F) \
+R(ts_rss, 0, 1, 0, 0, 0, 0, 1, TS_F | RSS_F) \
+R(ts_ptype, 0, 1, 0, 0, 0, 1, 0, TS_F | PTYPE_F) \
+R(ts_ptype_rss, 0, 1, 0, 0, 0, 1, 1, TS_F | PTYPE_F | RSS_F) \
+R(ts_cksum, 0, 1, 0, 0, 1, 0, 0, TS_F | CKSUM_F) \
+R(ts_cksum_rss, 0, 1, 0, 0, 1, 0, 1, TS_F | CKSUM_F | RSS_F) \
+R(ts_cksum_ptype, 0, 1, 0, 0, 1, 1, 0, TS_F | CKSUM_F | PTYPE_F) \
+R(ts_cksum_ptype_rss, 0, 1, 0, 0, 1, 1, 1, \
+ TS_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(ts_vlan, 0, 1, 0, 1, 0, 0, 0, TS_F | RX_VLAN_F) \
+R(ts_vlan_rss, 0, 1, 0, 1, 0, 0, 1, TS_F | RX_VLAN_F | RSS_F) \
+R(ts_vlan_ptype, 0, 1, 0, 1, 0, 1, 0, \
+ TS_F | RX_VLAN_F | PTYPE_F) \
+R(ts_vlan_ptype_rss, 0, 1, 0, 1, 0, 1, 1, \
+ TS_F | RX_VLAN_F | PTYPE_F | RSS_F) \
+R(ts_vlan_cksum, 0, 1, 0, 1, 1, 0, 0, \
+ TS_F | RX_VLAN_F | CKSUM_F) \
+R(ts_vlan_cksum_rss, 0, 1, 0, 1, 1, 0, 1, \
+			TS_F | RX_VLAN_F | CKSUM_F | RSS_F)	\
+R(ts_vlan_cksum_ptype, 0, 1, 0, 1, 1, 1, 0, \
+ TS_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \
+R(ts_vlan_cksum_ptype_rss, 0, 1, 0, 1, 1, 1, 1, \
+ TS_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(ts_mark, 0, 1, 1, 0, 0, 0, 0, TS_F | MARK_F) \
+R(ts_mark_rss, 0, 1, 1, 0, 0, 0, 1, TS_F | MARK_F | RSS_F) \
+R(ts_mark_ptype, 0, 1, 1, 0, 0, 1, 0, TS_F | MARK_F | PTYPE_F) \
+R(ts_mark_ptype_rss, 0, 1, 1, 0, 0, 1, 1, \
+ TS_F | MARK_F | PTYPE_F | RSS_F) \
+R(ts_mark_cksum, 0, 1, 1, 0, 1, 0, 0, TS_F | MARK_F | CKSUM_F) \
+R(ts_mark_cksum_rss, 0, 1, 1, 0, 1, 0, 1, \
+ TS_F | MARK_F | CKSUM_F | RSS_F) \
+R(ts_mark_cksum_ptype, 0, 1, 1, 0, 1, 1, 0, \
+ TS_F | MARK_F | CKSUM_F | PTYPE_F) \
+R(ts_mark_cksum_ptype_rss, 0, 1, 1, 0, 1, 1, 1, \
+ TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(ts_mark_vlan, 0, 1, 1, 1, 0, 0, 0, TS_F | MARK_F | RX_VLAN_F)\
+R(ts_mark_vlan_rss, 0, 1, 1, 1, 0, 0, 1, \
+ TS_F | MARK_F | RX_VLAN_F | RSS_F) \
+R(ts_mark_vlan_ptype, 0, 1, 1, 1, 0, 1, 0, \
+ TS_F | MARK_F | RX_VLAN_F | PTYPE_F) \
+R(ts_mark_vlan_ptype_rss, 0, 1, 1, 1, 0, 1, 1, \
+ TS_F | MARK_F | RX_VLAN_F | PTYPE_F | RSS_F) \
+R(ts_mark_vlan_cksum_ptype, 0, 1, 1, 1, 1, 1, 0, \
+ TS_F | MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \
+R(ts_mark_vlan_cksum_ptype_rss, 0, 1, 1, 1, 1, 1, 1, \
+ TS_F | MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(sec, 1, 0, 0, 0, 0, 0, 0, RX_SEC_F) \
+R(sec_rss, 1, 0, 0, 0, 0, 0, 1, RX_SEC_F | RSS_F) \
+R(sec_ptype, 1, 0, 0, 0, 0, 1, 0, RX_SEC_F | PTYPE_F) \
+R(sec_ptype_rss, 1, 0, 0, 0, 0, 1, 1, \
+ RX_SEC_F | PTYPE_F | RSS_F) \
+R(sec_cksum, 1, 0, 0, 0, 1, 0, 0, RX_SEC_F | CKSUM_F) \
+R(sec_cksum_rss, 1, 0, 0, 0, 1, 0, 1, \
+ RX_SEC_F | CKSUM_F | RSS_F) \
+R(sec_cksum_ptype, 1, 0, 0, 0, 1, 1, 0, \
+ RX_SEC_F | CKSUM_F | PTYPE_F) \
+R(sec_cksum_ptype_rss, 1, 0, 0, 0, 1, 1, 1, \
+ RX_SEC_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(sec_vlan, 1, 0, 0, 1, 0, 0, 0, RX_SEC_F | RX_VLAN_F) \
+R(sec_vlan_rss, 1, 0, 0, 1, 0, 0, 1, \
+ RX_SEC_F | RX_VLAN_F | RSS_F) \
+R(sec_vlan_ptype, 1, 0, 0, 1, 0, 1, 0, \
+ RX_SEC_F | RX_VLAN_F | PTYPE_F) \
+R(sec_vlan_ptype_rss, 1, 0, 0, 1, 0, 1, 1, \
+ RX_SEC_F | RX_VLAN_F | PTYPE_F | RSS_F) \
+R(sec_vlan_cksum, 1, 0, 0, 1, 1, 0, 0, \
+ RX_SEC_F | RX_VLAN_F | CKSUM_F) \
+R(sec_vlan_cksum_rss, 1, 0, 0, 1, 1, 0, 1, \
+ RX_SEC_F | RX_VLAN_F | CKSUM_F | RSS_F) \
+R(sec_vlan_cksum_ptype, 1, 0, 0, 1, 1, 1, 0, \
+ RX_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \
+R(sec_vlan_cksum_ptype_rss, 1, 0, 0, 1, 1, 1, 1, \
+ RX_SEC_F | RX_VLAN_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(sec_mark, 1, 0, 1, 0, 0, 0, 0, RX_SEC_F | MARK_F) \
+R(sec_mark_rss, 1, 0, 1, 0, 0, 0, 1, RX_SEC_F | MARK_F | RSS_F)\
+R(sec_mark_ptype, 1, 0, 1, 0, 0, 1, 0, \
+ RX_SEC_F | MARK_F | PTYPE_F) \
+R(sec_mark_ptype_rss, 1, 0, 1, 0, 0, 1, 1, \
+ RX_SEC_F | MARK_F | PTYPE_F | RSS_F) \
+R(sec_mark_cksum, 1, 0, 1, 0, 1, 0, 0, \
+ RX_SEC_F | MARK_F | CKSUM_F) \
+R(sec_mark_cksum_rss, 1, 0, 1, 0, 1, 0, 1, \
+ RX_SEC_F | MARK_F | CKSUM_F | RSS_F) \
+R(sec_mark_cksum_ptype, 1, 0, 1, 0, 1, 1, 0, \
+ RX_SEC_F | MARK_F | CKSUM_F | PTYPE_F) \
+R(sec_mark_cksum_ptype_rss, 1, 0, 1, 0, 1, 1, 1, \
+ RX_SEC_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(sec_mark_vlan,		1, 0, 1, 1, 0, 0, 0,			\
+			RX_SEC_F | MARK_F | RX_VLAN_F)			\
+R(sec_mark_vlan_rss, 1, 0, 1, 1, 0, 0, 1, \
+ RX_SEC_F | MARK_F | RX_VLAN_F | RSS_F) \
+R(sec_mark_vlan_ptype, 1, 0, 1, 1, 0, 1, 0, \
+ RX_SEC_F | MARK_F | RX_VLAN_F | PTYPE_F) \
+R(sec_mark_vlan_ptype_rss, 1, 0, 1, 1, 0, 1, 1, \
+ RX_SEC_F | MARK_F | RX_VLAN_F | PTYPE_F | RSS_F) \
+R(sec_mark_vlan_cksum, 1, 0, 1, 1, 1, 0, 0, \
+ RX_SEC_F | MARK_F | RX_VLAN_F | CKSUM_F) \
+R(sec_mark_vlan_cksum_rss, 1, 0, 1, 1, 1, 0, 1, \
+ RX_SEC_F | MARK_F | RX_VLAN_F | CKSUM_F | RSS_F) \
+R(sec_mark_vlan_cksum_ptype, 1, 0, 1, 1, 1, 1, 0, \
+ RX_SEC_F | MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \
+R(sec_mark_vlan_cksum_ptype_rss, \
+ 1, 0, 1, 1, 1, 1, 1, \
+ RX_SEC_F | MARK_F | RX_VLAN_F | CKSUM_F | PTYPE_F | \
+ RSS_F) \
+R(sec_ts, 1, 1, 0, 0, 0, 0, 0, RX_SEC_F | TS_F) \
+R(sec_ts_rss, 1, 1, 0, 0, 0, 0, 1, RX_SEC_F | TS_F | RSS_F) \
+R(sec_ts_ptype, 1, 1, 0, 0, 0, 1, 0, RX_SEC_F | TS_F | PTYPE_F)\
+R(sec_ts_ptype_rss, 1, 1, 0, 0, 0, 1, 1, \
+ RX_SEC_F | TS_F | PTYPE_F | RSS_F) \
+R(sec_ts_cksum, 1, 1, 0, 0, 1, 0, 0, RX_SEC_F | TS_F | CKSUM_F)\
+R(sec_ts_cksum_rss, 1, 1, 0, 0, 1, 0, 1, \
+ RX_SEC_F | TS_F | CKSUM_F | RSS_F) \
+R(sec_ts_cksum_ptype, 1, 1, 0, 0, 1, 1, 0, \
+			RX_SEC_F | TS_F | CKSUM_F | PTYPE_F)	\
+R(sec_ts_cksum_ptype_rss, 1, 1, 0, 0, 1, 1, 1, \
+ RX_SEC_F | TS_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(sec_ts_vlan, 1, 1, 0, 1, 0, 0, 0, \
+ RX_SEC_F | TS_F | RX_VLAN_F) \
+R(sec_ts_vlan_rss, 1, 1, 0, 1, 0, 0, 1, \
+ RX_SEC_F | TS_F | RX_VLAN_F | RSS_F) \
+R(sec_ts_vlan_ptype, 1, 1, 0, 1, 0, 1, 0, \
+ RX_SEC_F | TS_F | RX_VLAN_F | PTYPE_F) \
+R(sec_ts_vlan_ptype_rss, 1, 1, 0, 1, 0, 1, 1, \
+ RX_SEC_F | TS_F | RX_VLAN_F | PTYPE_F | RSS_F) \
+R(sec_ts_vlan_cksum, 1, 1, 0, 1, 1, 0, 0, \
+ RX_SEC_F | TS_F | RX_VLAN_F | CKSUM_F) \
+R(sec_ts_vlan_cksum_rss, 1, 1, 0, 1, 1, 0, 1, \
+ RX_SEC_F | TS_F | RX_VLAN_F | CKSUM_F | RSS_F) \
+R(sec_ts_vlan_cksum_ptype, 1, 1, 0, 1, 1, 1, 0, \
+ RX_SEC_F | TS_F | RX_VLAN_F | CKSUM_F | PTYPE_F) \
+R(sec_ts_vlan_cksum_ptype_rss, 1, 1, 0, 1, 1, 1, 1, \
+ RX_SEC_F | TS_F | RX_VLAN_F | CKSUM_F | PTYPE_F | \
+ RSS_F) \
+R(sec_ts_mark, 1, 1, 1, 0, 0, 0, 0, RX_SEC_F | TS_F | MARK_F) \
+R(sec_ts_mark_rss, 1, 1, 1, 0, 0, 0, 1, \
+ RX_SEC_F | TS_F | MARK_F | RSS_F) \
+R(sec_ts_mark_ptype, 1, 1, 1, 0, 0, 1, 0, \
+ RX_SEC_F | TS_F | MARK_F | PTYPE_F) \
+R(sec_ts_mark_ptype_rss, 1, 1, 1, 0, 0, 1, 1, \
+ RX_SEC_F | TS_F | MARK_F | PTYPE_F | RSS_F) \
+R(sec_ts_mark_cksum, 1, 1, 1, 0, 1, 0, 0, \
+ RX_SEC_F | TS_F | MARK_F | CKSUM_F) \
+R(sec_ts_mark_cksum_rss, 1, 1, 1, 0, 1, 0, 1, \
+ RX_SEC_F | TS_F | MARK_F | CKSUM_F | RSS_F) \
+R(sec_ts_mark_cksum_ptype, 1, 1, 1, 0, 1, 1, 0, \
+ RX_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F) \
+R(sec_ts_mark_cksum_ptype_rss, 1, 1, 1, 0, 1, 1, 1, \
+ RX_SEC_F | TS_F | MARK_F | CKSUM_F | PTYPE_F | RSS_F) \
+R(sec_ts_mark_vlan, 1, 1, 1, 1, 0, 0, 0, \
+ RX_SEC_F | TS_F | MARK_F | RX_VLAN_F) \
+R(sec_ts_mark_vlan_rss, 1, 1, 1, 1, 0, 0, 1, \
+			RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | RSS_F)	\
+R(sec_ts_mark_vlan_ptype, 1, 1, 1, 1, 0, 1, 0, \
+ RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | PTYPE_F) \
+R(sec_ts_mark_vlan_ptype_rss, 1, 1, 1, 1, 0, 1, 1, \
+ RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | PTYPE_F | RSS_F)\
+R(sec_ts_mark_vlan_cksum, 1, 1, 1, 1, 1, 0, 0, \
+ RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | CKSUM_F) \
+R(sec_ts_mark_vlan_cksum_rss, 1, 1, 1, 1, 1, 0, 1, \
+ RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | CKSUM_F | RSS_F)\
+R(sec_ts_mark_vlan_cksum_ptype, 1, 1, 1, 1, 1, 1, 0, \
+ RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | CKSUM_F | \
+ PTYPE_F) \
+R(sec_ts_mark_vlan_cksum_ptype_rss, \
+ 1, 1, 1, 1, 1, 1, 1, \
+ RX_SEC_F | TS_F | MARK_F | RX_VLAN_F | CKSUM_F | \
+ PTYPE_F | RSS_F)
+#endif /* __OTX2_RX_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_stats.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_stats.c
new file mode 100644
index 000000000..8aaf270a7
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_stats.c
@@ -0,0 +1,396 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <inttypes.h>
+
+#include "otx2_ethdev.h"
+
+struct otx2_nix_xstats_name {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ uint32_t offset;
+};
+
+static const struct otx2_nix_xstats_name nix_tx_xstats[] = {
+ {"tx_ucast", NIX_STAT_LF_TX_TX_UCAST},
+ {"tx_bcast", NIX_STAT_LF_TX_TX_BCAST},
+ {"tx_mcast", NIX_STAT_LF_TX_TX_MCAST},
+ {"tx_drop", NIX_STAT_LF_TX_TX_DROP},
+ {"tx_octs", NIX_STAT_LF_TX_TX_OCTS},
+};
+
+static const struct otx2_nix_xstats_name nix_rx_xstats[] = {
+ {"rx_octs", NIX_STAT_LF_RX_RX_OCTS},
+ {"rx_ucast", NIX_STAT_LF_RX_RX_UCAST},
+ {"rx_bcast", NIX_STAT_LF_RX_RX_BCAST},
+ {"rx_mcast", NIX_STAT_LF_RX_RX_MCAST},
+ {"rx_drop", NIX_STAT_LF_RX_RX_DROP},
+ {"rx_drop_octs", NIX_STAT_LF_RX_RX_DROP_OCTS},
+ {"rx_fcs", NIX_STAT_LF_RX_RX_FCS},
+ {"rx_err", NIX_STAT_LF_RX_RX_ERR},
+ {"rx_drp_bcast", NIX_STAT_LF_RX_RX_DRP_BCAST},
+ {"rx_drp_mcast", NIX_STAT_LF_RX_RX_DRP_MCAST},
+ {"rx_drp_l3bcast", NIX_STAT_LF_RX_RX_DRP_L3BCAST},
+ {"rx_drp_l3mcast", NIX_STAT_LF_RX_RX_DRP_L3MCAST},
+};
+
+static const struct otx2_nix_xstats_name nix_q_xstats[] = {
+ {"rq_op_re_pkts", NIX_LF_RQ_OP_RE_PKTS},
+};
+
+#define OTX2_NIX_NUM_RX_XSTATS RTE_DIM(nix_rx_xstats)
+#define OTX2_NIX_NUM_TX_XSTATS RTE_DIM(nix_tx_xstats)
+#define OTX2_NIX_NUM_QUEUE_XSTATS RTE_DIM(nix_q_xstats)
+
+#define OTX2_NIX_NUM_XSTATS_REG (OTX2_NIX_NUM_RX_XSTATS + \
+ OTX2_NIX_NUM_TX_XSTATS + OTX2_NIX_NUM_QUEUE_XSTATS)
+
+int
+otx2_nix_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t reg, val;
+ uint32_t qidx, i;
+ int64_t *addr;
+
+ stats->opackets = otx2_read64(dev->base +
+ NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_UCAST));
+ stats->opackets += otx2_read64(dev->base +
+ NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_MCAST));
+ stats->opackets += otx2_read64(dev->base +
+ NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_BCAST));
+ stats->oerrors = otx2_read64(dev->base +
+ NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_DROP));
+ stats->obytes = otx2_read64(dev->base +
+ NIX_LF_TX_STATX(NIX_STAT_LF_TX_TX_OCTS));
+
+ stats->ipackets = otx2_read64(dev->base +
+ NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_UCAST));
+ stats->ipackets += otx2_read64(dev->base +
+ NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_MCAST));
+ stats->ipackets += otx2_read64(dev->base +
+ NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_BCAST));
+ stats->imissed = otx2_read64(dev->base +
+ NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_DROP));
+ stats->ibytes = otx2_read64(dev->base +
+ NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_OCTS));
+ stats->ierrors = otx2_read64(dev->base +
+ NIX_LF_RX_STATX(NIX_STAT_LF_RX_RX_ERR));
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ if (dev->txmap[i] & (1U << 31)) {
+ qidx = dev->txmap[i] & 0xFFFF;
+ reg = (((uint64_t)qidx) << 32);
+
+ addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
+ val = otx2_atomic64_add_nosync(reg, addr);
+ if (val & OP_ERR)
+ val = 0;
+ stats->q_opackets[i] = val;
+
+ addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
+ val = otx2_atomic64_add_nosync(reg, addr);
+ if (val & OP_ERR)
+ val = 0;
+ stats->q_obytes[i] = val;
+
+ addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_DROP_PKTS);
+ val = otx2_atomic64_add_nosync(reg, addr);
+ if (val & OP_ERR)
+ val = 0;
+ stats->q_errors[i] = val;
+ }
+ }
+
+ for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ if (dev->rxmap[i] & (1U << 31)) {
+ qidx = dev->rxmap[i] & 0xFFFF;
+ reg = (((uint64_t)qidx) << 32);
+
+ addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_PKTS);
+ val = otx2_atomic64_add_nosync(reg, addr);
+ if (val & OP_ERR)
+ val = 0;
+ stats->q_ipackets[i] = val;
+
+ addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_OCTS);
+ val = otx2_atomic64_add_nosync(reg, addr);
+ if (val & OP_ERR)
+ val = 0;
+ stats->q_ibytes[i] = val;
+
+ addr = (int64_t *)(dev->base + NIX_LF_RQ_OP_DROP_PKTS);
+ val = otx2_atomic64_add_nosync(reg, addr);
+ if (val & OP_ERR)
+ val = 0;
+ stats->q_errors[i] += val;
+ }
+ }
+
+ return 0;
+}
+
+int
+otx2_nix_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_mbox_alloc_msg_nix_stats_rst(mbox) == NULL)
+ return -ENOMEM;
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_nix_queue_stats_mapping(struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ uint8_t stat_idx, uint8_t is_rx)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ if (is_rx)
+ dev->rxmap[stat_idx] = ((1U << 31) | queue_id);
+ else
+ dev->txmap[stat_idx] = ((1U << 31) | queue_id);
+
+ return 0;
+}
+
+int
+otx2_nix_xstats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ unsigned int i, count = 0;
+ uint64_t reg, val;
+
+ if (n < OTX2_NIX_NUM_XSTATS_REG)
+ return OTX2_NIX_NUM_XSTATS_REG;
+
+ if (xstats == NULL)
+ return 0;
+
+ for (i = 0; i < OTX2_NIX_NUM_TX_XSTATS; i++) {
+ xstats[count].value = otx2_read64(dev->base +
+ NIX_LF_TX_STATX(nix_tx_xstats[i].offset));
+ xstats[count].id = count;
+ count++;
+ }
+
+ for (i = 0; i < OTX2_NIX_NUM_RX_XSTATS; i++) {
+ xstats[count].value = otx2_read64(dev->base +
+ NIX_LF_RX_STATX(nix_rx_xstats[i].offset));
+ xstats[count].id = count;
+ count++;
+ }
+
+	/* The queue xstat accumulates across all Rx queues; start at zero */
+	xstats[count].value = 0;
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ reg = (((uint64_t)i) << 32);
+ val = otx2_atomic64_add_nosync(reg, (int64_t *)(dev->base +
+ nix_q_xstats[0].offset));
+ if (val & OP_ERR)
+ val = 0;
+ xstats[count].value += val;
+ }
+ xstats[count].id = count;
+ count++;
+
+ return count;
+}
+
+int
+otx2_nix_xstats_get_names(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int limit)
+{
+ unsigned int i, count = 0;
+
+ RTE_SET_USED(eth_dev);
+
+ if (limit < OTX2_NIX_NUM_XSTATS_REG && xstats_names != NULL)
+ return -ENOMEM;
+
+ if (xstats_names) {
+ for (i = 0; i < OTX2_NIX_NUM_TX_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", nix_tx_xstats[i].name);
+ count++;
+ }
+
+ for (i = 0; i < OTX2_NIX_NUM_RX_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", nix_rx_xstats[i].name);
+ count++;
+ }
+
+ for (i = 0; i < OTX2_NIX_NUM_QUEUE_XSTATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s", nix_q_xstats[i].name);
+ count++;
+ }
+ }
+
+ return OTX2_NIX_NUM_XSTATS_REG;
+}
+
+int
+otx2_nix_xstats_get_names_by_id(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids, unsigned int limit)
+{
+ struct rte_eth_xstat_name xstats_names_copy[OTX2_NIX_NUM_XSTATS_REG];
+ uint16_t i;
+
+ if (limit < OTX2_NIX_NUM_XSTATS_REG && ids == NULL)
+ return OTX2_NIX_NUM_XSTATS_REG;
+
+ if (limit > OTX2_NIX_NUM_XSTATS_REG)
+ return -EINVAL;
+
+ if (xstats_names == NULL)
+ return -ENOMEM;
+
+ otx2_nix_xstats_get_names(eth_dev, xstats_names_copy, limit);
+
+ for (i = 0; i < OTX2_NIX_NUM_XSTATS_REG; i++) {
+ if (ids[i] >= OTX2_NIX_NUM_XSTATS_REG) {
+ otx2_err("Invalid id value");
+ return -EINVAL;
+ }
+ strncpy(xstats_names[i].name, xstats_names_copy[ids[i]].name,
+ sizeof(xstats_names[i].name));
+ }
+
+ return limit;
+}
+
+int
+otx2_nix_xstats_get_by_id(struct rte_eth_dev *eth_dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ struct rte_eth_xstat xstats[OTX2_NIX_NUM_XSTATS_REG];
+ uint16_t i;
+
+ if (n < OTX2_NIX_NUM_XSTATS_REG && ids == NULL)
+ return OTX2_NIX_NUM_XSTATS_REG;
+
+ if (n > OTX2_NIX_NUM_XSTATS_REG)
+ return -EINVAL;
+
+ if (values == NULL)
+ return -ENOMEM;
+
+ otx2_nix_xstats_get(eth_dev, xstats, n);
+
+ for (i = 0; i < OTX2_NIX_NUM_XSTATS_REG; i++) {
+ if (ids[i] >= OTX2_NIX_NUM_XSTATS_REG) {
+ otx2_err("Invalid id value");
+ return -EINVAL;
+ }
+ values[i] = xstats[ids[i]].value;
+ }
+
+ return n;
+}
+
+static int
+nix_queue_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_rsp *rsp;
+ struct nix_aq_enq_req *aq;
+ uint32_t i;
+ int rc;
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = i;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to read rq context");
+ return rc;
+ }
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = i;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+ otx2_mbox_memcpy(&aq->rq, &rsp->rq, sizeof(rsp->rq));
+ otx2_mbox_memset(&aq->rq_mask, 0, sizeof(aq->rq_mask));
+ aq->rq.octs = 0;
+ aq->rq.pkts = 0;
+ aq->rq.drop_octs = 0;
+ aq->rq.drop_pkts = 0;
+ aq->rq.re_pkts = 0;
+
+ aq->rq_mask.octs = ~(aq->rq_mask.octs);
+ aq->rq_mask.pkts = ~(aq->rq_mask.pkts);
+ aq->rq_mask.drop_octs = ~(aq->rq_mask.drop_octs);
+ aq->rq_mask.drop_pkts = ~(aq->rq_mask.drop_pkts);
+ aq->rq_mask.re_pkts = ~(aq->rq_mask.re_pkts);
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("Failed to write rq context");
+ return rc;
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = i;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_READ;
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to read sq context");
+ return rc;
+ }
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = i;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+ otx2_mbox_memcpy(&aq->sq, &rsp->sq, sizeof(rsp->sq));
+ otx2_mbox_memset(&aq->sq_mask, 0, sizeof(aq->sq_mask));
+ aq->sq.octs = 0;
+ aq->sq.pkts = 0;
+ aq->sq.drop_octs = 0;
+ aq->sq.drop_pkts = 0;
+
+ aq->sq_mask.octs = ~(aq->sq_mask.octs);
+ aq->sq_mask.pkts = ~(aq->sq_mask.pkts);
+ aq->sq_mask.drop_octs = ~(aq->sq_mask.drop_octs);
+ aq->sq_mask.drop_pkts = ~(aq->sq_mask.drop_pkts);
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ otx2_err("Failed to write sq context");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int
+otx2_nix_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ int ret;
+
+ if (otx2_mbox_alloc_msg_nix_stats_rst(mbox) == NULL)
+ return -ENOMEM;
+
+ ret = otx2_mbox_process(mbox);
+ if (ret != 0)
+ return ret;
+
+ /* Reset queue stats */
+ return nix_queue_stats_reset(eth_dev);
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.c
new file mode 100644
index 000000000..8ed059549
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.c
@@ -0,0 +1,3216 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_malloc.h>
+
+#include "otx2_ethdev.h"
+#include "otx2_tm.h"
+
+/* Use last LVL_CNT nodes as default nodes */
+#define NIX_DEFAULT_NODE_ID_START (RTE_TM_NODE_ID_NULL - NIX_TXSCH_LVL_CNT)
+
+enum otx2_tm_node_level {
+ OTX2_TM_LVL_ROOT = 0,
+ OTX2_TM_LVL_SCH1,
+ OTX2_TM_LVL_SCH2,
+ OTX2_TM_LVL_SCH3,
+ OTX2_TM_LVL_SCH4,
+ OTX2_TM_LVL_QUEUE,
+ OTX2_TM_LVL_MAX,
+};
+
+static inline
+uint64_t shaper2regval(struct shaper_params *shaper)
+{
+ return (shaper->burst_exponent << 37) | (shaper->burst_mantissa << 29) |
+ (shaper->div_exp << 13) | (shaper->exponent << 9) |
+ (shaper->mantissa << 1);
+}
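+/* Note: the packing above follows the shaper fields programmed into the
+ * NIX PIR/CIR registers (burst exponent/mantissa, rate divider exponent,
+ * rate exponent and mantissa); bit 0, the enable bit, is OR-ed in
+ * separately by the callers in prepare_tm_shaper_reg().
+ */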
+
+int
+otx2_nix_get_link(struct otx2_eth_dev *dev)
+{
+ int link = 13 /* SDP */;
+ uint16_t lmac_chan;
+ uint16_t map;
+
+ lmac_chan = dev->tx_chan_base;
+
+ /* CGX lmac link */
+ if (lmac_chan >= 0x800) {
+ map = lmac_chan & 0x7FF;
+ link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF);
+ } else if (lmac_chan < 0x700) {
+ /* LBK channel */
+ link = 12;
+ }
+
+ return link;
+}
+
+static uint8_t
+nix_get_relchan(struct otx2_eth_dev *dev)
+{
+ return dev->tx_chan_base & 0xff;
+}
+
+static bool
+nix_tm_have_tl1_access(struct otx2_eth_dev *dev)
+{
+ bool is_lbk = otx2_dev_is_lbk(dev);
+ return otx2_dev_is_pf(dev) && !otx2_dev_is_Ax(dev) && !is_lbk;
+}
+
+static bool
+nix_tm_is_leaf(struct otx2_eth_dev *dev, int lvl)
+{
+ if (nix_tm_have_tl1_access(dev))
+ return (lvl == OTX2_TM_LVL_QUEUE);
+
+ return (lvl == OTX2_TM_LVL_SCH4);
+}
+
+static int
+find_prio_anchor(struct otx2_eth_dev *dev, uint32_t node_id)
+{
+ struct otx2_nix_tm_node *child_node;
+
+ TAILQ_FOREACH(child_node, &dev->node_list, node) {
+ if (!child_node->parent)
+ continue;
+ if (!(child_node->parent->id == node_id))
+ continue;
+ if (child_node->priority == child_node->parent->rr_prio)
+ continue;
+ return child_node->hw_id - child_node->priority;
+ }
+ return 0;
+}
+
+
+static struct otx2_nix_tm_shaper_profile *
+nix_tm_shaper_profile_search(struct otx2_eth_dev *dev, uint32_t shaper_id)
+{
+ struct otx2_nix_tm_shaper_profile *tm_shaper_profile;
+
+ TAILQ_FOREACH(tm_shaper_profile, &dev->shaper_profile_list, shaper) {
+ if (tm_shaper_profile->shaper_profile_id == shaper_id)
+ return tm_shaper_profile;
+ }
+ return NULL;
+}
+
+static inline uint64_t
+shaper_rate_to_nix(uint64_t value, uint64_t *exponent_p,
+ uint64_t *mantissa_p, uint64_t *div_exp_p)
+{
+ uint64_t div_exp, exponent, mantissa;
+
+ /* Boundary checks */
+ if (value < MIN_SHAPER_RATE ||
+ value > MAX_SHAPER_RATE)
+ return 0;
+
+ if (value <= SHAPER_RATE(0, 0, 0)) {
+ /* Calculate rate div_exp and mantissa using
+ * the following formula:
+ *
+ * value = (2E6 * (256 + mantissa)
+ * / ((1 << div_exp) * 256))
+ */
+ div_exp = 0;
+ exponent = 0;
+ mantissa = MAX_RATE_MANTISSA;
+
+ while (value < (NIX_SHAPER_RATE_CONST / (1 << div_exp)))
+ div_exp += 1;
+
+ while (value <
+ ((NIX_SHAPER_RATE_CONST * (256 + mantissa)) /
+ ((1 << div_exp) * 256)))
+ mantissa -= 1;
+ } else {
+ /* Calculate rate exponent and mantissa using
+ * the following formula:
+ *
+ * value = (2E6 * ((256 + mantissa) << exponent)) / 256
+ *
+ */
+ div_exp = 0;
+ exponent = MAX_RATE_EXPONENT;
+ mantissa = MAX_RATE_MANTISSA;
+
+ while (value < (NIX_SHAPER_RATE_CONST * (1 << exponent)))
+ exponent -= 1;
+
+ while (value < ((NIX_SHAPER_RATE_CONST *
+ ((256 + mantissa) << exponent)) / 256))
+ mantissa -= 1;
+ }
+
+ if (div_exp > MAX_RATE_DIV_EXP ||
+ exponent > MAX_RATE_EXPONENT || mantissa > MAX_RATE_MANTISSA)
+ return 0;
+
+ if (div_exp_p)
+ *div_exp_p = div_exp;
+ if (exponent_p)
+ *exponent_p = exponent;
+ if (mantissa_p)
+ *mantissa_p = mantissa;
+
+ /* Calculate real rate value */
+ return SHAPER_RATE(exponent, mantissa, div_exp);
+}
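+/* Informal example: assuming SHAPER_RATE() follows the formula in the
+ * comments above with NIX_SHAPER_RATE_CONST = 2E6, a requested rate of
+ * 1E6 takes the div_exp branch and converges to exponent 0, mantissa 0
+ * and div_exp 1, i.e. 2E6 * 256 / (2 * 256) = 1E6 exactly.
+ */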
+
+static inline uint64_t
+shaper_burst_to_nix(uint64_t value, uint64_t *exponent_p,
+ uint64_t *mantissa_p)
+{
+ uint64_t exponent, mantissa;
+
+ if (value < MIN_SHAPER_BURST || value > MAX_SHAPER_BURST)
+ return 0;
+
+	/* Calculate burst exponent and mantissa using
+	 * the following formula:
+	 *
+	 * value = ((256 + mantissa) << (exponent + 1)) / 256
+	 *
+	 */
+ exponent = MAX_BURST_EXPONENT;
+ mantissa = MAX_BURST_MANTISSA;
+
+ while (value < (1ull << (exponent + 1)))
+ exponent -= 1;
+
+ while (value < ((256 + mantissa) << (exponent + 1)) / 256)
+ mantissa -= 1;
+
+ if (exponent > MAX_BURST_EXPONENT || mantissa > MAX_BURST_MANTISSA)
+ return 0;
+
+ if (exponent_p)
+ *exponent_p = exponent;
+ if (mantissa_p)
+ *mantissa_p = mantissa;
+
+ return SHAPER_BURST(exponent, mantissa);
+}
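+/* Informal example: for a 4096 byte burst the loops above settle at
+ * exponent 11 and mantissa 0, since ((256 + 0) << (11 + 1)) / 256 = 4096,
+ * assuming MAX_BURST_EXPONENT is at least 11.
+ */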
+
+static void
+shaper_config_to_nix(struct otx2_nix_tm_shaper_profile *profile,
+ struct shaper_params *cir,
+ struct shaper_params *pir)
+{
+	struct rte_tm_shaper_params *param;
+
+	if (!profile)
+		return;
+
+	param = &profile->params;
+
+ /* Calculate CIR exponent and mantissa */
+ if (param->committed.rate)
+ cir->rate = shaper_rate_to_nix(param->committed.rate,
+ &cir->exponent,
+ &cir->mantissa,
+ &cir->div_exp);
+
+ /* Calculate PIR exponent and mantissa */
+ if (param->peak.rate)
+ pir->rate = shaper_rate_to_nix(param->peak.rate,
+ &pir->exponent,
+ &pir->mantissa,
+ &pir->div_exp);
+
+ /* Calculate CIR burst exponent and mantissa */
+ if (param->committed.size)
+ cir->burst = shaper_burst_to_nix(param->committed.size,
+ &cir->burst_exponent,
+ &cir->burst_mantissa);
+
+ /* Calculate PIR burst exponent and mantissa */
+ if (param->peak.size)
+ pir->burst = shaper_burst_to_nix(param->peak.size,
+ &pir->burst_exponent,
+ &pir->burst_mantissa);
+}
+
+static void
+shaper_default_red_algo(struct otx2_eth_dev *dev,
+ struct otx2_nix_tm_node *tm_node,
+ struct otx2_nix_tm_shaper_profile *profile)
+{
+ struct shaper_params cir, pir;
+
+ /* C0 doesn't support STALL when both PIR & CIR are enabled */
+ if (profile && otx2_dev_is_96xx_Cx(dev)) {
+ memset(&cir, 0, sizeof(cir));
+ memset(&pir, 0, sizeof(pir));
+ shaper_config_to_nix(profile, &cir, &pir);
+
+ if (pir.rate && cir.rate) {
+ tm_node->red_algo = NIX_REDALG_DISCARD;
+ tm_node->flags |= NIX_TM_NODE_RED_DISCARD;
+ return;
+ }
+ }
+
+ tm_node->red_algo = NIX_REDALG_STD;
+ tm_node->flags &= ~NIX_TM_NODE_RED_DISCARD;
+}
+
+static int
+populate_tm_tl1_default(struct otx2_eth_dev *dev, uint32_t schq)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_txschq_config *req;
+
+ /*
+ * Default config for TL1.
+ * For VF this is always ignored.
+ */
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = NIX_TXSCH_LVL_TL1;
+
+ /* Set DWRR quantum */
+ req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq);
+ req->regval[0] = TXSCH_TL1_DFLT_RR_QTM;
+ req->num_regs++;
+
+ req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
+ req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+ req->num_regs++;
+
+ req->reg[2] = NIX_AF_TL1X_CIR(schq);
+ req->regval[2] = 0;
+ req->num_regs++;
+
+ return otx2_mbox_process(mbox);
+}
+
+static uint8_t
+prepare_tm_sched_reg(struct otx2_eth_dev *dev,
+ struct otx2_nix_tm_node *tm_node,
+ volatile uint64_t *reg, volatile uint64_t *regval)
+{
+ uint64_t strict_prio = tm_node->priority;
+ uint32_t hw_lvl = tm_node->hw_lvl;
+ uint32_t schq = tm_node->hw_id;
+ uint64_t rr_quantum;
+ uint8_t k = 0;
+
+ rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
+
+	/* For children of the root node, strict priority is the default
+	 * when either the device root is TL2 or TL1 static priority is
+	 * disabled.
+	 */
+ if (hw_lvl == NIX_TXSCH_LVL_TL2 &&
+ (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
+ dev->tm_flags & NIX_TM_TL1_NO_SP))
+ strict_prio = TXSCH_TL1_DFLT_RR_PRIO;
+
+ otx2_tm_dbg("Schedule config node %s(%u) lvl %u id %u, "
+ "prio 0x%" PRIx64 ", rr_quantum 0x%" PRIx64 " (%p)",
+ nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
+ tm_node->id, strict_prio, rr_quantum, tm_node);
+
+ switch (hw_lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ reg[k] = NIX_AF_MDQX_SCHEDULE(schq);
+ regval[k] = (strict_prio << 24) | rr_quantum;
+ k++;
+
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg[k] = NIX_AF_TL4X_SCHEDULE(schq);
+ regval[k] = (strict_prio << 24) | rr_quantum;
+ k++;
+
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg[k] = NIX_AF_TL3X_SCHEDULE(schq);
+ regval[k] = (strict_prio << 24) | rr_quantum;
+ k++;
+
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg[k] = NIX_AF_TL2X_SCHEDULE(schq);
+ regval[k] = (strict_prio << 24) | rr_quantum;
+ k++;
+
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg[k] = NIX_AF_TL1X_SCHEDULE(schq);
+ regval[k] = rr_quantum;
+ k++;
+
+ break;
+ }
+
+ return k;
+}
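+/* Note: for SMQ/TL4/TL3/TL2 the schedule register above packs the strict
+ * priority into bits 24 and up and the DWRR round-robin quantum into the
+ * low bits; TL1 has no priority field, so only the RR quantum is
+ * programmed there.
+ */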
+
+static uint8_t
+prepare_tm_shaper_reg(struct otx2_nix_tm_node *tm_node,
+ struct otx2_nix_tm_shaper_profile *profile,
+ volatile uint64_t *reg, volatile uint64_t *regval)
+{
+ struct shaper_params cir, pir;
+ uint32_t schq = tm_node->hw_id;
+ uint8_t k = 0;
+
+ memset(&cir, 0, sizeof(cir));
+ memset(&pir, 0, sizeof(pir));
+ shaper_config_to_nix(profile, &cir, &pir);
+
+ otx2_tm_dbg("Shaper config node %s(%u) lvl %u id %u, "
+ "pir %" PRIu64 "(%" PRIu64 "B),"
+ " cir %" PRIu64 "(%" PRIu64 "B) (%p)",
+ nix_hwlvl2str(tm_node->hw_lvl), schq, tm_node->lvl,
+ tm_node->id, pir.rate, pir.burst,
+ cir.rate, cir.burst, tm_node);
+
+ switch (tm_node->hw_lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+ /* Configure PIR, CIR */
+ reg[k] = NIX_AF_MDQX_PIR(schq);
+ regval[k] = (pir.rate && pir.burst) ?
+ (shaper2regval(&pir) | 1) : 0;
+ k++;
+
+ reg[k] = NIX_AF_MDQX_CIR(schq);
+ regval[k] = (cir.rate && cir.burst) ?
+ (shaper2regval(&cir) | 1) : 0;
+ k++;
+
+ /* Configure RED ALG */
+ reg[k] = NIX_AF_MDQX_SHAPE(schq);
+ regval[k] = ((uint64_t)tm_node->red_algo << 9);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ /* Configure PIR, CIR */
+ reg[k] = NIX_AF_TL4X_PIR(schq);
+ regval[k] = (pir.rate && pir.burst) ?
+ (shaper2regval(&pir) | 1) : 0;
+ k++;
+
+ reg[k] = NIX_AF_TL4X_CIR(schq);
+ regval[k] = (cir.rate && cir.burst) ?
+ (shaper2regval(&cir) | 1) : 0;
+ k++;
+
+ /* Configure RED algo */
+ reg[k] = NIX_AF_TL4X_SHAPE(schq);
+ regval[k] = ((uint64_t)tm_node->red_algo << 9);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ /* Configure PIR, CIR */
+ reg[k] = NIX_AF_TL3X_PIR(schq);
+ regval[k] = (pir.rate && pir.burst) ?
+ (shaper2regval(&pir) | 1) : 0;
+ k++;
+
+ reg[k] = NIX_AF_TL3X_CIR(schq);
+ regval[k] = (cir.rate && cir.burst) ?
+ (shaper2regval(&cir) | 1) : 0;
+ k++;
+
+ /* Configure RED algo */
+ reg[k] = NIX_AF_TL3X_SHAPE(schq);
+ regval[k] = ((uint64_t)tm_node->red_algo << 9);
+ k++;
+
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ /* Configure PIR, CIR */
+ reg[k] = NIX_AF_TL2X_PIR(schq);
+ regval[k] = (pir.rate && pir.burst) ?
+ (shaper2regval(&pir) | 1) : 0;
+ k++;
+
+ reg[k] = NIX_AF_TL2X_CIR(schq);
+ regval[k] = (cir.rate && cir.burst) ?
+ (shaper2regval(&cir) | 1) : 0;
+ k++;
+
+ /* Configure RED algo */
+ reg[k] = NIX_AF_TL2X_SHAPE(schq);
+ regval[k] = ((uint64_t)tm_node->red_algo << 9);
+ k++;
+
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ /* Configure CIR */
+ reg[k] = NIX_AF_TL1X_CIR(schq);
+ regval[k] = (cir.rate && cir.burst) ?
+ (shaper2regval(&cir) | 1) : 0;
+ k++;
+ break;
+ }
+
+ return k;
+}
+
+static uint8_t
+prepare_tm_sw_xoff(struct otx2_nix_tm_node *tm_node, bool enable,
+ volatile uint64_t *reg, volatile uint64_t *regval)
+{
+ uint32_t hw_lvl = tm_node->hw_lvl;
+ uint32_t schq = tm_node->hw_id;
+ uint8_t k = 0;
+
+ otx2_tm_dbg("sw xoff config node %s(%u) lvl %u id %u, enable %u (%p)",
+ nix_hwlvl2str(hw_lvl), schq, tm_node->lvl,
+ tm_node->id, enable, tm_node);
+
+ regval[k] = enable;
+
+ switch (hw_lvl) {
+ case NIX_TXSCH_LVL_MDQ:
+ reg[k] = NIX_AF_MDQX_SW_XOFF(schq);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ reg[k] = NIX_AF_TL4X_SW_XOFF(schq);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ reg[k] = NIX_AF_TL3X_SW_XOFF(schq);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ reg[k] = NIX_AF_TL2X_SW_XOFF(schq);
+ k++;
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg[k] = NIX_AF_TL1X_SW_XOFF(schq);
+ k++;
+ break;
+ default:
+ break;
+ }
+
+ return k;
+}
+
+static int
+populate_tm_reg(struct otx2_eth_dev *dev,
+ struct otx2_nix_tm_node *tm_node)
+{
+ struct otx2_nix_tm_shaper_profile *profile;
+ uint64_t regval_mask[MAX_REGS_PER_MBOX_MSG];
+ uint64_t regval[MAX_REGS_PER_MBOX_MSG];
+ uint64_t reg[MAX_REGS_PER_MBOX_MSG];
+ struct otx2_mbox *mbox = dev->mbox;
+ uint64_t parent = 0, child = 0;
+ uint32_t hw_lvl, rr_prio, schq;
+ struct nix_txschq_config *req;
+ int rc = -EFAULT;
+ uint8_t k = 0;
+
+ memset(regval_mask, 0, sizeof(regval_mask));
+ profile = nix_tm_shaper_profile_search(dev,
+ tm_node->params.shaper_profile_id);
+ rr_prio = tm_node->rr_prio;
+ hw_lvl = tm_node->hw_lvl;
+ schq = tm_node->hw_id;
+
+ /* Root node will not have a parent node */
+ if (hw_lvl == dev->otx2_tm_root_lvl)
+ parent = tm_node->parent_hw_id;
+ else
+ parent = tm_node->parent->hw_id;
+
+ /* Do we need this trigger to configure TL1 */
+ if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
+ hw_lvl == dev->otx2_tm_root_lvl) {
+ rc = populate_tm_tl1_default(dev, parent);
+ if (rc)
+ goto error;
+ }
+
+ if (hw_lvl != NIX_TXSCH_LVL_SMQ)
+ child = find_prio_anchor(dev, tm_node->id);
+
+ /* Override default rr_prio when TL1
+ * Static Priority is disabled
+ */
+ if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
+ dev->tm_flags & NIX_TM_TL1_NO_SP) {
+ rr_prio = TXSCH_TL1_DFLT_RR_PRIO;
+ child = 0;
+ }
+
+ otx2_tm_dbg("Topology config node %s(%u)->%s(%"PRIu64") lvl %u, id %u"
+ " prio_anchor %"PRIu64" rr_prio %u (%p)",
+ nix_hwlvl2str(hw_lvl), schq, nix_hwlvl2str(hw_lvl + 1),
+ parent, tm_node->lvl, tm_node->id, child, rr_prio, tm_node);
+
+ /* Prepare Topology and Link config */
+ switch (hw_lvl) {
+ case NIX_TXSCH_LVL_SMQ:
+
+		/* Set XOFF, which will be cleared later, and the minimum
+		 * frame length, which is used for zero padding when the
+		 * packet is shorter.
+		 */
+ reg[k] = NIX_AF_SMQX_CFG(schq);
+ regval[k] = BIT_ULL(50) | NIX_MIN_HW_FRS;
+ regval_mask[k] = ~(BIT_ULL(50) | 0x7f);
+ k++;
+
+ /* Parent and schedule conf */
+ reg[k] = NIX_AF_MDQX_PARENT(schq);
+ regval[k] = parent << 16;
+ k++;
+
+ break;
+ case NIX_TXSCH_LVL_TL4:
+ /* Parent and schedule conf */
+ reg[k] = NIX_AF_TL4X_PARENT(schq);
+ regval[k] = parent << 16;
+ k++;
+
+ reg[k] = NIX_AF_TL4X_TOPOLOGY(schq);
+ regval[k] = (child << 32) | (rr_prio << 1);
+ k++;
+
+ /* Configure TL4 to send to SDP channel instead of CGX/LBK */
+ if (otx2_dev_is_sdp(dev)) {
+ reg[k] = NIX_AF_TL4X_SDP_LINK_CFG(schq);
+ regval[k] = BIT_ULL(12);
+ k++;
+ }
+ break;
+ case NIX_TXSCH_LVL_TL3:
+ /* Parent and schedule conf */
+ reg[k] = NIX_AF_TL3X_PARENT(schq);
+ regval[k] = parent << 16;
+ k++;
+
+ reg[k] = NIX_AF_TL3X_TOPOLOGY(schq);
+ regval[k] = (child << 32) | (rr_prio << 1);
+ k++;
+
+ /* Link configuration */
+ if (!otx2_dev_is_sdp(dev) &&
+ dev->link_cfg_lvl == NIX_TXSCH_LVL_TL3) {
+ reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ otx2_nix_get_link(dev));
+ regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
+ k++;
+ }
+
+ break;
+ case NIX_TXSCH_LVL_TL2:
+ /* Parent and schedule conf */
+ reg[k] = NIX_AF_TL2X_PARENT(schq);
+ regval[k] = parent << 16;
+ k++;
+
+ reg[k] = NIX_AF_TL2X_TOPOLOGY(schq);
+ regval[k] = (child << 32) | (rr_prio << 1);
+ k++;
+
+ /* Link configuration */
+ if (!otx2_dev_is_sdp(dev) &&
+ dev->link_cfg_lvl == NIX_TXSCH_LVL_TL2) {
+ reg[k] = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ otx2_nix_get_link(dev));
+ regval[k] = BIT_ULL(12) | nix_get_relchan(dev);
+ k++;
+ }
+
+ break;
+ case NIX_TXSCH_LVL_TL1:
+ reg[k] = NIX_AF_TL1X_TOPOLOGY(schq);
+ regval[k] = (child << 32) | (rr_prio << 1 /*RR_PRIO*/);
+ k++;
+
+ break;
+ }
+
+ /* Prepare schedule config */
+ k += prepare_tm_sched_reg(dev, tm_node, &reg[k], &regval[k]);
+
+ /* Prepare shaping config */
+ k += prepare_tm_shaper_reg(tm_node, profile, &reg[k], &regval[k]);
+
+ if (!k)
+ return 0;
+
+ /* Copy and send config mbox */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = hw_lvl;
+ req->num_regs = k;
+
+ otx2_mbox_memcpy(req->reg, reg, sizeof(uint64_t) * k);
+ otx2_mbox_memcpy(req->regval, regval, sizeof(uint64_t) * k);
+ otx2_mbox_memcpy(req->regval_mask, regval_mask, sizeof(uint64_t) * k);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ goto error;
+
+ return 0;
+error:
+ otx2_err("Txschq cfg request failed for node %p, rc=%d", tm_node, rc);
+ return rc;
+}
+
+
+static int
+nix_tm_txsch_reg_config(struct otx2_eth_dev *dev)
+{
+ struct otx2_nix_tm_node *tm_node;
+ uint32_t hw_lvl;
+ int rc = 0;
+
+ for (hw_lvl = 0; hw_lvl <= dev->otx2_tm_root_lvl; hw_lvl++) {
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->hw_lvl == hw_lvl &&
+ tm_node->hw_lvl != NIX_TXSCH_LVL_CNT) {
+ rc = populate_tm_reg(dev, tm_node);
+ if (rc)
+ goto exit;
+ }
+ }
+ }
+exit:
+ return rc;
+}
+
+static struct otx2_nix_tm_node *
+nix_tm_node_search(struct otx2_eth_dev *dev,
+ uint32_t node_id, bool user)
+{
+ struct otx2_nix_tm_node *tm_node;
+
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->id == node_id &&
+ (user == !!(tm_node->flags & NIX_TM_NODE_USER)))
+ return tm_node;
+ }
+ return NULL;
+}
+
+static uint32_t
+check_rr(struct otx2_eth_dev *dev, uint32_t priority, uint32_t parent_id)
+{
+ struct otx2_nix_tm_node *tm_node;
+ uint32_t rr_num = 0;
+
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!tm_node->parent)
+ continue;
+
+ if (!(tm_node->parent->id == parent_id))
+ continue;
+
+ if (tm_node->priority == priority)
+ rr_num++;
+ }
+ return rr_num;
+}
+
+static int
+nix_tm_update_parent_info(struct otx2_eth_dev *dev)
+{
+ struct otx2_nix_tm_node *tm_node_child;
+ struct otx2_nix_tm_node *tm_node;
+ struct otx2_nix_tm_node *parent;
+ uint32_t rr_num = 0;
+ uint32_t priority;
+
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!tm_node->parent)
+ continue;
+		/* Count the group of children with the same priority, i.e. the RR group */
+ parent = tm_node->parent;
+ priority = tm_node->priority;
+ rr_num = check_rr(dev, priority, parent->id);
+
+ /* Assuming that multiple RR groups are
+ * not configured based on capability.
+ */
+ if (rr_num > 1) {
+ parent->rr_prio = priority;
+ parent->rr_num = rr_num;
+ }
+
+ /* Find out static priority children that are not in RR */
+ TAILQ_FOREACH(tm_node_child, &dev->node_list, node) {
+ if (!tm_node_child->parent)
+ continue;
+ if (parent->id != tm_node_child->parent->id)
+ continue;
+ if (parent->max_prio == UINT32_MAX &&
+ tm_node_child->priority != parent->rr_prio)
+ parent->max_prio = 0;
+
+ if (parent->max_prio < tm_node_child->priority &&
+ parent->rr_prio != tm_node_child->priority)
+ parent->max_prio = tm_node_child->priority;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nix_tm_node_add_to_list(struct otx2_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint16_t hw_lvl,
+ uint16_t lvl, bool user,
+ struct rte_tm_node_params *params)
+{
+ struct otx2_nix_tm_shaper_profile *profile;
+ struct otx2_nix_tm_node *tm_node, *parent_node;
+ uint32_t profile_id;
+
+ profile_id = params->shaper_profile_id;
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+
+ parent_node = nix_tm_node_search(dev, parent_node_id, user);
+
+ tm_node = rte_zmalloc("otx2_nix_tm_node",
+ sizeof(struct otx2_nix_tm_node), 0);
+ if (!tm_node)
+ return -ENOMEM;
+
+ tm_node->lvl = lvl;
+ tm_node->hw_lvl = hw_lvl;
+
+ /* Maintain minimum weight */
+ if (!weight)
+ weight = 1;
+
+ tm_node->id = node_id;
+ tm_node->priority = priority;
+ tm_node->weight = weight;
+ tm_node->rr_prio = 0xf;
+ tm_node->max_prio = UINT32_MAX;
+ tm_node->hw_id = UINT32_MAX;
+ tm_node->flags = 0;
+ if (user)
+ tm_node->flags = NIX_TM_NODE_USER;
+ rte_memcpy(&tm_node->params, params, sizeof(struct rte_tm_node_params));
+
+ if (profile)
+ profile->reference_count++;
+
+ tm_node->parent = parent_node;
+ tm_node->parent_hw_id = UINT32_MAX;
+ shaper_default_red_algo(dev, tm_node, profile);
+
+ TAILQ_INSERT_TAIL(&dev->node_list, tm_node, node);
+
+ return 0;
+}
+
+static int
+nix_tm_clear_shaper_profiles(struct otx2_eth_dev *dev)
+{
+ struct otx2_nix_tm_shaper_profile *shaper_profile;
+
+ while ((shaper_profile = TAILQ_FIRST(&dev->shaper_profile_list))) {
+ if (shaper_profile->reference_count)
+ otx2_tm_dbg("Shaper profile %u has non zero references",
+ shaper_profile->shaper_profile_id);
+ TAILQ_REMOVE(&dev->shaper_profile_list, shaper_profile, shaper);
+ rte_free(shaper_profile);
+ }
+
+ return 0;
+}
+
+static int
+nix_clear_path_xoff(struct otx2_eth_dev *dev,
+ struct otx2_nix_tm_node *tm_node)
+{
+ struct nix_txschq_config *req;
+ struct otx2_nix_tm_node *p;
+ int rc;
+
+ /* Manipulating SW_XOFF not supported on Ax */
+ if (otx2_dev_is_Ax(dev))
+ return 0;
+
+ /* Enable nodes in path for flush to succeed */
+ if (!nix_tm_is_leaf(dev, tm_node->lvl))
+ p = tm_node;
+ else
+ p = tm_node->parent;
+ while (p) {
+ if (!(p->flags & NIX_TM_NODE_ENABLED) &&
+ (p->flags & NIX_TM_NODE_HWRES)) {
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+ req->lvl = p->hw_lvl;
+ req->num_regs = prepare_tm_sw_xoff(p, false, req->reg,
+ req->regval);
+ rc = otx2_mbox_process(dev->mbox);
+ if (rc)
+ return rc;
+
+ p->flags |= NIX_TM_NODE_ENABLED;
+ }
+ p = p->parent;
+ }
+
+ return 0;
+}
+
+static int
+nix_smq_xoff(struct otx2_eth_dev *dev,
+ struct otx2_nix_tm_node *tm_node,
+ bool enable)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_txschq_config *req;
+ uint16_t smq;
+ int rc;
+
+ smq = tm_node->hw_id;
+ otx2_tm_dbg("Setting SMQ %u XOFF/FLUSH to %s", smq,
+ enable ? "enable" : "disable");
+
+ rc = nix_clear_path_xoff(dev, tm_node);
+ if (rc)
+ return rc;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = NIX_TXSCH_LVL_SMQ;
+ req->num_regs = 1;
+
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
+ req->regval[0] = enable ? (BIT_ULL(50) | BIT_ULL(49)) : 0;
+ req->regval_mask[0] = enable ?
+ ~(BIT_ULL(50) | BIT_ULL(49)) : ~BIT_ULL(50);
+
+ return otx2_mbox_process(mbox);
+}
+
+int
+otx2_nix_sq_sqb_aura_fc(void *__txq, bool enable)
+{
+ struct otx2_eth_txq *txq = __txq;
+ struct npa_aq_enq_req *req;
+ struct npa_aq_enq_rsp *rsp;
+ struct otx2_npa_lf *lf;
+ struct otx2_mbox *mbox;
+ uint64_t aura_handle;
+ int rc;
+
+ otx2_tm_dbg("Setting SQ %u SQB aura FC to %s", txq->sq,
+ enable ? "enable" : "disable");
+
+ lf = otx2_npa_lf_obj_get();
+ if (!lf)
+ return -EFAULT;
+ mbox = lf->mbox;
+ /* Set/clear sqb aura fc_ena */
+ aura_handle = txq->sqb_pool->pool_id;
+ req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+ req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_WRITE;
+ /* Below is not needed for aura writes but AF driver needs it */
+ /* AF will translate to associated poolctx */
+ req->aura.pool_addr = req->aura_id;
+
+ req->aura.fc_ena = enable;
+ req->aura_mask.fc_ena = 1;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ /* Read back npa aura ctx */
+ req = otx2_mbox_alloc_msg_npa_aq_enq(mbox);
+
+ req->aura_id = npa_lf_aura_handle_to_aura(aura_handle);
+ req->ctype = NPA_AQ_CTYPE_AURA;
+ req->op = NPA_AQ_INSTOP_READ;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ /* Init when enabled as there might be no triggers */
+ if (enable)
+ *(volatile uint64_t *)txq->fc_mem = rsp->aura.count;
+ else
+ *(volatile uint64_t *)txq->fc_mem = txq->nb_sqb_bufs;
+ /* Sync write barrier */
+ rte_wmb();
+
+ return 0;
+}
+
+static int
+nix_txq_flush_sq_spin(struct otx2_eth_txq *txq)
+{
+ uint16_t sqb_cnt, head_off, tail_off;
+ struct otx2_eth_dev *dev = txq->dev;
+ uint64_t wdata, val, prev;
+ uint16_t sq = txq->sq;
+ int64_t *regaddr;
+	uint64_t timeout; /* in tens of microseconds */
+
+ /* Wait for enough time based on shaper min rate */
+ timeout = (txq->qconf.nb_desc * NIX_MAX_HW_FRS * 8 * 1E5);
+ timeout = timeout / dev->tm_rate_min;
+ if (!timeout)
+ timeout = 10000;
+
+ wdata = ((uint64_t)sq << 32);
+ regaddr = (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS);
+ val = otx2_atomic64_add_nosync(wdata, regaddr);
+
+	/* Spin for multiple iterations as "txq->fc_cache_pkts" may still
+	 * have room to send packets even though fc_mem is disabled.
+	 */
+
+ while (true) {
+ prev = val;
+ rte_delay_us(10);
+ val = otx2_atomic64_add_nosync(wdata, regaddr);
+ /* Continue on error */
+ if (val & BIT_ULL(63))
+ continue;
+
+ if (prev != val)
+ continue;
+
+ sqb_cnt = val & 0xFFFF;
+ head_off = (val >> 20) & 0x3F;
+ tail_off = (val >> 28) & 0x3F;
+
+ /* SQ reached quiescent state */
+ if (sqb_cnt <= 1 && head_off == tail_off &&
+ (*txq->fc_mem == txq->nb_sqb_bufs)) {
+ break;
+ }
+
+ /* Timeout */
+ if (!timeout)
+ goto exit;
+ timeout--;
+ }
+
+ return 0;
+exit:
+ otx2_nix_tm_dump(dev);
+ return -EFAULT;
+}
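+/* Note: the spin loop above treats the SQ as quiescent once at most one
+ * SQB remains, head equals tail and fc_mem has returned to nb_sqb_bufs;
+ * the timeout is derived from the queue depth, NIX_MAX_HW_FRS and the
+ * minimum shaper rate, counted in units of the 10 us delay per iteration.
+ */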
+
+/* Flush and disable tx queue and its parent SMQ */
+int otx2_nix_sq_flush_pre(void *_txq, bool dev_started)
+{
+ struct otx2_nix_tm_node *tm_node, *sibling;
+ struct otx2_eth_txq *txq;
+ struct otx2_eth_dev *dev;
+ uint16_t sq;
+ bool user;
+ int rc;
+
+ txq = _txq;
+ dev = txq->dev;
+ sq = txq->sq;
+
+ user = !!(dev->tm_flags & NIX_TM_COMMITTED);
+
+ /* Find the node for this SQ */
+ tm_node = nix_tm_node_search(dev, sq, user);
+ if (!tm_node || !(tm_node->flags & NIX_TM_NODE_ENABLED)) {
+ otx2_err("Invalid node/state for sq %u", sq);
+ return -EFAULT;
+ }
+
+ /* Enable CGX RXTX to drain pkts */
+ if (!dev_started) {
+		/* Though this enables both RX MCAM entries and the CGX
+		 * link, we assume all Rx queues have already been stopped.
+		 */
+ otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
+ rc = otx2_mbox_process(dev->mbox);
+ if (rc) {
+ otx2_err("cgx start failed, rc=%d", rc);
+ return rc;
+ }
+ }
+
+	/* Disable SMQ XOFF in case it was enabled earlier */
+ rc = nix_smq_xoff(dev, tm_node->parent, false);
+ if (rc) {
+ otx2_err("Failed to enable smq %u, rc=%d",
+ tm_node->parent->hw_id, rc);
+ return rc;
+ }
+
+	/* As per the HRM, to disable an SQ, all other SQs that feed
+	 * the same SMQ must be paused before the SMQ flush.
+	 */
+ TAILQ_FOREACH(sibling, &dev->node_list, node) {
+ if (sibling->parent != tm_node->parent)
+ continue;
+ if (!(sibling->flags & NIX_TM_NODE_ENABLED))
+ continue;
+
+ sq = sibling->id;
+ txq = dev->eth_dev->data->tx_queues[sq];
+ if (!txq)
+ continue;
+
+ rc = otx2_nix_sq_sqb_aura_fc(txq, false);
+ if (rc) {
+ otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
+ goto cleanup;
+ }
+
+ /* Wait for sq entries to be flushed */
+ rc = nix_txq_flush_sq_spin(txq);
+ if (rc) {
+ otx2_err("Failed to drain sq %u, rc=%d\n", txq->sq, rc);
+ return rc;
+ }
+ }
+
+ tm_node->flags &= ~NIX_TM_NODE_ENABLED;
+
+ /* Disable and flush */
+ rc = nix_smq_xoff(dev, tm_node->parent, true);
+ if (rc) {
+ otx2_err("Failed to disable smq %u, rc=%d",
+ tm_node->parent->hw_id, rc);
+ goto cleanup;
+ }
+cleanup:
+ /* Restore cgx state */
+ if (!dev_started) {
+ otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
+ rc |= otx2_mbox_process(dev->mbox);
+ }
+
+ return rc;
+}
+
+int otx2_nix_sq_flush_post(void *_txq)
+{
+ struct otx2_nix_tm_node *tm_node, *sibling;
+ struct otx2_eth_txq *txq = _txq;
+ struct otx2_eth_txq *s_txq;
+ struct otx2_eth_dev *dev;
+ bool once = false;
+ uint16_t sq, s_sq;
+ bool user;
+ int rc;
+
+ dev = txq->dev;
+ sq = txq->sq;
+ user = !!(dev->tm_flags & NIX_TM_COMMITTED);
+
+ /* Find the node for this SQ */
+ tm_node = nix_tm_node_search(dev, sq, user);
+ if (!tm_node) {
+ otx2_err("Invalid node for sq %u", sq);
+ return -EFAULT;
+ }
+
+ /* Enable all the siblings back */
+ TAILQ_FOREACH(sibling, &dev->node_list, node) {
+ if (sibling->parent != tm_node->parent)
+ continue;
+
+ if (sibling->id == sq)
+ continue;
+
+ if (!(sibling->flags & NIX_TM_NODE_ENABLED))
+ continue;
+
+ s_sq = sibling->id;
+ s_txq = dev->eth_dev->data->tx_queues[s_sq];
+ if (!s_txq)
+ continue;
+
+ if (!once) {
+ /* Enable back if any SQ is still present */
+ rc = nix_smq_xoff(dev, tm_node->parent, false);
+ if (rc) {
+ otx2_err("Failed to enable smq %u, rc=%d",
+ tm_node->parent->hw_id, rc);
+ return rc;
+ }
+ once = true;
+ }
+
+ rc = otx2_nix_sq_sqb_aura_fc(s_txq, true);
+ if (rc) {
+ otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int
+nix_sq_sched_data(struct otx2_eth_dev *dev,
+ struct otx2_nix_tm_node *tm_node,
+ bool rr_quantum_only)
+{
+ struct rte_eth_dev *eth_dev = dev->eth_dev;
+ struct otx2_mbox *mbox = dev->mbox;
+ uint16_t sq = tm_node->id, smq;
+ struct nix_aq_enq_req *req;
+ uint64_t rr_quantum;
+ int rc;
+
+ smq = tm_node->parent->hw_id;
+ rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
+
+ if (rr_quantum_only)
+ otx2_tm_dbg("Update sq(%u) rr_quantum 0x%"PRIx64, sq, rr_quantum);
+ else
+ otx2_tm_dbg("Enabling sq(%u)->smq(%u), rr_quantum 0x%"PRIx64,
+ sq, smq, rr_quantum);
+
+ if (sq > eth_dev->data->nb_tx_queues)
+ return -EFAULT;
+
+ req = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ req->qidx = sq;
+ req->ctype = NIX_AQ_CTYPE_SQ;
+ req->op = NIX_AQ_INSTOP_WRITE;
+
+ /* smq update only when needed */
+ if (!rr_quantum_only) {
+ req->sq.smq = smq;
+ req->sq_mask.smq = ~req->sq_mask.smq;
+ }
+ req->sq.smq_rr_quantum = rr_quantum;
+ req->sq_mask.smq_rr_quantum = ~req->sq_mask.smq_rr_quantum;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ otx2_err("Failed to set smq, rc=%d", rc);
+ return rc;
+}
+
+int otx2_nix_sq_enable(void *_txq)
+{
+ struct otx2_eth_txq *txq = _txq;
+ int rc;
+
+ /* Enable sqb_aura fc */
+ rc = otx2_nix_sq_sqb_aura_fc(txq, true);
+ if (rc) {
+ otx2_err("Failed to enable sqb aura fc, rc=%d", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+nix_tm_free_resources(struct otx2_eth_dev *dev, uint32_t flags_mask,
+ uint32_t flags, bool hw_only)
+{
+ struct otx2_nix_tm_shaper_profile *profile;
+ struct otx2_nix_tm_node *tm_node, *next_node;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_txsch_free_req *req;
+ uint32_t profile_id;
+ int rc = 0;
+
+ next_node = TAILQ_FIRST(&dev->node_list);
+ while (next_node) {
+ tm_node = next_node;
+ next_node = TAILQ_NEXT(tm_node, node);
+
+ /* Check for only requested nodes */
+ if ((tm_node->flags & flags_mask) != flags)
+ continue;
+
+ if (!nix_tm_is_leaf(dev, tm_node->lvl) &&
+ tm_node->hw_lvl != NIX_TXSCH_LVL_TL1 &&
+ tm_node->flags & NIX_TM_NODE_HWRES) {
+ /* Free specific HW resource */
+ otx2_tm_dbg("Free hwres %s(%u) lvl %u id %u (%p)",
+ nix_hwlvl2str(tm_node->hw_lvl),
+ tm_node->hw_id, tm_node->lvl,
+ tm_node->id, tm_node);
+
+ rc = nix_clear_path_xoff(dev, tm_node);
+ if (rc)
+ return rc;
+
+ req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
+ req->flags = 0;
+ req->schq_lvl = tm_node->hw_lvl;
+ req->schq = tm_node->hw_id;
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+ tm_node->flags &= ~NIX_TM_NODE_HWRES;
+ }
+
+ /* Leave software elements if needed */
+ if (hw_only)
+ continue;
+
+ otx2_tm_dbg("Free node lvl %u id %u (%p)",
+ tm_node->lvl, tm_node->id, tm_node);
+
+ profile_id = tm_node->params.shaper_profile_id;
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+ if (profile)
+ profile->reference_count--;
+
+ TAILQ_REMOVE(&dev->node_list, tm_node, node);
+ rte_free(tm_node);
+ }
+
+ if (!flags_mask) {
+ /* Free all hw resources */
+ req = otx2_mbox_alloc_msg_nix_txsch_free(mbox);
+ req->flags = TXSCHQ_FREE_ALL;
+
+ return otx2_mbox_process(mbox);
+ }
+
+ return rc;
+}
+
+static uint8_t
+nix_tm_copy_rsp_to_dev(struct otx2_eth_dev *dev,
+ struct nix_txsch_alloc_rsp *rsp)
+{
+ uint16_t schq;
+ uint8_t lvl;
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ for (schq = 0; schq < MAX_TXSCHQ_PER_FUNC; schq++) {
+ dev->txschq_list[lvl][schq] = rsp->schq_list[lvl][schq];
+ dev->txschq_contig_list[lvl][schq] =
+ rsp->schq_contig_list[lvl][schq];
+ }
+
+ dev->txschq[lvl] = rsp->schq[lvl];
+ dev->txschq_contig[lvl] = rsp->schq_contig[lvl];
+ }
+ return 0;
+}
+
+static int
+nix_tm_assign_id_to_node(struct otx2_eth_dev *dev,
+ struct otx2_nix_tm_node *child,
+ struct otx2_nix_tm_node *parent)
+{
+ uint32_t hw_id, schq_con_index, prio_offset;
+ uint32_t l_id, schq_index;
+
+ otx2_tm_dbg("Assign hw id for child node %s lvl %u id %u (%p)",
+ nix_hwlvl2str(child->hw_lvl), child->lvl, child->id, child);
+
+ child->flags |= NIX_TM_NODE_HWRES;
+
+ /* Process root nodes */
+ if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 &&
+ child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
+ int idx = 0;
+ uint32_t tschq_con_index;
+
+ l_id = child->hw_lvl;
+ tschq_con_index = dev->txschq_contig_index[l_id];
+ hw_id = dev->txschq_contig_list[l_id][tschq_con_index];
+ child->hw_id = hw_id;
+ dev->txschq_contig_index[l_id]++;
+ /* Update TL1 hw_id for its parent for config purpose */
+ idx = dev->txschq_index[NIX_TXSCH_LVL_TL1]++;
+ hw_id = dev->txschq_list[NIX_TXSCH_LVL_TL1][idx];
+ child->parent_hw_id = hw_id;
+ return 0;
+ }
+ if (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL1 &&
+ child->hw_lvl == dev->otx2_tm_root_lvl && !parent) {
+ uint32_t tschq_con_index;
+
+ l_id = child->hw_lvl;
+ tschq_con_index = dev->txschq_index[l_id];
+ hw_id = dev->txschq_list[l_id][tschq_con_index];
+ child->hw_id = hw_id;
+ dev->txschq_index[l_id]++;
+ return 0;
+ }
+
+ /* Process children with parents */
+ l_id = child->hw_lvl;
+ schq_index = dev->txschq_index[l_id];
+ schq_con_index = dev->txschq_contig_index[l_id];
+
+ if (child->priority == parent->rr_prio) {
+ hw_id = dev->txschq_list[l_id][schq_index];
+ child->hw_id = hw_id;
+ child->parent_hw_id = parent->hw_id;
+ dev->txschq_index[l_id]++;
+ } else {
+ prio_offset = schq_con_index + child->priority;
+ hw_id = dev->txschq_contig_list[l_id][prio_offset];
+ child->hw_id = hw_id;
+ }
+ return 0;
+}
+
+static int
+nix_tm_assign_hw_id(struct otx2_eth_dev *dev)
+{
+ struct otx2_nix_tm_node *parent, *child;
+ uint32_t child_hw_lvl, con_index_inc, i;
+
+ for (i = NIX_TXSCH_LVL_TL1; i > 0; i--) {
+ TAILQ_FOREACH(parent, &dev->node_list, node) {
+ child_hw_lvl = parent->hw_lvl - 1;
+ if (parent->hw_lvl != i)
+ continue;
+ TAILQ_FOREACH(child, &dev->node_list, node) {
+ if (!child->parent)
+ continue;
+ if (child->parent->id != parent->id)
+ continue;
+ nix_tm_assign_id_to_node(dev, child, parent);
+ }
+
+ con_index_inc = parent->max_prio + 1;
+ dev->txschq_contig_index[child_hw_lvl] += con_index_inc;
+
+ /*
+ * Explicitly assign id to parent node if it
+ * doesn't have a parent
+ */
+ if (parent->hw_lvl == dev->otx2_tm_root_lvl)
+ nix_tm_assign_id_to_node(dev, parent, NULL);
+ }
+ }
+ return 0;
+}
+
+static uint8_t
+nix_tm_count_req_schq(struct otx2_eth_dev *dev,
+ struct nix_txsch_alloc_req *req, uint8_t lvl)
+{
+ struct otx2_nix_tm_node *tm_node;
+ uint8_t contig_count;
+
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (lvl == tm_node->hw_lvl) {
+ req->schq[lvl - 1] += tm_node->rr_num;
+ if (tm_node->max_prio != UINT32_MAX) {
+ contig_count = tm_node->max_prio + 1;
+ req->schq_contig[lvl - 1] += contig_count;
+ }
+ }
+ if (lvl == dev->otx2_tm_root_lvl &&
+ dev->otx2_tm_root_lvl && lvl == NIX_TXSCH_LVL_TL2 &&
+ tm_node->hw_lvl == dev->otx2_tm_root_lvl) {
+ req->schq_contig[dev->otx2_tm_root_lvl]++;
+ }
+ }
+
+ req->schq[NIX_TXSCH_LVL_TL1] = 1;
+ req->schq_contig[NIX_TXSCH_LVL_TL1] = 0;
+
+ return 0;
+}
+
+static int
+nix_tm_prepare_txschq_req(struct otx2_eth_dev *dev,
+ struct nix_txsch_alloc_req *req)
+{
+ uint8_t i;
+
+ for (i = NIX_TXSCH_LVL_TL1; i > 0; i--)
+ nix_tm_count_req_schq(dev, req, i);
+
+ for (i = 0; i < NIX_TXSCH_LVL_CNT; i++) {
+ dev->txschq_index[i] = 0;
+ dev->txschq_contig_index[i] = 0;
+ }
+ return 0;
+}
+
+static int
+nix_tm_send_txsch_alloc_msg(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_txsch_alloc_req *req;
+ struct nix_txsch_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_txsch_alloc(mbox);
+
+ rc = nix_tm_prepare_txschq_req(dev, req);
+ if (rc)
+ return rc;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ nix_tm_copy_rsp_to_dev(dev, rsp);
+ dev->link_cfg_lvl = rsp->link_cfg_lvl;
+
+ nix_tm_assign_hw_id(dev);
+ return 0;
+}
+
+static int
+nix_tm_alloc_resources(struct rte_eth_dev *eth_dev, bool xmit_enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node;
+ struct otx2_eth_txq *txq;
+ uint16_t sq;
+ int rc;
+
+ nix_tm_update_parent_info(dev);
+
+ rc = nix_tm_send_txsch_alloc_msg(dev);
+ if (rc) {
+ otx2_err("TM failed to alloc tm resources=%d", rc);
+ return rc;
+ }
+
+ rc = nix_tm_txsch_reg_config(dev);
+ if (rc) {
+ otx2_err("TM failed to configure sched registers=%d", rc);
+ return rc;
+ }
+
+ /* Trigger MTU recalculate as SMQ needs MTU conf */
+ if (eth_dev->data->dev_started && eth_dev->data->nb_rx_queues) {
+ rc = otx2_nix_recalc_mtu(eth_dev);
+ if (rc) {
+ otx2_err("TM MTU update failed, rc=%d", rc);
+ return rc;
+ }
+ }
+
+ /* Mark all non-leaf nodes as enabled */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!nix_tm_is_leaf(dev, tm_node->lvl))
+ tm_node->flags |= NIX_TM_NODE_ENABLED;
+ }
+
+ if (!xmit_enable)
+ return 0;
+
+ /* Update SQ Sched Data while SQ is idle */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!nix_tm_is_leaf(dev, tm_node->lvl))
+ continue;
+
+ rc = nix_sq_sched_data(dev, tm_node, false);
+ if (rc) {
+ otx2_err("SQ %u sched update failed, rc=%d",
+ tm_node->id, rc);
+ return rc;
+ }
+ }
+
+ /* Finally XON all SMQs */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+
+ rc = nix_smq_xoff(dev, tm_node, false);
+ if (rc) {
+ otx2_err("Failed to enable smq %u, rc=%d",
+ tm_node->hw_id, rc);
+ return rc;
+ }
+ }
+
+ /* Enable xmit as all the topology is ready */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!nix_tm_is_leaf(dev, tm_node->lvl))
+ continue;
+
+ sq = tm_node->id;
+ txq = eth_dev->data->tx_queues[sq];
+
+ rc = otx2_nix_sq_enable(txq);
+ if (rc) {
+ otx2_err("TM sw xon failed on SQ %u, rc=%d",
+ tm_node->id, rc);
+ return rc;
+ }
+ tm_node->flags |= NIX_TM_NODE_ENABLED;
+ }
+
+ return 0;
+}
+
+static int
+send_tm_reqval(struct otx2_mbox *mbox,
+ struct nix_txschq_config *req,
+ struct rte_tm_error *error)
+{
+ int rc;
+
+ if (!req->num_regs ||
+ req->num_regs > MAX_REGS_PER_MBOX_MSG) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "invalid config";
+ return -EIO;
+ }
+
+ rc = otx2_mbox_process(mbox);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ }
+ return rc;
+}
+
+static uint16_t
+nix_tm_lvl2nix(struct otx2_eth_dev *dev, uint32_t lvl)
+{
+ if (nix_tm_have_tl1_access(dev)) {
+ switch (lvl) {
+ case OTX2_TM_LVL_ROOT:
+ return NIX_TXSCH_LVL_TL1;
+ case OTX2_TM_LVL_SCH1:
+ return NIX_TXSCH_LVL_TL2;
+ case OTX2_TM_LVL_SCH2:
+ return NIX_TXSCH_LVL_TL3;
+ case OTX2_TM_LVL_SCH3:
+ return NIX_TXSCH_LVL_TL4;
+ case OTX2_TM_LVL_SCH4:
+ return NIX_TXSCH_LVL_SMQ;
+ default:
+ return NIX_TXSCH_LVL_CNT;
+ }
+ } else {
+ switch (lvl) {
+ case OTX2_TM_LVL_ROOT:
+ return NIX_TXSCH_LVL_TL2;
+ case OTX2_TM_LVL_SCH1:
+ return NIX_TXSCH_LVL_TL3;
+ case OTX2_TM_LVL_SCH2:
+ return NIX_TXSCH_LVL_TL4;
+ case OTX2_TM_LVL_SCH3:
+ return NIX_TXSCH_LVL_SMQ;
+ default:
+ return NIX_TXSCH_LVL_CNT;
+ }
+ }
+}
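+
+/*
+ * For reference, the mapping implemented above works out to the following
+ * (derived purely from the switch cases, nothing beyond them is implied):
+ *
+ *	rte_tm sw level		with TL1 access		without TL1 access (VF)
+ *	OTX2_TM_LVL_ROOT	NIX_TXSCH_LVL_TL1	NIX_TXSCH_LVL_TL2
+ *	OTX2_TM_LVL_SCH1	NIX_TXSCH_LVL_TL2	NIX_TXSCH_LVL_TL3
+ *	OTX2_TM_LVL_SCH2	NIX_TXSCH_LVL_TL3	NIX_TXSCH_LVL_TL4
+ *	OTX2_TM_LVL_SCH3	NIX_TXSCH_LVL_TL4	NIX_TXSCH_LVL_SMQ
+ *	OTX2_TM_LVL_SCH4	NIX_TXSCH_LVL_SMQ	NIX_TXSCH_LVL_CNT (leaf)
+ */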
+
+static uint16_t
+nix_max_prio(struct otx2_eth_dev *dev, uint16_t hw_lvl)
+{
+ if (hw_lvl >= NIX_TXSCH_LVL_CNT)
+ return 0;
+
+ /* MDQ doesn't support SP */
+ if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+ return 0;
+
+ /* PF's TL1 with VF's enabled doesn't support SP */
+ if (hw_lvl == NIX_TXSCH_LVL_TL1 &&
+ (dev->otx2_tm_root_lvl == NIX_TXSCH_LVL_TL2 ||
+ (dev->tm_flags & NIX_TM_TL1_NO_SP)))
+ return 0;
+
+ return TXSCH_TLX_SP_PRIO_MAX - 1;
+}
+
+
+static int
+validate_prio(struct otx2_eth_dev *dev, uint32_t lvl,
+ uint32_t parent_id, uint32_t priority,
+ struct rte_tm_error *error)
+{
+ uint8_t priorities[TXSCH_TLX_SP_PRIO_MAX];
+ struct otx2_nix_tm_node *tm_node;
+ uint32_t rr_num = 0;
+ int i;
+
+ /* Validate priority against max */
+ if (priority > nix_max_prio(dev, nix_tm_lvl2nix(dev, lvl - 1))) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "unsupported priority value";
+ return -EINVAL;
+ }
+
+ if (parent_id == RTE_TM_NODE_ID_NULL)
+ return 0;
+
+ memset(priorities, 0, TXSCH_TLX_SP_PRIO_MAX);
+ priorities[priority] = 1;
+
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (!tm_node->parent)
+ continue;
+
+ if (!(tm_node->flags & NIX_TM_NODE_USER))
+ continue;
+
+ if (tm_node->parent->id != parent_id)
+ continue;
+
+ priorities[tm_node->priority]++;
+ }
+
+ for (i = 0; i < TXSCH_TLX_SP_PRIO_MAX; i++)
+ if (priorities[i] > 1)
+ rr_num++;
+
+ /* At most, one RR group per parent */
+ if (rr_num > 1) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "multiple DWRR node priority";
+ return -EINVAL;
+ }
+
+ /* Check for previous priority to avoid holes in priorities */
+ if (priority && !priorities[priority - 1]) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PRIORITY;
+ error->message = "priority not in order";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+read_tm_reg(struct otx2_mbox *mbox, uint64_t reg,
+ uint64_t *regval, uint32_t hw_lvl)
+{
+ volatile struct nix_txschq_config *req;
+ struct nix_txschq_config *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->read = 1;
+ req->lvl = hw_lvl;
+ req->reg[0] = reg;
+ req->num_regs = 1;
+
+ rc = otx2_mbox_process_msg(mbox, (void **)&rsp);
+ if (rc)
+ return rc;
+ *regval = rsp->regval[0];
+ return 0;
+}
+
+/* Search for min rate in topology */
+static void
+nix_tm_shaper_profile_update_min(struct otx2_eth_dev *dev)
+{
+ struct otx2_nix_tm_shaper_profile *profile;
+ uint64_t rate_min = 1E9; /* 1 Gbps */
+
+ TAILQ_FOREACH(profile, &dev->shaper_profile_list, shaper) {
+ if (profile->params.peak.rate &&
+ profile->params.peak.rate < rate_min)
+ rate_min = profile->params.peak.rate;
+
+ if (profile->params.committed.rate &&
+ profile->params.committed.rate < rate_min)
+ rate_min = profile->params.committed.rate;
+ }
+
+ dev->tm_rate_min = rate_min;
+}
+
+static int
+nix_xmit_disable(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
+ uint16_t sqb_cnt, head_off, tail_off;
+ struct otx2_nix_tm_node *tm_node;
+ struct otx2_eth_txq *txq;
+ uint64_t wdata, val;
+ int i, rc;
+
+ otx2_tm_dbg("Disabling xmit on %s", eth_dev->data->name);
+
+ /* Enable CGX RXTX to drain pkts */
+ if (!eth_dev->data->dev_started) {
+ otx2_mbox_alloc_msg_nix_lf_start_rx(dev->mbox);
+ rc = otx2_mbox_process(dev->mbox);
+ if (rc)
+ return rc;
+ }
+
+ /* XON all SMQs */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_smq_xoff(dev, tm_node, false);
+ if (rc) {
+ otx2_err("Failed to enable smq %u, rc=%d",
+ tm_node->hw_id, rc);
+ goto cleanup;
+ }
+ }
+
+ /* Flush all tx queues */
+ for (i = 0; i < sq_cnt; i++) {
+ txq = eth_dev->data->tx_queues[i];
+
+ rc = otx2_nix_sq_sqb_aura_fc(txq, false);
+ if (rc) {
+ otx2_err("Failed to disable sqb aura fc, rc=%d", rc);
+ goto cleanup;
+ }
+
+ /* Wait for sq entries to be flushed */
+ rc = nix_txq_flush_sq_spin(txq);
+ if (rc) {
+ otx2_err("Failed to drain sq, rc=%d\n", rc);
+ goto cleanup;
+ }
+ }
+
+ /* XOFF & flush all SMQs. The HRM mandates that
+  * all SQs be empty before an SMQ flush is issued.
+  */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->hw_lvl != NIX_TXSCH_LVL_SMQ)
+ continue;
+ if (!(tm_node->flags & NIX_TM_NODE_HWRES))
+ continue;
+
+ rc = nix_smq_xoff(dev, tm_node, true);
+ if (rc) {
+ otx2_err("Failed to enable smq %u, rc=%d",
+ tm_node->hw_id, rc);
+ goto cleanup;
+ }
+ }
+
+ /* Verify sanity of all tx queues */
+ for (i = 0; i < sq_cnt; i++) {
+ txq = eth_dev->data->tx_queues[i];
+
+ wdata = ((uint64_t)txq->sq << 32);
+ val = otx2_atomic64_add_nosync(wdata,
+ (int64_t *)(dev->base + NIX_LF_SQ_OP_STATUS));
+
+ sqb_cnt = val & 0xFFFF;
+ head_off = (val >> 20) & 0x3F;
+ tail_off = (val >> 28) & 0x3F;
+
+ if (sqb_cnt > 1 || head_off != tail_off ||
+ (*txq->fc_mem != txq->nb_sqb_bufs))
+ otx2_err("Failed to gracefully flush sq %u", txq->sq);
+ }
+
+cleanup:
+ /* restore cgx state */
+ if (!eth_dev->data->dev_started) {
+ otx2_mbox_alloc_msg_nix_lf_stop_rx(dev->mbox);
+ rc |= otx2_mbox_process(dev->mbox);
+ }
+
+ return rc;
+}
+
+static int
+otx2_nix_tm_node_type_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ int *is_leaf, struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node;
+
+ if (is_leaf == NULL) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ return -EINVAL;
+ }
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (node_id == RTE_TM_NODE_ID_NULL || !tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ return -EINVAL;
+ }
+ if (nix_tm_is_leaf(dev, tm_node->lvl))
+ *is_leaf = true;
+ else
+ *is_leaf = false;
+ return 0;
+}
+
+static int
+otx2_nix_tm_capa_get(struct rte_eth_dev *eth_dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc, max_nr_nodes = 0, i;
+ struct free_rsrcs_rsp *rsp;
+
+ memset(cap, 0, sizeof(*cap));
+
+ otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ for (i = 0; i < NIX_TXSCH_LVL_TL1; i++)
+ max_nr_nodes += rsp->schq[i];
+
+ cap->n_nodes_max = max_nr_nodes + dev->tm_leaf_cnt;
+ /* TL1 level is reserved for PF */
+ cap->n_levels_max = nix_tm_have_tl1_access(dev) ?
+ OTX2_TM_LVL_MAX : OTX2_TM_LVL_MAX - 1;
+ cap->non_leaf_nodes_identical = 1;
+ cap->leaf_nodes_identical = 1;
+
+ /* Shaper Capabilities */
+ cap->shaper_private_n_max = max_nr_nodes;
+ cap->shaper_n_max = max_nr_nodes;
+ cap->shaper_private_dual_rate_n_max = max_nr_nodes;
+ cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+ cap->shaper_pkt_length_adjust_min = 0;
+ cap->shaper_pkt_length_adjust_max = 0;
+
+ /* Schedule Capabilities */
+ cap->sched_n_children_max = rsp->schq[NIX_TXSCH_LVL_MDQ];
+ cap->sched_sp_n_priorities_max = TXSCH_TLX_SP_PRIO_MAX;
+ cap->sched_wfq_n_children_per_group_max = cap->sched_n_children_max;
+ cap->sched_wfq_n_groups_max = 1;
+ cap->sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+ cap->dynamic_update_mask =
+ RTE_TM_UPDATE_NODE_PARENT_KEEP_LEVEL |
+ RTE_TM_UPDATE_NODE_SUSPEND_RESUME;
+ cap->stats_mask =
+ RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES |
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
+ for (i = 0; i < RTE_COLORS; i++) {
+ cap->mark_vlan_dei_supported[i] = false;
+ cap->mark_ip_ecn_tcp_supported[i] = false;
+ cap->mark_ip_dscp_supported[i] = false;
+ }
+
+ return 0;
+}
+
+static int
+otx2_nix_tm_level_capa_get(struct rte_eth_dev *eth_dev, uint32_t lvl,
+ struct rte_tm_level_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct free_rsrcs_rsp *rsp;
+ uint16_t hw_lvl;
+ int rc;
+
+ memset(cap, 0, sizeof(*cap));
+
+ otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ hw_lvl = nix_tm_lvl2nix(dev, lvl);
+
+ if (nix_tm_is_leaf(dev, lvl)) {
+ /* Leaf */
+ cap->n_nodes_max = dev->tm_leaf_cnt;
+ cap->n_nodes_leaf_max = dev->tm_leaf_cnt;
+ cap->leaf_nodes_identical = 1;
+ cap->leaf.stats_mask =
+ RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+
+ } else if (lvl == OTX2_TM_LVL_ROOT) {
+ /* Root node, aka TL2(vf)/TL1(pf) */
+ cap->n_nodes_max = 1;
+ cap->n_nodes_nonleaf_max = 1;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported =
+ nix_tm_have_tl1_access(dev) ? false : true;
+ cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+ cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ nix_max_prio(dev, hw_lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+ if (nix_tm_have_tl1_access(dev))
+ cap->nonleaf.stats_mask =
+ RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ } else if ((lvl < OTX2_TM_LVL_MAX) &&
+ (hw_lvl < NIX_TXSCH_LVL_CNT)) {
+ /* TL2, TL3, TL4, MDQ */
+ cap->n_nodes_max = rsp->schq[hw_lvl];
+ cap->n_nodes_nonleaf_max = cap->n_nodes_max;
+ cap->non_leaf_nodes_identical = 1;
+
+ cap->nonleaf.shaper_private_supported = true;
+ cap->nonleaf.shaper_private_dual_rate_supported = true;
+ cap->nonleaf.shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->nonleaf.shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+ /* MDQ doesn't support Strict Priority */
+ if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+ cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+ else
+ cap->nonleaf.sched_n_children_max =
+ rsp->schq[hw_lvl - 1];
+ cap->nonleaf.sched_sp_n_priorities_max =
+ nix_max_prio(dev, hw_lvl) + 1;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+ } else {
+ /* unsupported level */
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unsupported level";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+otx2_nix_tm_node_capa_get(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_capabilities *cap,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_nix_tm_node *tm_node;
+ struct free_rsrcs_rsp *rsp;
+ int rc, hw_lvl, lvl;
+
+ memset(cap, 0, sizeof(*cap));
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ hw_lvl = tm_node->hw_lvl;
+ lvl = tm_node->lvl;
+
+ /* Leaf node */
+ if (nix_tm_is_leaf(dev, lvl)) {
+ cap->stats_mask = RTE_TM_STATS_N_PKTS |
+ RTE_TM_STATS_N_BYTES;
+ return 0;
+ }
+
+ otx2_mbox_alloc_msg_free_rsrc_cnt(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "unexpected fatal error";
+ return rc;
+ }
+
+ /* Non Leaf Shaper */
+ cap->shaper_private_supported = true;
+ cap->shaper_private_dual_rate_supported =
+ (hw_lvl == NIX_TXSCH_LVL_TL1) ? false : true;
+ cap->shaper_private_rate_min = MIN_SHAPER_RATE / 8;
+ cap->shaper_private_rate_max = MAX_SHAPER_RATE / 8;
+
+ /* Non Leaf Scheduler */
+ if (hw_lvl == NIX_TXSCH_LVL_MDQ)
+ cap->nonleaf.sched_n_children_max = dev->tm_leaf_cnt;
+ else
+ cap->nonleaf.sched_n_children_max = rsp->schq[hw_lvl - 1];
+
+ cap->nonleaf.sched_sp_n_priorities_max = nix_max_prio(dev, hw_lvl) + 1;
+ cap->nonleaf.sched_wfq_n_children_per_group_max =
+ cap->nonleaf.sched_n_children_max;
+ cap->nonleaf.sched_wfq_n_groups_max = 1;
+ cap->nonleaf.sched_wfq_weight_max = MAX_SCHED_WEIGHT;
+
+ if (hw_lvl == NIX_TXSCH_LVL_TL1)
+ cap->stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+ return 0;
+}
+
+static int
+otx2_nix_tm_shaper_profile_add(struct rte_eth_dev *eth_dev,
+ uint32_t profile_id,
+ struct rte_tm_shaper_params *params,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_shaper_profile *profile;
+
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+ if (profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "shaper profile ID exist";
+ return -EINVAL;
+ }
+
+ /* Committed rate and burst size can be enabled/disabled */
+ if (params->committed.size || params->committed.rate) {
+ if (params->committed.size < MIN_SHAPER_BURST ||
+ params->committed.size > MAX_SHAPER_BURST) {
+ error->type =
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+ return -EINVAL;
+ } else if (!shaper_rate_to_nix(params->committed.rate * 8,
+ NULL, NULL, NULL)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+ error->message = "shaper committed rate invalid";
+ return -EINVAL;
+ }
+ }
+
+ /* Peak rate and burst size can be enabled/disabled */
+ if (params->peak.size || params->peak.rate) {
+ if (params->peak.size < MIN_SHAPER_BURST ||
+ params->peak.size > MAX_SHAPER_BURST) {
+ error->type =
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+ return -EINVAL;
+ } else if (!shaper_rate_to_nix(params->peak.rate * 8,
+ NULL, NULL, NULL)) {
+ error->type =
+ RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_RATE;
+ error->message = "shaper peak rate invalid";
+ return -EINVAL;
+ }
+ }
+
+ profile = rte_zmalloc("otx2_nix_tm_shaper_profile",
+ sizeof(struct otx2_nix_tm_shaper_profile), 0);
+ if (!profile)
+ return -ENOMEM;
+
+ profile->shaper_profile_id = profile_id;
+ rte_memcpy(&profile->params, params,
+ sizeof(struct rte_tm_shaper_params));
+ TAILQ_INSERT_TAIL(&dev->shaper_profile_list, profile, shaper);
+
+ otx2_tm_dbg("Added TM shaper profile %u, "
+ " pir %" PRIu64 " , pbs %" PRIu64 ", cir %" PRIu64
+ ", cbs %" PRIu64 " , adj %u",
+ profile_id,
+ params->peak.rate * 8,
+ params->peak.size,
+ params->committed.rate * 8,
+ params->committed.size,
+ params->pkt_length_adjust);
+
+ /* Translate rate as bits per second */
+ profile->params.peak.rate = profile->params.peak.rate * 8;
+ profile->params.committed.rate = profile->params.committed.rate * 8;
+ /* Always use PIR for single rate shaping */
+ if (!params->peak.rate && params->committed.rate) {
+ profile->params.peak = profile->params.committed;
+ memset(&profile->params.committed, 0,
+ sizeof(profile->params.committed));
+ }
+
+ /* update min rate */
+ nix_tm_shaper_profile_update_min(dev);
+ return 0;
+}
+
+static int
+otx2_nix_tm_shaper_profile_delete(struct rte_eth_dev *eth_dev,
+ uint32_t profile_id,
+ struct rte_tm_error *error)
+{
+ struct otx2_nix_tm_shaper_profile *profile;
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+
+ if (!profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "shaper profile ID not exist";
+ return -EINVAL;
+ }
+
+ if (profile->reference_count) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+ error->message = "shaper profile in use";
+ return -EINVAL;
+ }
+
+ otx2_tm_dbg("Removing TM shaper profile %u", profile_id);
+ TAILQ_REMOVE(&dev->shaper_profile_list, profile, shaper);
+ rte_free(profile);
+
+ /* update min rate */
+ nix_tm_shaper_profile_update_min(dev);
+ return 0;
+}
+
+static int
+otx2_nix_tm_node_add(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t lvl,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *parent_node;
+ int rc, clear_on_fail = 0;
+ uint32_t exp_next_lvl;
+ uint16_t hw_lvl;
+
+ /* we don't support dynamic updates */
+ if (dev->tm_flags & NIX_TM_COMMITTED) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "dynamic update not supported";
+ return -EIO;
+ }
+
+ /* Leaf nodes have to be same priority */
+ if (nix_tm_is_leaf(dev, lvl) && priority != 0) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "queue shapers must be priority 0";
+ return -EIO;
+ }
+
+ parent_node = nix_tm_node_search(dev, parent_node_id, true);
+
+ /* find the right level */
+ if (lvl == RTE_TM_NODE_LEVEL_ID_ANY) {
+ if (parent_node_id == RTE_TM_NODE_ID_NULL) {
+ lvl = OTX2_TM_LVL_ROOT;
+ } else if (parent_node) {
+ lvl = parent_node->lvl + 1;
+ } else {
+ /* Neither a proper parent nor a proper level id was given */
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "invalid parent node id";
+ return -ERANGE;
+ }
+ }
+
+ /* Translate rte_tm level ids to nix hw level ids */
+ hw_lvl = nix_tm_lvl2nix(dev, lvl);
+ if (hw_lvl == NIX_TXSCH_LVL_CNT &&
+ !nix_tm_is_leaf(dev, lvl)) {
+ error->type = RTE_TM_ERROR_TYPE_LEVEL_ID;
+ error->message = "invalid level id";
+ return -ERANGE;
+ }
+
+ if (node_id < dev->tm_leaf_cnt)
+ exp_next_lvl = NIX_TXSCH_LVL_SMQ;
+ else
+ exp_next_lvl = hw_lvl + 1;
+
+ /* Check that the parent node exists at the expected level */
+ if (hw_lvl != dev->otx2_tm_root_lvl &&
+ (!parent_node || parent_node->hw_lvl != exp_next_lvl)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "invalid parent node id";
+ return -EINVAL;
+ }
+
+ /* Check if a node already exists */
+ if (nix_tm_node_search(dev, node_id, true)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "node already exists";
+ return -EINVAL;
+ }
+
+ /* Check if shaper profile exists for non leaf node */
+ if (!nix_tm_is_leaf(dev, lvl) &&
+ params->shaper_profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE &&
+ !nix_tm_shaper_profile_search(dev, params->shaper_profile_id)) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "invalid shaper profile";
+ return -EINVAL;
+ }
+
+ /* Check if there is already a second DWRR among siblings, or holes in prio */
+ if (validate_prio(dev, lvl, parent_node_id, priority, error))
+ return -EINVAL;
+
+ if (weight > MAX_SCHED_WEIGHT) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_WEIGHT;
+ error->message = "max weight exceeded";
+ return -EINVAL;
+ }
+
+ rc = nix_tm_node_add_to_list(dev, node_id, parent_node_id,
+ priority, weight, hw_lvl,
+ lvl, true, params);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ /* cleanup user added nodes */
+ if (clear_on_fail)
+ nix_tm_free_resources(dev, NIX_TM_NODE_USER,
+ NIX_TM_NODE_USER, false);
+ error->message = "failed to add node";
+ return rc;
+ }
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+ return 0;
+}
+
+static int
+otx2_nix_tm_node_delete(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node, *child_node;
+ struct otx2_nix_tm_shaper_profile *profile;
+ uint32_t profile_id;
+
+ /* we don't support dynamic updates yet */
+ if (dev->tm_flags & NIX_TM_COMMITTED) {
+ error->type = RTE_TM_ERROR_TYPE_CAPABILITIES;
+ error->message = "hierarchy exists";
+ return -EIO;
+ }
+
+ if (node_id == RTE_TM_NODE_ID_NULL) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node id";
+ return -EINVAL;
+ }
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* Check for any existing children */
+ TAILQ_FOREACH(child_node, &dev->node_list, node) {
+ if (child_node->parent == tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "children exist";
+ return -EINVAL;
+ }
+ }
+
+ /* Remove shaper profile reference */
+ profile_id = tm_node->params.shaper_profile_id;
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+ if (profile)
+ profile->reference_count--;
+
+ TAILQ_REMOVE(&dev->node_list, tm_node, node);
+ rte_free(tm_node);
+ return 0;
+}
+
+static int
+nix_tm_node_suspend_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error, bool suspend)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_nix_tm_node *tm_node;
+ struct nix_txschq_config *req;
+ uint16_t flags;
+ int rc;
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "hierarchy doesn't exist";
+ return -EINVAL;
+ }
+
+ flags = tm_node->flags;
+ flags = suspend ? (flags & ~NIX_TM_NODE_ENABLED) :
+ (flags | NIX_TM_NODE_ENABLED);
+
+ if (tm_node->flags == flags)
+ return 0;
+
+ /* send mbox for state change */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+
+ req->lvl = tm_node->hw_lvl;
+ req->num_regs = prepare_tm_sw_xoff(tm_node, suspend,
+ req->reg, req->regval);
+ rc = send_tm_reqval(mbox, req, error);
+ if (!rc)
+ tm_node->flags = flags;
+ return rc;
+}
+
+static int
+otx2_nix_tm_node_suspend(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ return nix_tm_node_suspend_resume(eth_dev, node_id, error, true);
+}
+
+static int
+otx2_nix_tm_node_resume(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_error *error)
+{
+ return nix_tm_node_suspend_resume(eth_dev, node_id, error, false);
+}
+
+static int
+otx2_nix_tm_hierarchy_commit(struct rte_eth_dev *eth_dev,
+ int clear_on_fail,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node;
+ uint32_t leaf_cnt = 0;
+ int rc;
+
+ if (dev->tm_flags & NIX_TM_COMMITTED) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "hierarchy exists";
+ return -EINVAL;
+ }
+
+ /* Check if we have all the leaf nodes */
+ TAILQ_FOREACH(tm_node, &dev->node_list, node) {
+ if (tm_node->flags & NIX_TM_NODE_USER &&
+ tm_node->id < dev->tm_leaf_cnt)
+ leaf_cnt++;
+ }
+
+ if (leaf_cnt != dev->tm_leaf_cnt) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "incomplete hierarchy";
+ return -EINVAL;
+ }
+
+ /*
+ * Disable xmit; it will be re-enabled once the
+ * new topology is in place.
+ */
+ rc = nix_xmit_disable(eth_dev);
+ if (rc) {
+ otx2_err("failed to disable TX, rc=%d", rc);
+ return -EIO;
+ }
+
+ /* Delete default/ratelimit tree */
+ if (dev->tm_flags & (NIX_TM_DEFAULT_TREE | NIX_TM_RATE_LIMIT_TREE)) {
+ rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER, 0, false);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "failed to free default resources";
+ return rc;
+ }
+ dev->tm_flags &= ~(NIX_TM_DEFAULT_TREE |
+ NIX_TM_RATE_LIMIT_TREE);
+ }
+
+ /* Free up user alloc'ed resources */
+ rc = nix_tm_free_resources(dev, NIX_TM_NODE_USER,
+ NIX_TM_NODE_USER, true);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "failed to free user resources";
+ return rc;
+ }
+
+ rc = nix_tm_alloc_resources(eth_dev, true);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "alloc resources failed";
+ /* TODO should we restore default config ? */
+ if (clear_on_fail)
+ nix_tm_free_resources(dev, 0, 0, false);
+ return rc;
+ }
+
+ error->type = RTE_TM_ERROR_TYPE_NONE;
+ dev->tm_flags |= NIX_TM_COMMITTED;
+ return 0;
+}
+
+static int
+otx2_nix_tm_node_shaper_update(struct rte_eth_dev *eth_dev,
+ uint32_t node_id,
+ uint32_t profile_id,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_shaper_profile *profile = NULL;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct otx2_nix_tm_node *tm_node;
+ struct nix_txschq_config *req;
+ uint8_t k;
+ int rc;
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node || nix_tm_is_leaf(dev, tm_node->lvl)) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "invalid node";
+ return -EINVAL;
+ }
+
+ if (profile_id == tm_node->params.shaper_profile_id)
+ return 0;
+
+ if (profile_id != RTE_TM_SHAPER_PROFILE_ID_NONE) {
+ profile = nix_tm_shaper_profile_search(dev, profile_id);
+ if (!profile) {
+ error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+ error->message = "shaper profile ID not exist";
+ return -EINVAL;
+ }
+ }
+
+ tm_node->params.shaper_profile_id = profile_id;
+
+ /* Nothing to do if not yet committed */
+ if (!(dev->tm_flags & NIX_TM_COMMITTED))
+ return 0;
+
+ tm_node->flags &= ~NIX_TM_NODE_ENABLED;
+
+ /* Flush the specific node with SW_XOFF */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = tm_node->hw_lvl;
+ k = prepare_tm_sw_xoff(tm_node, true, req->reg, req->regval);
+ req->num_regs = k;
+
+ rc = send_tm_reqval(mbox, req, error);
+ if (rc)
+ return rc;
+
+ shaper_default_red_algo(dev, tm_node, profile);
+
+ /* Update the PIR/CIR and clear SW XOFF */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = tm_node->hw_lvl;
+
+ k = prepare_tm_shaper_reg(tm_node, profile, req->reg, req->regval);
+
+ k += prepare_tm_sw_xoff(tm_node, false, &req->reg[k], &req->regval[k]);
+
+ req->num_regs = k;
+ rc = send_tm_reqval(mbox, req, error);
+ if (!rc)
+ tm_node->flags |= NIX_TM_NODE_ENABLED;
+ return rc;
+}
+
+static int
+otx2_nix_tm_node_parent_update(struct rte_eth_dev *eth_dev,
+ uint32_t node_id, uint32_t new_parent_id,
+ uint32_t priority, uint32_t weight,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node, *sibling;
+ struct otx2_nix_tm_node *new_parent;
+ struct nix_txschq_config *req;
+ uint8_t k;
+ int rc;
+
+ if (!(dev->tm_flags & NIX_TM_COMMITTED)) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "hierarchy doesn't exist";
+ return -EINVAL;
+ }
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* Parent id valid only for non root nodes */
+ if (tm_node->hw_lvl != dev->otx2_tm_root_lvl) {
+ new_parent = nix_tm_node_search(dev, new_parent_id, true);
+ if (!new_parent) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "no such parent node";
+ return -EINVAL;
+ }
+
+ /* Current support is only for dynamic weight update */
+ if (tm_node->parent != new_parent ||
+ tm_node->priority != priority) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_PARENT_NODE_ID;
+ error->message = "only weight update supported";
+ return -EINVAL;
+ }
+ }
+
+ /* Skip if no change */
+ if (tm_node->weight == weight)
+ return 0;
+
+ tm_node->weight = weight;
+
+ /* For leaf nodes, SQ CTX needs update */
+ if (nix_tm_is_leaf(dev, tm_node->lvl)) {
+ /* Update SQ quantum data on the fly */
+ rc = nix_sq_sched_data(dev, tm_node, true);
+ if (rc) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "sq sched data update failed";
+ return rc;
+ }
+ } else {
+ /* XOFF Parent node */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+ req->lvl = tm_node->parent->hw_lvl;
+ req->num_regs = prepare_tm_sw_xoff(tm_node->parent, true,
+ req->reg, req->regval);
+ rc = send_tm_reqval(dev->mbox, req, error);
+ if (rc)
+ return rc;
+
+ /* XOFF this node and all other siblings */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+ req->lvl = tm_node->hw_lvl;
+
+ k = 0;
+ TAILQ_FOREACH(sibling, &dev->node_list, node) {
+ if (sibling->parent != tm_node->parent)
+ continue;
+ k += prepare_tm_sw_xoff(sibling, true, &req->reg[k],
+ &req->regval[k]);
+ }
+ req->num_regs = k;
+ rc = send_tm_reqval(dev->mbox, req, error);
+ if (rc)
+ return rc;
+
+ /* Update new weight for current node */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+ req->lvl = tm_node->hw_lvl;
+ req->num_regs = prepare_tm_sched_reg(dev, tm_node,
+ req->reg, req->regval);
+ rc = send_tm_reqval(dev->mbox, req, error);
+ if (rc)
+ return rc;
+
+ /* XON this node and all other siblings */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+ req->lvl = tm_node->hw_lvl;
+
+ k = 0;
+ TAILQ_FOREACH(sibling, &dev->node_list, node) {
+ if (sibling->parent != tm_node->parent)
+ continue;
+ k += prepare_tm_sw_xoff(sibling, false, &req->reg[k],
+ &req->regval[k]);
+ }
+ req->num_regs = k;
+ rc = send_tm_reqval(dev->mbox, req, error);
+ if (rc)
+ return rc;
+
+ /* XON Parent node */
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(dev->mbox);
+ req->lvl = tm_node->parent->hw_lvl;
+ req->num_regs = prepare_tm_sw_xoff(tm_node->parent, false,
+ req->reg, req->regval);
+ rc = send_tm_reqval(dev->mbox, req, error);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+static int
+otx2_nix_tm_node_stats_read(struct rte_eth_dev *eth_dev, uint32_t node_id,
+ struct rte_tm_node_stats *stats,
+ uint64_t *stats_mask, int clear,
+ struct rte_tm_error *error)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_node *tm_node;
+ uint64_t reg, val;
+ int64_t *addr;
+ int rc = 0;
+
+ tm_node = nix_tm_node_search(dev, node_id, true);
+ if (!tm_node) {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "no such node";
+ return -EINVAL;
+ }
+
+ /* Stats support only for leaf node or TL1 root */
+ if (nix_tm_is_leaf(dev, tm_node->lvl)) {
+ reg = (((uint64_t)tm_node->id) << 32);
+
+ /* Packets */
+ addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_PKTS);
+ val = otx2_atomic64_add_nosync(reg, addr);
+ if (val & OP_ERR)
+ val = 0;
+ stats->n_pkts = val - tm_node->last_pkts;
+
+ /* Bytes */
+ addr = (int64_t *)(dev->base + NIX_LF_SQ_OP_OCTS);
+ val = otx2_atomic64_add_nosync(reg, addr);
+ if (val & OP_ERR)
+ val = 0;
+ stats->n_bytes = val - tm_node->last_bytes;
+
+ if (clear) {
+ tm_node->last_pkts = stats->n_pkts;
+ tm_node->last_bytes = stats->n_bytes;
+ }
+
+ *stats_mask = RTE_TM_STATS_N_PKTS | RTE_TM_STATS_N_BYTES;
+
+ } else if (tm_node->hw_lvl == NIX_TXSCH_LVL_TL1) {
+ error->type = RTE_TM_ERROR_TYPE_UNSPECIFIED;
+ error->message = "stats read error";
+
+ /* RED Drop packets */
+ reg = NIX_AF_TL1X_DROPPED_PACKETS(tm_node->hw_id);
+ rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
+ if (rc)
+ goto exit;
+ stats->leaf.n_pkts_dropped[RTE_COLOR_RED] =
+ val - tm_node->last_pkts;
+
+ /* RED Drop bytes */
+ reg = NIX_AF_TL1X_DROPPED_BYTES(tm_node->hw_id);
+ rc = read_tm_reg(dev->mbox, reg, &val, NIX_TXSCH_LVL_TL1);
+ if (rc)
+ goto exit;
+ stats->leaf.n_bytes_dropped[RTE_COLOR_RED] =
+ val - tm_node->last_bytes;
+
+ /* Clear stats */
+ if (clear) {
+ tm_node->last_pkts =
+ stats->leaf.n_pkts_dropped[RTE_COLOR_RED];
+ tm_node->last_bytes =
+ stats->leaf.n_bytes_dropped[RTE_COLOR_RED];
+ }
+
+ *stats_mask = RTE_TM_STATS_N_PKTS_RED_DROPPED |
+ RTE_TM_STATS_N_BYTES_RED_DROPPED;
+
+ } else {
+ error->type = RTE_TM_ERROR_TYPE_NODE_ID;
+ error->message = "unsupported node";
+ rc = -EINVAL;
+ }
+
+exit:
+ return rc;
+}
+
+const struct rte_tm_ops otx2_tm_ops = {
+ .node_type_get = otx2_nix_tm_node_type_get,
+
+ .capabilities_get = otx2_nix_tm_capa_get,
+ .level_capabilities_get = otx2_nix_tm_level_capa_get,
+ .node_capabilities_get = otx2_nix_tm_node_capa_get,
+
+ .shaper_profile_add = otx2_nix_tm_shaper_profile_add,
+ .shaper_profile_delete = otx2_nix_tm_shaper_profile_delete,
+
+ .node_add = otx2_nix_tm_node_add,
+ .node_delete = otx2_nix_tm_node_delete,
+ .node_suspend = otx2_nix_tm_node_suspend,
+ .node_resume = otx2_nix_tm_node_resume,
+ .hierarchy_commit = otx2_nix_tm_hierarchy_commit,
+
+ .node_shaper_update = otx2_nix_tm_node_shaper_update,
+ .node_parent_update = otx2_nix_tm_node_parent_update,
+ .node_stats_read = otx2_nix_tm_node_stats_read,
+};
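+
+/*
+ * These callbacks are reached through the generic rte_tm API once the
+ * application fetches the ops via rte_eth_dev (see otx2_nix_tm_ops_get
+ * below). A rough, illustrative sequence, not taken from this source
+ * (ids are arbitrary; the PMD expects a node at every scheduler level
+ * and one leaf per Tx queue before commit succeeds):
+ *
+ *	struct rte_tm_error err;
+ *	struct rte_tm_node_params np;
+ *
+ *	memset(&np, 0, sizeof(np));
+ *	np.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+ *	rte_tm_node_add(port, 100, RTE_TM_NODE_ID_NULL, 0, 1,
+ *			RTE_TM_NODE_LEVEL_ID_ANY, &np, &err);
+ *	...add one node per intermediate level, each parented to the one
+ *	   above, then node ids 0..nb_txq-1 as leaves under the last level...
+ *	rte_tm_hierarchy_commit(port, 1, &err);
+ */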
+
+static int
+nix_tm_prepare_default_tree(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint32_t def = eth_dev->data->nb_tx_queues;
+ struct rte_tm_node_params params;
+ uint32_t leaf_parent, i;
+ int rc = 0, leaf_level;
+
+ /* Default params */
+ memset(&params, 0, sizeof(params));
+ params.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
+
+ if (nix_tm_have_tl1_access(dev)) {
+ dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
+ rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL1,
+ OTX2_TM_LVL_ROOT, false, &params);
+ if (rc)
+ goto exit;
+ rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL2,
+ OTX2_TM_LVL_SCH1, false, &params);
+ if (rc)
+ goto exit;
+
+ rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL3,
+ OTX2_TM_LVL_SCH2, false, &params);
+ if (rc)
+ goto exit;
+
+ rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL4,
+ OTX2_TM_LVL_SCH3, false, &params);
+ if (rc)
+ goto exit;
+
+ rc = nix_tm_node_add_to_list(dev, def + 4, def + 3, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_SMQ,
+ OTX2_TM_LVL_SCH4, false, &params);
+ if (rc)
+ goto exit;
+
+ leaf_parent = def + 4;
+ leaf_level = OTX2_TM_LVL_QUEUE;
+ } else {
+ dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
+ rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL2,
+ OTX2_TM_LVL_ROOT, false, &params);
+ if (rc)
+ goto exit;
+
+ rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL3,
+ OTX2_TM_LVL_SCH1, false, &params);
+ if (rc)
+ goto exit;
+
+ rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL4,
+ OTX2_TM_LVL_SCH2, false, &params);
+ if (rc)
+ goto exit;
+
+ rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_SMQ,
+ OTX2_TM_LVL_SCH3, false, &params);
+ if (rc)
+ goto exit;
+
+ leaf_parent = def + 3;
+ leaf_level = OTX2_TM_LVL_SCH4;
+ }
+
+ /* Add leaf nodes */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = nix_tm_node_add_to_list(dev, i, leaf_parent, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_CNT,
+ leaf_level, false, &params);
+ if (rc)
+ break;
+ }
+
+exit:
+ return rc;
+}
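+
+/*
+ * The default tree prepared above is a single chain of schedulers with all
+ * Tx queues under one SMQ, e.g. with TL1 access:
+ *
+ *	TL1 (root) -> TL2 -> TL3 -> TL4 -> SMQ -> SQ0 .. SQn-1
+ *
+ * All nodes use DEFAULT_RR_WEIGHT and priority 0, so the queues effectively
+ * share the SMQ in round-robin until a user hierarchy is committed.
+ */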
+
+void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ TAILQ_INIT(&dev->node_list);
+ TAILQ_INIT(&dev->shaper_profile_list);
+ dev->tm_rate_min = 1E9; /* 1Gbps */
+}
+
+int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint16_t sq_cnt = eth_dev->data->nb_tx_queues;
+ int rc;
+
+ /* Free up all resources already held */
+ rc = nix_tm_free_resources(dev, 0, 0, false);
+ if (rc) {
+ otx2_err("Failed to freeup existing resources,rc=%d", rc);
+ return rc;
+ }
+
+ /* Clear shaper profiles */
+ nix_tm_clear_shaper_profiles(dev);
+ dev->tm_flags = NIX_TM_DEFAULT_TREE;
+
+ /* Disable TL1 static priority when VFs are enabled,
+ * as otherwise the VFs' TL2 would need to be reallocated
+ * at runtime to support a specific PF topology.
+ */
+ if (pci_dev->max_vfs)
+ dev->tm_flags |= NIX_TM_TL1_NO_SP;
+
+ rc = nix_tm_prepare_default_tree(eth_dev);
+ if (rc != 0)
+ return rc;
+
+ rc = nix_tm_alloc_resources(eth_dev, false);
+ if (rc != 0)
+ return rc;
+ dev->tm_leaf_cnt = sq_cnt;
+
+ return 0;
+}
+
+static int
+nix_tm_prepare_rate_limited_tree(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint32_t def = eth_dev->data->nb_tx_queues;
+ struct rte_tm_node_params params;
+ uint32_t leaf_parent, i;
+ int rc = 0;
+
+ memset(&params, 0, sizeof(params));
+
+ if (nix_tm_have_tl1_access(dev)) {
+ dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL1;
+ rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL1,
+ OTX2_TM_LVL_ROOT, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL2,
+ OTX2_TM_LVL_SCH1, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL3,
+ OTX2_TM_LVL_SCH2, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 3, def + 2, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_TL4,
+ OTX2_TM_LVL_SCH3, false, &params);
+ if (rc)
+ goto error;
+ leaf_parent = def + 3;
+
+ /* Add per queue SMQ nodes */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+ leaf_parent,
+ 0, DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_SMQ,
+ OTX2_TM_LVL_SCH4,
+ false, &params);
+ if (rc)
+ goto error;
+ }
+
+ /* Add leaf nodes */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = nix_tm_node_add_to_list(dev, i,
+ leaf_parent + 1 + i, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_CNT,
+ OTX2_TM_LVL_QUEUE,
+ false, &params);
+ if (rc)
+ goto error;
+ }
+
+ return 0;
+ }
+
+ dev->otx2_tm_root_lvl = NIX_TXSCH_LVL_TL2;
+ rc = nix_tm_node_add_to_list(dev, def, RTE_TM_NODE_ID_NULL, 0,
+ DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL2,
+ OTX2_TM_LVL_ROOT, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 1, def, 0,
+ DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL3,
+ OTX2_TM_LVL_SCH1, false, &params);
+ if (rc)
+ goto error;
+ rc = nix_tm_node_add_to_list(dev, def + 2, def + 1, 0,
+ DEFAULT_RR_WEIGHT, NIX_TXSCH_LVL_TL4,
+ OTX2_TM_LVL_SCH2, false, &params);
+ if (rc)
+ goto error;
+ leaf_parent = def + 2;
+
+ /* Add per queue SMQ nodes */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = nix_tm_node_add_to_list(dev, leaf_parent + 1 + i,
+ leaf_parent,
+ 0, DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_SMQ,
+ OTX2_TM_LVL_SCH3,
+ false, &params);
+ if (rc)
+ goto error;
+ }
+
+ /* Add leaf nodes */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = nix_tm_node_add_to_list(dev, i, leaf_parent + 1 + i, 0,
+ DEFAULT_RR_WEIGHT,
+ NIX_TXSCH_LVL_CNT,
+ OTX2_TM_LVL_SCH4,
+ false, &params);
+ if (rc)
+ break;
+ }
+error:
+ return rc;
+}
+
+static int
+otx2_nix_tm_rate_limit_mdq(struct rte_eth_dev *eth_dev,
+ struct otx2_nix_tm_node *tm_node,
+ uint64_t tx_rate)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_nix_tm_shaper_profile profile;
+ struct otx2_mbox *mbox = dev->mbox;
+ volatile uint64_t *reg, *regval;
+ struct nix_txschq_config *req;
+ uint16_t flags;
+ uint8_t k = 0;
+ int rc;
+
+ flags = tm_node->flags;
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(mbox);
+ req->lvl = NIX_TXSCH_LVL_MDQ;
+ reg = req->reg;
+ regval = req->regval;
+
+ if (tx_rate == 0) {
+ k += prepare_tm_sw_xoff(tm_node, true, &reg[k], &regval[k]);
+ flags &= ~NIX_TM_NODE_ENABLED;
+ goto exit;
+ }
+
+ if (!(flags & NIX_TM_NODE_ENABLED)) {
+ k += prepare_tm_sw_xoff(tm_node, false, &reg[k], &regval[k]);
+ flags |= NIX_TM_NODE_ENABLED;
+ }
+
+ /* Use only PIR for rate limit */
+ memset(&profile, 0, sizeof(profile));
+ profile.params.peak.rate = tx_rate;
+ /* Minimum burst of ~4 us worth of Tx bytes */
+ profile.params.peak.size = RTE_MAX(NIX_MAX_HW_FRS,
+ (4ull * tx_rate) / (1E6 * 8));
+ if (!dev->tm_rate_min || dev->tm_rate_min > tx_rate)
+ dev->tm_rate_min = tx_rate;
+
+ k += prepare_tm_shaper_reg(tm_node, &profile, &reg[k], &regval[k]);
+exit:
+ req->num_regs = k;
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ tm_node->flags = flags;
+ return 0;
+}
+
+int
+otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t tx_rate_mbps)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t tx_rate = tx_rate_mbps * (uint64_t)1E6;
+ struct otx2_nix_tm_node *tm_node;
+ int rc;
+
+ /* Check for supported revisions */
+ if (otx2_dev_is_95xx_Ax(dev) ||
+ otx2_dev_is_96xx_Ax(dev))
+ return -EINVAL;
+
+ if (queue_idx >= eth_dev->data->nb_tx_queues)
+ return -EINVAL;
+
+ if (!(dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+ !(dev->tm_flags & NIX_TM_RATE_LIMIT_TREE))
+ goto error;
+
+ if ((dev->tm_flags & NIX_TM_DEFAULT_TREE) &&
+ eth_dev->data->nb_tx_queues > 1) {
+ /* For TM topology change ethdev needs to be stopped */
+ if (eth_dev->data->dev_started)
+ return -EBUSY;
+
+ /*
+ * Disable xmit; it will be re-enabled once the
+ * new topology is in place.
+ */
+ rc = nix_xmit_disable(eth_dev);
+ if (rc) {
+ otx2_err("failed to disable TX, rc=%d", rc);
+ return -EIO;
+ }
+
+ rc = nix_tm_free_resources(dev, 0, 0, false);
+ if (rc < 0) {
+ otx2_tm_dbg("failed to free default resources, rc %d",
+ rc);
+ return -EIO;
+ }
+
+ rc = nix_tm_prepare_rate_limited_tree(eth_dev);
+ if (rc < 0) {
+ otx2_tm_dbg("failed to prepare tm tree, rc=%d", rc);
+ return rc;
+ }
+
+ rc = nix_tm_alloc_resources(eth_dev, true);
+ if (rc != 0) {
+ otx2_tm_dbg("failed to allocate tm tree, rc=%d", rc);
+ return rc;
+ }
+
+ dev->tm_flags &= ~NIX_TM_DEFAULT_TREE;
+ dev->tm_flags |= NIX_TM_RATE_LIMIT_TREE;
+ }
+
+ tm_node = nix_tm_node_search(dev, queue_idx, false);
+
+ /* check if we found a valid leaf node */
+ if (!tm_node ||
+ !nix_tm_is_leaf(dev, tm_node->lvl) ||
+ !tm_node->parent ||
+ tm_node->parent->hw_id == UINT32_MAX)
+ return -EIO;
+
+ return otx2_nix_tm_rate_limit_mdq(eth_dev, tm_node->parent, tx_rate);
+error:
+ otx2_tm_dbg("Unsupported TM tree 0x%0x", dev->tm_flags);
+ return -EINVAL;
+}
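+
+/*
+ * Note: this handler is typically reached through the ethdev API, e.g.
+ * rte_eth_set_queue_rate_limit(port_id, queue_idx, rate_mbps), assuming the
+ * PMD registers it as its queue rate-limit callback; rate_mbps corresponds
+ * to tx_rate_mbps above.
+ */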
+
+int
+otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *arg)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ if (!arg)
+ return -EINVAL;
+
+ /* Check for supported revisions */
+ if (otx2_dev_is_95xx_Ax(dev) ||
+ otx2_dev_is_96xx_Ax(dev))
+ return -EINVAL;
+
+ *(const void **)arg = &otx2_tm_ops;
+
+ return 0;
+}
+
+int
+otx2_nix_tm_fini(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc;
+
+ /* Xmit is assumed to be disabled */
+ /* Free up resources already held */
+ rc = nix_tm_free_resources(dev, 0, 0, false);
+ if (rc) {
+ otx2_err("Failed to freeup existing resources,rc=%d", rc);
+ return rc;
+ }
+
+ /* Clear shaper profiles */
+ nix_tm_clear_shaper_profiles(dev);
+
+ dev->tm_flags = 0;
+ return 0;
+}
+
+int
+otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
+ uint32_t *rr_quantum, uint16_t *smq)
+{
+ struct otx2_nix_tm_node *tm_node;
+ int rc;
+
+ /* 0..sq_cnt-1 are leaf nodes */
+ if (sq >= dev->tm_leaf_cnt)
+ return -EINVAL;
+
+ /* Search for internal node first */
+ tm_node = nix_tm_node_search(dev, sq, false);
+ if (!tm_node)
+ tm_node = nix_tm_node_search(dev, sq, true);
+
+ /* Check if we found a valid leaf node */
+ if (!tm_node || !nix_tm_is_leaf(dev, tm_node->lvl) ||
+ !tm_node->parent || tm_node->parent->hw_id == UINT32_MAX) {
+ return -EIO;
+ }
+
+ /* Get SMQ Id of leaf node's parent */
+ *smq = tm_node->parent->hw_id;
+ *rr_quantum = NIX_TM_WEIGHT_TO_RR_QUANTUM(tm_node->weight);
+
+ rc = nix_smq_xoff(dev, tm_node->parent, false);
+ if (rc)
+ return rc;
+ tm_node->flags |= NIX_TM_NODE_ENABLED;
+
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.h
new file mode 100644
index 000000000..4a80c234e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tm.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_TM_H__
+#define __OTX2_TM_H__
+
+#include <stdbool.h>
+
+#include <rte_tm_driver.h>
+
+#define NIX_TM_DEFAULT_TREE BIT_ULL(0)
+#define NIX_TM_COMMITTED BIT_ULL(1)
+#define NIX_TM_RATE_LIMIT_TREE BIT_ULL(2)
+#define NIX_TM_TL1_NO_SP BIT_ULL(3)
+
+struct otx2_eth_dev;
+
+void otx2_nix_tm_conf_init(struct rte_eth_dev *eth_dev);
+int otx2_nix_tm_init_default(struct rte_eth_dev *eth_dev);
+int otx2_nix_tm_fini(struct rte_eth_dev *eth_dev);
+int otx2_nix_tm_ops_get(struct rte_eth_dev *eth_dev, void *ops);
+int otx2_nix_tm_get_leaf_data(struct otx2_eth_dev *dev, uint16_t sq,
+ uint32_t *rr_quantum, uint16_t *smq);
+int otx2_nix_tm_set_queue_rate_limit(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t tx_rate);
+int otx2_nix_sq_flush_pre(void *_txq, bool dev_started);
+int otx2_nix_sq_flush_post(void *_txq);
+int otx2_nix_sq_enable(void *_txq);
+int otx2_nix_get_link(struct otx2_eth_dev *dev);
+int otx2_nix_sq_sqb_aura_fc(void *_txq, bool enable);
+
+struct otx2_nix_tm_node {
+ TAILQ_ENTRY(otx2_nix_tm_node) node;
+ uint32_t id;
+ uint32_t hw_id;
+ uint32_t priority;
+ uint32_t weight;
+ uint16_t lvl;
+ uint16_t hw_lvl;
+ uint32_t rr_prio;
+ uint32_t rr_num;
+ uint32_t max_prio;
+ uint32_t parent_hw_id;
+ uint32_t flags:16;
+#define NIX_TM_NODE_HWRES BIT_ULL(0)
+#define NIX_TM_NODE_ENABLED BIT_ULL(1)
+#define NIX_TM_NODE_USER BIT_ULL(2)
+#define NIX_TM_NODE_RED_DISCARD BIT_ULL(3)
+ /* Shaper algorithm for RED state @NIX_REDALG_E */
+ uint32_t red_algo:2;
+
+ struct otx2_nix_tm_node *parent;
+ struct rte_tm_node_params params;
+
+ /* Last stats */
+ uint64_t last_pkts;
+ uint64_t last_bytes;
+};
+
+struct otx2_nix_tm_shaper_profile {
+ TAILQ_ENTRY(otx2_nix_tm_shaper_profile) shaper;
+ uint32_t shaper_profile_id;
+ uint32_t reference_count;
+ struct rte_tm_shaper_params params; /* Rate in bits/sec */
+};
+
+struct shaper_params {
+ uint64_t burst_exponent;
+ uint64_t burst_mantissa;
+ uint64_t div_exp;
+ uint64_t exponent;
+ uint64_t mantissa;
+ uint64_t burst;
+ uint64_t rate;
+};
+
+TAILQ_HEAD(otx2_nix_tm_node_list, otx2_nix_tm_node);
+TAILQ_HEAD(otx2_nix_tm_shaper_profile_list, otx2_nix_tm_shaper_profile);
+
+#define MAX_SCHED_WEIGHT ((uint8_t)~0)
+#define NIX_TM_RR_QUANTUM_MAX (BIT_ULL(24) - 1)
+#define NIX_TM_WEIGHT_TO_RR_QUANTUM(__weight) \
+ ((((__weight) & MAX_SCHED_WEIGHT) * \
+ NIX_TM_RR_QUANTUM_MAX) / MAX_SCHED_WEIGHT)
+
+/*
+ * DEFAULT_RR_WEIGHT * NIX_TM_RR_QUANTUM_MAX / MAX_SCHED_WEIGHT
+ * = NIX_MAX_HW_MTU
+ */
+#define DEFAULT_RR_WEIGHT 71
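+
+/*
+ * Worked example of the weight to quantum mapping above: since
+ * NIX_TM_RR_QUANTUM_MAX / MAX_SCHED_WEIGHT is exactly 65793 (0x010101),
+ * the macro reduces to weight * 65793, e.g. weight 255 -> 0xFFFFFF,
+ * weight 128 -> 0x808080 and weight 1 -> 0x010101.
+ */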
+
+/** NIX rate limits */
+#define MAX_RATE_DIV_EXP 12
+#define MAX_RATE_EXPONENT 0xf
+#define MAX_RATE_MANTISSA 0xff
+
+#define NIX_SHAPER_RATE_CONST ((uint64_t)2E6)
+
+/* NIX rate calculation in Bits/Sec
+ * PIR_ADD = ((256 + NIX_*_PIR[RATE_MANTISSA])
+ * << NIX_*_PIR[RATE_EXPONENT]) / 256
+ * PIR = (2E6 * PIR_ADD / (1 << NIX_*_PIR[RATE_DIVIDER_EXPONENT]))
+ *
+ * CIR_ADD = ((256 + NIX_*_CIR[RATE_MANTISSA])
+ * << NIX_*_CIR[RATE_EXPONENT]) / 256
+ * CIR = (2E6 * CIR_ADD / (CCLK_TICKS << NIX_*_CIR[RATE_DIVIDER_EXPONENT]))
+ */
+#define SHAPER_RATE(exponent, mantissa, div_exp) \
+ ((NIX_SHAPER_RATE_CONST * ((256 + (mantissa)) << (exponent)))\
+ / (((1ull << (div_exp)) * 256)))
+
+/* 96xx rate limits in Bits/Sec */
+#define MIN_SHAPER_RATE \
+ SHAPER_RATE(0, 0, MAX_RATE_DIV_EXP)
+
+#define MAX_SHAPER_RATE \
+ SHAPER_RATE(MAX_RATE_EXPONENT, MAX_RATE_MANTISSA, 0)
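+
+/*
+ * Sanity check of the two limits above, plugging the extremes into
+ * SHAPER_RATE(): MIN_SHAPER_RATE = 2E6 * 256 / (2^12 * 256) ~= 488 bits/sec,
+ * MAX_SHAPER_RATE = 2E6 * ((256 + 255) << 15) / 256 ~= 130.8 Gbits/sec.
+ * The rte_tm capability callbacks report these divided by 8, in bytes/sec.
+ */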
+
+/** TM Shaper - low level operations */
+
+/** NIX burst limits */
+#define MAX_BURST_EXPONENT 0xf
+#define MAX_BURST_MANTISSA 0xff
+
+/* NIX burst calculation
+ * PIR_BURST = ((256 + NIX_*_PIR[BURST_MANTISSA])
+ * << (NIX_*_PIR[BURST_EXPONENT] + 1))
+ * / 256
+ *
+ * CIR_BURST = ((256 + NIX_*_CIR[BURST_MANTISSA])
+ * << (NIX_*_CIR[BURST_EXPONENT] + 1))
+ * / 256
+ */
+#define SHAPER_BURST(exponent, mantissa) \
+ (((256 + (mantissa)) << ((exponent) + 1)) / 256)
+
+/** Shaper burst limits */
+#define MIN_SHAPER_BURST \
+ SHAPER_BURST(0, 0)
+
+#define MAX_SHAPER_BURST \
+ SHAPER_BURST(MAX_BURST_EXPONENT,\
+ MAX_BURST_MANTISSA)
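The burst macro works out the same way; a small illustrative evaluation (names are not from the driver):

	/* burst = ((256 + mantissa) << (exponent + 1)) / 256 */
	static inline unsigned long example_shaper_burst(unsigned int exponent,
							 unsigned int mantissa)
	{
		return ((256ul + mantissa) << (exponent + 1)) / 256;
	}

	/* example_shaper_burst(0, 0)      => 2      (MIN_SHAPER_BURST)
	 * example_shaper_burst(0xf, 0xff) => 130816 (MAX_SHAPER_BURST)
	 */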
+
+/* Default TL1 priority and Quantum from AF */
+#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_PRIO 1
+
+#define TXSCH_TLX_SP_PRIO_MAX 10
+
+static inline const char *
+nix_hwlvl2str(uint32_t hw_lvl)
+{
+ switch (hw_lvl) {
+ case NIX_TXSCH_LVL_MDQ:
+ return "SMQ/MDQ";
+ case NIX_TXSCH_LVL_TL4:
+ return "TL4";
+ case NIX_TXSCH_LVL_TL3:
+ return "TL3";
+ case NIX_TXSCH_LVL_TL2:
+ return "TL2";
+ case NIX_TXSCH_LVL_TL1:
+ return "TL1";
+ default:
+ break;
+ }
+
+ return "???";
+}
+
+#endif /* __OTX2_TM_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.c
new file mode 100644
index 000000000..1af6fa649
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.c
@@ -0,0 +1,1060 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_vect.h>
+
+#include "otx2_ethdev.h"
+
+#define NIX_XMIT_FC_OR_RETURN(txq, pkts) do { \
+	/* Cached value is low; update fc_cache_pkts */			\
+ if (unlikely((txq)->fc_cache_pkts < (pkts))) { \
+ /* Multiply with sqe_per_sqb to express in pkts */ \
+ (txq)->fc_cache_pkts = \
+ ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) << \
+ (txq)->sqes_per_sqb_log2; \
+	/* Check again for room */					\
+ if (unlikely((txq)->fc_cache_pkts < (pkts))) \
+ return 0; \
+ } \
+} while (0)
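A sketch of the credit check the macro above performs, written as a plain function over a hypothetical queue view so the arithmetic reads linearly (field names mirror the real txq fields, but the struct here is illustrative):

	#include <stdint.h>

	struct fc_view {
		uint64_t fc_cache_pkts;     /* locally cached send credits (pkts) */
		uint64_t nb_sqb_bufs_adj;   /* adjusted SQB buffer budget */
		uint64_t *fc_mem;           /* HW-updated SQB usage counter */
		uint16_t sqes_per_sqb_log2; /* log2(SQEs per SQB) */
	};

	static int example_fc_has_room(struct fc_view *q, uint16_t pkts)
	{
		if (q->fc_cache_pkts >= pkts)
			return 1;
		/* Refresh the cache: free SQBs times SQEs-per-SQB gives the
		 * number of packets that can still be queued.
		 */
		q->fc_cache_pkts = (q->nb_sqb_bufs_adj - *q->fc_mem)
					<< q->sqes_per_sqb_log2;
		return q->fc_cache_pkts >= pkts;
	}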
+
+
+static __rte_always_inline uint16_t
+nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t pkts, uint64_t *cmd, const uint16_t flags)
+{
+	struct otx2_eth_txq *txq = tx_queue;
+	uint16_t i;
+ const rte_iova_t io_addr = txq->io_addr;
+ void *lmt_addr = txq->lmt_addr;
+
+ NIX_XMIT_FC_OR_RETURN(txq, pkts);
+
+ otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags));
+
+ /* Perform header writes before barrier for TSO */
+ if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ for (i = 0; i < pkts; i++)
+ otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
+ }
+
+	/* Let's commit any changes to the packet */
+ rte_cio_wmb();
+
+ for (i = 0; i < pkts; i++) {
+ otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+		/* Passing number of segdw as 4: HDR + EXT + SG + SMEM */
+ otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
+ tx_pkts[i]->ol_flags, 4, flags);
+ otx2_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
+ }
+
+ /* Reduce the cached count */
+ txq->fc_cache_pkts -= pkts;
+
+ return pkts;
+}
+
+static __rte_always_inline uint16_t
+nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t pkts, uint64_t *cmd, const uint16_t flags)
+{
+	struct otx2_eth_txq *txq = tx_queue;
+	uint64_t i;
+ const rte_iova_t io_addr = txq->io_addr;
+ void *lmt_addr = txq->lmt_addr;
+ uint16_t segdw;
+
+ NIX_XMIT_FC_OR_RETURN(txq, pkts);
+
+ otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags));
+
+ /* Perform header writes before barrier for TSO */
+ if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ for (i = 0; i < pkts; i++)
+ otx2_nix_xmit_prepare_tso(tx_pkts[i], flags);
+ }
+
+	/* Let's commit any changes to the packet */
+ rte_cio_wmb();
+
+ for (i = 0; i < pkts; i++) {
+ otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+ segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
+ otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
+ tx_pkts[i]->ol_flags, segdw,
+ flags);
+ otx2_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
+ }
+
+ /* Reduce the cached count */
+ txq->fc_cache_pkts -= pkts;
+
+ return pkts;
+}
+
+#if defined(RTE_ARCH_ARM64)
+
+#define NIX_DESCS_PER_LOOP 4
+static __rte_always_inline uint16_t
+nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t pkts, uint64_t *cmd, const uint16_t flags)
+{
+ uint64x2_t dataoff_iova0, dataoff_iova1, dataoff_iova2, dataoff_iova3;
+ uint64x2_t len_olflags0, len_olflags1, len_olflags2, len_olflags3;
+ uint64_t *mbuf0, *mbuf1, *mbuf2, *mbuf3;
+ uint64x2_t senddesc01_w0, senddesc23_w0;
+ uint64x2_t senddesc01_w1, senddesc23_w1;
+ uint64x2_t sgdesc01_w0, sgdesc23_w0;
+ uint64x2_t sgdesc01_w1, sgdesc23_w1;
+ struct otx2_eth_txq *txq = tx_queue;
+ uint64_t *lmt_addr = txq->lmt_addr;
+ rte_iova_t io_addr = txq->io_addr;
+ uint64x2_t ltypes01, ltypes23;
+ uint64x2_t xtmp128, ytmp128;
+ uint64x2_t xmask01, xmask23;
+ uint64x2_t cmd00, cmd01;
+ uint64x2_t cmd10, cmd11;
+ uint64x2_t cmd20, cmd21;
+ uint64x2_t cmd30, cmd31;
+ uint64_t lmt_status, i;
+ uint16_t pkts_left;
+
+ NIX_XMIT_FC_OR_RETURN(txq, pkts);
+
+ pkts_left = pkts & (NIX_DESCS_PER_LOOP - 1);
+ pkts = RTE_ALIGN_FLOOR(pkts, NIX_DESCS_PER_LOOP);
+
+ /* Reduce the cached count */
+ txq->fc_cache_pkts -= pkts;
+
+	/* Let's commit any changes to the packet */
+ rte_cio_wmb();
+
+ senddesc01_w0 = vld1q_dup_u64(&txq->cmd[0]);
+ senddesc23_w0 = senddesc01_w0;
+ senddesc01_w1 = vdupq_n_u64(0);
+ senddesc23_w1 = senddesc01_w1;
+ sgdesc01_w0 = vld1q_dup_u64(&txq->cmd[2]);
+ sgdesc23_w0 = sgdesc01_w0;
+
+ for (i = 0; i < pkts; i += NIX_DESCS_PER_LOOP) {
+ /* Clear lower 32bit of SEND_HDR_W0 and SEND_SG_W0 */
+ senddesc01_w0 = vbicq_u64(senddesc01_w0,
+ vdupq_n_u64(0xFFFFFFFF));
+ sgdesc01_w0 = vbicq_u64(sgdesc01_w0,
+ vdupq_n_u64(0xFFFFFFFF));
+
+ senddesc23_w0 = senddesc01_w0;
+ sgdesc23_w0 = sgdesc01_w0;
+
+ /* Move mbufs to iova */
+ mbuf0 = (uint64_t *)tx_pkts[0];
+ mbuf1 = (uint64_t *)tx_pkts[1];
+ mbuf2 = (uint64_t *)tx_pkts[2];
+ mbuf3 = (uint64_t *)tx_pkts[3];
+
+ mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
+ offsetof(struct rte_mbuf, buf_iova));
+ mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
+ offsetof(struct rte_mbuf, buf_iova));
+ mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
+ offsetof(struct rte_mbuf, buf_iova));
+ mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
+ offsetof(struct rte_mbuf, buf_iova));
+ /*
+		 * Get each mbuf's ol_flags, iova, pktlen, dataoff:
+ * dataoff_iovaX.D[0] = iova,
+ * dataoff_iovaX.D[1](15:0) = mbuf->dataoff
+ * len_olflagsX.D[0] = ol_flags,
+ * len_olflagsX.D[1](63:32) = mbuf->pkt_len
+ */
+ dataoff_iova0 = vld1q_u64(mbuf0);
+ len_olflags0 = vld1q_u64(mbuf0 + 2);
+ dataoff_iova1 = vld1q_u64(mbuf1);
+ len_olflags1 = vld1q_u64(mbuf1 + 2);
+ dataoff_iova2 = vld1q_u64(mbuf2);
+ len_olflags2 = vld1q_u64(mbuf2 + 2);
+ dataoff_iova3 = vld1q_u64(mbuf3);
+ len_olflags3 = vld1q_u64(mbuf3 + 2);
+
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+ struct rte_mbuf *mbuf;
+ /* Set don't free bit if reference count > 1 */
+ xmask01 = vdupq_n_u64(0);
+ xmask23 = xmask01;
+
+ mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
+ offsetof(struct rte_mbuf, buf_iova));
+
+ if (otx2_nix_prefree_seg(mbuf))
+				xmask01 = vsetq_lane_u64(0x80000, xmask01, 0);
+ else
+ __mempool_check_cookies(mbuf->pool,
+ (void **)&mbuf,
+ 1, 0);
+
+ mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
+ offsetof(struct rte_mbuf, buf_iova));
+ if (otx2_nix_prefree_seg(mbuf))
+				xmask01 = vsetq_lane_u64(0x80000, xmask01, 1);
+ else
+ __mempool_check_cookies(mbuf->pool,
+ (void **)&mbuf,
+ 1, 0);
+
+ mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
+ offsetof(struct rte_mbuf, buf_iova));
+ if (otx2_nix_prefree_seg(mbuf))
+				xmask23 = vsetq_lane_u64(0x80000, xmask23, 0);
+ else
+ __mempool_check_cookies(mbuf->pool,
+ (void **)&mbuf,
+ 1, 0);
+
+ mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
+ offsetof(struct rte_mbuf, buf_iova));
+ if (otx2_nix_prefree_seg(mbuf))
+				xmask23 = vsetq_lane_u64(0x80000, xmask23, 1);
+ else
+ __mempool_check_cookies(mbuf->pool,
+ (void **)&mbuf,
+ 1, 0);
+ senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
+ senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
+ } else {
+ struct rte_mbuf *mbuf;
+ /* Mark mempool object as "put" since
+ * it is freed by NIX
+ */
+ mbuf = (struct rte_mbuf *)((uintptr_t)mbuf0 -
+ offsetof(struct rte_mbuf, buf_iova));
+ __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ 1, 0);
+
+ mbuf = (struct rte_mbuf *)((uintptr_t)mbuf1 -
+ offsetof(struct rte_mbuf, buf_iova));
+ __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ 1, 0);
+
+ mbuf = (struct rte_mbuf *)((uintptr_t)mbuf2 -
+ offsetof(struct rte_mbuf, buf_iova));
+ __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ 1, 0);
+
+ mbuf = (struct rte_mbuf *)((uintptr_t)mbuf3 -
+ offsetof(struct rte_mbuf, buf_iova));
+ __mempool_check_cookies(mbuf->pool, (void **)&mbuf,
+ 1, 0);
+ RTE_SET_USED(mbuf);
+ }
+
+		/* Move mbuf pointers to point at the pool field */
+ mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
+ offsetof(struct rte_mbuf, pool) -
+ offsetof(struct rte_mbuf, buf_iova));
+ mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
+ offsetof(struct rte_mbuf, pool) -
+ offsetof(struct rte_mbuf, buf_iova));
+ mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
+ offsetof(struct rte_mbuf, pool) -
+ offsetof(struct rte_mbuf, buf_iova));
+ mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
+ offsetof(struct rte_mbuf, pool) -
+ offsetof(struct rte_mbuf, buf_iova));
+
+ if (flags &
+ (NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
+ NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
+ /* Get tx_offload for ol2, ol3, l2, l3 lengths */
+ /*
+ * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
+ * E(8):OL2_LEN(7):OL3_LEN(9):E(24):L3_LEN(9):L2_LEN(7)
+ */
+
+ asm volatile ("LD1 {%[a].D}[0],[%[in]]\n\t" :
+ [a]"+w"(senddesc01_w1) :
+ [in]"r"(mbuf0 + 2) : "memory");
+
+ asm volatile ("LD1 {%[a].D}[1],[%[in]]\n\t" :
+ [a]"+w"(senddesc01_w1) :
+ [in]"r"(mbuf1 + 2) : "memory");
+
+ asm volatile ("LD1 {%[b].D}[0],[%[in]]\n\t" :
+ [b]"+w"(senddesc23_w1) :
+ [in]"r"(mbuf2 + 2) : "memory");
+
+ asm volatile ("LD1 {%[b].D}[1],[%[in]]\n\t" :
+ [b]"+w"(senddesc23_w1) :
+ [in]"r"(mbuf3 + 2) : "memory");
+
+ /* Get pool pointer alone */
+ mbuf0 = (uint64_t *)*mbuf0;
+ mbuf1 = (uint64_t *)*mbuf1;
+ mbuf2 = (uint64_t *)*mbuf2;
+ mbuf3 = (uint64_t *)*mbuf3;
+ } else {
+ /* Get pool pointer alone */
+ mbuf0 = (uint64_t *)*mbuf0;
+ mbuf1 = (uint64_t *)*mbuf1;
+ mbuf2 = (uint64_t *)*mbuf2;
+ mbuf3 = (uint64_t *)*mbuf3;
+ }
+
+ const uint8x16_t shuf_mask2 = {
+ 0x4, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xc, 0xd, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ };
+ xtmp128 = vzip2q_u64(len_olflags0, len_olflags1);
+ ytmp128 = vzip2q_u64(len_olflags2, len_olflags3);
+
+ /* Clear dataoff_iovaX.D[1] bits other than dataoff(15:0) */
+ const uint64x2_t and_mask0 = {
+ 0xFFFFFFFFFFFFFFFF,
+ 0x000000000000FFFF,
+ };
+
+ dataoff_iova0 = vandq_u64(dataoff_iova0, and_mask0);
+ dataoff_iova1 = vandq_u64(dataoff_iova1, and_mask0);
+ dataoff_iova2 = vandq_u64(dataoff_iova2, and_mask0);
+ dataoff_iova3 = vandq_u64(dataoff_iova3, and_mask0);
+
+ /*
+		 * Pick only 16 bits of pktlen present at bits 63:32
+ * and place them at bits 15:0.
+ */
+ xtmp128 = vqtbl1q_u8(xtmp128, shuf_mask2);
+ ytmp128 = vqtbl1q_u8(ytmp128, shuf_mask2);
+
+ /* Add pairwise to get dataoff + iova in sgdesc_w1 */
+ sgdesc01_w1 = vpaddq_u64(dataoff_iova0, dataoff_iova1);
+ sgdesc23_w1 = vpaddq_u64(dataoff_iova2, dataoff_iova3);
+
+		/* OR both sgdesc_w0 and senddesc_w0 with the 16 bits of
+		 * pktlen at position 15:0.
+ */
+ sgdesc01_w0 = vorrq_u64(sgdesc01_w0, xtmp128);
+ sgdesc23_w0 = vorrq_u64(sgdesc23_w0, ytmp128);
+ senddesc01_w0 = vorrq_u64(senddesc01_w0, xtmp128);
+ senddesc23_w0 = vorrq_u64(senddesc23_w0, ytmp128);
+
+ if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
+ !(flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
+ /*
+ * Lookup table to translate ol_flags to
+ * il3/il4 types. But we still use ol3/ol4 types in
+ * senddesc_w1 as only one header processing is enabled.
+ */
+ const uint8x16_t tbl = {
+ /* [0-15] = il4type:il3type */
+ 0x04, /* none (IPv6 assumed) */
+ 0x14, /* PKT_TX_TCP_CKSUM (IPv6 assumed) */
+ 0x24, /* PKT_TX_SCTP_CKSUM (IPv6 assumed) */
+ 0x34, /* PKT_TX_UDP_CKSUM (IPv6 assumed) */
+ 0x03, /* PKT_TX_IP_CKSUM */
+ 0x13, /* PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM */
+ 0x23, /* PKT_TX_IP_CKSUM | PKT_TX_SCTP_CKSUM */
+ 0x33, /* PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM */
+ 0x02, /* PKT_TX_IPV4 */
+ 0x12, /* PKT_TX_IPV4 | PKT_TX_TCP_CKSUM */
+ 0x22, /* PKT_TX_IPV4 | PKT_TX_SCTP_CKSUM */
+ 0x32, /* PKT_TX_IPV4 | PKT_TX_UDP_CKSUM */
+ 0x03, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM */
+ 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
+ * PKT_TX_TCP_CKSUM
+ */
+ 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
+ * PKT_TX_SCTP_CKSUM
+ */
+ 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
+ * PKT_TX_UDP_CKSUM
+ */
+ };
+
+ /* Extract olflags to translate to iltypes */
+ xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
+ ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
+
+ /*
+ * E(47):L3_LEN(9):L2_LEN(7+z)
+ * E(47):L3_LEN(9):L2_LEN(7+z)
+ */
+ senddesc01_w1 = vshlq_n_u64(senddesc01_w1, 1);
+ senddesc23_w1 = vshlq_n_u64(senddesc23_w1, 1);
+
+ /* Move OLFLAGS bits 55:52 to 51:48
+			 * with zeros prepended on the byte; the rest
+			 * are don't-care
+ */
+ xtmp128 = vshrq_n_u8(xtmp128, 4);
+ ytmp128 = vshrq_n_u8(ytmp128, 4);
+ /*
+ * E(48):L3_LEN(8):L2_LEN(z+7)
+ * E(48):L3_LEN(8):L2_LEN(z+7)
+ */
+ const int8x16_t tshft3 = {
+ -1, 0, 8, 8, 8, 8, 8, 8,
+ -1, 0, 8, 8, 8, 8, 8, 8,
+ };
+
+ senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
+ senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);
+
+ /* Do the lookup */
+ ltypes01 = vqtbl1q_u8(tbl, xtmp128);
+ ltypes23 = vqtbl1q_u8(tbl, ytmp128);
+
+ /* Just use ld1q to retrieve aura
+ * when we don't need tx_offload
+ */
+ mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
+ offsetof(struct rte_mempool, pool_id));
+
+ /* Pick only relevant fields i.e Bit 48:55 of iltype
+ * and place it in ol3/ol4type of senddesc_w1
+ */
+ const uint8x16_t shuf_mask0 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x6, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xE, 0xFF, 0xFF, 0xFF,
+ };
+
+ ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
+ ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);
+
+ /* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
+ * a [E(32):E(16):OL3(8):OL2(8)]
+ * a = a + (a << 8)
+ * a [E(32):E(16):(OL3+OL2):OL2]
+ * => E(32):E(16)::OL4PTR(8):OL3PTR(8)
+ */
+ senddesc01_w1 = vaddq_u8(senddesc01_w1,
+ vshlq_n_u16(senddesc01_w1, 8));
+ senddesc23_w1 = vaddq_u8(senddesc23_w1,
+ vshlq_n_u16(senddesc23_w1, 8));
+
+ /* Create first half of 4W cmd for 4 mbufs (sgdesc) */
+ cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
+ cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
+ cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
+ cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);
+
+ xmask01 = vdupq_n_u64(0);
+ xmask23 = xmask01;
+ asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
+ [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");
+
+ asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
+ [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");
+
+ asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
+ [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");
+
+ asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
+ [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
+ xmask01 = vshlq_n_u64(xmask01, 20);
+ xmask23 = vshlq_n_u64(xmask23, 20);
+
+ senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
+ senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
+ /* Move ltypes to senddesc*_w1 */
+ senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
+ senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);
+
+ /* Create first half of 4W cmd for 4 mbufs (sendhdr) */
+ cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
+ cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
+ cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
+ cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);
+
+ } else if (!(flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
+ (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
+ /*
+ * Lookup table to translate ol_flags to
+ * ol3/ol4 types.
+ */
+
+ const uint8x16_t tbl = {
+ /* [0-15] = ol4type:ol3type */
+ 0x00, /* none */
+ 0x03, /* OUTER_IP_CKSUM */
+ 0x02, /* OUTER_IPV4 */
+ 0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
+ 0x04, /* OUTER_IPV6 */
+ 0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
+ 0x00, /* OUTER_IPV6 | OUTER_IPV4 */
+ 0x00, /* OUTER_IPV6 | OUTER_IPV4 |
+ * OUTER_IP_CKSUM
+ */
+ 0x00, /* OUTER_UDP_CKSUM */
+ 0x33, /* OUTER_UDP_CKSUM | OUTER_IP_CKSUM */
+ 0x32, /* OUTER_UDP_CKSUM | OUTER_IPV4 */
+ 0x33, /* OUTER_UDP_CKSUM | OUTER_IPV4 |
+ * OUTER_IP_CKSUM
+ */
+ 0x34, /* OUTER_UDP_CKSUM | OUTER_IPV6 */
+ 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
+ * OUTER_IP_CKSUM
+ */
+ 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
+ * OUTER_IPV4
+ */
+ 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
+ * OUTER_IPV4 | OUTER_IP_CKSUM
+ */
+ };
+
+ /* Extract olflags to translate to iltypes */
+ xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
+ ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
+
+ /*
+ * E(47):OL3_LEN(9):OL2_LEN(7+z)
+ * E(47):OL3_LEN(9):OL2_LEN(7+z)
+ */
+ const uint8x16_t shuf_mask5 = {
+ 0x6, 0x5, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xE, 0xD, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ };
+ senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
+ senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);
+
+ /* Extract outer ol flags only */
+ const uint64x2_t o_cksum_mask = {
+ 0x1C00020000000000,
+ 0x1C00020000000000,
+ };
+
+ xtmp128 = vandq_u64(xtmp128, o_cksum_mask);
+ ytmp128 = vandq_u64(ytmp128, o_cksum_mask);
+
+ /* Extract OUTER_UDP_CKSUM bit 41 and
+ * move it to bit 61
+ */
+
+ xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
+ ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);
+
+ /* Shift oltype by 2 to start nibble from BIT(56)
+ * instead of BIT(58)
+ */
+ xtmp128 = vshrq_n_u8(xtmp128, 2);
+ ytmp128 = vshrq_n_u8(ytmp128, 2);
+ /*
+ * E(48):L3_LEN(8):L2_LEN(z+7)
+ * E(48):L3_LEN(8):L2_LEN(z+7)
+ */
+ const int8x16_t tshft3 = {
+ -1, 0, 8, 8, 8, 8, 8, 8,
+ -1, 0, 8, 8, 8, 8, 8, 8,
+ };
+
+ senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
+ senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);
+
+ /* Do the lookup */
+ ltypes01 = vqtbl1q_u8(tbl, xtmp128);
+ ltypes23 = vqtbl1q_u8(tbl, ytmp128);
+
+ /* Just use ld1q to retrieve aura
+ * when we don't need tx_offload
+ */
+ mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
+ offsetof(struct rte_mempool, pool_id));
+
+ /* Pick only relevant fields i.e Bit 56:63 of oltype
+ * and place it in ol3/ol4type of senddesc_w1
+ */
+ const uint8x16_t shuf_mask0 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xFF, 0xFF, 0xFF,
+ };
+
+ ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
+ ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);
+
+ /* Prepare ol4ptr, ol3ptr from ol3len, ol2len.
+ * a [E(32):E(16):OL3(8):OL2(8)]
+ * a = a + (a << 8)
+ * a [E(32):E(16):(OL3+OL2):OL2]
+ * => E(32):E(16)::OL4PTR(8):OL3PTR(8)
+ */
+ senddesc01_w1 = vaddq_u8(senddesc01_w1,
+ vshlq_n_u16(senddesc01_w1, 8));
+ senddesc23_w1 = vaddq_u8(senddesc23_w1,
+ vshlq_n_u16(senddesc23_w1, 8));
+
+ /* Create second half of 4W cmd for 4 mbufs (sgdesc) */
+ cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
+ cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
+ cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
+ cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);
+
+ xmask01 = vdupq_n_u64(0);
+ xmask23 = xmask01;
+ asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
+ [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");
+
+ asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
+ [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");
+
+ asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
+ [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");
+
+ asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
+ [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
+ xmask01 = vshlq_n_u64(xmask01, 20);
+ xmask23 = vshlq_n_u64(xmask23, 20);
+
+ senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
+ senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
+ /* Move ltypes to senddesc*_w1 */
+ senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
+ senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);
+
+ /* Create first half of 4W cmd for 4 mbufs (sendhdr) */
+ cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
+ cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
+ cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
+ cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);
+
+ } else if ((flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) &&
+ (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)) {
+ /* Lookup table to translate ol_flags to
+ * ol4type, ol3type, il4type, il3type of senddesc_w1
+ */
+ const uint8x16x2_t tbl = {
+ {
+ {
+ /* [0-15] = il4type:il3type */
+ 0x04, /* none (IPv6) */
+ 0x14, /* PKT_TX_TCP_CKSUM (IPv6) */
+ 0x24, /* PKT_TX_SCTP_CKSUM (IPv6) */
+ 0x34, /* PKT_TX_UDP_CKSUM (IPv6) */
+ 0x03, /* PKT_TX_IP_CKSUM */
+ 0x13, /* PKT_TX_IP_CKSUM |
+ * PKT_TX_TCP_CKSUM
+ */
+ 0x23, /* PKT_TX_IP_CKSUM |
+ * PKT_TX_SCTP_CKSUM
+ */
+ 0x33, /* PKT_TX_IP_CKSUM |
+ * PKT_TX_UDP_CKSUM
+ */
+ 0x02, /* PKT_TX_IPV4 */
+ 0x12, /* PKT_TX_IPV4 |
+ * PKT_TX_TCP_CKSUM
+ */
+ 0x22, /* PKT_TX_IPV4 |
+ * PKT_TX_SCTP_CKSUM
+ */
+ 0x32, /* PKT_TX_IPV4 |
+ * PKT_TX_UDP_CKSUM
+ */
+ 0x03, /* PKT_TX_IPV4 |
+ * PKT_TX_IP_CKSUM
+ */
+ 0x13, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
+ * PKT_TX_TCP_CKSUM
+ */
+ 0x23, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
+ * PKT_TX_SCTP_CKSUM
+ */
+ 0x33, /* PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
+ * PKT_TX_UDP_CKSUM
+ */
+ },
+
+ {
+ /* [16-31] = ol4type:ol3type */
+ 0x00, /* none */
+ 0x03, /* OUTER_IP_CKSUM */
+ 0x02, /* OUTER_IPV4 */
+ 0x03, /* OUTER_IPV4 | OUTER_IP_CKSUM */
+ 0x04, /* OUTER_IPV6 */
+ 0x00, /* OUTER_IPV6 | OUTER_IP_CKSUM */
+ 0x00, /* OUTER_IPV6 | OUTER_IPV4 */
+ 0x00, /* OUTER_IPV6 | OUTER_IPV4 |
+ * OUTER_IP_CKSUM
+ */
+ 0x00, /* OUTER_UDP_CKSUM */
+ 0x33, /* OUTER_UDP_CKSUM |
+ * OUTER_IP_CKSUM
+ */
+ 0x32, /* OUTER_UDP_CKSUM |
+ * OUTER_IPV4
+ */
+ 0x33, /* OUTER_UDP_CKSUM |
+ * OUTER_IPV4 | OUTER_IP_CKSUM
+ */
+ 0x34, /* OUTER_UDP_CKSUM |
+ * OUTER_IPV6
+ */
+ 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
+ * OUTER_IP_CKSUM
+ */
+ 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
+ * OUTER_IPV4
+ */
+ 0x00, /* OUTER_UDP_CKSUM | OUTER_IPV6 |
+ * OUTER_IPV4 | OUTER_IP_CKSUM
+ */
+ },
+ }
+ };
+
+ /* Extract olflags to translate to oltype & iltype */
+ xtmp128 = vzip1q_u64(len_olflags0, len_olflags1);
+ ytmp128 = vzip1q_u64(len_olflags2, len_olflags3);
+
+ /*
+ * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
+ * E(8):OL2_LN(7):OL3_LN(9):E(23):L3_LN(9):L2_LN(7+z)
+ */
+ const uint32x4_t tshft_4 = {
+ 1, 0,
+ 1, 0,
+ };
+ senddesc01_w1 = vshlq_u32(senddesc01_w1, tshft_4);
+ senddesc23_w1 = vshlq_u32(senddesc23_w1, tshft_4);
+
+ /*
+ * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
+ * E(32):L3_LEN(8):L2_LEN(7+Z):OL3_LEN(8):OL2_LEN(7+Z)
+ */
+ const uint8x16_t shuf_mask5 = {
+ 0x6, 0x5, 0x0, 0x1, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xE, 0xD, 0x8, 0x9, 0xFF, 0xFF, 0xFF, 0xFF,
+ };
+ senddesc01_w1 = vqtbl1q_u8(senddesc01_w1, shuf_mask5);
+ senddesc23_w1 = vqtbl1q_u8(senddesc23_w1, shuf_mask5);
+
+ /* Extract outer and inner header ol_flags */
+ const uint64x2_t oi_cksum_mask = {
+ 0x1CF0020000000000,
+ 0x1CF0020000000000,
+ };
+
+ xtmp128 = vandq_u64(xtmp128, oi_cksum_mask);
+ ytmp128 = vandq_u64(ytmp128, oi_cksum_mask);
+
+ /* Extract OUTER_UDP_CKSUM bit 41 and
+ * move it to bit 61
+ */
+
+ xtmp128 = xtmp128 | vshlq_n_u64(xtmp128, 20);
+ ytmp128 = ytmp128 | vshlq_n_u64(ytmp128, 20);
+
+ /* Shift right oltype by 2 and iltype by 4
+ * to start oltype nibble from BIT(58)
+ * instead of BIT(56) and iltype nibble from BIT(48)
+ * instead of BIT(52).
+ */
+ const int8x16_t tshft5 = {
+ 8, 8, 8, 8, 8, 8, -4, -2,
+ 8, 8, 8, 8, 8, 8, -4, -2,
+ };
+
+ xtmp128 = vshlq_u8(xtmp128, tshft5);
+ ytmp128 = vshlq_u8(ytmp128, tshft5);
+ /*
+ * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
+ * E(32):L3_LEN(8):L2_LEN(8):OL3_LEN(8):OL2_LEN(8)
+ */
+ const int8x16_t tshft3 = {
+ -1, 0, -1, 0, 0, 0, 0, 0,
+ -1, 0, -1, 0, 0, 0, 0, 0,
+ };
+
+ senddesc01_w1 = vshlq_u8(senddesc01_w1, tshft3);
+ senddesc23_w1 = vshlq_u8(senddesc23_w1, tshft3);
+
+ /* Mark Bit(4) of oltype */
+ const uint64x2_t oi_cksum_mask2 = {
+ 0x1000000000000000,
+ 0x1000000000000000,
+ };
+
+ xtmp128 = vorrq_u64(xtmp128, oi_cksum_mask2);
+ ytmp128 = vorrq_u64(ytmp128, oi_cksum_mask2);
+
+ /* Do the lookup */
+ ltypes01 = vqtbl2q_u8(tbl, xtmp128);
+ ltypes23 = vqtbl2q_u8(tbl, ytmp128);
+
+ /* Just use ld1q to retrieve aura
+ * when we don't need tx_offload
+ */
+ mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
+ offsetof(struct rte_mempool, pool_id));
+
+ /* Pick only relevant fields i.e Bit 48:55 of iltype and
+ * Bit 56:63 of oltype and place it in corresponding
+ * place in senddesc_w1.
+ */
+ const uint8x16_t shuf_mask0 = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x7, 0x6, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xF, 0xE, 0xFF, 0xFF,
+ };
+
+ ltypes01 = vqtbl1q_u8(ltypes01, shuf_mask0);
+ ltypes23 = vqtbl1q_u8(ltypes23, shuf_mask0);
+
+ /* Prepare l4ptr, l3ptr, ol4ptr, ol3ptr from
+ * l3len, l2len, ol3len, ol2len.
+ * a [E(32):L3(8):L2(8):OL3(8):OL2(8)]
+ * a = a + (a << 8)
+ * a [E:(L3+L2):(L2+OL3):(OL3+OL2):OL2]
+ * a = a + (a << 16)
+ * a [E:(L3+L2+OL3+OL2):(L2+OL3+OL2):(OL3+OL2):OL2]
+ * => E(32):IL4PTR(8):IL3PTR(8):OL4PTR(8):OL3PTR(8)
+ */
+ senddesc01_w1 = vaddq_u8(senddesc01_w1,
+ vshlq_n_u32(senddesc01_w1, 8));
+ senddesc23_w1 = vaddq_u8(senddesc23_w1,
+ vshlq_n_u32(senddesc23_w1, 8));
+
+ /* Create second half of 4W cmd for 4 mbufs (sgdesc) */
+ cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
+ cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
+ cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
+ cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);
+
+ /* Continue preparing l4ptr, l3ptr, ol4ptr, ol3ptr */
+ senddesc01_w1 = vaddq_u8(senddesc01_w1,
+ vshlq_n_u32(senddesc01_w1, 16));
+ senddesc23_w1 = vaddq_u8(senddesc23_w1,
+ vshlq_n_u32(senddesc23_w1, 16));
+
+ xmask01 = vdupq_n_u64(0);
+ xmask23 = xmask01;
+ asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
+ [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");
+
+ asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
+ [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");
+
+ asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
+ [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");
+
+ asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
+ [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
+ xmask01 = vshlq_n_u64(xmask01, 20);
+ xmask23 = vshlq_n_u64(xmask23, 20);
+
+ senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
+ senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
+ /* Move ltypes to senddesc*_w1 */
+ senddesc01_w1 = vorrq_u64(senddesc01_w1, ltypes01);
+ senddesc23_w1 = vorrq_u64(senddesc23_w1, ltypes23);
+
+ /* Create first half of 4W cmd for 4 mbufs (sendhdr) */
+ cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
+ cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
+ cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
+ cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);
+ } else {
+ /* Just use ld1q to retrieve aura
+ * when we don't need tx_offload
+ */
+ mbuf0 = (uint64_t *)((uintptr_t)mbuf0 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf1 = (uint64_t *)((uintptr_t)mbuf1 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf2 = (uint64_t *)((uintptr_t)mbuf2 +
+ offsetof(struct rte_mempool, pool_id));
+ mbuf3 = (uint64_t *)((uintptr_t)mbuf3 +
+ offsetof(struct rte_mempool, pool_id));
+ xmask01 = vdupq_n_u64(0);
+ xmask23 = xmask01;
+ asm volatile ("LD1 {%[a].H}[0],[%[in]]\n\t" :
+ [a]"+w"(xmask01) : [in]"r"(mbuf0) : "memory");
+
+ asm volatile ("LD1 {%[a].H}[4],[%[in]]\n\t" :
+ [a]"+w"(xmask01) : [in]"r"(mbuf1) : "memory");
+
+ asm volatile ("LD1 {%[b].H}[0],[%[in]]\n\t" :
+ [b]"+w"(xmask23) : [in]"r"(mbuf2) : "memory");
+
+ asm volatile ("LD1 {%[b].H}[4],[%[in]]\n\t" :
+ [b]"+w"(xmask23) : [in]"r"(mbuf3) : "memory");
+ xmask01 = vshlq_n_u64(xmask01, 20);
+ xmask23 = vshlq_n_u64(xmask23, 20);
+
+ senddesc01_w0 = vorrq_u64(senddesc01_w0, xmask01);
+ senddesc23_w0 = vorrq_u64(senddesc23_w0, xmask23);
+
+ /* Create 4W cmd for 4 mbufs (sendhdr, sgdesc) */
+ cmd00 = vzip1q_u64(senddesc01_w0, senddesc01_w1);
+ cmd01 = vzip1q_u64(sgdesc01_w0, sgdesc01_w1);
+ cmd10 = vzip2q_u64(senddesc01_w0, senddesc01_w1);
+ cmd11 = vzip2q_u64(sgdesc01_w0, sgdesc01_w1);
+ cmd20 = vzip1q_u64(senddesc23_w0, senddesc23_w1);
+ cmd21 = vzip1q_u64(sgdesc23_w0, sgdesc23_w1);
+ cmd30 = vzip2q_u64(senddesc23_w0, senddesc23_w1);
+ cmd31 = vzip2q_u64(sgdesc23_w0, sgdesc23_w1);
+ }
+
+ do {
+ vst1q_u64(lmt_addr, cmd00);
+ vst1q_u64(lmt_addr + 2, cmd01);
+ vst1q_u64(lmt_addr + 4, cmd10);
+ vst1q_u64(lmt_addr + 6, cmd11);
+ vst1q_u64(lmt_addr + 8, cmd20);
+ vst1q_u64(lmt_addr + 10, cmd21);
+ vst1q_u64(lmt_addr + 12, cmd30);
+ vst1q_u64(lmt_addr + 14, cmd31);
+ lmt_status = otx2_lmt_submit(io_addr);
+
+ } while (lmt_status == 0);
+ tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP;
+ }
+
+ if (unlikely(pkts_left))
+ pkts += nix_xmit_pkts(tx_queue, tx_pkts, pkts_left, cmd, flags);
+
+ return pkts;
+}
+
+#else
+static __rte_always_inline uint16_t
+nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t pkts, uint64_t *cmd, const uint16_t flags)
+{
+ RTE_SET_USED(tx_queue);
+ RTE_SET_USED(tx_pkts);
+ RTE_SET_USED(pkts);
+ RTE_SET_USED(cmd);
+ RTE_SET_USED(flags);
+ return 0;
+}
+#endif
+
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+static uint16_t __rte_noinline __rte_hot \
+otx2_nix_xmit_pkts_ ## name(void *tx_queue, \
+ struct rte_mbuf **tx_pkts, uint16_t pkts) \
+{ \
+ uint64_t cmd[sz]; \
+ \
+ /* For TSO inner checksum is a must */ \
+ if (((flags) & NIX_TX_OFFLOAD_TSO_F) && \
+ !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) \
+ return 0; \
+ return nix_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, flags); \
+}
+
+NIX_TX_FASTPATH_MODES
+#undef T
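For orientation, a rough sketch of what one instantiation of the template above produces, taking the `vlan` row of NIX_TX_FASTPATH_MODES (declared in otx2_tx.h), where sz is 6 and flags is VLAN_F; since VLAN_F does not include the TSO flag, the TSO guard folds to a constant and disappears:

	static uint16_t __rte_noinline __rte_hot
	otx2_nix_xmit_pkts_vlan(void *tx_queue,
				struct rte_mbuf **tx_pkts, uint16_t pkts)
	{
		uint64_t cmd[6];

		/* The "TSO needs inner checksum" guard compiles away here */
		return nix_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, VLAN_F);
	}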
+
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+static uint16_t __rte_noinline __rte_hot \
+otx2_nix_xmit_pkts_mseg_ ## name(void *tx_queue, \
+ struct rte_mbuf **tx_pkts, uint16_t pkts) \
+{ \
+ uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \
+ \
+ /* For TSO inner checksum is a must */ \
+ if (((flags) & NIX_TX_OFFLOAD_TSO_F) && \
+ !((flags) & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) \
+ return 0; \
+ return nix_xmit_pkts_mseg(tx_queue, tx_pkts, pkts, cmd, \
+ (flags) | NIX_TX_MULTI_SEG_F); \
+}
+
+NIX_TX_FASTPATH_MODES
+#undef T
+
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+static uint16_t __rte_noinline __rte_hot \
+otx2_nix_xmit_pkts_vec_ ## name(void *tx_queue, \
+ struct rte_mbuf **tx_pkts, uint16_t pkts) \
+{ \
+ uint64_t cmd[sz]; \
+ \
+	/* VLAN, TSTAMP and TSO are not supported by the vector routine */ \
+ if ((flags) & NIX_TX_OFFLOAD_VLAN_QINQ_F || \
+ (flags) & NIX_TX_OFFLOAD_TSTAMP_F || \
+ (flags) & NIX_TX_OFFLOAD_TSO_F) \
+ return 0; \
+ return nix_xmit_pkts_vector(tx_queue, tx_pkts, pkts, cmd, (flags)); \
+}
+
+NIX_TX_FASTPATH_MODES
+#undef T
+
+static inline void
+pick_tx_func(struct rte_eth_dev *eth_dev,
+ const eth_tx_burst_t tx_burst[2][2][2][2][2][2][2])
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+	/* [SEC] [TSO] [TSTMP] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
+ eth_dev->tx_pkt_burst = tx_burst
+ [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_SECURITY_F)]
+ [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F)]
+ [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F)]
+ [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
+ [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
+ [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
+}
+
+void
+otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
+ const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_ ## name,
+
+NIX_TX_FASTPATH_MODES
+#undef T
+ };
+
+ const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_mseg_ ## name,
+
+NIX_TX_FASTPATH_MODES
+#undef T
+ };
+
+ const eth_tx_burst_t nix_eth_tx_vec_burst[2][2][2][2][2][2][2] = {
+#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
+ [f6][f5][f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_vec_ ## name,
+
+NIX_TX_FASTPATH_MODES
+#undef T
+ };
+
+ if (dev->scalar_ena ||
+ (dev->tx_offload_flags &
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F |
+ NIX_TX_OFFLOAD_TSO_F)))
+ pick_tx_func(eth_dev, nix_eth_tx_burst);
+ else
+ pick_tx_func(eth_dev, nix_eth_tx_vec_burst);
+
+ if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
+
+ rte_mb();
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.h b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.h
new file mode 100644
index 000000000..3c4317092
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_tx.h
@@ -0,0 +1,744 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#ifndef __OTX2_TX_H__
+#define __OTX2_TX_H__
+
+#define NIX_TX_OFFLOAD_NONE (0)
+#define NIX_TX_OFFLOAD_L3_L4_CSUM_F BIT(0)
+#define NIX_TX_OFFLOAD_OL3_OL4_CSUM_F BIT(1)
+#define NIX_TX_OFFLOAD_VLAN_QINQ_F BIT(2)
+#define NIX_TX_OFFLOAD_MBUF_NOFF_F BIT(3)
+#define NIX_TX_OFFLOAD_TSTAMP_F BIT(4)
+#define NIX_TX_OFFLOAD_TSO_F BIT(5)
+#define NIX_TX_OFFLOAD_SECURITY_F BIT(6)
+
+/* Flag to control the xmit_prepare function.
+ * Defined from the top bit downwards to denote that it is
+ * not used as an offload flag when picking a function.
+ */
+#define NIX_TX_MULTI_SEG_F BIT(15)
+
+#define NIX_TX_NEED_SEND_HDR_W1 \
+ (NIX_TX_OFFLOAD_L3_L4_CSUM_F | NIX_TX_OFFLOAD_OL3_OL4_CSUM_F | \
+ NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)
+
+#define NIX_TX_NEED_EXT_HDR \
+ (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSTAMP_F | \
+ NIX_TX_OFFLOAD_TSO_F)
+
+#define NIX_UDP_TUN_BITMASK \
+ ((1ull << (PKT_TX_TUNNEL_VXLAN >> 45)) | \
+ (1ull << (PKT_TX_TUNNEL_GENEVE >> 45)))
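The bitmask above is consulted by shifting the tunnel-type field of ol_flags down to a small index, exactly as otx2_nix_xmit_prepare_tso() does later in this file; restated on its own (helper name is illustrative):

	/* Returns 1 only for UDP-based tunnels (VXLAN/GENEVE), 0 otherwise. */
	static inline uint8_t example_is_udp_tunnel(uint64_t ol_flags)
	{
		return (NIX_UDP_TUN_BITMASK >>
			((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & 0x1;
	}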
+
+#define NIX_LSO_FORMAT_IDX_TSOV4 (0)
+#define NIX_LSO_FORMAT_IDX_TSOV6 (1)
+
+/* Function to determine the number of Tx subdescriptors required
+ * when the extension subdescriptor is enabled.
+ */
+static __rte_always_inline int
+otx2_nix_tx_ext_subs(const uint16_t flags)
+{
+ return (flags & NIX_TX_OFFLOAD_TSTAMP_F) ? 2 :
+ ((flags & (NIX_TX_OFFLOAD_VLAN_QINQ_F | NIX_TX_OFFLOAD_TSO_F)) ?
+ 1 : 0);
+}
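To relate this count to the per-mode command sizes in NIX_TX_FASTPATH_MODES below: each subdescriptor occupies two 64-bit LMT words, so for the non-security modes the 'sz' column is 4, 6 or 8 (the security rows simply reserve 8). A hedged restatement with an illustrative helper name:

	static inline int example_tx_cmd_dwords(const uint16_t flags)
	{
		/* SEND_HDR (2 dwords) + SEND_SG (2 dwords) + 2 dwords per
		 * extension subdescriptor (EXT and, with timestamping,
		 * SEND_MEM).
		 */
		return 4 + 2 * otx2_nix_tx_ext_subs(flags);
	}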
+
+static __rte_always_inline void
+otx2_nix_xmit_prepare_tstamp(uint64_t *cmd, const uint64_t *send_mem_desc,
+ const uint64_t ol_flags, const uint16_t no_segdw,
+ const uint16_t flags)
+{
+ if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
+ struct nix_send_mem_s *send_mem;
+ uint16_t off = (no_segdw - 1) << 1;
+ const uint8_t is_ol_tstamp = !(ol_flags & PKT_TX_IEEE1588_TMST);
+
+ send_mem = (struct nix_send_mem_s *)(cmd + off);
+ if (flags & NIX_TX_MULTI_SEG_F) {
+ /* Retrieving the default desc values */
+ cmd[off] = send_mem_desc[6];
+
+			/* Use a compiler barrier to avoid violating C
+			 * aliasing rules.
+ */
+ rte_compiler_barrier();
+ }
+
+		/* For packets without PKT_TX_IEEE1588_TMST set, the Tx
+		 * timestamp should not be recorded; change the alg type to
+		 * NIX_SENDMEMALG_SET and advance the send mem addr field to
+		 * the next 8 bytes so the registered Tx timestamp address
+		 * is not corrupted.
+ */
+ send_mem->alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);
+
+ send_mem->addr = (rte_iova_t)((uint64_t *)send_mem_desc[7] +
+ (is_ol_tstamp));
+ }
+}
+
+static __rte_always_inline uint64_t
+otx2_pktmbuf_detach(struct rte_mbuf *m)
+{
+ struct rte_mempool *mp = m->pool;
+ uint32_t mbuf_size, buf_len;
+ struct rte_mbuf *md;
+ uint16_t priv_size;
+ uint16_t refcount;
+
+ /* Update refcount of direct mbuf */
+ md = rte_mbuf_from_indirect(m);
+ refcount = rte_mbuf_refcnt_update(md, -1);
+
+ priv_size = rte_pktmbuf_priv_size(mp);
+ mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
+ buf_len = rte_pktmbuf_data_room_size(mp);
+
+ m->priv_size = priv_size;
+ m->buf_addr = (char *)m + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ m->buf_len = (uint16_t)buf_len;
+ rte_pktmbuf_reset_headroom(m);
+ m->data_len = 0;
+ m->ol_flags = 0;
+ m->next = NULL;
+ m->nb_segs = 1;
+
+ /* Now indirect mbuf is safe to free */
+ rte_pktmbuf_free(m);
+
+ if (refcount == 0) {
+ rte_mbuf_refcnt_set(md, 1);
+ md->data_len = 0;
+ md->ol_flags = 0;
+ md->next = NULL;
+ md->nb_segs = 1;
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static __rte_always_inline uint64_t
+otx2_nix_prefree_seg(struct rte_mbuf *m)
+{
+ if (likely(rte_mbuf_refcnt_read(m) == 1)) {
+ if (!RTE_MBUF_DIRECT(m))
+ return otx2_pktmbuf_detach(m);
+
+ m->next = NULL;
+ m->nb_segs = 1;
+ return 0;
+ } else if (rte_mbuf_refcnt_update(m, -1) == 0) {
+ if (!RTE_MBUF_DIRECT(m))
+ return otx2_pktmbuf_detach(m);
+
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
+ m->nb_segs = 1;
+ return 0;
+ }
+
+	/* Mbuf still has other references (refcount > 1), so do not free it */
+ return 1;
+}
+
+static __rte_always_inline void
+otx2_nix_xmit_prepare_tso(struct rte_mbuf *m, const uint64_t flags)
+{
+ uint64_t mask, ol_flags = m->ol_flags;
+
+ if (flags & NIX_TX_OFFLOAD_TSO_F &&
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ uintptr_t mdata = rte_pktmbuf_mtod(m, uintptr_t);
+ uint16_t *iplen, *oiplen, *oudplen;
+ uint16_t lso_sb, paylen;
+
+ mask = -!!(ol_flags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IPV6));
+ lso_sb = (mask & (m->outer_l2_len + m->outer_l3_len)) +
+ m->l2_len + m->l3_len + m->l4_len;
+
+ /* Reduce payload len from base headers */
+ paylen = m->pkt_len - lso_sb;
+
+ /* Get iplen position assuming no tunnel hdr */
+ iplen = (uint16_t *)(mdata + m->l2_len +
+ (2 << !!(ol_flags & PKT_TX_IPV6)));
+ /* Handle tunnel tso */
+ if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
+ (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ const uint8_t is_udp_tun = (NIX_UDP_TUN_BITMASK >>
+ ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & 0x1;
+
+ oiplen = (uint16_t *)(mdata + m->outer_l2_len +
+ (2 << !!(ol_flags & PKT_TX_OUTER_IPV6)));
+ *oiplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*oiplen) -
+ paylen);
+
+ /* Update format for UDP tunneled packet */
+ if (is_udp_tun) {
+ oudplen = (uint16_t *)(mdata + m->outer_l2_len +
+ m->outer_l3_len + 4);
+ *oudplen =
+ rte_cpu_to_be_16(rte_be_to_cpu_16(*oudplen) -
+ paylen);
+ }
+
+ /* Update iplen position to inner ip hdr */
+ iplen = (uint16_t *)(mdata + lso_sb - m->l3_len -
+ m->l4_len + (2 << !!(ol_flags & PKT_TX_IPV6)));
+ }
+
+ *iplen = rte_cpu_to_be_16(rte_be_to_cpu_16(*iplen) - paylen);
+ }
+}
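A small restatement of the length math above for the tunnelled case (helper and values are illustrative, not driver code): lso_sb covers all headers up to and including the inner L4 header, and the resulting paylen is subtracted from the outer IP, outer UDP and inner IP length fields so that LSO emits per-segment lengths.

	#include <stdint.h>

	static uint16_t example_tso_paylen(uint16_t pkt_len,
					   uint16_t outer_l2, uint16_t outer_l3,
					   uint16_t l2, uint16_t l3, uint16_t l4)
	{
		/* Headers preceding the TSO payload */
		uint16_t lso_sb = outer_l2 + outer_l3 + l2 + l3 + l4;

		return pkt_len - lso_sb;
	}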
+
+static __rte_always_inline void
+otx2_nix_xmit_prepare(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
+{
+ struct nix_send_ext_s *send_hdr_ext;
+ struct nix_send_hdr_s *send_hdr;
+ uint64_t ol_flags = 0, mask;
+ union nix_send_hdr_w1_u w1;
+ union nix_send_sg_s *sg;
+
+ send_hdr = (struct nix_send_hdr_s *)cmd;
+ if (flags & NIX_TX_NEED_EXT_HDR) {
+ send_hdr_ext = (struct nix_send_ext_s *)(cmd + 2);
+ sg = (union nix_send_sg_s *)(cmd + 4);
+ /* Clear previous markings */
+ send_hdr_ext->w0.lso = 0;
+ send_hdr_ext->w1.u = 0;
+ } else {
+ sg = (union nix_send_sg_s *)(cmd + 2);
+ }
+
+ if (flags & NIX_TX_NEED_SEND_HDR_W1) {
+ ol_flags = m->ol_flags;
+ w1.u = 0;
+ }
+
+ if (!(flags & NIX_TX_MULTI_SEG_F)) {
+ send_hdr->w0.total = m->data_len;
+ send_hdr->w0.aura =
+ npa_lf_aura_handle_to_aura(m->pool->pool_id);
+ }
+
+ /*
+ * L3type: 2 => IPV4
+ * 3 => IPV4 with csum
+ * 4 => IPV6
+ * L3type and L3ptr needs to be set for either
+ * L3 csum or L4 csum or LSO
+ *
+ */
+
+ if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
+ (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)) {
+ const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t ol3type =
+ ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+
+ /* Outer L3 */
+ w1.ol3type = ol3type;
+ mask = 0xffffull << ((!!ol3type) << 4);
+ w1.ol3ptr = ~mask & m->outer_l2_len;
+ w1.ol4ptr = ~mask & (w1.ol3ptr + m->outer_l3_len);
+
+ /* Outer L4 */
+ w1.ol4type = csum + (csum << 1);
+
+ /* Inner L3 */
+ w1.il3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
+ ((!!(ol_flags & PKT_TX_IPV6)) << 2);
+ w1.il3ptr = w1.ol4ptr + m->l2_len;
+ w1.il4ptr = w1.il3ptr + m->l3_len;
+ /* Increment it by 1 if it is IPV4 as 3 is with csum */
+ w1.il3type = w1.il3type + !!(ol_flags & PKT_TX_IP_CKSUM);
+
+ /* Inner L4 */
+ w1.il4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+
+		/* When there is no tunnel header, shift the IL3/IL4
+		 * fields down so that OL3/OL4 carry the header
+		 * checksum information
+ */
+ mask = !ol3type;
+ w1.u = ((w1.u & 0xFFFFFFFF00000000) >> (mask << 3)) |
+ ((w1.u & 0X00000000FFFFFFFF) >> (mask << 4));
+
+ } else if (flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) {
+ const uint8_t csum = !!(ol_flags & PKT_TX_OUTER_UDP_CKSUM);
+ const uint8_t outer_l2_len = m->outer_l2_len;
+
+ /* Outer L3 */
+ w1.ol3ptr = outer_l2_len;
+ w1.ol4ptr = outer_l2_len + m->outer_l3_len;
+ /* Increment it by 1 if it is IPV4 as 3 is with csum */
+ w1.ol3type = ((!!(ol_flags & PKT_TX_OUTER_IPV4)) << 1) +
+ ((!!(ol_flags & PKT_TX_OUTER_IPV6)) << 2) +
+ !!(ol_flags & PKT_TX_OUTER_IP_CKSUM);
+
+ /* Outer L4 */
+ w1.ol4type = csum + (csum << 1);
+
+ } else if (flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F) {
+ const uint8_t l2_len = m->l2_len;
+
+ /* Always use OLXPTR and OLXTYPE when only
+		 * one header is present
+ */
+
+ /* Inner L3 */
+ w1.ol3ptr = l2_len;
+ w1.ol4ptr = l2_len + m->l3_len;
+ /* Increment it by 1 if it is IPV4 as 3 is with csum */
+ w1.ol3type = ((!!(ol_flags & PKT_TX_IPV4)) << 1) +
+ ((!!(ol_flags & PKT_TX_IPV6)) << 2) +
+ !!(ol_flags & PKT_TX_IP_CKSUM);
+
+ /* Inner L4 */
+ w1.ol4type = (ol_flags & PKT_TX_L4_MASK) >> 52;
+ }
+
+ if (flags & NIX_TX_NEED_EXT_HDR &&
+ flags & NIX_TX_OFFLOAD_VLAN_QINQ_F) {
+ send_hdr_ext->w1.vlan1_ins_ena = !!(ol_flags & PKT_TX_VLAN);
+ /* HW will update ptr after vlan0 update */
+ send_hdr_ext->w1.vlan1_ins_ptr = 12;
+ send_hdr_ext->w1.vlan1_ins_tci = m->vlan_tci;
+
+ send_hdr_ext->w1.vlan0_ins_ena = !!(ol_flags & PKT_TX_QINQ);
+ /* 2B before end of l2 header */
+ send_hdr_ext->w1.vlan0_ins_ptr = 12;
+ send_hdr_ext->w1.vlan0_ins_tci = m->vlan_tci_outer;
+ }
+
+ if (flags & NIX_TX_OFFLOAD_TSO_F &&
+ (ol_flags & PKT_TX_TCP_SEG)) {
+ uint16_t lso_sb;
+ uint64_t mask;
+
+ mask = -(!w1.il3type);
+ lso_sb = (mask & w1.ol4ptr) + (~mask & w1.il4ptr) + m->l4_len;
+
+ send_hdr_ext->w0.lso_sb = lso_sb;
+ send_hdr_ext->w0.lso = 1;
+ send_hdr_ext->w0.lso_mps = m->tso_segsz;
+ send_hdr_ext->w0.lso_format =
+ NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6);
+ w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+
+ /* Handle tunnel tso */
+ if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) &&
+ (ol_flags & PKT_TX_TUNNEL_MASK)) {
+ const uint8_t is_udp_tun = (NIX_UDP_TUN_BITMASK >>
+ ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & 0x1;
+
+ w1.il4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ w1.ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0;
+ /* Update format for UDP tunneled packet */
+ send_hdr_ext->w0.lso_format += is_udp_tun ? 2 : 6;
+
+ send_hdr_ext->w0.lso_format +=
+ !!(ol_flags & PKT_TX_OUTER_IPV6) << 1;
+ }
+ }
+
+ if (flags & NIX_TX_NEED_SEND_HDR_W1)
+ send_hdr->w1.u = w1.u;
+
+ if (!(flags & NIX_TX_MULTI_SEG_F)) {
+ sg->seg1_size = m->data_len;
+ *(rte_iova_t *)(++sg) = rte_mbuf_data_iova(m);
+
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+ /* DF bit = 1 if refcount of current mbuf or parent mbuf
+ * is greater than 1
+ * DF bit = 0 otherwise
+ */
+ send_hdr->w0.df = otx2_nix_prefree_seg(m);
+ }
+ /* Mark mempool object as "put" since it is freed by NIX */
+ if (!send_hdr->w0.df)
+ __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ }
+}
+
+
+static __rte_always_inline void
+otx2_nix_xmit_one(uint64_t *cmd, void *lmt_addr,
+ const rte_iova_t io_addr, const uint32_t flags)
+{
+ uint64_t lmt_status;
+
+ do {
+ otx2_lmt_mov(lmt_addr, cmd, otx2_nix_tx_ext_subs(flags));
+ lmt_status = otx2_lmt_submit(io_addr);
+ } while (lmt_status == 0);
+}
+
+static __rte_always_inline uint16_t
+otx2_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
+{
+ struct nix_send_hdr_s *send_hdr;
+ union nix_send_sg_s *sg;
+ struct rte_mbuf *m_next;
+ uint64_t *slist, sg_u;
+ uint64_t nb_segs;
+ uint64_t segdw;
+ uint8_t off, i;
+
+ send_hdr = (struct nix_send_hdr_s *)cmd;
+ send_hdr->w0.total = m->pkt_len;
+ send_hdr->w0.aura = npa_lf_aura_handle_to_aura(m->pool->pool_id);
+
+ if (flags & NIX_TX_NEED_EXT_HDR)
+ off = 2;
+ else
+ off = 0;
+
+ sg = (union nix_send_sg_s *)&cmd[2 + off];
+ /* Clear sg->u header before use */
+ sg->u &= 0xFC00000000000000;
+ sg_u = sg->u;
+ slist = &cmd[3 + off];
+
+ i = 0;
+ nb_segs = m->nb_segs;
+
+ /* Fill mbuf segments */
+ do {
+ m_next = m->next;
+ sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
+ *slist = rte_mbuf_data_iova(m);
+ /* Set invert df if buffer is not to be freed by H/W */
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+ sg_u |= (otx2_nix_prefree_seg(m) << (i + 55));
+ /* Mark mempool object as "put" since it is freed by NIX */
+ if (!(sg_u & (1ULL << (i + 55)))) {
+ m->next = NULL;
+ __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ }
+ slist++;
+ i++;
+ nb_segs--;
+ if (i > 2 && nb_segs) {
+ i = 0;
+ /* Next SG subdesc */
+ *(uint64_t *)slist = sg_u & 0xFC00000000000000;
+ sg->u = sg_u;
+ sg->segs = 3;
+ sg = (union nix_send_sg_s *)slist;
+ sg_u = sg->u;
+ slist++;
+ }
+ m = m_next;
+ } while (nb_segs);
+
+ sg->u = sg_u;
+ sg->segs = i;
+ segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
+	/* Round up extra dwords to a multiple of 2 */
+ segdw = (segdw >> 1) + (segdw & 0x1);
+ /* Default dwords */
+ segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
+ send_hdr->w0.sizem1 = segdw - 1;
+
+ return segdw;
+}
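A hedged closed-form restatement of the descriptor sizing performed above (the helper is illustrative): each group of up to three segments takes one SG header dword plus one pointer dword per segment, the total is rounded up to 128-bit units, and SEND_HDR plus optional EXT/SEND_MEM subdescriptors are added on top.

	static inline unsigned int
	example_mseg_segdw(unsigned int nb_segs, int has_ext, int has_tstamp)
	{
		unsigned int sg_dwords = nb_segs + (nb_segs + 2) / 3;

		/* Round SG dwords up to 128-bit units, then add SEND_HDR
		 * and, when present, EXT and SEND_MEM subdescriptors.
		 */
		return (sg_dwords + 1) / 2 + 1 +
		       (has_ext ? 1 : 0) + (has_tstamp ? 1 : 0);
	}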
+
+static __rte_always_inline void
+otx2_nix_xmit_mseg_one(uint64_t *cmd, void *lmt_addr,
+ rte_iova_t io_addr, uint16_t segdw)
+{
+ uint64_t lmt_status;
+
+ do {
+ otx2_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
+ lmt_status = otx2_lmt_submit(io_addr);
+ } while (lmt_status == 0);
+}
+
+#define L3L4CSUM_F NIX_TX_OFFLOAD_L3_L4_CSUM_F
+#define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
+#define VLAN_F NIX_TX_OFFLOAD_VLAN_QINQ_F
+#define NOFF_F NIX_TX_OFFLOAD_MBUF_NOFF_F
+#define TSP_F NIX_TX_OFFLOAD_TSTAMP_F
+#define TSO_F NIX_TX_OFFLOAD_TSO_F
+#define TX_SEC_F NIX_TX_OFFLOAD_SECURITY_F
+
+/* [SEC] [TSO] [TSTMP] [NOFF] [VLAN] [OL3OL4CSUM] [L3L4CSUM] */
+#define NIX_TX_FASTPATH_MODES \
+T(no_offload, 0, 0, 0, 0, 0, 0, 0, 4, \
+ NIX_TX_OFFLOAD_NONE) \
+T(l3l4csum, 0, 0, 0, 0, 0, 0, 1, 4, \
+ L3L4CSUM_F) \
+T(ol3ol4csum, 0, 0, 0, 0, 0, 1, 0, 4, \
+ OL3OL4CSUM_F) \
+T(ol3ol4csum_l3l4csum, 0, 0, 0, 0, 0, 1, 1, 4, \
+ OL3OL4CSUM_F | L3L4CSUM_F) \
+T(vlan, 0, 0, 0, 0, 1, 0, 0, 6, \
+ VLAN_F) \
+T(vlan_l3l4csum, 0, 0, 0, 0, 1, 0, 1, 6, \
+ VLAN_F | L3L4CSUM_F) \
+T(vlan_ol3ol4csum, 0, 0, 0, 0, 1, 1, 0, 6, \
+ VLAN_F | OL3OL4CSUM_F) \
+T(vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 0, 1, 1, 1, 6, \
+ VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(noff, 0, 0, 0, 1, 0, 0, 0, 4, \
+ NOFF_F) \
+T(noff_l3l4csum, 0, 0, 0, 1, 0, 0, 1, 4, \
+ NOFF_F | L3L4CSUM_F) \
+T(noff_ol3ol4csum, 0, 0, 0, 1, 0, 1, 0, 4, \
+ NOFF_F | OL3OL4CSUM_F) \
+T(noff_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 0, 1, 1, 4, \
+ NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(noff_vlan, 0, 0, 0, 1, 1, 0, 0, 6, \
+ NOFF_F | VLAN_F) \
+T(noff_vlan_l3l4csum, 0, 0, 0, 1, 1, 0, 1, 6, \
+ NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(noff_vlan_ol3ol4csum, 0, 0, 0, 1, 1, 1, 0, 6, \
+ NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 0, 1, 1, 1, 1, 6, \
+ NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts, 0, 0, 1, 0, 0, 0, 0, 8, \
+ TSP_F) \
+T(ts_l3l4csum, 0, 0, 1, 0, 0, 0, 1, 8, \
+ TSP_F | L3L4CSUM_F) \
+T(ts_ol3ol4csum, 0, 0, 1, 0, 0, 1, 0, 8, \
+ TSP_F | OL3OL4CSUM_F) \
+T(ts_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 0, 1, 1, 8, \
+ TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_vlan, 0, 0, 1, 0, 1, 0, 0, 8, \
+ TSP_F | VLAN_F) \
+T(ts_vlan_l3l4csum, 0, 0, 1, 0, 1, 0, 1, 8, \
+ TSP_F | VLAN_F | L3L4CSUM_F) \
+T(ts_vlan_ol3ol4csum, 0, 0, 1, 0, 1, 1, 0, 8, \
+ TSP_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 0, 1, 1, 1, 8, \
+ TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_noff, 0, 0, 1, 1, 0, 0, 0, 8, \
+ TSP_F | NOFF_F) \
+T(ts_noff_l3l4csum, 0, 0, 1, 1, 0, 0, 1, 8, \
+ TSP_F | NOFF_F | L3L4CSUM_F) \
+T(ts_noff_ol3ol4csum, 0, 0, 1, 1, 0, 1, 0, 8, \
+ TSP_F | NOFF_F | OL3OL4CSUM_F) \
+T(ts_noff_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 0, 1, 1, 8, \
+ TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(ts_noff_vlan, 0, 0, 1, 1, 1, 0, 0, 8, \
+ TSP_F | NOFF_F | VLAN_F) \
+T(ts_noff_vlan_l3l4csum, 0, 0, 1, 1, 1, 0, 1, 8, \
+ TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(ts_noff_vlan_ol3ol4csum, 0, 0, 1, 1, 1, 1, 0, 8, \
+ TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(ts_noff_vlan_ol3ol4csum_l3l4csum, 0, 0, 1, 1, 1, 1, 1, 8, \
+ TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+ \
+T(tso, 0, 1, 0, 0, 0, 0, 0, 6, \
+ TSO_F) \
+T(tso_l3l4csum, 0, 1, 0, 0, 0, 0, 1, 6, \
+ TSO_F | L3L4CSUM_F) \
+T(tso_ol3ol4csum, 0, 1, 0, 0, 0, 1, 0, 6, \
+ TSO_F | OL3OL4CSUM_F) \
+T(tso_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 0, 1, 1, 6, \
+ TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_vlan, 0, 1, 0, 0, 1, 0, 0, 6, \
+ TSO_F | VLAN_F) \
+T(tso_vlan_l3l4csum, 0, 1, 0, 0, 1, 0, 1, 6, \
+ TSO_F | VLAN_F | L3L4CSUM_F) \
+T(tso_vlan_ol3ol4csum, 0, 1, 0, 0, 1, 1, 0, 6, \
+ TSO_F | VLAN_F | OL3OL4CSUM_F) \
+T(tso_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 0, 1, 1, 1, 6, \
+ TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_noff, 0, 1, 0, 1, 0, 0, 0, 6, \
+ TSO_F | NOFF_F) \
+T(tso_noff_l3l4csum, 0, 1, 0, 1, 0, 0, 1, 6, \
+ TSO_F | NOFF_F | L3L4CSUM_F) \
+T(tso_noff_ol3ol4csum, 0, 1, 0, 1, 0, 1, 0, 6, \
+ TSO_F | NOFF_F | OL3OL4CSUM_F) \
+T(tso_noff_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 0, 1, 1, 6, \
+ TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_noff_vlan, 0, 1, 0, 1, 1, 0, 0, 6, \
+ TSO_F | NOFF_F | VLAN_F) \
+T(tso_noff_vlan_l3l4csum, 0, 1, 0, 1, 1, 0, 1, 6, \
+ TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(tso_noff_vlan_ol3ol4csum, 0, 1, 0, 1, 1, 1, 0, 6, \
+ TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(tso_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 0, 1, 1, 1, 1, 6, \
+ TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_ts, 0, 1, 1, 0, 0, 0, 0, 8, \
+ TSO_F | TSP_F) \
+T(tso_ts_l3l4csum, 0, 1, 1, 0, 0, 0, 1, 8, \
+ TSO_F | TSP_F | L3L4CSUM_F) \
+T(tso_ts_ol3ol4csum, 0, 1, 1, 0, 0, 1, 0, 8, \
+ TSO_F | TSP_F | OL3OL4CSUM_F) \
+T(tso_ts_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 0, 1, 1, 8, \
+ TSO_F | TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_ts_vlan, 0, 1, 1, 0, 1, 0, 0, 8, \
+ TSO_F | TSP_F | VLAN_F) \
+T(tso_ts_vlan_l3l4csum, 0, 1, 1, 0, 1, 0, 1, 8, \
+ TSO_F | TSP_F | VLAN_F | L3L4CSUM_F) \
+T(tso_ts_vlan_ol3ol4csum, 0, 1, 1, 0, 1, 1, 0, 8, \
+ TSO_F | TSP_F | VLAN_F | OL3OL4CSUM_F) \
+T(tso_ts_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 0, 1, 1, 1, 8, \
+ TSO_F | TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_ts_noff, 0, 1, 1, 1, 0, 0, 0, 8, \
+ TSO_F | TSP_F | NOFF_F) \
+T(tso_ts_noff_l3l4csum, 0, 1, 1, 1, 0, 0, 1, 8, \
+ TSO_F | TSP_F | NOFF_F | L3L4CSUM_F) \
+T(tso_ts_noff_ol3ol4csum, 0, 1, 1, 1, 0, 1, 0, 8, \
+ TSO_F | TSP_F | NOFF_F | OL3OL4CSUM_F) \
+T(tso_ts_noff_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 0, 1, 1, 8, \
+ TSO_F | TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(tso_ts_noff_vlan, 0, 1, 1, 1, 1, 0, 0, 8, \
+ TSO_F | TSP_F | NOFF_F | VLAN_F) \
+T(tso_ts_noff_vlan_l3l4csum, 0, 1, 1, 1, 1, 0, 1, 8, \
+ TSO_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(tso_ts_noff_vlan_ol3ol4csum, 0, 1, 1, 1, 1, 1, 0, 8, \
+ TSO_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(tso_ts_noff_vlan_ol3ol4csum_l3l4csum, 0, 1, 1, 1, 1, 1, 1, 8, \
+ TSO_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \
+ L3L4CSUM_F) \
+T(sec, 1, 0, 0, 0, 0, 0, 0, 8, \
+ TX_SEC_F) \
+T(sec_l3l4csum, 1, 0, 0, 0, 0, 0, 1, 8, \
+ TX_SEC_F | L3L4CSUM_F) \
+T(sec_ol3ol4csum, 1, 0, 0, 0, 0, 1, 0, 8, \
+ TX_SEC_F | OL3OL4CSUM_F) \
+T(sec_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 0, 1, 1, 8, \
+ TX_SEC_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_vlan, 1, 0, 0, 0, 1, 0, 0, 8, \
+ TX_SEC_F | VLAN_F) \
+T(sec_vlan_l3l4csum, 1, 0, 0, 0, 1, 0, 1, 8, \
+ TX_SEC_F | VLAN_F | L3L4CSUM_F) \
+T(sec_vlan_ol3ol4csum, 1, 0, 0, 0, 1, 1, 0, 8, \
+ TX_SEC_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 0, 1, 1, 1, 8, \
+ TX_SEC_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_noff, 1, 0, 0, 1, 0, 0, 0, 8, \
+ TX_SEC_F | NOFF_F) \
+T(sec_noff_l3l4csum, 1, 0, 0, 1, 0, 0, 1, 8, \
+ TX_SEC_F | NOFF_F | L3L4CSUM_F) \
+T(sec_noff_ol3ol4csum, 1, 0, 0, 1, 0, 1, 0, 8, \
+ TX_SEC_F | NOFF_F | OL3OL4CSUM_F) \
+T(sec_noff_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 0, 1, 1, 8, \
+ TX_SEC_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_noff_vlan, 1, 0, 0, 1, 1, 0, 0, 8, \
+ TX_SEC_F | NOFF_F | VLAN_F) \
+T(sec_noff_vlan_l3l4csum, 1, 0, 0, 1, 1, 0, 1, 8, \
+ TX_SEC_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(sec_noff_vlan_ol3ol4csum, 1, 0, 0, 1, 1, 1, 0, 8, \
+ TX_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 0, 1, 1, 1, 1, 8, \
+ TX_SEC_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts, 1, 0, 1, 0, 0, 0, 0, 8, \
+ TX_SEC_F | TSP_F) \
+T(sec_ts_l3l4csum, 1, 0, 1, 0, 0, 0, 1, 8, \
+ TX_SEC_F | TSP_F | L3L4CSUM_F) \
+T(sec_ts_ol3ol4csum, 1, 0, 1, 0, 0, 1, 0, 8, \
+ TX_SEC_F | TSP_F | OL3OL4CSUM_F) \
+T(sec_ts_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 0, 1, 1, 8, \
+ TX_SEC_F | TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_vlan, 1, 0, 1, 0, 1, 0, 0, 8, \
+ TX_SEC_F | TSP_F | VLAN_F) \
+T(sec_ts_vlan_l3l4csum, 1, 0, 1, 0, 1, 0, 1, 8, \
+ TX_SEC_F | TSP_F | VLAN_F | L3L4CSUM_F) \
+T(sec_ts_vlan_ol3ol4csum, 1, 0, 1, 0, 1, 1, 0, 8, \
+ TX_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_ts_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 0, 1, 1, 1, 8, \
+ TX_SEC_F | TSP_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_noff, 1, 0, 1, 1, 0, 0, 0, 8, \
+ TX_SEC_F | TSP_F | NOFF_F) \
+T(sec_ts_noff_l3l4csum, 1, 0, 1, 1, 0, 0, 1, 8, \
+ TX_SEC_F | TSP_F | NOFF_F | L3L4CSUM_F) \
+T(sec_ts_noff_ol3ol4csum, 1, 0, 1, 1, 0, 1, 0, 8, \
+ TX_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F) \
+T(sec_ts_noff_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 0, 1, 1, 8, \
+ TX_SEC_F | TSP_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_ts_noff_vlan, 1, 0, 1, 1, 1, 0, 0, 8, \
+ TX_SEC_F | TSP_F | NOFF_F | VLAN_F) \
+T(sec_ts_noff_vlan_l3l4csum, 1, 0, 1, 1, 1, 0, 1, 8, \
+ TX_SEC_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(sec_ts_noff_vlan_ol3ol4csum, 1, 0, 1, 1, 1, 1, 0, 8, \
+ TX_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_ts_noff_vlan_ol3ol4csum_l3l4csum, 1, 0, 1, 1, 1, 1, 1, 8, \
+ TX_SEC_F | TSP_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \
+ L3L4CSUM_F) \
+T(sec_tso, 1, 1, 0, 0, 0, 0, 0, 8, \
+ TX_SEC_F | TSO_F) \
+T(sec_tso_l3l4csum, 1, 1, 0, 0, 0, 0, 1, 8, \
+ TX_SEC_F | TSO_F | L3L4CSUM_F) \
+T(sec_tso_ol3ol4csum, 1, 1, 0, 0, 0, 1, 0, 8, \
+ TX_SEC_F | TSO_F | OL3OL4CSUM_F) \
+T(sec_tso_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 0, 1, 1, 8, \
+ TX_SEC_F | TSO_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_tso_vlan, 1, 1, 0, 0, 1, 0, 0, 8, \
+ TX_SEC_F | TSO_F | VLAN_F) \
+T(sec_tso_vlan_l3l4csum, 1, 1, 0, 0, 1, 0, 1, 8, \
+ TX_SEC_F | TSO_F | VLAN_F | L3L4CSUM_F) \
+T(sec_tso_vlan_ol3ol4csum, 1, 1, 0, 0, 1, 1, 0, 8, \
+ TX_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_tso_vlan_ol3ol4csum_l3l4csum, 1, 1, 0, 0, 1, 1, 1, 8, \
+ TX_SEC_F | TSO_F | VLAN_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_tso_noff, 1, 1, 0, 1, 0, 0, 0, 8, \
+ TX_SEC_F | TSO_F | NOFF_F) \
+T(sec_tso_noff_l3l4csum, 1, 1, 0, 1, 0, 0, 1, 8, \
+ TX_SEC_F | TSO_F | NOFF_F | L3L4CSUM_F) \
+T(sec_tso_noff_ol3ol4csum, 1, 1, 0, 1, 0, 1, 0, 8, \
+ TX_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F) \
+T(sec_tso_noff_ol3ol4csum_l3l4csum, 1, 1, 0, 1, 0, 1, 1, 8, \
+ TX_SEC_F | TSO_F | NOFF_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_tso_noff_vlan, 1, 1, 0, 1, 1, 0, 0, 8, \
+ TX_SEC_F | TSO_F | NOFF_F | VLAN_F) \
+T(sec_tso_noff_vlan_l3l4csum, 1, 1, 0, 1, 1, 0, 1, 8, \
+ TX_SEC_F | TSO_F | NOFF_F | VLAN_F | L3L4CSUM_F) \
+T(sec_tso_noff_vlan_ol3ol4csum, 1, 1, 0, 1, 1, 1, 0, 8, \
+ TX_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_tso_noff_vlan_ol3ol4csum_l3l4csum, \
+ 1, 1, 0, 1, 1, 1, 1, 8, \
+ TX_SEC_F | TSO_F | NOFF_F | VLAN_F | OL3OL4CSUM_F | \
+ L3L4CSUM_F) \
+T(sec_tso_ts, 1, 1, 1, 0, 0, 0, 0, 8, \
+ TX_SEC_F | TSO_F | TSP_F) \
+T(sec_tso_ts_l3l4csum, 1, 1, 1, 0, 0, 0, 1, 8, \
+ TX_SEC_F | TSO_F | TSP_F | L3L4CSUM_F) \
+T(sec_tso_ts_ol3ol4csum, 1, 1, 1, 0, 0, 1, 0, 8, \
+ TX_SEC_F | TSO_F | TSP_F | OL3OL4CSUM_F) \
+T(sec_tso_ts_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 0, 1, 1, 8, \
+ TX_SEC_F | TSO_F | TSP_F | OL3OL4CSUM_F | L3L4CSUM_F) \
+T(sec_tso_ts_vlan, 1, 1, 1, 0, 1, 0, 0, 8, \
+ TX_SEC_F | TSO_F | TSP_F | VLAN_F) \
+T(sec_tso_ts_vlan_l3l4csum, 1, 1, 1, 0, 1, 0, 1, 8, \
+ TX_SEC_F | TSO_F | TSP_F | VLAN_F | L3L4CSUM_F) \
+T(sec_tso_ts_vlan_ol3ol4csum, 1, 1, 1, 0, 1, 1, 0, 8, \
+ TX_SEC_F | TSO_F | TSP_F | VLAN_F | OL3OL4CSUM_F) \
+T(sec_tso_ts_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 0, 1, 1, 1, 8, \
+ TX_SEC_F | TSO_F | TSP_F | VLAN_F | OL3OL4CSUM_F | \
+ L3L4CSUM_F) \
+T(sec_tso_ts_noff, 1, 1, 1, 1, 0, 0, 0, 8, \
+ TX_SEC_F | TSO_F | TSP_F | NOFF_F) \
+T(sec_tso_ts_noff_l3l4csum, 1, 1, 1, 1, 0, 0, 1, 8, \
+ TX_SEC_F | TSO_F | TSP_F | NOFF_F | L3L4CSUM_F) \
+T(sec_tso_ts_noff_ol3ol4csum, 1, 1, 1, 1, 0, 1, 0, 8, \
+ TX_SEC_F | TSO_F | TSP_F | NOFF_F | OL3OL4CSUM_F) \
+T(sec_tso_ts_noff_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 0, 1, 1, 8, \
+ TX_SEC_F | TSO_F | TSP_F | NOFF_F | OL3OL4CSUM_F | \
+ L3L4CSUM_F) \
+T(sec_tso_ts_noff_vlan, 1, 1, 1, 1, 1, 0, 0, 8, \
+ TX_SEC_F | TSO_F | TSP_F | NOFF_F | VLAN_F) \
+T(sec_tso_ts_noff_vlan_l3l4csum, 1, 1, 1, 1, 1, 0, 1, 8, \
+ TX_SEC_F | TSO_F | TSP_F | NOFF_F | VLAN_F | L3L4CSUM_F)\
+T(sec_tso_ts_noff_vlan_ol3ol4csum, 1, 1, 1, 1, 1, 1, 0, 8, \
+ TX_SEC_F | TSO_F | TSP_F | NOFF_F | VLAN_F | \
+ OL3OL4CSUM_F) \
+T(sec_tso_ts_noff_vlan_ol3ol4csum_l3l4csum, \
+ 1, 1, 1, 1, 1, 1, 1, 8, \
+ TX_SEC_F | TSO_F | TSP_F | NOFF_F | VLAN_F | \
+ OL3OL4CSUM_F | L3L4CSUM_F)
+#endif /* __OTX2_TX_H__ */
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/otx2_vlan.c b/src/spdk/dpdk/drivers/net/octeontx2/otx2_vlan.c
new file mode 100644
index 000000000..322a565b3
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/otx2_vlan.c
@@ -0,0 +1,1040 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "otx2_ethdev.h"
+#include "otx2_flow.h"
+
+
+#define VLAN_ID_MATCH 0x1
+#define VTAG_F_MATCH 0x2
+#define MAC_ADDR_MATCH 0x4
+#define QINQ_F_MATCH 0x8
+#define VLAN_DROP 0x10
+#define DEF_F_ENTRY 0x20
+
+enum vtag_cfg_dir {
+ VTAG_TX,
+ VTAG_RX
+};
+
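+/* Enable or disable an already installed NPC MCAM entry via the AF mailbox */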
+static int
+nix_vlan_mcam_enb_dis(struct otx2_eth_dev *dev,
+ uint32_t entry, const int enable)
+{
+ struct npc_mcam_ena_dis_entry_req *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc = -EINVAL;
+
+ if (enable)
+ req = otx2_mbox_alloc_msg_npc_mcam_ena_entry(mbox);
+ else
+ req = otx2_mbox_alloc_msg_npc_mcam_dis_entry(mbox);
+
+ req->entry = entry;
+
+ rc = otx2_mbox_process_msg(mbox, NULL);
+ return rc;
+}
+
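+/* Fill rx action (unicast, or RSS as per the port mq_mode) and the
+ * VTAG0/VTAG1 capture action; drop entries override the action op with DROP.
+ */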
+static void
+nix_set_rx_vlan_action(struct rte_eth_dev *eth_dev,
+ struct mcam_entry *entry, bool qinq, bool drop)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int pcifunc = otx2_pfvf_func(dev->pf, dev->vf);
+ uint64_t action = 0, vtag_action = 0;
+
+ action = NIX_RX_ACTIONOP_UCAST;
+
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ action = NIX_RX_ACTIONOP_RSS;
+ action |= (uint64_t)(dev->rss_info.alg_idx) << 56;
+ }
+
+ action |= (uint64_t)pcifunc << 4;
+ entry->action = action;
+
+ if (drop) {
+ entry->action &= ~((uint64_t)0xF);
+ entry->action |= NIX_RX_ACTIONOP_DROP;
+ return;
+ }
+
+ if (!qinq) {
+ /* VTAG0 fields denote CTAG in single vlan case */
+ vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15);
+ vtag_action |= (NPC_LID_LB << 8);
+ vtag_action |= NIX_RX_VTAGACTION_VTAG0_RELPTR;
+ } else {
+ /* VTAG0 & VTAG1 fields denote CTAG & STAG respectively */
+ vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 15);
+ vtag_action |= (NPC_LID_LB << 8);
+ vtag_action |= NIX_RX_VTAGACTION_VTAG1_RELPTR;
+ vtag_action |= (NIX_RX_VTAGACTION_VTAG_VALID << 47);
+ vtag_action |= ((uint64_t)(NPC_LID_LB) << 40);
+ vtag_action |= (NIX_RX_VTAGACTION_VTAG0_RELPTR << 32);
+ }
+
+ entry->vtag_action = vtag_action;
+}
+
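+/* Fill default unicast tx action and a VTAG insert action for the given
+ * vtag index; outer vlan uses VTAG0, inner vlan uses VTAG1.
+ */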
+static void
+nix_set_tx_vlan_action(struct mcam_entry *entry, enum rte_vlan_type type,
+ int vtag_index)
+{
+ union {
+ uint64_t reg;
+ struct nix_tx_vtag_action_s act;
+ } vtag_action;
+
+ uint64_t action;
+
+ action = NIX_TX_ACTIONOP_UCAST_DEFAULT;
+
+ /*
+ * Take offset from LA since in case of untagged packet,
+ * lbptr is zero.
+ */
+ if (type == ETH_VLAN_TYPE_OUTER) {
+ vtag_action.act.vtag0_def = vtag_index;
+ vtag_action.act.vtag0_lid = NPC_LID_LA;
+ vtag_action.act.vtag0_op = NIX_TX_VTAGOP_INSERT;
+ vtag_action.act.vtag0_relptr = NIX_TX_VTAGACTION_VTAG0_RELPTR;
+ } else {
+ vtag_action.act.vtag1_def = vtag_index;
+ vtag_action.act.vtag1_lid = NPC_LID_LA;
+ vtag_action.act.vtag1_op = NIX_TX_VTAGOP_INSERT;
+ vtag_action.act.vtag1_relptr = NIX_TX_VTAGACTION_VTAG1_RELPTR;
+ }
+
+ entry->action = action;
+ entry->vtag_action = vtag_action.reg;
+}
+
+static int
+nix_vlan_mcam_free(struct otx2_eth_dev *dev, uint32_t entry)
+{
+ struct npc_mcam_free_entry_req *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc = -EINVAL;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(mbox);
+ req->entry = entry;
+
+ rc = otx2_mbox_process_msg(mbox, NULL);
+ return rc;
+}
+
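+/* Write a prepared MCAM entry at a known index via the mailbox */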
+static int
+nix_vlan_mcam_write(struct rte_eth_dev *eth_dev, uint16_t ent_idx,
+ struct mcam_entry *entry, uint8_t intf, uint8_t ena)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct npc_mcam_write_entry_req *req;
+ struct otx2_mbox *mbox = dev->mbox;
+ struct msghdr *rsp;
+ int rc = -EINVAL;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_write_entry(mbox);
+
+ req->entry = ent_idx;
+ req->intf = intf;
+ req->enable_entry = ena;
+ memcpy(&req->entry_data, entry, sizeof(struct mcam_entry));
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ return rc;
+}
+
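+/* Allocate an MCAM entry (placed relative to the default rx/qinq entries
+ * when they exist) and write it in one mailbox call; returns the new
+ * entry index on success.
+ */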
+static int
+nix_vlan_mcam_alloc_and_write(struct rte_eth_dev *eth_dev,
+ struct mcam_entry *entry,
+ uint8_t intf, bool drop)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct npc_mcam_alloc_and_write_entry_req *req;
+ struct npc_mcam_alloc_and_write_entry_rsp *rsp;
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc = -EINVAL;
+
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_and_write_entry(mbox);
+
+ if (intf == NPC_MCAM_RX) {
+ if (!drop && dev->vlan_info.def_rx_mcam_idx) {
+ req->priority = NPC_MCAM_HIGHER_PRIO;
+ req->ref_entry = dev->vlan_info.def_rx_mcam_idx;
+ } else if (drop && dev->vlan_info.qinq_mcam_idx) {
+ req->priority = NPC_MCAM_LOWER_PRIO;
+ req->ref_entry = dev->vlan_info.qinq_mcam_idx;
+ } else {
+ req->priority = NPC_MCAM_ANY_PRIO;
+ req->ref_entry = 0;
+ }
+ } else {
+ req->priority = NPC_MCAM_ANY_PRIO;
+ req->ref_entry = 0;
+ }
+
+ req->intf = intf;
+ req->enable_entry = 1;
+ memcpy(&req->entry_data, entry, sizeof(struct mcam_entry));
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ return rsp->entry;
+}
+
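+/* Read back an installed rx MCAM entry and update its MAC match: mask the
+ * MAC bytes out when promiscuous mode is enabled, restore the port MAC
+ * match otherwise, then write the entry back.
+ */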
+static void
+nix_vlan_update_mac(struct rte_eth_dev *eth_dev, int mcam_index,
+ int enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct vlan_mkex_info *mkex = &dev->vlan_info.mkex;
+ volatile uint8_t *key_data, *key_mask;
+ struct npc_mcam_read_entry_req *req;
+ struct npc_mcam_read_entry_rsp *rsp;
+ struct otx2_mbox *mbox = dev->mbox;
+ uint64_t mcam_data, mcam_mask;
+ struct mcam_entry entry;
+ uint8_t intf, mcam_ena;
+ int idx, rc = -EINVAL;
+ uint8_t *mac_addr;
+
+ memset(&entry, 0, sizeof(struct mcam_entry));
+
+ /* Read entry first */
+ req = otx2_mbox_alloc_msg_npc_mcam_read_entry(mbox);
+
+ req->entry = mcam_index;
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc) {
+ otx2_err("Failed to read entry %d", mcam_index);
+ return;
+ }
+
+ entry = rsp->entry_data;
+ intf = rsp->intf;
+ mcam_ena = rsp->enable;
+
+ /* Update mcam address */
+ key_data = (volatile uint8_t *)entry.kw;
+ key_mask = (volatile uint8_t *)entry.kw_mask;
+
+ if (enable) {
+ mcam_mask = 0;
+ otx2_mbox_memcpy(key_mask + mkex->la_xtract.key_off,
+ &mcam_mask, mkex->la_xtract.len + 1);
+
+ } else {
+ mcam_data = 0ULL;
+ mac_addr = dev->mac_addr;
+ for (idx = RTE_ETHER_ADDR_LEN - 1; idx >= 0; idx--)
+ mcam_data |= ((uint64_t)*mac_addr++) << (8 * idx);
+
+ mcam_mask = BIT_ULL(48) - 1;
+
+ otx2_mbox_memcpy(key_data + mkex->la_xtract.key_off,
+ &mcam_data, mkex->la_xtract.len + 1);
+ otx2_mbox_memcpy(key_mask + mkex->la_xtract.key_off,
+ &mcam_mask, mkex->la_xtract.len + 1);
+ }
+
+ /* Write back the mcam entry */
+ rc = nix_vlan_mcam_write(eth_dev, mcam_index,
+ &entry, intf, mcam_ena);
+ if (rc) {
+ otx2_err("Failed to write entry %d", mcam_index);
+ return;
+ }
+}
+
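+/* Promiscuous mode change: update the MAC match of the default rx entry
+ * and of every installed vlan filter entry.
+ */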
+void
+otx2_nix_vlan_update_promisc(struct rte_eth_dev *eth_dev, int enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan = &dev->vlan_info;
+ struct vlan_entry *entry;
+
+ /* Already in required mode */
+ if (enable == vlan->promisc_on)
+ return;
+
+ /* Update default rx entry */
+ if (vlan->def_rx_mcam_idx)
+ nix_vlan_update_mac(eth_dev, vlan->def_rx_mcam_idx, enable);
+
+ /* Update all other rx filter entries */
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next)
+ nix_vlan_update_mac(eth_dev, entry->mcam_idx, enable);
+
+ vlan->promisc_on = enable;
+}
+
+/* Configure mcam entry with required MCAM search rules */
+static int
+nix_vlan_mcam_config(struct rte_eth_dev *eth_dev,
+ uint16_t vlan_id, uint16_t flags)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct vlan_mkex_info *mkex = &dev->vlan_info.mkex;
+ volatile uint8_t *key_data, *key_mask;
+ uint64_t mcam_data, mcam_mask;
+ struct mcam_entry entry;
+ uint8_t *mac_addr;
+ int idx, kwi = 0;
+
+ memset(&entry, 0, sizeof(struct mcam_entry));
+ key_data = (volatile uint8_t *)entry.kw;
+ key_mask = (volatile uint8_t *)entry.kw_mask;
+
+ /* Channel base extracted to KW0[11:0] */
+ entry.kw[kwi] = dev->rx_chan_base;
+ entry.kw_mask[kwi] = BIT_ULL(12) - 1;
+
+ /* Adds vlan_id & LB CTAG flag to MCAM KW */
+ if (flags & VLAN_ID_MATCH) {
+ entry.kw[kwi] |= (NPC_LT_LB_CTAG | NPC_LT_LB_STAG_QINQ)
+ << mkex->lb_lt_offset;
+ entry.kw_mask[kwi] |=
+ (0xF & ~(NPC_LT_LB_CTAG ^ NPC_LT_LB_STAG_QINQ))
+ << mkex->lb_lt_offset;
+
+ mcam_data = ((uint32_t)vlan_id << 16);
+ mcam_mask = (BIT_ULL(16) - 1) << 16;
+ otx2_mbox_memcpy(key_data + mkex->lb_xtract.key_off,
+ &mcam_data, mkex->lb_xtract.len + 1);
+ otx2_mbox_memcpy(key_mask + mkex->lb_xtract.key_off,
+ &mcam_mask, mkex->lb_xtract.len + 1);
+ }
+
+ /* Adds LB STAG flag to MCAM KW */
+ if (flags & QINQ_F_MATCH) {
+ entry.kw[kwi] |= NPC_LT_LB_STAG_QINQ << mkex->lb_lt_offset;
+ entry.kw_mask[kwi] |= 0xFULL << mkex->lb_lt_offset;
+ }
+
+ /* Adds LB CTAG & LB STAG flags to MCAM KW */
+ if (flags & VTAG_F_MATCH) {
+ entry.kw[kwi] |= (NPC_LT_LB_CTAG | NPC_LT_LB_STAG_QINQ)
+ << mkex->lb_lt_offset;
+ entry.kw_mask[kwi] |=
+ (0xF & ~(NPC_LT_LB_CTAG ^ NPC_LT_LB_STAG_QINQ))
+ << mkex->lb_lt_offset;
+ }
+
+ /* Adds port MAC address to MCAM KW */
+ if (flags & MAC_ADDR_MATCH) {
+ mcam_data = 0ULL;
+ mac_addr = dev->mac_addr;
+ for (idx = RTE_ETHER_ADDR_LEN - 1; idx >= 0; idx--)
+ mcam_data |= ((uint64_t)*mac_addr++) << (8 * idx);
+
+ mcam_mask = BIT_ULL(48) - 1;
+ otx2_mbox_memcpy(key_data + mkex->la_xtract.key_off,
+ &mcam_data, mkex->la_xtract.len + 1);
+ otx2_mbox_memcpy(key_mask + mkex->la_xtract.key_off,
+ &mcam_mask, mkex->la_xtract.len + 1);
+ }
+
+ /* VLAN_DROP: drop action for all vlan packets when filter is on.
+ * For QinQ, enable vtag action for both outer & inner tags.
+ */
+ if (flags & VLAN_DROP)
+ nix_set_rx_vlan_action(eth_dev, &entry, false, true);
+ else if (flags & QINQ_F_MATCH)
+ nix_set_rx_vlan_action(eth_dev, &entry, true, false);
+ else
+ nix_set_rx_vlan_action(eth_dev, &entry, false, false);
+
+ if (flags & DEF_F_ENTRY)
+ dev->vlan_info.def_rx_mcam_ent = entry;
+
+ return nix_vlan_mcam_alloc_and_write(eth_dev, &entry, NIX_INTF_RX,
+ flags & VLAN_DROP);
+}
+
+/* Installs/Removes/Modifies default rx entry */
+static int
+nix_vlan_handle_default_rx_entry(struct rte_eth_dev *eth_dev, bool strip,
+ bool filter, bool enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan = &dev->vlan_info;
+ uint16_t flags = 0;
+ int mcam_idx, rc;
+
+ /* Use default mcam entry to either drop vlan traffic when
+ * vlan filter is on or strip vtag when strip is enabled.
+ * Allocate default entry which matches port mac address
+ * and vtag(ctag/stag) flags with drop action.
+ */
+ if (!vlan->def_rx_mcam_idx) {
+ if (!eth_dev->data->promiscuous)
+ flags = MAC_ADDR_MATCH;
+
+ if (filter && enable)
+ flags |= VTAG_F_MATCH | VLAN_DROP;
+ else if (strip && enable)
+ flags |= VTAG_F_MATCH;
+ else
+ return 0;
+
+ flags |= DEF_F_ENTRY;
+
+ mcam_idx = nix_vlan_mcam_config(eth_dev, 0, flags);
+ if (mcam_idx < 0) {
+ otx2_err("Failed to config vlan mcam");
+ return -mcam_idx;
+ }
+
+ vlan->def_rx_mcam_idx = mcam_idx;
+ return 0;
+ }
+
+ /* Filter is already enabled, so packets are dropped anyway. No
+ * mcam entry processing is needed to enable strip.
+ */
+
+ /* Filter disable request */
+ if (vlan->filter_on && filter && !enable) {
+ vlan->def_rx_mcam_ent.action &= ~((uint64_t)0xF);
+
+ /* Free default rx entry only when
+ * 1. strip is not on and
+ * 2. qinq is either off or its entry was allocated
+ *    before the default entry.
+ */
+ if (vlan->strip_on ||
+ (vlan->qinq_on && !vlan->qinq_before_def)) {
+ if (eth_dev->data->dev_conf.rxmode.mq_mode ==
+ ETH_MQ_RX_RSS)
+ vlan->def_rx_mcam_ent.action |=
+ NIX_RX_ACTIONOP_RSS;
+ else
+ vlan->def_rx_mcam_ent.action |=
+ NIX_RX_ACTIONOP_UCAST;
+ return nix_vlan_mcam_write(eth_dev,
+ vlan->def_rx_mcam_idx,
+ &vlan->def_rx_mcam_ent,
+ NIX_INTF_RX, 1);
+ } else {
+ rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx);
+ if (rc)
+ return rc;
+ vlan->def_rx_mcam_idx = 0;
+ }
+ }
+
+ /* Filter enable request */
+ if (!vlan->filter_on && filter && enable) {
+ vlan->def_rx_mcam_ent.action &= ~((uint64_t)0xF);
+ vlan->def_rx_mcam_ent.action |= NIX_RX_ACTIONOP_DROP;
+ return nix_vlan_mcam_write(eth_dev, vlan->def_rx_mcam_idx,
+ &vlan->def_rx_mcam_ent, NIX_INTF_RX, 1);
+ }
+
+ /* Strip disable request */
+ if (vlan->strip_on && strip && !enable) {
+ if (!vlan->filter_on &&
+ !(vlan->qinq_on && !vlan->qinq_before_def)) {
+ rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx);
+ if (rc)
+ return rc;
+ vlan->def_rx_mcam_idx = 0;
+ }
+ }
+
+ return 0;
+}
+
+/* Installs/Removes default tx entry */
+static int
+nix_vlan_handle_default_tx_entry(struct rte_eth_dev *eth_dev,
+ enum rte_vlan_type type, int vtag_index,
+ int enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan = &dev->vlan_info;
+ struct mcam_entry entry;
+ uint16_t pf_func;
+ int rc;
+
+ if (!vlan->def_tx_mcam_idx && enable) {
+ memset(&entry, 0, sizeof(struct mcam_entry));
+
+ /* Only pf_func is matched, swap its bytes */
+ pf_func = (dev->pf_func & 0xff) << 8;
+ pf_func |= (dev->pf_func >> 8) & 0xff;
+
+ /* PF Func extracted to KW1[47:32] */
+ entry.kw[0] = (uint64_t)pf_func << 32;
+ entry.kw_mask[0] = (BIT_ULL(16) - 1) << 32;
+
+ nix_set_tx_vlan_action(&entry, type, vtag_index);
+ vlan->def_tx_mcam_ent = entry;
+
+ rc = nix_vlan_mcam_alloc_and_write(eth_dev, &entry,
+ NIX_INTF_TX, 0);
+ if (rc < 0)
+ return rc;
+
+ vlan->def_tx_mcam_idx = rc;
+ return 0;
+ }
+
+ if (vlan->def_tx_mcam_idx && !enable) {
+ rc = nix_vlan_mcam_free(dev, vlan->def_tx_mcam_idx);
+ if (rc)
+ return rc;
+ vlan->def_tx_mcam_idx = 0;
+ }
+
+ return 0;
+}
+
+/* Configure vlan stripping on or off */
+static int
+nix_vlan_hw_strip(struct rte_eth_dev *eth_dev, const uint8_t enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_vtag_config *vtag_cfg;
+ int rc = -EINVAL;
+
+ rc = nix_vlan_handle_default_rx_entry(eth_dev, true, false, enable);
+ if (rc) {
+ otx2_err("Failed to config default rx entry");
+ return rc;
+ }
+
+ vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
+ /* cfg_type = 1 for rx vlan cfg */
+ vtag_cfg->cfg_type = VTAG_RX;
+
+ if (enable)
+ vtag_cfg->rx.strip_vtag = 1;
+ else
+ vtag_cfg->rx.strip_vtag = 0;
+
+ /* Always capture */
+ vtag_cfg->rx.capture_vtag = 1;
+ vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+ /* Use rx vtag type index[0] for now */
+ vtag_cfg->rx.vtag_type = 0;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ dev->vlan_info.strip_on = enable;
+ return rc;
+}
+
+/* Configure vlan filtering on or off for all vlans if vlan_id == 0 */
+static int
+nix_vlan_hw_filter(struct rte_eth_dev *eth_dev, const uint8_t enable,
+ uint16_t vlan_id)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan = &dev->vlan_info;
+ struct vlan_entry *entry;
+ int rc = -EINVAL;
+
+ if (!vlan_id && enable) {
+ rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true,
+ enable);
+ if (rc) {
+ otx2_err("Failed to config vlan mcam");
+ return rc;
+ }
+ dev->vlan_info.filter_on = enable;
+ return 0;
+ }
+
+ /* Enable/disable existing vlan filter entries */
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ if (vlan_id) {
+ if (entry->vlan_id == vlan_id) {
+ rc = nix_vlan_mcam_enb_dis(dev,
+ entry->mcam_idx,
+ enable);
+ if (rc)
+ return rc;
+ }
+ } else {
+ rc = nix_vlan_mcam_enb_dis(dev, entry->mcam_idx,
+ enable);
+ if (rc)
+ return rc;
+ }
+ }
+
+ if (!vlan_id && !enable) {
+ rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true,
+ enable);
+ if (rc) {
+ otx2_err("Failed to config vlan mcam");
+ return rc;
+ }
+ dev->vlan_info.filter_on = enable;
+ return 0;
+ }
+
+ return 0;
+}
+
+/* Enable/disable vlan filtering for the given vlan_id */
+int
+otx2_nix_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id,
+ int on)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan = &dev->vlan_info;
+ struct vlan_entry *entry;
+ int entry_exists = 0;
+ int rc = -EINVAL;
+ int mcam_idx;
+
+ if (!vlan_id) {
+ otx2_err("Vlan Id can't be zero");
+ return rc;
+ }
+
+ if (!vlan->def_rx_mcam_idx) {
+ otx2_err("Vlan Filtering is disabled, enable it first");
+ return rc;
+ }
+
+ if (on) {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ if (entry->vlan_id == vlan_id) {
+ /* Vlan entry already exists */
+ entry_exists = 1;
+ /* Mcam entry already allocated */
+ if (entry->mcam_idx) {
+ rc = nix_vlan_hw_filter(eth_dev, on,
+ vlan_id);
+ return rc;
+ }
+ break;
+ }
+ }
+
+ if (!entry_exists) {
+ entry = rte_zmalloc("otx2_nix_vlan_entry",
+ sizeof(struct vlan_entry), 0);
+ if (!entry) {
+ otx2_err("Failed to allocate memory");
+ return -ENOMEM;
+ }
+ }
+
+ /* Enables vlan_id & mac address based filtering */
+ if (eth_dev->data->promiscuous)
+ mcam_idx = nix_vlan_mcam_config(eth_dev, vlan_id,
+ VLAN_ID_MATCH);
+ else
+ mcam_idx = nix_vlan_mcam_config(eth_dev, vlan_id,
+ VLAN_ID_MATCH |
+ MAC_ADDR_MATCH);
+ if (mcam_idx < 0) {
+ otx2_err("Failed to config vlan mcam");
+ if (entry_exists)
+ TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
+ rte_free(entry);
+ return mcam_idx;
+ }
+
+ entry->mcam_idx = mcam_idx;
+ if (!entry_exists) {
+ entry->vlan_id = vlan_id;
+ TAILQ_INSERT_HEAD(&vlan->fltr_tbl, entry, next);
+ }
+ } else {
+ TAILQ_FOREACH(entry, &vlan->fltr_tbl, next) {
+ if (entry->vlan_id == vlan_id) {
+ rc = nix_vlan_mcam_free(dev, entry->mcam_idx);
+ if (rc)
+ return rc;
+ TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
+ rte_free(entry);
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+/* Configure double vlan(qinq) on or off */
+static int
+otx2_nix_config_double_vlan(struct rte_eth_dev *eth_dev,
+ const uint8_t enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan_info;
+ int mcam_idx;
+ int rc;
+
+ vlan_info = &dev->vlan_info;
+
+ if (!enable) {
+ if (!vlan_info->qinq_mcam_idx)
+ return 0;
+
+ rc = nix_vlan_mcam_free(dev, vlan_info->qinq_mcam_idx);
+ if (rc)
+ return rc;
+
+ vlan_info->qinq_mcam_idx = 0;
+ dev->vlan_info.qinq_on = 0;
+ vlan_info->qinq_before_def = 0;
+ return 0;
+ }
+
+ if (eth_dev->data->promiscuous)
+ mcam_idx = nix_vlan_mcam_config(eth_dev, 0, QINQ_F_MATCH);
+ else
+ mcam_idx = nix_vlan_mcam_config(eth_dev, 0,
+ QINQ_F_MATCH | MAC_ADDR_MATCH);
+ if (mcam_idx < 0)
+ return mcam_idx;
+
+ if (!vlan_info->def_rx_mcam_idx)
+ vlan_info->qinq_before_def = 1;
+
+ vlan_info->qinq_mcam_idx = mcam_idx;
+ dev->vlan_info.qinq_on = 1;
+ return 0;
+}
+
+int
+otx2_nix_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t offloads = dev->rx_offloads;
+ struct rte_eth_rxmode *rxmode;
+ int rc = 0;
+
+ rxmode = &eth_dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ otx2_err("Extend offload not supported");
+ return -ENOTSUP;
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ rc = nix_vlan_hw_strip(eth_dev, true);
+ } else {
+ offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ rc = nix_vlan_hw_strip(eth_dev, false);
+ }
+ if (rc)
+ goto done;
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
+ offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ rc = nix_vlan_hw_filter(eth_dev, true, 0);
+ } else {
+ offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ rc = nix_vlan_hw_filter(eth_dev, false, 0);
+ }
+ if (rc)
+ goto done;
+ }
+
+ if (rxmode->offloads & DEV_RX_OFFLOAD_QINQ_STRIP) {
+ if (!dev->vlan_info.qinq_on) {
+ offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ rc = otx2_nix_config_double_vlan(eth_dev, true);
+ if (rc)
+ goto done;
+ }
+ } else {
+ if (dev->vlan_info.qinq_on) {
+ offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+ rc = otx2_nix_config_double_vlan(eth_dev, false);
+ if (rc)
+ goto done;
+ }
+ }
+
+ if (offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP)) {
+ dev->rx_offloads |= offloads;
+ dev->rx_offload_flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+ otx2_eth_set_rx_function(eth_dev);
+ }
+
+done:
+ return rc;
+}
+
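+/* Program the outer/inner TPID used for vlan offloads via the mailbox */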
+int
+otx2_nix_vlan_tpid_set(struct rte_eth_dev *eth_dev,
+ enum rte_vlan_type type, uint16_t tpid)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct nix_set_vlan_tpid *tpid_cfg;
+ struct otx2_mbox *mbox = dev->mbox;
+ int rc;
+
+ tpid_cfg = otx2_mbox_alloc_msg_nix_set_vlan_tpid(mbox);
+
+ tpid_cfg->tpid = tpid;
+ if (type == ETH_VLAN_TYPE_OUTER)
+ tpid_cfg->vlan_type = NIX_VLAN_TYPE_OUTER;
+ else
+ tpid_cfg->vlan_type = NIX_VLAN_TYPE_INNER;
+
+ rc = otx2_mbox_process(mbox);
+ if (rc)
+ return rc;
+
+ if (type == ETH_VLAN_TYPE_OUTER)
+ dev->vlan_info.outer_vlan_tpid = tpid;
+ else
+ dev->vlan_info.inner_vlan_tpid = tpid;
+ return 0;
+}
+
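+/* Enable/disable PVID insertion on tx: allocate or free a tx vtag index via
+ * the mailbox and install or remove the default tx entry that inserts it.
+ */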
+int
+otx2_nix_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct otx2_eth_dev *otx2_dev = otx2_eth_pmd_priv(dev);
+ struct otx2_mbox *mbox = otx2_dev->mbox;
+ struct nix_vtag_config *vtag_cfg;
+ struct nix_vtag_config_rsp *rsp;
+ struct otx2_vlan_info *vlan;
+ int rc, rc1, vtag_index = 0;
+
+ if (vlan_id == 0) {
+ otx2_err("vlan id can't be zero");
+ return -EINVAL;
+ }
+
+ vlan = &otx2_dev->vlan_info;
+
+ if (on && vlan->pvid_insert_on && vlan->pvid == vlan_id) {
+ otx2_err("pvid %d is already enabled", vlan_id);
+ return -EINVAL;
+ }
+
+ if (on && vlan->pvid_insert_on && vlan->pvid != vlan_id) {
+ otx2_err("another pvid is enabled, disable that first");
+ return -EINVAL;
+ }
+
+ /* No pvid active */
+ if (!on && !vlan->pvid_insert_on)
+ return 0;
+
+ /* Given pvid already disabled */
+ if (!on && vlan->pvid != vlan_id)
+ return 0;
+
+ vtag_cfg = otx2_mbox_alloc_msg_nix_vtag_cfg(mbox);
+
+ if (on) {
+ vtag_cfg->cfg_type = VTAG_TX;
+ vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+
+ if (vlan->outer_vlan_tpid)
+ vtag_cfg->tx.vtag0 = ((uint32_t)vlan->outer_vlan_tpid
+ << 16) | vlan_id;
+ else
+ vtag_cfg->tx.vtag0 =
+ ((RTE_ETHER_TYPE_VLAN << 16) | vlan_id);
+ vtag_cfg->tx.cfg_vtag0 = 1;
+ } else {
+ vtag_cfg->cfg_type = VTAG_TX;
+ vtag_cfg->vtag_size = NIX_VTAGSIZE_T4;
+
+ vtag_cfg->tx.vtag0_idx = vlan->outer_vlan_idx;
+ vtag_cfg->tx.free_vtag0 = 1;
+ }
+
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ if (on) {
+ vtag_index = rsp->vtag0_idx;
+ } else {
+ vlan->pvid = 0;
+ vlan->pvid_insert_on = 0;
+ vlan->outer_vlan_idx = 0;
+ }
+
+ rc = nix_vlan_handle_default_tx_entry(dev, ETH_VLAN_TYPE_OUTER,
+ vtag_index, on);
+ if (rc < 0) {
+ printf("Default tx entry failed with rc %d\n", rc);
+ vtag_cfg->tx.vtag0_idx = vtag_index;
+ vtag_cfg->tx.free_vtag0 = 1;
+ vtag_cfg->tx.cfg_vtag0 = 0;
+
+ rc1 = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc1)
+ otx2_err("Vtag free failed");
+
+ return rc;
+ }
+
+ if (on) {
+ vlan->pvid = vlan_id;
+ vlan->pvid_insert_on = 1;
+ vlan->outer_vlan_idx = vtag_index;
+ }
+
+ return 0;
+}
+
+void otx2_nix_vlan_strip_queue_set(__rte_unused struct rte_eth_dev *dev,
+ __rte_unused uint16_t queue,
+ __rte_unused int on)
+{
+ otx2_err("Not Supported");
+}
+
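+/* Each enabled nibble in the mask contributes 4 bits to the MCAM search key,
+ * so popcount(mask) * 4 is the bit offset at which the next field starts.
+ */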
+static int
+nix_vlan_rx_mkex_offset(uint64_t mask)
+{
+ int nib_count = 0;
+
+ while (mask) {
+ nib_count += mask & 1;
+ mask >>= 1;
+ }
+
+ return nib_count * 4;
+}
+
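+/* Verify that the MKEX profile extracts the channel and LB layer type
+ * nibbles into the rx key and cache the LA/LB extract info used to build
+ * vlan MCAM entries.
+ */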
+static int
+nix_vlan_get_mkex_info(struct otx2_eth_dev *dev)
+{
+ struct vlan_mkex_info *mkex = &dev->vlan_info.mkex;
+ struct otx2_npc_flow_info *npc = &dev->npc_flow;
+ struct npc_xtract_info *x_info = NULL;
+ uint64_t rx_keyx;
+ otx2_dxcfg_t *p;
+ int rc = -EINVAL;
+
+ if (npc == NULL) {
+ otx2_err("Missing npc mkex configuration");
+ return rc;
+ }
+
+#define NPC_KEX_CHAN_NIBBLE_ENA 0x7ULL
+#define NPC_KEX_LB_LTYPE_NIBBLE_ENA 0x1000ULL
+#define NPC_KEX_LB_LTYPE_NIBBLE_MASK 0xFFFULL
+
+ rx_keyx = npc->keyx_supp_nmask[NPC_MCAM_RX];
+ if ((rx_keyx & NPC_KEX_CHAN_NIBBLE_ENA) != NPC_KEX_CHAN_NIBBLE_ENA)
+ return rc;
+
+ if ((rx_keyx & NPC_KEX_LB_LTYPE_NIBBLE_ENA) !=
+ NPC_KEX_LB_LTYPE_NIBBLE_ENA)
+ return rc;
+
+ mkex->lb_lt_offset =
+ nix_vlan_rx_mkex_offset(rx_keyx & NPC_KEX_LB_LTYPE_NIBBLE_MASK);
+
+ p = &npc->prx_dxcfg;
+ x_info = &(*p)[NPC_MCAM_RX][NPC_LID_LA][NPC_LT_LA_ETHER].xtract[0];
+ memcpy(&mkex->la_xtract, x_info, sizeof(struct npc_xtract_info));
+ x_info = &(*p)[NPC_MCAM_RX][NPC_LID_LB][NPC_LT_LB_CTAG].xtract[0];
+ memcpy(&mkex->lb_xtract, x_info, sizeof(struct npc_xtract_info));
+
+ return 0;
+}
+
+static void nix_vlan_reinstall_vlan_filters(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct vlan_entry *entry;
+ int rc;
+
+ /* VLAN filters can't be set without setting filtering on */
+ rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true, true);
+ if (rc) {
+ otx2_err("Failed to reinstall vlan filters");
+ return;
+ }
+
+ TAILQ_FOREACH(entry, &dev->vlan_info.fltr_tbl, next) {
+ rc = otx2_nix_vlan_filter_set(eth_dev, entry->vlan_id, true);
+ if (rc)
+ otx2_err("Failed to reinstall filter for vlan:%d",
+ entry->vlan_id);
+ }
+}
+
+int
+otx2_nix_vlan_offload_init(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc, mask;
+
+ /* Port initialized for first time or restarted */
+ if (!dev->configured) {
+ rc = nix_vlan_get_mkex_info(dev);
+ if (rc) {
+ otx2_err("Failed to get vlan mkex info rc=%d", rc);
+ return rc;
+ }
+
+ TAILQ_INIT(&dev->vlan_info.fltr_tbl);
+ } else {
+ /* Reinstall all mcam entries now if filter offload is set */
+ if (eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
+ nix_vlan_reinstall_vlan_filters(eth_dev);
+ }
+
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK;
+ rc = otx2_nix_vlan_offload_set(eth_dev, mask);
+ if (rc) {
+ otx2_err("Failed to set vlan offload rc=%d", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+int
+otx2_nix_vlan_fini(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_vlan_info *vlan = &dev->vlan_info;
+ struct vlan_entry *entry, *tmp;
+ int rc;
+
+ TAILQ_FOREACH_SAFE(entry, &vlan->fltr_tbl, next, tmp) {
+ if (!dev->configured) {
+ TAILQ_REMOVE(&vlan->fltr_tbl, entry, next);
+ rte_free(entry);
+ } else {
+ /* MCAM entries freed by flow_fini & lf_free on
+ * port stop.
+ */
+ entry->mcam_idx = 0;
+ }
+ }
+
+ if (!dev->configured) {
+ if (vlan->def_rx_mcam_idx) {
+ rc = nix_vlan_mcam_free(dev, vlan->def_rx_mcam_idx);
+ if (rc)
+ return rc;
+ }
+ }
+
+ otx2_nix_config_double_vlan(eth_dev, false);
+ vlan->def_rx_mcam_idx = 0;
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/octeontx2/rte_pmd_octeontx2_version.map b/src/spdk/dpdk/drivers/net/octeontx2/rte_pmd_octeontx2_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/octeontx2/rte_pmd_octeontx2_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};