Diffstat (limited to 'src/spdk/dpdk/drivers/net/cxgbe')
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/Makefile  57
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h  835
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/common.h  554
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h  60
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c  5701
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h  144
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h  625
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h  186
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h  966
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h  155
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h  47
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h  2452
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c  880
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h  15
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c  193
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h  31
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h  111
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h  260
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c  1259
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c  1433
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h  276
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c  1458
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h  44
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c  2238
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h  89
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h  55
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c  217
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c  302
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/l2t.c  229
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/l2t.h  58
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/meson.build  17
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.c  241
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.h  52
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map  3
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/sge.c  2658
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/smt.c  230
-rw-r--r--  src/spdk/dpdk/drivers/net/cxgbe/smt.h  44
37 files changed, 24175 insertions, 0 deletions
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/Makefile b/src/spdk/dpdk/drivers/net/cxgbe/Makefile
new file mode 100644
index 000000000..53b2bb56d
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/Makefile
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2014-2018 Chelsio Communications.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_cxgbe.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_cxgbe_version.map
+
+#
+# CFLAGS for gcc/clang
+#
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
+CFLAGS += -Wno-deprecated
+endif
+endif
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs
+LDLIBS += -lrte_bus_pci
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings in them
+#
+BASE_DRIVER_OBJS=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbevf_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += clip_tbl.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += mps_tcam.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += l2t.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += smt.c
+SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4vf_hw.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h b/src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h
new file mode 100644
index 000000000..62de35c7c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/adapter.h
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+/* This file should not be included directly. Include common.h instead. */
+
+#ifndef __T4_ADAPTER_H__
+#define __T4_ADAPTER_H__
+
+#include <rte_bus_pci.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+#include <rte_rwlock.h>
+#include <rte_ethdev.h>
+
+#include "../cxgbe_compat.h"
+#include "../cxgbe_ofld.h"
+#include "t4_regs_values.h"
+
+enum {
+ MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */
+ MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
+};
+
+struct adapter;
+struct sge_rspq;
+
+enum {
+ PORT_RSS_DONE = (1 << 0),
+};
+
+struct port_info {
+ struct adapter *adapter; /* adapter that this port belongs to */
+ struct rte_eth_dev *eth_dev; /* associated rte eth device */
+ struct port_stats stats_base; /* port statistics base */
+ struct link_config link_cfg; /* link configuration info */
+
+ unsigned long flags; /* port related flags */
+ short int xact_addr_filt; /* index of exact MAC address filter */
+
+ u16 viid; /* associated virtual interface id */
+ s8 mdio_addr; /* address of the PHY */
+ u8 port_type; /* firmware port type */
+ u8 mod_type; /* firmware module type */
+ u8 port_id; /* physical port ID */
+ u8 pidx; /* port index for this PF */
+ u8 tx_chan; /* associated channel */
+
+ u8 n_rx_qsets; /* # of rx qsets */
+ u8 n_tx_qsets; /* # of tx qsets */
+ u8 first_qset; /* index of first qset */
+
+ u16 *rss; /* rss table */
+ u8 rss_mode; /* rss mode */
+ u16 rss_size; /* size of VI's RSS table slice */
+ u64 rss_hf; /* RSS Hash Function */
+
+ /* viid fields either returned by fw
+ * or decoded by parsing viid by driver.
+ */
+ u8 vin;
+ u8 vivld;
+};
+
+/* Enable or disable autonegotiation. If this is set to enable,
+ * any forced link mode settings are completely ignored.
+ */
+#define AUTONEG_DISABLE 0x00
+#define AUTONEG_ENABLE 0x01
+
+enum { /* adapter flags */
+ FULL_INIT_DONE = (1 << 0),
+ USING_MSI = (1 << 1),
+ USING_MSIX = (1 << 2),
+ FW_QUEUE_BOUND = (1 << 3),
+ FW_OK = (1 << 4),
+ CFG_QUEUES = (1 << 5),
+ MASTER_PF = (1 << 6),
+};
+
+struct rx_sw_desc { /* SW state per Rx descriptor */
+ void *buf; /* struct page or mbuf */
+ dma_addr_t dma_addr;
+};
+
+struct sge_fl { /* SGE free-buffer queue state */
+ /* RO fields */
+ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
+
+ dma_addr_t addr; /* bus address of HW ring start */
+ __be64 *desc; /* address of HW Rx descriptor ring */
+
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+
+ unsigned int cntxt_id; /* SGE relative QID for the free list */
+ unsigned int size; /* capacity of free list */
+
+ unsigned int avail; /* # of available Rx buffers */
+ unsigned int pend_cred; /* new buffers since last FL DB ring */
+ unsigned int cidx; /* consumer index */
+ unsigned int pidx; /* producer index */
+
+ unsigned long alloc_failed; /* # of times buffer allocation failed */
+ unsigned long low; /* # of times momentarily starving */
+};
+
+#define MAX_MBUF_FRAGS (16384 / 512 + 2)
+
+/* A packet gather list */
+struct pkt_gl {
+ union {
+ struct rte_mbuf *mbufs[MAX_MBUF_FRAGS];
+ } /* UNNAMED */;
+ void *va; /* virtual address of first byte */
+ unsigned int nfrags; /* # of fragments */
+ unsigned int tot_len; /* total length of fragments */
+ bool usembufs; /* use mbufs for fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *gl);
+
+struct sge_rspq { /* state for an SGE response queue */
+ struct adapter *adapter; /* adapter that this queue belongs to */
+ struct rte_eth_dev *eth_dev; /* associated rte eth device */
+ struct rte_mempool *mb_pool; /* associated mempool */
+
+ dma_addr_t phys_addr; /* physical address of the ring */
+ __be64 *desc; /* address of HW response ring */
+ const __be64 *cur_desc; /* current descriptor in queue */
+
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+ struct sge_qstat *stat;
+
+ unsigned int cidx; /* consumer index */
+ unsigned int gts_idx; /* last gts write sent */
+ unsigned int iqe_len; /* entry size */
+ unsigned int size; /* capacity of response queue */
+ int offset; /* offset into current Rx buffer */
+
+ u8 gen; /* current generation bit */
+ u8 intr_params; /* interrupt holdoff parameters */
+ u8 next_intr_params; /* holdoff params for next interrupt */
+ u8 pktcnt_idx; /* interrupt packet threshold */
+ u8 port_id; /* associated port-id */
+ u8 idx; /* queue index within its group */
+ u16 cntxt_id; /* SGE relative QID for the response Q */
+ u16 abs_id; /* absolute SGE id for the response q */
+
+ rspq_handler_t handler; /* associated handler for this response q */
+};
+
+struct sge_eth_rx_stats { /* Ethernet rx queue statistics */
+ u64 pkts; /* # of ethernet packets */
+ u64 rx_bytes; /* # of ethernet bytes */
+ u64 rx_cso; /* # of Rx checksum offloads */
+ u64 vlan_ex; /* # of Rx VLAN extractions */
+ u64 rx_drops; /* # of packets dropped due to no mem */
+};
+
+struct sge_eth_rxq { /* a SW Ethernet Rx queue */
+ struct sge_rspq rspq;
+ struct sge_fl fl;
+ struct sge_eth_rx_stats stats;
+ bool usembufs; /* one ingress packet per mbuf FL buffer */
+} __rte_cache_aligned;
+
+/*
+ * Currently there are two types of coalesce WR. Type 0 needs 48 bytes per
+ * packet (if one sgl is present) and type 1 needs 32 bytes. This means
+ * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit
+ * 15 packets. We need to keep track of the mbuf pointers in a coalesce WR
+ * to be able to free those mbufs when we get completions back from the FW.
+ * Allocating the maximum number of pointers in every tx desc is a waste
+ * of memory resources so we only store 2 pointers per tx desc which should
+ * be enough since a tx desc can only fit 2 packets in the best case
+ * scenario where a packet needs 32 bytes.
+ */
+#define ETH_COALESCE_PKT_NUM 15
+#define ETH_COALESCE_VF_PKT_NUM 7
+#define ETH_COALESCE_PKT_PER_DESC 2
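
A quick check of the arithmetic in the comment above (an editorial sketch, assuming the 512-byte SGE_MAX_WR_LEN from t4_hw.h as the work request budget):

/* Sketch only: verify the coalesce capacity figures quoted above,
 * assuming a 512-byte maximum work request.
 */
#define EXAMPLE_MAX_WR_LEN 512			/* assumed SGE_MAX_WR_LEN */

/* Type 0: 48 bytes per packet -> 512 / 48 = 10 packets per WR.
 * Type 1: 32 bytes per packet -> 512 / 32 = 16 slots, one of which
 *         is consumed by the WR header -> 15 packets per WR.
 */
_Static_assert(EXAMPLE_MAX_WR_LEN / 48 == 10, "type-0 coalesce capacity");
_Static_assert(EXAMPLE_MAX_WR_LEN / 32 - 1 == ETH_COALESCE_PKT_NUM,
	       "type-1 coalesce capacity");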
+
+struct tx_eth_coal_desc {
+ struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC];
+ struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC];
+ int idx;
+};
+
+struct tx_desc {
+ __be64 flit[8];
+};
+
+struct tx_sw_desc { /* SW state per Tx descriptor */
+ struct rte_mbuf *mbuf;
+ struct ulptx_sgl *sgl;
+ struct tx_eth_coal_desc coalesce;
+};
+
+enum {
+ EQ_STOPPED = (1 << 0),
+};
+
+struct eth_coalesce {
+ unsigned char *ptr;
+ unsigned char type;
+ unsigned int idx;
+ unsigned int len;
+ unsigned int flits;
+ unsigned int max;
+ __u8 ethmacdst[ETHER_ADDR_LEN];
+ __u8 ethmacsrc[ETHER_ADDR_LEN];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+struct sge_txq {
+ struct tx_desc *desc; /* address of HW Tx descriptor ring */
+ struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
+ struct sge_qstat *stat; /* queue status entry */
+ struct eth_coalesce coalesce; /* coalesce info */
+
+ uint64_t phys_addr; /* physical address of the ring */
+
+ void __iomem *bar2_addr; /* address of BAR2 Queue registers */
+ unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */
+
+ unsigned int cntxt_id; /* SGE relative QID for the Tx Q */
+ unsigned int in_use; /* # of in-use Tx descriptors */
+ unsigned int size; /* # of descriptors */
+ unsigned int cidx; /* SW consumer index */
+ unsigned int pidx; /* producer index */
+ unsigned int dbidx; /* last idx when db ring was done */
+ unsigned int equeidx; /* last sent credit request */
+ unsigned int last_pidx; /* last pidx recorded by tx monitor */
+ unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */
+ unsigned int abs_id;
+
+ int db_disabled; /* doorbell state */
+ unsigned short db_pidx; /* doorbell producer index */
+ unsigned short db_pidx_inc; /* doorbell producer increment */
+};
+
+struct sge_eth_tx_stats { /* Ethernet tx queue statistics */
+ u64 pkts; /* # of ethernet packets */
+ u64 tx_bytes; /* # of ethernet bytes */
+ u64 tso; /* # of TSO requests */
+ u64 tx_cso; /* # of Tx checksum offloads */
+ u64 vlan_ins; /* # of Tx VLAN insertions */
+ u64 mapping_err; /* # of I/O MMU packet mapping errors */
+ u64 coal_wr; /* # of coalesced wr */
+ u64 coal_pkts; /* # of coalesced packets */
+};
+
+struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
+ struct sge_txq q;
+ struct rte_eth_dev *eth_dev; /* port that this queue belongs to */
+ struct rte_eth_dev_data *data;
+ struct sge_eth_tx_stats stats; /* queue statistics */
+ rte_spinlock_t txq_lock;
+
+ unsigned int flags; /* flags for state of the queue */
+} __rte_cache_aligned;
+
+struct sge_ctrl_txq { /* State for an SGE control Tx queue */
+ struct sge_txq q; /* txq */
+ struct adapter *adapter; /* adapter associated with this queue */
+ rte_spinlock_t ctrlq_lock; /* control queue lock */
+ u8 full; /* the Tx ring is full */
+ u64 txp; /* number of transmits */
+ struct rte_mempool *mb_pool; /* mempool to generate ctrl pkts */
+} __rte_cache_aligned;
+
+struct sge {
+ struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+ struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+ struct sge_rspq fw_evtq __rte_cache_aligned;
+ struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
+
+ u16 max_ethqsets; /* # of available Ethernet queue sets */
+ u32 stat_len; /* length of status page at ring end */
+ u32 pktshift; /* padding between CPL & packet data */
+
+ /* response queue interrupt parameters */
+ u16 timer_val[SGE_NTIMERS];
+ u8 counter_val[SGE_NCOUNTERS];
+
+ u32 fl_align; /* response queue message alignment */
+ u32 fl_pg_order; /* large page allocation size */
+ u32 fl_starve_thres; /* Free List starvation threshold */
+};
+
+#define T4_OS_NEEDS_MBOX_LOCKING 1
+
+/*
+ * OS Lock/List primitives for those interfaces in the Common Code which
+ * need this.
+ */
+
+struct mbox_entry {
+ TAILQ_ENTRY(mbox_entry) next;
+};
+
+TAILQ_HEAD(mbox_list, mbox_entry);
+
+struct adapter_devargs {
+ bool keep_ovlan;
+ bool force_link_up;
+ bool tx_mode_latency;
+ u32 filtermode;
+ u32 filtermask;
+};
+
+struct adapter {
+ struct rte_pci_device *pdev; /* associated rte pci device */
+ struct rte_eth_dev *eth_dev; /* first port's rte eth device */
+ struct adapter_params params; /* adapter parameters */
+ struct port_info *port[MAX_NPORTS];/* ports belonging to this adapter */
+ struct sge sge; /* associated SGE */
+
+ /* support for single-threading access to adapter mailbox registers */
+ struct mbox_list mbox_list;
+ rte_spinlock_t mbox_lock;
+
+ u8 *regs; /* pointer to registers region */
+ u8 *bar2; /* pointer to bar2 region */
+ unsigned long flags; /* adapter flags */
+ unsigned int mbox; /* associated mailbox */
+ unsigned int pf; /* associated physical function id */
+
+ unsigned int vpd_busy;
+ unsigned int vpd_flag;
+
+ int use_unpacked_mode; /* unpacked rx mode state */
+ rte_spinlock_t win0_lock;
+
+ rte_spinlock_t flow_lock; /* Serialize access for rte_flow ops */
+
+ unsigned int clipt_start; /* CLIP table start */
+ unsigned int clipt_end; /* CLIP table end */
+ unsigned int l2t_start; /* Layer 2 table start */
+ unsigned int l2t_end; /* Layer 2 table end */
+ struct clip_tbl *clipt; /* CLIP table */
+ struct l2t_data *l2t; /* Layer 2 table */
+ struct smt_data *smt; /* Source mac table */
+ struct mpstcam_table *mpstcam;
+
+ struct tid_info tids; /* Info used to access TID related tables */
+
+ struct adapter_devargs devargs;
+};
+
+/**
+ * t4_os_rwlock_init - initialize rwlock
+ * @lock: the rwlock
+ */
+static inline void t4_os_rwlock_init(rte_rwlock_t *lock)
+{
+ rte_rwlock_init(lock);
+}
+
+/**
+ * t4_os_write_lock - get a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_lock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_lock(lock);
+}
+
+/**
+ * t4_os_write_unlock - unlock a write lock
+ * @lock: the rwlock
+ */
+static inline void t4_os_write_unlock(rte_rwlock_t *lock)
+{
+ rte_rwlock_write_unlock(lock);
+}
+
+/**
+ * ethdev2pinfo - return the port_info structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct port_info associated with a rte_eth_dev
+ */
+static inline struct port_info *ethdev2pinfo(const struct rte_eth_dev *dev)
+{
+ return dev->data->dev_private;
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(const struct adapter *adap, int idx)
+{
+ return adap->port[idx];
+}
+
+/**
+ * ethdev2adap - return the adapter structure associated with a rte_eth_dev
+ * @dev: the rte_eth_dev
+ *
+ * Return the struct adapter associated with a rte_eth_dev
+ */
+static inline struct adapter *ethdev2adap(const struct rte_eth_dev *dev)
+{
+ return ethdev2pinfo(dev)->adapter;
+}
+
+#define CXGBE_PCI_REG(reg) rte_read32(reg)
+
+static inline uint64_t cxgbe_read_addr64(volatile void *addr)
+{
+ uint64_t val = CXGBE_PCI_REG(addr);
+ uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4));
+
+ val2 = (uint64_t)(val2 << 32);
+ val += val2;
+ return val;
+}
+
+static inline uint32_t cxgbe_read_addr(volatile void *addr)
+{
+ return CXGBE_PCI_REG(addr);
+}
+
+#define CXGBE_PCI_REG_ADDR(adap, reg) \
+ ((volatile uint32_t *)((char *)(adap)->regs + (reg)))
+
+#define CXGBE_READ_REG(adap, reg) \
+ cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg)))
+
+#define CXGBE_READ_REG64(adap, reg) \
+ cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))
+
+#define CXGBE_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
+
+#define CXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((value), (reg))
+
+#define CXGBE_WRITE_REG(adap, reg, value) \
+ CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
+#define CXGBE_WRITE_REG_RELAXED(adap, reg, value) \
+ CXGBE_PCI_REG_WRITE_RELAXED(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
+static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
+{
+ CXGBE_PCI_REG_WRITE(addr, val);
+ CXGBE_PCI_REG_WRITE(((volatile uint8_t *)(addr) + 4), (val >> 32));
+ return val;
+}
+
+#define CXGBE_WRITE_REG64(adap, reg, value) \
+ cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
+/**
+ * t4_read_reg - read a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 32-bit value of the given HW register.
+ */
+static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+ return CXGBE_READ_REG(adapter, reg_addr);
+}
+
+/**
+ * t4_write_reg - write a HW register with barrier
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+ CXGBE_WRITE_REG(adapter, reg_addr, val);
+}
+
+/**
+ * t4_write_reg_relaxed - write a HW register with no barrier
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg_relaxed(struct adapter *adapter, u32 reg_addr,
+ u32 val)
+{
+ CXGBE_WRITE_REG_RELAXED(adapter, reg_addr, val);
+}
+
+/**
+ * t4_read_reg64 - read a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 64-bit value of the given HW register.
+ */
+static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
+{
+ return CXGBE_READ_REG64(adapter, reg_addr);
+}
+
+/**
+ * t4_write_reg64 - write a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 64-bit value into the given HW register.
+ */
+static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
+ u64 val)
+{
+ CXGBE_WRITE_REG64(adapter, reg_addr, val);
+}
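
Since the hardware is accessed through 32-bit MMIO here, the 64-bit accessors above split each access into two 32-bit operations: the low word at reg and the high word at reg + 4, as cxgbe_read_addr64() and cxgbe_write_addr64() show. A minimal sketch with a hypothetical register offset:

#define EXAMPLE_CNT_REG 0x10400	/* hypothetical 64-bit counter offset */

static inline void example_split_read(struct adapter *adap)
{
	u64 whole = t4_read_reg64(adap, EXAMPLE_CNT_REG);
	u32 lo = t4_read_reg(adap, EXAMPLE_CNT_REG);		/* low word */
	u32 hi = t4_read_reg(adap, EXAMPLE_CNT_REG + 4);	/* high word */

	/* cxgbe_read_addr64() composes the same value as lo | (hi << 32);
	 * note the two halves are not read atomically, so a live counter
	 * can tick between the two 32-bit reads.
	 */
	RTE_SET_USED(whole);
	RTE_SET_USED(lo);
	RTE_SET_USED(hi);
}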
+
+#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
+#define PCI_CAPABILITY_LIST 0x34
+/* Offset of first capability list entry */
+#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
+#define PCI_CAP_LIST_ID 0 /* Capability ID */
+#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
+#define PCI_EXP_DEVCTL 0x0008 /* Device control */
+#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCTL_EXT_TAG 0x0100 /* Extended Tag Field Enable */
+#define PCI_EXP_DEVCTL_PAYLOAD 0x00E0 /* Max payload */
+#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */
+#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */
+#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
+#define PCI_VPD_DATA 4 /* 32-bits of data returned here */
+
+/**
+ * t4_os_pci_write_cfg4 - 32-bit write to PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given register in PCI config space.
+ */
+static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
+ off_t val)
+{
+ u32 val32 = val;
+
+ if (rte_pci_write_config(adapter->pdev, &val32, sizeof(val32),
+ addr) < 0)
+ dev_err(adapter, "Can't write to PCI config space\n");
+}
+
+/**
+ * t4_os_pci_read_cfg4 - read a 32-bit value from PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: where to store the value read
+ *
+ * Read a 32-bit value from the given register in PCI config space.
+ */
+static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr,
+ u32 *val)
+{
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
+ addr) < 0)
+ dev_err(adapter, "Can't read from PCI config space\n");
+}
+
+/**
+ * t4_os_pci_write_cfg2 - 16-bit write to PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: the value to write
+ *
+ * Write a 16-bit value into the given register in PCI config space.
+ */
+static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
+ off_t val)
+{
+ u16 val16 = val;
+
+ if (rte_pci_write_config(adapter->pdev, &val16, sizeof(val16),
+ addr) < 0)
+ dev_err(adapter, "Can't write to PCI config space\n");
+}
+
+/**
+ * t4_os_pci_read_cfg2 - read a 16-bit value from PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: where to store the value read
+ *
+ * Read a 16-bit value from the given register in PCI config space.
+ */
+static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
+ u16 *val)
+{
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
+ addr) < 0)
+ dev_err(adapter, "Can't read from PCI config space\n");
+}
+
+/**
+ * t4_os_pci_read_cfg - read an 8-bit value from PCI config space
+ * @adapter: the adapter
+ * @addr: the register address
+ * @val: where to store the value read
+ *
+ * Read an 8-bit value from the given register in PCI config space.
+ */
+static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr,
+ u8 *val)
+{
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
+ addr) < 0)
+ dev_err(adapter, "Can't read from PCI config space\n");
+}
+
+/**
+ * t4_os_find_pci_capability - lookup a capability in the PCI capability list
+ * @adapter: the adapter
+ * @cap: the capability
+ *
+ * Return the address of the given capability within the PCI capability list.
+ */
+static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap)
+{
+ u16 status;
+ int ttl = 48;
+ u8 pos = 0;
+ u8 id = 0;
+
+ t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status);
+ if (!(status & PCI_STATUS_CAP_LIST)) {
+ dev_err(adapter, "PCIe capability reading failed\n");
+ return -1;
+ }
+
+ t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos);
+ while (ttl-- && pos >= 0x40) {
+ pos &= ~3;
+ t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id);
+
+ if (id == 0xff)
+ break;
+
+ if (id == cap)
+ return (int)pos;
+
+ t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos);
+ }
+ return 0;
+}
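
A usage sketch (error handling trimmed): walk the capability list for the PCI Express capability and read its Device Control register with the config-space helpers above.

static inline u16 example_read_pcie_devctl(struct adapter *adap)
{
	u16 devctl = 0;
	int pos = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);

	if (pos > 0)
		t4_os_pci_read_cfg2(adap, pos + PCI_EXP_DEVCTL, &devctl);
	return devctl;
}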
+
+/**
+ * t4_os_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the
+ * common code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
+ u8 hw_addr[])
+{
+ struct port_info *pi = adap2pinfo(adapter, port_idx);
+
+ rte_ether_addr_copy((struct rte_ether_addr *)hw_addr,
+ &pi->eth_dev->data->mac_addrs[0]);
+}
+
+/**
+ * t4_os_lock_init - initialize spinlock
+ * @lock: the spinlock
+ */
+static inline void t4_os_lock_init(rte_spinlock_t *lock)
+{
+ rte_spinlock_init(lock);
+}
+
+/**
+ * t4_os_lock - spin until lock is acquired
+ * @lock: the spinlock
+ */
+static inline void t4_os_lock(rte_spinlock_t *lock)
+{
+ rte_spinlock_lock(lock);
+}
+
+/**
+ * t4_os_unlock - unlock a spinlock
+ * @lock: the spinlock
+ */
+static inline void t4_os_unlock(rte_spinlock_t *lock)
+{
+ rte_spinlock_unlock(lock);
+}
+
+/**
+ * t4_os_trylock - try to get a lock
+ * @lock: the spinlock
+ */
+static inline int t4_os_trylock(rte_spinlock_t *lock)
+{
+ return rte_spinlock_trylock(lock);
+}
+
+/**
+ * t4_os_init_list_head - initialize a mailbox list head
+ * @head: head of list to initialize [to empty]
+ */
+static inline void t4_os_init_list_head(struct mbox_list *head)
+{
+ TAILQ_INIT(head);
+}
+
+static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head)
+{
+ return TAILQ_FIRST(head);
+}
+
+/**
+ * t4_os_atomic_add_tail - Enqueue list element atomically onto list
+ * @entry: the entry to be added to the queue
+ * @head: current head of the linked list
+ * @lock: lock to use to guarantee atomicity
+ */
+static inline void t4_os_atomic_add_tail(struct mbox_entry *entry,
+ struct mbox_list *head,
+ rte_spinlock_t *lock)
+{
+ t4_os_lock(lock);
+ TAILQ_INSERT_TAIL(head, entry, next);
+ t4_os_unlock(lock);
+}
+
+/**
+ * t4_os_atomic_list_del - Dequeue list element atomically from list
+ * @entry: the entry to be removed/dequeued from the list.
+ * @head: current head of the linked list
+ * @lock: lock to use to guarantee atomicity
+ */
+static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
+ struct mbox_list *head,
+ rte_spinlock_t *lock)
+{
+ t4_os_lock(lock);
+ TAILQ_REMOVE(head, entry, next);
+ t4_os_unlock(lock);
+}
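
Together these primitives implement the FIFO scheme that serializes mailbox access: a caller appends itself to mbox_list, spins until it is the first entry, issues its command, then removes itself. A condensed sketch of that pattern (the real logic, with timeouts and backoff, lives in t4_wr_mbox_meat_timeout() in t4_hw.c):

static inline void example_mbox_acquire(struct adapter *adap,
					struct mbox_entry *me)
{
	t4_os_atomic_add_tail(me, &adap->mbox_list, &adap->mbox_lock);

	/* Busy-wait until this entry reaches the head of the queue. */
	while (t4_os_list_first_entry(&adap->mbox_list) != me)
		rte_delay_ms(1);
}

static inline void example_mbox_release(struct adapter *adap,
					struct mbox_entry *me)
{
	t4_os_atomic_list_del(me, &adap->mbox_list, &adap->mbox_lock);
}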
+
+/**
+ * t4_init_completion - initialize completion
+ * @c: the completion context
+ */
+static inline void t4_init_completion(struct t4_completion *c)
+{
+ c->done = 0;
+ t4_os_lock_init(&c->lock);
+}
+
+/**
+ * t4_complete - set completion as done
+ * @c: the completion context
+ */
+static inline void t4_complete(struct t4_completion *c)
+{
+ t4_os_lock(&c->lock);
+ c->done = 1;
+ t4_os_unlock(&c->lock);
+}
+
+/**
+ * cxgbe_port_viid - get the VI id of a port
+ * @dev: the device for the port
+ *
+ * Return the VI id of the given port.
+ */
+static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
+{
+ return ethdev2pinfo(dev)->viid;
+}
+
+void *t4_alloc_mem(size_t size);
+void t4_free_mem(void *addr);
+#define t4_os_alloc(_size) t4_alloc_mem((_size))
+#define t4_os_free(_ptr) t4_free_mem((_ptr))
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id);
+void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
+
+void reclaim_completed_tx(struct sge_txq *q);
+void t4_free_sge_resources(struct adapter *adap);
+void t4_sge_tx_monitor_start(struct adapter *adap);
+void t4_sge_tx_monitor_stop(struct adapter *adap);
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ uint16_t nb_pkts);
+int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
+int t4_sge_init(struct adapter *adap);
+int t4vf_sge_init(struct adapter *adap);
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id);
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
+ struct rte_eth_dev *eth_dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t handler,
+ int cong, struct rte_mempool *mp, int queue_id,
+ int socket_id);
+int t4_sge_eth_txq_start(struct sge_eth_txq *txq);
+int t4_sge_eth_txq_stop(struct sge_eth_txq *txq);
+void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq);
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq);
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq);
+void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
+void t4_sge_eth_clear_queues(struct port_info *pi);
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt);
+int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
+ unsigned int budget, unsigned int *work_done);
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues);
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t flags);
+
+#endif /* __T4_ADAPTER_H__ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/common.h b/src/spdk/dpdk/drivers/net/cxgbe/base/common.h
new file mode 100644
index 000000000..79c8fcb76
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/common.h
@@ -0,0 +1,554 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __CHELSIO_COMMON_H
+#define __CHELSIO_COMMON_H
+
+#include "../cxgbe_compat.h"
+#include "t4_hw.h"
+#include "t4vf_hw.h"
+#include "t4_chip_type.h"
+#include "t4fw_interface.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CXGBE_PAGE_SIZE RTE_PGSIZE_4K
+
+#define T4_MEMORY_WRITE 0
+#define T4_MEMORY_READ 1
+
+enum {
+ MAX_NPORTS = 4, /* max # of ports */
+};
+
+enum {
+ T5_REGMAP_SIZE = (332 * 1024),
+};
+
+enum {
+ MEMWIN0_APERTURE = 2048,
+ MEMWIN0_BASE = 0x1b800,
+};
+
+enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST };
+
+enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR };
+
+enum cc_pause {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+enum cc_fec {
+ FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */
+ FEC_RS = 1 << 1, /* Reed-Solomon */
+ FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */
+};
+
+enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
+
+struct port_stats {
+ u64 tx_octets; /* total # of octets in good frames */
+ u64 tx_frames; /* all good frames */
+ u64 tx_bcast_frames; /* all broadcast frames */
+ u64 tx_mcast_frames; /* all multicast frames */
+ u64 tx_ucast_frames; /* all unicast frames */
+ u64 tx_error_frames; /* all error frames */
+
+ u64 tx_frames_64; /* # of Tx frames in a particular range */
+ u64 tx_frames_65_127;
+ u64 tx_frames_128_255;
+ u64 tx_frames_256_511;
+ u64 tx_frames_512_1023;
+ u64 tx_frames_1024_1518;
+ u64 tx_frames_1519_max;
+
+ u64 tx_drop; /* # of dropped Tx frames */
+ u64 tx_pause; /* # of transmitted pause frames */
+ u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */
+ u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */
+ u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */
+ u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */
+ u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */
+ u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */
+ u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */
+ u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */
+
+ u64 rx_octets; /* total # of octets in good frames */
+ u64 rx_frames; /* all good frames */
+ u64 rx_bcast_frames; /* all broadcast frames */
+ u64 rx_mcast_frames; /* all multicast frames */
+ u64 rx_ucast_frames; /* all unicast frames */
+ u64 rx_too_long; /* # of frames exceeding MTU */
+ u64 rx_jabber; /* # of jabber frames */
+ u64 rx_fcs_err; /* # of received frames with bad FCS */
+ u64 rx_len_err; /* # of received frames with length error */
+ u64 rx_symbol_err; /* symbol errors */
+ u64 rx_runt; /* # of short frames */
+
+ u64 rx_frames_64; /* # of Rx frames in a particular range */
+ u64 rx_frames_65_127;
+ u64 rx_frames_128_255;
+ u64 rx_frames_256_511;
+ u64 rx_frames_512_1023;
+ u64 rx_frames_1024_1518;
+ u64 rx_frames_1519_max;
+
+ u64 rx_pause; /* # of received pause frames */
+ u64 rx_ppp0; /* # of received PPP prio 0 frames */
+ u64 rx_ppp1; /* # of received PPP prio 1 frames */
+ u64 rx_ppp2; /* # of received PPP prio 2 frames */
+ u64 rx_ppp3; /* # of received PPP prio 3 frames */
+ u64 rx_ppp4; /* # of received PPP prio 4 frames */
+ u64 rx_ppp5; /* # of received PPP prio 5 frames */
+ u64 rx_ppp6; /* # of received PPP prio 6 frames */
+ u64 rx_ppp7; /* # of received PPP prio 7 frames */
+
+ u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
+ u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
+ u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
+ u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
+ u64 rx_trunc0; /* buffer-group 0 truncated packets */
+ u64 rx_trunc1; /* buffer-group 1 truncated packets */
+ u64 rx_trunc2; /* buffer-group 2 truncated packets */
+ u64 rx_trunc3; /* buffer-group 3 truncated packets */
+};
+
+struct sge_params {
+ u32 hps; /* host page size for our PF/VF */
+ u32 eq_qpp; /* egress queues/page for our PF/VF */
+	u32 iq_qpp;			/* ingress queues/page for our PF/VF */
+};
+
+struct tp_params {
+ unsigned int ntxchan; /* # of Tx channels */
+ unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned int dack_re; /* DACK timer resolution */
+ unsigned int la_mask; /* what events are recorded by TP LA */
+ unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
+
+ u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 filter_mask;
+ u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+
+ /* cached TP_OUT_CONFIG compressed error vector
+ * and passing outer header info for encapsulated packets.
+ */
+ int rx_pkt_encap;
+
+ /*
+ * TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
+ * subset of the set of fields which may be present in the Compressed
+ * Filter Tuple portion of filters and TCP TCB connections. The
+ * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+ * Since a variable number of fields may or may not be present, their
+ * shifted field positions within the Compressed Filter Tuple may
+ * vary, or not even be present if the field isn't selected in
+ * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
+	 * places, we store their offsets here, or -1 if the field isn't
+	 * present (see the composition sketch following this structure).
+ */
+ int vlan_shift;
+ int vnic_shift;
+ int port_shift;
+ int protocol_shift;
+ int ethertype_shift;
+ int macmatch_shift;
+ int tos_shift;
+
+ u64 hash_filter_mask;
+};
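
A composition sketch for the offsets cached above (illustrative only, not the driver's actual helper): each selected field is shifted into its position within the Compressed Filter Tuple, and fields whose offset is -1 are skipped.

static inline u64 example_mk_filter_tuple(const struct tp_params *tp,
					  u16 vlan, u8 port, u16 ethertype)
{
	u64 tuple = 0;

	if (tp->vlan_shift >= 0)
		tuple |= (u64)vlan << tp->vlan_shift;
	if (tp->port_shift >= 0)
		tuple |= (u64)port << tp->port_shift;
	if (tp->ethertype_shift >= 0)
		tuple |= (u64)ethertype << tp->ethertype_shift;

	return tuple;
}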
+
+struct vpd_params {
+ unsigned int cclk;
+};
+
+struct pci_params {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint32_t vpd_cap_addr;
+ uint16_t speed;
+ uint8_t width;
+};
+
+/*
+ * Firmware device log.
+ */
+struct devlog_params {
+ u32 memtype; /* which memory (EDC0, EDC1, MC) */
+ u32 start; /* start of log in firmware memory */
+ u32 size; /* size of log */
+};
+
+struct arch_specific_params {
+ u8 nchan;
+ u16 mps_rplc_size;
+ u16 vfcount;
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
+/*
+ * Global Receive Side Scaling (RSS) parameters in host-native format.
+ */
+struct rss_params {
+ unsigned int mode; /* RSS mode */
+ union {
+ struct {
+ uint synmapen:1; /* SYN Map Enable */
+ uint syn4tupenipv6:1; /* en 4-tuple IPv6 SYNs hash */
+ uint syn2tupenipv6:1; /* en 2-tuple IPv6 SYNs hash */
+ uint syn4tupenipv4:1; /* en 4-tuple IPv4 SYNs hash */
+ uint syn2tupenipv4:1; /* en 2-tuple IPv4 SYNs hash */
+ uint ofdmapen:1; /* Offload Map Enable */
+ uint tnlmapen:1; /* Tunnel Map Enable */
+ uint tnlalllookup:1; /* Tunnel All Lookup */
+ uint hashtoeplitz:1; /* use Toeplitz hash */
+ } basicvirtual;
+ } u;
+};
+
+/*
+ * Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int neq; /* N egress Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+};
+
+/*
+ * Maximum resources provisioned for a PCI VF.
+ */
+struct vf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
+struct adapter_params {
+ struct sge_params sge;
+ struct tp_params tp;
+ struct vpd_params vpd;
+ struct pci_params pci;
+ struct devlog_params devlog;
+ struct rss_params rss;
+ struct pf_resources pfres;
+ struct vf_resources vfres;
+ enum pcie_memwin drv_memwin;
+
+ unsigned int sf_size; /* serial flash size in bytes */
+ unsigned int sf_nsec; /* # of flash sectors */
+
+ unsigned int fw_vers;
+ unsigned int bs_vers;
+ unsigned int tp_vers;
+ unsigned int er_vers;
+
+ unsigned short mtus[NMTUS];
+ unsigned short a_wnd[NCCTRL_WIN];
+ unsigned short b_wnd[NCCTRL_WIN];
+
+ unsigned int mc_size; /* MC memory size */
+ unsigned int cim_la_size;
+
+ unsigned char nports; /* # of ethernet ports */
+ unsigned char portvec;
+
+ unsigned char hash_filter;
+
+ enum chip_type chip; /* chip code */
+ struct arch_specific_params arch; /* chip specific params */
+
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
+ u8 fw_caps_support; /* 32-bit Port Capabilities */
+ u8 filter2_wr_support; /* FW support for FILTER2_WR */
+ u32 viid_smt_extn_support:1; /* FW returns vin and smt index */
+ u32 max_tx_coalesce_num; /* Max # of Tx packets that can be coalesced */
+};
+
+/* Firmware Port Capabilities types.
+ */
+typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */
+typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */
+
+enum fw_caps {
+ FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */
+ FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */
+ FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */
+};
+
+struct link_config {
+ fw_port_cap32_t pcaps; /* link capabilities */
+ fw_port_cap32_t acaps; /* advertised capabilities */
+
+ u32 requested_speed; /* speed (Mb/s) user has requested */
+ u32 speed; /* actual link speed (Mb/s) */
+
+ enum cc_pause requested_fc; /* flow control user has requested */
+ enum cc_pause fc; /* actual link flow control */
+
+ enum cc_fec auto_fec; /* Forward Error Correction
+ * "automatic" (IEEE 802.3)
+ */
+ enum cc_fec requested_fec; /* Forward Error Correction requested */
+ enum cc_fec fec; /* Forward Error Correction actual */
+
+ unsigned char autoneg; /* autonegotiating? */
+
+ unsigned char link_ok; /* link up? */
+ unsigned char link_down_rc; /* link down reason */
+};
+
+#include "adapter.h"
+
+void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+ u32 val);
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity,
+ int attempts, int delay, u32 *valp);
+
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay)
+{
+ return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+ delay, NULL);
+}
+
+static inline int is_pf4(struct adapter *adap)
+{
+ return adap->pf == 4;
+}
+
+#define for_each_port(adapter, iter) \
+ for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+static inline int is_hashfilter(const struct adapter *adap)
+{
+ return adap->params.hash_filter;
+}
+
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val);
+void t4_intr_enable(struct adapter *adapter);
+void t4_intr_disable(struct adapter *adapter);
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc);
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta);
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state);
+int t4_fw_bye(struct adapter *adap, unsigned int mbox);
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4vf_fw_reset(struct adapter *adap);
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
+int t4_fl_pkt_align(struct adapter *adap);
+int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2);
+int t4vf_get_vfres(struct adapter *adap);
+int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size,
+ enum chip_type chip_compat);
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size);
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val);
+int t4vf_query_params(struct adapter *adap, unsigned int nparams,
+ const u32 *params, u32 *vals);
+int t4vf_get_dev_params(struct adapter *adap);
+int t4vf_get_vpd_params(struct adapter *adap);
+int t4vf_get_rss_glb_config(struct adapter *adap);
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals);
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ const u32 *val, int timeout);
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val);
+int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
+ unsigned int port, unsigned int pf, unsigned int vf,
+ unsigned int nmac, u8 *mac, unsigned int *rss_size,
+ unsigned int portfunc, unsigned int idstype,
+ u8 *vivld, u8 *vin);
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+		unsigned int *rss_size, u8 *vivld, u8 *vin);
+int t4_free_vi(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int viid);
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt);
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en);
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id);
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid);
+
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+ return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adap,
+ unsigned int us)
+{
+ return (us * adap->params.vpd.cclk) / 1000;
+}
+
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+ unsigned int ticks)
+{
+	/* add Core Clock / 2 to round ticks to nearest us */
+ return ((ticks * 1000 + adapter->params.vpd.cclk / 2) /
+ adapter->params.vpd.cclk);
+}
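
A numeric check of the rounding (cclk is stored in kHz, so cclk / 1000 is the tick rate in MHz): with a 250 MHz core clock, 1150 ticks are exactly 4.6 us, and the + cclk/2 addend makes the integer division round to 5 where plain division would truncate to 4.

_Static_assert((1150U * 1000 + 250000 / 2) / 250000 == 5,
	       "core_ticks_to_us rounds to nearest microsecond");
_Static_assert((1150U * 1000) / 250000 == 4,
	       "plain integer division would truncate");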
+
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout);
+int t4_wr_mbox_meat(struct adapter *adap, int mbox,
+ const void __attribute__((__may_alias__)) *cmd, int size,
+ void *rpl, bool sleep_ok);
+
+static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
+ const void *cmd, int size, void *rpl,
+ int timeout)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
+ timeout);
+}
+
+int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p);
+
+static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
+}
+
+static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
+ int size, void *rpl)
+{
+ return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
+}
+
+int t4vf_wr_mbox_core(struct adapter *, const void *, int, void *, bool);
+
+static inline int t4vf_wr_mbox(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, true);
+}
+
+static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
+ int size, void *rpl)
+{
+ return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
+}
+
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx);
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx);
+
+int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
+int t4_get_pfres(struct adapter *adapter);
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented);
+int t4_flash_cfg_addr(struct adapter *adapter);
+unsigned int t4_get_mps_bg_map(struct adapter *adapter, unsigned int pidx);
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx);
+const char *t4_get_port_type_description(enum fw_port_type port_type);
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset);
+void t4_clr_port_stats(struct adapter *adap, int idx);
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps);
+void t4_reset_link_config(struct adapter *adap, int idx);
+int t4_get_version_info(struct adapter *adapter);
+void t4_dump_version_info(struct adapter *adapter);
+int t4_get_flash_params(struct adapter *adapter);
+int t4_get_chip_type(struct adapter *adap, int ver);
+int t4_prep_adapter(struct adapter *adapter);
+int t4vf_prep_adapter(struct adapter *adapter);
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+int t4vf_port_init(struct adapter *adap);
+int t4_init_rss_mode(struct adapter *adap, int mbox);
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq);
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq);
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq);
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw);
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx);
+void t4_read_rss_key(struct adapter *adap, u32 *key);
+
+enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
+int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
+ enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
+
+int t4_init_sge_params(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel);
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+unsigned int t4_get_regs_len(struct adapter *adap);
+unsigned int t4vf_get_pf_from_vf(struct adapter *adap);
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
+int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
+int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
+int t4_seeprom_wp(struct adapter *adapter, int enable);
+int t4_memory_rw_addr(struct adapter *adap, int win,
+ u32 addr, u32 len, void *hbuf, int dir);
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir);
+static inline int t4_memory_rw(struct adapter *adap, int win,
+ int mtype, u32 maddr, u32 len,
+ void *hbuf, int dir)
+{
+ return t4_memory_rw_mtype(adap, win, mtype, maddr, len, hbuf, dir);
+}
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16);
+#endif /* __CHELSIO_COMMON_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h
new file mode 100644
index 000000000..c0c5d0b2c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_chip_type.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_CHIP_TYPE_H__
+#define __T4_CHIP_TYPE_H__
+
+/*
+ * All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc. or
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ *
+ * We use the "version" (V) of the adpater to code the Chip Version above.
+ */
+#define CHELSIO_PCI_ID_VER(devid) ((devid) >> 12)
+#define CHELSIO_PCI_ID_FUNC(devid) (((devid) >> 8) & 0xf)
+#define CHELSIO_PCI_ID_PROD(devid) ((devid) & 0xff)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
+
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
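
Worked example of the encoding, using a hypothetical Device ID of 0x5401:

/* CHELSIO_PCI_ID_VER(0x5401)  == 0x5  -> a T5 part
 * CHELSIO_PCI_ID_FUNC(0x5401) == 0x4  -> PF4
 * CHELSIO_PCI_ID_PROD(0x5401) == 0x01 -> product designation 1
 *
 * Composing a chip code: CHELSIO_CHIP_CODE(CHELSIO_T5, 1) == 0x51,
 * so CHELSIO_CHIP_VERSION(0x51) == 0x5 and CHELSIO_CHIP_RELEASE(0x51)
 * == 0x1, i.e. the T5_A1 enumerator below.
 */
_Static_assert(CHELSIO_CHIP_CODE(CHELSIO_T5, 1) == 0x51, "chip code layout");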
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A2,
+
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
+ T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
+};
+
+static inline int is_t4(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4);
+}
+
+static inline int is_t5(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
+}
+
+static inline int is_t6(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6);
+}
+#endif /* __T4_CHIP_TYPE_H__ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c
new file mode 100644
index 000000000..c8514c963
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.c
@@ -0,0 +1,5701 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <netinet/in.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_byteorder.h>
+
+#include "common.h"
+#include "t4_regs.h"
+#include "t4_regs_values.h"
+#include "t4fw_interface.h"
+
+/**
+ * t4_read_mtu_tbl - returns the values in the HW path MTU table
+ * @adap: the adapter
+ * @mtus: where to store the MTU values
+ * @mtu_log: where to store the MTU base-2 log (may be %NULL)
+ *
+ * Reads the HW path MTU table.
+ */
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
+{
+ u32 v;
+ int i;
+
+ for (i = 0; i < NMTUS; ++i) {
+ t4_write_reg(adap, A_TP_MTU_TABLE,
+ V_MTUINDEX(0xff) | V_MTUVALUE(i));
+ v = t4_read_reg(adap, A_TP_MTU_TABLE);
+ mtus[i] = G_MTUVALUE(v);
+ if (mtu_log)
+ mtu_log[i] = G_MTUWIDTH(v);
+ }
+}
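
A usage sketch: read back and log the full path-MTU table (NMTUS comes from t4_hw.h; dev_info() is the logging helper from cxgbe_compat.h, both assumed here).

static void example_dump_mtu_tbl(struct adapter *adap)
{
	u16 mtus[NMTUS];
	u8 mtu_log[NMTUS];
	int i;

	t4_read_mtu_tbl(adap, mtus, mtu_log);
	for (i = 0; i < NMTUS; i++)
		dev_info(adap, "MTU[%d] = %u (log2 width %u)\n",
			 i, mtus[i], mtu_log[i]);
}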
+
+/**
+ * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @adap: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
+ unsigned int mask, unsigned int val)
+{
+ t4_write_reg(adap, A_TP_PIO_ADDR, addr);
+ val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
+ t4_write_reg(adap, A_TP_PIO_DATA, val);
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ * t4_load_mtus - write the MTU and congestion control HW tables
+ * @adap: the adapter
+ * @mtus: the values for the MTU table
+ * @alpha: the values for the congestion control alpha parameter
+ * @beta: the values for the congestion control beta parameter
+ *
+ * Write the HW MTU table with the supplied MTUs and the high-speed
+ * congestion control table with the supplied alpha, beta, and MTUs.
+ * We write the two tables together because the additive increments
+ * depend on the MTUs.
+ */
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+ const unsigned short *alpha, const unsigned short *beta)
+{
+ static const unsigned int avg_pkts[NCCTRL_WIN] = {
+ 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+ 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+ 28672, 40960, 57344, 81920, 114688, 163840, 229376
+ };
+
+ unsigned int i, w;
+
+ for (i = 0; i < NMTUS; ++i) {
+ unsigned int mtu = mtus[i];
+ unsigned int log2 = cxgbe_fls(mtu);
+
+ if (!(mtu & ((1 << log2) >> 2))) /* round */
+ log2--;
+ t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
+ V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
+
+ for (w = 0; w < NCCTRL_WIN; ++w) {
+ unsigned int inc;
+
+ inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+ CC_MIN_INCR);
+
+ t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
+ (w << 16) | (beta[w] << 13) | inc);
+ }
+ }
+}
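
The log2 adjustment at the top of the loop rounds to the nearest power of two rather than truncating; a worked case for mtu = 1500:

/* cxgbe_fls(1500) = 11           (highest set bit of 1500 is bit 10)
 * (1 << 11) >> 2  = 512          (the bit just below that MSB)
 * 1500 & 512      = 0            (1500 < 1536, i.e. closer to 1024)
 *   => log2 is decremented to 10 and MTUWIDTH is programmed as 10.
 * An MTU of 1600 (>= 1536) would keep log2 = 11 instead.
 */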
+
+/**
+ * t4_wait_op_done_val - wait until an operation is completed
+ * @adapter: the adapter performing the operation
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+ int polarity, int attempts, int delay, u32 *valp)
+{
+ while (1) {
+ u32 val = t4_read_reg(adapter, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
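
Usage sketch (hypothetical register and bit): wait for a busy bit to clear, polling up to five times with 10 us between attempts, via the t4_wait_op_done() wrapper declared in common.h.

#define EXAMPLE_STATUS_REG 0x7e40		/* hypothetical */
#define EXAMPLE_BUSY_BIT   (1U << 31)		/* hypothetical */

static int example_wait_idle(struct adapter *adapter)
{
	/* polarity 0: done when the bit reads as zero */
	return t4_wait_op_done(adapter, EXAMPLE_STATUS_REG,
			       EXAMPLE_BUSY_BIT, 0, 5, 10);
}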
+
+/**
+ * t4_set_reg_field - set a register field to a value
+ * @adapter: the adapter to program
+ * @addr: the register address
+ * @mask: specifies the portion of the register to modify
+ * @val: the new value for the register field
+ *
+ * Sets a register field specified by the supplied mask to the
+ * given value.
+ */
+void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+ u32 val)
+{
+ u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+ t4_write_reg(adapter, addr, v | val);
+ (void)t4_read_reg(adapter, addr); /* flush */
+}
+
+/**
+ * t4_read_indirect - read indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value of the indirect register
+ * @vals: where the read register values are stored
+ * @nregs: how many indirect registers to read
+ * @start_idx: index of first indirect register to read
+ *
+ * Reads registers that are accessed indirectly through an address/data
+ * register pair.
+ */
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, u32 *vals, unsigned int nregs,
+ unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx);
+ *vals++ = t4_read_reg(adap, data_reg);
+ start_idx++;
+ }
+}
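+
+/*
+ * Usage sketch (illustrative): the TP PIO address/data pair used by
+ * t4_tp_wr_bits_indirect() above is a typical target, e.g. reading the
+ * first four words of the RSS secret key directly:
+ *
+ *	u32 vals[4];
+ *
+ *	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4,
+ *			 A_TP_RSS_SECRET_KEY0);
+ */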
+
+/**
+ * t4_write_indirect - write indirectly addressed registers
+ * @adap: the adapter
+ * @addr_reg: register holding the indirect address
+ * @data_reg: register holding the value for the indirect registers
+ * @vals: values to write
+ * @nregs: how many indirect registers to write
+ * @start_idx: index of first indirect register to write
+ *
+ * Writes a sequential block of registers that are accessed indirectly
+ * through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+ unsigned int data_reg, const u32 *vals,
+ unsigned int nregs, unsigned int start_idx)
+{
+ while (nregs--) {
+ t4_write_reg(adap, addr_reg, start_idx++);
+ t4_write_reg(adap, data_reg, *vals++);
+ }
+}
+
+/**
+ * t4_report_fw_error - report firmware error
+ * @adap: the adapter
+ *
+ * The adapter firmware can indicate error conditions to the host.
+ * If the firmware has indicated an error, print out the reason for
+ * the firmware error.
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+ static const char * const reason[] = {
+ "Crash", /* PCIE_FW_EVAL_CRASH */
+ "During Device Preparation", /* PCIE_FW_EVAL_PREP */
+ "During Device Configuration", /* PCIE_FW_EVAL_CONF */
+ "During Device Initialization", /* PCIE_FW_EVAL_INIT */
+ "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+ "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
+ "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+ "Reserved", /* reserved */
+ };
+ u32 pcie_fw;
+
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (pcie_fw & F_PCIE_FW_ERR)
+ pr_err("%s: Firmware reports adapter error: %s\n",
+ __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+ struct fw_debug_cmd asrt;
+
+ get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+ pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+ be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
+}
+
+#define X_CIM_PF_NOACCESS 0xeeeeeeee
+
+/*
+ * If the Host OS Driver needs locking around accesses to the mailbox, this
+ * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
+ */
+/* makes single-statement usage a bit cleaner ... */
+#ifdef T4_OS_NEEDS_MBOX_LOCKING
+#define T4_OS_MBOX_LOCKING(x) x
+#else
+#define T4_OS_MBOX_LOCKING(x) do {} while (0)
+#endif
+
+/**
+ * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
+ * @adap: the adapter
+ * @mbox: index of the mailbox to use
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
+ *
+ * Sends the given command to FW through the selected mailbox and waits
+ * for the FW to execute the command. If @rpl is not %NULL it is used to
+ * store the FW's reply to the command. The command and its optional
+ * reply are of the same length. Some FW commands like RESET and
+ * INITIALIZE can take a considerable amount of time to execute.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff otherwise we spin.
+ * Note that passing in a negative @timeout is an alternate mechanism
+ * for specifying @sleep_ok=false. This is useful when a higher level
+ * interface allows for specification of @timeout but not @sleep_ok ...
+ *
+ * Returns 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ const void __attribute__((__may_alias__)) *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast-executing commands, but then back
+ * off to larger delays, up to a maximum retry delay.
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+ u32 v;
+ u64 res;
+ int i, ms;
+ unsigned int delay_idx;
+ __be64 *temp = (__be64 *)malloc(size);
+ __be64 *p = temp;
+ u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
+ u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
+ u32 ctl;
+ struct mbox_entry entry;
+ u32 pcie_fw = 0;
+
+ if (!temp)
+ return -ENOMEM;
+
+ if ((size & 15) || size > MBOX_LEN) {
+ free(temp);
+ return -EINVAL;
+ }
+
+ memset(p, 0, size);
+ memcpy(p, (const __be64 *)cmd, size);
+
+ /*
+ * If we have a negative timeout, that implies that we can't sleep.
+ */
+ if (timeout < 0) {
+ sleep_ok = false;
+ timeout = -timeout;
+ }
+
+#ifdef T4_OS_NEEDS_MBOX_LOCKING
+ /*
+ * Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /*
+ * If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ... Also check for a
+ * firmware error which we'll report as a device error.
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
+ t4_os_atomic_list_del(&entry, &adap->mbox_list,
+ &adap->mbox_lock);
+ t4_report_fw_error(adap);
+ free(temp);
+ return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
+ }
+
+ /*
+ * If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
+ break;
+
+ /*
+ * Delay for a bit before checking again ...
+ */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+ }
+#endif /* T4_OS_NEEDS_MBOX_LOCKING */
+
+ /*
+ * Attempt to gain access to the mailbox.
+ */
+ for (i = 0; i < 4; i++) {
+ ctl = t4_read_reg(adap, ctl_reg);
+ v = G_MBOWNER(ctl);
+ if (v != X_MBOWNER_NONE)
+ break;
+ }
+
+ /*
+ * If we were unable to gain access, dequeue ourselves from the
+ * mailbox atomic access list and report the error to our caller.
+ */
+ if (v != X_MBOWNER_PL) {
+ T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+ &adap->mbox_list,
+ &adap->mbox_lock));
+ t4_report_fw_error(adap);
+ free(temp);
+ return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
+ }
+
+ /*
+ * If we gain ownership of the mailbox and there's a "valid" message
+ * in it, this is likely an asynchronous error message from the
+ * firmware. So we'll report that and then proceed on with attempting
+ * to issue our own command ... which may well fail if the error
+ * presaged the firmware crashing ...
+ */
+ if (ctl & F_MBMSGVALID) {
+ dev_err(adap, "found VALID command in mbox %u: "
+ "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+ }
+
+ /*
+ * Copy in the new mailbox command and send it on its way ...
+ */
+ for (i = 0; i < size; i += 8, p++)
+ t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
+
+ CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
+ "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+
+ t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adap, ctl_reg); /* flush write */
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ /*
+ * Loop waiting for the reply; bail out if we time out or the firmware
+ * reports an error.
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms); /* busy-wait; must not sleep when sleep_ok is false */
+ }
+
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ v = t4_read_reg(adap, ctl_reg);
+ if (v == X_CIM_PF_NOACCESS)
+ continue;
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ if (!(v & F_MBMSGVALID)) {
+ t4_write_reg(adap, ctl_reg,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ CXGBE_DEBUG_MBOX(adap,
+ "%s: mbox %u: %016llx %016llx %016llx %016llx "
+ "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+
+ CXGBE_DEBUG_MBOX(adap,
+ "command %#x completed in %d ms (%ssleeping)\n",
+ *(const u8 *)cmd,
+ i + ms, sleep_ok ? "" : "non-");
+
+ res = t4_read_reg64(adap, data_reg);
+ if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
+ fw_asrt(adap, data_reg);
+ res = V_FW_CMD_RETVAL(EIO);
+ } else if (rpl) {
+ get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ }
+ t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
+ T4_OS_MBOX_LOCKING(
+ t4_os_atomic_list_del(&entry, &adap->mbox_list,
+ &adap->mbox_lock));
+ free(temp);
+ return -G_FW_CMD_RETVAL((int)res);
+ }
+ }
+
+ /*
+ * We timed out waiting for a reply to our mailbox command. Report
+ * the error and also check to see if the firmware reported any
+ * errors ...
+ */
+ dev_err(adap, "command %#x in mailbox %d timed out\n",
+ *(const u8 *)cmd, mbox);
+ T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+ &adap->mbox_list,
+ &adap->mbox_lock));
+ t4_report_fw_error(adap);
+ free(temp);
+ return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
+}
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+ FW_CMD_MAX_TIMEOUT);
+}
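+
+/*
+ * Most callers in this file issue mailbox commands through t4_wr_mbox()
+ * (see e.g. t4_fw_tp_pio_rw() below). Conceptually it is just a thin
+ * sleeping wrapper over t4_wr_mbox_meat(); a sketch of the inline that
+ * lives in the common header:
+ *
+ *	static inline int t4_wr_mbox(struct adapter *adap, int mbox,
+ *				     const void *cmd, int size, void *rpl)
+ *	{
+ *		return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
+ *	}
+ */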
+
+/**
+ * t4_get_regs_len - return the size of the chip's register set
+ * @adapter: the adapter
+ *
+ * Returns the size of the chip's BAR0 register space.
+ */
+unsigned int t4_get_regs_len(struct adapter *adapter)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ switch (chip_version) {
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ return T5_REGMAP_SIZE;
+ }
+
+ dev_err(adapter,
+ "Unsupported chip version %d\n", chip_version);
+ return 0;
+}
+
+/**
+ * t4_get_regs - read chip registers into provided buffer
+ * @adap: the adapter
+ * @buf: register buffer
+ * @buf_size: size (in bytes) of register buffer
+ *
+ * If the provided register buffer isn't large enough for the chip's
+ * full register range, the register dump will be truncated to the
+ * register buffer's size.
+ */
+void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
+{
+ static const unsigned int t5_reg_ranges[] = {
+ 0x1008, 0x10c0,
+ 0x10cc, 0x10f8,
+ 0x1100, 0x1100,
+ 0x110c, 0x1148,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
+ 0x11fc, 0x123c,
+ 0x1280, 0x173c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3028,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a70,
+ 0x5a80, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x614c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x78fc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d80,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
+ 0x8ea0, 0x8f84,
+ 0x8fc0, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9400, 0x9408,
+ 0x9410, 0x9470,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x96f4,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd004,
+ 0xd010, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0x1106c,
+ 0x11074, 0x11088,
+ 0x1109c, 0x1117c,
+ 0x11190, 0x11204,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x193f8, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c08,
+ 0x19c10, 0x19c60,
+ 0x19c94, 0x19ce4,
+ 0x19cf0, 0x19d40,
+ 0x19d50, 0x19d94,
+ 0x19da0, 0x19de8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e90,
+ 0x19ea0, 0x19f24,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fb4,
+ 0x19fc4, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30038, 0x30038,
+ 0x30040, 0x30040,
+ 0x30100, 0x30144,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x30828,
+ 0x30834, 0x30834,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309ac,
+ 0x30a00, 0x30a14,
+ 0x30a1c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
+ 0x30d00, 0x30d00,
+ 0x30d08, 0x30d14,
+ 0x30d1c, 0x30d20,
+ 0x30d3c, 0x30d3c,
+ 0x30d48, 0x30d50,
+ 0x31200, 0x3120c,
+ 0x31220, 0x31220,
+ 0x31240, 0x31240,
+ 0x31600, 0x3160c,
+ 0x31a00, 0x31a1c,
+ 0x31e00, 0x31e20,
+ 0x31e38, 0x31e3c,
+ 0x31e80, 0x31e80,
+ 0x31e88, 0x31ea8,
+ 0x31eb0, 0x31eb4,
+ 0x31ec8, 0x31ed4,
+ 0x31fb8, 0x32004,
+ 0x32200, 0x32200,
+ 0x32208, 0x32240,
+ 0x32248, 0x32280,
+ 0x32288, 0x322c0,
+ 0x322c8, 0x322fc,
+ 0x32600, 0x32630,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b10,
+ 0x32b20, 0x32b30,
+ 0x32b40, 0x32b50,
+ 0x32b60, 0x32b70,
+ 0x33000, 0x33028,
+ 0x33030, 0x33048,
+ 0x33060, 0x33068,
+ 0x33070, 0x3309c,
+ 0x330f0, 0x33128,
+ 0x33130, 0x33148,
+ 0x33160, 0x33168,
+ 0x33170, 0x3319c,
+ 0x331f0, 0x33238,
+ 0x33240, 0x33240,
+ 0x33248, 0x33250,
+ 0x3325c, 0x33264,
+ 0x33270, 0x332b8,
+ 0x332c0, 0x332e4,
+ 0x332f8, 0x33338,
+ 0x33340, 0x33340,
+ 0x33348, 0x33350,
+ 0x3335c, 0x33364,
+ 0x33370, 0x333b8,
+ 0x333c0, 0x333e4,
+ 0x333f8, 0x33428,
+ 0x33430, 0x33448,
+ 0x33460, 0x33468,
+ 0x33470, 0x3349c,
+ 0x334f0, 0x33528,
+ 0x33530, 0x33548,
+ 0x33560, 0x33568,
+ 0x33570, 0x3359c,
+ 0x335f0, 0x33638,
+ 0x33640, 0x33640,
+ 0x33648, 0x33650,
+ 0x3365c, 0x33664,
+ 0x33670, 0x336b8,
+ 0x336c0, 0x336e4,
+ 0x336f8, 0x33738,
+ 0x33740, 0x33740,
+ 0x33748, 0x33750,
+ 0x3375c, 0x33764,
+ 0x33770, 0x337b8,
+ 0x337c0, 0x337e4,
+ 0x337f8, 0x337fc,
+ 0x33814, 0x33814,
+ 0x3382c, 0x3382c,
+ 0x33880, 0x3388c,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x33928,
+ 0x33930, 0x33948,
+ 0x33960, 0x33968,
+ 0x33970, 0x3399c,
+ 0x339f0, 0x33a38,
+ 0x33a40, 0x33a40,
+ 0x33a48, 0x33a50,
+ 0x33a5c, 0x33a64,
+ 0x33a70, 0x33ab8,
+ 0x33ac0, 0x33ae4,
+ 0x33af8, 0x33b10,
+ 0x33b28, 0x33b28,
+ 0x33b3c, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c28, 0x33c28,
+ 0x33c3c, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34038, 0x34038,
+ 0x34040, 0x34040,
+ 0x34100, 0x34144,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x34828,
+ 0x34834, 0x34834,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349ac,
+ 0x34a00, 0x34a14,
+ 0x34a1c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
+ 0x34d00, 0x34d00,
+ 0x34d08, 0x34d14,
+ 0x34d1c, 0x34d20,
+ 0x34d3c, 0x34d3c,
+ 0x34d48, 0x34d50,
+ 0x35200, 0x3520c,
+ 0x35220, 0x35220,
+ 0x35240, 0x35240,
+ 0x35600, 0x3560c,
+ 0x35a00, 0x35a1c,
+ 0x35e00, 0x35e20,
+ 0x35e38, 0x35e3c,
+ 0x35e80, 0x35e80,
+ 0x35e88, 0x35ea8,
+ 0x35eb0, 0x35eb4,
+ 0x35ec8, 0x35ed4,
+ 0x35fb8, 0x36004,
+ 0x36200, 0x36200,
+ 0x36208, 0x36240,
+ 0x36248, 0x36280,
+ 0x36288, 0x362c0,
+ 0x362c8, 0x362fc,
+ 0x36600, 0x36630,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b10,
+ 0x36b20, 0x36b30,
+ 0x36b40, 0x36b50,
+ 0x36b60, 0x36b70,
+ 0x37000, 0x37028,
+ 0x37030, 0x37048,
+ 0x37060, 0x37068,
+ 0x37070, 0x3709c,
+ 0x370f0, 0x37128,
+ 0x37130, 0x37148,
+ 0x37160, 0x37168,
+ 0x37170, 0x3719c,
+ 0x371f0, 0x37238,
+ 0x37240, 0x37240,
+ 0x37248, 0x37250,
+ 0x3725c, 0x37264,
+ 0x37270, 0x372b8,
+ 0x372c0, 0x372e4,
+ 0x372f8, 0x37338,
+ 0x37340, 0x37340,
+ 0x37348, 0x37350,
+ 0x3735c, 0x37364,
+ 0x37370, 0x373b8,
+ 0x373c0, 0x373e4,
+ 0x373f8, 0x37428,
+ 0x37430, 0x37448,
+ 0x37460, 0x37468,
+ 0x37470, 0x3749c,
+ 0x374f0, 0x37528,
+ 0x37530, 0x37548,
+ 0x37560, 0x37568,
+ 0x37570, 0x3759c,
+ 0x375f0, 0x37638,
+ 0x37640, 0x37640,
+ 0x37648, 0x37650,
+ 0x3765c, 0x37664,
+ 0x37670, 0x376b8,
+ 0x376c0, 0x376e4,
+ 0x376f8, 0x37738,
+ 0x37740, 0x37740,
+ 0x37748, 0x37750,
+ 0x3775c, 0x37764,
+ 0x37770, 0x377b8,
+ 0x377c0, 0x377e4,
+ 0x377f8, 0x377fc,
+ 0x37814, 0x37814,
+ 0x3782c, 0x3782c,
+ 0x37880, 0x3788c,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x37928,
+ 0x37930, 0x37948,
+ 0x37960, 0x37968,
+ 0x37970, 0x3799c,
+ 0x379f0, 0x37a38,
+ 0x37a40, 0x37a40,
+ 0x37a48, 0x37a50,
+ 0x37a5c, 0x37a64,
+ 0x37a70, 0x37ab8,
+ 0x37ac0, 0x37ae4,
+ 0x37af8, 0x37b10,
+ 0x37b28, 0x37b28,
+ 0x37b3c, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c28, 0x37c28,
+ 0x37c3c, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x38000, 0x38030,
+ 0x38038, 0x38038,
+ 0x38040, 0x38040,
+ 0x38100, 0x38144,
+ 0x38190, 0x381a0,
+ 0x381a8, 0x381b8,
+ 0x381c4, 0x381c8,
+ 0x381d0, 0x381d0,
+ 0x38200, 0x38318,
+ 0x38400, 0x384b4,
+ 0x384c0, 0x3852c,
+ 0x38540, 0x3861c,
+ 0x38800, 0x38828,
+ 0x38834, 0x38834,
+ 0x388c0, 0x38908,
+ 0x38910, 0x389ac,
+ 0x38a00, 0x38a14,
+ 0x38a1c, 0x38a2c,
+ 0x38a44, 0x38a50,
+ 0x38a74, 0x38a74,
+ 0x38a7c, 0x38afc,
+ 0x38b08, 0x38c24,
+ 0x38d00, 0x38d00,
+ 0x38d08, 0x38d14,
+ 0x38d1c, 0x38d20,
+ 0x38d3c, 0x38d3c,
+ 0x38d48, 0x38d50,
+ 0x39200, 0x3920c,
+ 0x39220, 0x39220,
+ 0x39240, 0x39240,
+ 0x39600, 0x3960c,
+ 0x39a00, 0x39a1c,
+ 0x39e00, 0x39e20,
+ 0x39e38, 0x39e3c,
+ 0x39e80, 0x39e80,
+ 0x39e88, 0x39ea8,
+ 0x39eb0, 0x39eb4,
+ 0x39ec8, 0x39ed4,
+ 0x39fb8, 0x3a004,
+ 0x3a200, 0x3a200,
+ 0x3a208, 0x3a240,
+ 0x3a248, 0x3a280,
+ 0x3a288, 0x3a2c0,
+ 0x3a2c8, 0x3a2fc,
+ 0x3a600, 0x3a630,
+ 0x3aa00, 0x3aabc,
+ 0x3ab00, 0x3ab10,
+ 0x3ab20, 0x3ab30,
+ 0x3ab40, 0x3ab50,
+ 0x3ab60, 0x3ab70,
+ 0x3b000, 0x3b028,
+ 0x3b030, 0x3b048,
+ 0x3b060, 0x3b068,
+ 0x3b070, 0x3b09c,
+ 0x3b0f0, 0x3b128,
+ 0x3b130, 0x3b148,
+ 0x3b160, 0x3b168,
+ 0x3b170, 0x3b19c,
+ 0x3b1f0, 0x3b238,
+ 0x3b240, 0x3b240,
+ 0x3b248, 0x3b250,
+ 0x3b25c, 0x3b264,
+ 0x3b270, 0x3b2b8,
+ 0x3b2c0, 0x3b2e4,
+ 0x3b2f8, 0x3b338,
+ 0x3b340, 0x3b340,
+ 0x3b348, 0x3b350,
+ 0x3b35c, 0x3b364,
+ 0x3b370, 0x3b3b8,
+ 0x3b3c0, 0x3b3e4,
+ 0x3b3f8, 0x3b428,
+ 0x3b430, 0x3b448,
+ 0x3b460, 0x3b468,
+ 0x3b470, 0x3b49c,
+ 0x3b4f0, 0x3b528,
+ 0x3b530, 0x3b548,
+ 0x3b560, 0x3b568,
+ 0x3b570, 0x3b59c,
+ 0x3b5f0, 0x3b638,
+ 0x3b640, 0x3b640,
+ 0x3b648, 0x3b650,
+ 0x3b65c, 0x3b664,
+ 0x3b670, 0x3b6b8,
+ 0x3b6c0, 0x3b6e4,
+ 0x3b6f8, 0x3b738,
+ 0x3b740, 0x3b740,
+ 0x3b748, 0x3b750,
+ 0x3b75c, 0x3b764,
+ 0x3b770, 0x3b7b8,
+ 0x3b7c0, 0x3b7e4,
+ 0x3b7f8, 0x3b7fc,
+ 0x3b814, 0x3b814,
+ 0x3b82c, 0x3b82c,
+ 0x3b880, 0x3b88c,
+ 0x3b8e8, 0x3b8ec,
+ 0x3b900, 0x3b928,
+ 0x3b930, 0x3b948,
+ 0x3b960, 0x3b968,
+ 0x3b970, 0x3b99c,
+ 0x3b9f0, 0x3ba38,
+ 0x3ba40, 0x3ba40,
+ 0x3ba48, 0x3ba50,
+ 0x3ba5c, 0x3ba64,
+ 0x3ba70, 0x3bab8,
+ 0x3bac0, 0x3bae4,
+ 0x3baf8, 0x3bb10,
+ 0x3bb28, 0x3bb28,
+ 0x3bb3c, 0x3bb50,
+ 0x3bbf0, 0x3bc10,
+ 0x3bc28, 0x3bc28,
+ 0x3bc3c, 0x3bc50,
+ 0x3bcf0, 0x3bcfc,
+ 0x3c000, 0x3c030,
+ 0x3c038, 0x3c038,
+ 0x3c040, 0x3c040,
+ 0x3c100, 0x3c144,
+ 0x3c190, 0x3c1a0,
+ 0x3c1a8, 0x3c1b8,
+ 0x3c1c4, 0x3c1c8,
+ 0x3c1d0, 0x3c1d0,
+ 0x3c200, 0x3c318,
+ 0x3c400, 0x3c4b4,
+ 0x3c4c0, 0x3c52c,
+ 0x3c540, 0x3c61c,
+ 0x3c800, 0x3c828,
+ 0x3c834, 0x3c834,
+ 0x3c8c0, 0x3c908,
+ 0x3c910, 0x3c9ac,
+ 0x3ca00, 0x3ca14,
+ 0x3ca1c, 0x3ca2c,
+ 0x3ca44, 0x3ca50,
+ 0x3ca74, 0x3ca74,
+ 0x3ca7c, 0x3cafc,
+ 0x3cb08, 0x3cc24,
+ 0x3cd00, 0x3cd00,
+ 0x3cd08, 0x3cd14,
+ 0x3cd1c, 0x3cd20,
+ 0x3cd3c, 0x3cd3c,
+ 0x3cd48, 0x3cd50,
+ 0x3d200, 0x3d20c,
+ 0x3d220, 0x3d220,
+ 0x3d240, 0x3d240,
+ 0x3d600, 0x3d60c,
+ 0x3da00, 0x3da1c,
+ 0x3de00, 0x3de20,
+ 0x3de38, 0x3de3c,
+ 0x3de80, 0x3de80,
+ 0x3de88, 0x3dea8,
+ 0x3deb0, 0x3deb4,
+ 0x3dec8, 0x3ded4,
+ 0x3dfb8, 0x3e004,
+ 0x3e200, 0x3e200,
+ 0x3e208, 0x3e240,
+ 0x3e248, 0x3e280,
+ 0x3e288, 0x3e2c0,
+ 0x3e2c8, 0x3e2fc,
+ 0x3e600, 0x3e630,
+ 0x3ea00, 0x3eabc,
+ 0x3eb00, 0x3eb10,
+ 0x3eb20, 0x3eb30,
+ 0x3eb40, 0x3eb50,
+ 0x3eb60, 0x3eb70,
+ 0x3f000, 0x3f028,
+ 0x3f030, 0x3f048,
+ 0x3f060, 0x3f068,
+ 0x3f070, 0x3f09c,
+ 0x3f0f0, 0x3f128,
+ 0x3f130, 0x3f148,
+ 0x3f160, 0x3f168,
+ 0x3f170, 0x3f19c,
+ 0x3f1f0, 0x3f238,
+ 0x3f240, 0x3f240,
+ 0x3f248, 0x3f250,
+ 0x3f25c, 0x3f264,
+ 0x3f270, 0x3f2b8,
+ 0x3f2c0, 0x3f2e4,
+ 0x3f2f8, 0x3f338,
+ 0x3f340, 0x3f340,
+ 0x3f348, 0x3f350,
+ 0x3f35c, 0x3f364,
+ 0x3f370, 0x3f3b8,
+ 0x3f3c0, 0x3f3e4,
+ 0x3f3f8, 0x3f428,
+ 0x3f430, 0x3f448,
+ 0x3f460, 0x3f468,
+ 0x3f470, 0x3f49c,
+ 0x3f4f0, 0x3f528,
+ 0x3f530, 0x3f548,
+ 0x3f560, 0x3f568,
+ 0x3f570, 0x3f59c,
+ 0x3f5f0, 0x3f638,
+ 0x3f640, 0x3f640,
+ 0x3f648, 0x3f650,
+ 0x3f65c, 0x3f664,
+ 0x3f670, 0x3f6b8,
+ 0x3f6c0, 0x3f6e4,
+ 0x3f6f8, 0x3f738,
+ 0x3f740, 0x3f740,
+ 0x3f748, 0x3f750,
+ 0x3f75c, 0x3f764,
+ 0x3f770, 0x3f7b8,
+ 0x3f7c0, 0x3f7e4,
+ 0x3f7f8, 0x3f7fc,
+ 0x3f814, 0x3f814,
+ 0x3f82c, 0x3f82c,
+ 0x3f880, 0x3f88c,
+ 0x3f8e8, 0x3f8ec,
+ 0x3f900, 0x3f928,
+ 0x3f930, 0x3f948,
+ 0x3f960, 0x3f968,
+ 0x3f970, 0x3f99c,
+ 0x3f9f0, 0x3fa38,
+ 0x3fa40, 0x3fa40,
+ 0x3fa48, 0x3fa50,
+ 0x3fa5c, 0x3fa64,
+ 0x3fa70, 0x3fab8,
+ 0x3fac0, 0x3fae4,
+ 0x3faf8, 0x3fb10,
+ 0x3fb28, 0x3fb28,
+ 0x3fb3c, 0x3fb50,
+ 0x3fbf0, 0x3fc10,
+ 0x3fc28, 0x3fc28,
+ 0x3fc3c, 0x3fc50,
+ 0x3fcf0, 0x3fcfc,
+ 0x40000, 0x4000c,
+ 0x40040, 0x40050,
+ 0x40060, 0x40068,
+ 0x4007c, 0x4008c,
+ 0x40094, 0x400b0,
+ 0x400c0, 0x40144,
+ 0x40180, 0x4018c,
+ 0x40200, 0x40254,
+ 0x40260, 0x40264,
+ 0x40270, 0x40288,
+ 0x40290, 0x40298,
+ 0x402ac, 0x402c8,
+ 0x402d0, 0x402e0,
+ 0x402f0, 0x402f0,
+ 0x40300, 0x4033c,
+ 0x403f8, 0x403fc,
+ 0x41304, 0x413c4,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x44054,
+ 0x4405c, 0x44078,
+ 0x440c0, 0x44174,
+ 0x44180, 0x441ac,
+ 0x441b4, 0x441b8,
+ 0x441c0, 0x44254,
+ 0x4425c, 0x44278,
+ 0x442c0, 0x44374,
+ 0x44380, 0x443ac,
+ 0x443b4, 0x443b8,
+ 0x443c0, 0x44454,
+ 0x4445c, 0x44478,
+ 0x444c0, 0x44574,
+ 0x44580, 0x445ac,
+ 0x445b4, 0x445b8,
+ 0x445c0, 0x44654,
+ 0x4465c, 0x44678,
+ 0x446c0, 0x44774,
+ 0x44780, 0x447ac,
+ 0x447b4, 0x447b8,
+ 0x447c0, 0x44854,
+ 0x4485c, 0x44878,
+ 0x448c0, 0x44974,
+ 0x44980, 0x449ac,
+ 0x449b4, 0x449b8,
+ 0x449c0, 0x449fc,
+ 0x45000, 0x45004,
+ 0x45010, 0x45030,
+ 0x45040, 0x45060,
+ 0x45068, 0x45068,
+ 0x45080, 0x45084,
+ 0x450a0, 0x450b0,
+ 0x45200, 0x45204,
+ 0x45210, 0x45230,
+ 0x45240, 0x45260,
+ 0x45268, 0x45268,
+ 0x45280, 0x45284,
+ 0x452a0, 0x452b0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x48000, 0x4800c,
+ 0x48040, 0x48050,
+ 0x48060, 0x48068,
+ 0x4807c, 0x4808c,
+ 0x48094, 0x480b0,
+ 0x480c0, 0x48144,
+ 0x48180, 0x4818c,
+ 0x48200, 0x48254,
+ 0x48260, 0x48264,
+ 0x48270, 0x48288,
+ 0x48290, 0x48298,
+ 0x482ac, 0x482c8,
+ 0x482d0, 0x482e0,
+ 0x482f0, 0x482f0,
+ 0x48300, 0x4833c,
+ 0x483f8, 0x483fc,
+ 0x49304, 0x493c4,
+ 0x49400, 0x4940c,
+ 0x49414, 0x4941c,
+ 0x49480, 0x494d0,
+ 0x4c000, 0x4c054,
+ 0x4c05c, 0x4c078,
+ 0x4c0c0, 0x4c174,
+ 0x4c180, 0x4c1ac,
+ 0x4c1b4, 0x4c1b8,
+ 0x4c1c0, 0x4c254,
+ 0x4c25c, 0x4c278,
+ 0x4c2c0, 0x4c374,
+ 0x4c380, 0x4c3ac,
+ 0x4c3b4, 0x4c3b8,
+ 0x4c3c0, 0x4c454,
+ 0x4c45c, 0x4c478,
+ 0x4c4c0, 0x4c574,
+ 0x4c580, 0x4c5ac,
+ 0x4c5b4, 0x4c5b8,
+ 0x4c5c0, 0x4c654,
+ 0x4c65c, 0x4c678,
+ 0x4c6c0, 0x4c774,
+ 0x4c780, 0x4c7ac,
+ 0x4c7b4, 0x4c7b8,
+ 0x4c7c0, 0x4c854,
+ 0x4c85c, 0x4c878,
+ 0x4c8c0, 0x4c974,
+ 0x4c980, 0x4c9ac,
+ 0x4c9b4, 0x4c9b8,
+ 0x4c9c0, 0x4c9fc,
+ 0x4d000, 0x4d004,
+ 0x4d010, 0x4d030,
+ 0x4d040, 0x4d060,
+ 0x4d068, 0x4d068,
+ 0x4d080, 0x4d084,
+ 0x4d0a0, 0x4d0b0,
+ 0x4d200, 0x4d204,
+ 0x4d210, 0x4d230,
+ 0x4d240, 0x4d260,
+ 0x4d268, 0x4d268,
+ 0x4d280, 0x4d284,
+ 0x4d2a0, 0x4d2b0,
+ 0x4e0c0, 0x4e0e4,
+ 0x4f000, 0x4f03c,
+ 0x4f044, 0x4f08c,
+ 0x4f200, 0x4f250,
+ 0x4f400, 0x4f408,
+ 0x4f414, 0x4f420,
+ 0x4f600, 0x4f618,
+ 0x4f800, 0x4f814,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x5101c,
+ 0x51300, 0x51308,
+ };
+
+ static const unsigned int t6_reg_ranges[] = {
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x114c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11b4,
+ 0x11fc, 0x1274,
+ 0x1280, 0x133c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x302c,
+ 0x3060, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x595c,
+ 0x5980, 0x598c,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
+ 0x5ec8, 0x5ed0,
+ 0x5ee0, 0x5ee0,
+ 0x5ef0, 0x5ef0,
+ 0x5f00, 0x5f00,
+ 0x6000, 0x6020,
+ 0x6028, 0x6040,
+ 0x6058, 0x609c,
+ 0x60a8, 0x619c,
+ 0x7700, 0x7798,
+ 0x77c0, 0x7880,
+ 0x78cc, 0x78fc,
+ 0x7b00, 0x7b58,
+ 0x7b60, 0x7b84,
+ 0x7b8c, 0x7c54,
+ 0x7d00, 0x7d38,
+ 0x7d40, 0x7d84,
+ 0x7d8c, 0x7ddc,
+ 0x7de4, 0x7e04,
+ 0x7e10, 0x7e1c,
+ 0x7e24, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e78,
+ 0x7e80, 0x7edc,
+ 0x7ee8, 0x7efc,
+ 0x8dc0, 0x8de4,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e84,
+ 0x8ea0, 0x8f88,
+ 0x8fb8, 0x9058,
+ 0x9060, 0x9060,
+ 0x9068, 0x90f8,
+ 0x9100, 0x9124,
+ 0x9400, 0x9470,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xd100, 0xd118,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xf008,
+ 0xf010, 0xf018,
+ 0xf020, 0xf028,
+ 0x11000, 0x11014,
+ 0x11048, 0x1106c,
+ 0x11074, 0x11088,
+ 0x11098, 0x11120,
+ 0x1112c, 0x1117c,
+ 0x11190, 0x112e0,
+ 0x11300, 0x1130c,
+ 0x12000, 0x1206c,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x19290,
+ 0x192a4, 0x192b0,
+ 0x192bc, 0x192bc,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19418,
+ 0x19420, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cbc,
+ 0x19ce4, 0x19ce4,
+ 0x19cf0, 0x19cf8,
+ 0x19d00, 0x19d28,
+ 0x19d50, 0x19d78,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19dc8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fac,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a0f8,
+ 0x1a100, 0x1a108,
+ 0x1a114, 0x1a120,
+ 0x1a128, 0x1a130,
+ 0x1a138, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30030,
+ 0x30100, 0x30168,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301d0,
+ 0x30200, 0x30320,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x308a0,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309b8,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a14,
+ 0x30a1c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30a74,
+ 0x30a7c, 0x30afc,
+ 0x30b08, 0x30c24,
+ 0x30d00, 0x30d14,
+ 0x30d1c, 0x30d3c,
+ 0x30d44, 0x30d4c,
+ 0x30d54, 0x30d74,
+ 0x30d7c, 0x30d7c,
+ 0x30de0, 0x30de0,
+ 0x30e00, 0x30ed4,
+ 0x30f00, 0x30fa4,
+ 0x30fc0, 0x30fc4,
+ 0x31000, 0x31004,
+ 0x31080, 0x310fc,
+ 0x31208, 0x31220,
+ 0x3123c, 0x31254,
+ 0x31300, 0x31300,
+ 0x31308, 0x3131c,
+ 0x31338, 0x3133c,
+ 0x31380, 0x31380,
+ 0x31388, 0x313a8,
+ 0x313b4, 0x313b4,
+ 0x31400, 0x31420,
+ 0x31438, 0x3143c,
+ 0x31480, 0x31480,
+ 0x314a8, 0x314a8,
+ 0x314b0, 0x314b4,
+ 0x314c8, 0x314d4,
+ 0x31a40, 0x31a4c,
+ 0x31af0, 0x31b20,
+ 0x31b38, 0x31b3c,
+ 0x31b80, 0x31b80,
+ 0x31ba8, 0x31ba8,
+ 0x31bb0, 0x31bb4,
+ 0x31bc8, 0x31bd4,
+ 0x32140, 0x3218c,
+ 0x321f0, 0x321f4,
+ 0x32200, 0x32200,
+ 0x32218, 0x32218,
+ 0x32400, 0x32400,
+ 0x32408, 0x3241c,
+ 0x32618, 0x32620,
+ 0x32664, 0x32664,
+ 0x326a8, 0x326a8,
+ 0x326ec, 0x326ec,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b38,
+ 0x32b20, 0x32b38,
+ 0x32b40, 0x32b58,
+ 0x32b60, 0x32b78,
+ 0x32c00, 0x32c00,
+ 0x32c08, 0x32c3c,
+ 0x33000, 0x3302c,
+ 0x33034, 0x33050,
+ 0x33058, 0x33058,
+ 0x33060, 0x3308c,
+ 0x3309c, 0x330ac,
+ 0x330c0, 0x330c0,
+ 0x330c8, 0x330d0,
+ 0x330d8, 0x330e0,
+ 0x330ec, 0x3312c,
+ 0x33134, 0x33150,
+ 0x33158, 0x33158,
+ 0x33160, 0x3318c,
+ 0x3319c, 0x331ac,
+ 0x331c0, 0x331c0,
+ 0x331c8, 0x331d0,
+ 0x331d8, 0x331e0,
+ 0x331ec, 0x33290,
+ 0x33298, 0x332c4,
+ 0x332e4, 0x33390,
+ 0x33398, 0x333c4,
+ 0x333e4, 0x3342c,
+ 0x33434, 0x33450,
+ 0x33458, 0x33458,
+ 0x33460, 0x3348c,
+ 0x3349c, 0x334ac,
+ 0x334c0, 0x334c0,
+ 0x334c8, 0x334d0,
+ 0x334d8, 0x334e0,
+ 0x334ec, 0x3352c,
+ 0x33534, 0x33550,
+ 0x33558, 0x33558,
+ 0x33560, 0x3358c,
+ 0x3359c, 0x335ac,
+ 0x335c0, 0x335c0,
+ 0x335c8, 0x335d0,
+ 0x335d8, 0x335e0,
+ 0x335ec, 0x33690,
+ 0x33698, 0x336c4,
+ 0x336e4, 0x33790,
+ 0x33798, 0x337c4,
+ 0x337e4, 0x337fc,
+ 0x33814, 0x33814,
+ 0x33854, 0x33868,
+ 0x33880, 0x3388c,
+ 0x338c0, 0x338d0,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x3392c,
+ 0x33934, 0x33950,
+ 0x33958, 0x33958,
+ 0x33960, 0x3398c,
+ 0x3399c, 0x339ac,
+ 0x339c0, 0x339c0,
+ 0x339c8, 0x339d0,
+ 0x339d8, 0x339e0,
+ 0x339ec, 0x33a90,
+ 0x33a98, 0x33ac4,
+ 0x33ae4, 0x33b10,
+ 0x33b24, 0x33b28,
+ 0x33b38, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c24, 0x33c28,
+ 0x33c38, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34030,
+ 0x34100, 0x34168,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341d0,
+ 0x34200, 0x34320,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x348a0,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349b8,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a14,
+ 0x34a1c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34a74,
+ 0x34a7c, 0x34afc,
+ 0x34b08, 0x34c24,
+ 0x34d00, 0x34d14,
+ 0x34d1c, 0x34d3c,
+ 0x34d44, 0x34d4c,
+ 0x34d54, 0x34d74,
+ 0x34d7c, 0x34d7c,
+ 0x34de0, 0x34de0,
+ 0x34e00, 0x34ed4,
+ 0x34f00, 0x34fa4,
+ 0x34fc0, 0x34fc4,
+ 0x35000, 0x35004,
+ 0x35080, 0x350fc,
+ 0x35208, 0x35220,
+ 0x3523c, 0x35254,
+ 0x35300, 0x35300,
+ 0x35308, 0x3531c,
+ 0x35338, 0x3533c,
+ 0x35380, 0x35380,
+ 0x35388, 0x353a8,
+ 0x353b4, 0x353b4,
+ 0x35400, 0x35420,
+ 0x35438, 0x3543c,
+ 0x35480, 0x35480,
+ 0x354a8, 0x354a8,
+ 0x354b0, 0x354b4,
+ 0x354c8, 0x354d4,
+ 0x35a40, 0x35a4c,
+ 0x35af0, 0x35b20,
+ 0x35b38, 0x35b3c,
+ 0x35b80, 0x35b80,
+ 0x35ba8, 0x35ba8,
+ 0x35bb0, 0x35bb4,
+ 0x35bc8, 0x35bd4,
+ 0x36140, 0x3618c,
+ 0x361f0, 0x361f4,
+ 0x36200, 0x36200,
+ 0x36218, 0x36218,
+ 0x36400, 0x36400,
+ 0x36408, 0x3641c,
+ 0x36618, 0x36620,
+ 0x36664, 0x36664,
+ 0x366a8, 0x366a8,
+ 0x366ec, 0x366ec,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b38,
+ 0x36b20, 0x36b38,
+ 0x36b40, 0x36b58,
+ 0x36b60, 0x36b78,
+ 0x36c00, 0x36c00,
+ 0x36c08, 0x36c3c,
+ 0x37000, 0x3702c,
+ 0x37034, 0x37050,
+ 0x37058, 0x37058,
+ 0x37060, 0x3708c,
+ 0x3709c, 0x370ac,
+ 0x370c0, 0x370c0,
+ 0x370c8, 0x370d0,
+ 0x370d8, 0x370e0,
+ 0x370ec, 0x3712c,
+ 0x37134, 0x37150,
+ 0x37158, 0x37158,
+ 0x37160, 0x3718c,
+ 0x3719c, 0x371ac,
+ 0x371c0, 0x371c0,
+ 0x371c8, 0x371d0,
+ 0x371d8, 0x371e0,
+ 0x371ec, 0x37290,
+ 0x37298, 0x372c4,
+ 0x372e4, 0x37390,
+ 0x37398, 0x373c4,
+ 0x373e4, 0x3742c,
+ 0x37434, 0x37450,
+ 0x37458, 0x37458,
+ 0x37460, 0x3748c,
+ 0x3749c, 0x374ac,
+ 0x374c0, 0x374c0,
+ 0x374c8, 0x374d0,
+ 0x374d8, 0x374e0,
+ 0x374ec, 0x3752c,
+ 0x37534, 0x37550,
+ 0x37558, 0x37558,
+ 0x37560, 0x3758c,
+ 0x3759c, 0x375ac,
+ 0x375c0, 0x375c0,
+ 0x375c8, 0x375d0,
+ 0x375d8, 0x375e0,
+ 0x375ec, 0x37690,
+ 0x37698, 0x376c4,
+ 0x376e4, 0x37790,
+ 0x37798, 0x377c4,
+ 0x377e4, 0x377fc,
+ 0x37814, 0x37814,
+ 0x37854, 0x37868,
+ 0x37880, 0x3788c,
+ 0x378c0, 0x378d0,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x3792c,
+ 0x37934, 0x37950,
+ 0x37958, 0x37958,
+ 0x37960, 0x3798c,
+ 0x3799c, 0x379ac,
+ 0x379c0, 0x379c0,
+ 0x379c8, 0x379d0,
+ 0x379d8, 0x379e0,
+ 0x379ec, 0x37a90,
+ 0x37a98, 0x37ac4,
+ 0x37ae4, 0x37b10,
+ 0x37b24, 0x37b28,
+ 0x37b38, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c24, 0x37c28,
+ 0x37c38, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x40040, 0x40040,
+ 0x40080, 0x40084,
+ 0x40100, 0x40100,
+ 0x40140, 0x401bc,
+ 0x40200, 0x40214,
+ 0x40228, 0x40228,
+ 0x40240, 0x40258,
+ 0x40280, 0x40280,
+ 0x40304, 0x40304,
+ 0x40330, 0x4033c,
+ 0x41304, 0x413c8,
+ 0x413d0, 0x413dc,
+ 0x413f0, 0x413f0,
+ 0x41400, 0x4140c,
+ 0x41414, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x4407c,
+ 0x440c0, 0x441ac,
+ 0x441b4, 0x4427c,
+ 0x442c0, 0x443ac,
+ 0x443b4, 0x4447c,
+ 0x444c0, 0x445ac,
+ 0x445b4, 0x4467c,
+ 0x446c0, 0x447ac,
+ 0x447b4, 0x4487c,
+ 0x448c0, 0x449ac,
+ 0x449b4, 0x44a7c,
+ 0x44ac0, 0x44bac,
+ 0x44bb4, 0x44c7c,
+ 0x44cc0, 0x44dac,
+ 0x44db4, 0x44e7c,
+ 0x44ec0, 0x44fac,
+ 0x44fb4, 0x4507c,
+ 0x450c0, 0x451ac,
+ 0x451b4, 0x451fc,
+ 0x45800, 0x45804,
+ 0x45810, 0x45830,
+ 0x45840, 0x45860,
+ 0x45868, 0x45868,
+ 0x45880, 0x45884,
+ 0x458a0, 0x458b0,
+ 0x45a00, 0x45a04,
+ 0x45a10, 0x45a30,
+ 0x45a40, 0x45a60,
+ 0x45a68, 0x45a68,
+ 0x45a80, 0x45a84,
+ 0x45aa0, 0x45ab0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4703c,
+ 0x47044, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47408,
+ 0x47414, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x47814,
+ 0x47820, 0x4782c,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
+ 0x50400, 0x50400,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x51020,
+ 0x51028, 0x510b0,
+ 0x51300, 0x51324,
+ };
+
+ u32 *buf_end = (u32 *)((char *)buf + buf_size);
+ const unsigned int *reg_ranges;
+ int reg_ranges_size, range;
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /* Select the right set of register ranges to dump depending on the
+ * adapter chip type.
+ */
+ switch (chip_version) {
+ case CHELSIO_T5:
+ reg_ranges = t5_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+ break;
+
+ case CHELSIO_T6:
+ reg_ranges = t6_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ break;
+
+ default:
+ dev_err(adap,
+ "Unsupported chip version %d\n", chip_version);
+ return;
+ }
+
+ /* Clear the register buffer and insert the appropriate register
+ * values selected by the above register ranges.
+ */
+ memset(buf, 0, buf_size);
+ for (range = 0; range < reg_ranges_size; range += 2) {
+ unsigned int reg = reg_ranges[range];
+ unsigned int last_reg = reg_ranges[range + 1];
+ u32 *bufp = (u32 *)((char *)buf + reg);
+
+ /* Iterate across the register range filling in the register
+ * buffer but don't write past the end of the register buffer.
+ */
+ while (reg <= last_reg && bufp < buf_end) {
+ *bufp++ = t4_read_reg(adap, reg);
+ reg += sizeof(u32);
+ }
+ }
+}
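+
+/*
+ * Usage sketch (illustrative): since t4_get_regs() silently truncates to
+ * the buffer it is given, callers are expected to size the buffer with
+ * t4_get_regs_len() first:
+ *
+ *	size_t len = t4_get_regs_len(adap);
+ *	void *regs = rte_zmalloc(NULL, len, 0);
+ *
+ *	if (regs) {
+ *		t4_get_regs(adap, regs, len);
+ *		...consume the dump, then rte_free(regs)...
+ *	}
+ */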
+
+/* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
+#define EEPROM_DELAY 10 /* 10us per poll spin */
+#define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
+
+#define EEPROM_STAT_ADDR 0x7bfc
+
+/**
+ * t4_seeprom_wait - wait till any outstanding VPD Access is complete
+ * @adapter: the adapter
+ *
+ * We have a per-adapter state variable "VPD Busy" to indicate when we have a
+ * VPD Access in flight. This allows us to handle the problem of having a
+ * previous VPD Access time out and prevent an attempt to inject a new VPD
+ * Request before any in-flight VPD request has completed.
+ */
+static int t4_seeprom_wait(struct adapter *adapter)
+{
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+ int max_poll;
+
+ /* If no VPD Access is in flight, we can just return success right
+ * away.
+ */
+ if (!adapter->vpd_busy)
+ return 0;
+
+ /* Poll the VPD Capability Address/Flag register waiting for it
+ * to indicate that the operation is complete.
+ */
+ max_poll = EEPROM_MAX_POLL;
+ do {
+ u16 val;
+
+ udelay(EEPROM_DELAY);
+ t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
+
+ /* If the operation is complete, mark the VPD as no longer
+ * busy and return success.
+ */
+ if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
+ adapter->vpd_busy = 0;
+ return 0;
+ }
+ } while (--max_poll);
+
+ /* Failure! Note that we leave the VPD Busy status set in order to
+ * avoid pushing a new VPD Access request into the VPD Capability till
+ * the current operation eventually succeeds. It's a bug to issue a
+ * new request when an existing request is in flight and will result
+ * in corrupt hardware state.
+ */
+ return -ETIMEDOUT;
+}
+
+/**
+ * t4_seeprom_read - read a serial EEPROM location
+ * @adapter: adapter to read
+ * @addr: EEPROM virtual address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
+{
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+ int ret;
+
+ /* VPD Accesses must always be 4-byte aligned!
+ */
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ /* Wait for any previous operation which may still be in flight to
+ * complete.
+ */
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD still busy from previous operation\n");
+ return ret;
+ }
+
+ /* Issue our new VPD Read request, mark the VPD as being busy and wait
+ * for our request to complete. If it doesn't complete, note the
+ * error and return it to our caller. Note that we do not reset the
+ * VPD Busy status!
+ */
+ t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
+ adapter->vpd_busy = 1;
+ adapter->vpd_flag = PCI_VPD_ADDR_F;
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD read of address %#x failed\n", addr);
+ return ret;
+ }
+
+ /* Grab the returned data, swizzle it into our endianness and
+ * return success.
+ */
+ t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
+ *data = le32_to_cpu(*data);
+ return 0;
+}
+
+/**
+ * t4_seeprom_write - write a serial EEPROM location
+ * @adapter: adapter to write
+ * @addr: virtual EEPROM address
+ * @data: value to write
+ *
+ * Write a 32-bit word to a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
+{
+ unsigned int base = adapter->params.pci.vpd_cap_addr;
+ int ret;
+ u32 stats_reg = 0;
+ int max_poll;
+
+ /* VPD Accesses must always be 4-byte aligned!
+ */
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ /* Wait for any previous operation which may still be in flight to
+ * complete.
+ */
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD still busy from previous operation\n");
+ return ret;
+ }
+
+ /* Issue our new VPD Write request, mark the VPD as being busy and wait
+ * for our request to complete. If it doesn't complete, note the
+ * error and return it to our caller. Note that we do not reset the
+ * VPD Busy status!
+ */
+ t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
+ cpu_to_le32(data));
+ t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
+ (u16)addr | PCI_VPD_ADDR_F);
+ adapter->vpd_busy = 1;
+ adapter->vpd_flag = 0;
+ ret = t4_seeprom_wait(adapter);
+ if (ret) {
+ dev_err(adapter, "VPD write of address %#x failed\n", addr);
+ return ret;
+ }
+
+ /* Reset PCI_VPD_DATA register after a transaction and wait for our
+ * request to complete. If it doesn't complete, return error.
+ */
+ t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
+ max_poll = EEPROM_MAX_POLL;
+ do {
+ udelay(EEPROM_DELAY);
+ t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
+ } while ((stats_reg & 0x1) && --max_poll);
+ if (!max_poll)
+ return -ETIMEDOUT;
+
+ /* Return success! */
+ return 0;
+}
+
+/**
+ * t4_seeprom_wp - enable/disable EEPROM write protection
+ * @adapter: the adapter
+ * @enable: whether to enable or disable write protection
+ *
+ * Enables or disables write protection on the serial EEPROM.
+ */
+int t4_seeprom_wp(struct adapter *adapter, int enable)
+{
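+ /* Assumption for this note: 0xc sets the serial EEPROM's
+ * block-protect bits via the status word at EEPROM_STAT_ADDR;
+ * writing 0 clears them and re-enables writes.
+ */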
+ return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
+}
+
+/**
+ * t4_fw_tp_pio_rw - Access TP PIO through LDST
+ * @adap: the adapter
+ * @vals: where the indirect register values are stored/written
+ * @nregs: how many indirect registers to read/write
+ * @start_index: index of first indirect register to read/write
+ * @rw: Read (1) or Write (0)
+ *
+ * Access TP PIO registers through LDST
+ */
+void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
+ unsigned int start_index, unsigned int rw)
+{
+ int cmd = FW_LDST_ADDRSPC_TP_PIO;
+ struct fw_ldst_cmd c;
+ unsigned int i;
+ int ret;
+
+ for (i = 0 ; i < nregs; i++) {
+ memset(&c, 0, sizeof(c));
+ c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST |
+ (rw ? F_FW_CMD_READ :
+ F_FW_CMD_WRITE) |
+ V_FW_LDST_CMD_ADDRSPACE(cmd));
+ c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
+
+ c.u.addrval.addr = cpu_to_be32(start_index + i);
+ c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret == 0) {
+ if (rw)
+ vals[i] = be32_to_cpu(c.u.addrval.val);
+ }
+ }
+}
+
+/**
+ * t4_read_rss_key - read the global RSS key
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ *
+ * Reads the global 320-bit RSS key.
+ */
+void t4_read_rss_key(struct adapter *adap, u32 *key)
+{
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
+}
+
+/**
+ * t4_write_rss_key - program one of the RSS keys
+ * @adap: the adapter
+ * @key: 10-entry array holding the 320-bit RSS key
+ * @idx: which RSS key to write
+ *
+ * Writes one of the RSS keys with the given 320-bit value. If @idx is
+ * 0..15 the corresponding entry in the RSS key table is written,
+ * otherwise the global RSS key is written.
+ */
+void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
+{
+ u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
+ u8 rss_key_addr_cnt = 16;
+
+ /* On T6 and later, KeyMode 3 (per-VF and per-VF scramble) allows
+ * access to key addresses 16-63 by using KeyWrAddrX as bits [5:4]
+ * (the upper two bits) of the key table index.
+ */
+ if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+ (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
+ rss_key_addr_cnt = 32;
+
+ t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
+
+ if (idx >= 0 && idx < rss_key_addr_cnt) {
+ if (rss_key_addr_cnt > 16)
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDRX(idx >> 4) |
+ V_T6_VFWRADDR(idx) | F_KEYWREN);
+ else
+ t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
+ V_KEYWRADDR(idx) | F_KEYWREN);
+ }
+}
+
+/**
+ * t4_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: virtual interface whose RSS subtable is to be written
+ * @start: start entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the "response queue" (Ingress Queue) lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values. If @nrspq < @n the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range allowed for
+ * @viid.
+ */
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+ int ret;
+ const u16 *rsp = rspq;
+ const u16 *rsp_end = rspq + nrspq;
+ struct fw_rss_ind_tbl_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+ /*
+ * Each firmware RSS command can accommodate up to 32 RSS Ingress
+ * Queue Identifiers. These Ingress Queue IDs are packed three to
+ * a 32-bit word as 10-bit values with the upper remaining 2 bits
+ * reserved.
+ */
+ while (n > 0) {
+ int nq = min(n, 32);
+ int nq_packed = 0;
+ __be32 *qp = &cmd.iq0_to_iq2;
+
+ /*
+ * Set up the firmware RSS command header to send the next
+ * "nq" Ingress Queue IDs to the firmware.
+ */
+ cmd.niqid = cpu_to_be16(nq);
+ cmd.startidx = cpu_to_be16(start);
+
+ /*
+ * "nq" more done for the start of the next loop.
+ */
+ start += nq;
+ n -= nq;
+
+ /*
+ * While there are still Ingress Queue IDs to stuff into the
+ * current firmware RSS command, retrieve them from the
+ * Ingress Queue ID array and insert them into the command.
+ */
+ while (nq > 0) {
+ /*
+ * Grab up to the next 3 Ingress Queue IDs (wrapping
+ * around the Ingress Queue ID array if necessary) and
+ * insert them into the firmware RSS command at the
+ * current 3-tuple position within the command.
+ */
+ u16 qbuf[3];
+ u16 *qbp = qbuf;
+ int nqbuf = min(3, nq);
+
+ nq -= nqbuf;
+ qbuf[0] = 0;
+ qbuf[1] = 0;
+ qbuf[2] = 0;
+ while (nqbuf && nq_packed < 32) {
+ nqbuf--;
+ nq_packed++;
+ *qbp++ = *rsp++;
+ if (rsp >= rsp_end)
+ rsp = rspq;
+ }
+ *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
+ V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
+ V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+ }
+
+ /*
+ * Send this portion of the RSS table update to the firmware;
+ * bail out on any errors.
+ */
+ if (is_pf4(adapter))
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
+ NULL);
+ else
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
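+
+/*
+ * Usage sketch (illustrative; names schematic): because the supplied
+ * values are reused when @nrspq < @n, a caller can populate a VI's whole
+ * indirection table from a short list of ingress queue ids:
+ *
+ *	u16 iqs[nrxq];		(absolute ids of the VI's Rx queues)
+ *
+ *	...fill iqs[]...
+ *	err = t4_config_rss_range(adap, adap->mbox, viid, 0,
+ *				  rss_table_size, iqs, nrxq);
+ */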
+
+/**
+ * t4_config_vi_rss - configure per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: RSS flags
+ * @defq: id of the default RSS queue for the VI.
+ *
+ * Configures VI-specific RSS properties.
+ */
+int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ unsigned int flags, unsigned int defq)
+{
+ struct fw_rss_vi_config_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
+ V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
+ if (is_pf4(adapter))
+ return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_read_config_vi_rss - read the configured per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: where to place the configured flags
+ * @defq: where to place the id of the default RSS queue for the VI.
+ *
+ * Read configured VI-specific RSS properties.
+ */
+int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
+ u64 *flags, unsigned int *defq)
+{
+ struct fw_rss_vi_config_cmd c;
+ unsigned int result;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
+ if (!ret) {
+ result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
+ if (defq)
+ *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
+ if (flags)
+ *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
+ }
+
+ return ret;
+}
+
+/**
+ * init_cong_ctrl - initialize congestion control parameters
+ * @a: the alpha values for congestion control
+ * @b: the beta values for congestion control
+ *
+ * Initialize the congestion control parameters.
+ */
+static void init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+ int i;
+
+ for (i = 0; i < 9; i++) {
+ a[i] = 1;
+ b[i] = 0;
+ }
+
+ a[9] = 2;
+ a[10] = 3;
+ a[11] = 4;
+ a[12] = 5;
+ a[13] = 6;
+ a[14] = 7;
+ a[15] = 8;
+ a[16] = 9;
+ a[17] = 10;
+ a[18] = 14;
+ a[19] = 17;
+ a[20] = 21;
+ a[21] = 25;
+ a[22] = 30;
+ a[23] = 35;
+ a[24] = 45;
+ a[25] = 60;
+ a[26] = 80;
+ a[27] = 100;
+ a[28] = 200;
+ a[29] = 300;
+ a[30] = 400;
+ a[31] = 500;
+
+ b[9] = 1;
+ b[10] = 1;
+ b[11] = 2;
+ b[12] = 2;
+ b[13] = 3;
+ b[14] = 3;
+ b[15] = 3;
+ b[16] = 3;
+ b[17] = 4;
+ b[18] = 4;
+ b[19] = 4;
+ b[20] = 4;
+ b[21] = 4;
+ b[22] = 5;
+ b[23] = 5;
+ b[24] = 5;
+ b[25] = 5;
+ b[26] = 5;
+ b[27] = 5;
+ b[28] = 6;
+ b[29] = 6;
+ b[30] = 7;
+ b[31] = 7;
+}
+
+#define INIT_CMD(var, cmd, rd_wr) do { \
+ (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
+ F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
+ (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
+} while (0)
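+
+/*
+ * For example, INIT_CMD(c, RESET, WRITE) on a struct fw_reset_cmd "c"
+ * expands to:
+ *
+ *	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
+ *				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ *	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ */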
+
+int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
+{
+ u32 cclk_param, cclk_val;
+ int ret;
+
+ /*
+ * Ask firmware for the Core Clock since it knows how to translate the
+ * Reference Clock ('V2') VPD field into a Core Clock value ...
+ */
+ cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
+ 1, &cclk_param, &cclk_val);
+ if (ret) {
+ dev_err(adapter, "%s: error fetching Core Clock from firmware: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ p->cclk = cclk_val;
+ dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
+ return 0;
+}
+
+/**
+ * t4_get_pfres - retrieve PF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for this
+ * physical function. The results are stored in @adapter->params.pfres.
+ */
+int t4_get_pfres(struct adapter *adapter)
+{
+ struct pf_resources *pfres = &adapter->params.pfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get PF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PFVF_CMD_PFN(adapter->pf) |
+ V_FW_PFVF_CMD_VFN(0));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract PF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ pfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ return 0;
+}
+
+/* serial flash and firmware constants and flash config file constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+};
+
+/**
+ * sf1_read - read data from the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t4_write_reg(adapter, A_SF_OP,
+ V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
+ if (!ret)
+ *valp = t4_read_reg(adapter, A_SF_DATA);
+ return ret;
+}
+
+/**
+ * sf1_write - write data to the serial flash
+ * @adapter: the adapter
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+ int lock, u32 val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
+ return -EBUSY;
+ t4_write_reg(adapter, A_SF_DATA, val);
+ t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
+ V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
+ return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
+}
+
+/**
+ * t4_read_flash - read words from serial flash
+ * @adapter: the adapter
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ unsigned int nwords, u32 *data, int byte_oriented)
+{
+ int ret;
+
+ if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
+ (addr & 3))
+ return -EINVAL;
+
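+	/*
+	 * The flash expects the opcode first, then the 24-bit address
+	 * most-significant byte first.  Since @addr is below the 16MB
+	 * flash size, byte-swapping it frees the low byte for the
+	 * SF_RD_DATA_FAST opcode and lays the address out in the order
+	 * the SF controller shifts it onto the wire (low byte first).
+	 */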
+ addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
+
+ ret = sf1_write(adapter, 4, 1, 0, addr);
+ if (ret != 0)
+ return ret;
+
+ ret = sf1_read(adapter, 1, 1, 0, data);
+ if (ret != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = cpu_to_be32(*data);
+ }
+ return 0;
+}
+
+/**
+ * t4_get_exprom_version - return the Expansion ROM version (if any)
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the Expansion ROM header from FLASH and returns the version
+ * number (if present) through the @vers return value pointer. We return
+ * this in the Firmware Version Format since it's convenient. Return
+ * 0 on success, -ENOENT if no Expansion ROM is present.
+ */
+static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
+{
+ struct exprom_header {
+ unsigned char hdr_arr[16]; /* must start with 0x55aa */
+ unsigned char hdr_ver[4]; /* Expansion ROM version */
+ } *hdr;
+ u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
+ sizeof(u32))];
+ int ret;
+
+ ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
+ ARRAY_SIZE(exprom_header_buf),
+ exprom_header_buf, 0);
+ if (ret)
+ return ret;
+
+ hdr = (struct exprom_header *)exprom_header_buf;
+ if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
+ return -ENOENT;
+
+ *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
+ V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
+ V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
+ V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
+ return 0;
+}
+
+/**
+ * t4_get_fw_version - read the firmware version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FW_START +
+ offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
+}
+
+/**
+ * t4_get_bs_version - read the firmware bootstrap version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW Bootstrap version from flash.
+ */
+static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/**
+ * t4_get_tp_version - read the TP microcode version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FW_START +
+ offsetof(struct fw_hdr, tp_microcode_ver),
+ 1, vers, 0);
+}
+
+/**
+ * t4_get_version_info - extract various chip/firmware version information
+ * @adapter: the adapter
+ *
+ * Reads various chip/firmware version numbers and stores them into the
+ * adapter's Adapter Parameters structure. If any of the reads fails,
+ * the first error will be returned, but all of the version numbers
+ * will still be read.
+ */
+int t4_get_version_info(struct adapter *adapter)
+{
+ int ret = 0;
+
+#define FIRST_RET(__getvinfo) \
+ do { \
+ int __ret = __getvinfo; \
+ if (__ret && !ret) \
+ ret = __ret; \
+ } while (0)
+
+ FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
+ FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
+ FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
+ FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
+
+#undef FIRST_RET
+
+ return ret;
+}
+
+/**
+ * t4_dump_version_info - dump all of the adapter configuration IDs
+ * @adapter: the adapter
+ *
+ * Dumps all of the various bits of adapter configuration version/revision
+ * IDs information. This is typically called at some point after
+ * t4_get_version_info() has been called.
+ */
+void t4_dump_version_info(struct adapter *adapter)
+{
+ /**
+ * Device information.
+ */
+ dev_info(adapter, "Chelsio rev %d\n",
+ CHELSIO_CHIP_RELEASE(adapter->params.chip));
+
+ /**
+ * Firmware Version.
+ */
+ if (!adapter->params.fw_vers)
+ dev_warn(adapter, "No firmware loaded\n");
+ else
+ dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+ /**
+ * Bootstrap Firmware Version.
+ */
+ if (!adapter->params.bs_vers)
+ dev_warn(adapter, "No bootstrap loaded\n");
+ else
+ dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
+
+ /**
+ * TP Microcode Version.
+ */
+ if (!adapter->params.tp_vers)
+ dev_warn(adapter, "No TP Microcode loaded\n");
+ else
+ dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+
+ /**
+ * Expansion ROM version.
+ */
+ if (!adapter->params.er_vers)
+ dev_info(adapter, "No Expansion ROM loaded\n");
+ else
+ dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
+}
+
+#define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
+ FW_PORT_CAP32_ANEG)
+/**
+ * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
+ * @caps16: a 16-bit Port Capabilities value
+ *
+ * Returns the equivalent 32-bit Port Capabilities value.
+ */
+fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
+{
+ fw_port_cap32_t caps32 = 0;
+
+#define CAP16_TO_CAP32(__cap) \
+ do { \
+ if (caps16 & FW_PORT_CAP_##__cap) \
+ caps32 |= FW_PORT_CAP32_##__cap; \
+ } while (0)
+
+ CAP16_TO_CAP32(SPEED_100M);
+ CAP16_TO_CAP32(SPEED_1G);
+ CAP16_TO_CAP32(SPEED_25G);
+ CAP16_TO_CAP32(SPEED_10G);
+ CAP16_TO_CAP32(SPEED_40G);
+ CAP16_TO_CAP32(SPEED_100G);
+ CAP16_TO_CAP32(FC_RX);
+ CAP16_TO_CAP32(FC_TX);
+ CAP16_TO_CAP32(ANEG);
+ CAP16_TO_CAP32(MDIX);
+ CAP16_TO_CAP32(MDIAUTO);
+ CAP16_TO_CAP32(FEC_RS);
+ CAP16_TO_CAP32(FEC_BASER_RS);
+ CAP16_TO_CAP32(802_3_PAUSE);
+ CAP16_TO_CAP32(802_3_ASM_DIR);
+
+#undef CAP16_TO_CAP32
+
+ return caps32;
+}
+
+/**
+ * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
+ * @caps32: a 32-bit Port Capabilities value
+ *
+ * Returns the equivalent 16-bit Port Capabilities value. Note that
+ * not all 32-bit Port Capabilities can be represented in the 16-bit
+ * Port Capabilities and some fields/values may not make it.
+ */
+static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
+{
+ fw_port_cap16_t caps16 = 0;
+
+#define CAP32_TO_CAP16(__cap) \
+ do { \
+ if (caps32 & FW_PORT_CAP32_##__cap) \
+ caps16 |= FW_PORT_CAP_##__cap; \
+ } while (0)
+
+ CAP32_TO_CAP16(SPEED_100M);
+ CAP32_TO_CAP16(SPEED_1G);
+ CAP32_TO_CAP16(SPEED_10G);
+ CAP32_TO_CAP16(SPEED_25G);
+ CAP32_TO_CAP16(SPEED_40G);
+ CAP32_TO_CAP16(SPEED_100G);
+ CAP32_TO_CAP16(FC_RX);
+ CAP32_TO_CAP16(FC_TX);
+ CAP32_TO_CAP16(802_3_PAUSE);
+ CAP32_TO_CAP16(802_3_ASM_DIR);
+ CAP32_TO_CAP16(ANEG);
+ CAP32_TO_CAP16(MDIX);
+ CAP32_TO_CAP16(MDIAUTO);
+ CAP32_TO_CAP16(FEC_RS);
+ CAP32_TO_CAP16(FEC_BASER_RS);
+
+#undef CAP32_TO_CAP16
+
+ return caps16;
+}
+
+/* Translate Firmware Pause specification to Common Code */
+static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
+{
+ enum cc_pause cc_pause = 0;
+
+ if (fw_pause & FW_PORT_CAP32_FC_RX)
+ cc_pause |= PAUSE_RX;
+ if (fw_pause & FW_PORT_CAP32_FC_TX)
+ cc_pause |= PAUSE_TX;
+
+ return cc_pause;
+}
+
+/* Translate Common Code Pause Frame specification into Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
+{
+ fw_port_cap32_t fw_pause = 0;
+
+ if (cc_pause & PAUSE_RX)
+ fw_pause |= FW_PORT_CAP32_FC_RX;
+ if (cc_pause & PAUSE_TX)
+ fw_pause |= FW_PORT_CAP32_FC_TX;
+
+ return fw_pause;
+}
+
+/* Translate Firmware Forward Error Correction specification to Common Code */
+static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
+{
+ enum cc_fec cc_fec = 0;
+
+ if (fw_fec & FW_PORT_CAP32_FEC_RS)
+ cc_fec |= FEC_RS;
+ if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
+ cc_fec |= FEC_BASER_RS;
+
+ return cc_fec;
+}
+
+/* Translate Common Code Forward Error Correction specification to Firmware */
+static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
+{
+ fw_port_cap32_t fw_fec = 0;
+
+ if (cc_fec & FEC_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_RS;
+ if (cc_fec & FEC_BASER_RS)
+ fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
+
+ return fw_fec;
+}
+
+/**
+ * t4_link_l1cfg - apply link configuration to MAC/PHY
+ * @adap: the adapter
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc)
+{
+ unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
+ struct fw_port_cmd cmd;
+
+ lc->link_ok = 0;
+
+ fw_fc = cc_to_fwcap_pause(lc->requested_fc);
+
+	/* Convert Common Code Forward Error Correction settings into the
+	 * Firmware's API. If the current Requested FEC has "Automatic"
+	 * (IEEE 802.3) specified, then we use whatever the Firmware
+	 * sent us as part of its IEEE 802.3-based interpretation of
+ * the Transceiver Module EPROM FEC parameters. Otherwise we
+ * use whatever is in the current Requested FEC settings.
+ */
+ if (lc->requested_fec & FEC_AUTO)
+ cc_fec = lc->auto_fec;
+ else
+ cc_fec = lc->requested_fec;
+ fw_fec = cc_to_fwcap_fec(cc_fec);
+
+ /* Figure out what our Requested Port Capabilities are going to be.
+ */
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+ rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = cc_fec;
+ } else if (lc->autoneg == AUTONEG_DISABLE) {
+ rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi;
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = cc_fec;
+ } else {
+ rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
+ }
+
+ /* And send that on to the Firmware ...
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_PORT_CMD_PORTID(port));
+ cmd.action_to_len16 =
+ cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_L1_CFG :
+ FW_PORT_ACTION_L1_CFG32) |
+ FW_LEN16(cmd));
+
+ if (fw_caps == FW_CAPS16)
+ cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
+ else
+ cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
+
+ return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4_flash_cfg_addr - return the address of the flash configuration file
+ * @adapter: the adapter
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored, or an error if the device FLASH is too small to contain
+ * a Firmware Configuration File.
+ */
+int t4_flash_cfg_addr(struct adapter *adapter)
+{
+ /*
+ * If the device FLASH isn't large enough to hold a Firmware
+ * Configuration File, return an error.
+ */
+ if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
+ return -ENOSPC;
+
+ return FLASH_CFG_START;
+}
+
+#define PF_INTR_MASK (F_PFSW | F_PFCIM)
+
+/**
+ * t4_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable PF-specific interrupts for the calling function and the top-level
+ * interrupt concentrator for global interrupts. Interrupts are already
+ * enabled at each module, here we just enable the roots of the interrupt
+ * hierarchies.
+ *
+ * Note: this function should be called only when the driver manages
+ * non PF-specific interrupts from the various HW modules. Only one PCI
+ * function at a time should be doing this.
+ */
+void t4_intr_enable(struct adapter *adapter)
+{
+ u32 val = 0;
+ u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
+ t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
+ F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
+ F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
+ F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
+ F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
+ F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
+ F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
+ t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
+ t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
+}
+
+/**
+ * t4_intr_disable - disable interrupts
+ * @adapter: the adapter whose interrupts should be disabled
+ *
+ * Disable interrupts. We only disable the top-level interrupt
+ * concentrators. The caller must be a PCI function managing global
+ * interrupts.
+ */
+void t4_intr_disable(struct adapter *adapter)
+{
+ u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
+
+ t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
+ t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
+}
+
+/**
+ * t4_get_port_type_description - return Port Type string description
+ * @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+ static const char * const port_type_description[] = {
+ "Fiber_XFI",
+ "Fiber_XAUI",
+ "BT_SGMII",
+ "BT_XFI",
+ "BT_XAUI",
+ "KX4",
+ "CX4",
+ "KX",
+ "KR",
+ "SFP",
+ "BP_AP",
+ "BP4_AP",
+ "QSFP_10G",
+ "QSA",
+ "QSFP",
+ "BP40_BA",
+ "KR4_100G",
+ "CR4_QSFP",
+ "CR_QSFP",
+ "CR2_QSFP",
+ "SFP28",
+ "KR_SFP28",
+ };
+
+ if (port_type < ARRAY_SIZE(port_type_description))
+ return port_type_description[port_type];
+ return "UNKNOWN";
+}
+
+/**
+ * t4_get_mps_bg_map - return the buffer groups associated with a port
+ * @adap: the adapter
+ * @pidx: the port index
+ *
+ * Returns a bitmap indicating which MPS buffer groups are associated
+ * with the given port. Bit i is set if buffer group i is used by the
+ * port.
+ */
+unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
+ unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
+ A_MPS_CMN_CTL));
+
+ if (pidx >= nports) {
+ dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << (2 * pidx);
+ }
+ break;
+ }
+
+ dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+ return 0;
+}
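+
+/*
+ * For example, on a 2-port T4/T5 adapter port 0 owns buffer groups
+ * {0,1} (map 0x3) and port 1 owns {2,3} (map 0xc), while on a 4-port
+ * adapter each port owns exactly one buffer group (1 << pidx).
+ */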
+
+/**
+ * t4_get_tp_ch_map - return TP ingress channels associated with a port
+ * @adapter: the adapter
+ * @pidx: the port index
+ *
+ * Returns a bitmap indicating which TP Ingress Channels are associated with
+ * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
+ */
+unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
+{
+ unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
+ A_MPS_CMN_CTL));
+
+ if (pidx >= nports) {
+		dev_warn(adapter, "TP Port Index %d >= Nports %d\n",
+ pidx, nports);
+ return 0;
+ }
+
+ switch (chip_version) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ /* Note that this happens to be the same values as the MPS
+ * Buffer Group Map for these Chips. But we replicate the code
+ * here because they're really separate concepts.
+ */
+ switch (nports) {
+ case 1: return 0xf;
+ case 2: return 3 << (2 * pidx);
+ case 4: return 1 << pidx;
+ }
+ break;
+
+ case CHELSIO_T6:
+ switch (nports) {
+ case 2: return 1 << pidx;
+ }
+ break;
+ }
+
+ dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
+ chip_version, nports);
+ return 0;
+}
+
+/**
+ * t4_get_port_stats - collect port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics related to the given port from HW.
+ */
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
+{
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
+
+#define GET_STAT(name) \
+ t4_read_reg64(adap, \
+ (is_t4(adap->params.chip) ? \
+ PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
+ T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+
+ p->tx_octets = GET_STAT(TX_PORT_BYTES);
+ p->tx_frames = GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 = GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop = GET_STAT(TX_PORT_DROP);
+ p->tx_pause = GET_STAT(TX_PORT_PAUSE);
+ p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & F_COUNTPAUSESTATTX) {
+ p->tx_frames -= p->tx_pause;
+ p->tx_octets -= p->tx_pause * 64;
+ }
+ if (stat_ctl & F_COUNTPAUSEMCTX)
+ p->tx_mcast_frames -= p->tx_pause;
+ }
+
+ p->rx_octets = GET_STAT(RX_PORT_BYTES);
+ p->rx_frames = GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
+ p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 = GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_pause = GET_STAT(RX_PORT_PAUSE);
+ p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & F_COUNTPAUSESTATRX) {
+ p->rx_frames -= p->rx_pause;
+ p->rx_octets -= p->rx_pause * 64;
+ }
+ if (stat_ctl & F_COUNTPAUSEMCRX)
+ p->rx_mcast_frames -= p->rx_pause;
+ }
+
+ p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
+ p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
+ p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
+ p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
+ * @adap: The adapter
+ * @idx: The port
+ * @stats: Current stats to fill
+ * @offset: Previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset)
+{
+ u64 *s, *o;
+ unsigned int i;
+
+ t4_get_port_stats(adap, idx, stats);
+ for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+ i < (sizeof(struct port_stats) / sizeof(u64));
+ i++, s++, o++)
+ *s -= *o;
+}
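+
+/*
+ * A minimal usage sketch (hypothetical caller): snapshot the counters
+ * once and then read deltas against that snapshot, which gives a
+ * "clear on read" view without writing the hardware counters:
+ *
+ *	struct port_stats base, delta;
+ *
+ *	t4_get_port_stats(adap, idx, &base);
+ *	...
+ *	t4_get_port_stats_offset(adap, idx, &delta, &base);
+ */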
+
+/**
+ * t4_clr_port_stats - clear port statistics
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Clear HW statistics for the given port.
+ */
+void t4_clr_port_stats(struct adapter *adap, int idx)
+{
+ unsigned int i;
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 port_base_addr;
+
+ if (is_t4(adap->params.chip))
+ port_base_addr = PORT_BASE(idx);
+ else
+ port_base_addr = T5_PORT_BASE(idx);
+
+ for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = 0; i < 4; i++)
+ if (bgmap & (1 << i)) {
+ t4_write_reg(adap,
+ A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
+ i * 8, 0);
+ t4_write_reg(adap,
+ A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
+ i * 8, 0);
+ }
+}
+
+/**
+ * t4_fw_hello - establish communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @evt_mbox: mailbox to receive async FW events
+ * @master: specifies the caller's willingness to be the device master
+ * @state: returns the current device state (if non-NULL)
+ *
+ * Issues a command to establish communication with FW. Returns either
+ * an error (negative integer) or the mailbox of the Master PF.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+ enum dev_master master, enum dev_state *state)
+{
+ int ret;
+ struct fw_hello_cmd c;
+ u32 v;
+ unsigned int master_mbox;
+ int retries = FW_CMD_HELLO_RETRIES;
+
+retry:
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, HELLO, WRITE);
+ c.err_to_clearinit = cpu_to_be32(
+ V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+ V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+ V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
+ M_FW_HELLO_CMD_MBMASTER) |
+ V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
+ V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
+ F_FW_HELLO_CMD_CLEARINIT);
+
+ /*
+ * Issue the HELLO command to the firmware. If it's not successful
+ * but indicates that we got a "busy" or "timeout" condition, retry
+ * the HELLO until we exhaust our retry limit. If we do exceed our
+ * retry limit, check to see if the firmware left us any error
+ * information and report that if so ...
+ */
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret != FW_SUCCESS) {
+ if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
+ goto retry;
+ if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
+ t4_report_fw_error(adap);
+ return ret;
+ }
+
+ v = be32_to_cpu(c.err_to_clearinit);
+ master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
+ if (state) {
+ if (v & F_FW_HELLO_CMD_ERR)
+ *state = DEV_STATE_ERR;
+ else if (v & F_FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
+ else
+ *state = DEV_STATE_UNINIT;
+ }
+
+ /*
+ * If we're not the Master PF then we need to wait around for the
+ * Master PF Driver to finish setting up the adapter.
+ *
+ * Note that we also do this wait if we're a non-Master-capable PF and
+ * there is no current Master PF; a Master PF may show up momentarily
+ * and we wouldn't want to fail pointlessly. (This can happen when an
+ * OS loads lots of different drivers rapidly at the same time). In
+ * this case, the Master PF returned by the firmware will be
+ * M_PCIE_FW_MASTER so the test below will work ...
+ */
+ if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
+ master_mbox != mbox) {
+ int waiting = FW_CMD_HELLO_TIMEOUT;
+
+ /*
+ * Wait for the firmware to either indicate an error or
+ * initialized state. If we see either of these we bail out
+ * and report the issue to the caller. If we exhaust the
+ * "hello timeout" and we haven't exhausted our retries, try
+ * again. Otherwise bail with a timeout error.
+ */
+ for (;;) {
+ u32 pcie_fw;
+
+ msleep(50);
+ waiting -= 50;
+
+ /*
+			 * If neither Error nor Initialized is indicated
+			 * by the firmware, keep waiting till we exhaust our
+ * timeout ... and then retry if we haven't exhausted
+ * our retries ...
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
+ if (waiting <= 0) {
+ if (retries-- > 0)
+ goto retry;
+
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ /*
+			 * We either have an Error or Initialized condition;
+			 * report errors preferentially.
+ */
+ if (state) {
+ if (pcie_fw & F_PCIE_FW_ERR)
+ *state = DEV_STATE_ERR;
+ else if (pcie_fw & F_PCIE_FW_INIT)
+ *state = DEV_STATE_INIT;
+ }
+
+ /*
+ * If we arrived before a Master PF was selected and
+			 * there's now a valid Master PF, grab its identity
+ * for our caller.
+ */
+ if (master_mbox == M_PCIE_FW_MASTER &&
+ (pcie_fw & F_PCIE_FW_MASTER_VLD))
+ master_mbox = G_PCIE_FW_MASTER(pcie_fw);
+ break;
+ }
+ }
+
+ return master_mbox;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_bye_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, BYE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+ struct fw_reset_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, RESET, WRITE);
+ c.val = cpu_to_be32(reset);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @force: force uP into RESET even if FW RESET command fails
+ *
+ * Issues a RESET command to firmware (if desired) with a HALT indication
+ * and then puts the microprocessor into RESET state. The RESET command
+ * will only be issued if a legitimate mailbox is provided (mbox <=
+ * M_PCIE_FW_MASTER).
+ *
+ * This is generally used in order for the host to safely manipulate the
+ * adapter without fear of conflicting with whatever the firmware might
+ * be doing. The only way out of this state is to RESTART the firmware
+ * ...
+ */
+int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
+{
+ int ret = 0;
+
+ /*
+ * If a legitimate mailbox is provided, issue a RESET command
+ * with a HALT indication.
+ */
+ if (mbox <= M_PCIE_FW_MASTER) {
+ struct fw_reset_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, RESET, WRITE);
+ c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
+ c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ }
+
+ /*
+ * Normally we won't complete the operation if the firmware RESET
+ * command fails but if our caller insists we'll go ahead and put the
+ * uP into RESET. This can be useful if the firmware is hung or even
+ * missing ... We'll have to take the risk of putting the uP into
+ * RESET without the cooperation of firmware in that case.
+ *
+ * We also force the firmware's HALT flag to be on in case we bypassed
+ * the firmware RESET command above or we're dealing with old firmware
+ * which doesn't have the HALT capability. This will serve as a flag
+ * for the incoming firmware to know that it's coming out of a HALT
+ * rather than a RESET ... if it's new enough to understand that ...
+ */
+ if (ret == 0 || force) {
+ t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
+ t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
+ F_PCIE_FW_HALT);
+ }
+
+ /*
+ * And we always return the result of the firmware RESET command
+ * even when we force the uP into RESET ...
+ */
+ return ret;
+}
+
+/**
+ * t4_fw_restart - restart the firmware by taking the uP out of RESET
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @reset: if we want to do a RESET to restart things
+ *
+ * Restart firmware previously halted by t4_fw_halt(). On successful
+ * return the previous PF Master remains as the new PF Master and there
+ * is no need to issue a new HELLO command, etc.
+ *
+ * We do this in two ways:
+ *
+ * 1. If we're dealing with newer firmware we'll simply want to take
+ * the chip's microprocessor out of RESET. This will cause the
+ * firmware to start up from its start vector. And then we'll loop
+ * until the firmware indicates it's started again (PCIE_FW.HALT
+ * reset to 0) or we timeout.
+ *
+ * 2. If we're dealing with older firmware then we'll need to RESET
+ * the chip since older firmware won't recognize the PCIE_FW.HALT
+ * flag and automatically RESET itself on startup.
+ */
+int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
+{
+ if (reset) {
+ /*
+ * Since we're directing the RESET instead of the firmware
+ * doing it automatically, we need to clear the PCIE_FW.HALT
+ * bit.
+ */
+ t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
+
+ /*
+ * If we've been given a valid mailbox, first try to get the
+ * firmware to do the RESET. If that works, great and we can
+ * return success. Otherwise, if we haven't been given a
+ * valid mailbox or the RESET command failed, fall back to
+ * hitting the chip with a hammer.
+ */
+ if (mbox <= M_PCIE_FW_MASTER) {
+ t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
+ msleep(100);
+ if (t4_fw_reset(adap, mbox,
+ F_PIORST | F_PIORSTMODE) == 0)
+ return 0;
+ }
+
+ t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
+ msleep(2000);
+ } else {
+ int ms;
+
+ t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
+ for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+ if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
+ return FW_SUCCESS;
+ msleep(100);
+ ms += 100;
+ }
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
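+
+/*
+ * A minimal sketch of the HALT/RESTART pairing (hypothetical caller):
+ *
+ *	if (t4_fw_halt(adap, mbox, 0) == 0) {
+ *		... safely manipulate the adapter ...
+ *		t4_fw_restart(adap, mbox, 0);
+ *	}
+ */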
+
+/**
+ * t4_fl_pkt_align - return the fl packet alignment
+ * @adap: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for this and hence the alignment for
+ * next packet offset is maximum of these two.
+ */
+int t4_fl_pkt_align(struct adapter *adap)
+{
+ u32 sge_control, sge_control2;
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ sge_control = t4_read_reg(adap, A_SGE_CONTROL);
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+ else
+ ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adap->params.chip)) {
+ sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
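+
+/*
+ * For example, a T5 programmed with a 32B Ingress Padding Boundary and
+ * a 64B Ingress Packing Boundary yields max(32, 64) = 64 bytes here;
+ * on T4 the single padding boundary alone decides the alignment.
+ */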
+
+/**
+ * t4_fixup_host_params_compat - fix up host-dependent parameters
+ * @adap: the adapter
+ * @page_size: the host's Base Page Size
+ * @cache_line_size: the host's Cache Line Size
+ * @chip_compat: maintain compatibility with designated chip
+ *
+ * Various registers in the chip contain values which are dependent on the
+ * host's Base Page and Cache Line Sizes. This function will fix all of
+ * those registers with the appropriate values as passed in ...
+ *
+ * @chip_compat is used to limit the set of changes that are made
+ * to be compatible with the indicated chip release. This is used by
+ * drivers to maintain compatibility with chip register settings when
+ * the drivers haven't [yet] been updated with new chip support.
+ */
+int t4_fixup_host_params_compat(struct adapter *adap,
+ unsigned int page_size,
+ unsigned int cache_line_size,
+ enum chip_type chip_compat)
+{
+ unsigned int page_shift = cxgbe_fls(page_size) - 1;
+ unsigned int sge_hps = page_shift - 10;
+ unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+ unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+ unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;
+
+ t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
+ V_HOSTPAGESIZEPF0(sge_hps) |
+ V_HOSTPAGESIZEPF1(sge_hps) |
+ V_HOSTPAGESIZEPF2(sge_hps) |
+ V_HOSTPAGESIZEPF3(sge_hps) |
+ V_HOSTPAGESIZEPF4(sge_hps) |
+ V_HOSTPAGESIZEPF5(sge_hps) |
+ V_HOSTPAGESIZEPF6(sge_hps) |
+ V_HOSTPAGESIZEPF7(sge_hps));
+
+ if (is_t4(adap->params.chip) || is_t4(chip_compat))
+ t4_set_reg_field(adap, A_SGE_CONTROL,
+ V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
+ F_EGRSTATUSPAGESIZE,
+ V_INGPADBOUNDARY(fl_align_log -
+ X_INGPADBOUNDARY_SHIFT) |
+ V_EGRSTATUSPAGESIZE(stat_len != 64));
+ else {
+ unsigned int pack_align;
+ unsigned int ingpad, ingpack;
+ unsigned int pcie_cap;
+
+ /*
+ * T5 introduced the separation of the Free List Padding and
+ * Packing Boundaries. Thus, we can select a smaller Padding
+ * Boundary to avoid uselessly chewing up PCIe Link and Memory
+ * Bandwidth, and use a Packing Boundary which is large enough
+ * to avoid false sharing between CPUs, etc.
+ *
+ * For the PCI Link, the smaller the Padding Boundary the
+ * better. For the Memory Controller, a smaller Padding
+ * Boundary is better until we cross under the Memory Line
+ * Size (the minimum unit of transfer to/from Memory). If we
+ * have a Padding Boundary which is smaller than the Memory
+ * Line Size, that'll involve a Read-Modify-Write cycle on the
+ * Memory Controller which is never good.
+ */
+
+ /* We want the Packing Boundary to be based on the Cache Line
+ * Size in order to help avoid False Sharing performance
+ * issues between CPUs, etc. We also want the Packing
+ * Boundary to incorporate the PCI-E Maximum Payload Size. We
+ * get best performance when the Packing Boundary is a
+ * multiple of the Maximum Payload Size.
+ */
+ pack_align = fl_align;
+ pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ unsigned int mps, mps_log;
+ u16 devctl;
+
+ /* The PCIe Device Control Maximum Payload Size field
+ * [bits 7:5] encodes sizes as powers of 2 starting at
+ * 128 bytes.
+ */
+ t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
+ &devctl);
+ mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
+ mps = 1 << mps_log;
+ if (mps > pack_align)
+ pack_align = mps;
+ }
+
+ /*
+ * N.B. T5 has a different interpretation of the "0" value for
+ * the Packing Boundary. This corresponds to 16 bytes instead
+ * of the expected 32 bytes. We never have a Packing Boundary
+ * less than 32 bytes so we can't use that special value but
+ * on the other hand, if we wanted 32 bytes, the best we can
+ * really do is 64 bytes ...
+ */
+ if (pack_align <= 16) {
+ ingpack = X_INGPACKBOUNDARY_16B;
+ fl_align = 16;
+ } else if (pack_align == 32) {
+ ingpack = X_INGPACKBOUNDARY_64B;
+ fl_align = 64;
+ } else {
+ unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;
+
+ ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
+ fl_align = pack_align;
+ }
+
+ /* Use the smallest Ingress Padding which isn't smaller than
+ * the Memory Controller Read/Write Size. We'll take that as
+ * being 8 bytes since we don't know of any system with a
+ * wider Memory Controller Bus Width.
+ */
+ if (is_t5(adap->params.chip))
+ ingpad = X_INGPADBOUNDARY_32B;
+ else
+ ingpad = X_T6_INGPADBOUNDARY_8B;
+ t4_set_reg_field(adap, A_SGE_CONTROL,
+ V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
+ F_EGRSTATUSPAGESIZE,
+ V_INGPADBOUNDARY(ingpad) |
+ V_EGRSTATUSPAGESIZE(stat_len != 64));
+ t4_set_reg_field(adap, A_SGE_CONTROL2,
+ V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
+ V_INGPACKBOUNDARY(ingpack));
+ }
+
+ /*
+ * Adjust various SGE Free List Host Buffer Sizes.
+ *
+ * The first four entries are:
+ *
+ * 0: Host Page Size
+ * 1: 64KB
+ * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
+ * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
+ *
+ * For the single-MTU buffers in unpacked mode we need to include
+ * space for the SGE Control Packet Shift, 14 byte Ethernet header,
+ * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
+ * Padding boundary. All of these are accommodated in the Factory
+ * Default Firmware Configuration File but we need to adjust it for
+ * this host's cache line size.
+ */
+ t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
+ t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
+ (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
+ & ~(fl_align - 1));
+ t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
+ (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
+ & ~(fl_align - 1));
+
+ t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
+
+ return 0;
+}
+
+/**
+ * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
+ * @adap: the adapter
+ * @page_size: the host's Base Page Size
+ * @cache_line_size: the host's Cache Line Size
+ *
+ * Various registers in T4 contain values which are dependent on the
+ * host's Base Page and Cache Line Sizes. This function will fix all of
+ * those registers with the appropriate values as passed in ...
+ *
+ * This routine makes changes which are compatible with T4 chips.
+ */
+int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
+ unsigned int cache_line_size)
+{
+ return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
+ T4_LAST_REV);
+}
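+
+/*
+ * A minimal usage sketch (hypothetical caller): a host with 4KB pages
+ * and 64-byte cache lines would call
+ *
+ *	t4_fixup_host_params(adap, 4096, 64);
+ *
+ * programming a 4KB SGE Host Page Size for all PFs, a 64-byte free-list
+ * alignment and a 64-byte egress status page.
+ */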
+
+/**
+ * t4_fw_initialize - ask FW to initialize the device
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to FW to partially initialize the device. This
+ * performs initialization that generally doesn't depend on user input.
+ */
+int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_initialize_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, INITIALIZE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_query_params_rw - query FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ * @rw: Write and read flag
+ *
+ * Reads the value of FW or device parameters. Up to 7 parameters can be
+ * queried at once.
+ */
+static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ u32 *val, int rw)
+{
+ unsigned int i;
+ int ret;
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_PARAMS_CMD_PFN(pf) |
+ V_FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+ for (i = 0; i < nparams; i++) {
+ *p++ = cpu_to_be32(*params++);
+ if (rw)
+ *p = cpu_to_be32(*(val + i));
+ p++;
+ }
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret == 0)
+ for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
+ *val++ = be32_to_cpu(*p);
+ return ret;
+}
+
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ u32 *val)
+{
+ return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
+}
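+
+/*
+ * t4_get_core_clock() above is the canonical usage example: it builds a
+ * single DEV/CCLK parameter mnemonic and queries one value:
+ *
+ *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ *		V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);
+ *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ *			      &param, &val);
+ */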
+
+/**
+ * t4_set_params_timeout - sets FW or device parameters
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF
+ * @vf: the VF
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @val: the parameter values
+ * @timeout: the timeout time
+ *
+ * Sets the value of FW or device parameters. Up to 7 parameters can be
+ * specified at once.
+ */
+int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
+ unsigned int pf, unsigned int vf,
+ unsigned int nparams, const u32 *params,
+ const u32 *val, int timeout)
+{
+ struct fw_params_cmd c;
+ __be32 *p = &c.param[0].mnem;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_PARAMS_CMD_PFN(pf) |
+ V_FW_PARAMS_CMD_VFN(vf));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+ while (nparams--) {
+ *p++ = cpu_to_be32(*params++);
+ *p++ = cpu_to_be32(*val++);
+ }
+
+ return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
+}
+
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int nparams, const u32 *params,
+ const u32 *val)
+{
+ return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
+ FW_CMD_MAX_TIMEOUT);
+}
+
+/**
+ * t4_alloc_vi_func - allocate a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ * @portfunc: which Port Application Function MAC Address is desired
+ * @idstype: Intrusion Detection Type
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ * @mac should be large enough to hold @nmac Ethernet addresses, they are
+ * stored consecutively so the space needed is @nmac * 6 bytes.
+ * Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
+ unsigned int port, unsigned int pf, unsigned int vf,
+ unsigned int nmac, u8 *mac, unsigned int *rss_size,
+ unsigned int portfunc, unsigned int idstype,
+ u8 *vivld, u8 *vin)
+{
+ int ret;
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
+ c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
+ V_FW_VI_CMD_FUNC(portfunc));
+ c.portid_pkd = V_FW_VI_CMD_PORTID(port);
+ c.nmac = nmac - 1;
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ if (mac) {
+ memcpy(mac, c.mac, sizeof(c.mac));
+ switch (nmac) {
+ case 5:
+ memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ /* FALLTHROUGH */
+ case 4:
+ memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ /* FALLTHROUGH */
+ case 3:
+ memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ /* FALLTHROUGH */
+ case 2:
+ memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+ /* FALLTHROUGH */
+ }
+ }
+ if (rss_size)
+ *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
+ if (vivld)
+ *vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));
+ if (vin)
+ *vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));
+	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
+}
+
+/**
+ * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Backwards-compatible convenience routine to allocate a Virtual
+ * Interface with an Ethernet Port Application Function and Intrusion
+ * Detection System disabled.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size, u8 *vivld, u8 *vin)
+{
+ return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
+ FW_VI_FUNC_ETH, 0, vivld, vin);
+}
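+
+/*
+ * A minimal usage sketch (hypothetical caller): allocate a single-MAC
+ * Ethernet VI on port 0 and free it again:
+ *
+ *	u8 mac[6], vivld, vin;
+ *	unsigned int rss_size;
+ *	int viid;
+ *
+ *	viid = t4_alloc_vi(adap, adap->mbox, 0, adap->pf, 0, 1,
+ *			   mac, &rss_size, &vivld, &vin);
+ *	if (viid >= 0)
+ *		t4_free_vi(adap, adap->mbox, adap->pf, 0, viid);
+ */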
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
+ V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
+ c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
+
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_set_rxmode - set Rx properties of a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @mtu: the new MTU or -1
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
+ * -1 no change
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sets Rx properties of a virtual interface.
+ */
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int mtu, int promisc, int all_multi, int bcast, int vlanex,
+ bool sleep_ok)
+{
+ struct fw_vi_rxmode_cmd c;
+
+ /* convert to FW values */
+ if (mtu < 0)
+ mtu = M_FW_VI_RXMODE_CMD_MTU;
+ if (promisc < 0)
+ promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
+ if (all_multi < 0)
+ all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
+ if (bcast < 0)
+ bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
+ if (vlanex < 0)
+ vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_VI_RXMODE_CMD_VIID(viid));
+ c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
+ V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+ V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+ V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
+ V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
+ if (is_pf4(adap))
+ return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
+ sleep_ok);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_alloc_raw_mac_filt - Adds a raw mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index at which to add this entry
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Adds the mac entry at the specified index using raw mac interface.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ int ret = 0;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_VI_MAC_CMD_VIID(viid));
+ val = V_FW_CMD_LEN16(1) |
+ V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+
+	/* Specify the raw TCAM index at which to add this entry */
+ p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
+ V_DATAPORTNUM(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
+ V_DATAPORTNUM(M_DATAPORTNUM));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
+
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0) {
+ ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
+ if (ret != (int)idx)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+/**
+ * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index of the entry in mps tcam
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Removes the mac entry at the specified index using raw mac interface.
+ *
+ * Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 raw;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_CMD_EXEC(0) |
+ V_FW_VI_MAC_CMD_VIID(viid));
+ raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0U) |
+ raw |
+ V_FW_CMD_LEN16(1));
+
+ p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
+ FW_VI_MAC_ID_BASED_FREE);
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
+ V_DATAPORTNUM(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
+ V_DATAPORTNUM(M_DATAPORTNUM));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);
+
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
+ * t4_change_mac - modifies the exact-match filter for a MAC address
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @idx: index of existing filter for old value of MAC address, or -1
+ * @addr: the new MAC address value
+ * @persist: whether a new MAC allocation should be persistent
+ * @add_smt: if true also add the address to the HW SMT
+ *
+ * Modifies an exact-match filter and sets it to the new MAC address if
+ * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
+ * latter case the address is added persistently if @persist is %true.
+ *
+ * Note that in general it is not possible to modify the value of a given
+ * filter so the generic way to modify an address filter is to free the one
+ * being used by the old address value and allocate a new filter for the
+ * new address value.
+ *
+ * Returns a negative error number or the index of the filter with the new
+ * MAC value. Note that this index may differ from @idx.
+ */
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ int idx, const u8 *addr, bool persist, bool add_smt)
+{
+ int ret, mode;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_exact *p = c.u.exact;
+ int max_mac_addr = adap->params.arch.mps_tcam_size;
+
+ if (idx < 0) /* new allocation */
+ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+ mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_VI_MAC_CMD_VIID(viid));
+ c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
+ p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
+ V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
+ V_FW_VI_MAC_CMD_IDX(idx));
+ memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
+ if (ret == 0) {
+ ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
+ if (ret >= max_mac_addr)
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * t4_enable_vi_params - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ * @dcb_en: 1=enable delivery of Data Center Bridging messages.
+ *
+ * Enables/disables a virtual interface. Note that setting DCB Enable
+ * only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+ unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
+{
+ struct fw_vi_enable_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_VI_ENABLE_CMD_VIID(viid));
+ c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
+ V_FW_VI_ENABLE_CMD_EEN(tx_en) |
+ V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
+ FW_LEN16(c));
+ if (is_pf4(adap))
+ return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_enable_vi - enable/disable a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @rx_en: 1=enable Rx, 0=disable Rx
+ * @tx_en: 1=enable Tx, 0=disable Tx
+ *
+ * Enables/disables a virtual interface. Note that setting DCB Enable
+ * only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+ bool rx_en, bool tx_en)
+{
+ return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
+}
+
+/**
+ * t4_iq_start_stop - enable/disable an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @start: %true to enable the queues, %false to disable them
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Starts or stops an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+ unsigned int pf, unsigned int vf, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC);
+ c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
+ V_FW_IQ_CMD_IQSTOP(!start) |
+ FW_LEN16(c));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ if (is_pf4(adap)) {
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ } else {
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+ }
+}
+
+/**
+ * t4_iq_free - free an ingress queue and its FLs
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queues
+ * @vf: the VF owning the queues
+ * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
+ * @iqid: ingress queue id
+ * @fl0id: FL0 queue id or 0xffff if no attached FL0
+ * @fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ * Frees an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int iqtype, unsigned int iqid,
+ unsigned int fl0id, unsigned int fl1id)
+{
+ struct fw_iq_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
+ c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
+ c.iqid = cpu_to_be16(iqid);
+ c.fl0id = cpu_to_be16(fl0id);
+ c.fl1id = cpu_to_be16(fl1id);
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_eth_eq_free - free an Ethernet egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_eth_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
+ if (is_pf4(adap))
+ c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
+ V_FW_IQ_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+ c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
+ if (is_pf4(adap))
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+ else
+ return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_link_down_rc_str - return a string for a Link Down Reason Code
+ * @link_down_rc: Link Down Reason Code
+ *
+ * Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+
+ if (link_down_rc >= ARRAY_SIZE(reason))
+ return "Bad Reason Code";
+
+ return reason[link_down_rc];
+}
+
+/* Return the highest speed set in the port capabilities, in Mb/s. */
+static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
+{
+#define TEST_SPEED_RETURN(__caps_speed, __speed) \
+ do { \
+ if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
+ return __speed; \
+ } while (0)
+
+ TEST_SPEED_RETURN(100G, 100000);
+ TEST_SPEED_RETURN(50G, 50000);
+ TEST_SPEED_RETURN(40G, 40000);
+ TEST_SPEED_RETURN(25G, 25000);
+ TEST_SPEED_RETURN(10G, 10000);
+ TEST_SPEED_RETURN(1G, 1000);
+ TEST_SPEED_RETURN(100M, 100);
+
+#undef TEST_SPEED_RETURN
+
+ return 0;
+}
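+
+/* Example: when several speeds are advertised at once, only the highest
+ * matters here, e.g.
+ *
+ *	fwcap_to_speed(FW_PORT_CAP32_SPEED_25G |
+ *		       FW_PORT_CAP32_SPEED_10G |
+ *		       FW_PORT_CAP32_SPEED_1G) == 25000
+ */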
+
+/**
+ * t4_handle_get_port_info - process a FW reply message
+ * @pi: the port info
+ * @rpl: start of the FW message
+ *
+ * Processes a GET_PORT_INFO FW reply message.
+ */
+static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+ const struct fw_port_cmd *cmd = (const void *)rpl;
+ int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
+ fw_port_cap32_t pcaps, acaps, linkattr;
+ struct link_config *lc = &pi->link_cfg;
+ struct adapter *adapter = pi->adapter;
+ enum fw_port_module_type mod_type;
+ enum fw_port_type port_type;
+ unsigned int speed, fc, fec;
+ int link_ok, linkdnrc;
+
+ /* Extract the various fields from the Port Information message.
+ */
+ switch (action) {
+ case FW_PORT_ACTION_GET_PORT_INFO: {
+ u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
+
+ link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
+ pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
+ acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
+
+ /* Unfortunately the format of the Link Status in the old
+ * 16-bit Port Information message isn't the same as the
+ * 16-bit Port Capabilities bitfield used everywhere else ...
+ */
+ linkattr = 0;
+ if (lstatus & F_FW_PORT_CMD_RXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_RX;
+ if (lstatus & F_FW_PORT_CMD_TXPAUSE)
+ linkattr |= FW_PORT_CAP32_FC_TX;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ linkattr |= FW_PORT_CAP32_SPEED_100M;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ linkattr |= FW_PORT_CAP32_SPEED_1G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ linkattr |= FW_PORT_CAP32_SPEED_10G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
+ linkattr |= FW_PORT_CAP32_SPEED_25G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ linkattr |= FW_PORT_CAP32_SPEED_40G;
+ if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
+ linkattr |= FW_PORT_CAP32_SPEED_100G;
+
+ break;
+ }
+
+ case FW_PORT_ACTION_GET_PORT_INFO32: {
+ u32 lstatus32 =
+ be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
+
+ link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
+ linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
+ pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd->u.info32.acaps32);
+ linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
+ break;
+ }
+
+ default:
+ dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
+ be32_to_cpu(cmd->action_to_len16));
+ return;
+ }
+
+ fec = fwcap_to_cc_fec(acaps);
+
+ fc = fwcap_to_cc_pause(linkattr);
+ speed = fwcap_to_speed(linkattr);
+
+ if (mod_type != pi->mod_type) {
+ lc->auto_fec = fec;
+ pi->port_type = port_type;
+ pi->mod_type = mod_type;
+ t4_os_portmod_changed(adapter, pi->pidx);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc || fec != lc->fec) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ lc->link_down_rc = linkdnrc;
+			dev_warn(adapter, "Port %d link down, reason: %s\n",
+ pi->tx_chan, t4_link_down_rc_str(linkdnrc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->fec = fec;
+ lc->pcaps = pcaps;
+ lc->acaps = acaps & ADVERT_MASK;
+
+ if (lc->acaps & FW_PORT_CAP32_ANEG) {
+ lc->autoneg = AUTONEG_ENABLE;
+ } else {
+			/* When autoneg is disabled, the user needs to
+			 * set a single speed.
+ * Similar to cxgb4_ethtool.c: set_link_ksettings
+ */
+ lc->acaps = 0;
+ lc->requested_speed = fwcap_to_speed(acaps);
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+ }
+}
+
+/**
+ * t4_ctrl_eq_free - free a control egress queue
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the queue
+ * @vf: the VF owning the queue
+ * @eqid: egress queue id
+ *
+ * Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int eqid)
+{
+ struct fw_eq_ctrl_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(pf) |
+ V_FW_EQ_CTRL_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+ c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+ u8 opcode = *(const u8 *)rpl;
+
+ /*
+ * This might be a port command ... this simplifies the following
+ * conditionals ... We can get away with pre-dereferencing
+ * action_to_len16 because it's in the first 16 bytes and all messages
+ * will be at least that long.
+ */
+ const struct fw_port_cmd *p = (const void *)rpl;
+ unsigned int action =
+ G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
+
+ if (opcode == FW_PORT_CMD &&
+ (action == FW_PORT_ACTION_GET_PORT_INFO ||
+ action == FW_PORT_ACTION_GET_PORT_INFO32)) {
+ /* link/module state change message */
+ int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
+ struct port_info *pi = NULL;
+ int i;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->tx_chan == chan)
+ break;
+ }
+
+ t4_handle_get_port_info(pi, rpl);
+ } else {
+ dev_warn(adap, "Unknown firmware reply %d\n", opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void t4_reset_link_config(struct adapter *adap, int idx)
+{
+ struct port_info *pi = adap2pinfo(adap, idx);
+ struct link_config *lc = &pi->link_cfg;
+
+ lc->link_ok = 0;
+ lc->requested_speed = 0;
+ lc->requested_fc = 0;
+ lc->speed = 0;
+ lc->fc = 0;
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @pcaps: link Port Capabilities
+ * @acaps: link current Advertised Port Capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
+ fw_port_cap32_t acaps)
+{
+ lc->pcaps = pcaps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = 0;
+ lc->fc = 0;
+
+ /**
+ * For Forward Error Control, we default to whatever the Firmware
+ * tells us the Link is currently advertising.
+ */
+ lc->auto_fec = fwcap_to_cc_fec(acaps);
+ lc->requested_fec = FEC_AUTO;
+ lc->fec = lc->auto_fec;
+
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
+ lc->acaps = lc->pcaps & ADVERT_MASK;
+ lc->autoneg = AUTONEG_ENABLE;
+ lc->requested_fc |= PAUSE_AUTONEG;
+ } else {
+ lc->acaps = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/**
+ * t4_wait_dev_ready - wait until reads of registers work
+ * @adapter: the adapter
+ *
+ * Right after the device is RESET it can take a small amount of time
+ * for it to respond to register reads. Until then, all reads will
+ * return either 0xff...ff or 0xee...ee. Return an error if reads
+ * don't work within a reasonable time frame.
+ */
+static int t4_wait_dev_ready(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ msleep(500);
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ whoami);
+ return -EIO;
+}
+
+struct flash_desc {
+ u32 vendor_and_model_id;
+ u32 size_mb;
+};
+
+int t4_get_flash_params(struct adapter *adapter)
+{
+ /*
+ * Table for non-standard supported Flash parts. Note, all Flash
+ * parts must have 64KB sectors.
+ */
+ static struct flash_desc supported_flash[] = {
+ { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ };
+
+ int ret;
+ u32 flashid = 0;
+ unsigned int part, manufacturer;
+ unsigned int density, size = 0;
+
+ /**
+ * Issue a Read ID Command to the Flash part. We decode supported
+ * Flash parts and their sizes from this. There's a newer Query
+ * Command which can retrieve detailed geometry information but
+ * many Flash parts don't support it.
+ */
+ ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = sf1_read(adapter, 3, 0, 1, &flashid);
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ if (ret < 0)
+ return ret;
+
+ /**
+ * Check to see if it's one of our non-standard supported Flash parts.
+ */
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ adapter->params.sf_size =
+ supported_flash[part].size_mb;
+ adapter->params.sf_nsec =
+ adapter->params.sf_size / SF_SEC_SIZE;
+ goto found;
+ }
+ }
+
+ /**
+	 * Decode Flash part size. The code below looks repetitive with
+	 * common encodings, but that's not guaranteed in the JEDEC
+	 * specification for the Read JEDEC ID command. The only thing that
+	 * we're guaranteed by the JEDEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /**
+ * This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14:
+ size = 1 << 20; /* 1MB */
+ break;
+ case 0x15:
+ size = 1 << 21; /* 2MB */
+ break;
+ case 0x16:
+ size = 1 << 22; /* 4MB */
+ break;
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ case 0x19:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x20:
+ size = 1 << 26; /* 64MB */
+ break;
+ case 0x21:
+ size = 1 << 27; /* 128MB */
+ break;
+ case 0x22:
+ size = 1 << 28; /* 256MB */
+ break;
+ }
+ break;
+ }
+
+ case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
+ /**
+ * This Density -> Size decoding table is taken from ISSI
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x16:
+ size = 1 << 25; /* 32MB */
+ break;
+ case 0x17:
+ size = 1 << 26; /* 64MB */
+ break;
+ }
+ break;
+ }
+
+ case 0xc2: { /* Macronix */
+ /**
+ * This Density -> Size decoding table is taken from Macronix
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+
+ case 0xef: { /* Winbond */
+ /**
+ * This Density -> Size decoding table is taken from Winbond
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17:
+ size = 1 << 23; /* 8MB */
+ break;
+ case 0x18:
+ size = 1 << 24; /* 16MB */
+ break;
+ }
+ break;
+ }
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ dev_warn(adapter,
+ "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
+ }
+
+ /**
+ * Store decoded Flash size and fall through into vetting code.
+ */
+ adapter->params.sf_size = size;
+ adapter->params.sf_nsec = size / SF_SEC_SIZE;
+
+found:
+ /*
+	 * Ideally we would reject adapters whose Flash is too small; for now,
+	 * just emit a warning.
+ */
+ if (adapter->params.sf_size < FLASH_MIN_SIZE)
+ dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
+
+ return 0;
+}
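+
+/* Worked example: a part answering the Read ID command with the bytes
+ * 0x20 (manufacturer), 0xba (type), 0x18 (density) -- as some 16MB Micron
+ * parts do -- yields flashid = 0x0018ba20, so manufacturer = 0x20 and
+ * density = 0x18, which the Micron table above decodes as a 16MB part
+ * (256 sectors of 64KB).
+ */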
+
+static void set_pcie_completion_timeout(struct adapter *adapter,
+ u8 range)
+{
+ u32 pcie_cap;
+ u16 val;
+
+ pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+ if (pcie_cap) {
+ t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
+ val &= 0xfff0;
+ val |= range;
+ t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
+
+/**
+ * t4_get_chip_type - Determine chip type from device ID
+ * @adap: the adapter
+ * @ver: adapter version
+ */
+int t4_get_chip_type(struct adapter *adap, int ver)
+{
+ enum chip_type chip = 0;
+ u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
+
+	/* Decode the chip type from the adapter version */
+ switch (ver) {
+ case CHELSIO_T5:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ break;
+ case CHELSIO_T6:
+ chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ break;
+ default:
+ dev_err(adap, "Device %d is not supported\n",
+ adap->params.pci.device_id);
+ return -EINVAL;
+ }
+
+ return chip;
+}
+
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int t4_prep_adapter(struct adapter *adapter)
+{
+ int ret, ver;
+ u32 pl_rev;
+
+ ret = t4_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
+ adapter->params.pci.device_id = adapter->pdev->id.device_id;
+ adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
+
+ /*
+	 * We won't need the adapter->params.chip code once PL_REV contains
+	 * ADAPTER (VERSION << 4 | REVISION).
+ */
+ ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
+ adapter->params.chip = 0;
+ switch (ver) {
+ case CHELSIO_T5:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 256;
+ adapter->params.arch.nchan = 2;
+ adapter->params.arch.vfcount = 256;
+ break;
+ default:
+ dev_err(adapter, "%s: Device %d is not supported\n",
+ __func__, adapter->params.pci.device_id);
+ return -EINVAL;
+ }
+
+ adapter->params.pci.vpd_cap_addr =
+ t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
+
+ ret = t4_get_flash_params(adapter);
+ if (ret < 0) {
+ dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
+ -ret);
+ return ret;
+ }
+
+ adapter->params.cim_la_size = CIMLA_SIZE;
+
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+ /*
+ * Default port and clock for debugging in case we can't reach FW.
+ */
+ adapter->params.nports = 1;
+ adapter->params.portvec = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ /* Set pci completion timeout value to 4 seconds. */
+ set_pcie_completion_timeout(adapter, 0xd);
+ return 0;
+}
+
+/**
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * @adapter: the adapter
+ * @qid: the Queue ID
+ * @qtype: the Ingress or Egress type for @qid
+ * @pbar2_qoffset: BAR2 Queue Offset
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 SGE Queue Registers information associated with the
+ * indicated Absolute Queue ID. These are passed back in return value
+ * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
+ * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ * This may return an error which indicates that BAR2 SGE Queue
+ * registers aren't available. If an error is not returned, then the
+ * following values are returned:
+ *
+ * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
+ *
+ * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ * require the "Inferred Queue ID" ability may be used. E.g. the
+ * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ * then these "Inferred Queue ID" registers may not be used.
+ */
+int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
+ enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+ u64 bar2_page_offset, bar2_qoffset;
+ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+ /*
+ * T4 doesn't support BAR2 SGE Queue registers.
+ */
+ if (is_t4(adapter->params.chip))
+ return -EINVAL;
+
+ /*
+ * Get our SGE Page Size parameters.
+ */
+ page_shift = adapter->params.sge.hps + 10;
+ page_size = 1 << page_shift;
+
+ /*
+ * Get the right Queues per Page parameters for our Queue.
+ */
+ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
+ adapter->params.sge.eq_qpp :
+ adapter->params.sge.iq_qpp);
+ qpp_mask = (1 << qpp_shift) - 1;
+
+ /*
+ * Calculate the basics of the BAR2 SGE Queue register area:
+ * o The BAR2 page the Queue registers will be in.
+ * o The BAR2 Queue ID.
+ * o The BAR2 Queue ID Offset into the BAR2 page.
+ */
+ bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_qid = qid & qpp_mask;
+ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+ /*
+ * If the BAR2 Queue ID Offset is less than the Page Size, then the
+ * hardware will infer the Absolute Queue ID simply from the writes to
+ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
+ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+ * from the BAR2 Page and BAR2 Queue ID.
+ *
+	 * One important consequence of this is that some BAR2 SGE registers
+ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+ * there. But other registers synthesize the SGE Queue ID purely
+ * from the writes to the registers -- the Write Combined Doorbell
+ * Buffer is a good example. These BAR2 SGE Registers are only
+ * available for those BAR2 SGE Register areas where the SGE Absolute
+ * Queue ID can be inferred from simple writes.
+ */
+ bar2_qoffset = bar2_page_offset;
+ bar2_qinferred = (bar2_qid_offset < page_size);
+ if (bar2_qinferred) {
+ bar2_qoffset += bar2_qid_offset;
+ bar2_qid = 0;
+ }
+
+ *pbar2_qoffset = bar2_qoffset;
+ *pbar2_qid = bar2_qid;
+ return 0;
+}
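+
+/* Illustrative usage sketch, compiled out: how a queue-setup path might
+ * locate an egress queue's BAR2 doorbell using t4_bar2_sge_qregs().
+ * "bar2_base", the mapped BAR2 region, is a hypothetical caller-provided
+ * pointer.
+ */
+#if 0
+static void *example_eq_doorbell(struct adapter *adap, u8 *bar2_base,
+				 unsigned int eqid, unsigned int *bar2_qid)
+{
+	u64 bar2_qoffset;
+
+	if (t4_bar2_sge_qregs(adap, eqid, T4_BAR2_QTYPE_EGRESS,
+			      &bar2_qoffset, bar2_qid) != 0)
+		return NULL;	/* e.g. T4, which has no BAR2 SGE registers */
+
+	/* a BAR2 Queue ID of 0 means the queue is inferred from the offset */
+	return bar2_base + bar2_qoffset;
+}
+#endif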
+
+/**
+ * t4_init_sge_params - initialize adap->params.sge
+ * @adapter: the adapter
+ *
+ * Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 hps, qpp;
+ unsigned int s_hps, s_qpp;
+
+ /*
+ * Extract the SGE Page Size for our PF.
+ */
+ hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
+ s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
+ adapter->pf);
+ sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
+
+ /*
+	 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
+ */
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
+ qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+ qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
+ sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+
+ return 0;
+}
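+
+/* Note: all three registers read above pack one small per-PF field, so the
+ * shift for PF n is the PF0 shift plus n times the per-PF stride. The
+ * decoded hps value is a page-size exponent relative to 1KB; see the
+ * "hps + 10" calculation in t4_bar2_sge_qregs() above.
+ */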
+
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+ int chan, ret;
+ u32 param, v;
+
+ v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = G_TIMERRESOLUTION(v);
+ adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
+
+ /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ for (chan = 0; chan < NCHAN; chan++)
+ adap->params.tp.tx_modq[chan] = chan;
+
+ /*
+ * Cache the adapter's Compressed Filter Mode/Mask and global Ingress
+ * Configuration.
+ */
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
+ V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));
+
+ /* Read current value */
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, &param, &v);
+ if (!ret) {
+ dev_info(adap, "Current filter mode/mask 0x%x:0x%x\n",
+ G_FW_PARAMS_PARAM_FILTER_MODE(v),
+ G_FW_PARAMS_PARAM_FILTER_MASK(v));
+ adap->params.tp.vlan_pri_map =
+ G_FW_PARAMS_PARAM_FILTER_MODE(v);
+ adap->params.tp.filter_mask =
+ G_FW_PARAMS_PARAM_FILTER_MASK(v);
+ } else {
+ dev_info(adap,
+ "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
+
+		/* With an older firmware (which doesn't expose the
+		 * FW_PARAM_DEV_FILTER_MODE_MASK API) and a newer driver
+		 * (which uses it), fall back to the older method of reading
+		 * the filter mode from the indirect register.
+		 */
+ t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &adap->params.tp.vlan_pri_map, 1,
+ A_TP_VLAN_PRI_MAP);
+
+		/* With the older-firmware and newer-driver combination we
+		 * might run into an issue when the user wants to use the
+		 * hash filter region but filter_mask is zero; filter_mask
+		 * validation is then hard to do. To avoid that, set
+		 * filter_mask equal to the filter mode, which behaves exactly
+		 * like the older way of ignoring filter mask validation.
+		 */
+ adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
+ }
+
+ t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+ &adap->params.tp.ingress_config, 1,
+ A_TP_INGRESS_CONFIG);
+
+	/* For T6, cache whether the adapter delivers a compressed error
+	 * vector and passes outer header info for encapsulated packets.
+	 */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ v = t4_read_reg(adap, A_TP_OUT_CONFIG);
+ adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
+ }
+
+ /*
+ * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ * shift positions of several elements of the Compressed Filter Tuple
+ * for this adapter which we need frequently ...
+ */
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+ F_PROTOCOL);
+ adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
+ F_ETHERTYPE);
+ adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
+ F_MACMATCH);
+ adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
+
+ v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask = v;
+ v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
+ adap->params.tp.hash_filter_mask |= ((u64)v << 32);
+
+ return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ * Return the shift position of a filter field within the Compressed
+ * Filter Tuple. The filter field is specified via its selection bit
+ * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
+{
+ unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+ unsigned int sel;
+ int field_shift;
+
+ if ((filter_mode & filter_sel) == 0)
+ return -1;
+
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ }
+ }
+ return field_shift;
+}
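+
+/* Worked example: with a filter mode of F_PORT | F_VLAN | F_PROTOCOL,
+ * t4_filter_field_shift(adap, F_PROTOCOL) sums the widths of the enabled
+ * lower-order fields and returns W_FT_PORT + W_FT_VLAN, i.e. the bit offset
+ * of the Protocol field within the Compressed Filter Tuple.
+ */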
+
+int t4_init_rss_mode(struct adapter *adap, int mbox)
+{
+ int i, ret;
+ struct fw_rss_vi_config_cmd rvc;
+
+ memset(&rvc, 0, sizeof(rvc));
+
+ for_each_port(adap, i) {
+ struct port_info *p = adap2pinfo(adap, i);
+
+ rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
+ rvc.retval_len16 = htonl(FW_LEN16(rvc));
+ ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
+ if (ret)
+ return ret;
+ p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
+ }
+ return 0;
+}
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+ unsigned int fw_caps = adap->params.fw_caps_support;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ struct fw_port_cmd cmd;
+ u8 vivld = 0, vin = 0;
+ int ret, i, j = 0;
+ int mdio_addr;
+ u32 action;
+ u8 addr[6];
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+ unsigned int rss_size = 0;
+
+ while ((adap->params.portvec & (1 << j)) == 0)
+ j++;
+
+ /* If we haven't yet determined whether we're talking to
+ * Firmware which knows the new 32-bit Port Capabilities, it's
+ * time to find out now. This will also tell new Firmware to
+ * send us Port Status Updates using the new 32-bit Port
+ * Capabilities version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val, caps;
+
+ caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32;
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X(caps));
+ val = 1;
+ ret = t4_set_params(adap, mbox, pf, vf, 1, &param,
+ &val);
+ fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16;
+ adap->params.fw_caps_support = fw_caps;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(j));
+ action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32;
+ cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
+ FW_LEN16(cmd));
+ ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ /* Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus =
+ be32_to_cpu(cmd.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1;
+ pcaps = be16_to_cpu(cmd.u.info.pcap);
+ acaps = be16_to_cpu(cmd.u.info.acap);
+ pcaps = fwcaps16_to_caps32(pcaps);
+ acaps = fwcaps16_to_caps32(acaps);
+ } else {
+ u32 lstatus32 =
+ be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1;
+ pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
+ acaps = be32_to_cpu(cmd.u.info32.acaps32);
+ }
+
+ ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size,
+ &vivld, &vin);
+ if (ret < 0)
+ return ret;
+
+ pi->viid = ret;
+ pi->tx_chan = j;
+ pi->rss_size = rss_size;
+ t4_os_set_hw_addr(adap, i, addr);
+
+ /* If fw supports returning the VIN as part of FW_VI_CMD,
+ * save the returned values.
+ */
+ if (adap->params.viid_smt_extn_support) {
+ pi->vivld = vivld;
+ pi->vin = vin;
+ } else {
+ /* Retrieve the values from VIID */
+ pi->vivld = G_FW_VIID_VIVLD(pi->viid);
+ pi->vin = G_FW_VIID_VIN(pi->viid);
+ }
+
+ pi->port_type = port_type;
+ pi->mdio_addr = mdio_addr;
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&pi->link_cfg, pcaps, acaps);
+ j++;
+ }
+ return 0;
+}
+
+/**
+ * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @addr: address within adapter memory
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address and host buffer must be aligned on 32-bit
+ * boundaries; the length may be arbitrary.
+ *
+ * NOTES:
+ * 1. The memory is transferred as a raw byte sequence from/to the
+ * firmware's memory. If this memory contains data structures which
+ * contain multi-byte integers, it's the caller's responsibility to
+ * perform appropriate byte order conversions.
+ *
+ * 2. It is the Caller's responsibility to ensure that no other code
+ * uses the specified PCI-E Memory Window while this routine is
+ * using it. This is typically done via the use of OS-specific
+ * locks, etc.
+ */
+int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 pos, offset, resid;
+ u32 win_pf, mem_reg, mem_aperture, mem_base;
+ u32 *buf;
+
+ /* Argument sanity checks ...*/
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
+ return -EINVAL;
+ buf = (u32 *)hbuf;
+
+ /* It's convenient to be able to handle lengths which aren't a
+ * multiple of 32-bits because we often end up transferring files to
+ * the firmware. So we'll handle that by normalizing the length here
+ * and then handling any residual transfer at the end.
+ */
+ resid = len & 0x3;
+ len -= resid;
+
+ /* Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ win));
+ mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
+ mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
+
+ win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
+
+ /* Calculate our initial PCI-E Memory Window Position and Offset into
+ * that Window.
+ */
+ pos = addr & ~(mem_aperture - 1);
+ offset = addr - pos;
+
+ /* Set up initial PCI-E Memory Window to cover the start of our
+ * transfer. (Read it back to ensure that changes propagate before we
+ * attempt to use the new value.)
+ */
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
+ pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
+
+ /* Transfer data to/from the adapter as long as there's an integral
+ * number of 32-bit transfers to complete.
+ *
+ * A note on Endianness issues:
+ *
+ * The "register" reads and writes below from/to the PCI-E Memory
+ * Window invoke the standard adapter Big-Endian to PCI-E Link
+	 * Little-Endian "swizzle." As a result, if we have the following
+ * data in adapter memory:
+ *
+ * Memory: ... | b0 | b1 | b2 | b3 | ...
+ * Address: i+0 i+1 i+2 i+3
+ *
+ * Then a read of the adapter memory via the PCI-E Memory Window
+ * will yield:
+ *
+ * x = readl(i)
+ * 31 0
+ * [ b3 | b2 | b1 | b0 ]
+ *
+ * If this value is stored into local memory on a Little-Endian system
+ * it will show up correctly in local memory as:
+ *
+ * ( ..., b0, b1, b2, b3, ... )
+ *
+ * But on a Big-Endian system, the store will show up in memory
+ * incorrectly swizzled as:
+ *
+ * ( ..., b3, b2, b1, b0, ... )
+ *
+ * So we need to account for this in the reads and writes to the
+ * PCI-E Memory Window below by undoing the register read/write
+	 * swizzles.
+ */
+ while (len > 0) {
+ if (dir == T4_MEMORY_READ)
+ *buf++ = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+ else
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(*buf++));
+ offset += sizeof(__be32);
+ len -= sizeof(__be32);
+
+ /* If we've reached the end of our current window aperture,
+ * move the PCI-E Memory Window on to the next. Note that
+ * doing this here after "len" may be 0 allows us to set up
+ * the PCI-E Memory Window for a possible final residual
+ * transfer below ...
+ */
+ if (offset == mem_aperture) {
+ pos += mem_aperture;
+ offset = 0;
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win), pos | win_pf);
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
+ win));
+ }
+ }
+
+ /* If the original transfer had a length which wasn't a multiple of
+ * 32-bits, now's where we need to finish off the transfer of the
+ * residual amount. The PCI-E Memory Window has already been moved
+ * above (if necessary) to cover this final transfer.
+ */
+ if (resid) {
+ union {
+ u32 word;
+ char byte[4];
+ } last;
+ unsigned char *bp;
+ int i;
+
+ if (dir == T4_MEMORY_READ) {
+ last.word = le32_to_cpu((__le32)t4_read_reg(adap,
+ mem_base +
+ offset));
+			/* Copy only the leading residual bytes; copying the
+			 * word's tail would overrun the caller's buffer.
+			 */
+			for (bp = (unsigned char *)buf, i = 0; i < resid; i++)
+				bp[i] = last.byte[i];
+ } else {
+ last.word = *buf;
+ for (i = resid; i < 4; i++)
+ last.byte[i] = 0;
+ t4_write_reg(adap, mem_base + offset,
+ (u32)cpu_to_le32(last.word));
+ }
+ }
+
+ return 0;
+}
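+
+/* Illustrative usage sketch, compiled out: reading a 32-bit aligned buffer
+ * out of adapter memory through memory window 0 (MEMWIN_NIC). "fw_addr" is
+ * a hypothetical adapter memory address known to the caller.
+ */
+#if 0
+static int example_read_adapter_mem(struct adapter *adap, u32 fw_addr,
+				    void *dst, u32 len)
+{
+	/* the caller must serialize use of the window; see NOTES above */
+	return t4_memory_rw_addr(adap, MEMWIN_NIC, fw_addr, len, dst,
+				 T4_MEMORY_READ);
+}
+#endif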
+
+/**
+ * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @maddr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @hbuf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
+ * provides a (memory type, address within memory type) interface.
+ */
+int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
+ u32 len, void *hbuf, int dir)
+{
+ u32 mtype_offset;
+ u32 edc_size, mc_size;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ */
+ edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
+ if (mtype != MEM_MC1) {
+ mtype_offset = (mtype * (edc_size * 1024 * 1024));
+ } else {
+ mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
+ A_MA_EXT_MEMORY0_BAR));
+ mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ return t4_memory_rw_addr(adap, win,
+ mtype_offset + maddr, len,
+ hbuf, dir);
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h
new file mode 100644
index 000000000..e77563dfa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_hw.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_HW_H
+#define __T4_HW_H
+
+enum {
+ NCHAN = 4, /* # of HW channels */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ UDBS_SEG_SIZE = 128, /* segment size for BAR2 user doorbells */
+};
+
+enum {
+ CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */
+};
+
+enum {
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+};
+
+enum {
+ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
+ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+};
+
+/* PCI-e memory window access */
+enum pcie_memwin {
+ MEMWIN_NIC = 0,
+};
+
+enum {
+ SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
+ SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */
+ /* max no. of desc allowed in WR */
+ SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE,
+};
+
+enum {
+ TCB_SIZE = 128, /* TCB size */
+};
+
+struct sge_qstat { /* data written to SGE queue status entries */
+ __be32 qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ } u;
+};
+
+#define S_RSPD_NEWBUF 31
+#define V_RSPD_NEWBUF(x) ((x) << S_RSPD_NEWBUF)
+#define F_RSPD_NEWBUF V_RSPD_NEWBUF(1U)
+
+#define S_RSPD_LEN 0
+#define M_RSPD_LEN 0x7fffffff
+#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
+#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
+
+#define S_RSPD_GEN 7
+#define V_RSPD_GEN(x) ((x) << S_RSPD_GEN)
+#define F_RSPD_GEN V_RSPD_GEN(1U)
+
+#define S_RSPD_TYPE 4
+#define M_RSPD_TYPE 0x3
+#define V_RSPD_TYPE(x) ((x) << S_RSPD_TYPE)
+#define G_RSPD_TYPE(x) (((x) >> S_RSPD_TYPE) & M_RSPD_TYPE)
+
+/* Rx queue interrupt deferral field: timer index */
+#define S_QINTR_CNT_EN 0
+#define V_QINTR_CNT_EN(x) ((x) << S_QINTR_CNT_EN)
+#define F_QINTR_CNT_EN V_QINTR_CNT_EN(1U)
+
+#define S_QINTR_TIMER_IDX 1
+#define M_QINTR_TIMER_IDX 0x7
+#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX)
+#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX)
+
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start) ((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+
+enum {
+ /*
+ * Various Expansion-ROM boot images, etc.
+ */
+ FLASH_EXP_ROM_START_SEC = 0,
+ FLASH_EXP_ROM_NSECS = 6,
+ FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
+ FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+
+ /*
+ * Location of firmware image in FLASH.
+ */
+ FLASH_FW_START_SEC = 8,
+ FLASH_FW_NSECS = 16,
+ FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+ FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+ /*
+ * Location of bootstrap firmware image in FLASH.
+ */
+ FLASH_FWBOOTSTRAP_START_SEC = 27,
+ FLASH_FWBOOTSTRAP_NSECS = 1,
+ FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+ FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
+ /*
+ * Location of Firmware Configuration File in FLASH.
+ */
+ FLASH_CFG_START_SEC = 31,
+ FLASH_CFG_NSECS = 1,
+ FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
+ FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+
+ /*
+ * We don't support FLASH devices which can't support the full
+ * standard set of sections which we need for normal operations.
+ */
+ FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
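+
+/* Note: with 64KB sectors this layout places the Firmware Configuration
+ * File at sector 31 (offset 0x1f0000), so FLASH_MIN_SIZE works out to
+ * 32 sectors, i.e. 2MB.
+ */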
+
+#endif /* __T4_HW_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h
new file mode 100644
index 000000000..a6ddaa7b0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_msg.h
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef T4_MSG_H
+#define T4_MSG_H
+
+enum {
+ CPL_ACT_OPEN_REQ = 0x3,
+ CPL_SET_TCB_FIELD = 0x5,
+ CPL_ABORT_REQ = 0xA,
+ CPL_ABORT_RPL = 0xB,
+ CPL_L2T_WRITE_REQ = 0x12,
+ CPL_SMT_WRITE_REQ = 0x14,
+ CPL_TID_RELEASE = 0x1A,
+ CPL_L2T_WRITE_RPL = 0x23,
+ CPL_ACT_OPEN_RPL = 0x25,
+ CPL_ABORT_RPL_RSS = 0x2D,
+ CPL_SMT_WRITE_RPL = 0x2E,
+ CPL_SET_TCB_RPL = 0x3A,
+ CPL_ACT_OPEN_REQ6 = 0x83,
+ CPL_SGE_EGR_UPDATE = 0xA5,
+ CPL_FW4_MSG = 0xC0,
+ CPL_FW6_MSG = 0xE0,
+ CPL_TX_PKT_LSO = 0xED,
+ CPL_TX_PKT_XT = 0xEE,
+};
+
+enum CPL_error {
+ CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_FULL = 3,
+};
+
+enum {
+ ULP_MODE_NONE = 0,
+ ULP_MODE_TCPDDP = 5,
+};
+
+enum {
+ CPL_ABORT_SEND_RST = 0,
+ CPL_ABORT_NO_RST,
+};
+
+enum { /* TX_PKT_XT checksum types */
+ TX_CSUM_TCPIP = 8,
+ TX_CSUM_UDPIP = 9,
+ TX_CSUM_TCPIP6 = 10,
+};
+
+union opcode_tid {
+ __be32 opcode_tid;
+ __u8 opcode;
+};
+
+#define S_CPL_OPCODE 24
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+
+#define G_TID(x) ((x) & 0xFFFFFF)
+
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid))
+
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+
+/* extract the TID from a CPL command */
+#define GET_TID(cmd) (G_TID(be32_to_cpu(OPCODE_TID(cmd))))
+
+/* partitioning of TID fields that also carry a queue id */
+#define S_TID_TID 0
+#define M_TID_TID 0x3fff
+#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID)
+
+#define S_TID_QID 14
+#define V_TID_QID(x) ((x) << S_TID_QID)
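+
+/* Note on the S_/M_/V_/G_ convention used throughout these headers: S_FOO is
+ * a field's bit shift, M_FOO its unshifted mask, V_FOO(x) places a value in
+ * the field and G_FOO(x) extracts it. For example, a CPL command's
+ * opcode/TID word might be built and parsed as:
+ *
+ *	req->ot.opcode_tid = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD,
+ *						       tid));
+ *	...
+ *	tid = GET_TID(rpl);	(the low 24 bits)
+ */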
+
+struct rss_header {
+ __u8 opcode;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ __u8 channel:2;
+ __u8 filter_hit:1;
+ __u8 filter_tid:1;
+ __u8 hash_type:2;
+ __u8 ipv6:1;
+ __u8 send2fw:1;
+#else
+ __u8 send2fw:1;
+ __u8 ipv6:1;
+ __u8 hash_type:2;
+ __u8 filter_tid:1;
+ __u8 filter_hit:1;
+ __u8 channel:2;
+#endif
+ __be16 qid;
+ __be32 hash_val;
+};
+
+#if defined(RSS_HDR_VLD) || defined(CHELSIO_FW)
+#define RSS_HDR struct rss_header rss_hdr
+#else
+#define RSS_HDR
+#endif
+
+#ifndef CHELSIO_FW
+struct work_request_hdr {
+ __be32 wr_hi;
+ __be32 wr_mid;
+ __be64 wr_lo;
+};
+
+#define WR_HDR struct work_request_hdr wr
+#define WR_HDR_SIZE sizeof(struct work_request_hdr)
+#else
+#define WR_HDR
+#define WR_HDR_SIZE 0
+#endif
+
+#define S_COOKIE 5
+#define M_COOKIE 0x7
+#define V_COOKIE(x) ((x) << S_COOKIE)
+#define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
+
+/* option 0 fields */
+#define S_TX_CHAN 2
+#define V_TX_CHAN(x) ((x) << S_TX_CHAN)
+
+#define S_DELACK 5
+#define V_DELACK(x) ((x) << S_DELACK)
+
+#define S_NON_OFFLOAD 7
+#define V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD)
+#define F_NON_OFFLOAD V_NON_OFFLOAD(1U)
+
+#define S_ULP_MODE 8
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+
+#define S_SMAC_SEL 28
+#define V_SMAC_SEL(x) ((__u64)(x) << S_SMAC_SEL)
+
+#define S_TCAM_BYPASS 48
+#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS V_TCAM_BYPASS(1ULL)
+
+#define S_L2T_IDX 36
+#define V_L2T_IDX(x) ((__u64)(x) << S_L2T_IDX)
+
+#define S_NAGLE 49
+#define V_NAGLE(x) ((__u64)(x) << S_NAGLE)
+
+/* option 2 fields */
+#define S_RSS_QUEUE 0
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+
+#define S_RSS_QUEUE_VALID 10
+#define V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID)
+#define F_RSS_QUEUE_VALID V_RSS_QUEUE_VALID(1U)
+
+#define S_CONG_CNTRL 14
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+
+#define S_RX_CHANNEL 26
+#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
+#define F_RX_CHANNEL V_RX_CHANNEL(1U)
+
+#define S_CCTRL_ECN 27
+#define V_CCTRL_ECN(x) ((x) << S_CCTRL_ECN)
+
+#define S_SACK_EN 30
+#define V_SACK_EN(x) ((x) << S_SACK_EN)
+
+#define S_T5_OPT_2_VALID 31
+#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
+#define F_T5_OPT_2_VALID V_T5_OPT_2_VALID(1U)
+
+struct cpl_t6_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+struct cpl_t6_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+#define S_FILTER_TUPLE 24
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+
+struct cpl_act_open_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 atid_status;
+};
+
+/* cpl_act_open_rpl.atid_status fields */
+#define S_AOPEN_STATUS 0
+#define M_AOPEN_STATUS 0xFF
+#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS)
+
+#define S_AOPEN_ATID 8
+#define M_AOPEN_ATID 0xFFFFFF
+#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)
+
+struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 reply_ctrl;
+ __be16 word_cookie;
+ __be64 mask;
+ __be64 val;
+};
+
+/* cpl_set_tcb_field.word_cookie fields */
+#define S_WORD 0
+#define V_WORD(x) ((x) << S_WORD)
+
+/* cpl_get_tcb.reply_ctrl fields */
+#define S_QUEUENO 0
+#define V_QUEUENO(x) ((x) << S_QUEUENO)
+
+#define S_REPLY_CHAN 14
+#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
+
+#define S_NO_REPLY 15
+#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
+
+struct cpl_set_tcb_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 rsvd;
+ __u8 cookie;
+ __u8 status;
+ __be64 oldval;
+};
+
+/* cpl_abort_req status command code
+ */
+struct cpl_abort_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 rsvd[3];
+ __u8 status;
+};
+
+struct cpl_abort_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd0;
+ __u8 rsvd1;
+ __u8 cmd;
+ __u8 rsvd2[6];
+};
+
+struct cpl_tid_release {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 rsvd;
+};
+
+struct cpl_tx_data {
+ union opcode_tid ot;
+ __be32 len;
+ __be32 rsvd;
+ __be32 flags;
+};
+
+struct cpl_tx_pkt_core {
+ __be32 ctrl0;
+ __be16 pack;
+ __be16 len;
+ __be64 ctrl1;
+};
+
+struct cpl_tx_pkt {
+ WR_HDR;
+ struct cpl_tx_pkt_core c;
+};
+
+/* cpl_tx_pkt_core.ctrl0 fields */
+#define S_TXPKT_PF 8
+#define M_TXPKT_PF 0x7
+#define V_TXPKT_PF(x) ((x) << S_TXPKT_PF)
+#define G_TXPKT_PF(x) (((x) >> S_TXPKT_PF) & M_TXPKT_PF)
+
+#define S_TXPKT_INTF 16
+#define M_TXPKT_INTF 0xF
+#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF)
+#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF)
+
+#define S_TXPKT_OPCODE 24
+#define M_TXPKT_OPCODE 0xFF
+#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE)
+#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE)
+
+/* cpl_tx_pkt_core.ctrl1 fields */
+#define S_TXPKT_IPHDR_LEN 20
+#define M_TXPKT_IPHDR_LEN 0x3FFF
+#define V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN)
+#define G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN)
+
+#define S_TXPKT_ETHHDR_LEN 34
+#define M_TXPKT_ETHHDR_LEN 0x3F
+#define V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN)
+#define G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN)
+
+#define S_T6_TXPKT_ETHHDR_LEN 32
+#define M_T6_TXPKT_ETHHDR_LEN 0xFF
+#define V_T6_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_T6_TXPKT_ETHHDR_LEN)
+#define G_T6_TXPKT_ETHHDR_LEN(x) \
+ (((x) >> S_T6_TXPKT_ETHHDR_LEN) & M_T6_TXPKT_ETHHDR_LEN)
+
+#define S_TXPKT_CSUM_TYPE 40
+#define M_TXPKT_CSUM_TYPE 0xF
+#define V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE)
+#define G_TXPKT_CSUM_TYPE(x) (((x) >> S_TXPKT_CSUM_TYPE) & M_TXPKT_CSUM_TYPE)
+
+#define S_TXPKT_VLAN 44
+#define M_TXPKT_VLAN 0xFFFF
+#define V_TXPKT_VLAN(x) ((__u64)(x) << S_TXPKT_VLAN)
+#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN)
+
+#define S_TXPKT_VLAN_VLD 60
+#define V_TXPKT_VLAN_VLD(x) ((__u64)(x) << S_TXPKT_VLAN_VLD)
+#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1ULL)
+
+#define S_TXPKT_IPCSUM_DIS 62
+#define V_TXPKT_IPCSUM_DIS(x) ((__u64)(x) << S_TXPKT_IPCSUM_DIS)
+#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1ULL)
+
+#define S_TXPKT_L4CSUM_DIS 63
+#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS)
+#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL)
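+
+/* Example: a transmit path requesting TCP/IPv4 checksum offload might build
+ * ctrl1 along the lines of (ip_hdr_len/eth_hdr_len being placeholders for
+ * the packet's L3 and L2 header lengths):
+ *
+ *	ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
+ *		V_TXPKT_IPHDR_LEN(ip_hdr_len) |
+ *		V_TXPKT_ETHHDR_LEN(eth_hdr_len);
+ *
+ * (T6 uses the wider V_T6_TXPKT_ETHHDR_LEN field instead.) Conversely,
+ * setting F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS disables hardware
+ * checksum insertion.
+ */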
+
+struct cpl_tx_pkt_lso_core {
+ __be32 lso_ctrl;
+ __be16 ipid_ofst;
+ __be16 mss;
+ __be32 seqno_offset;
+ __be32 len;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+struct cpl_tx_pkt_lso {
+ WR_HDR;
+ struct cpl_tx_pkt_lso_core c;
+ /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+/* cpl_tx_pkt_lso_core.lso_ctrl fields */
+#define S_LSO_TCPHDR_LEN 0
+#define M_LSO_TCPHDR_LEN 0xF
+#define V_LSO_TCPHDR_LEN(x) ((x) << S_LSO_TCPHDR_LEN)
+#define G_LSO_TCPHDR_LEN(x) (((x) >> S_LSO_TCPHDR_LEN) & M_LSO_TCPHDR_LEN)
+
+#define S_LSO_IPHDR_LEN 4
+#define M_LSO_IPHDR_LEN 0xFFF
+#define V_LSO_IPHDR_LEN(x) ((x) << S_LSO_IPHDR_LEN)
+#define G_LSO_IPHDR_LEN(x) (((x) >> S_LSO_IPHDR_LEN) & M_LSO_IPHDR_LEN)
+
+#define S_LSO_ETHHDR_LEN 16
+#define M_LSO_ETHHDR_LEN 0xF
+#define V_LSO_ETHHDR_LEN(x) ((x) << S_LSO_ETHHDR_LEN)
+#define G_LSO_ETHHDR_LEN(x) (((x) >> S_LSO_ETHHDR_LEN) & M_LSO_ETHHDR_LEN)
+
+#define S_LSO_IPV6 20
+#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6)
+#define F_LSO_IPV6 V_LSO_IPV6(1U)
+
+#define S_LSO_LAST_SLICE 22
+#define V_LSO_LAST_SLICE(x) ((x) << S_LSO_LAST_SLICE)
+#define F_LSO_LAST_SLICE V_LSO_LAST_SLICE(1U)
+
+#define S_LSO_FIRST_SLICE 23
+#define V_LSO_FIRST_SLICE(x) ((x) << S_LSO_FIRST_SLICE)
+#define F_LSO_FIRST_SLICE V_LSO_FIRST_SLICE(1U)
+
+#define S_LSO_OPCODE 24
+#define M_LSO_OPCODE 0xFF
+#define V_LSO_OPCODE(x) ((x) << S_LSO_OPCODE)
+#define G_LSO_OPCODE(x) (((x) >> S_LSO_OPCODE) & M_LSO_OPCODE)
+
+#define S_LSO_T5_XFER_SIZE 0
+#define M_LSO_T5_XFER_SIZE 0xFFFFFFF
+#define V_LSO_T5_XFER_SIZE(x) ((x) << S_LSO_T5_XFER_SIZE)
+#define G_LSO_T5_XFER_SIZE(x) (((x) >> S_LSO_T5_XFER_SIZE) & M_LSO_T5_XFER_SIZE)
+
+struct cpl_rx_pkt {
+ RSS_HDR;
+ __u8 opcode;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ __u8 iff:4;
+ __u8 csum_calc:1;
+ __u8 ipmi_pkt:1;
+ __u8 vlan_ex:1;
+ __u8 ip_frag:1;
+#else
+ __u8 ip_frag:1;
+ __u8 vlan_ex:1;
+ __u8 ipmi_pkt:1;
+ __u8 csum_calc:1;
+ __u8 iff:4;
+#endif
+ __be16 csum;
+ __be16 vlan;
+ __be16 len;
+ __be32 l2info;
+ __be16 hdr_len;
+ __be16 err_vec;
+};
+
+struct cpl_l2t_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 params;
+ __be16 l2t_idx;
+ __be16 vlan;
+ __u8 dst_mac[6];
+};
+
+/* cpl_l2t_write_req.params fields */
+#define S_L2T_W_PORT 8
+#define V_L2T_W_PORT(x) ((x) << S_L2T_W_PORT)
+
+#define S_L2T_W_LPBK 10
+#define V_L2T_W_LPBK(x) ((x) << S_L2T_W_LPBK)
+
+#define S_L2T_W_ARPMISS 11
+#define V_L2T_W_ARPMISS(x) ((x) << S_L2T_W_ARPMISS)
+
+#define S_L2T_W_NOREPLY 15
+#define V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY)
+
+struct cpl_l2t_write_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __u8 status;
+ __u8 rsvd[3];
+};
+
+struct cpl_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+ __be16 pfvf1;
+ __u8 src_mac1[6];
+ __be16 pfvf0;
+ __u8 src_mac0[6];
+};
+
+struct cpl_t6_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 params;
+ __be64 tag;
+ __be16 pfvf0;
+ __u8 src_mac0[6];
+ __be32 local_ip;
+ __be32 rsvd;
+};
+
+struct cpl_smt_write_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ u8 status;
+ u8 rsvd[3];
+};
+
+/* cpl_smt_{read,write}_req.params fields */
+#define S_SMTW_OVLAN_IDX 16
+#define V_SMTW_OVLAN_IDX(x) ((x) << S_SMTW_OVLAN_IDX)
+
+#define S_SMTW_IDX 20
+#define V_SMTW_IDX(x) ((x) << S_SMTW_IDX)
+
+#define S_SMTW_NORPL 31
+#define V_SMTW_NORPL(x) ((x) << S_SMTW_NORPL)
+
+/* rx_pkt.l2info fields */
+#define S_RXF_UDP 22
+#define V_RXF_UDP(x) ((x) << S_RXF_UDP)
+#define F_RXF_UDP V_RXF_UDP(1U)
+
+#define S_RXF_TCP 23
+#define V_RXF_TCP(x) ((x) << S_RXF_TCP)
+#define F_RXF_TCP V_RXF_TCP(1U)
+
+#define S_RXF_IP 24
+#define V_RXF_IP(x) ((x) << S_RXF_IP)
+#define F_RXF_IP V_RXF_IP(1U)
+
+#define S_RXF_IP6 25
+#define V_RXF_IP6(x) ((x) << S_RXF_IP6)
+#define F_RXF_IP6 V_RXF_IP6(1U)
+
+/* rx_pkt.err_vec fields */
+/* In T6, rx_pkt.err_vec indicates
+ * RxError Error vector (16b) or
+ * Encapsulating header length (8b),
+ * Outer encapsulation type (2b) and
+ * compressed error vector (6b) if CRxPktEnc is
+ * enabled in TP_OUT_CONFIG
+ */
+#define S_T6_COMPR_RXERR_VEC 0
+#define M_T6_COMPR_RXERR_VEC 0x3F
+#define V_T6_COMPR_RXERR_VEC(x) ((x) << S_T6_COMPR_RXERR_VEC)
+#define G_T6_COMPR_RXERR_VEC(x) \
+ (((x) >> S_T6_COMPR_RXERR_VEC) & M_T6_COMPR_RXERR_VEC)
+
+/* cpl_fw*.type values */
+enum {
+ FW_TYPE_RSSCPL = 4,
+};
+
+struct cpl_fw4_msg {
+ RSS_HDR;
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[2];
+};
+
+struct cpl_fw6_msg {
+ RSS_HDR;
+ u8 opcode;
+ u8 type;
+ __be16 rsvd0;
+ __be32 rsvd1;
+ __be64 data[4];
+};
+
+/* ULP_TX opcodes */
+enum {
+ ULP_TX_PKT = 4
+};
+
+enum {
+ ULP_TX_SC_NOOP = 0x80,
+ ULP_TX_SC_IMM = 0x81,
+ ULP_TX_SC_DSGL = 0x82,
+ ULP_TX_SC_ISGL = 0x83
+};
+
+#define S_ULPTX_CMD 24
+#define M_ULPTX_CMD 0xFF
+#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD)
+
+#define S_ULP_TX_SC_MORE 23
+#define V_ULP_TX_SC_MORE(x) ((x) << S_ULP_TX_SC_MORE)
+#define F_ULP_TX_SC_MORE V_ULP_TX_SC_MORE(1U)
+
+struct ulptx_sge_pair {
+ __be32 len[2];
+ __be64 addr[2];
+};
+
+struct ulptx_sgl {
+ __be32 cmd_nsge;
+ __be32 len0;
+ __be64 addr0;
+
+#if !(defined C99_NOT_SUPPORTED)
+ struct ulptx_sge_pair sge[0];
+#endif
+
+};
+
+struct ulptx_idata {
+ __be32 cmd_more;
+ __be32 len;
+};
+
+#define S_ULPTX_NSGE 0
+#define M_ULPTX_NSGE 0xFFFF
+#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE)
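+
+/* Illustrative sketch: the command word for a DSGL gather list of
+ * "nsge" entries (example name) might be built as:
+ *
+ *	sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ *			      V_ULPTX_NSGE(nsge));
+ */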
+
+struct ulp_txpkt {
+ __be32 cmd_dest;
+ __be32 len;
+};
+
+/* ulp_txpkt.cmd_dest fields */
+#define S_ULP_TXPKT_DEST 16
+#define M_ULP_TXPKT_DEST 0x3
+#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+
+#define S_ULP_TXPKT_FID 4
+#define M_ULP_TXPKT_FID 0x7ff
+#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
+
+#define S_ULP_TXPKT_RO 3
+#define V_ULP_TXPKT_RO(x) ((x) << S_ULP_TXPKT_RO)
+#define F_ULP_TXPKT_RO V_ULP_TXPKT_RO(1U)
+
+#endif /* T4_MSG_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h
new file mode 100644
index 000000000..f5f027a2e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2019 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/*
+ * The OS-dependent code can define cpp macros for creating a PCI Device ID
+ * Table. This is useful because it allows the PCI ID Table to be maintained
+ * in a single place and all supporting OSes to get new PCI Device IDs
+ * automatically.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ * -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ * -- The PCI Function Number to use in the PCI Device ID Table. "0"
+ * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ * -- If defined, create a PCI Device ID Table with both
+ * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ * -- Used for the individual PCI Device ID entries. Note that we will
+ * -- be adding a trailing comma (",") after all of the entries (and
+ * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ * -- Used to finish the definition of the PCI ID Table. Note that we
+ * -- will be adding a trailing semi-colon (";") here.
+ *
+ * CH_PCI_DEVICE_ID_BYPASS_SUPPORTED [optional]
+ * -- If defined, indicates that the OS Driver has support for Bypass
+ * -- Adapters.
+ */
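+
+/*
+ * For illustration only, a hypothetical driver attaching to PF4 might
+ * instantiate the table as follows (all names below are examples and
+ * not part of this header):
+ *
+ *	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ *		static const struct my_pci_id my_id_table[] = {
+ *	#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+ *	#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ *		{ MY_CHELSIO_VENDOR_ID, (devid) }
+ *	#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ *		{ 0, 0 } }
+ *	#include "t4_pci_id_tbl.h"
+ */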
+#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+
+/*
+ * Some sanity checks ...
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION
+#error CH_PCI_DEVICE_ID_FUNCTION not defined!
+#endif
+#ifndef CH_PCI_ID_TABLE_ENTRY
+#error CH_PCI_ID_TABLE_ENTRY not defined!
+#endif
+#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined!
+#endif
+
+/*
+ * T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ *
+ * We rely on this consistent scheme to construct the proper PCI Device IDs
+ * for the specified CH_PCI_DEVICE_ID_FUNCTION.
+ */
+#ifndef CH_PCI_DEVICE_ID_FUNCTION2
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8))
+#else
+#define CH_PCI_ID_TABLE_FENTRY(devid) \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \
+ CH_PCI_ID_TABLE_ENTRY((devid) | \
+ ((CH_PCI_DEVICE_ID_FUNCTION2) << 8))
+#endif
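+
+/*
+ * For example, with CH_PCI_DEVICE_ID_FUNCTION defined as 4, the T520-cr
+ * entry below (Device ID 0x5001) expands to the PF4 Device ID 0x5401,
+ * i.e. 0x5001 | (4 << 8).
+ */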
+
+CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ /*
+ * T5 adapters:
+ */
+ CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */
+ CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */
+ CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */
+ CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */
+ CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */
+#ifdef CH_PCI_DEVICE_ID_BYPASS_SUPPORTED
+ CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */
+ CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */
+#endif
+ CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */
+ CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */
+ CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
+ CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x5019), /* T540-LP-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501a), /* T540-SO-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x501b), /* T540-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */
+ CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
+ CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
+ CH_PCI_ID_TABLE_FENTRY(0x509A), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509B), /* Custom T540-CR LOM */
+ CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR SFP+ LOM */
+ CH_PCI_ID_TABLE_FENTRY(0x509d), /* Custom T540-CR SFP+ */
+ CH_PCI_ID_TABLE_FENTRY(0x509e), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509f), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T580-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */
+ CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a5), /* Custom T522-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50a6), /* Custom T522-BT-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50a7), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a8), /* Custom T580-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x50ad), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ae), /* Custom T540-XL-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50af), /* Custom T580-KR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x50b0), /* Custom T520-CR-LOM */
+
+	/* T6 adapters: */
+ CH_PCI_ID_TABLE_FENTRY(0x6001), /* T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6002), /* T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6003), /* T6425-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6004), /* T6425-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6005), /* T6225-OCP */
+ CH_PCI_ID_TABLE_FENTRY(0x6006), /* T62100-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6007), /* T62100-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6008), /* T62100-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6009), /* T6210-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x600d), /* T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6011), /* T6225-LL-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6014), /* T61100-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6015), /* T6201-BT */
+ CH_PCI_ID_TABLE_FENTRY(0x6080), /* Custom T6225-CR SFP28 */
+ CH_PCI_ID_TABLE_FENTRY(0x6081), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
+CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
+
+#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */
+
+#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h
new file mode 100644
index 000000000..97cf49a48
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs.h
@@ -0,0 +1,966 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#define MYPF_BASE 0x1b000
+#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
+
+#define PF0_BASE 0x1e000
+#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
+
+#define PF_STRIDE 0x400
+#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
+#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+
+#define MYPORT_BASE 0x1c000
+#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
+
+#define PORT0_BASE 0x20000
+#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
+
+#define PORT_STRIDE 0x2000
+#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
+#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
+
+#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_PCIE_MEM_ACCESS_INSTANCES 8
+
+#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_PCIE_FW_INSTANCES 8
+
+#define T5_MYPORT_BASE 0x2c000
+#define T5_MYPORT_REG(reg_addr) (T5_MYPORT_BASE + (reg_addr))
+
+#define T5_PORT0_BASE 0x30000
+#define T5_PORT0_REG(reg_addr) (T5_PORT0_BASE + (reg_addr))
+
+#define T5_PORT_STRIDE 0x4000
+#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
+#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
+
+#define MPS_T5_CLS_SRAM_L(idx) (A_MPS_T5_CLS_SRAM_L + (idx) * 8)
+#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
+
+#define MPS_T5_CLS_SRAM_H(idx) (A_MPS_T5_CLS_SRAM_H + (idx) * 8)
+#define NUM_MPS_T5_CLS_SRAM_H_INSTANCES 512
+
+#define S_DATAPORTNUM 12
+#define M_DATAPORTNUM 0xfU
+#define V_DATAPORTNUM(x) ((x) << S_DATAPORTNUM)
+
+#define S_DATALKPTYPE 10
+#define M_DATALKPTYPE 0x3U
+#define V_DATALKPTYPE(x) ((x) << S_DATALKPTYPE)
+
+/* registers for module SGE */
+#define SGE_BASE_ADDR 0x1000
+
+#define A_SGE_PF_KDOORBELL 0x0
+#define A_SGE_VF_KDOORBELL 0x0
+
+#define S_QID 15
+#define M_QID 0x1ffffU
+#define V_QID(x) ((x) << S_QID)
+#define G_QID(x) (((x) >> S_QID) & M_QID)
+
+#define S_DBPRIO 14
+#define V_DBPRIO(x) ((x) << S_DBPRIO)
+#define F_DBPRIO V_DBPRIO(1U)
+
+#define S_PIDX 0
+#define M_PIDX 0x3fffU
+#define V_PIDX(x) ((x) << S_PIDX)
+#define G_PIDX(x) (((x) >> S_PIDX) & M_PIDX)
+
+#define S_DBTYPE 13
+#define V_DBTYPE(x) ((x) << S_DBTYPE)
+#define F_DBTYPE V_DBTYPE(1U)
+
+#define S_PIDX_T5 0
+#define M_PIDX_T5 0x1fffU
+#define V_PIDX_T5(x) ((x) << S_PIDX_T5)
+#define G_PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
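+
+/* Illustrative sketch: a PF driver might ring this doorbell after
+ * posting "n" descriptors to egress queue "cntxt_id" (example names)
+ * roughly as below; T5 and later use V_PIDX_T5() instead:
+ *
+ *	t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ *		     V_QID(cntxt_id) | V_PIDX(n));
+ */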
+
+#define A_SGE_PF_GTS 0x4
+
+#define T4VF_SGE_BASE_ADDR 0x0000
+#define A_SGE_VF_GTS 0x4
+
+#define S_INGRESSQID 16
+#define M_INGRESSQID 0xffffU
+#define V_INGRESSQID(x) ((x) << S_INGRESSQID)
+#define G_INGRESSQID(x) (((x) >> S_INGRESSQID) & M_INGRESSQID)
+
+#define S_SEINTARM 12
+#define V_SEINTARM(x) ((x) << S_SEINTARM)
+#define F_SEINTARM V_SEINTARM(1U)
+
+#define S_CIDXINC 0
+#define M_CIDXINC 0xfffU
+#define V_CIDXINC(x) ((x) << S_CIDXINC)
+#define G_CIDXINC(x) (((x) >> S_CIDXINC) & M_CIDXINC)
+
+#define A_SGE_CONTROL 0x1008
+
+#define S_RXPKTCPLMODE 18
+#define V_RXPKTCPLMODE(x) ((x) << S_RXPKTCPLMODE)
+#define F_RXPKTCPLMODE V_RXPKTCPLMODE(1U)
+
+#define S_EGRSTATUSPAGESIZE 17
+#define V_EGRSTATUSPAGESIZE(x) ((x) << S_EGRSTATUSPAGESIZE)
+#define F_EGRSTATUSPAGESIZE V_EGRSTATUSPAGESIZE(1U)
+
+#define S_PKTSHIFT 10
+#define M_PKTSHIFT 0x7U
+#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT)
+#define G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT)
+
+#define S_INGPADBOUNDARY 4
+#define M_INGPADBOUNDARY 0x7U
+#define V_INGPADBOUNDARY(x) ((x) << S_INGPADBOUNDARY)
+#define G_INGPADBOUNDARY(x) (((x) >> S_INGPADBOUNDARY) & M_INGPADBOUNDARY)
+
+#define A_SGE_HOST_PAGE_SIZE 0x100c
+
+#define S_HOSTPAGESIZEPF7 28
+#define M_HOSTPAGESIZEPF7 0xfU
+#define V_HOSTPAGESIZEPF7(x) ((x) << S_HOSTPAGESIZEPF7)
+#define G_HOSTPAGESIZEPF7(x) (((x) >> S_HOSTPAGESIZEPF7) & M_HOSTPAGESIZEPF7)
+
+#define S_HOSTPAGESIZEPF6 24
+#define M_HOSTPAGESIZEPF6 0xfU
+#define V_HOSTPAGESIZEPF6(x) ((x) << S_HOSTPAGESIZEPF6)
+#define G_HOSTPAGESIZEPF6(x) (((x) >> S_HOSTPAGESIZEPF6) & M_HOSTPAGESIZEPF6)
+
+#define S_HOSTPAGESIZEPF5 20
+#define M_HOSTPAGESIZEPF5 0xfU
+#define V_HOSTPAGESIZEPF5(x) ((x) << S_HOSTPAGESIZEPF5)
+#define G_HOSTPAGESIZEPF5(x) (((x) >> S_HOSTPAGESIZEPF5) & M_HOSTPAGESIZEPF5)
+
+#define S_HOSTPAGESIZEPF4 16
+#define M_HOSTPAGESIZEPF4 0xfU
+#define V_HOSTPAGESIZEPF4(x) ((x) << S_HOSTPAGESIZEPF4)
+#define G_HOSTPAGESIZEPF4(x) (((x) >> S_HOSTPAGESIZEPF4) & M_HOSTPAGESIZEPF4)
+
+#define S_HOSTPAGESIZEPF3 12
+#define M_HOSTPAGESIZEPF3 0xfU
+#define V_HOSTPAGESIZEPF3(x) ((x) << S_HOSTPAGESIZEPF3)
+#define G_HOSTPAGESIZEPF3(x) (((x) >> S_HOSTPAGESIZEPF3) & M_HOSTPAGESIZEPF3)
+
+#define S_HOSTPAGESIZEPF2 8
+#define M_HOSTPAGESIZEPF2 0xfU
+#define V_HOSTPAGESIZEPF2(x) ((x) << S_HOSTPAGESIZEPF2)
+#define G_HOSTPAGESIZEPF2(x) (((x) >> S_HOSTPAGESIZEPF2) & M_HOSTPAGESIZEPF2)
+
+#define S_HOSTPAGESIZEPF1 4
+#define M_HOSTPAGESIZEPF1 0xfU
+#define V_HOSTPAGESIZEPF1(x) ((x) << S_HOSTPAGESIZEPF1)
+#define G_HOSTPAGESIZEPF1(x) (((x) >> S_HOSTPAGESIZEPF1) & M_HOSTPAGESIZEPF1)
+
+#define S_HOSTPAGESIZEPF0 0
+#define M_HOSTPAGESIZEPF0 0xfU
+#define V_HOSTPAGESIZEPF0(x) ((x) << S_HOSTPAGESIZEPF0)
+#define G_HOSTPAGESIZEPF0(x) (((x) >> S_HOSTPAGESIZEPF0) & M_HOSTPAGESIZEPF0)
+
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
+
+#define S_QUEUESPERPAGEPF1 4
+#define M_QUEUESPERPAGEPF1 0xfU
+#define V_QUEUESPERPAGEPF1(x) ((x) << S_QUEUESPERPAGEPF1)
+#define G_QUEUESPERPAGEPF1(x) (((x) >> S_QUEUESPERPAGEPF1) & M_QUEUESPERPAGEPF1)
+
+#define S_QUEUESPERPAGEPF0 0
+#define M_QUEUESPERPAGEPF0 0xfU
+#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)
+#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)
+
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014
+
+#define S_ERR_CPL_EXCEED_IQE_SIZE 22
+#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)
+#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U)
+
+#define S_ERR_INVALID_CIDX_INC 21
+#define V_ERR_INVALID_CIDX_INC(x) ((x) << S_ERR_INVALID_CIDX_INC)
+#define F_ERR_INVALID_CIDX_INC V_ERR_INVALID_CIDX_INC(1U)
+
+#define S_ERR_CPL_OPCODE_0 19
+#define V_ERR_CPL_OPCODE_0(x) ((x) << S_ERR_CPL_OPCODE_0)
+#define F_ERR_CPL_OPCODE_0 V_ERR_CPL_OPCODE_0(1U)
+
+#define S_ERR_DROPPED_DB 18
+#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB)
+#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U)
+
+#define S_ERR_DATA_CPL_ON_HIGH_QID1 17
+#define V_ERR_DATA_CPL_ON_HIGH_QID1(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID1)
+#define F_ERR_DATA_CPL_ON_HIGH_QID1 V_ERR_DATA_CPL_ON_HIGH_QID1(1U)
+
+#define S_ERR_DATA_CPL_ON_HIGH_QID0 16
+#define V_ERR_DATA_CPL_ON_HIGH_QID0(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID0)
+#define F_ERR_DATA_CPL_ON_HIGH_QID0 V_ERR_DATA_CPL_ON_HIGH_QID0(1U)
+
+#define S_ERR_BAD_DB_PIDX3 15
+#define V_ERR_BAD_DB_PIDX3(x) ((x) << S_ERR_BAD_DB_PIDX3)
+#define F_ERR_BAD_DB_PIDX3 V_ERR_BAD_DB_PIDX3(1U)
+
+#define S_ERR_BAD_DB_PIDX2 14
+#define V_ERR_BAD_DB_PIDX2(x) ((x) << S_ERR_BAD_DB_PIDX2)
+#define F_ERR_BAD_DB_PIDX2 V_ERR_BAD_DB_PIDX2(1U)
+
+#define S_ERR_BAD_DB_PIDX1 13
+#define V_ERR_BAD_DB_PIDX1(x) ((x) << S_ERR_BAD_DB_PIDX1)
+#define F_ERR_BAD_DB_PIDX1 V_ERR_BAD_DB_PIDX1(1U)
+
+#define S_ERR_BAD_DB_PIDX0 12
+#define V_ERR_BAD_DB_PIDX0(x) ((x) << S_ERR_BAD_DB_PIDX0)
+#define F_ERR_BAD_DB_PIDX0 V_ERR_BAD_DB_PIDX0(1U)
+
+#define S_ERR_ING_PCIE_CHAN 11
+#define V_ERR_ING_PCIE_CHAN(x) ((x) << S_ERR_ING_PCIE_CHAN)
+#define F_ERR_ING_PCIE_CHAN V_ERR_ING_PCIE_CHAN(1U)
+
+#define S_ERR_ING_CTXT_PRIO 10
+#define V_ERR_ING_CTXT_PRIO(x) ((x) << S_ERR_ING_CTXT_PRIO)
+#define F_ERR_ING_CTXT_PRIO V_ERR_ING_CTXT_PRIO(1U)
+
+#define S_ERR_EGR_CTXT_PRIO 9
+#define V_ERR_EGR_CTXT_PRIO(x) ((x) << S_ERR_EGR_CTXT_PRIO)
+#define F_ERR_EGR_CTXT_PRIO V_ERR_EGR_CTXT_PRIO(1U)
+
+#define S_DBFIFO_HP_INT 8
+#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT)
+#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U)
+
+#define S_DBFIFO_LP_INT 7
+#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT)
+#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U)
+
+#define S_INGRESS_SIZE_ERR 5
+#define V_INGRESS_SIZE_ERR(x) ((x) << S_INGRESS_SIZE_ERR)
+#define F_INGRESS_SIZE_ERR V_INGRESS_SIZE_ERR(1U)
+
+#define S_EGRESS_SIZE_ERR 4
+#define V_EGRESS_SIZE_ERR(x) ((x) << S_EGRESS_SIZE_ERR)
+#define F_EGRESS_SIZE_ERR V_EGRESS_SIZE_ERR(1U)
+
+#define A_SGE_INT_ENABLE3 0x1040
+
+#define A_SGE_FL_BUFFER_SIZE0 0x1044
+#define A_SGE_FL_BUFFER_SIZE1 0x1048
+#define A_SGE_FL_BUFFER_SIZE2 0x104c
+#define A_SGE_FL_BUFFER_SIZE3 0x1050
+
+#define A_SGE_FLM_CFG 0x1090
+
+#define S_CREDITCNT 4
+#define M_CREDITCNT 0x3U
+#define V_CREDITCNT(x) ((x) << S_CREDITCNT)
+#define G_CREDITCNT(x) (((x) >> S_CREDITCNT) & M_CREDITCNT)
+
+#define S_CREDITCNTPACKING 2
+#define M_CREDITCNTPACKING 0x3U
+#define V_CREDITCNTPACKING(x) ((x) << S_CREDITCNTPACKING)
+#define G_CREDITCNTPACKING(x) (((x) >> S_CREDITCNTPACKING) & M_CREDITCNTPACKING)
+
+#define A_SGE_CONM_CTRL 0x1094
+
+#define S_T6_EGRTHRESHOLDPACKING 16
+#define M_T6_EGRTHRESHOLDPACKING 0xffU
+#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \
+ M_T6_EGRTHRESHOLDPACKING)
+
+#define S_EGRTHRESHOLD 8
+#define M_EGRTHRESHOLD 0x3fU
+#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)
+#define G_EGRTHRESHOLD(x) (((x) >> S_EGRTHRESHOLD) & M_EGRTHRESHOLD)
+
+#define S_EGRTHRESHOLDPACKING 14
+#define M_EGRTHRESHOLDPACKING 0x3fU
+#define V_EGRTHRESHOLDPACKING(x) ((x) << S_EGRTHRESHOLDPACKING)
+#define G_EGRTHRESHOLDPACKING(x) (((x) >> S_EGRTHRESHOLDPACKING) & \
+ M_EGRTHRESHOLDPACKING)
+
+#define S_INGTHRESHOLD 2
+#define M_INGTHRESHOLD 0x3fU
+#define V_INGTHRESHOLD(x) ((x) << S_INGTHRESHOLD)
+#define G_INGTHRESHOLD(x) (((x) >> S_INGTHRESHOLD) & M_INGTHRESHOLD)
+
+#define A_SGE_INGRESS_RX_THRESHOLD 0x10a0
+
+#define S_THRESHOLD_0 24
+#define M_THRESHOLD_0 0x3fU
+#define V_THRESHOLD_0(x) ((x) << S_THRESHOLD_0)
+#define G_THRESHOLD_0(x) (((x) >> S_THRESHOLD_0) & M_THRESHOLD_0)
+
+#define S_THRESHOLD_1 16
+#define M_THRESHOLD_1 0x3fU
+#define V_THRESHOLD_1(x) ((x) << S_THRESHOLD_1)
+#define G_THRESHOLD_1(x) (((x) >> S_THRESHOLD_1) & M_THRESHOLD_1)
+
+#define S_THRESHOLD_2 8
+#define M_THRESHOLD_2 0x3fU
+#define V_THRESHOLD_2(x) ((x) << S_THRESHOLD_2)
+#define G_THRESHOLD_2(x) (((x) >> S_THRESHOLD_2) & M_THRESHOLD_2)
+
+#define S_THRESHOLD_3 0
+#define M_THRESHOLD_3 0x3fU
+#define V_THRESHOLD_3(x) ((x) << S_THRESHOLD_3)
+#define G_THRESHOLD_3(x) (((x) >> S_THRESHOLD_3) & M_THRESHOLD_3)
+
+#define A_SGE_TIMER_VALUE_0_AND_1 0x10b8
+
+#define S_TIMERVALUE0 16
+#define M_TIMERVALUE0 0xffffU
+#define V_TIMERVALUE0(x) ((x) << S_TIMERVALUE0)
+#define G_TIMERVALUE0(x) (((x) >> S_TIMERVALUE0) & M_TIMERVALUE0)
+
+#define S_TIMERVALUE1 0
+#define M_TIMERVALUE1 0xffffU
+#define V_TIMERVALUE1(x) ((x) << S_TIMERVALUE1)
+#define G_TIMERVALUE1(x) (((x) >> S_TIMERVALUE1) & M_TIMERVALUE1)
+
+#define A_SGE_TIMER_VALUE_2_AND_3 0x10bc
+
+#define S_TIMERVALUE2 16
+#define M_TIMERVALUE2 0xffffU
+#define V_TIMERVALUE2(x) ((x) << S_TIMERVALUE2)
+#define G_TIMERVALUE2(x) (((x) >> S_TIMERVALUE2) & M_TIMERVALUE2)
+
+#define S_TIMERVALUE3 0
+#define M_TIMERVALUE3 0xffffU
+#define V_TIMERVALUE3(x) ((x) << S_TIMERVALUE3)
+#define G_TIMERVALUE3(x) (((x) >> S_TIMERVALUE3) & M_TIMERVALUE3)
+
+#define A_SGE_TIMER_VALUE_4_AND_5 0x10c0
+
+#define S_TIMERVALUE4 16
+#define M_TIMERVALUE4 0xffffU
+#define V_TIMERVALUE4(x) ((x) << S_TIMERVALUE4)
+#define G_TIMERVALUE4(x) (((x) >> S_TIMERVALUE4) & M_TIMERVALUE4)
+
+#define S_TIMERVALUE5 0
+#define M_TIMERVALUE5 0xffffU
+#define V_TIMERVALUE5(x) ((x) << S_TIMERVALUE5)
+#define G_TIMERVALUE5(x) (((x) >> S_TIMERVALUE5) & M_TIMERVALUE5)
+
+#define A_SGE_DEBUG_INDEX 0x10cc
+#define A_SGE_DEBUG_DATA_HIGH 0x10d0
+#define A_SGE_DEBUG_DATA_LOW 0x10d4
+#define A_SGE_STAT_CFG 0x10ec
+
+#define S_STATMODE 2
+#define M_STATMODE 0x3U
+#define V_STATMODE(x) ((x) << S_STATMODE)
+#define G_STATMODE(x) (((x) >> S_STATMODE) & M_STATMODE)
+
+#define S_STATSOURCE_T5 9
+#define M_STATSOURCE_T5 0xfU
+#define V_STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
+#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5)
+
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8
+
+#define A_SGE_CONTROL2 0x1124
+
+#define S_IDMAARBROUNDROBIN 19
+#define V_IDMAARBROUNDROBIN(x) ((x) << S_IDMAARBROUNDROBIN)
+#define F_IDMAARBROUNDROBIN V_IDMAARBROUNDROBIN(1U)
+
+#define S_INGPACKBOUNDARY 16
+#define M_INGPACKBOUNDARY 0x7U
+#define V_INGPACKBOUNDARY(x) ((x) << S_INGPACKBOUNDARY)
+#define G_INGPACKBOUNDARY(x) (((x) >> S_INGPACKBOUNDARY) & M_INGPACKBOUNDARY)
+
+#define S_BUSY 31
+#define V_BUSY(x) ((x) << S_BUSY)
+#define F_BUSY V_BUSY(1U)
+
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
+#define A_SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
+#define A_SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
+
+/* registers for module PCIE */
+#define PCIE_BASE_ADDR 0x3000
+
+#define A_PCIE_MEM_ACCESS_BASE_WIN 0x3068
+
+#define S_PCIEOFST 10
+#define M_PCIEOFST 0x3fffffU
+#define V_PCIEOFST(x) ((x) << S_PCIEOFST)
+#define G_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
+
+#define S_BIR 8
+#define M_BIR 0x3U
+#define V_BIR(x) ((x) << S_BIR)
+#define G_BIR(x) (((x) >> S_BIR) & M_BIR)
+
+#define S_WINDOW 0
+#define M_WINDOW 0xffU
+#define V_WINDOW(x) ((x) << S_WINDOW)
+#define G_WINDOW(x) (((x) >> S_WINDOW) & M_WINDOW)
+
+#define A_PCIE_MEM_ACCESS_OFFSET 0x306c
+
+#define S_PFNUM 0
+#define M_PFNUM 0x7U
+#define V_PFNUM(x) ((x) << S_PFNUM)
+#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
+
+#define A_PCIE_FW 0x30b8
+#define A_PCIE_FW_PF 0x30bc
+
+#define A_PCIE_CFG2 0x3018
+
+#define S_TOTMAXTAG 0
+#define M_TOTMAXTAG 0x3U
+#define V_TOTMAXTAG(x) ((x) << S_TOTMAXTAG)
+
+#define S_T6_TOTMAXTAG 0
+#define M_T6_TOTMAXTAG 0x7U
+#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG)
+
+#define A_PCIE_CMD_CFG 0x5980
+
+#define S_MINTAG 0
+#define M_MINTAG 0xffU
+#define V_MINTAG(x) ((x) << S_MINTAG)
+
+#define S_T6_MINTAG 0
+#define M_T6_MINTAG 0xffU
+#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
+
+/* registers for module CIM */
+#define CIM_BASE_ADDR 0x7b00
+
+#define A_CIM_VF_EXT_MAILBOX_CTRL 0x0
+
+#define A_CIM_PF_MAILBOX_DATA 0x240
+#define A_CIM_PF_MAILBOX_CTRL 0x280
+
+#define S_MBMSGVALID 3
+#define V_MBMSGVALID(x) ((x) << S_MBMSGVALID)
+#define F_MBMSGVALID V_MBMSGVALID(1U)
+
+#define S_MBOWNER 0
+#define M_MBOWNER 0x3U
+#define V_MBOWNER(x) ((x) << S_MBOWNER)
+#define G_MBOWNER(x) (((x) >> S_MBOWNER) & M_MBOWNER)
+
+#define A_CIM_PF_MAILBOX_CTRL_SHADOW_COPY 0x290
+#define A_CIM_BOOT_CFG 0x7b00
+
+#define S_UPCRST 0
+#define V_UPCRST(x) ((x) << S_UPCRST)
+#define F_UPCRST V_UPCRST(1U)
+
+#define NUM_CIM_PF_MAILBOX_DATA_INSTANCES 16
+
+/* registers for module TP */
+#define A_TP_OUT_CONFIG 0x7d04
+
+#define S_CRXPKTENC 3
+#define V_CRXPKTENC(x) ((x) << S_CRXPKTENC)
+#define F_CRXPKTENC V_CRXPKTENC(1U)
+
+#define TP_BASE_ADDR 0x7d00
+#define A_TP_CMM_TCB_BASE 0x7d10
+
+#define A_TP_TIMER_RESOLUTION 0x7d90
+
+#define S_TIMERRESOLUTION 16
+#define M_TIMERRESOLUTION 0xffU
+#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION)
+#define G_TIMERRESOLUTION(x) (((x) >> S_TIMERRESOLUTION) & M_TIMERRESOLUTION)
+
+#define S_DELAYEDACKRESOLUTION 0
+#define M_DELAYEDACKRESOLUTION 0xffU
+#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
+#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & \
+ M_DELAYEDACKRESOLUTION)
+
+#define A_TP_CCTRL_TABLE 0x7ddc
+
+#define A_TP_MTU_TABLE 0x7de4
+
+#define S_MTUINDEX 24
+#define M_MTUINDEX 0xffU
+#define V_MTUINDEX(x) ((x) << S_MTUINDEX)
+#define G_MTUINDEX(x) (((x) >> S_MTUINDEX) & M_MTUINDEX)
+
+#define S_MTUWIDTH 16
+#define M_MTUWIDTH 0xfU
+#define V_MTUWIDTH(x) ((x) << S_MTUWIDTH)
+#define G_MTUWIDTH(x) (((x) >> S_MTUWIDTH) & M_MTUWIDTH)
+
+#define S_MTUVALUE 0
+#define M_MTUVALUE 0x3fffU
+#define V_MTUVALUE(x) ((x) << S_MTUVALUE)
+#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE)
+
+#define A_TP_RSS_CONFIG_VRT 0x7e00
+
+#define S_KEYMODE 6
+#define M_KEYMODE 0x3U
+#define G_KEYMODE(x) (((x) >> S_KEYMODE) & M_KEYMODE)
+
+#define S_KEYWRADDR 0
+#define V_KEYWRADDR(x) ((x) << S_KEYWRADDR)
+
+#define S_KEYWREN 4
+#define V_KEYWREN(x) ((x) << S_KEYWREN)
+#define F_KEYWREN V_KEYWREN(1U)
+
+#define S_KEYWRADDRX 30
+#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX)
+
+#define S_KEYEXTEND 26
+#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND)
+#define F_KEYEXTEND V_KEYEXTEND(1U)
+
+#define S_T6_VFWRADDR 8
+#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR)
+
+#define A_TP_PIO_ADDR 0x7e40
+#define A_TP_PIO_DATA 0x7e44
+
+#define A_TP_RSS_SECRET_KEY0 0x40
+
+#define A_TP_VLAN_PRI_MAP 0x140
+
+#define S_FRAGMENTATION 9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE 8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+
+#define S_MACMATCH 7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH V_MACMATCH(1U)
+
+#define S_ETHERTYPE 6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE V_ETHERTYPE(1U)
+
+#define S_PROTOCOL 5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL V_PROTOCOL(1U)
+
+#define S_TOS 4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS V_TOS(1U)
+
+#define S_VLAN 3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN V_VLAN(1U)
+
+#define S_VNIC_ID 2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID V_VNIC_ID(1U)
+
+#define S_PORT 1
+#define V_PORT(x) ((x) << S_PORT)
+#define F_PORT V_PORT(1U)
+
+#define S_FCOE 0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE V_FCOE(1U)
+
+#define A_TP_INGRESS_CONFIG 0x141
+
+#define S_USE_ENC_IDX 13
+#define V_USE_ENC_IDX(x) ((x) << S_USE_ENC_IDX)
+#define F_USE_ENC_IDX V_USE_ENC_IDX(1U)
+
+#define S_VNIC 11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC V_VNIC(1U)
+
+#define S_CSUM_HAS_PSEUDO_HDR 10
+#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR)
+#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U)
+
+#define S_RM_OVLAN 9
+#define V_RM_OVLAN(x) ((x) << S_RM_OVLAN)
+
+/* registers for module MA */
+#define A_MA_EDRAM0_BAR 0x77c0
+
+#define S_EDRAM0_SIZE 0
+#define M_EDRAM0_SIZE 0xfffU
+#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
+#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+
+#define A_MA_EXT_MEMORY0_BAR 0x77c8
+
+#define S_EXT_MEM0_SIZE 0
+#define M_EXT_MEM0_SIZE 0xfffU
+#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
+#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+
+/* registers for module MPS */
+#define MPS_BASE_ADDR 0x9000
+#define T4VF_MPS_BASE_ADDR 0x0100
+
+#define S_REPLICATE 11
+#define V_REPLICATE(x) ((x) << S_REPLICATE)
+#define F_REPLICATE V_REPLICATE(1U)
+
+#define S_PF 8
+#define M_PF 0x7U
+#define V_PF(x) ((x) << S_PF)
+#define G_PF(x) (((x) >> S_PF) & M_PF)
+
+#define S_VF_VALID 7
+#define V_VF_VALID(x) ((x) << S_VF_VALID)
+#define F_VF_VALID V_VF_VALID(1U)
+
+#define S_VF 0
+#define M_VF 0x7fU
+#define V_VF(x) ((x) << S_VF)
+#define G_VF(x) (((x) >> S_VF) & M_VF)
+
+#define A_MPS_STAT_CTL 0x9600
+
+#define S_COUNTPAUSEMCRX 5
+#define V_COUNTPAUSEMCRX(x) ((x) << S_COUNTPAUSEMCRX)
+#define F_COUNTPAUSEMCRX V_COUNTPAUSEMCRX(1U)
+
+#define S_COUNTPAUSESTATRX 4
+#define V_COUNTPAUSESTATRX(x) ((x) << S_COUNTPAUSESTATRX)
+#define F_COUNTPAUSESTATRX V_COUNTPAUSESTATRX(1U)
+
+#define S_COUNTPAUSEMCTX 3
+#define V_COUNTPAUSEMCTX(x) ((x) << S_COUNTPAUSEMCTX)
+#define F_COUNTPAUSEMCTX V_COUNTPAUSEMCTX(1U)
+
+#define S_COUNTPAUSESTATTX 2
+#define V_COUNTPAUSESTATTX(x) ((x) << S_COUNTPAUSESTATTX)
+#define F_COUNTPAUSESTATTX V_COUNTPAUSESTATTX(1U)
+
+#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
+#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
+#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
+#define A_MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
+#define A_MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
+#define A_MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
+#define A_MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
+#define A_MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
+#define A_MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
+#define A_MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
+#define A_MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
+#define A_MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
+#define A_MPS_PORT_STAT_TX_PORT_64B_L 0x430
+#define A_MPS_PORT_STAT_TX_PORT_64B_H 0x434
+#define A_MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
+#define A_MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
+#define A_MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
+#define A_MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
+#define A_MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
+#define A_MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
+#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
+#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
+#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
+#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
+#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
+#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
+#define A_MPS_PORT_STAT_TX_PORT_DROP_L 0x468
+#define A_MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
+#define A_MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
+#define A_MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
+#define A_MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
+#define A_MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
+#define A_MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
+#define A_MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
+#define A_MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
+#define A_MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
+#define A_MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
+#define A_MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
+#define A_MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
+#define A_MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
+#define A_MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
+#define A_MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
+#define A_MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
+#define A_MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
+#define A_MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
+#define A_MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
+#define A_MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
+#define A_MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
+#define A_MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
+#define A_MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
+#define A_MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
+#define A_MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
+#define A_MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
+#define A_MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
+#define A_MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
+#define A_MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
+#define A_MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
+#define A_MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
+#define A_MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
+#define A_MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
+#define A_MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
+#define A_MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
+#define A_MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
+#define A_MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
+#define A_MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
+#define A_MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
+#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
+#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
+#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
+#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
+#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
+#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
+#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_H 0x52c
+#define A_MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
+#define A_MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
+#define A_MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
+#define A_MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
+#define A_MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
+#define A_MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
+#define A_MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
+#define A_MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
+#define A_MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
+#define A_MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
+#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
+#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
+#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
+#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
+#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
+#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
+#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
+#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
+#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
+#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
+#define A_MPS_PORT_STAT_RX_PORT_64B_L 0x590
+#define A_MPS_PORT_STAT_RX_PORT_64B_H 0x594
+#define A_MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
+#define A_MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
+#define A_MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
+#define A_MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
+#define A_MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
+#define A_MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
+#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
+#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
+#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
+#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
+#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
+#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
+#define A_MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
+#define A_MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
+#define A_MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
+#define A_MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
+#define A_MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
+#define A_MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
+#define A_MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
+#define A_MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
+#define A_MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
+#define A_MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
+#define A_MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
+#define A_MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
+#define A_MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
+#define A_MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
+#define A_MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
+#define A_MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
+#define A_MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
+#define A_MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
+#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
+#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
+#define A_MPS_CMN_CTL 0x9000
+
+#define S_NUMPORTS 0
+#define M_NUMPORTS 0x3U
+#define V_NUMPORTS(x) ((x) << S_NUMPORTS)
+#define G_NUMPORTS(x) (((x) >> S_NUMPORTS) & M_NUMPORTS)
+
+#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
+#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
+#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
+#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
+#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
+#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
+#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
+#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
+#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
+#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
+#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
+#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
+#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
+#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
+#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
+#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
+#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
+#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
+#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
+#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
+#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
+#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
+#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
+#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
+#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
+#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
+#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
+#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
+#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
+#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
+#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
+#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+
+#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
+#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
+#define A_MPS_VF_STAT_TX_VF_MCAST_BYTES_L 0x90
+#define A_MPS_VF_STAT_TX_VF_MCAST_FRAMES_L 0x98
+#define A_MPS_VF_STAT_TX_VF_UCAST_BYTES_L 0xa0
+#define A_MPS_VF_STAT_TX_VF_UCAST_FRAMES_L 0xa8
+#define A_MPS_VF_STAT_TX_VF_DROP_FRAMES_L 0xb0
+#define A_MPS_VF_STAT_RX_VF_BCAST_FRAMES_L 0xd0
+#define A_MPS_VF_STAT_RX_VF_MCAST_FRAMES_L 0xe0
+#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
+#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+
+#define A_MPS_PORT0_RX_IVLAN 0x3011c
+
+#define S_IVLAN_ETYPE 0
+#define M_IVLAN_ETYPE 0xffffU
+#define V_IVLAN_ETYPE(x) ((x) << S_IVLAN_ETYPE)
+
+#define MPS_PORT_RX_IVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_IVLAN(idx) \
+ (A_MPS_PORT0_RX_IVLAN + (idx) * MPS_PORT_RX_IVLAN_STRIDE)
+
+#define A_MPS_PORT0_RX_OVLAN0 0x30120
+
+#define S_OVLAN_MASK 16
+#define M_OVLAN_MASK 0xffffU
+#define V_OVLAN_MASK(x) ((x) << S_OVLAN_MASK)
+
+#define S_OVLAN_ETYPE 0
+#define M_OVLAN_ETYPE 0xffffU
+#define V_OVLAN_ETYPE(x) ((x) << S_OVLAN_ETYPE)
+
+#define MPS_PORT_RX_OVLAN_STRIDE 0x4000
+#define MPS_PORT_RX_OVLAN_BASE(idx) \
+	(A_MPS_PORT0_RX_OVLAN0 + (idx) * MPS_PORT_RX_OVLAN_STRIDE)
+#define MPS_PORT_RX_OVLAN_REG(idx, reg) (MPS_PORT_RX_OVLAN_BASE(idx) + (reg))
+
+#define A_RX_OVLAN0 0x0
+#define A_RX_OVLAN1 0x4
+#define A_RX_OVLAN2 0x8
+
+#define A_MPS_PORT0_RX_CTL 0x30100
+
+#define S_OVLAN_EN0 0
+#define V_OVLAN_EN0(x) ((x) << S_OVLAN_EN0)
+#define F_OVLAN_EN0 V_OVLAN_EN0(1)
+
+#define S_OVLAN_EN1 1
+#define V_OVLAN_EN1(x) ((x) << S_OVLAN_EN1)
+#define F_OVLAN_EN1 V_OVLAN_EN1(1)
+
+#define S_OVLAN_EN2 2
+#define V_OVLAN_EN2(x) ((x) << S_OVLAN_EN2)
+#define F_OVLAN_EN2 V_OVLAN_EN2(1)
+
+#define S_IVLAN_EN 4
+#define V_IVLAN_EN(x) ((x) << S_IVLAN_EN)
+#define F_IVLAN_EN V_IVLAN_EN(1)
+
+#define MPS_PORT_RX_CTL_STRIDE 0x4000
+#define MPS_PORT_RX_CTL(idx) \
+ (A_MPS_PORT0_RX_CTL + (idx) * MPS_PORT_RX_CTL_STRIDE)
+
+/* registers for module ULP_RX */
+#define ULP_RX_BASE_ADDR 0x19150
+
+#define S_HPZ0 0
+#define M_HPZ0 0xfU
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
+#define A_ULP_RX_TDDP_PSZ 0x19178
+
+/* registers for module SF */
+#define SF_BASE_ADDR 0x193f8
+
+#define A_SF_DATA 0x193f8
+#define A_SF_OP 0x193fc
+
+#define S_SF_LOCK 4
+#define V_SF_LOCK(x) ((x) << S_SF_LOCK)
+#define F_SF_LOCK V_SF_LOCK(1U)
+
+#define S_CONT 3
+#define V_CONT(x) ((x) << S_CONT)
+#define F_CONT V_CONT(1U)
+
+#define S_BYTECNT 1
+#define M_BYTECNT 0x3U
+#define V_BYTECNT(x) ((x) << S_BYTECNT)
+#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+
+#define S_OP 0
+#define V_OP(x) ((x) << S_OP)
+#define F_OP V_OP(1U)
+
+/* registers for module PL */
+#define PL_BASE_ADDR 0x19400
+
+#define S_SOURCEPF 8
+#define M_SOURCEPF 0x7U
+#define V_SOURCEPF(x) ((x) << S_SOURCEPF)
+#define G_SOURCEPF(x) (((x) >> S_SOURCEPF) & M_SOURCEPF)
+
+#define S_T6_SOURCEPF 9
+#define M_T6_SOURCEPF 0x7U
+#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
+#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
+
+#define A_PL_PF_INT_ENABLE 0x3c4
+
+#define S_PFSW 3
+#define V_PFSW(x) ((x) << S_PFSW)
+#define F_PFSW V_PFSW(1U)
+
+#define S_PFCIM 1
+#define V_PFCIM(x) ((x) << S_PFCIM)
+#define F_PFCIM V_PFCIM(1U)
+
+#define A_PL_WHOAMI 0x19400
+#define A_PL_VF_WHOAMI 0x0
+
+#define A_PL_RST 0x19428
+
+#define A_PL_INT_MAP0 0x19414
+
+#define S_PIORST 1
+#define V_PIORST(x) ((x) << S_PIORST)
+#define F_PIORST V_PIORST(1U)
+
+#define S_PIORSTMODE 0
+#define V_PIORSTMODE(x) ((x) << S_PIORSTMODE)
+#define F_PIORSTMODE V_PIORSTMODE(1U)
+
+#define A_PL_REV 0x1943c
+#define A_PL_VF_REV 0x4
+
+#define S_REV 0
+#define M_REV 0xfU
+#define V_REV(x) ((x) << S_REV)
+#define G_REV(x) (((x) >> S_REV) & M_REV)
+
+/* registers for module LE */
+#define A_LE_DB_CONFIG 0x19c04
+
+#define S_HASHEN 20
+#define V_HASHEN(x) ((x) << S_HASHEN)
+#define F_HASHEN V_HASHEN(1U)
+
+#define A_LE_DB_TID_HASHBASE 0x19df8
+
+#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac
+#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h
new file mode 100644
index 000000000..e3f549e51
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_regs_values.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4_REGS_VALUES_H__
+#define __T4_REGS_VALUES_H__
+
+/*
+ * This file contains definitions for various T4 hardware register value
+ * constants. The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior. For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/*
+ * SGE definitions.
+ * ================
+ */
+
+/*
+ * SGE register field values.
+ */
+
+/* CONTROL register */
+#define X_RXPKTCPLMODE_SPLIT 1
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPADBOUNDARY_SHIFT 5
+#define X_INGPADBOUNDARY_32B 0
+
+#define X_T6_INGPADBOUNDARY_SHIFT 3
+#define X_T6_INGPADBOUNDARY_8B 0
+
+/* CONTROL2 register */
+#define X_INGPACKBOUNDARY_SHIFT 5
+#define X_INGPACKBOUNDARY_16B 0
+#define X_INGPACKBOUNDARY_64B 1
+
+/* GTS register */
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+
+/*
+ * Context field definitions. This is by no means a complete list of SGE
+ * Context fields. In the vast majority of cases the firmware initializes
+ * things the way they need to be set up. But in a few small cases, we need
+ * to compute new values and ship them off to the firmware to be applied to
+ * the SGE Contexts ...
+ */
+
+/*
+ * Congestion Manager Definitions.
+ */
+#define S_CONMCTXT_CNGTPMODE 19
+#define M_CONMCTXT_CNGTPMODE 0x3
+#define V_CONMCTXT_CNGTPMODE(x) ((x) << S_CONMCTXT_CNGTPMODE)
+#define G_CONMCTXT_CNGTPMODE(x) \
+ (((x) >> S_CONMCTXT_CNGTPMODE) & M_CONMCTXT_CNGTPMODE)
+#define S_CONMCTXT_CNGCHMAP 0
+#define M_CONMCTXT_CNGCHMAP 0xffff
+#define V_CONMCTXT_CNGCHMAP(x) ((x) << S_CONMCTXT_CNGCHMAP)
+#define G_CONMCTXT_CNGCHMAP(x) \
+ (((x) >> S_CONMCTXT_CNGCHMAP) & M_CONMCTXT_CNGCHMAP)
+
+#define X_CONMCTXT_CNGTPMODE_QUEUE 1
+#define X_CONMCTXT_CNGTPMODE_CHANNEL 2
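+
+/* Illustrative sketch: channel-based congestion tracking over an
+ * example channel bitmap "ch_map" could be encoded as:
+ *
+ *	val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_CHANNEL) |
+ *	      V_CONMCTXT_CNGCHMAP(ch_map);
+ */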
+
+/*
+ * T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE 128
+#define SGE_UDB_KDOORBELL 8
+#define SGE_UDB_GTS 20
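+
+/* Simplified sketch of locating a queue's User Doorbell region (this
+ * ignores the queues-per-page scaling applied elsewhere; "bar2" and
+ * "qid" are example names):
+ *
+ *	udb = bar2 + (u64)qid * SGE_UDB_SIZE;
+ *	... Simple Doorbell writes go to udb + SGE_UDB_KDOORBELL and
+ *	    Going To Sleep writes to udb + SGE_UDB_GTS ...
+ */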
+
+/*
+ * CIM definitions.
+ * ================
+ */
+
+/*
+ * CIM register field values.
+ */
+#define X_MBOWNER_NONE 0
+#define X_MBOWNER_FW 1
+#define X_MBOWNER_PL 2
+
+/*
+ * PCI-E definitions.
+ * ==================
+ */
+#define X_WINDOW_SHIFT 10
+#define X_PCIEOFST_SHIFT 10
+
+/*
+ * TP definitions.
+ * ===============
+ */
+
+/*
+ * TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE 1
+#define W_FT_PORT 3
+#define W_FT_VNIC_ID 17
+#define W_FT_VLAN 17
+#define W_FT_TOS 8
+#define W_FT_PROTOCOL 8
+#define W_FT_ETHERTYPE 16
+#define W_FT_MACMATCH 9
+#define W_FT_MPSHITTYPE 3
+#define W_FT_FRAGMENTATION 1
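+
+/*
+ * For example, if only the Port and Protocol bits are set in
+ * TP_VLAN_PRI_MAP, the Compressed Filter Tuple is
+ * W_FT_PORT + W_FT_PROTOCOL = 3 + 8 = 11 bits wide.
+ */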
+
+/*
+ * Some of the Compressed Filter Tuple fields have internal structure. These
+ * bit shifts/masks describe those structures. All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple
+ */
+#define S_FT_VLAN_VLD 16
+#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
+#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
+
+#endif /* __T4_REGS_VALUES_H__ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h
new file mode 100644
index 000000000..afd03b735
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4_tcb.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _T4_TCB_DEFS_H
+#define _T4_TCB_DEFS_H
+
+/* 31:24 */
+#define W_TCB_SMAC_SEL 0
+#define S_TCB_SMAC_SEL 24
+#define M_TCB_SMAC_SEL 0xffULL
+#define V_TCB_SMAC_SEL(x) ((x) << S_TCB_SMAC_SEL)
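+
+/* In the convention above, W_ gives the 32-bit TCB word index, S_ the
+ * bit offset within that word and M_ the field mask.  For instance, a
+ * TCB update of SMAC_SEL targets word W_TCB_SMAC_SEL with mask
+ * V_TCB_SMAC_SEL(M_TCB_SMAC_SEL) and value V_TCB_SMAC_SEL(val).
+ */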
+
+/* 95:32 */
+#define W_TCB_T_FLAGS 1
+
+/* 105:96 */
+#define W_TCB_RSS_INFO 3
+#define S_TCB_RSS_INFO 0
+#define M_TCB_RSS_INFO 0x3ffULL
+#define V_TCB_RSS_INFO(x) ((x) << S_TCB_RSS_INFO)
+
+/* 191:160 */
+#define W_TCB_TIMESTAMP 5
+#define S_TCB_TIMESTAMP 0
+#define M_TCB_TIMESTAMP 0xffffffffULL
+#define V_TCB_TIMESTAMP(x) ((x) << S_TCB_TIMESTAMP)
+
+/* 223:192 */
+#define W_TCB_T_RTT_TS_RECENT_AGE 6
+#define S_TCB_T_RTT_TS_RECENT_AGE 0
+#define M_TCB_T_RTT_TS_RECENT_AGE 0xffffffffULL
+#define V_TCB_T_RTT_TS_RECENT_AGE(x) ((x) << S_TCB_T_RTT_TS_RECENT_AGE)
+
+/* 255:224 */
+#define S_TCB_T_RTSEQ_RECENT 0
+#define M_TCB_T_RTSEQ_RECENT 0xffffffffULL
+#define V_TCB_T_RTSEQ_RECENT(x) ((x) << S_TCB_T_RTSEQ_RECENT)
+
+#define S_TF_CCTRL_ECE 60
+
+#define S_TF_CCTRL_CWR 61
+
+#define S_TF_CCTRL_RFR 62
+
+#endif /* _T4_TCB_DEFS_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h
new file mode 100644
index 000000000..0032178d0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4fw_interface.h
@@ -0,0 +1,2452 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _T4FW_INTERFACE_H_
+#define _T4FW_INTERFACE_H_
+
+/******************************************************************************
+ * R E T U R N V A L U E S
+ ********************************/
+
+enum fw_retval {
+ FW_SUCCESS = 0, /* completed successfully */
+ FW_EPERM = 1, /* operation not permitted */
+ FW_ENOENT = 2, /* no such file or directory */
+ FW_EIO = 5, /* input/output error; hw bad */
+ FW_ENOEXEC = 8, /* exec format error; inv microcode */
+ FW_EAGAIN = 11, /* try again */
+ FW_ENOMEM = 12, /* out of memory */
+ FW_EFAULT = 14, /* bad address; fw bad */
+ FW_EBUSY = 16, /* resource busy */
+ FW_EEXIST = 17, /* file exists */
+ FW_ENODEV = 19, /* no such device */
+ FW_EINVAL = 22, /* invalid argument */
+ FW_ENOSPC = 28, /* no space left on device */
+ FW_ENOSYS = 38, /* functionality not implemented */
+ FW_ENODATA = 61, /* no data available */
+ FW_EPROTO = 71, /* protocol error */
+ FW_EADDRINUSE = 98, /* address already in use */
+	FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */
+ FW_ENETDOWN = 100, /* network is down */
+ FW_ENETUNREACH = 101, /* network is unreachable */
+ FW_ENOBUFS = 105, /* no buffer space available */
+ FW_ETIMEDOUT = 110, /* timeout */
+ FW_EINPROGRESS = 115, /* fw internal */
+};
+
+/******************************************************************************
+ * M E M O R Y T Y P E s
+ ******************************/
+
+enum fw_memtype {
+ FW_MEMTYPE_EDC0 = 0x0,
+ FW_MEMTYPE_EDC1 = 0x1,
+ FW_MEMTYPE_EXTMEM = 0x2,
+ FW_MEMTYPE_FLASH = 0x4,
+ FW_MEMTYPE_INTERNAL = 0x5,
+ FW_MEMTYPE_EXTMEM1 = 0x6,
+};
+
+/******************************************************************************
+ * W O R K R E Q U E S T s
+ ********************************/
+
+enum fw_wr_opcodes {
+ FW_FILTER_WR = 0x02,
+ FW_ULPTX_WR = 0x04,
+ FW_TP_WR = 0x05,
+ FW_ETH_TX_PKT_WR = 0x08,
+ FW_ETH_TX_PKTS_WR = 0x09,
+ FW_ETH_TX_PKT_VM_WR = 0x11,
+ FW_ETH_TX_PKTS_VM_WR = 0x12,
+ FW_FILTER2_WR = 0x77,
+ FW_ETH_TX_PKTS2_WR = 0x78,
+};
+
+/*
+ * Generic work request header flit0
+ */
+struct fw_wr_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+/* work request opcode (hi)
+ */
+#define S_FW_WR_OP 24
+#define M_FW_WR_OP 0xff
+#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP)
+#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP)
+
+/* atomic flag (hi) - firmware encapsulates CPLs in CPL_BARRIER
+ */
+#define S_FW_WR_ATOMIC 23
+#define V_FW_WR_ATOMIC(x) ((x) << S_FW_WR_ATOMIC)
+
+/* work request immediate data length (hi)
+ */
+#define S_FW_WR_IMMDLEN 0
+#define M_FW_WR_IMMDLEN 0xff
+#define V_FW_WR_IMMDLEN(x) ((x) << S_FW_WR_IMMDLEN)
+#define G_FW_WR_IMMDLEN(x) \
+ (((x) >> S_FW_WR_IMMDLEN) & M_FW_WR_IMMDLEN)
+
+/* egress queue status update to egress queue status entry (lo)
+ */
+#define S_FW_WR_EQUEQ 30
+#define M_FW_WR_EQUEQ 0x1
+#define V_FW_WR_EQUEQ(x) ((x) << S_FW_WR_EQUEQ)
+#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ)
+#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U)
+
+/* flow context identifier (lo)
+ */
+#define S_FW_WR_FLOWID 8
+#define V_FW_WR_FLOWID(x) ((x) << S_FW_WR_FLOWID)
+
+/* length in units of 16 bytes (lo)
+ */
+#define S_FW_WR_LEN16 0
+#define M_FW_WR_LEN16 0xff
+#define V_FW_WR_LEN16(x) ((x) << S_FW_WR_LEN16)
+#define G_FW_WR_LEN16(x) (((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16)
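+
+/* Illustrative sketch: the first flit of an Ethernet TX work request
+ * carrying "immlen" bytes of immediate data in a WR of "size" bytes
+ * (example names) might be composed as:
+ *
+ *	wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ *			       V_FW_WR_IMMDLEN(immlen));
+ *	wr->equiq_to_len16 = htonl(V_FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
+ */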
+
+struct fw_eth_tx_pkt_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be64 r3;
+};
+
+#define S_FW_ETH_TX_PKT_WR_IMMDLEN 0
+#define M_FW_ETH_TX_PKT_WR_IMMDLEN 0x1ff
+#define V_FW_ETH_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT_WR_IMMDLEN)
+#define G_FW_ETH_TX_PKT_WR_IMMDLEN(x) \
+ (((x) >> S_FW_ETH_TX_PKT_WR_IMMDLEN) & M_FW_ETH_TX_PKT_WR_IMMDLEN)
+
+struct fw_eth_tx_pkts_wr {
+ __be32 op_pkd;
+ __be32 equiq_to_len16;
+ __be32 r3;
+ __be16 plen;
+ __u8 npkt;
+ __u8 type;
+};
+
+struct fw_eth_tx_pkt_vm_wr {
+ __be32 op_immdlen;
+ __be32 equiq_to_len16;
+ __be32 r3[2];
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+struct fw_eth_tx_pkts_vm_wr {
+ __be32 op_pkd;
+ __be32 equiq_to_len16;
+ __be32 r3;
+ __be16 plen;
+ __u8 npkt;
+ __u8 r4;
+ __u8 ethmacdst[6];
+ __u8 ethmacsrc[6];
+ __be16 ethtype;
+ __be16 vlantci;
+};
+
+/* filter wr reply code in cookie in CPL_SET_TCB_RPL */
+enum fw_filter_wr_cookie {
+ FW_FILTER_WR_SUCCESS,
+ FW_FILTER_WR_FLT_ADDED,
+ FW_FILTER_WR_FLT_DELETED,
+ FW_FILTER_WR_SMT_TBL_FULL,
+ FW_FILTER_WR_EINVAL,
+};
+
+struct fw_filter2_wr {
+ __be32 op_pkd;
+ __be32 len16_pkd;
+ __be64 r3;
+ __be32 tid_to_iq;
+ __be32 del_filter_to_l2tix;
+ __be16 ethtype;
+ __be16 ethtypem;
+ __u8 frag_to_ovlan_vldm;
+ __u8 smac_sel;
+ __be16 rx_chan_rx_rpl_iq;
+ __be32 maci_to_matchtypem;
+ __u8 ptcl;
+ __u8 ptclm;
+ __u8 ttyp;
+ __u8 ttypm;
+ __be16 ivlan;
+ __be16 ivlanm;
+ __be16 ovlan;
+ __be16 ovlanm;
+ __u8 lip[16];
+ __u8 lipm[16];
+ __u8 fip[16];
+ __u8 fipm[16];
+ __be16 lp;
+ __be16 lpm;
+ __be16 fp;
+ __be16 fpm;
+ __be16 r7;
+ __u8 sma[6];
+ __be16 r8;
+ __u8 filter_type_swapmac;
+ __u8 natmode_to_ulp_type;
+ __be16 newlport;
+ __be16 newfport;
+ __u8 newlip[16];
+ __u8 newfip[16];
+ __be32 natseqcheck;
+ __be32 r9;
+ __be64 r10;
+ __be64 r11;
+ __be64 r12;
+ __be64 r13;
+};
+
+#define S_FW_FILTER_WR_TID 12
+#define V_FW_FILTER_WR_TID(x) ((x) << S_FW_FILTER_WR_TID)
+
+#define S_FW_FILTER_WR_RQTYPE 11
+#define V_FW_FILTER_WR_RQTYPE(x) ((x) << S_FW_FILTER_WR_RQTYPE)
+
+#define S_FW_FILTER_WR_NOREPLY 10
+#define V_FW_FILTER_WR_NOREPLY(x) ((x) << S_FW_FILTER_WR_NOREPLY)
+
+#define S_FW_FILTER_WR_IQ 0
+#define V_FW_FILTER_WR_IQ(x) ((x) << S_FW_FILTER_WR_IQ)
+
+#define S_FW_FILTER_WR_DEL_FILTER 31
+#define V_FW_FILTER_WR_DEL_FILTER(x) ((x) << S_FW_FILTER_WR_DEL_FILTER)
+#define F_FW_FILTER_WR_DEL_FILTER V_FW_FILTER_WR_DEL_FILTER(1U)
+
+#define S_FW_FILTER_WR_RPTTID 25
+#define V_FW_FILTER_WR_RPTTID(x) ((x) << S_FW_FILTER_WR_RPTTID)
+
+#define S_FW_FILTER_WR_DROP 24
+#define V_FW_FILTER_WR_DROP(x) ((x) << S_FW_FILTER_WR_DROP)
+
+#define S_FW_FILTER_WR_DIRSTEER 23
+#define V_FW_FILTER_WR_DIRSTEER(x) ((x) << S_FW_FILTER_WR_DIRSTEER)
+
+#define S_FW_FILTER_WR_MASKHASH 22
+#define V_FW_FILTER_WR_MASKHASH(x) ((x) << S_FW_FILTER_WR_MASKHASH)
+
+#define S_FW_FILTER_WR_DIRSTEERHASH 21
+#define V_FW_FILTER_WR_DIRSTEERHASH(x) ((x) << S_FW_FILTER_WR_DIRSTEERHASH)
+
+#define S_FW_FILTER_WR_LPBK 20
+#define V_FW_FILTER_WR_LPBK(x) ((x) << S_FW_FILTER_WR_LPBK)
+
+#define S_FW_FILTER_WR_DMAC 19
+#define V_FW_FILTER_WR_DMAC(x) ((x) << S_FW_FILTER_WR_DMAC)
+
+#define S_FW_FILTER_WR_SMAC 18
+#define V_FW_FILTER_WR_SMAC(x) ((x) << S_FW_FILTER_WR_SMAC)
+
+#define S_FW_FILTER_WR_INSVLAN 17
+#define V_FW_FILTER_WR_INSVLAN(x) ((x) << S_FW_FILTER_WR_INSVLAN)
+
+#define S_FW_FILTER_WR_RMVLAN 16
+#define V_FW_FILTER_WR_RMVLAN(x) ((x) << S_FW_FILTER_WR_RMVLAN)
+
+#define S_FW_FILTER_WR_HITCNTS 15
+#define V_FW_FILTER_WR_HITCNTS(x) ((x) << S_FW_FILTER_WR_HITCNTS)
+
+#define S_FW_FILTER_WR_TXCHAN 13
+#define V_FW_FILTER_WR_TXCHAN(x) ((x) << S_FW_FILTER_WR_TXCHAN)
+
+#define S_FW_FILTER_WR_PRIO 12
+#define V_FW_FILTER_WR_PRIO(x) ((x) << S_FW_FILTER_WR_PRIO)
+
+#define S_FW_FILTER_WR_L2TIX 0
+#define V_FW_FILTER_WR_L2TIX(x) ((x) << S_FW_FILTER_WR_L2TIX)
+
+#define S_FW_FILTER_WR_FRAG 7
+#define V_FW_FILTER_WR_FRAG(x) ((x) << S_FW_FILTER_WR_FRAG)
+
+#define S_FW_FILTER_WR_FRAGM 6
+#define V_FW_FILTER_WR_FRAGM(x) ((x) << S_FW_FILTER_WR_FRAGM)
+
+#define S_FW_FILTER_WR_IVLAN_VLD 5
+#define V_FW_FILTER_WR_IVLAN_VLD(x) ((x) << S_FW_FILTER_WR_IVLAN_VLD)
+
+#define S_FW_FILTER_WR_OVLAN_VLD 4
+#define V_FW_FILTER_WR_OVLAN_VLD(x) ((x) << S_FW_FILTER_WR_OVLAN_VLD)
+
+#define S_FW_FILTER_WR_IVLAN_VLDM 3
+#define V_FW_FILTER_WR_IVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_IVLAN_VLDM)
+
+#define S_FW_FILTER_WR_OVLAN_VLDM 2
+#define V_FW_FILTER_WR_OVLAN_VLDM(x) ((x) << S_FW_FILTER_WR_OVLAN_VLDM)
+
+#define S_FW_FILTER_WR_RX_CHAN 15
+#define V_FW_FILTER_WR_RX_CHAN(x) ((x) << S_FW_FILTER_WR_RX_CHAN)
+
+#define S_FW_FILTER_WR_RX_RPL_IQ 0
+#define V_FW_FILTER_WR_RX_RPL_IQ(x) ((x) << S_FW_FILTER_WR_RX_RPL_IQ)
+
+#define S_FW_FILTER_WR_MACI 23
+#define V_FW_FILTER_WR_MACI(x) ((x) << S_FW_FILTER_WR_MACI)
+
+#define S_FW_FILTER_WR_MACIM 14
+#define V_FW_FILTER_WR_MACIM(x) ((x) << S_FW_FILTER_WR_MACIM)
+
+#define S_FW_FILTER_WR_FCOE 13
+#define V_FW_FILTER_WR_FCOE(x) ((x) << S_FW_FILTER_WR_FCOE)
+
+#define S_FW_FILTER_WR_FCOEM 12
+#define V_FW_FILTER_WR_FCOEM(x) ((x) << S_FW_FILTER_WR_FCOEM)
+
+#define S_FW_FILTER_WR_PORT 9
+#define V_FW_FILTER_WR_PORT(x) ((x) << S_FW_FILTER_WR_PORT)
+
+#define S_FW_FILTER_WR_PORTM 6
+#define V_FW_FILTER_WR_PORTM(x) ((x) << S_FW_FILTER_WR_PORTM)
+
+#define S_FW_FILTER_WR_MATCHTYPE 3
+#define V_FW_FILTER_WR_MATCHTYPE(x) ((x) << S_FW_FILTER_WR_MATCHTYPE)
+
+#define S_FW_FILTER_WR_MATCHTYPEM 0
+#define V_FW_FILTER_WR_MATCHTYPEM(x) ((x) << S_FW_FILTER_WR_MATCHTYPEM)
+
+#define S_FW_FILTER2_WR_SWAPMAC 0
+#define V_FW_FILTER2_WR_SWAPMAC(x) ((x) << S_FW_FILTER2_WR_SWAPMAC)
+
+#define S_FW_FILTER2_WR_NATMODE 5
+#define V_FW_FILTER2_WR_NATMODE(x) ((x) << S_FW_FILTER2_WR_NATMODE)
+
+#define S_FW_FILTER2_WR_ULP_TYPE 0
+#define V_FW_FILTER2_WR_ULP_TYPE(x) ((x) << S_FW_FILTER2_WR_ULP_TYPE)
+
+/******************************************************************************
+ * C O M M A N D s
+ *********************/
+
+/*
+ * The maximum length of time, in milliseconds, that we expect any firmware
+ * command to take to execute and return a reply to the host. The RESET
+ * and INITIALIZE commands can take a fair amount of time to execute but
+ * most execute in far less time than this maximum. This constant is used
+ * by host software to determine how long to wait for a firmware command
+ * reply before declaring the firmware as dead/unreachable ...
+ */
+#define FW_CMD_MAX_TIMEOUT 10000
+
+/*
+ * If a host driver does a HELLO and discovers that there's already a MASTER
+ * selected, we may have to wait for that MASTER to finish issuing RESET,
+ * configuration and INITIALIZE commands. Also, there's a possibility that
+ * our own HELLO may get lost if it happens right as the MASTER is issuing a
+ * RESET command, so we need to be willing to make a few retries of our HELLO.
+ */
+#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT)
+#define FW_CMD_HELLO_RETRIES 3
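+/*
+ * Illustrative sketch (not part of the firmware interface proper): a host
+ * driver would typically combine the two constants above roughly as
+ * follows, retrying the HELLO if the mailbox times out while another
+ * function still holds MASTER.  The mailbox helper name is shown only for
+ * illustration:
+ *
+ *	for (retries = FW_CMD_HELLO_RETRIES; retries > 0; retries--) {
+ *		ret = t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), &c,
+ *					 FW_CMD_HELLO_TIMEOUT);
+ *		if (ret != -ETIMEDOUT)
+ *			break;
+ *	}
+ */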
+
+enum fw_cmd_opcodes {
+ FW_LDST_CMD = 0x01,
+ FW_RESET_CMD = 0x03,
+ FW_HELLO_CMD = 0x04,
+ FW_BYE_CMD = 0x05,
+ FW_INITIALIZE_CMD = 0x06,
+ FW_CAPS_CONFIG_CMD = 0x07,
+ FW_PARAMS_CMD = 0x08,
+ FW_PFVF_CMD = 0x09,
+ FW_IQ_CMD = 0x10,
+ FW_EQ_ETH_CMD = 0x12,
+ FW_EQ_CTRL_CMD = 0x13,
+ FW_VI_CMD = 0x14,
+ FW_VI_MAC_CMD = 0x15,
+ FW_VI_RXMODE_CMD = 0x16,
+ FW_VI_ENABLE_CMD = 0x17,
+ FW_VI_STATS_CMD = 0x1a,
+ FW_PORT_CMD = 0x1b,
+ FW_RSS_IND_TBL_CMD = 0x20,
+ FW_RSS_GLB_CONFIG_CMD = 0x22,
+ FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_CLIP_CMD = 0x28,
+ FW_DEBUG_CMD = 0x81,
+};
+
+enum fw_cmd_cap {
+ FW_CMD_CAP_PORT = 0x04,
+};
+
+/*
+ * Generic command header flit0
+ */
+struct fw_cmd_hdr {
+ __be32 hi;
+ __be32 lo;
+};
+
+#define S_FW_CMD_OP 24
+#define M_FW_CMD_OP 0xff
+#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP)
+#define G_FW_CMD_OP(x) (((x) >> S_FW_CMD_OP) & M_FW_CMD_OP)
+
+#define S_FW_CMD_REQUEST 23
+#define M_FW_CMD_REQUEST 0x1
+#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST)
+#define G_FW_CMD_REQUEST(x) (((x) >> S_FW_CMD_REQUEST) & M_FW_CMD_REQUEST)
+#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U)
+
+#define S_FW_CMD_READ 22
+#define M_FW_CMD_READ 0x1
+#define V_FW_CMD_READ(x) ((x) << S_FW_CMD_READ)
+#define G_FW_CMD_READ(x) (((x) >> S_FW_CMD_READ) & M_FW_CMD_READ)
+#define F_FW_CMD_READ V_FW_CMD_READ(1U)
+
+#define S_FW_CMD_WRITE 21
+#define M_FW_CMD_WRITE 0x1
+#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE)
+#define G_FW_CMD_WRITE(x) (((x) >> S_FW_CMD_WRITE) & M_FW_CMD_WRITE)
+#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U)
+
+#define S_FW_CMD_EXEC 20
+#define M_FW_CMD_EXEC 0x1
+#define V_FW_CMD_EXEC(x) ((x) << S_FW_CMD_EXEC)
+#define G_FW_CMD_EXEC(x) (((x) >> S_FW_CMD_EXEC) & M_FW_CMD_EXEC)
+#define F_FW_CMD_EXEC V_FW_CMD_EXEC(1U)
+
+#define S_FW_CMD_RETVAL 8
+#define M_FW_CMD_RETVAL 0xff
+#define V_FW_CMD_RETVAL(x) ((x) << S_FW_CMD_RETVAL)
+#define G_FW_CMD_RETVAL(x) (((x) >> S_FW_CMD_RETVAL) & M_FW_CMD_RETVAL)
+
+#define S_FW_CMD_LEN16 0
+#define M_FW_CMD_LEN16 0xff
+#define V_FW_CMD_LEN16(x) ((x) << S_FW_CMD_LEN16)
+#define G_FW_CMD_LEN16(x) (((x) >> S_FW_CMD_LEN16) & M_FW_CMD_LEN16)
+
+#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16)
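+/*
+ * Usage sketch (illustrative only): every command begins with the flit0
+ * fields above.  A caller composes them with the V_/F_ macros and sizes
+ * the command with FW_LEN16(), e.g. for a write-style command:
+ *
+ *	struct fw_reset_cmd c;
+ *
+ *	memset(&c, 0, sizeof(c));
+ *	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
+ *				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ *	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+ *
+ * The reply's status is then recovered with
+ * G_FW_CMD_RETVAL(be32_to_cpu(rpl.retval_len16)).
+ */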
+
+/* address spaces */
+enum fw_ldst_addrspc {
+ FW_LDST_ADDRSPC_TP_PIO = 0x0010,
+};
+
+struct fw_ldst_cmd {
+ __be32 op_to_addrspace;
+ __be32 cycles_to_len16;
+ union fw_ldst {
+ struct fw_ldst_addrval {
+ __be32 addr;
+ __be32 val;
+ } addrval;
+ struct fw_ldst_idctxt {
+ __be32 physid;
+ __be32 msg_ctxtflush;
+ __be32 ctxt_data7;
+ __be32 ctxt_data6;
+ __be32 ctxt_data5;
+ __be32 ctxt_data4;
+ __be32 ctxt_data3;
+ __be32 ctxt_data2;
+ __be32 ctxt_data1;
+ __be32 ctxt_data0;
+ } idctxt;
+ struct fw_ldst_mdio {
+ __be16 paddr_mmd;
+ __be16 raddr;
+ __be16 vctl;
+ __be16 rval;
+ } mdio;
+ struct fw_ldst_mps {
+ __be16 fid_ctl;
+ __be16 rplcpf_pkd;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ __be32 atrb;
+ __be16 vlan[16];
+ } mps;
+ struct fw_ldst_func {
+ __u8 access_ctl;
+ __u8 mod_index;
+ __be16 ctl_id;
+ __be32 offset;
+ __be64 data0;
+ __be64 data1;
+ } func;
+ struct fw_ldst_pcie {
+ __u8 ctrl_to_fn;
+ __u8 bnum;
+ __u8 r;
+ __u8 ext_r;
+ __u8 select_naccess;
+ __u8 pcie_fn;
+ __be16 nset_pkd;
+ __be32 data[12];
+ } pcie;
+ struct fw_ldst_i2c_deprecated {
+ __u8 pid_pkd;
+ __u8 base;
+ __u8 boffset;
+ __u8 data;
+ __be32 r9;
+ } i2c_deprecated;
+ struct fw_ldst_i2c {
+ __u8 pid;
+ __u8 did;
+ __u8 boffset;
+ __u8 blen;
+ __be32 r9;
+ __u8 data[48];
+ } i2c;
+ struct fw_ldst_le {
+ __be32 index;
+ __be32 r9;
+ __u8 val[33];
+ __u8 r11[7];
+ } le;
+ } u;
+};
+
+#define S_FW_LDST_CMD_ADDRSPACE 0
+#define M_FW_LDST_CMD_ADDRSPACE 0xff
+#define V_FW_LDST_CMD_ADDRSPACE(x) ((x) << S_FW_LDST_CMD_ADDRSPACE)
+
+struct fw_reset_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 val;
+ __be32 halt_pkd;
+};
+
+#define S_FW_RESET_CMD_HALT 31
+#define M_FW_RESET_CMD_HALT 0x1
+#define V_FW_RESET_CMD_HALT(x) ((x) << S_FW_RESET_CMD_HALT)
+#define G_FW_RESET_CMD_HALT(x) \
+ (((x) >> S_FW_RESET_CMD_HALT) & M_FW_RESET_CMD_HALT)
+#define F_FW_RESET_CMD_HALT V_FW_RESET_CMD_HALT(1U)
+
+enum {
+ FW_HELLO_CMD_STAGE_OS = 0,
+};
+
+struct fw_hello_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be32 err_to_clearinit;
+ __be32 fwrev;
+};
+
+#define S_FW_HELLO_CMD_ERR 31
+#define M_FW_HELLO_CMD_ERR 0x1
+#define V_FW_HELLO_CMD_ERR(x) ((x) << S_FW_HELLO_CMD_ERR)
+#define G_FW_HELLO_CMD_ERR(x) \
+ (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR)
+#define F_FW_HELLO_CMD_ERR V_FW_HELLO_CMD_ERR(1U)
+
+#define S_FW_HELLO_CMD_INIT 30
+#define M_FW_HELLO_CMD_INIT 0x1
+#define V_FW_HELLO_CMD_INIT(x) ((x) << S_FW_HELLO_CMD_INIT)
+#define G_FW_HELLO_CMD_INIT(x) \
+ (((x) >> S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT)
+#define F_FW_HELLO_CMD_INIT V_FW_HELLO_CMD_INIT(1U)
+
+#define S_FW_HELLO_CMD_MASTERDIS 29
+#define M_FW_HELLO_CMD_MASTERDIS 0x1
+#define V_FW_HELLO_CMD_MASTERDIS(x) ((x) << S_FW_HELLO_CMD_MASTERDIS)
+#define G_FW_HELLO_CMD_MASTERDIS(x) \
+ (((x) >> S_FW_HELLO_CMD_MASTERDIS) & M_FW_HELLO_CMD_MASTERDIS)
+#define F_FW_HELLO_CMD_MASTERDIS V_FW_HELLO_CMD_MASTERDIS(1U)
+
+#define S_FW_HELLO_CMD_MASTERFORCE 28
+#define M_FW_HELLO_CMD_MASTERFORCE 0x1
+#define V_FW_HELLO_CMD_MASTERFORCE(x) ((x) << S_FW_HELLO_CMD_MASTERFORCE)
+#define G_FW_HELLO_CMD_MASTERFORCE(x) \
+ (((x) >> S_FW_HELLO_CMD_MASTERFORCE) & M_FW_HELLO_CMD_MASTERFORCE)
+#define F_FW_HELLO_CMD_MASTERFORCE V_FW_HELLO_CMD_MASTERFORCE(1U)
+
+#define S_FW_HELLO_CMD_MBMASTER 24
+#define M_FW_HELLO_CMD_MBMASTER 0xf
+#define V_FW_HELLO_CMD_MBMASTER(x) ((x) << S_FW_HELLO_CMD_MBMASTER)
+#define G_FW_HELLO_CMD_MBMASTER(x) \
+ (((x) >> S_FW_HELLO_CMD_MBMASTER) & M_FW_HELLO_CMD_MBMASTER)
+
+#define S_FW_HELLO_CMD_MBASYNCNOT 20
+#define M_FW_HELLO_CMD_MBASYNCNOT 0x7
+#define V_FW_HELLO_CMD_MBASYNCNOT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOT)
+#define G_FW_HELLO_CMD_MBASYNCNOT(x) \
+ (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT)
+
+#define S_FW_HELLO_CMD_STAGE 17
+#define M_FW_HELLO_CMD_STAGE 0x7
+#define V_FW_HELLO_CMD_STAGE(x) ((x) << S_FW_HELLO_CMD_STAGE)
+#define G_FW_HELLO_CMD_STAGE(x) \
+ (((x) >> S_FW_HELLO_CMD_STAGE) & M_FW_HELLO_CMD_STAGE)
+
+#define S_FW_HELLO_CMD_CLEARINIT 16
+#define M_FW_HELLO_CMD_CLEARINIT 0x1
+#define V_FW_HELLO_CMD_CLEARINIT(x) ((x) << S_FW_HELLO_CMD_CLEARINIT)
+#define G_FW_HELLO_CMD_CLEARINIT(x) \
+ (((x) >> S_FW_HELLO_CMD_CLEARINIT) & M_FW_HELLO_CMD_CLEARINIT)
+#define F_FW_HELLO_CMD_CLEARINIT V_FW_HELLO_CMD_CLEARINIT(1U)
+
+struct fw_bye_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+struct fw_initialize_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ __be64 r3;
+};
+
+enum fw_caps_config_nic {
+ FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020,
+ FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040,
+};
+
+enum fw_memtype_cf {
+ FW_MEMTYPE_CF_FLASH = FW_MEMTYPE_FLASH,
+};
+
+struct fw_caps_config_cmd {
+ __be32 op_to_write;
+ __be32 cfvalid_to_len16;
+ __be32 r2;
+ __be32 hwmbitmap;
+ __be16 nbmcaps;
+ __be16 linkcaps;
+ __be16 switchcaps;
+ __be16 r3;
+ __be16 niccaps;
+ __be16 toecaps;
+ __be16 rdmacaps;
+ __be16 r4;
+ __be16 iscsicaps;
+ __be16 fcoecaps;
+ __be32 cfcsum;
+ __be32 finiver;
+ __be32 finicsum;
+};
+
+#define S_FW_CAPS_CONFIG_CMD_CFVALID 27
+#define M_FW_CAPS_CONFIG_CMD_CFVALID 0x1
+#define V_FW_CAPS_CONFIG_CMD_CFVALID(x) ((x) << S_FW_CAPS_CONFIG_CMD_CFVALID)
+#define G_FW_CAPS_CONFIG_CMD_CFVALID(x) \
+ (((x) >> S_FW_CAPS_CONFIG_CMD_CFVALID) & M_FW_CAPS_CONFIG_CMD_CFVALID)
+#define F_FW_CAPS_CONFIG_CMD_CFVALID V_FW_CAPS_CONFIG_CMD_CFVALID(1U)
+
+#define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 24
+#define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 0x7
+#define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \
+ ((x) << S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF)
+#define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \
+ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) & \
+ M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF)
+
+#define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 16
+#define M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 0xff
+#define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \
+ ((x) << S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF)
+#define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \
+ (((x) >> S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) & \
+ M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF)
+
+/*
+ * params command mnemonics
+ */
+enum fw_params_mnem {
+ FW_PARAMS_MNEM_DEV = 1, /* device params */
+ FW_PARAMS_MNEM_PFVF = 2, /* function params */
+ FW_PARAMS_MNEM_REG = 3, /* limited register access */
+ FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */
+};
+
+/*
+ * device parameters
+ */
+
+#define S_FW_PARAMS_PARAM_FILTER_MODE 16
+#define M_FW_PARAMS_PARAM_FILTER_MODE 0xffff
+#define V_FW_PARAMS_PARAM_FILTER_MODE(x) \
+ ((x) << S_FW_PARAMS_PARAM_FILTER_MODE)
+#define G_FW_PARAMS_PARAM_FILTER_MODE(x) \
+ (((x) >> S_FW_PARAMS_PARAM_FILTER_MODE) & \
+ M_FW_PARAMS_PARAM_FILTER_MODE)
+
+#define S_FW_PARAMS_PARAM_FILTER_MASK 0
+#define M_FW_PARAMS_PARAM_FILTER_MASK 0xffff
+#define V_FW_PARAMS_PARAM_FILTER_MASK(x) \
+ ((x) << S_FW_PARAMS_PARAM_FILTER_MASK)
+#define G_FW_PARAMS_PARAM_FILTER_MASK(x) \
+ (((x) >> S_FW_PARAMS_PARAM_FILTER_MASK) & \
+ M_FW_PARAMS_PARAM_FILTER_MASK)
+
+enum fw_params_param_dev {
+	FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in kHz */
+ FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */
+ FW_PARAMS_PARAM_DEV_NTID = 0x02, /* reads the number of TIDs
+ * allocated by the device's
+ * Lookup Engine
+ */
+ FW_PARAMS_PARAM_DEV_FWREV = 0x0B, /* fw version */
+ FW_PARAMS_PARAM_DEV_TPREV = 0x0C, /* tp version */
+ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
+ FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D,
+ FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27,
+ FW_PARAMS_PARAM_DEV_FILTER = 0x2E,
+};
+
+/*
+ * physical and virtual function parameters
+ */
+enum fw_params_param_pfvf {
+ FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+ FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+ FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+ FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+ FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
+ FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
+ FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31,
+ FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
+ FW_PARAMS_PARAM_PFVF_MAX_PKTS_PER_ETH_TX_PKTS_WR = 0x3D,
+ FW_PARAMS_PARAM_PFVF_GET_SMT_START = 0x3E,
+ FW_PARAMS_PARAM_PFVF_GET_SMT_SIZE = 0x3F,
+};
+
+/*
+ * dma queue parameters
+ */
+enum fw_params_param_dmaq {
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
+ FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20,
+};
+
+enum fw_params_param_dev_filter {
+ FW_PARAM_DEV_FILTER_VNIC_MODE = 0x00,
+ FW_PARAM_DEV_FILTER_MODE_MASK = 0x01,
+};
+
+#define S_FW_PARAMS_MNEM 24
+#define M_FW_PARAMS_MNEM 0xff
+#define V_FW_PARAMS_MNEM(x) ((x) << S_FW_PARAMS_MNEM)
+#define G_FW_PARAMS_MNEM(x) \
+ (((x) >> S_FW_PARAMS_MNEM) & M_FW_PARAMS_MNEM)
+
+#define S_FW_PARAMS_PARAM_X 16
+#define M_FW_PARAMS_PARAM_X 0xff
+#define V_FW_PARAMS_PARAM_X(x) ((x) << S_FW_PARAMS_PARAM_X)
+#define G_FW_PARAMS_PARAM_X(x) \
+ (((x) >> S_FW_PARAMS_PARAM_X) & M_FW_PARAMS_PARAM_X)
+
+#define S_FW_PARAMS_PARAM_Y 8
+#define M_FW_PARAMS_PARAM_Y 0xff
+#define V_FW_PARAMS_PARAM_Y(x) ((x) << S_FW_PARAMS_PARAM_Y)
+#define G_FW_PARAMS_PARAM_Y(x) \
+ (((x) >> S_FW_PARAMS_PARAM_Y) & M_FW_PARAMS_PARAM_Y)
+
+#define S_FW_PARAMS_PARAM_Z 0
+#define M_FW_PARAMS_PARAM_Z 0xff
+#define V_FW_PARAMS_PARAM_Z(x) ((x) << S_FW_PARAMS_PARAM_Z)
+#define G_FW_PARAMS_PARAM_Z(x) \
+ (((x) >> S_FW_PARAMS_PARAM_Z) & M_FW_PARAMS_PARAM_Z)
+
+#define S_FW_PARAMS_PARAM_YZ 0
+#define M_FW_PARAMS_PARAM_YZ 0xffff
+#define V_FW_PARAMS_PARAM_YZ(x) ((x) << S_FW_PARAMS_PARAM_YZ)
+#define G_FW_PARAMS_PARAM_YZ(x) \
+ (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ)
+
+#define S_FW_PARAMS_PARAM_XYZ 0
+#define M_FW_PARAMS_PARAM_XYZ 0xffffff
+#define V_FW_PARAMS_PARAM_XYZ(x) ((x) << S_FW_PARAMS_PARAM_XYZ)
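+/*
+ * Illustrative composition (not part of the interface proper): a parameter
+ * identifier packs the mnemonic and the X/Y/Z indices into one 32-bit
+ * word, e.g. to query the port vector device parameter:
+ *
+ *	param = cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ *			    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC));
+ */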
+
+struct fw_params_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ struct fw_params_param {
+ __be32 mnem;
+ __be32 val;
+ } param[7];
+};
+
+#define S_FW_PARAMS_CMD_PFN 8
+#define M_FW_PARAMS_CMD_PFN 0x7
+#define V_FW_PARAMS_CMD_PFN(x) ((x) << S_FW_PARAMS_CMD_PFN)
+#define G_FW_PARAMS_CMD_PFN(x) \
+ (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN)
+
+#define S_FW_PARAMS_CMD_VFN 0
+#define M_FW_PARAMS_CMD_VFN 0xff
+#define V_FW_PARAMS_CMD_VFN(x) ((x) << S_FW_PARAMS_CMD_VFN)
+#define G_FW_PARAMS_CMD_VFN(x) \
+ (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN)
+
+struct fw_pfvf_cmd {
+ __be32 op_to_vfn;
+ __be32 retval_len16;
+ __be32 niqflint_niq;
+ __be32 type_to_neq;
+ __be32 tc_to_nexactf;
+ __be32 r_caps_to_nethctrl;
+ __be16 nricq;
+ __be16 nriqp;
+ __be32 r4;
+};
+
+#define S_FW_PFVF_CMD_PFN 8
+#define V_FW_PFVF_CMD_PFN(x) ((x) << S_FW_PFVF_CMD_PFN)
+
+#define S_FW_PFVF_CMD_VFN 0
+#define V_FW_PFVF_CMD_VFN(x) ((x) << S_FW_PFVF_CMD_VFN)
+
+#define S_FW_PFVF_CMD_NIQFLINT 20
+#define M_FW_PFVF_CMD_NIQFLINT 0xfff
+#define G_FW_PFVF_CMD_NIQFLINT(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQFLINT) & M_FW_PFVF_CMD_NIQFLINT)
+
+#define S_FW_PFVF_CMD_NIQ 0
+#define M_FW_PFVF_CMD_NIQ 0xfffff
+#define G_FW_PFVF_CMD_NIQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NIQ) & M_FW_PFVF_CMD_NIQ)
+
+#define S_FW_PFVF_CMD_PMASK 20
+#define M_FW_PFVF_CMD_PMASK 0xf
+#define G_FW_PFVF_CMD_PMASK(x) \
+ (((x) >> S_FW_PFVF_CMD_PMASK) & M_FW_PFVF_CMD_PMASK)
+
+#define S_FW_PFVF_CMD_NEQ 0
+#define M_FW_PFVF_CMD_NEQ 0xfffff
+#define G_FW_PFVF_CMD_NEQ(x) \
+ (((x) >> S_FW_PFVF_CMD_NEQ) & M_FW_PFVF_CMD_NEQ)
+
+#define S_FW_PFVF_CMD_TC 24
+#define M_FW_PFVF_CMD_TC 0xff
+#define G_FW_PFVF_CMD_TC(x) \
+ (((x) >> S_FW_PFVF_CMD_TC) & M_FW_PFVF_CMD_TC)
+
+#define S_FW_PFVF_CMD_NVI 16
+#define M_FW_PFVF_CMD_NVI 0xff
+#define G_FW_PFVF_CMD_NVI(x) \
+ (((x) >> S_FW_PFVF_CMD_NVI) & M_FW_PFVF_CMD_NVI)
+
+#define S_FW_PFVF_CMD_NEXACTF 0
+#define M_FW_PFVF_CMD_NEXACTF 0xffff
+#define G_FW_PFVF_CMD_NEXACTF(x) \
+ (((x) >> S_FW_PFVF_CMD_NEXACTF) & M_FW_PFVF_CMD_NEXACTF)
+
+#define S_FW_PFVF_CMD_R_CAPS 24
+#define M_FW_PFVF_CMD_R_CAPS 0xff
+#define G_FW_PFVF_CMD_R_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_R_CAPS) & M_FW_PFVF_CMD_R_CAPS)
+
+#define S_FW_PFVF_CMD_WX_CAPS 16
+#define M_FW_PFVF_CMD_WX_CAPS 0xff
+#define G_FW_PFVF_CMD_WX_CAPS(x) \
+ (((x) >> S_FW_PFVF_CMD_WX_CAPS) & M_FW_PFVF_CMD_WX_CAPS)
+
+#define S_FW_PFVF_CMD_NETHCTRL 0
+#define M_FW_PFVF_CMD_NETHCTRL 0xffff
+#define G_FW_PFVF_CMD_NETHCTRL(x) \
+ (((x) >> S_FW_PFVF_CMD_NETHCTRL) & M_FW_PFVF_CMD_NETHCTRL)
+
+/*
+ * ingress queue type; the first 1K ingress queues can have 0, 1 or 2
+ * free lists and an interrupt associated with them; all other ingress
+ * queues lack these capabilities
+ */
+enum fw_iq_type {
+ FW_IQ_TYPE_FL_INT_CAP,
+};
+
+enum fw_iq_iqtype {
+ FW_IQ_IQTYPE_NIC = 1,
+ FW_IQ_IQTYPE_OFLD,
+};
+
+struct fw_iq_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 physiqid;
+ __be16 iqid;
+ __be16 fl0id;
+ __be16 fl1id;
+ __be32 type_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_to_fl0congen;
+ __be16 fl0dcaen_to_fl0cidxfthresh;
+ __be16 fl0size;
+ __be64 fl0addr;
+ __be32 fl1cngchmap_to_fl1congen;
+ __be16 fl1dcaen_to_fl1cidxfthresh;
+ __be16 fl1size;
+ __be64 fl1addr;
+};
+
+#define S_FW_IQ_CMD_PFN 8
+#define M_FW_IQ_CMD_PFN 0x7
+#define V_FW_IQ_CMD_PFN(x) ((x) << S_FW_IQ_CMD_PFN)
+#define G_FW_IQ_CMD_PFN(x) (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN)
+
+#define S_FW_IQ_CMD_VFN 0
+#define M_FW_IQ_CMD_VFN 0xff
+#define V_FW_IQ_CMD_VFN(x) ((x) << S_FW_IQ_CMD_VFN)
+#define G_FW_IQ_CMD_VFN(x) (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN)
+
+#define S_FW_IQ_CMD_ALLOC 31
+#define M_FW_IQ_CMD_ALLOC 0x1
+#define V_FW_IQ_CMD_ALLOC(x) ((x) << S_FW_IQ_CMD_ALLOC)
+#define G_FW_IQ_CMD_ALLOC(x) \
+ (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC)
+#define F_FW_IQ_CMD_ALLOC V_FW_IQ_CMD_ALLOC(1U)
+
+#define S_FW_IQ_CMD_FREE 30
+#define M_FW_IQ_CMD_FREE 0x1
+#define V_FW_IQ_CMD_FREE(x) ((x) << S_FW_IQ_CMD_FREE)
+#define G_FW_IQ_CMD_FREE(x) (((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE)
+#define F_FW_IQ_CMD_FREE V_FW_IQ_CMD_FREE(1U)
+
+#define S_FW_IQ_CMD_IQSTART 28
+#define M_FW_IQ_CMD_IQSTART 0x1
+#define V_FW_IQ_CMD_IQSTART(x) ((x) << S_FW_IQ_CMD_IQSTART)
+#define G_FW_IQ_CMD_IQSTART(x) \
+ (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART)
+#define F_FW_IQ_CMD_IQSTART V_FW_IQ_CMD_IQSTART(1U)
+
+#define S_FW_IQ_CMD_IQSTOP 27
+#define M_FW_IQ_CMD_IQSTOP 0x1
+#define V_FW_IQ_CMD_IQSTOP(x) ((x) << S_FW_IQ_CMD_IQSTOP)
+#define G_FW_IQ_CMD_IQSTOP(x) \
+ (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP)
+#define F_FW_IQ_CMD_IQSTOP V_FW_IQ_CMD_IQSTOP(1U)
+
+#define S_FW_IQ_CMD_TYPE 29
+#define M_FW_IQ_CMD_TYPE 0x7
+#define V_FW_IQ_CMD_TYPE(x) ((x) << S_FW_IQ_CMD_TYPE)
+#define G_FW_IQ_CMD_TYPE(x) (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE)
+
+#define S_FW_IQ_CMD_IQASYNCH 28
+#define M_FW_IQ_CMD_IQASYNCH 0x1
+#define V_FW_IQ_CMD_IQASYNCH(x) ((x) << S_FW_IQ_CMD_IQASYNCH)
+#define G_FW_IQ_CMD_IQASYNCH(x) \
+ (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH)
+#define F_FW_IQ_CMD_IQASYNCH V_FW_IQ_CMD_IQASYNCH(1U)
+
+#define S_FW_IQ_CMD_VIID 16
+#define M_FW_IQ_CMD_VIID 0xfff
+#define V_FW_IQ_CMD_VIID(x) ((x) << S_FW_IQ_CMD_VIID)
+#define G_FW_IQ_CMD_VIID(x) (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID)
+
+#define S_FW_IQ_CMD_IQANDST 15
+#define M_FW_IQ_CMD_IQANDST 0x1
+#define V_FW_IQ_CMD_IQANDST(x) ((x) << S_FW_IQ_CMD_IQANDST)
+#define G_FW_IQ_CMD_IQANDST(x) \
+ (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST)
+#define F_FW_IQ_CMD_IQANDST V_FW_IQ_CMD_IQANDST(1U)
+
+#define S_FW_IQ_CMD_IQANUD 12
+#define M_FW_IQ_CMD_IQANUD 0x3
+#define V_FW_IQ_CMD_IQANUD(x) ((x) << S_FW_IQ_CMD_IQANUD)
+#define G_FW_IQ_CMD_IQANUD(x) \
+ (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD)
+
+#define S_FW_IQ_CMD_IQANDSTINDEX 0
+#define M_FW_IQ_CMD_IQANDSTINDEX 0xfff
+#define V_FW_IQ_CMD_IQANDSTINDEX(x) ((x) << S_FW_IQ_CMD_IQANDSTINDEX)
+#define G_FW_IQ_CMD_IQANDSTINDEX(x) \
+ (((x) >> S_FW_IQ_CMD_IQANDSTINDEX) & M_FW_IQ_CMD_IQANDSTINDEX)
+
+#define S_FW_IQ_CMD_IQGTSMODE 14
+#define M_FW_IQ_CMD_IQGTSMODE 0x1
+#define V_FW_IQ_CMD_IQGTSMODE(x) ((x) << S_FW_IQ_CMD_IQGTSMODE)
+#define G_FW_IQ_CMD_IQGTSMODE(x) \
+ (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE)
+#define F_FW_IQ_CMD_IQGTSMODE V_FW_IQ_CMD_IQGTSMODE(1U)
+
+#define S_FW_IQ_CMD_IQPCIECH 12
+#define M_FW_IQ_CMD_IQPCIECH 0x3
+#define V_FW_IQ_CMD_IQPCIECH(x) ((x) << S_FW_IQ_CMD_IQPCIECH)
+#define G_FW_IQ_CMD_IQPCIECH(x) \
+ (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH)
+
+#define S_FW_IQ_CMD_IQINTCNTTHRESH 4
+#define M_FW_IQ_CMD_IQINTCNTTHRESH 0x3
+#define V_FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << S_FW_IQ_CMD_IQINTCNTTHRESH)
+#define G_FW_IQ_CMD_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH)
+
+#define S_FW_IQ_CMD_IQESIZE 0
+#define M_FW_IQ_CMD_IQESIZE 0x3
+#define V_FW_IQ_CMD_IQESIZE(x) ((x) << S_FW_IQ_CMD_IQESIZE)
+#define G_FW_IQ_CMD_IQESIZE(x) \
+ (((x) >> S_FW_IQ_CMD_IQESIZE) & M_FW_IQ_CMD_IQESIZE)
+
+#define S_FW_IQ_CMD_IQRO 30
+#define M_FW_IQ_CMD_IQRO 0x1
+#define V_FW_IQ_CMD_IQRO(x) ((x) << S_FW_IQ_CMD_IQRO)
+#define G_FW_IQ_CMD_IQRO(x) \
+ (((x) >> S_FW_IQ_CMD_IQRO) & M_FW_IQ_CMD_IQRO)
+#define F_FW_IQ_CMD_IQRO V_FW_IQ_CMD_IQRO(1U)
+
+#define S_FW_IQ_CMD_IQFLINTCONGEN 27
+#define M_FW_IQ_CMD_IQFLINTCONGEN 0x1
+#define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN)
+#define G_FW_IQ_CMD_IQFLINTCONGEN(x) \
+ (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN)
+#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U)
+
+#define S_FW_IQ_CMD_IQTYPE 24
+#define V_FW_IQ_CMD_IQTYPE(x) ((x) << S_FW_IQ_CMD_IQTYPE)
+
+#define S_FW_IQ_CMD_FL0CNGCHMAP 20
+#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf
+#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP)
+#define G_FW_IQ_CMD_FL0CNGCHMAP(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP)
+
+#define S_FW_IQ_CMD_FL0DATARO 12
+#define M_FW_IQ_CMD_FL0DATARO 0x1
+#define V_FW_IQ_CMD_FL0DATARO(x) ((x) << S_FW_IQ_CMD_FL0DATARO)
+#define G_FW_IQ_CMD_FL0DATARO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO)
+#define F_FW_IQ_CMD_FL0DATARO V_FW_IQ_CMD_FL0DATARO(1U)
+
+#define S_FW_IQ_CMD_FL0CONGCIF 11
+#define M_FW_IQ_CMD_FL0CONGCIF 0x1
+#define V_FW_IQ_CMD_FL0CONGCIF(x) ((x) << S_FW_IQ_CMD_FL0CONGCIF)
+#define G_FW_IQ_CMD_FL0CONGCIF(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF)
+#define F_FW_IQ_CMD_FL0CONGCIF V_FW_IQ_CMD_FL0CONGCIF(1U)
+
+#define S_FW_IQ_CMD_FL0FETCHRO 6
+#define M_FW_IQ_CMD_FL0FETCHRO 0x1
+#define V_FW_IQ_CMD_FL0FETCHRO(x) ((x) << S_FW_IQ_CMD_FL0FETCHRO)
+#define G_FW_IQ_CMD_FL0FETCHRO(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO)
+#define F_FW_IQ_CMD_FL0FETCHRO V_FW_IQ_CMD_FL0FETCHRO(1U)
+
+#define S_FW_IQ_CMD_FL0HOSTFCMODE 4
+#define M_FW_IQ_CMD_FL0HOSTFCMODE 0x3
+#define V_FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL0HOSTFCMODE)
+#define G_FW_IQ_CMD_FL0HOSTFCMODE(x) \
+ (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE)
+
+#define S_FW_IQ_CMD_FL0PADEN 2
+#define M_FW_IQ_CMD_FL0PADEN 0x1
+#define V_FW_IQ_CMD_FL0PADEN(x) ((x) << S_FW_IQ_CMD_FL0PADEN)
+#define G_FW_IQ_CMD_FL0PADEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN)
+#define F_FW_IQ_CMD_FL0PADEN V_FW_IQ_CMD_FL0PADEN(1U)
+
+#define S_FW_IQ_CMD_FL0PACKEN 1
+#define M_FW_IQ_CMD_FL0PACKEN 0x1
+#define V_FW_IQ_CMD_FL0PACKEN(x) ((x) << S_FW_IQ_CMD_FL0PACKEN)
+#define G_FW_IQ_CMD_FL0PACKEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN)
+#define F_FW_IQ_CMD_FL0PACKEN V_FW_IQ_CMD_FL0PACKEN(1U)
+
+#define S_FW_IQ_CMD_FL0CONGEN 0
+#define M_FW_IQ_CMD_FL0CONGEN 0x1
+#define V_FW_IQ_CMD_FL0CONGEN(x) ((x) << S_FW_IQ_CMD_FL0CONGEN)
+#define G_FW_IQ_CMD_FL0CONGEN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN)
+#define F_FW_IQ_CMD_FL0CONGEN V_FW_IQ_CMD_FL0CONGEN(1U)
+
+#define S_FW_IQ_CMD_FL0FBMIN 7
+#define M_FW_IQ_CMD_FL0FBMIN 0x7
+#define V_FW_IQ_CMD_FL0FBMIN(x) ((x) << S_FW_IQ_CMD_FL0FBMIN)
+#define G_FW_IQ_CMD_FL0FBMIN(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN)
+
+#define S_FW_IQ_CMD_FL0FBMAX 4
+#define M_FW_IQ_CMD_FL0FBMAX 0x7
+#define V_FW_IQ_CMD_FL0FBMAX(x) ((x) << S_FW_IQ_CMD_FL0FBMAX)
+#define G_FW_IQ_CMD_FL0FBMAX(x) \
+ (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX)
+
+struct fw_eq_eth_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 eqid_pkd;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ __be32 autoequiqe_to_viid;
+ __be32 r8_lo;
+ __be64 r9;
+};
+
+#define S_FW_EQ_ETH_CMD_PFN 8
+#define M_FW_EQ_ETH_CMD_PFN 0x7
+#define V_FW_EQ_ETH_CMD_PFN(x) ((x) << S_FW_EQ_ETH_CMD_PFN)
+#define G_FW_EQ_ETH_CMD_PFN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN)
+
+#define S_FW_EQ_ETH_CMD_VFN 0
+#define M_FW_EQ_ETH_CMD_VFN 0xff
+#define V_FW_EQ_ETH_CMD_VFN(x) ((x) << S_FW_EQ_ETH_CMD_VFN)
+#define G_FW_EQ_ETH_CMD_VFN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN)
+
+#define S_FW_EQ_ETH_CMD_ALLOC 31
+#define M_FW_EQ_ETH_CMD_ALLOC 0x1
+#define V_FW_EQ_ETH_CMD_ALLOC(x) ((x) << S_FW_EQ_ETH_CMD_ALLOC)
+#define G_FW_EQ_ETH_CMD_ALLOC(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC)
+#define F_FW_EQ_ETH_CMD_ALLOC V_FW_EQ_ETH_CMD_ALLOC(1U)
+
+#define S_FW_EQ_ETH_CMD_FREE 30
+#define M_FW_EQ_ETH_CMD_FREE 0x1
+#define V_FW_EQ_ETH_CMD_FREE(x) ((x) << S_FW_EQ_ETH_CMD_FREE)
+#define G_FW_EQ_ETH_CMD_FREE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE)
+#define F_FW_EQ_ETH_CMD_FREE V_FW_EQ_ETH_CMD_FREE(1U)
+
+#define S_FW_EQ_ETH_CMD_EQSTART 28
+#define M_FW_EQ_ETH_CMD_EQSTART 0x1
+#define V_FW_EQ_ETH_CMD_EQSTART(x) ((x) << S_FW_EQ_ETH_CMD_EQSTART)
+#define G_FW_EQ_ETH_CMD_EQSTART(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART)
+#define F_FW_EQ_ETH_CMD_EQSTART V_FW_EQ_ETH_CMD_EQSTART(1U)
+
+#define S_FW_EQ_ETH_CMD_EQID 0
+#define M_FW_EQ_ETH_CMD_EQID 0xfffff
+#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID)
+#define G_FW_EQ_ETH_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)
+
+#define S_FW_EQ_ETH_CMD_PHYSEQID 0
+#define M_FW_EQ_ETH_CMD_PHYSEQID 0xfffff
+#define G_FW_EQ_ETH_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID)
+
+#define S_FW_EQ_ETH_CMD_FETCHRO 22
+#define M_FW_EQ_ETH_CMD_FETCHRO 0x1
+#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO)
+#define G_FW_EQ_ETH_CMD_FETCHRO(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO)
+#define F_FW_EQ_ETH_CMD_FETCHRO V_FW_EQ_ETH_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_ETH_CMD_HOSTFCMODE 20
+#define M_FW_EQ_ETH_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_ETH_CMD_HOSTFCMODE)
+#define G_FW_EQ_ETH_CMD_HOSTFCMODE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_HOSTFCMODE) & M_FW_EQ_ETH_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_ETH_CMD_PCIECHN 16
+#define M_FW_EQ_ETH_CMD_PCIECHN 0x3
+#define V_FW_EQ_ETH_CMD_PCIECHN(x) ((x) << S_FW_EQ_ETH_CMD_PCIECHN)
+#define G_FW_EQ_ETH_CMD_PCIECHN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN)
+
+#define S_FW_EQ_ETH_CMD_IQID 0
+#define M_FW_EQ_ETH_CMD_IQID 0xffff
+#define V_FW_EQ_ETH_CMD_IQID(x) ((x) << S_FW_EQ_ETH_CMD_IQID)
+#define G_FW_EQ_ETH_CMD_IQID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID)
+
+#define S_FW_EQ_ETH_CMD_FBMIN 23
+#define M_FW_EQ_ETH_CMD_FBMIN 0x7
+#define V_FW_EQ_ETH_CMD_FBMIN(x) ((x) << S_FW_EQ_ETH_CMD_FBMIN)
+#define G_FW_EQ_ETH_CMD_FBMIN(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FBMIN) & M_FW_EQ_ETH_CMD_FBMIN)
+
+#define S_FW_EQ_ETH_CMD_FBMAX 20
+#define M_FW_EQ_ETH_CMD_FBMAX 0x7
+#define V_FW_EQ_ETH_CMD_FBMAX(x) ((x) << S_FW_EQ_ETH_CMD_FBMAX)
+#define G_FW_EQ_ETH_CMD_FBMAX(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_FBMAX) & M_FW_EQ_ETH_CMD_FBMAX)
+
+#define S_FW_EQ_ETH_CMD_CIDXFTHRESH 16
+#define M_FW_EQ_ETH_CMD_CIDXFTHRESH 0x7
+#define V_FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESH)
+#define G_FW_EQ_ETH_CMD_CIDXFTHRESH(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESH) & M_FW_EQ_ETH_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_ETH_CMD_EQSIZE 0
+#define M_FW_EQ_ETH_CMD_EQSIZE 0xffff
+#define V_FW_EQ_ETH_CMD_EQSIZE(x) ((x) << S_FW_EQ_ETH_CMD_EQSIZE)
+#define G_FW_EQ_ETH_CMD_EQSIZE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE)
+
+#define S_FW_EQ_ETH_CMD_AUTOEQUEQE 30
+#define M_FW_EQ_ETH_CMD_AUTOEQUEQE 0x1
+#define V_FW_EQ_ETH_CMD_AUTOEQUEQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUEQE)
+#define G_FW_EQ_ETH_CMD_AUTOEQUEQE(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUEQE) & M_FW_EQ_ETH_CMD_AUTOEQUEQE)
+#define F_FW_EQ_ETH_CMD_AUTOEQUEQE V_FW_EQ_ETH_CMD_AUTOEQUEQE(1U)
+
+#define S_FW_EQ_ETH_CMD_VIID 16
+#define M_FW_EQ_ETH_CMD_VIID 0xfff
+#define V_FW_EQ_ETH_CMD_VIID(x) ((x) << S_FW_EQ_ETH_CMD_VIID)
+#define G_FW_EQ_ETH_CMD_VIID(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID)
+
+struct fw_eq_ctrl_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be32 cmpliqid_eqid;
+ __be32 physeqid_pkd;
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+};
+
+#define S_FW_EQ_CTRL_CMD_PFN 8
+#define V_FW_EQ_CTRL_CMD_PFN(x) ((x) << S_FW_EQ_CTRL_CMD_PFN)
+
+#define S_FW_EQ_CTRL_CMD_VFN 0
+#define V_FW_EQ_CTRL_CMD_VFN(x) ((x) << S_FW_EQ_CTRL_CMD_VFN)
+
+#define S_FW_EQ_CTRL_CMD_ALLOC 31
+#define V_FW_EQ_CTRL_CMD_ALLOC(x) ((x) << S_FW_EQ_CTRL_CMD_ALLOC)
+#define F_FW_EQ_CTRL_CMD_ALLOC V_FW_EQ_CTRL_CMD_ALLOC(1U)
+
+#define S_FW_EQ_CTRL_CMD_FREE 30
+#define V_FW_EQ_CTRL_CMD_FREE(x) ((x) << S_FW_EQ_CTRL_CMD_FREE)
+#define F_FW_EQ_CTRL_CMD_FREE V_FW_EQ_CTRL_CMD_FREE(1U)
+
+#define S_FW_EQ_CTRL_CMD_EQSTART 28
+#define V_FW_EQ_CTRL_CMD_EQSTART(x) ((x) << S_FW_EQ_CTRL_CMD_EQSTART)
+#define F_FW_EQ_CTRL_CMD_EQSTART V_FW_EQ_CTRL_CMD_EQSTART(1U)
+
+#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
+#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
+
+#define S_FW_EQ_CTRL_CMD_EQID 0
+#define M_FW_EQ_CTRL_CMD_EQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_EQID(x) ((x) << S_FW_EQ_CTRL_CMD_EQID)
+#define G_FW_EQ_CTRL_CMD_EQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_EQID) & M_FW_EQ_CTRL_CMD_EQID)
+
+#define S_FW_EQ_CTRL_CMD_PHYSEQID 0
+#define M_FW_EQ_CTRL_CMD_PHYSEQID 0xfffff
+#define V_FW_EQ_CTRL_CMD_PHYSEQID(x) ((x) << S_FW_EQ_CTRL_CMD_PHYSEQID)
+#define G_FW_EQ_CTRL_CMD_PHYSEQID(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_PHYSEQID) & M_FW_EQ_CTRL_CMD_PHYSEQID)
+
+#define S_FW_EQ_CTRL_CMD_FETCHRO 22
+#define V_FW_EQ_CTRL_CMD_FETCHRO(x) ((x) << S_FW_EQ_CTRL_CMD_FETCHRO)
+#define F_FW_EQ_CTRL_CMD_FETCHRO V_FW_EQ_CTRL_CMD_FETCHRO(1U)
+
+#define S_FW_EQ_CTRL_CMD_HOSTFCMODE 20
+#define M_FW_EQ_CTRL_CMD_HOSTFCMODE 0x3
+#define V_FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_CTRL_CMD_HOSTFCMODE)
+
+#define S_FW_EQ_CTRL_CMD_PCIECHN 16
+#define V_FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << S_FW_EQ_CTRL_CMD_PCIECHN)
+
+#define S_FW_EQ_CTRL_CMD_IQID 0
+#define V_FW_EQ_CTRL_CMD_IQID(x) ((x) << S_FW_EQ_CTRL_CMD_IQID)
+
+#define S_FW_EQ_CTRL_CMD_FBMIN 23
+#define V_FW_EQ_CTRL_CMD_FBMIN(x) ((x) << S_FW_EQ_CTRL_CMD_FBMIN)
+
+#define S_FW_EQ_CTRL_CMD_FBMAX 20
+#define V_FW_EQ_CTRL_CMD_FBMAX(x) ((x) << S_FW_EQ_CTRL_CMD_FBMAX)
+
+#define S_FW_EQ_CTRL_CMD_CIDXFTHRESH 16
+#define V_FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_CTRL_CMD_CIDXFTHRESH)
+
+#define S_FW_EQ_CTRL_CMD_EQSIZE 0
+#define V_FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << S_FW_EQ_CTRL_CMD_EQSIZE)
+
+enum fw_vi_func {
+ FW_VI_FUNC_ETH,
+};
+
+/* Macros for VIID parsing:
+ * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
+ */
+
+#define S_FW_VIID_VIVLD 7
+#define M_FW_VIID_VIVLD 0x1
+#define G_FW_VIID_VIVLD(x) (((x) >> S_FW_VIID_VIVLD) & M_FW_VIID_VIVLD)
+
+#define S_FW_VIID_VIN 0
+#define M_FW_VIID_VIN 0x7F
+#define G_FW_VIID_VIN(x) (((x) >> S_FW_VIID_VIN) & M_FW_VIID_VIN)
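+/*
+ * Example decode (illustrative): given a VIID laid out as above, the VI
+ * number and valid bit are recovered with
+ *
+ *	vin   = G_FW_VIID_VIN(viid);
+ *	vivld = G_FW_VIID_VIVLD(viid);
+ */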
+
+struct fw_vi_cmd {
+ __be32 op_to_vfn;
+ __be32 alloc_to_len16;
+ __be16 type_to_viid;
+ __u8 mac[6];
+ __u8 portid_pkd;
+ __u8 nmac;
+ __u8 nmac0[6];
+ __be16 norss_rsssize;
+ __u8 nmac1[6];
+ __be16 idsiiq_pkd;
+ __u8 nmac2[6];
+ __be16 idseiq_pkd;
+ __u8 nmac3[6];
+ __be64 r9;
+ __be64 r10;
+};
+
+#define S_FW_VI_CMD_PFN 8
+#define M_FW_VI_CMD_PFN 0x7
+#define V_FW_VI_CMD_PFN(x) ((x) << S_FW_VI_CMD_PFN)
+#define G_FW_VI_CMD_PFN(x) (((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN)
+
+#define S_FW_VI_CMD_VFN 0
+#define M_FW_VI_CMD_VFN 0xff
+#define V_FW_VI_CMD_VFN(x) ((x) << S_FW_VI_CMD_VFN)
+#define G_FW_VI_CMD_VFN(x) (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN)
+
+#define S_FW_VI_CMD_ALLOC 31
+#define M_FW_VI_CMD_ALLOC 0x1
+#define V_FW_VI_CMD_ALLOC(x) ((x) << S_FW_VI_CMD_ALLOC)
+#define G_FW_VI_CMD_ALLOC(x) \
+ (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC)
+#define F_FW_VI_CMD_ALLOC V_FW_VI_CMD_ALLOC(1U)
+
+#define S_FW_VI_CMD_FREE 30
+#define M_FW_VI_CMD_FREE 0x1
+#define V_FW_VI_CMD_FREE(x) ((x) << S_FW_VI_CMD_FREE)
+#define G_FW_VI_CMD_FREE(x) (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE)
+#define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U)
+
+#define S_FW_VI_CMD_VFVLD 24
+#define M_FW_VI_CMD_VFVLD 0x1
+#define G_FW_VI_CMD_VFVLD(x) \
+ (((x) >> S_FW_VI_CMD_VFVLD) & M_FW_VI_CMD_VFVLD)
+
+#define S_FW_VI_CMD_VIN 16
+#define M_FW_VI_CMD_VIN 0xff
+#define G_FW_VI_CMD_VIN(x) \
+ (((x) >> S_FW_VI_CMD_VIN) & M_FW_VI_CMD_VIN)
+
+#define S_FW_VI_CMD_TYPE 15
+#define M_FW_VI_CMD_TYPE 0x1
+#define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE)
+#define G_FW_VI_CMD_TYPE(x) (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE)
+#define F_FW_VI_CMD_TYPE V_FW_VI_CMD_TYPE(1U)
+
+#define S_FW_VI_CMD_FUNC 12
+#define M_FW_VI_CMD_FUNC 0x7
+#define V_FW_VI_CMD_FUNC(x) ((x) << S_FW_VI_CMD_FUNC)
+#define G_FW_VI_CMD_FUNC(x) (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC)
+
+#define S_FW_VI_CMD_VIID 0
+#define M_FW_VI_CMD_VIID 0xfff
+#define V_FW_VI_CMD_VIID(x) ((x) << S_FW_VI_CMD_VIID)
+#define G_FW_VI_CMD_VIID(x) (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID)
+
+#define S_FW_VI_CMD_PORTID 4
+#define M_FW_VI_CMD_PORTID 0xf
+#define V_FW_VI_CMD_PORTID(x) ((x) << S_FW_VI_CMD_PORTID)
+#define G_FW_VI_CMD_PORTID(x) \
+ (((x) >> S_FW_VI_CMD_PORTID) & M_FW_VI_CMD_PORTID)
+
+#define S_FW_VI_CMD_RSSSIZE 0
+#define M_FW_VI_CMD_RSSSIZE 0x7ff
+#define V_FW_VI_CMD_RSSSIZE(x) ((x) << S_FW_VI_CMD_RSSSIZE)
+#define G_FW_VI_CMD_RSSSIZE(x) \
+ (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE)
+
+/* Special VI_MAC command index ids */
+#define FW_VI_MAC_ADD_MAC 0x3FF
+#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
+#define FW_VI_MAC_ID_BASED_FREE 0x3FC
+
+enum fw_vi_mac_smac {
+ FW_VI_MAC_MPS_TCAM_ENTRY = 0x0,
+ FW_VI_MAC_SMT_AND_MPSTCAM = 0x3
+};
+
+enum fw_vi_mac_entry_types {
+ FW_VI_MAC_TYPE_RAW = 0x2,
+};
+
+struct fw_vi_mac_cmd {
+ __be32 op_to_viid;
+ __be32 freemacs_to_len16;
+ union fw_vi_mac {
+ struct fw_vi_mac_exact {
+ __be16 valid_to_idx;
+ __u8 macaddr[6];
+ } exact[7];
+ struct fw_vi_mac_hash {
+ __be64 hashvec;
+ } hash;
+ struct fw_vi_mac_raw {
+ __be32 raw_idx_pkd;
+ __be32 data0_pkd;
+ __be32 data1[2];
+ __be64 data0m_pkd;
+ __be32 data1m[2];
+ } raw;
+ } u;
+};
+
+#define S_FW_VI_MAC_CMD_VIID 0
+#define M_FW_VI_MAC_CMD_VIID 0xfff
+#define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID)
+#define G_FW_VI_MAC_CMD_VIID(x) \
+ (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID)
+
+#define S_FW_VI_MAC_CMD_FREEMACS 31
+#define V_FW_VI_MAC_CMD_FREEMACS(x) ((x) << S_FW_VI_MAC_CMD_FREEMACS)
+
+#define S_FW_VI_MAC_CMD_ENTRY_TYPE 23
+#define V_FW_VI_MAC_CMD_ENTRY_TYPE(x) ((x) << S_FW_VI_MAC_CMD_ENTRY_TYPE)
+
+#define S_FW_VI_MAC_CMD_VALID 15
+#define M_FW_VI_MAC_CMD_VALID 0x1
+#define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID)
+#define G_FW_VI_MAC_CMD_VALID(x) \
+ (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID)
+#define F_FW_VI_MAC_CMD_VALID V_FW_VI_MAC_CMD_VALID(1U)
+
+#define S_FW_VI_MAC_CMD_SMAC_RESULT 10
+#define M_FW_VI_MAC_CMD_SMAC_RESULT 0x3
+#define V_FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << S_FW_VI_MAC_CMD_SMAC_RESULT)
+#define G_FW_VI_MAC_CMD_SMAC_RESULT(x) \
+ (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT)
+
+#define S_FW_VI_MAC_CMD_IDX 0
+#define M_FW_VI_MAC_CMD_IDX 0x3ff
+#define V_FW_VI_MAC_CMD_IDX(x) ((x) << S_FW_VI_MAC_CMD_IDX)
+#define G_FW_VI_MAC_CMD_IDX(x) \
+ (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX)
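+/*
+ * Illustrative use of the exact-match index fields: with p pointing at one
+ * of the exact[] entries above, a driver asking the firmware to pick a
+ * free MPS TCAM slot for a new MAC address would typically set
+ *
+ *	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
+ *				      V_FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ *
+ * and read the allocated index back from the reply with
+ * G_FW_VI_MAC_CMD_IDX().
+ */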
+
+#define S_FW_VI_MAC_CMD_RAW_IDX 16
+#define M_FW_VI_MAC_CMD_RAW_IDX 0xffff
+#define V_FW_VI_MAC_CMD_RAW_IDX(x) ((x) << S_FW_VI_MAC_CMD_RAW_IDX)
+#define G_FW_VI_MAC_CMD_RAW_IDX(x) \
+ (((x) >> S_FW_VI_MAC_CMD_RAW_IDX) & M_FW_VI_MAC_CMD_RAW_IDX)
+
+struct fw_vi_rxmode_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be32 mtu_to_vlanexen;
+ __be32 r4_lo;
+};
+
+#define S_FW_VI_RXMODE_CMD_VIID 0
+#define M_FW_VI_RXMODE_CMD_VIID 0xfff
+#define V_FW_VI_RXMODE_CMD_VIID(x) ((x) << S_FW_VI_RXMODE_CMD_VIID)
+#define G_FW_VI_RXMODE_CMD_VIID(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_VIID) & M_FW_VI_RXMODE_CMD_VIID)
+
+#define S_FW_VI_RXMODE_CMD_MTU 16
+#define M_FW_VI_RXMODE_CMD_MTU 0xffff
+#define V_FW_VI_RXMODE_CMD_MTU(x) ((x) << S_FW_VI_RXMODE_CMD_MTU)
+#define G_FW_VI_RXMODE_CMD_MTU(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_MTU) & M_FW_VI_RXMODE_CMD_MTU)
+
+#define S_FW_VI_RXMODE_CMD_PROMISCEN 14
+#define M_FW_VI_RXMODE_CMD_PROMISCEN 0x3
+#define V_FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << S_FW_VI_RXMODE_CMD_PROMISCEN)
+#define G_FW_VI_RXMODE_CMD_PROMISCEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN)
+
+#define S_FW_VI_RXMODE_CMD_ALLMULTIEN 12
+#define M_FW_VI_RXMODE_CMD_ALLMULTIEN 0x3
+#define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
+ ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN)
+#define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) & M_FW_VI_RXMODE_CMD_ALLMULTIEN)
+
+#define S_FW_VI_RXMODE_CMD_BROADCASTEN 10
+#define M_FW_VI_RXMODE_CMD_BROADCASTEN 0x3
+#define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
+ ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN)
+#define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & \
+ M_FW_VI_RXMODE_CMD_BROADCASTEN)
+
+#define S_FW_VI_RXMODE_CMD_VLANEXEN 8
+#define M_FW_VI_RXMODE_CMD_VLANEXEN 0x3
+#define V_FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << S_FW_VI_RXMODE_CMD_VLANEXEN)
+#define G_FW_VI_RXMODE_CMD_VLANEXEN(x) \
+ (((x) >> S_FW_VI_RXMODE_CMD_VLANEXEN) & M_FW_VI_RXMODE_CMD_VLANEXEN)
+
+struct fw_vi_enable_cmd {
+ __be32 op_to_viid;
+ __be32 ien_to_len16;
+ __be16 blinkdur;
+ __be16 r3;
+ __be32 r4;
+};
+
+#define S_FW_VI_ENABLE_CMD_VIID 0
+#define M_FW_VI_ENABLE_CMD_VIID 0xfff
+#define V_FW_VI_ENABLE_CMD_VIID(x) ((x) << S_FW_VI_ENABLE_CMD_VIID)
+#define G_FW_VI_ENABLE_CMD_VIID(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_VIID) & M_FW_VI_ENABLE_CMD_VIID)
+
+#define S_FW_VI_ENABLE_CMD_IEN 31
+#define M_FW_VI_ENABLE_CMD_IEN 0x1
+#define V_FW_VI_ENABLE_CMD_IEN(x) ((x) << S_FW_VI_ENABLE_CMD_IEN)
+#define G_FW_VI_ENABLE_CMD_IEN(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN)
+#define F_FW_VI_ENABLE_CMD_IEN V_FW_VI_ENABLE_CMD_IEN(1U)
+
+#define S_FW_VI_ENABLE_CMD_EEN 30
+#define M_FW_VI_ENABLE_CMD_EEN 0x1
+#define V_FW_VI_ENABLE_CMD_EEN(x) ((x) << S_FW_VI_ENABLE_CMD_EEN)
+#define G_FW_VI_ENABLE_CMD_EEN(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN)
+#define F_FW_VI_ENABLE_CMD_EEN V_FW_VI_ENABLE_CMD_EEN(1U)
+
+#define S_FW_VI_ENABLE_CMD_DCB_INFO 28
+#define M_FW_VI_ENABLE_CMD_DCB_INFO 0x1
+#define V_FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << S_FW_VI_ENABLE_CMD_DCB_INFO)
+#define G_FW_VI_ENABLE_CMD_DCB_INFO(x) \
+ (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO)
+#define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U)
+
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS 16
+
+/* VI PF stats offset definitions */
+#define VI_PF_NUM_STATS 17
+enum fw_vi_stats_pf_index {
+ FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
+ FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BYTES_IX,
+ FW_VI_PF_STAT_RX_FRAMES_IX,
+ FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
+ FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
+ FW_VI_PF_STAT_RX_ERR_FRAMES_IX
+};
+
+struct fw_vi_stats_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_vi_stats {
+ struct fw_vi_stats_ctl {
+ __be16 nstats_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_vi_stats_pf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_pf_bytes;
+ __be64 rx_pf_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } pf;
+ struct fw_vi_stats_vf {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } vf;
+ } u;
+};
+
+#define S_FW_VI_STATS_CMD_VIID 0
+#define V_FW_VI_STATS_CMD_VIID(x) ((x) << S_FW_VI_STATS_CMD_VIID)
+
+#define S_FW_VI_STATS_CMD_NSTATS 12
+#define V_FW_VI_STATS_CMD_NSTATS(x) ((x) << S_FW_VI_STATS_CMD_NSTATS)
+
+#define S_FW_VI_STATS_CMD_IX 0
+#define V_FW_VI_STATS_CMD_IX(x) ((x) << S_FW_VI_STATS_CMD_IX)
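+/*
+ * Illustrative composition: a stats-control request packs the number of
+ * statistics and the starting index into the ctl variant's nstats_ix
+ * field, e.g.
+ *
+ *	cmd.u.ctl.nstats_ix =
+ *		cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) |
+ *			    V_FW_VI_STATS_CMD_NSTATS(nstats));
+ */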
+
+/* old 16-bit port capabilities bitmap */
+enum fw_port_cap {
+ FW_PORT_CAP_SPEED_100M = 0x0001,
+ FW_PORT_CAP_SPEED_1G = 0x0002,
+ FW_PORT_CAP_SPEED_25G = 0x0004,
+ FW_PORT_CAP_SPEED_10G = 0x0008,
+ FW_PORT_CAP_SPEED_40G = 0x0010,
+ FW_PORT_CAP_SPEED_100G = 0x0020,
+ FW_PORT_CAP_FC_RX = 0x0040,
+ FW_PORT_CAP_FC_TX = 0x0080,
+ FW_PORT_CAP_ANEG = 0x0100,
+ FW_PORT_CAP_MDIX = 0x0200,
+ FW_PORT_CAP_MDIAUTO = 0x0400,
+ FW_PORT_CAP_FEC_RS = 0x0800,
+ FW_PORT_CAP_FEC_BASER_RS = 0x1000,
+ FW_PORT_CAP_FEC_RESERVED = 0x2000,
+ FW_PORT_CAP_802_3_PAUSE = 0x4000,
+ FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
+};
+
+#define S_FW_PORT_CAP_SPEED 0
+#define M_FW_PORT_CAP_SPEED 0x3f
+#define V_FW_PORT_CAP_SPEED(x) ((x) << S_FW_PORT_CAP_SPEED)
+#define G_FW_PORT_CAP_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP_SPEED) & M_FW_PORT_CAP_SPEED)
+
+enum fw_port_mdi {
+ FW_PORT_CAP_MDI_AUTO,
+};
+
+#define S_FW_PORT_CAP_MDI 9
+#define M_FW_PORT_CAP_MDI 3
+#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI)
+#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI)
+
+/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */
+#define FW_PORT_CAP32_SPEED_100M 0x00000001UL
+#define FW_PORT_CAP32_SPEED_1G 0x00000002UL
+#define FW_PORT_CAP32_SPEED_10G 0x00000004UL
+#define FW_PORT_CAP32_SPEED_25G 0x00000008UL
+#define FW_PORT_CAP32_SPEED_40G 0x00000010UL
+#define FW_PORT_CAP32_SPEED_50G 0x00000020UL
+#define FW_PORT_CAP32_SPEED_100G 0x00000040UL
+#define FW_PORT_CAP32_FC_RX 0x00010000UL
+#define FW_PORT_CAP32_FC_TX 0x00020000UL
+#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL
+#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL
+#define FW_PORT_CAP32_ANEG 0x00100000UL
+#define FW_PORT_CAP32_MDIX 0x00200000UL
+#define FW_PORT_CAP32_MDIAUTO 0x00400000UL
+#define FW_PORT_CAP32_FEC_RS 0x00800000UL
+#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL
+
+#define S_FW_PORT_CAP32_SPEED 0
+#define M_FW_PORT_CAP32_SPEED 0xfff
+#define V_FW_PORT_CAP32_SPEED(x) ((x) << S_FW_PORT_CAP32_SPEED)
+#define G_FW_PORT_CAP32_SPEED(x) \
+ (((x) >> S_FW_PORT_CAP32_SPEED) & M_FW_PORT_CAP32_SPEED)
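+/*
+ * Example decode (illustrative): the advertised speeds form a sub-bitmap
+ * of the 32-bit capabilities word, so
+ *
+ *	speeds = G_FW_PORT_CAP32_SPEED(caps32);
+ *	if (speeds & FW_PORT_CAP32_SPEED_100G)
+ *		...
+ *
+ * Since S_FW_PORT_CAP32_SPEED is 0, the FW_PORT_CAP32_SPEED_* bits can
+ * also be tested directly against caps32.
+ */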
+
+enum fw_port_mdi32 {
+ FW_PORT_CAP32_MDI_AUTO,
+};
+
+#define S_FW_PORT_CAP32_MDI 21
+#define M_FW_PORT_CAP32_MDI 3
+#define V_FW_PORT_CAP32_MDI(x) ((x) << S_FW_PORT_CAP32_MDI)
+#define G_FW_PORT_CAP32_MDI(x) \
+ (((x) >> S_FW_PORT_CAP32_MDI) & M_FW_PORT_CAP32_MDI)
+
+enum fw_port_action {
+ FW_PORT_ACTION_L1_CFG = 0x0001,
+ FW_PORT_ACTION_GET_PORT_INFO = 0x0003,
+ FW_PORT_ACTION_L1_CFG32 = 0x0009,
+ FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a,
+};
+
+struct fw_port_cmd {
+ __be32 op_to_portid;
+ __be32 action_to_len16;
+ union fw_port {
+ struct fw_port_l1cfg {
+ __be32 rcap;
+ __be32 r;
+ } l1cfg;
+ struct fw_port_l2cfg {
+ __u8 ctlbf;
+ __u8 ovlan3_to_ivlan0;
+ __be16 ivlantype;
+ __be16 txipg_force_pinfo;
+ __be16 mtu;
+ __be16 ovlan0mask;
+ __be16 ovlan0type;
+ __be16 ovlan1mask;
+ __be16 ovlan1type;
+ __be16 ovlan2mask;
+ __be16 ovlan2type;
+ __be16 ovlan3mask;
+ __be16 ovlan3type;
+ } l2cfg;
+ struct fw_port_info {
+ __be32 lstatus_to_modtype;
+ __be16 pcap;
+ __be16 acap;
+ __be16 mtu;
+ __u8 cbllen;
+ __u8 auxlinfo;
+ __u8 dcbxdis_pkd;
+ __u8 r8_lo;
+ __be16 lpacap;
+ __be64 r9;
+ } info;
+ struct fw_port_diags {
+ __u8 diagop;
+ __u8 r[3];
+ __be32 diagval;
+ } diags;
+ union fw_port_dcb {
+ struct fw_port_dcb_pgid {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[2];
+ __be32 pgid;
+ __be64 r11;
+ } pgid;
+ struct fw_port_dcb_pgrate {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[5];
+ __u8 num_tcs_supported;
+ __u8 pgrate[8];
+ __u8 tsa[8];
+ } pgrate;
+ struct fw_port_dcb_priorate {
+ __u8 type;
+ __u8 apply_pkd;
+ __u8 r10_lo[6];
+ __u8 strict_priorate[8];
+ } priorate;
+ struct fw_port_dcb_pfc {
+ __u8 type;
+ __u8 pfcen;
+ __u8 r10[5];
+ __u8 max_pfc_tcs;
+ __be64 r11;
+ } pfc;
+ struct fw_port_app_priority {
+ __u8 type;
+ __u8 r10[2];
+ __u8 idx;
+ __u8 user_prio_map;
+ __u8 sel_field;
+ __be16 protocolid;
+ __be64 r12;
+ } app_priority;
+ struct fw_port_dcb_control {
+ __u8 type;
+ __u8 all_syncd_pkd;
+ __be16 dcb_version_to_app_state;
+ __be32 r11;
+ __be64 r12;
+ } control;
+ } dcb;
+ struct fw_port_l1cfg32 {
+ __be32 rcap32;
+ __be32 r;
+ } l1cfg32;
+ struct fw_port_info32 {
+ __be32 lstatus32_to_cbllen32;
+ __be32 auxlinfo32_mtu32;
+ __be32 linkattr32;
+ __be32 pcaps32;
+ __be32 acaps32;
+ __be32 lpacaps32;
+ } info32;
+ } u;
+};
+
+#define S_FW_PORT_CMD_PORTID 0
+#define M_FW_PORT_CMD_PORTID 0xf
+#define V_FW_PORT_CMD_PORTID(x) ((x) << S_FW_PORT_CMD_PORTID)
+#define G_FW_PORT_CMD_PORTID(x) \
+ (((x) >> S_FW_PORT_CMD_PORTID) & M_FW_PORT_CMD_PORTID)
+
+#define S_FW_PORT_CMD_ACTION 16
+#define M_FW_PORT_CMD_ACTION 0xffff
+#define V_FW_PORT_CMD_ACTION(x) ((x) << S_FW_PORT_CMD_ACTION)
+#define G_FW_PORT_CMD_ACTION(x) \
+ (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION)
+
+#define S_FW_PORT_CMD_LSTATUS 31
+#define M_FW_PORT_CMD_LSTATUS 0x1
+#define V_FW_PORT_CMD_LSTATUS(x) ((x) << S_FW_PORT_CMD_LSTATUS)
+#define G_FW_PORT_CMD_LSTATUS(x) \
+ (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS)
+#define F_FW_PORT_CMD_LSTATUS V_FW_PORT_CMD_LSTATUS(1U)
+
+#define S_FW_PORT_CMD_LSPEED 24
+#define M_FW_PORT_CMD_LSPEED 0x3f
+#define V_FW_PORT_CMD_LSPEED(x) ((x) << S_FW_PORT_CMD_LSPEED)
+#define G_FW_PORT_CMD_LSPEED(x) \
+ (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED)
+
+#define S_FW_PORT_CMD_TXPAUSE 23
+#define M_FW_PORT_CMD_TXPAUSE 0x1
+#define V_FW_PORT_CMD_TXPAUSE(x) ((x) << S_FW_PORT_CMD_TXPAUSE)
+#define G_FW_PORT_CMD_TXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE)
+#define F_FW_PORT_CMD_TXPAUSE V_FW_PORT_CMD_TXPAUSE(1U)
+
+#define S_FW_PORT_CMD_RXPAUSE 22
+#define M_FW_PORT_CMD_RXPAUSE 0x1
+#define V_FW_PORT_CMD_RXPAUSE(x) ((x) << S_FW_PORT_CMD_RXPAUSE)
+#define G_FW_PORT_CMD_RXPAUSE(x) \
+ (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE)
+#define F_FW_PORT_CMD_RXPAUSE V_FW_PORT_CMD_RXPAUSE(1U)
+
+#define S_FW_PORT_CMD_MDIOCAP 21
+#define M_FW_PORT_CMD_MDIOCAP 0x1
+#define V_FW_PORT_CMD_MDIOCAP(x) ((x) << S_FW_PORT_CMD_MDIOCAP)
+#define G_FW_PORT_CMD_MDIOCAP(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP)
+#define F_FW_PORT_CMD_MDIOCAP V_FW_PORT_CMD_MDIOCAP(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR 16
+#define M_FW_PORT_CMD_MDIOADDR 0x1f
+#define V_FW_PORT_CMD_MDIOADDR(x) ((x) << S_FW_PORT_CMD_MDIOADDR)
+#define G_FW_PORT_CMD_MDIOADDR(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOADDR) & M_FW_PORT_CMD_MDIOADDR)
+
+#define S_FW_PORT_CMD_PTYPE 8
+#define M_FW_PORT_CMD_PTYPE 0x1f
+#define V_FW_PORT_CMD_PTYPE(x) ((x) << S_FW_PORT_CMD_PTYPE)
+#define G_FW_PORT_CMD_PTYPE(x) \
+ (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE)
+
+#define S_FW_PORT_CMD_LINKDNRC 5
+#define M_FW_PORT_CMD_LINKDNRC 0x7
+#define V_FW_PORT_CMD_LINKDNRC(x) ((x) << S_FW_PORT_CMD_LINKDNRC)
+#define G_FW_PORT_CMD_LINKDNRC(x) \
+ (((x) >> S_FW_PORT_CMD_LINKDNRC) & M_FW_PORT_CMD_LINKDNRC)
+
+#define S_FW_PORT_CMD_MODTYPE 0
+#define M_FW_PORT_CMD_MODTYPE 0x1f
+#define V_FW_PORT_CMD_MODTYPE(x) ((x) << S_FW_PORT_CMD_MODTYPE)
+#define G_FW_PORT_CMD_MODTYPE(x) \
+ (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE)
+
+#define S_FW_PORT_CMD_LSTATUS32 31
+#define M_FW_PORT_CMD_LSTATUS32 0x1
+#define V_FW_PORT_CMD_LSTATUS32(x) ((x) << S_FW_PORT_CMD_LSTATUS32)
+#define F_FW_PORT_CMD_LSTATUS32 V_FW_PORT_CMD_LSTATUS32(1U)
+
+#define S_FW_PORT_CMD_LINKDNRC32 28
+#define M_FW_PORT_CMD_LINKDNRC32 0x7
+#define G_FW_PORT_CMD_LINKDNRC32(x) \
+ (((x) >> S_FW_PORT_CMD_LINKDNRC32) & M_FW_PORT_CMD_LINKDNRC32)
+
+#define S_FW_PORT_CMD_MDIOCAP32 26
+#define M_FW_PORT_CMD_MDIOCAP32 0x1
+#define V_FW_PORT_CMD_MDIOCAP32(x) ((x) << S_FW_PORT_CMD_MDIOCAP32)
+#define F_FW_PORT_CMD_MDIOCAP32 V_FW_PORT_CMD_MDIOCAP32(1U)
+
+#define S_FW_PORT_CMD_MDIOADDR32 21
+#define M_FW_PORT_CMD_MDIOADDR32 0x1f
+#define G_FW_PORT_CMD_MDIOADDR32(x) \
+ (((x) >> S_FW_PORT_CMD_MDIOADDR32) & M_FW_PORT_CMD_MDIOADDR32)
+
+#define S_FW_PORT_CMD_PORTTYPE32 13
+#define M_FW_PORT_CMD_PORTTYPE32 0xff
+#define G_FW_PORT_CMD_PORTTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_PORTTYPE32) & M_FW_PORT_CMD_PORTTYPE32)
+
+#define S_FW_PORT_CMD_MODTYPE32 8
+#define M_FW_PORT_CMD_MODTYPE32 0x1f
+#define G_FW_PORT_CMD_MODTYPE32(x) \
+ (((x) >> S_FW_PORT_CMD_MODTYPE32) & M_FW_PORT_CMD_MODTYPE32)
+
+/*
+ * These are configured into the VPD and hence tools that generate
+ * VPD may use this enumeration.
+ * extPHY #lanes T4_I2C extI2C BP_Eq BP_ANEG Speed
+ *
+ * REMEMBER:
+ * Update the Common Code t4_hw.c:t4_get_port_type_description()
+ * with any new Firmware Port Technology Types!
+ */
+enum fw_port_type {
+ FW_PORT_TYPE_FIBER_XFI = 0, /* Y, 1, N, Y, N, N, 10G */
+ FW_PORT_TYPE_FIBER_XAUI = 1, /* Y, 4, N, Y, N, N, 10G */
+ FW_PORT_TYPE_BT_SGMII = 2, /* Y, 1, No, No, No, No, 1G/100M */
+ FW_PORT_TYPE_BT_XFI = 3, /* Y, 1, No, No, No, No, 10G */
+ FW_PORT_TYPE_BT_XAUI = 4, /* Y, 4, No, No, No, No, 10G/1G/100M? */
+ FW_PORT_TYPE_KX4 = 5, /* No, 4, No, No, Yes, Yes, 10G */
+ FW_PORT_TYPE_CX4 = 6, /* No, 4, No, No, No, No, 10G */
+ FW_PORT_TYPE_KX = 7, /* No, 1, No, No, Yes, No, 1G */
+ FW_PORT_TYPE_KR = 8, /* No, 1, No, No, Yes, Yes, 10G */
+ FW_PORT_TYPE_SFP = 9, /* No, 1, Yes, No, No, No, 10G */
+	FW_PORT_TYPE_BP_AP = 10,
+	/* No, 1, No, No, Yes, Yes, 10G, BP ANEG */
+	FW_PORT_TYPE_BP4_AP = 11,
+	/* No, 4, No, No, Yes, Yes, 10G, BP ANEG */
+ FW_PORT_TYPE_QSFP_10G = 12, /* No, 1, Yes, No, No, No, 10G */
+ FW_PORT_TYPE_QSA = 13, /* No, 1, Yes, No, No, No, 10G */
+ FW_PORT_TYPE_QSFP = 14, /* No, 4, Yes, No, No, No, 40G */
+ FW_PORT_TYPE_BP40_BA = 15,
+ /* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANGE */
+ FW_PORT_TYPE_KR4_100G = 16, /* No, 4, 100G/40G/25G, Backplane */
+ FW_PORT_TYPE_CR4_QSFP = 17, /* No, 4, 100G/40G/25G */
+ FW_PORT_TYPE_CR_QSFP = 18, /* No, 1, 25G Spider cable */
+ FW_PORT_TYPE_CR2_QSFP = 19, /* No, 2, 50G */
+ FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */
+ FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */
+ FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
+};
+
+/* These are read from the module's EEPROM and are determined once the
+ * module is inserted.
+ */
+enum fw_port_module_type {
+ FW_PORT_MOD_TYPE_NA = 0x0,
+ FW_PORT_MOD_TYPE_LR = 0x1,
+ FW_PORT_MOD_TYPE_SR = 0x2,
+ FW_PORT_MOD_TYPE_ER = 0x3,
+ FW_PORT_MOD_TYPE_TWINAX_PASSIVE = 0x4,
+ FW_PORT_MOD_TYPE_TWINAX_ACTIVE = 0x5,
+ FW_PORT_MOD_TYPE_LRM = 0x6,
+ FW_PORT_MOD_TYPE_ERROR = M_FW_PORT_CMD_MODTYPE - 3,
+ FW_PORT_MOD_TYPE_UNKNOWN = M_FW_PORT_CMD_MODTYPE - 2,
+ FW_PORT_MOD_TYPE_NOTSUPPORTED = M_FW_PORT_CMD_MODTYPE - 1,
+ FW_PORT_MOD_TYPE_NONE = M_FW_PORT_CMD_MODTYPE
+};
+
+/* used by FW; tools may use this to generate VPD */
+enum fw_port_mod_sub_type {
+ FW_PORT_MOD_SUB_TYPE_NA,
+ FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1,
+ FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2,
+ FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3,
+ FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4,
+ FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5,
+ FW_PORT_MOD_SUB_TYPE_BCM5482 = 0x6,
+ FW_PORT_MOD_SUB_TYPE_BCM84856 = 0x7,
+ FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8,
+
+ /*
+ * The following will never be in the VPD. They are TWINAX cable
+ * lengths decoded from SFP+ module i2c PROMs. These should almost
+ * certainly go somewhere else ...
+ */
+ FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB,
+ FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
+};
+
+/* link down reason codes (3b) */
+enum fw_port_link_dn_rc {
+ FW_PORT_LINK_DN_RC_NONE,
+ FW_PORT_LINK_DN_RC_REMFLT, /* Remote fault detected */
+ FW_PORT_LINK_DN_ANEG_F, /* Auto-negotiation fault */
+ FW_PORT_LINK_DN_RESERVED3,
+ FW_PORT_LINK_DN_OVERHEAT, /* Port overheated */
+ FW_PORT_LINK_DN_UNKNOWN, /* Unable to determine reason */
+ FW_PORT_LINK_DN_RX_LOS, /* No RX signal detected */
+ FW_PORT_LINK_DN_RESERVED7
+};
+
+/* port stats */
+#define FW_NUM_PORT_STATS 50
+#define FW_NUM_PORT_TX_STATS 23
+#define FW_NUM_PORT_RX_STATS 27
+
+enum fw_port_stats_tx_index {
+ FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_FRAMES_IX,
+ FW_STAT_TX_PORT_BCAST_IX,
+ FW_STAT_TX_PORT_MCAST_IX,
+ FW_STAT_TX_PORT_UCAST_IX,
+ FW_STAT_TX_PORT_ERROR_IX,
+ FW_STAT_TX_PORT_64B_IX,
+ FW_STAT_TX_PORT_65B_127B_IX,
+ FW_STAT_TX_PORT_128B_255B_IX,
+ FW_STAT_TX_PORT_256B_511B_IX,
+ FW_STAT_TX_PORT_512B_1023B_IX,
+ FW_STAT_TX_PORT_1024B_1518B_IX,
+ FW_STAT_TX_PORT_1519B_MAX_IX,
+ FW_STAT_TX_PORT_DROP_IX,
+ FW_STAT_TX_PORT_PAUSE_IX,
+ FW_STAT_TX_PORT_PPP0_IX,
+ FW_STAT_TX_PORT_PPP1_IX,
+ FW_STAT_TX_PORT_PPP2_IX,
+ FW_STAT_TX_PORT_PPP3_IX,
+ FW_STAT_TX_PORT_PPP4_IX,
+ FW_STAT_TX_PORT_PPP5_IX,
+ FW_STAT_TX_PORT_PPP6_IX,
+ FW_STAT_TX_PORT_PPP7_IX
+};
+
+enum fw_port_stat_rx_index {
+ FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_FRAMES_IX,
+ FW_STAT_RX_PORT_BCAST_IX,
+ FW_STAT_RX_PORT_MCAST_IX,
+ FW_STAT_RX_PORT_UCAST_IX,
+ FW_STAT_RX_PORT_MTU_ERROR_IX,
+ FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_CRC_ERROR_IX,
+ FW_STAT_RX_PORT_LEN_ERROR_IX,
+ FW_STAT_RX_PORT_SYM_ERROR_IX,
+ FW_STAT_RX_PORT_64B_IX,
+ FW_STAT_RX_PORT_65B_127B_IX,
+ FW_STAT_RX_PORT_128B_255B_IX,
+ FW_STAT_RX_PORT_256B_511B_IX,
+ FW_STAT_RX_PORT_512B_1023B_IX,
+ FW_STAT_RX_PORT_1024B_1518B_IX,
+ FW_STAT_RX_PORT_1519B_MAX_IX,
+ FW_STAT_RX_PORT_PAUSE_IX,
+ FW_STAT_RX_PORT_PPP0_IX,
+ FW_STAT_RX_PORT_PPP1_IX,
+ FW_STAT_RX_PORT_PPP2_IX,
+ FW_STAT_RX_PORT_PPP3_IX,
+ FW_STAT_RX_PORT_PPP4_IX,
+ FW_STAT_RX_PORT_PPP5_IX,
+ FW_STAT_RX_PORT_PPP6_IX,
+ FW_STAT_RX_PORT_PPP7_IX,
+ FW_STAT_RX_PORT_LESS_64B_IX
+};
+
+struct fw_port_stats_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ union fw_port_stats {
+ struct fw_port_stats_ctl {
+ __u8 nstats_bg_bm;
+ __u8 tx_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_port_stats_all {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bcast;
+ __be64 tx_mcast;
+ __be64 tx_ucast;
+ __be64 tx_error;
+ __be64 tx_64b;
+ __be64 tx_65b_127b;
+ __be64 tx_128b_255b;
+ __be64 tx_256b_511b;
+ __be64 tx_512b_1023b;
+ __be64 tx_1024b_1518b;
+ __be64 tx_1519b_max;
+ __be64 tx_drop;
+ __be64 tx_pause;
+ __be64 tx_ppp0;
+ __be64 tx_ppp1;
+ __be64 tx_ppp2;
+ __be64 tx_ppp3;
+ __be64 tx_ppp4;
+ __be64 tx_ppp5;
+ __be64 tx_ppp6;
+ __be64 tx_ppp7;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be64 rx_bcast;
+ __be64 rx_mcast;
+ __be64 rx_ucast;
+ __be64 rx_mtu_error;
+ __be64 rx_mtu_crc_error;
+ __be64 rx_crc_error;
+ __be64 rx_len_error;
+ __be64 rx_sym_error;
+ __be64 rx_64b;
+ __be64 rx_65b_127b;
+ __be64 rx_128b_255b;
+ __be64 rx_256b_511b;
+ __be64 rx_512b_1023b;
+ __be64 rx_1024b_1518b;
+ __be64 rx_1519b_max;
+ __be64 rx_pause;
+ __be64 rx_ppp0;
+ __be64 rx_ppp1;
+ __be64 rx_ppp2;
+ __be64 rx_ppp3;
+ __be64 rx_ppp4;
+ __be64 rx_ppp5;
+ __be64 rx_ppp6;
+ __be64 rx_ppp7;
+ __be64 rx_less_64b;
+ __be64 rx_bg_drop;
+ __be64 rx_bg_trunc;
+ } all;
+ } u;
+};
+
+struct fw_rss_ind_tbl_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ __be16 niqid;
+ __be16 startidx;
+ __be32 r3;
+ __be32 iq0_to_iq2;
+ __be32 iq3_to_iq5;
+ __be32 iq6_to_iq8;
+ __be32 iq9_to_iq11;
+ __be32 iq12_to_iq14;
+ __be32 iq15_to_iq17;
+ __be32 iq18_to_iq20;
+ __be32 iq21_to_iq23;
+ __be32 iq24_to_iq26;
+ __be32 iq27_to_iq29;
+ __be32 iq30_iq31;
+ __be32 r15_lo;
+};
+
+#define S_FW_RSS_IND_TBL_CMD_VIID 0
+#define M_FW_RSS_IND_TBL_CMD_VIID 0xfff
+#define V_FW_RSS_IND_TBL_CMD_VIID(x) ((x) << S_FW_RSS_IND_TBL_CMD_VIID)
+#define G_FW_RSS_IND_TBL_CMD_VIID(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_VIID) & M_FW_RSS_IND_TBL_CMD_VIID)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ0 20
+#define M_FW_RSS_IND_TBL_CMD_IQ0 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ0)
+#define G_FW_RSS_IND_TBL_CMD_IQ0(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ0) & M_FW_RSS_IND_TBL_CMD_IQ0)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ1 10
+#define M_FW_RSS_IND_TBL_CMD_IQ1 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ1)
+#define G_FW_RSS_IND_TBL_CMD_IQ1(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ1) & M_FW_RSS_IND_TBL_CMD_IQ1)
+
+#define S_FW_RSS_IND_TBL_CMD_IQ2 0
+#define M_FW_RSS_IND_TBL_CMD_IQ2 0x3ff
+#define V_FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ2)
+#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \
+ (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2)
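+
+/*
+ * Editor's note (not part of the original interface): the S_/M_/V_/G_
+ * prefixes used throughout this header follow one convention -- Shift,
+ * Mask, Value (pack) and Get (unpack). A minimal sketch of packing three
+ * 10-bit ingress queue ids into one word of the indirection table command;
+ * the helper name is hypothetical and cpu_to_be32() is assumed to come
+ * from cxgbe_compat.h:
+ */
+static inline __be32 fw_rss_ind_tbl_pack_iqs(u32 iq0, u32 iq1, u32 iq2)
+{
+	/* each V_ macro shifts its argument into that field's bit position */
+	return cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(iq0) |
+			   V_FW_RSS_IND_TBL_CMD_IQ1(iq1) |
+			   V_FW_RSS_IND_TBL_CMD_IQ2(iq2));
+}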
+
+struct fw_rss_glb_config_cmd {
+ __be32 op_to_write;
+ __be32 retval_len16;
+ union fw_rss_glb_config {
+ struct fw_rss_glb_config_manual {
+ __be32 mode_pkd;
+ __be32 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_glb_config_basicvirtual {
+ __be32 mode_keymode;
+ __be32 synmapen_to_hashtoeplitz;
+ __be64 r8;
+ __be64 r9;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_GLB_CONFIG_CMD_MODE 28
+#define M_FW_RSS_GLB_CONFIG_CMD_MODE 0xf
+#define G_FW_RSS_GLB_CONFIG_CMD_MODE(x) \
+ (((x) >> S_FW_RSS_GLB_CONFIG_CMD_MODE) & M_FW_RSS_GLB_CONFIG_CMD_MODE)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL 1
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN 8
+#define V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN V_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 7
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 6
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 5
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 4
+#define V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4)
+#define F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 \
+ V_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN 3
+#define V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN V_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN 2
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN V_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP 1
+#define V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP)
+#define F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP \
+ V_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP(1U)
+
+#define S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ 0
+#define V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(x) \
+ ((x) << S_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ)
+#define F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ \
+ V_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ(1U)
+
+struct fw_rss_vi_config_cmd {
+ __be32 op_to_viid;
+ __be32 retval_len16;
+ union fw_rss_vi_config {
+ struct fw_rss_vi_config_manual {
+ __be64 r3;
+ __be64 r4;
+ __be64 r5;
+ } manual;
+ struct fw_rss_vi_config_basicvirtual {
+ __be32 r6;
+ __be32 defaultq_to_udpen;
+ __be64 r9;
+ __be64 r10;
+ } basicvirtual;
+ } u;
+};
+
+#define S_FW_RSS_VI_CONFIG_CMD_VIID 0
+#define M_FW_RSS_VI_CONFIG_CMD_VIID 0xfff
+#define V_FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_VIID)
+#define G_FW_RSS_VI_CONFIG_CMD_VIID(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID)
+
+#define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 16
+#define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 0x3ff
+#define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
+#define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \
+ M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4
+#define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3
+#define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2
+#define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1
+#define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
+ ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \
+ M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \
+ V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U)
+
+#define S_FW_RSS_VI_CONFIG_CMD_UDPEN 0
+#define M_FW_RSS_VI_CONFIG_CMD_UDPEN 0x1
+#define V_FW_RSS_VI_CONFIG_CMD_UDPEN(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_UDPEN)
+#define G_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \
+ (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN)
+#define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U)
+
+struct fw_clip_cmd {
+ __be32 op_to_write;
+ __be32 alloc_to_len16;
+ __be64 ip_hi;
+ __be64 ip_lo;
+ __be32 r4[2];
+};
+
+#define S_FW_CLIP_CMD_ALLOC 31
+#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
+#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
+
+#define S_FW_CLIP_CMD_FREE 30
+#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
+#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
+
+/******************************************************************************
+ * D E B U G   C O M M A N D S
+ ******************************************************/
+
+struct fw_debug_cmd {
+ __be32 op_type;
+ __be32 len16_pkd;
+ union fw_debug {
+ struct fw_debug_assert {
+ __be32 fcid;
+ __be32 line;
+ __be32 x;
+ __be32 y;
+ __u8 filename_0_7[8];
+ __u8 filename_8_15[8];
+ __be64 r3;
+ } assert;
+ struct fw_debug_prt {
+ __be16 dprtstridx;
+ __be16 r3[3];
+ __be32 dprtstrparam0;
+ __be32 dprtstrparam1;
+ __be32 dprtstrparam2;
+ __be32 dprtstrparam3;
+ } prt;
+ } u;
+};
+
+#define S_FW_DEBUG_CMD_TYPE 0
+#define M_FW_DEBUG_CMD_TYPE 0xff
+#define V_FW_DEBUG_CMD_TYPE(x) ((x) << S_FW_DEBUG_CMD_TYPE)
+#define G_FW_DEBUG_CMD_TYPE(x) \
+ (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE)
+
+/******************************************************************************
+ * P C I E F W R E G I S T E R
+ **************************************/
+
+/*
+ * Register definitions for the PCIE_FW register which the firmware uses
+ * to retain status across RESETs. This register should be considered
+ * as a READ-ONLY register for Host Software and only to be used to
+ * track firmware initialization/error state, etc.
+ */
+#define S_PCIE_FW_ERR 31
+#define M_PCIE_FW_ERR 0x1
+#define V_PCIE_FW_ERR(x) ((x) << S_PCIE_FW_ERR)
+#define G_PCIE_FW_ERR(x) (((x) >> S_PCIE_FW_ERR) & M_PCIE_FW_ERR)
+#define F_PCIE_FW_ERR V_PCIE_FW_ERR(1U)
+
+#define S_PCIE_FW_INIT 30
+#define M_PCIE_FW_INIT 0x1
+#define V_PCIE_FW_INIT(x) ((x) << S_PCIE_FW_INIT)
+#define G_PCIE_FW_INIT(x) (((x) >> S_PCIE_FW_INIT) & M_PCIE_FW_INIT)
+#define F_PCIE_FW_INIT V_PCIE_FW_INIT(1U)
+
+#define S_PCIE_FW_HALT 29
+#define M_PCIE_FW_HALT 0x1
+#define V_PCIE_FW_HALT(x) ((x) << S_PCIE_FW_HALT)
+#define G_PCIE_FW_HALT(x) (((x) >> S_PCIE_FW_HALT) & M_PCIE_FW_HALT)
+#define F_PCIE_FW_HALT V_PCIE_FW_HALT(1U)
+
+#define S_PCIE_FW_EVAL 24
+#define M_PCIE_FW_EVAL 0x7
+#define V_PCIE_FW_EVAL(x) ((x) << S_PCIE_FW_EVAL)
+#define G_PCIE_FW_EVAL(x) (((x) >> S_PCIE_FW_EVAL) & M_PCIE_FW_EVAL)
+
+#define S_PCIE_FW_MASTER_VLD 15
+#define M_PCIE_FW_MASTER_VLD 0x1
+#define V_PCIE_FW_MASTER_VLD(x) ((x) << S_PCIE_FW_MASTER_VLD)
+#define G_PCIE_FW_MASTER_VLD(x) \
+ (((x) >> S_PCIE_FW_MASTER_VLD) & M_PCIE_FW_MASTER_VLD)
+#define F_PCIE_FW_MASTER_VLD V_PCIE_FW_MASTER_VLD(1U)
+
+#define S_PCIE_FW_MASTER 12
+#define M_PCIE_FW_MASTER 0x7
+#define V_PCIE_FW_MASTER(x) ((x) << S_PCIE_FW_MASTER)
+#define G_PCIE_FW_MASTER(x) (((x) >> S_PCIE_FW_MASTER) & M_PCIE_FW_MASTER)
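+
+/*
+ * Editor's sketch (hypothetical helper, not in the original interface):
+ * how host software might test the read-only PCIE_FW register with the
+ * accessors above.
+ */
+static inline int pcie_fw_has_error(u32 pcie_fw)
+{
+	/* F_PCIE_FW_ERR is bit 31: firmware has reported a fatal error */
+	return (pcie_fw & F_PCIE_FW_ERR) != 0;
+}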
+
+/******************************************************************************
+ * B I N A R Y H E A D E R F O R M A T
+ **********************************************/
+
+/*
+ * firmware binary header format
+ */
+struct fw_hdr {
+ __u8 ver;
+ __u8 chip; /* terminator chip family */
+ __be16 len512; /* bin length in units of 512-bytes */
+ __be32 fw_ver; /* firmware version */
+ __be32 tp_microcode_ver; /* tcp processor microcode version */
+ __u8 intfver_nic;
+ __u8 intfver_vnic;
+ __u8 intfver_ofld;
+ __u8 intfver_ri;
+ __u8 intfver_iscsipdu;
+ __u8 intfver_iscsi;
+ __u8 intfver_fcoepdu;
+ __u8 intfver_fcoe;
+ __u32 reserved2;
+ __u32 reserved3;
+ __u32 magic; /* runtime or bootstrap fw */
+ __be32 flags;
+ __be32 reserved6[23];
+};
+
+#define S_FW_HDR_FW_VER_MAJOR 24
+#define M_FW_HDR_FW_VER_MAJOR 0xff
+#define V_FW_HDR_FW_VER_MAJOR(x) \
+ ((x) << S_FW_HDR_FW_VER_MAJOR)
+#define G_FW_HDR_FW_VER_MAJOR(x) \
+ (((x) >> S_FW_HDR_FW_VER_MAJOR) & M_FW_HDR_FW_VER_MAJOR)
+
+#define S_FW_HDR_FW_VER_MINOR 16
+#define M_FW_HDR_FW_VER_MINOR 0xff
+#define V_FW_HDR_FW_VER_MINOR(x) \
+ ((x) << S_FW_HDR_FW_VER_MINOR)
+#define G_FW_HDR_FW_VER_MINOR(x) \
+ (((x) >> S_FW_HDR_FW_VER_MINOR) & M_FW_HDR_FW_VER_MINOR)
+
+#define S_FW_HDR_FW_VER_MICRO 8
+#define M_FW_HDR_FW_VER_MICRO 0xff
+#define V_FW_HDR_FW_VER_MICRO(x) \
+ ((x) << S_FW_HDR_FW_VER_MICRO)
+#define G_FW_HDR_FW_VER_MICRO(x) \
+ (((x) >> S_FW_HDR_FW_VER_MICRO) & M_FW_HDR_FW_VER_MICRO)
+
+#define S_FW_HDR_FW_VER_BUILD 0
+#define M_FW_HDR_FW_VER_BUILD 0xff
+#define V_FW_HDR_FW_VER_BUILD(x) \
+ ((x) << S_FW_HDR_FW_VER_BUILD)
+#define G_FW_HDR_FW_VER_BUILD(x) \
+ (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD)
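+
+/*
+ * Editor's example (hypothetical helper): the four 8-bit fields above pack
+ * a firmware version into one 32-bit word, so e.g. 0x01170e00 decodes to
+ * version 1.23.14.0.
+ */
+static inline void fw_hdr_unpack_ver(u32 fw_ver, u8 *maj, u8 *min,
+				     u8 *mic, u8 *bld)
+{
+	*maj = G_FW_HDR_FW_VER_MAJOR(fw_ver);	/* bits 31:24 */
+	*min = G_FW_HDR_FW_VER_MINOR(fw_ver);	/* bits 23:16 */
+	*mic = G_FW_HDR_FW_VER_MICRO(fw_ver);	/* bits 15:8 */
+	*bld = G_FW_HDR_FW_VER_BUILD(fw_ver);	/* bits 7:0 */
+}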
+
+#endif /* _T4FW_INTERFACE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c
new file mode 100644
index 000000000..649bacfb2
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.c
@@ -0,0 +1,880 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ether.h>
+
+#include "common.h"
+#include "t4_regs.h"
+
+/**
+ * t4vf_wait_dev_ready - wait until reads of registers work
+ *
+ * Wait for the device to become ready (signified by our "who am I" register
+ * returning a value other than all 1's). Return an error if it doesn't
+ * become ready ...
+ */
+static int t4vf_wait_dev_ready(struct adapter *adapter)
+{
+ const u32 whoami = T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI;
+ const u32 notready1 = 0xffffffff;
+ const u32 notready2 = 0xeeeeeeee;
+ u32 val;
+
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ msleep(500);
+ val = t4_read_reg(adapter, whoami);
+ if (val != notready1 && val != notready2)
+ return 0;
+
+ dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
+ val);
+ return -EIO;
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
+}
+
+/**
+ * t4vf_wr_mbox_core - send a command to FW through the mailbox
+ * @adapter: the adapter
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sends the given command to FW through the mailbox and waits for the
+ * FW to execute the command. If @rpl is not %NULL it is used to store
+ * the FW's reply to the command. The command and its optional reply
+ * are of the same length. FW can take up to 500 ms to respond.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff otherwise we spin.
+ *
+ * The return value is 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4vf_wr_mbox_core(struct adapter *adapter,
+ const void __attribute__((__may_alias__)) *cmd,
+ int size, void *rpl, bool sleep_ok)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast executing commands but then back
+ * off to larger delays to a maximum retry delay.
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+ u32 mbox_ctl = T4VF_CIM_BASE_ADDR + A_CIM_VF_EXT_MAILBOX_CTRL;
+ __be64 cmd_rpl[MBOX_LEN / 8];
+ struct mbox_entry entry;
+ unsigned int delay_idx;
+ u32 v, mbox_data;
+ const __be64 *p;
+ int i, ret;
+ int ms;
+
+ /* In T6, mailbox size is changed to 128 bytes to avoid
+ * invalidating the entire prefetch buffer.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ mbox_data = T4VF_MBDATA_BASE_ADDR;
+ else
+ mbox_data = T6VF_MBDATA_BASE_ADDR;
+
+ /*
+ * Commands must be multiples of 16 bytes in length and may not be
+ * larger than the size of the Mailbox Data register array.
+ */
+ if ((size % 16) != 0 ||
+ size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
+ return -EINVAL;
+
+ /*
+ * Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ t4_os_atomic_add_tail(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /*
+ * If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ if (i > (2 * FW_CMD_MAX_TIMEOUT)) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = -EBUSY;
+ return ret;
+ }
+
+ /*
+ * If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (t4_os_list_first_entry(&adapter->mbox_list) == &entry)
+ break;
+
+ /*
+ * Delay for a bit before checking again ...
+ */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+ }
+
+ /*
+ * Loop trying to get ownership of the mailbox. Return an error
+ * if we can't gain ownership.
+ */
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+ for (i = 0; v == X_MBOWNER_NONE && i < 3; i++)
+ v = G_MBOWNER(t4_read_reg(adapter, mbox_ctl));
+
+ if (v != X_MBOWNER_PL) {
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+ ret = (v == X_MBOWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ return ret;
+ }
+
+ /*
+ * Write the command array into the Mailbox Data register array and
+ * transfer ownership of the mailbox to the firmware.
+ */
+ for (i = 0, p = cmd; i < size; i += 8)
+ t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+
+ t4_read_reg(adapter, mbox_data); /* flush write */
+ t4_write_reg(adapter, mbox_ctl,
+ F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adapter, mbox_ctl); /* flush write */
+ delay_idx = 0;
+ ms = delay[0];
+
+ /*
+ * Spin waiting for firmware to acknowledge processing our command.
+ */
+ for (i = 0; i < FW_CMD_MAX_TIMEOUT; i++) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+
+ /*
+ * If we're the owner, see if this is the reply we wanted.
+ */
+ v = t4_read_reg(adapter, mbox_ctl);
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ /*
+ * If the Message Valid bit isn't on, revoke ownership
+ * of the mailbox and continue waiting for our reply.
+ */
+ if ((v & F_MBMSGVALID) == 0) {
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ /*
+ * We now have our reply. Extract the command return
+ * value, copy the reply back to our caller's buffer
+ * (if specified) and revoke ownership of the mailbox.
+			 * We return the (negated) firmware command return
+			 * code (this depends on FW_SUCCESS == 0).
+ */
+
+ /*
+ * Retrieve the command reply and release the mailbox.
+ */
+ get_mbox_rpl(adapter, cmd_rpl, size / 8, mbox_data);
+ t4_write_reg(adapter, mbox_ctl,
+ V_MBOWNER(X_MBOWNER_NONE));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list,
+ &adapter->mbox_lock);
+
+ /* return value in high-order host-endian word */
+ v = be64_to_cpu(cmd_rpl[0]);
+
+ if (rpl) {
+ /* request bit in high-order BE word */
+ WARN_ON((be32_to_cpu(*(const u32 *)cmd)
+ & F_FW_CMD_REQUEST) == 0);
+ memcpy(rpl, cmd_rpl, size);
+ }
+ return -((int)G_FW_CMD_RETVAL(v));
+ }
+ }
+
+ /*
+ * We timed out. Return the error ...
+ */
+ dev_err(adapter, "command %#x timed out\n",
+ *(const u8 *)cmd);
+ dev_err(adapter, " Control = %#x\n", t4_read_reg(adapter, mbox_ctl));
+ t4_os_atomic_list_del(&entry, &adapter->mbox_list, &adapter->mbox_lock);
+ ret = -ETIMEDOUT;
+ return ret;
+}
+
+/**
+ * t4vf_fw_reset - issue a reset to FW
+ * @adapter: the adapter
+ *
+ * Issues a reset command to FW. For a Physical Function this would
+ * result in the Firmware resetting all of its state. For a Virtual
+ * Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+ struct fw_reset_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
+ F_FW_CMD_WRITE);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(FW_LEN16(cmd)));
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int t4vf_prep_adapter(struct adapter *adapter)
+{
+ u32 pl_vf_rev;
+ int ret, ver;
+
+ ret = t4vf_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Default port and clock for debugging in case we can't reach
+ * firmware.
+ */
+ adapter->params.nports = 1;
+ adapter->params.vfres.pmask = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ pl_vf_rev = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ adapter->params.pci.device_id = adapter->pdev->id.device_id;
+ adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
+
+ /*
+ * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
+ * ADAPTER (VERSION << 4 | REVISION)
+ */
+ ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
+ adapter->params.chip = 0;
+ switch (ver) {
+ case CHELSIO_T5:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6,
+ pl_vf_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+ default:
+ dev_err(adapter, "%s: Device %d is not supported\n",
+ __func__, adapter->params.pci.device_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_query_params - query FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Reads the values of firmware or device parameters. Up to 7 parameters
+ * can be queried at once.
+ */
+int t4vf_query_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, u32 *vals)
+{
+ struct fw_params_cmd cmd, rpl;
+ struct fw_params_param *p;
+ unsigned int i;
+ size_t len16;
+ int ret;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++)
+ p->mnem = cpu_to_be32(*params++);
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (ret == 0)
+ for (i = 0, p = &rpl.param[0]; i < nparams; i++, p++)
+ *vals++ = be32_to_cpu(p->val);
+ return ret;
+}
+
+/**
+ * t4vf_get_vpd_params - retrieve device VPD parameters
+ * @adapter: the adapter
+ *
+ * Retrieves various device Vital Product Data parameters. The parameters
+ * are stored in @adapter->params.vpd.
+ */
+int t4vf_get_vpd_params(struct adapter *adapter)
+{
+ struct vpd_params *vpd_params = &adapter->params.vpd;
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
+ v = t4vf_query_params(adapter, 1, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ vpd_params->cclk = vals[0];
+ dev_debug(adapter, "%s: vpd_params->cclk = %u\n",
+ __func__, vpd_params->cclk);
+ return 0;
+}
+
+/**
+ * t4vf_get_dev_params - retrieve device parameters
+ * @adapter: the adapter
+ *
+ * Retrieves the firmware and TP microcode versions.
+ */
+int t4vf_get_dev_params(struct adapter *adapter)
+{
+ u32 params[7], vals[7];
+ int v;
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_TPREV));
+ v = t4vf_query_params(adapter, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ adapter->params.fw_vers = vals[0];
+ adapter->params.tp_vers = vals[1];
+
+ dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
+
+ dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
+ G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
+ G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
+ return 0;
+}
+
+/**
+ * t4vf_set_params - sets FW or device parameters
+ * @adapter: the adapter
+ * @nparams: the number of parameters
+ * @params: the parameter names
+ * @vals: the parameter values
+ *
+ * Sets the values of firmware or device parameters. Up to 7 parameters
+ * can be specified at once.
+ */
+int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
+ const u32 *params, const u32 *vals)
+{
+ struct fw_params_param *p;
+ struct fw_params_cmd cmd;
+ unsigned int i;
+ size_t len16;
+
+ if (nparams > 7)
+ return -EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE);
+ len16 = DIV_ROUND_UP(offsetof(struct fw_params_cmd,
+ param[nparams]), 16);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ for (i = 0, p = &cmd.param[0]; i < nparams; i++, p++) {
+ p->mnem = cpu_to_be32(*params++);
+ p->val = cpu_to_be32(*vals++);
+ }
+ return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
+ * t4vf_fl_pkt_align - return the fl packet alignment
+ * @adapter: the adapter
+ *
+ * T4 has a single field to specify the packing and padding boundary.
+ * T5 onwards has separate fields for each, so the alignment for the
+ * next packet offset is the maximum of the two.
+ */
+int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control,
+ u32 sge_control2)
+{
+ unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+ /* T4 uses a single control field to specify both the PCIe Padding and
+ * Packing Boundary. T5 introduced the ability to specify these
+ * separately. The actual Ingress Packet Data alignment boundary
+ * within Packed Buffer Mode is the maximum of these two
+ * specifications.
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+ else
+ ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+ ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+ fl_align = ingpadboundary;
+ if (!is_t4(adapter->params.chip)) {
+ ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+ if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+ ingpackboundary = 16;
+ else
+ ingpackboundary = 1 << (ingpackboundary +
+ X_INGPACKBOUNDARY_SHIFT);
+
+ fl_align = max(ingpadboundary, ingpackboundary);
+ }
+ return fl_align;
+}
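+
+/*
+ * Editor's worked example, assuming the usual shift constants
+ * (X_INGPADBOUNDARY_SHIFT == 5, X_INGPACKBOUNDARY_SHIFT == 5): on a T5
+ * with an INGPADBOUNDARY field of 2 and an INGPACKBOUNDARY field of 1,
+ * ingpadboundary = 1 << (2 + 5) = 128 and ingpackboundary = 1 << (1 + 5)
+ * = 64, so fl_align = max(128, 64) = 128. A field value of
+ * X_INGPACKBOUNDARY_16B maps to 16 bytes instead.
+ */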
+
+unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, T4VF_PL_BASE_ADDR + A_PL_VF_WHOAMI);
+ return (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami));
+}
+
+/**
+ * t4vf_get_rss_glb_config - retrieve adapter RSS Global Configuration
+ * @adapter: the adapter
+ *
+ * Retrieves global RSS mode and parameters with which we have to live
+ * and stores them in the @adapter's RSS parameters.
+ */
+int t4vf_get_rss_glb_config(struct adapter *adapter)
+{
+ struct rss_params *rss = &adapter->params.rss;
+ struct fw_rss_glb_config_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute an RSS Global Configuration read command to retrieve
+ * our RSS configuration.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Translate the big-endian RSS Global Configuration into our
+ * cpu-endian format based on the RSS mode. We also do first level
+ * filtering at this point to weed out modes which don't support
+ * VF Drivers ...
+ */
+ rss->mode = G_FW_RSS_GLB_CONFIG_CMD_MODE
+ (be32_to_cpu(rpl.u.manual.mode_pkd));
+ switch (rss->mode) {
+ case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL: {
+ u32 word = be32_to_cpu
+ (rpl.u.basicvirtual.synmapen_to_hashtoeplitz);
+
+ rss->u.basicvirtual.synmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYNMAPEN) != 0);
+ rss->u.basicvirtual.syn4tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn2tupenipv6 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6) != 0);
+ rss->u.basicvirtual.syn4tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4) != 0);
+ rss->u.basicvirtual.syn2tupenipv4 =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4) != 0);
+ rss->u.basicvirtual.ofdmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_OFDMAPEN) != 0);
+ rss->u.basicvirtual.tnlmapen =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLMAPEN) != 0);
+ rss->u.basicvirtual.tnlalllookup =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_TNLALLLKP) != 0);
+ rss->u.basicvirtual.hashtoeplitz =
+ ((word & F_FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ) != 0);
+
+ /* we need at least Tunnel Map Enable to be set */
+ if (!rss->u.basicvirtual.tnlmapen)
+ return -EINVAL;
+ break;
+ }
+
+ default:
+ /* all unknown/unsupported RSS modes result in an error */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * t4vf_get_vfres - retrieve VF resource limits
+ * @adapter: the adapter
+ *
+ * Retrieves configured resource limits and capabilities for a virtual
+ * function. The results are stored in @adapter->params.vfres.
+ */
+int t4vf_get_vfres(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ struct fw_pfvf_cmd cmd, rpl;
+ u32 word;
+ int v;
+
+ /*
+ * Execute PFVF Read command to get VF resource limits; bail out early
+ * with error on command failure.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+
+ /*
+ * Extract VF resource limits and return success.
+ */
+ word = be32_to_cpu(rpl.niqflint_niq);
+ vfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
+ vfres->niq = G_FW_PFVF_CMD_NIQ(word);
+
+ word = be32_to_cpu(rpl.type_to_neq);
+ vfres->neq = G_FW_PFVF_CMD_NEQ(word);
+ vfres->pmask = G_FW_PFVF_CMD_PMASK(word);
+
+ word = be32_to_cpu(rpl.tc_to_nexactf);
+ vfres->tc = G_FW_PFVF_CMD_TC(word);
+ vfres->nvi = G_FW_PFVF_CMD_NVI(word);
+ vfres->nexactf = G_FW_PFVF_CMD_NEXACTF(word);
+
+ word = be32_to_cpu(rpl.r_caps_to_nethctrl);
+ vfres->r_caps = G_FW_PFVF_CMD_R_CAPS(word);
+ vfres->wx_caps = G_FW_PFVF_CMD_WX_CAPS(word);
+ vfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats_fw - collect "port" statistics via Firmware
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface via Firmware
+ * commands.
+ */
+static int t4vf_get_port_stats_fw(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ struct port_info *pi = adap2pinfo(adapter, pidx);
+ unsigned int rem = VI_VF_NUM_STATS;
+ struct fw_vi_stats_vf fwstats;
+ __be64 *fwsp = (__be64 *)&fwstats;
+
+ /*
+ * Grab the Virtual Interface statistics a chunk at a time via mailbox
+ * commands. We could use a Work Request and get all of them at once
+ * but that's an asynchronous interface which is awkward to use.
+ */
+ while (rem) {
+ unsigned int ix = VI_VF_NUM_STATS - rem;
+ unsigned int nstats = min(6U, rem);
+ struct fw_vi_stats_cmd cmd, rpl;
+ size_t len = (offsetof(struct fw_vi_stats_cmd, u) +
+ sizeof(struct fw_vi_stats_ctl));
+ size_t len16 = DIV_ROUND_UP(len, 16);
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_STATS_CMD) |
+ V_FW_VI_STATS_CMD_VIID(pi->viid) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ cmd.retval_len16 = cpu_to_be32(V_FW_CMD_LEN16(len16));
+ cmd.u.ctl.nstats_ix =
+ cpu_to_be16(V_FW_VI_STATS_CMD_IX(ix) |
+ V_FW_VI_STATS_CMD_NSTATS(nstats));
+ ret = t4vf_wr_mbox_ns(adapter, &cmd, len, &rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ memcpy(fwsp, &rpl.u.ctl.stat0, sizeof(__be64) * nstats);
+
+ rem -= nstats;
+ fwsp += nstats;
+ }
+
+ /*
+ * Translate firmware statistics into host native statistics.
+ */
+ p->tx_octets = be64_to_cpu(fwstats.tx_bcast_bytes) +
+ be64_to_cpu(fwstats.tx_mcast_bytes) +
+ be64_to_cpu(fwstats.tx_ucast_bytes);
+ p->tx_bcast_frames = be64_to_cpu(fwstats.tx_bcast_frames);
+ p->tx_mcast_frames = be64_to_cpu(fwstats.tx_mcast_frames);
+ p->tx_ucast_frames = be64_to_cpu(fwstats.tx_ucast_frames);
+ p->tx_drop = be64_to_cpu(fwstats.tx_drop_frames);
+
+ p->rx_bcast_frames = be64_to_cpu(fwstats.rx_bcast_frames);
+ p->rx_mcast_frames = be64_to_cpu(fwstats.rx_mcast_frames);
+ p->rx_ucast_frames = be64_to_cpu(fwstats.rx_ucast_frames);
+ p->rx_len_err = be64_to_cpu(fwstats.rx_err_frames);
+
+ return 0;
+}
+
+/**
+ * t4vf_get_port_stats - collect "port" statistics
+ * @adapter: the adapter
+ * @pidx: the port index
+ * @p: the stats structure to fill
+ *
+ * Collect statistics for the "port"'s Virtual Interface.
+ */
+void t4vf_get_port_stats(struct adapter *adapter, int pidx,
+ struct port_stats *p)
+{
+ /*
+ * If this is not the first Virtual Interface for our Virtual
+ * Function, we need to use Firmware commands to retrieve its
+ * MPS statistics.
+ */
+	if (pidx != 0) {
+		t4vf_get_port_stats_fw(adapter, pidx, p);
+		return;
+	}
+
+ /*
+ * But for the first VI, we can grab its statistics via the MPS
+ * register mapped into the VF register space.
+ */
+#define GET_STAT(name) \
+ t4_read_reg64(adapter, \
+ T4VF_MPS_BASE_ADDR + A_MPS_VF_STAT_##name##_L)
+ p->tx_octets = GET_STAT(TX_VF_BCAST_BYTES) +
+ GET_STAT(TX_VF_MCAST_BYTES) +
+ GET_STAT(TX_VF_UCAST_BYTES);
+ p->tx_bcast_frames = GET_STAT(TX_VF_BCAST_FRAMES);
+ p->tx_mcast_frames = GET_STAT(TX_VF_MCAST_FRAMES);
+ p->tx_ucast_frames = GET_STAT(TX_VF_UCAST_FRAMES);
+ p->tx_drop = GET_STAT(TX_VF_DROP_FRAMES);
+
+ p->rx_bcast_frames = GET_STAT(RX_VF_BCAST_FRAMES);
+ p->rx_mcast_frames = GET_STAT(RX_VF_MCAST_FRAMES);
+ p->rx_ucast_frames = GET_STAT(RX_VF_UCAST_FRAMES);
+
+ p->rx_len_err = GET_STAT(RX_VF_ERR_FRAMES);
+#undef GET_STAT
+}
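+
+/* Editor's note: each A_MPS_VF_STAT_*_L register used above is the low
+ * half of a 64-bit counter pair; t4_read_reg64() fetches both halves in
+ * a single access.
+ */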
+
+static int t4vf_alloc_vi(struct adapter *adapter, int port_id)
+{
+ struct fw_vi_cmd cmd, rpl;
+ int v;
+
+ /*
+ * Execute a VI command to allocate Virtual Interface and return its
+ * VIID.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE |
+ F_FW_CMD_EXEC);
+ cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(cmd) |
+ F_FW_VI_CMD_ALLOC);
+ cmd.portid_pkd = V_FW_VI_CMD_PORTID(port_id);
+ v = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
+ if (v != FW_SUCCESS)
+ return v;
+ return G_FW_VI_CMD_VIID(be16_to_cpu(rpl.type_to_viid));
+}
+
+int t4vf_port_init(struct adapter *adapter)
+{
+ unsigned int fw_caps = adapter->params.fw_caps_support;
+ struct fw_port_cmd port_cmd, port_rpl;
+ struct fw_vi_cmd vi_cmd, vi_rpl;
+ fw_port_cap32_t pcaps, acaps;
+ enum fw_port_type port_type;
+ int mdio_addr;
+ int ret, i;
+
+ for_each_port(adapter, i) {
+ struct port_info *p = adap2pinfo(adapter, i);
+
+ /*
+ * If we haven't yet determined if we're talking to Firmware
+ * which knows the new 32-bit Port Caps, it's time to find
+ * out now. This will also tell new Firmware to send us Port
+ * Status Updates using the new 32-bit Port Capabilities
+ * version of the Port Information message.
+ */
+ if (fw_caps == FW_CAPS_UNKNOWN) {
+ u32 param, val;
+
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
+ V_FW_PARAMS_PARAM_X
+ (FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
+ val = 1;
+ ret = t4vf_set_params(adapter, 1, &param, &val);
+ fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
+ adapter->params.fw_caps_support = fw_caps;
+ }
+
+ ret = t4vf_alloc_vi(adapter, p->port_id);
+ if (ret < 0) {
+			dev_err(adapter, "cannot allocate VI for port %d:"
+				" err=%d\n", p->port_id, ret);
+ return ret;
+ }
+ p->viid = ret;
+
+ /*
+ * Execute a VI Read command to get our Virtual Interface
+ * information like MAC address, etc.
+ */
+ memset(&vi_cmd, 0, sizeof(vi_cmd));
+ vi_cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) |
+ F_FW_CMD_REQUEST |
+ F_FW_CMD_READ);
+ vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd));
+ vi_cmd.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(p->viid));
+ ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ p->rss_size = G_FW_VI_CMD_RSSSIZE
+ (be16_to_cpu(vi_rpl.norss_rsssize));
+ t4_os_set_hw_addr(adapter, i, vi_rpl.mac);
+
+ /*
+ * If we don't have read access to our port information, we're
+ * done now. Else, execute a PORT Read command to get it ...
+ */
+ if (!(adapter->params.vfres.r_caps & FW_CMD_CAP_PORT))
+ return 0;
+
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_portid = cpu_to_be32
+ (V_FW_CMD_OP(FW_PORT_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_READ |
+ V_FW_PORT_CMD_PORTID(p->port_id));
+ port_cmd.action_to_len16 = cpu_to_be32
+ (V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
+ FW_PORT_ACTION_GET_PORT_INFO :
+ FW_PORT_ACTION_GET_PORT_INFO32) |
+ FW_LEN16(port_cmd));
+ ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
+ if (ret != FW_SUCCESS)
+ return ret;
+
+ /*
+ * Extract the various fields from the Port Information message.
+ */
+ if (fw_caps == FW_CAPS16) {
+ u32 lstatus = be32_to_cpu
+ (port_rpl.u.info.lstatus_to_modtype);
+
+ port_type = G_FW_PORT_CMD_PTYPE(lstatus);
+ mdio_addr = ((lstatus & F_FW_PORT_CMD_MDIOCAP) ?
+ (int)G_FW_PORT_CMD_MDIOADDR(lstatus) :
+ -1);
+ pcaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.pcap));
+ acaps = fwcaps16_to_caps32
+ (be16_to_cpu(port_rpl.u.info.acap));
+ } else {
+ u32 lstatus32 = be32_to_cpu
+ (port_rpl.u.info32.lstatus32_to_cbllen32);
+
+ port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
+ mdio_addr = ((lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
+ (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
+ -1);
+ pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32);
+ acaps = be32_to_cpu(port_rpl.u.info32.acaps32);
+ }
+
+ p->port_type = port_type;
+ p->mdio_addr = mdio_addr;
+ p->mod_type = FW_PORT_MOD_TYPE_NA;
+ init_link_config(&p->link_cfg, pcaps, acaps);
+ }
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h
new file mode 100644
index 000000000..55e436e74
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/base/t4vf_hw.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef __T4VF_HW_H
+#define __T4VF_HW_H
+
+#define T4VF_PL_BASE_ADDR 0x0200
+#define T4VF_CIM_BASE_ADDR 0x0300
+#define T4VF_MBDATA_BASE_ADDR 0x0240
+#define T6VF_MBDATA_BASE_ADDR 0x0280
+
+#define NUM_CIM_VF_MAILBOX_DATA_INSTANCES NUM_CIM_PF_MAILBOX_DATA_INSTANCES
+#endif /* __T4VF_HW_H */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c
new file mode 100644
index 000000000..a0ab2a6ac
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.c
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include "base/common.h"
+#include "clip_tbl.h"
+
+/**
+ * Allocate a CLIP entry in HW for the associated IPv6 address
+ */
+static int clip6_get_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * Delete the CLIP entry in HW for the associated IPv6 address
+ */
+static int clip6_release_mbox(const struct rte_eth_dev *dev, const u32 *lip)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct fw_clip_cmd c;
+ u64 hi = ((u64)lip[1]) << 32 | lip[0];
+ u64 lo = ((u64)lip[3]) << 32 | lip[2];
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CLIP_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ c.alloc_to_len16 = cpu_to_be32(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+ c.ip_hi = hi;
+ c.ip_lo = lo;
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+/**
+ * cxgbe_clip_release - Release associated CLIP entry
+ * @ce: clip entry to release
+ *
+ * Releases ref count and frees up a clip entry from CLIP table
+ */
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce)
+{
+ int ret;
+
+ t4_os_lock(&ce->lock);
+ if (rte_atomic32_dec_and_test(&ce->refcnt)) {
+ ret = clip6_release_mbox(dev, ce->addr);
+ if (ret)
+ dev_debug(adap, "CLIP FW DEL CMD failed: %d", ret);
+ }
+ t4_os_unlock(&ce->lock);
+}
+
+/**
+ * find_or_alloc_clipe - Find/Allocate a free CLIP entry
+ * @c: CLIP table
+ * @lip: IPv4/IPv6 address to compare/add
+ * Returns pointer to the IPv4/IPv6 entry found/created
+ *
+ * Finds/Allocates a CLIP entry to be used for a filter rule.
+ */
+static struct clip_entry *find_or_alloc_clipe(struct clip_tbl *c,
+ const u32 *lip)
+{
+ struct clip_entry *end, *e;
+ struct clip_entry *first_free = NULL;
+ unsigned int clipt_size = c->clipt_size;
+
+ for (e = &c->cl_list[0], end = &c->cl_list[clipt_size]; e != end; ++e) {
+ if (rte_atomic32_read(&e->refcnt) == 0) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (memcmp(lip, e->addr, sizeof(e->addr)) == 0)
+ goto exists;
+ }
+ }
+
+ if (first_free) {
+ e = first_free;
+ goto exists;
+ }
+
+ return NULL;
+
+exists:
+ return e;
+}
+
+static struct clip_entry *t4_clip_alloc(struct rte_eth_dev *dev,
+ u32 *lip, u8 v6)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct clip_tbl *ctbl = adap->clipt;
+ struct clip_entry *ce;
+ int ret = 0;
+
+ if (!ctbl)
+ return NULL;
+
+ t4_os_write_lock(&ctbl->lock);
+ ce = find_or_alloc_clipe(ctbl, lip);
+ if (ce) {
+ t4_os_lock(&ce->lock);
+ if (!rte_atomic32_read(&ce->refcnt)) {
+ rte_memcpy(ce->addr, lip, sizeof(ce->addr));
+ if (v6) {
+ ce->type = FILTER_TYPE_IPV6;
+ rte_atomic32_set(&ce->refcnt, 1);
+ ret = clip6_get_mbox(dev, lip);
+ if (ret)
+ dev_debug(adap,
+ "CLIP FW ADD CMD failed: %d",
+ ret);
+ } else {
+ ce->type = FILTER_TYPE_IPV4;
+ }
+ } else {
+ rte_atomic32_inc(&ce->refcnt);
+ }
+ t4_os_unlock(&ce->lock);
+ }
+ t4_os_write_unlock(&ctbl->lock);
+
+ return ret ? NULL : ce;
+}
+
+/**
+ * cxgbe_clip_alloc - Allocate an IPv6 CLIP entry
+ * @dev: rte_eth_dev pointer
+ * @lip: IPv6 address to add
+ * Returns pointer to the CLIP entry created
+ *
+ * Allocates an IPv6 CLIP entry to be used for a filter rule.
+ */
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip)
+{
+ return t4_clip_alloc(dev, lip, FILTER_TYPE_IPV6);
+}
+
+/**
+ * Initialize CLIP Table
+ */
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end)
+{
+ unsigned int clipt_size;
+ struct clip_tbl *ctbl;
+ unsigned int i;
+
+ if (clipt_start >= clipt_end)
+ return NULL;
+
+ clipt_size = clipt_end - clipt_start + 1;
+
+ ctbl = t4_os_alloc(sizeof(*ctbl) +
+ clipt_size * sizeof(struct clip_entry));
+ if (!ctbl)
+ return NULL;
+
+ ctbl->clipt_start = clipt_start;
+ ctbl->clipt_size = clipt_size;
+
+ t4_os_rwlock_init(&ctbl->lock);
+
+ for (i = 0; i < ctbl->clipt_size; i++) {
+ t4_os_lock_init(&ctbl->cl_list[i].lock);
+ rte_atomic32_set(&ctbl->cl_list[i].refcnt, 0);
+ }
+
+ return ctbl;
+}
+
+/**
+ * Cleanup CLIP Table
+ */
+void t4_cleanup_clip_tbl(struct adapter *adap)
+{
+ if (adap->clipt)
+ t4_os_free(adap->clipt);
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h
new file mode 100644
index 000000000..737ccc691
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/clip_tbl.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_CLIP_H_
+#define _CXGBE_CLIP_H_
+
+/*
+ * State for the corresponding entry of the HW CLIP table.
+ */
+struct clip_entry {
+ enum filter_type type; /* entry type */
+ u32 addr[4]; /* IPV4 or IPV6 address */
+ rte_spinlock_t lock; /* entry lock */
+ rte_atomic32_t refcnt; /* entry reference count */
+};
+
+struct clip_tbl {
+ unsigned int clipt_start; /* start index of CLIP table */
+ unsigned int clipt_size; /* size of CLIP table */
+ rte_rwlock_t lock; /* table rw lock */
+ struct clip_entry cl_list[0]; /* MUST BE LAST */
+};
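+
+/*
+ * Editor's note: cl_list[0] is a zero-length (flexible) array member, so
+ * the table header and its entries live in one allocation; a sketch of
+ * the sizing arithmetic used by t4_init_clip_tbl() in clip_tbl.c:
+ *
+ *	ctbl = t4_os_alloc(sizeof(*ctbl) +
+ *			   clipt_size * sizeof(struct clip_entry));
+ */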
+
+struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
+ unsigned int clipt_end);
+void t4_cleanup_clip_tbl(struct adapter *adap);
+struct clip_entry *cxgbe_clip_alloc(struct rte_eth_dev *dev, u32 *lip);
+void cxgbe_clip_release(struct rte_eth_dev *dev, struct clip_entry *ce);
+#endif /* _CXGBE_CLIP_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h
new file mode 100644
index 000000000..0bf6061c0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_H_
+#define _CXGBE_H_
+
+#include "base/common.h"
+#include "base/t4_regs.h"
+
+#define CXGBE_MIN_RING_DESC_SIZE 128 /* Min TX/RX descriptor ring size */
+#define CXGBE_MAX_RING_DESC_SIZE 4096 /* Max TX/RX descriptor ring size */
+
+#define CXGBE_DEFAULT_TX_DESC_SIZE 1024 /* Default TX ring size */
+#define CXGBE_DEFAULT_RX_DESC_SIZE 1024 /* Default RX ring size */
+
+#define CXGBE_MIN_RX_BUFSIZE RTE_ETHER_MIN_MTU /* min buf size */
+#define CXGBE_MAX_RX_PKTLEN (9000 + RTE_ETHER_HDR_LEN + \
+ RTE_ETHER_CRC_LEN) /* max pkt */
+
+/* Max poll time is 100 * 100msec = 10 sec */
+#define CXGBE_LINK_STATUS_POLL_MS 100 /* 100ms */
+#define CXGBE_LINK_STATUS_POLL_CNT 100 /* Max number of times to poll */
+
+#define CXGBE_DEFAULT_RSS_KEY_LEN 40 /* 320-bits */
+#define CXGBE_RSS_HF_IPV4_MASK (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_OTHER)
+#define CXGBE_RSS_HF_IPV6_MASK (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_OTHER | \
+ ETH_RSS_IPV6_EX)
+#define CXGBE_RSS_HF_TCP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_IPV6_TCP_EX)
+#define CXGBE_RSS_HF_UDP_IPV6_MASK (ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_IPV6_UDP_EX)
+#define CXGBE_RSS_HF_ALL (ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP)
+
+/* Tx/Rx Offloads supported */
+#define CXGBE_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT | \
+ DEV_TX_OFFLOAD_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_CKSUM | \
+ DEV_TX_OFFLOAD_TCP_TSO | \
+ DEV_TX_OFFLOAD_MULTI_SEGS)
+
+#define CXGBE_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_IPV4_CKSUM | \
+ DEV_RX_OFFLOAD_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_TCP_CKSUM | \
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_RSS_HASH)
+
+/* Devargs filtermode and filtermask representation */
+enum cxgbe_devargs_filter_mode_flags {
+ CXGBE_DEVARGS_FILTER_MODE_PHYSICAL_PORT = (1 << 0),
+ CXGBE_DEVARGS_FILTER_MODE_PF_VF = (1 << 1),
+
+ CXGBE_DEVARGS_FILTER_MODE_ETHERNET_DSTMAC = (1 << 2),
+ CXGBE_DEVARGS_FILTER_MODE_ETHERNET_ETHTYPE = (1 << 3),
+ CXGBE_DEVARGS_FILTER_MODE_VLAN_INNER = (1 << 4),
+ CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER = (1 << 5),
+ CXGBE_DEVARGS_FILTER_MODE_IP_TOS = (1 << 6),
+ CXGBE_DEVARGS_FILTER_MODE_IP_PROTOCOL = (1 << 7),
+ CXGBE_DEVARGS_FILTER_MODE_MAX = (1 << 8),
+};
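+
+/* Editor's example: these are bit flags, so a devargs filtermode matching
+ * on physical port plus destination MAC would combine
+ * CXGBE_DEVARGS_FILTER_MODE_PHYSICAL_PORT |
+ * CXGBE_DEVARGS_FILTER_MODE_ETHERNET_DSTMAC, i.e. 0x5.
+ */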
+
+enum cxgbe_filter_vnic_mode {
+ CXGBE_FILTER_VNIC_MODE_NONE,
+ CXGBE_FILTER_VNIC_MODE_PFVF,
+ CXGBE_FILTER_VNIC_MODE_OVLAN,
+};
+
+/* Common PF and VF devargs */
+#define CXGBE_DEVARG_CMN_KEEP_OVLAN "keep_ovlan"
+#define CXGBE_DEVARG_CMN_TX_MODE_LATENCY "tx_mode_latency"
+
+/* VF only devargs */
+#define CXGBE_DEVARG_VF_FORCE_LINK_UP "force_link_up"
+
+/* Filter Mode/Mask devargs */
+#define CXGBE_DEVARG_PF_FILTER_MODE "filtermode"
+#define CXGBE_DEVARG_PF_FILTER_MASK "filtermask"
+
+bool cxgbe_force_linkup(struct adapter *adap);
+int cxgbe_probe(struct adapter *adapter);
+int cxgbevf_probe(struct adapter *adapter);
+void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps);
+int cxgbe_set_link_status(struct port_info *pi, bool status);
+int cxgbe_up(struct adapter *adap);
+int cxgbe_down(struct port_info *pi);
+void cxgbe_close(struct adapter *adapter);
+void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats);
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats);
+void cxgbe_stats_reset(struct port_info *pi);
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt, struct t4_completion *c);
+int cxgbe_link_start(struct port_info *pi);
+int cxgbe_setup_sge_fwevtq(struct adapter *adapter);
+int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter);
+void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev);
+int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev);
+int cxgbe_init_rss(struct adapter *adap);
+int cxgbe_setup_rss(struct port_info *pi);
+void cxgbe_enable_rx_queues(struct port_info *pi);
+void cxgbe_print_port_info(struct adapter *adap);
+void cxgbe_print_adapter_info(struct adapter *adap);
+void cxgbe_process_devargs(struct adapter *adap);
+void cxgbe_configure_max_ethqsets(struct adapter *adapter);
+
+#endif /* _CXGBE_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h
new file mode 100644
index 000000000..83ae1c2e5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_compat.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_COMPAT_H_
+#define _CXGBE_COMPAT_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdbool.h>
+
+#include <rte_common.h>
+#include <rte_memcpy.h>
+#include <rte_byteorder.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_io.h>
+#include <rte_net.h>
+
+extern int cxgbe_logtype;
+extern int cxgbe_mbox_logtype;
+
+#define dev_printf(level, logtype, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, logtype, \
+ "rte_cxgbe_pmd: " fmt, ##__VA_ARGS__)
+
+#define dev_err(x, fmt, ...) \
+ dev_printf(ERR, cxgbe_logtype, fmt, ##__VA_ARGS__)
+#define dev_info(x, fmt, ...) \
+ dev_printf(INFO, cxgbe_logtype, fmt, ##__VA_ARGS__)
+#define dev_warn(x, fmt, ...) \
+ dev_printf(WARNING, cxgbe_logtype, fmt, ##__VA_ARGS__)
+#define dev_debug(x, fmt, ...) \
+ dev_printf(DEBUG, cxgbe_logtype, fmt, ##__VA_ARGS__)
+
+#define CXGBE_DEBUG_MBOX(x, fmt, ...) \
+ dev_printf(DEBUG, cxgbe_mbox_logtype, "MBOX:" fmt, ##__VA_ARGS__)
+
+#define CXGBE_FUNC_TRACE() \
+ dev_printf(DEBUG, cxgbe_logtype, "CXGBE trace: %s\n", __func__)
+
+#define pr_err(fmt, ...) dev_err(0, fmt, ##__VA_ARGS__)
+#define pr_warn(fmt, ...) dev_warn(0, fmt, ##__VA_ARGS__)
+#define pr_info(fmt, ...) dev_info(0, fmt, ##__VA_ARGS__)
+#define BUG() pr_err("BUG at %s:%d", __func__, __LINE__)
+
+#define ASSERT(x) do {\
+ if (!(x)) \
+		rte_panic("CXGBE: assertion failed: %s", #x); \
+} while (0)
+#define BUG_ON(x) ASSERT(!(x))
+
+#ifndef WARN_ON
+#define WARN_ON(x) do { \
+ int ret = !!(x); \
+ if (unlikely(ret)) \
+ pr_warn("WARN_ON: \"" #x "\" at %s:%d\n", __func__, __LINE__); \
+} while (0)
+#endif
+
+#define __iomem
+
+#ifndef BIT
+#define BIT(n) (1 << (n))
+#endif
+
+#define L1_CACHE_SHIFT 6
+#define L1_CACHE_BYTES BIT(L1_CACHE_SHIFT)
+
+#define PAGE_SHIFT 12
+#define CXGBE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
+#define PTR_ALIGN(p, a) ((typeof(p))CXGBE_ALIGN((unsigned long)(p), (a)))
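+/* Editor's note: the mask trick above requires a power-of-two alignment,
+ * e.g. CXGBE_ALIGN(100, 64) == 128; PTR_ALIGN rounds pointers the same way.
+ */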
+
+#define VLAN_HLEN 4
+#define ETHER_ADDR_LEN 6
+
+#define rmb() rte_rmb() /* dpdk rte provided rmb */
+#define wmb() rte_wmb() /* dpdk rte provided wmb */
+
+typedef uint8_t u8;
+typedef int8_t s8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef int32_t s32;
+typedef uint64_t u64;
+typedef uint64_t dma_addr_t;
+
+#ifndef __le16
+#define __le16 uint16_t
+#endif
+#ifndef __le32
+#define __le32 uint32_t
+#endif
+#ifndef __le64
+#define __le64 uint64_t
+#endif
+#ifndef __be16
+#define __be16 uint16_t
+#endif
+#ifndef __be32
+#define __be32 uint32_t
+#endif
+#ifndef __be64
+#define __be64 uint64_t
+#endif
+#ifndef __u8
+#define __u8 uint8_t
+#endif
+#ifndef __u16
+#define __u16 uint16_t
+#endif
+#ifndef __u32
+#define __u32 uint32_t
+#endif
+#ifndef __u64
+#define __u64 uint64_t
+#endif
+
+#define FALSE 0
+#define TRUE 1
+
+#ifndef min
+#define min(a, b) RTE_MIN(a, b)
+#endif
+
+#ifndef max
+#define max(a, b) RTE_MAX(a, b)
+#endif
+
+/*
+ * round up val _p to a power of 2 size _s
+ */
+#define cxgbe_roundup(_p, _s) (((unsigned long)(_p) + (_s - 1)) & ~(_s - 1))
+
+#ifndef container_of
+#define container_of(ptr, type, member) ({ \
+ typeof(((type *)0)->member)(*__mptr) = (ptr); \
+ (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+#define ARRAY_SIZE(arr) RTE_DIM(arr)
+
+#define cpu_to_be16(o) rte_cpu_to_be_16(o)
+#define cpu_to_be32(o) rte_cpu_to_be_32(o)
+#define cpu_to_be64(o) rte_cpu_to_be_64(o)
+#define cpu_to_le32(o) rte_cpu_to_le_32(o)
+#define be16_to_cpu(o) rte_be_to_cpu_16(o)
+#define be32_to_cpu(o) rte_be_to_cpu_32(o)
+#define be64_to_cpu(o) rte_be_to_cpu_64(o)
+#define le32_to_cpu(o) rte_le_to_cpu_32(o)
+
+#ifndef ntohs
+#define ntohs(o) be16_to_cpu(o)
+#endif
+
+#ifndef ntohl
+#define ntohl(o) be32_to_cpu(o)
+#endif
+
+#ifndef htons
+#define htons(o) cpu_to_be16(o)
+#endif
+
+#ifndef htonl
+#define htonl(o) cpu_to_be32(o)
+#endif
+
+#ifndef caddr_t
+typedef char *caddr_t;
+#endif
+
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define DELAY(x) rte_delay_us(x)
+#define udelay(x) DELAY(x)
+#define msleep(x) DELAY(1000 * (x))
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+
+static inline uint8_t hweight32(uint32_t word32)
+{
+ uint32_t res = word32 - ((word32 >> 1) & 0x55555555);
+
+ res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
+ res = (res + (res >> 4)) & 0x0F0F0F0F;
+ res = res + (res >> 8);
+ return (res + (res >> 16)) & 0x000000FF;
+} /* hweight32 */
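+
+/* Editor's note: the shift/mask steps above are a SWAR population count --
+ * bits are summed in 2-, 4- and then 8-bit fields; e.g.
+ * hweight32(0xF0F00003) == 10.
+ */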
+
+/**
+ * cxgbe_fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note cxgbe_fls(0) = 0, cxgbe_fls(1) = 1, cxgbe_fls(0x80000000) = 32.
+ */
+static inline int cxgbe_fls(int x)
+{
+ return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
+}
+
+static inline unsigned long ilog2(unsigned long n)
+{
+ unsigned int e = 0;
+
+ while (n) {
+ if (n & ~((1 << 8) - 1)) {
+ e += 8;
+ n >>= 8;
+ continue;
+ }
+
+ if (n & ~((1 << 4) - 1)) {
+ e += 4;
+ n >>= 4;
+ }
+
+ for (;;) {
+ n >>= 1;
+ if (n == 0)
+ break;
+ e++;
+ }
+ }
+
+ return e;
+}
+
+static inline void writel(unsigned int val, volatile void __iomem *addr)
+{
+ rte_write32(val, addr);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, (void *)((uintptr_t)addr + 4));
+}
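+
+/* Editor's note: writeq above is emulated as two 32-bit writes (low word
+ * first), so unlike a native 64-bit store it is not atomic.
+ */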
+
+static inline void writel_relaxed(unsigned int val, volatile void __iomem *addr)
+{
+ rte_write32_relaxed(val, addr);
+}
+
+/*
+ * Multiplies an integer by a fraction, while avoiding unnecessary
+ * overflow or loss of precision.
+ */
+static inline unsigned int mult_frac(unsigned int x, unsigned int numer,
+ unsigned int denom)
+{
+ unsigned int quot = x / denom;
+ unsigned int rem = x % denom;
+
+ return (quot * numer) + ((rem * numer) / denom);
+}
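+
+/*
+ * Editor's usage sketch (variable names hypothetical): converting
+ * core-clock ticks to microseconds without needing a 64-bit intermediate:
+ *
+ *	unsigned int us = mult_frac(ticks, 1000, cclk_khz);
+ */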
+#endif /* _CXGBE_COMPAT_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c
new file mode 100644
index 000000000..1deee2f5c
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -0,0 +1,1259 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+
+#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+#include "cxgbe_flow.h"
+
+int cxgbe_logtype;
+int cxgbe_mbox_logtype;
+
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static const struct rte_pci_id cxgb4_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { .vendor_id = 0, } \
+ }
+
+/*
+ * ... and the PCI ID Table itself ...
+ */
+#include "base/t4_pci_id_tbl.h"
+
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
+ uint16_t pkts_sent, pkts_remain;
+ uint16_t total_sent = 0;
+ uint16_t idx = 0;
+ int ret = 0;
+
+ t4_os_lock(&txq->txq_lock);
+ /* free up desc from already completed tx */
+ reclaim_completed_tx(&txq->q);
+ rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
+ while (total_sent < nb_pkts) {
+ pkts_remain = nb_pkts - total_sent;
+
+ for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
+ idx = total_sent + pkts_sent;
+ if ((idx + 1) < nb_pkts)
+ rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
+ volatile void *));
+ ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
+ if (ret < 0)
+ break;
+ }
+ if (!pkts_sent)
+ break;
+ total_sent += pkts_sent;
+ /* reclaim as much as possible */
+ reclaim_completed_tx(&txq->q);
+ }
+
+ t4_os_unlock(&txq->txq_lock);
+ return total_sent;
+}
+
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
+ unsigned int work_done;
+
+ if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
+ dev_err(adapter, "error in cxgbe poll\n");
+
+ return work_done;
+}
+
+int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
+
+ static const struct rte_eth_desc_lim cxgbe_desc_lim = {
+ .nb_max = CXGBE_MAX_RING_DESC_SIZE,
+ .nb_min = CXGBE_MIN_RING_DESC_SIZE,
+ .nb_align = 1,
+ };
+
+ device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
+ device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
+ device_info->max_rx_queues = max_queues;
+ device_info->max_tx_queues = max_queues;
+ device_info->max_mac_addrs = 1;
+ /* XXX: For now we support one MAC/port */
+ device_info->max_vfs = adapter->params.arch.vfcount;
+ device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
+
+ device_info->rx_queue_offload_capa = 0UL;
+ device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
+
+ device_info->tx_queue_offload_capa = 0UL;
+ device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
+
+ device_info->reta_size = pi->rss_size;
+ device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
+ device_info->flow_type_rss_offloads = CXGBE_RSS_HF_ALL;
+
+ device_info->rx_desc_lim = cxgbe_desc_lim;
+ device_info->tx_desc_lim = cxgbe_desc_lim;
+ cxgbe_get_speed_caps(pi, &device_info->speed_capa);
+
+ return 0;
+}
+
+int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ 1, -1, 1, -1, false);
+}
+
+int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ 0, -1, 1, -1, false);
+}
+
+int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+
+ /* TODO: address filters ?? */
+
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ -1, 1, 1, -1, false);
+}
+
+int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+
+ /* TODO: address filters ?? */
+
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ -1, 0, 1, -1, false);
+}
+
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct rte_eth_link new_link = { 0 };
+ unsigned int i, work_done, budget = 32;
+ u8 old_link = pi->link_cfg.link_ok;
+
+ for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
+ if (!s->fw_evtq.desc)
+ break;
+
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* Exit if link status changed or always forced up */
+ if (pi->link_cfg.link_ok != old_link ||
+ cxgbe_force_linkup(adapter))
+ break;
+
+ if (!wait_to_complete)
+ break;
+
+ rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
+ }
+
+ new_link.link_status = cxgbe_force_linkup(adapter) ?
+ ETH_LINK_UP : pi->link_cfg.link_ok;
+ new_link.link_autoneg = pi->link_cfg.autoneg;
+ new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_speed = pi->link_cfg.speed;
+
+ return rte_eth_linkstatus_set(eth_dev, &new_link);
+}
+
+/**
+ * Set device link up.
+ */
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+
+ if (!s->fw_evtq.desc)
+ return -ENOMEM;
+
+ /* Flush all link events */
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* If link already up, nothing to do */
+ if (pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, true);
+ if (ret)
+ return ret;
+
+ cxgbe_dev_link_update(dev, 1);
+ return 0;
+}
+
+/**
+ * Set device link down.
+ */
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ unsigned int work_done, budget = 32;
+ struct sge *s = &adapter->sge;
+ int ret;
+
+ if (!s->fw_evtq.desc)
+ return -ENOMEM;
+
+ /* Flush all link events */
+ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+ /* If link already down, nothing to do */
+ if (!pi->link_cfg.link_ok)
+ return 0;
+
+ ret = cxgbe_set_link_status(pi, false);
+ if (ret)
+ return ret;
+
+ cxgbe_dev_link_update(dev, 0);
+ return 0;
+}
+
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ struct rte_eth_dev_info dev_info;
+ int err;
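+ /* The firmware is handed the full frame length, not just the MTU:
+ * Ethernet header + payload + CRC.
+ */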
+ uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ err = cxgbe_dev_info_get(eth_dev, &dev_info);
+ if (err != 0)
+ return err;
+
+ /* Must accommodate at least RTE_ETHER_MIN_MTU */
+ if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
+ return -EINVAL;
+
+ /* set to jumbo mode if needed */
+ if (new_mtu > RTE_ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
+ -1, -1, true);
+ if (!err)
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;
+
+ return err;
+}
+
+/*
+ * Close device: stop the port and clear its Rx/Tx queues.
+ */
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+
+ CXGBE_FUNC_TRACE();
+
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return;
+
+ cxgbe_down(pi);
+
+ /*
+ * We clear queues only if both tx and rx path of the port
+ * have been disabled
+ */
+ t4_sge_eth_clear_queues(pi);
+}
+
+/* Start the device.
+ * It returns 0 on success.
+ */
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
+ struct adapter *adapter = pi->adapter;
+ int err = 0, i;
+
+ CXGBE_FUNC_TRACE();
+
+ /*
+ * If we don't have a connection to the firmware there's nothing we
+ * can do.
+ */
+ if (!(adapter->flags & FW_OK)) {
+ err = -ENXIO;
+ goto out;
+ }
+
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ err = cxgbe_up(adapter);
+ if (err < 0)
+ goto out;
+ }
+
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ eth_dev->data->scattered_rx = 1;
+ else
+ eth_dev->data->scattered_rx = 0;
+
+ cxgbe_enable_rx_queues(pi);
+
+ err = cxgbe_setup_rss(pi);
+ if (err)
+ goto out;
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ err = cxgbe_dev_tx_queue_start(eth_dev, i);
+ if (err)
+ goto out;
+ }
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ err = cxgbe_dev_rx_queue_start(eth_dev, i);
+ if (err)
+ goto out;
+ }
+
+ err = cxgbe_link_start(pi);
+ if (err)
+ goto out;
+
+out:
+ return err;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+
+ CXGBE_FUNC_TRACE();
+
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return;
+
+ cxgbe_down(pi);
+
+ /*
+ * We clear queues only if both tx and rx path of the port
+ * have been disabled
+ */
+ t4_sge_eth_clear_queues(pi);
+ eth_dev->data->scattered_rx = 0;
+}
+
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ int err;
+
+ CXGBE_FUNC_TRACE();
+
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_RSS_HASH;
+
+ if (!(adapter->flags & FW_QUEUE_BOUND)) {
+ err = cxgbe_setup_sge_fwevtq(adapter);
+ if (err)
+ return err;
+ adapter->flags |= FW_QUEUE_BOUND;
+ if (is_pf4(adapter)) {
+ err = cxgbe_setup_sge_ctrl_txq(adapter);
+ if (err)
+ return err;
+ }
+ }
+
+ err = cxgbe_cfg_queue_count(eth_dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ int ret;
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)
+ (eth_dev->data->tx_queues[tx_queue_id]);
+
+ dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
+
+ ret = t4_sge_eth_txq_start(txq);
+ if (ret == 0)
+ eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
+{
+ int ret;
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)
+ (eth_dev->data->tx_queues[tx_queue_id]);
+
+ dev_debug(NULL, "%s: tx_queue_id = %d\n", __func__, tx_queue_id);
+
+ ret = t4_sge_eth_txq_stop(txq);
+ if (ret == 0)
+ eth_dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return ret;
+}
+
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
+ int err = 0;
+ unsigned int temp_nb_desc;
+
+ dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
+ __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
+ socket_id, pi->first_qset);
+
+ /* Free up the existing queue */
+ if (eth_dev->data->tx_queues[queue_idx]) {
+ cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
+ eth_dev->data->tx_queues[queue_idx] = NULL;
+ }
+
+ eth_dev->data->tx_queues[queue_idx] = (void *)txq;
+
+ /* Sanity Checking
+ *
+ * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
+ * <= CXGBE_MAX_RING_DESC_SIZE
+ */
+ temp_nb_desc = nb_desc;
+ if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
+ dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_DEFAULT_TX_DESC_SIZE);
+ temp_nb_desc = CXGBE_DEFAULT_TX_DESC_SIZE;
+ } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
+ dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_TX_DESC_SIZE);
+ return -(EINVAL);
+ }
+
+ txq->q.size = temp_nb_desc;
+
+ err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
+ s->fw_evtq.cntxt_id, socket_id);
+
+ dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
+ __func__, txq->q.cntxt_id, txq->q.abs_id, err);
+ return err;
+}
+
+void cxgbe_dev_tx_queue_release(void *q)
+{
+ struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
+
+ if (txq) {
+ struct port_info *pi = (struct port_info *)
+ (txq->eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ dev_debug(adap, "%s: pi->port_id = %d; tx_queue_id = %d\n",
+ __func__, pi->port_id, txq->q.cntxt_id);
+
+ t4_sge_eth_txq_release(adap, txq);
+ }
+}
+
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ int ret;
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adap = pi->adapter;
+ struct sge_rspq *q;
+
+ dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
+ __func__, pi->port_id, rx_queue_id);
+
+ q = eth_dev->data->rx_queues[rx_queue_id];
+
+ ret = t4_sge_eth_rxq_start(adap, q);
+ if (ret == 0)
+ eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return ret;
+}
+
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ int ret;
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adap = pi->adapter;
+ struct sge_rspq *q;
+
+ dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
+ __func__, pi->port_id, rx_queue_id);
+
+ q = eth_dev->data->rx_queues[rx_queue_id];
+ ret = t4_sge_eth_rxq_stop(adap, q);
+ if (ret == 0)
+ eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return ret;
+}
+
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t queue_idx, uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
+ int err = 0;
+ int msi_idx = 0;
+ unsigned int temp_nb_desc;
+ struct rte_eth_dev_info dev_info;
+ unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
+ __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
+ socket_id, mp);
+
+ err = cxgbe_dev_info_get(eth_dev, &dev_info);
+ if (err != 0) {
+ dev_err(adapter, "%s: error getting ethernet device info",
+ __func__);
+ return err;
+ }
+
+ /* Must accommodate at least RTE_ETHER_MIN_MTU */
+ if ((pkt_len < dev_info.min_rx_bufsize) ||
+ (pkt_len > dev_info.max_rx_pktlen)) {
+ dev_err(adapter, "%s: max pkt len must be > %d and <= %d\n",
+ __func__, dev_info.min_rx_bufsize,
+ dev_info.max_rx_pktlen);
+ return -EINVAL;
+ }
+
+ /* Free up the existing queue */
+ if (eth_dev->data->rx_queues[queue_idx]) {
+ cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
+ eth_dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ eth_dev->data->rx_queues[queue_idx] = (void *)rxq;
+
+ /* Sanity Checking
+ *
+ * nb_desc should be >= CXGBE_MIN_RING_DESC_SIZE and
+ * <= CXGBE_MAX_RING_DESC_SIZE
+ */
+ temp_nb_desc = nb_desc;
+ if (nb_desc < CXGBE_MIN_RING_DESC_SIZE) {
+ dev_warn(adapter, "%s: number of descriptors must be >= %d. Using default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_DEFAULT_RX_DESC_SIZE);
+ temp_nb_desc = CXGBE_DEFAULT_RX_DESC_SIZE;
+ } else if (nb_desc > CXGBE_MAX_RING_DESC_SIZE) {
+ dev_err(adapter, "%s: number of descriptors must be between %d and %d inclusive. Default [%d]\n",
+ __func__, CXGBE_MIN_RING_DESC_SIZE,
+ CXGBE_MAX_RING_DESC_SIZE, CXGBE_DEFAULT_RX_DESC_SIZE);
+ return -(EINVAL);
+ }
+
+ rxq->rspq.size = temp_nb_desc;
+ if ((&rxq->fl) != NULL)
+ rxq->fl.size = temp_nb_desc;
+
+ /* Set to jumbo mode if necessary */
+ if (pkt_len > RTE_ETHER_MAX_LEN)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ eth_dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
+ &rxq->fl, NULL,
+ is_pf4(adapter) ?
+ t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
+ queue_idx, socket_id);
+
+ dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
+ __func__, err, pi->port_id, rxq->rspq.cntxt_id,
+ rxq->rspq.abs_id);
+ return err;
+}
+
+void cxgbe_dev_rx_queue_release(void *q)
+{
+ struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
+ struct sge_rspq *rq = &rxq->rspq;
+
+ if (rq) {
+ struct port_info *pi = (struct port_info *)
+ (rq->eth_dev->data->dev_private);
+ struct adapter *adap = pi->adapter;
+
+ dev_debug(adap, "%s: pi->port_id = %d; rx_queue_id = %d\n",
+ __func__, pi->port_id, rxq->rspq.cntxt_id);
+
+ t4_sge_eth_rxq_release(adap, rxq);
+ }
+}
+
+/*
+ * Get port statistics.
+ */
+static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *eth_stats)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct port_stats ps;
+ unsigned int i;
+
+ cxgbe_stats_get(pi, &ps);
+
+ /* RX Stats */
+ eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 +
+ ps.rx_ovflow2 + ps.rx_ovflow3 +
+ ps.rx_trunc0 + ps.rx_trunc1 +
+ ps.rx_trunc2 + ps.rx_trunc3;
+ eth_stats->ierrors = ps.rx_symbol_err + ps.rx_fcs_err +
+ ps.rx_jabber + ps.rx_too_long + ps.rx_runt +
+ ps.rx_len_err;
+
+ /* TX Stats */
+ eth_stats->opackets = ps.tx_frames;
+ eth_stats->obytes = ps.tx_octets;
+ eth_stats->oerrors = ps.tx_error_frames;
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ eth_stats->q_ipackets[i] = rxq->stats.pkts;
+ eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ eth_stats->ipackets += eth_stats->q_ipackets[i];
+ eth_stats->ibytes += eth_stats->q_ibytes[i];
+ }
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ eth_stats->q_opackets[i] = txq->stats.pkts;
+ eth_stats->q_obytes[i] = txq->stats.tx_bytes;
+ }
+ return 0;
+}
+
+/*
+ * Reset port statistics.
+ */
+static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ unsigned int i;
+
+ cxgbe_stats_reset(pi);
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ rxq->stats.pkts = 0;
+ rxq->stats.rx_bytes = 0;
+ }
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ txq->stats.pkts = 0;
+ txq->stats.tx_bytes = 0;
+ txq->stats.mapping_err = 0;
+ }
+
+ return 0;
+}
+
+static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct link_config *lc = &pi->link_cfg;
+ int rx_pause, tx_pause;
+
+ fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
+ rx_pause = lc->fc & PAUSE_RX;
+ tx_pause = lc->fc & PAUSE_TX;
+
+ if (rx_pause && tx_pause)
+ fc_conf->mode = RTE_FC_FULL;
+ else if (rx_pause)
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ else if (tx_pause)
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ else
+ fc_conf->mode = RTE_FC_NONE;
+ return 0;
+}
+
+static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ struct link_config *lc = &pi->link_cfg;
+
+ if (lc->pcaps & FW_PORT_CAP32_ANEG) {
+ if (fc_conf->autoneg)
+ lc->requested_fc |= PAUSE_AUTONEG;
+ else
+ lc->requested_fc &= ~PAUSE_AUTONEG;
+ }
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_RX_PAUSE))
+ lc->requested_fc |= PAUSE_RX;
+ else
+ lc->requested_fc &= ~PAUSE_RX;
+
+ if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
+ (fc_conf->mode & RTE_FC_TX_PAUSE))
+ lc->requested_fc |= PAUSE_TX;
+ else
+ lc->requested_fc &= ~PAUSE_TX;
+
+ return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
+ &pi->link_cfg);
+}
+
+const uint32_t *
+cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (eth_dev->rx_pkt_burst == cxgbe_recv_pkts)
+ return ptypes;
+ return NULL;
+}
+
+/* Update RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ int err;
+
+ err = cxgbe_write_rss_conf(pi, rss_conf->rss_hf);
+ if (err)
+ return err;
+
+ pi->rss_hf = rss_conf->rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ memcpy(key, rss_conf->rss_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+
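+ /* Flip the 32-bit word order and convert to big-endian so the key
+ * matches the layout t4_write_rss_key() expects.
+ */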
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = cpu_to_be32(key[i]);
+
+ t4_write_rss_key(adapter, mod_key, -1);
+ }
+
+ return 0;
+}
+
+/* Get RSS hash configuration
+ */
+static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ u64 rss_hf = 0;
+ u64 flags = 0;
+ int err;
+
+ err = t4_read_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ &flags, NULL);
+
+ if (err)
+ return err;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
+ rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
+ rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ }
+
+ if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
+ rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
+
+ rss_conf->rss_hf = rss_hf;
+
+ if (rss_conf->rss_key) {
+ u32 key[10], mod_key[10];
+ int i, j;
+
+ t4_read_rss_key(adapter, key);
+
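+ /* Undo the word flip and endian conversion applied when the key
+ * was written, returning it to the caller in host order.
+ */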
+ for (i = 9, j = 0; i >= 0; i--, j++)
+ mod_key[j] = be32_to_cpu(key[i]);
+
+ memcpy(rss_conf->rss_key, mod_key, CXGBE_DEFAULT_RSS_KEY_LEN);
+ }
+
+ return 0;
+}
+
+static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+ return EEPROMSIZE;
+}
+
+/**
+ * eeprom_ptov - translate a physical EEPROM address to virtual
+ * @phys_addr: the physical EEPROM address
+ * @fn: the PCI function number
+ * @sz: size of function-specific area
+ *
+ * Translate a physical EEPROM address to virtual. The first 1K is
+ * accessed through virtual addresses starting at 31K, the rest is
+ * accessed through virtual addresses starting at 0.
+ *
+ * The mapping is as follows:
+ * [0..1K) -> [31K..32K)
+ * [1K..1K+A) -> [31K-A..31K)
+ * [1K+A..ES) -> [0..ES-A-1K)
+ *
+ * where A = @fn * @sz, and ES = EEPROM size.
+ */
+static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
+{
+ fn *= sz;
+ if (phys_addr < 1024)
+ return phys_addr + (31 << 10);
+ if (phys_addr < 1024 + fn)
+ return fn + phys_addr - 1024;
+ if (phys_addr < EEPROMSIZE)
+ return phys_addr - 1024 - fn;
+ if (phys_addr < EEPROMVSIZE)
+ return phys_addr - 1024;
+ return -EINVAL;
+}
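+
+/*
+ * Example: physical address 0x10 lies in the first 1K and is therefore
+ * presented at virtual address 31K + 0x10 = 0x7c10.
+ */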
+
+/* The next two routines implement eeprom read/write from physical addresses.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = t4_seeprom_read(adap, vaddr, v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
+
+ if (vaddr >= 0)
+ vaddr = t4_seeprom_write(adap, vaddr, v);
+ return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *e)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ u32 i, err = 0;
+ u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
+
+ if (!buf)
+ return -ENOMEM;
+
+ e->magic = EEPROM_MAGIC;
+ for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4)
+ err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+ if (!err)
+ rte_memcpy(e->data, buf + e->offset, e->length);
+ rte_free(buf);
+ return err;
+}
+
+static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *eeprom)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ u8 *buf;
+ int err = 0;
+ u32 aligned_offset, aligned_len, *p;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ aligned_offset = eeprom->offset & ~3;
+ aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3;
+
+ if (adapter->pf > 0) {
+ u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
+
+ if (aligned_offset < start ||
+ aligned_offset + aligned_len > start + EEPROMPFSIZE)
+ return -EPERM;
+ }
+
+ if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) {
+ /* A read-modify-write cycle may be needed for the first or last
+ * words, since the requested region need not be 32-bit aligned.
+ */
+ buf = rte_zmalloc(NULL, aligned_len, 0);
+ if (!buf)
+ return -ENOMEM;
+ err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+ if (!err && aligned_len > 4)
+ err = eeprom_rd_phys(adapter,
+ aligned_offset + aligned_len - 4,
+ (u32 *)&buf[aligned_len - 4]);
+ if (err)
+ goto out;
+ rte_memcpy(buf + (eeprom->offset & 3), eeprom->data,
+ eeprom->length);
+ } else {
+ buf = eeprom->data;
+ }
+
+ err = t4_seeprom_wp(adapter, false);
+ if (err)
+ goto out;
+
+ for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+ err = eeprom_wr_phys(adapter, aligned_offset, *p);
+ aligned_offset += 4;
+ }
+
+ if (!err)
+ err = t4_seeprom_wp(adapter, true);
+out:
+ if (buf != eeprom->data)
+ rte_free(buf);
+ return err;
+}
+
+static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+
+ return t4_get_regs_len(adapter) / sizeof(uint32_t);
+}
+
+static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+
+ regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
+ (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) |
+ (1 << 16);
+
+ if (regs->data == NULL) {
+ regs->length = cxgbe_get_regs_len(eth_dev);
+ regs->width = sizeof(uint32_t);
+
+ return 0;
+ }
+
+ t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t)));
+
+ return 0;
+}
+
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+{
+ struct port_info *pi = dev->data->dev_private;
+ int ret;
+
+ ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
+ if (ret < 0) {
+ dev_err(adapter, "failed to set mac addr; err = %d\n",
+ ret);
+ return ret;
+ }
+ pi->xact_addr_filt = ret;
+ return 0;
+}
+
+static const struct eth_dev_ops cxgbe_eth_dev_ops = {
+ .dev_start = cxgbe_dev_start,
+ .dev_stop = cxgbe_dev_stop,
+ .dev_close = cxgbe_dev_close,
+ .promiscuous_enable = cxgbe_dev_promiscuous_enable,
+ .promiscuous_disable = cxgbe_dev_promiscuous_disable,
+ .allmulticast_enable = cxgbe_dev_allmulticast_enable,
+ .allmulticast_disable = cxgbe_dev_allmulticast_disable,
+ .dev_configure = cxgbe_dev_configure,
+ .dev_infos_get = cxgbe_dev_info_get,
+ .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
+ .link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
+ .mtu_set = cxgbe_dev_mtu_set,
+ .tx_queue_setup = cxgbe_dev_tx_queue_setup,
+ .tx_queue_start = cxgbe_dev_tx_queue_start,
+ .tx_queue_stop = cxgbe_dev_tx_queue_stop,
+ .tx_queue_release = cxgbe_dev_tx_queue_release,
+ .rx_queue_setup = cxgbe_dev_rx_queue_setup,
+ .rx_queue_start = cxgbe_dev_rx_queue_start,
+ .rx_queue_stop = cxgbe_dev_rx_queue_stop,
+ .rx_queue_release = cxgbe_dev_rx_queue_release,
+ .filter_ctrl = cxgbe_dev_filter_ctrl,
+ .stats_get = cxgbe_dev_stats_get,
+ .stats_reset = cxgbe_dev_stats_reset,
+ .flow_ctrl_get = cxgbe_flow_ctrl_get,
+ .flow_ctrl_set = cxgbe_flow_ctrl_set,
+ .get_eeprom_length = cxgbe_get_eeprom_length,
+ .get_eeprom = cxgbe_get_eeprom,
+ .set_eeprom = cxgbe_set_eeprom,
+ .get_reg = cxgbe_get_regs,
+ .rss_hash_update = cxgbe_dev_rss_hash_update,
+ .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
+};
+
+/*
+ * Initialize driver
+ * It returns 0 on success.
+ */
+static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = NULL;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ int err = 0;
+
+ CXGBE_FUNC_TRACE();
+
+ eth_dev->dev_ops = &cxgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
+ adapter = rte_zmalloc(name, sizeof(*adapter), 0);
+ if (!adapter)
+ return -1;
+
+ adapter->use_unpacked_mode = 1;
+ adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ if (!adapter->regs) {
+ dev_err(adapter, "%s: cannot map device registers\n", __func__);
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->pdev = pci_dev;
+ adapter->eth_dev = eth_dev;
+ pi->adapter = adapter;
+
+ cxgbe_process_devargs(adapter);
+
+ err = cxgbe_probe(adapter);
+ if (err) {
+ dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
+ __func__, err);
+ goto out_free_adapter;
+ }
+
+ return 0;
+
+out_free_adapter:
+ rte_free(adapter);
+ return err;
+}
+
+static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adap = pi->adapter;
+
+ /* Free up other ports and all resources */
+ cxgbe_close(adap);
+ return 0;
+}
+
+static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct port_info), eth_cxgbe_dev_init);
+}
+
+static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_cxgbe_pmd = {
+ .id_table = cxgb4_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_cxgbe_pci_probe,
+ .remove = eth_cxgbe_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
+ CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
+ CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
+ CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
+
+RTE_INIT(cxgbe_init_log)
+{
+ cxgbe_logtype = rte_log_register("pmd.net.cxgbe");
+ if (cxgbe_logtype >= 0)
+ rte_log_set_level(cxgbe_logtype, RTE_LOG_NOTICE);
+ cxgbe_mbox_logtype = rte_log_register("pmd.net.cxgbe.mbox");
+ if (cxgbe_mbox_logtype >= 0)
+ rte_log_set_level(cxgbe_mbox_logtype, RTE_LOG_NOTICE);
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c
new file mode 100644
index 000000000..27e96c73e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.c
@@ -0,0 +1,1433 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include <rte_net.h>
+
+#include "base/common.h"
+#include "base/t4_tcb.h"
+#include "base/t4_regs.h"
+#include "cxgbe_filter.h"
+#include "clip_tbl.h"
+#include "l2t.h"
+#include "smt.h"
+
+/**
+ * Initialize Hash Filters
+ */
+int cxgbe_init_hash_filter(struct adapter *adap)
+{
+ unsigned int n_user_filters;
+ unsigned int user_filter_perc;
+ int ret;
+ u32 params[7], val[7];
+
+#define FW_PARAM_DEV(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ V_FW_PARAMS_PARAM_Y(0) | \
+ V_FW_PARAMS_PARAM_Z(0))
+
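+ /* e.g. FW_PARAM_DEV(NTID) names the device-level "number of TIDs"
+ * parameter; t4_query_params() below reads its value into val[0].
+ */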
+ params[0] = FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ return ret;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
+ user_filter_perc = 100;
+ n_user_filters = mult_frac(adap->tids.nftids,
+ user_filter_perc,
+ 100);
+
+ adap->tids.nftids = n_user_filters;
+ adap->params.hash_filter = 1;
+ return 0;
+}
+
+/**
+ * Validate if the requested filter specification can be set by checking
+ * if the requested features have been enabled
+ */
+int cxgbe_validate_filter(struct adapter *adapter,
+ struct ch_filter_specification *fs)
+{
+ u32 fconf, iconf;
+
+ /*
+ * Check for unconfigured fields being used.
+ */
+ fconf = fs->cap ? adapter->params.tp.filter_mask :
+ adapter->params.tp.vlan_pri_map;
+
+ iconf = adapter->params.tp.ingress_config;
+
+#define S(_field) \
+ (fs->val._field || fs->mask._field)
+#define U(_mask, _field) \
+ (!(fconf & (_mask)) && S(_field))
+
+ if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
+ U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
+ U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
+ U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
+ return -EOPNOTSUPP;
+
+ /* Either OVLAN or PFVF match is enabled in hardware, but not both */
+ if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
+ (S(ovlan_vld) && (iconf & F_VNIC)))
+ return -EOPNOTSUPP;
+
+ /* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
+ if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
+ (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
+ return -EOPNOTSUPP;
+
+#undef S
+#undef U
+
+ /*
+ * If the user is requesting that the filter action loop
+ * matching packets back out one of our ports, make sure that
+ * the egress port is in range.
+ */
+ if (fs->action == FILTER_SWITCH &&
+ fs->eport >= adapter->params.nports)
+ return -ERANGE;
+
+ /*
+ * Don't allow various trivially obvious bogus out-of-range
+ * values ...
+ */
+ if (fs->val.iport >= adapter->params.nports)
+ return -ERANGE;
+
+ if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
+ return -EOPNOTSUPP;
+
+ if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
+ * Get the queue to which the traffic must be steered.
+ */
+static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int iq;
+
+ /*
+ * If the user has requested steering matching Ingress Packets
+ * to a specific Queue Set, we need to make sure it's in range
+ * for the port and map that into the Absolute Queue ID of the
+ * Queue Set's Response Queue.
+ */
+ if (!fs->dirsteer) {
+ iq = 0;
+ } else {
+ /*
+ * If the iq id is greater than the number of qsets,
+ * then assume it is an absolute qid.
+ */
+ if (fs->iq < pi->n_rx_qsets)
+ iq = adapter->sge.ethrxq[pi->first_qset +
+ fs->iq].rspq.abs_id;
+ else
+ iq = fs->iq;
+ }
+
+ return iq;
+}
+
+/* Return an error number if the indicated filter isn't writable ... */
+static int writable_filter(struct filter_entry *f)
+{
+ if (f->locked)
+ return -EPERM;
+ if (f->pending)
+ return -EBUSY;
+
+ return 0;
+}
+
+/**
+ * Send CPL_SET_TCB_FIELD message
+ */
+static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
+ u16 word, u64 mask, u64 val, int no_reply)
+{
+ struct rte_mbuf *mbuf;
+ struct cpl_set_tcb_field *req;
+ struct sge_ctrl_txq *ctrlq;
+
+ ctrlq = &adapter->sge.ctrlq[0];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ WARN_ON(!mbuf);
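+ /* Note: a failed allocation is only warned about; the code below
+ * still dereferences mbuf, relying on the ctrl queue pool not being
+ * exhausted.
+ */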
+
+ mbuf->data_len = sizeof(*req);
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ memset(req, 0, sizeof(*req));
+ INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
+ req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
+ V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
+ V_NO_REPLY(no_reply));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+
+ t4_mgmt_tx(ctrlq, mbuf);
+}
+
+/**
+ * Set one of the t_flags bits in the TCB.
+ */
+static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
+ unsigned int bit_pos, unsigned int val, int no_reply)
+{
+ set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
+ (unsigned long long)val << bit_pos, no_reply);
+}
+
+/**
+ * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
+ */
+static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
+ struct cpl_set_tcb_field *req,
+ unsigned int word,
+ u64 mask, u64 val, u8 cookie,
+ int no_reply)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
+ req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
+ V_QUEUENO(0));
+ req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
+ req->mask = cpu_to_be64(mask);
+ req->val = cpu_to_be64(val);
+ sc = (struct ulptx_idata *)(req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
+ * IPv4 requires only 1 slot on all cards.
+ */
+u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
+{
+ if (family == FILTER_TYPE_IPV6) {
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
+ return 4;
+
+ return 2;
+ }
+
+ return 1;
+}
+
+/**
+ * Check if entries are already filled.
+ */
+bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
+{
+ bool result = FALSE;
+ u32 i;
+
+ /* Ensure there's enough slots available. */
+ t4_os_lock(&t->ftid_lock);
+ for (i = fidx; i < fidx + nentries; i++) {
+ if (rte_bitmap_get(t->ftid_bmap, i)) {
+ result = TRUE;
+ break;
+ }
+ }
+ t4_os_unlock(&t->ftid_lock);
+ return result;
+}
+
+/**
+ * Allocate a run of contiguous free entries from the filter tid bitmap.
+ */
+int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
+{
+ struct tid_info *t = &adap->tids;
+ int pos;
+ int size = t->nftids;
+
+ t4_os_lock(&t->ftid_lock);
+ if (nentries > 1)
+ pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
+ nentries);
+ else
+ pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
+ t4_os_unlock(&t->ftid_lock);
+
+ return pos < size ? pos : -1;
+}
+
+/**
+ * Construct the 64-bit hash filter ntuple by packing each enabled
+ * match field at the shift offset reported in adapter->params.tp.
+ */
+static u64 hash_filter_ntuple(const struct filter_entry *f)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+ u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
+
+ if (tp->port_shift >= 0 && f->fs.mask.iport)
+ ntuple |= (u64)f->fs.val.iport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0) {
+ if (!f->fs.val.proto)
+ ntuple |= (u64)tcp_proto << tp->protocol_shift;
+ else
+ ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
+ }
+
+ if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
+ ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
+ if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
+ ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
+ if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
+ ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
+ tp->vlan_shift;
+ if (tp->vnic_shift >= 0) {
+ if ((adap->params.tp.ingress_config & F_VNIC) &&
+ f->fs.mask.pfvf_vld)
+ ntuple |= (u64)(f->fs.val.pfvf_vld << 16 |
+ f->fs.val.pf << 13 | f->fs.val.vf) <<
+ tp->vnic_shift;
+ else if (!(adap->params.tp.ingress_config & F_VNIC) &&
+ f->fs.mask.ovlan_vld)
+ ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
+ f->fs.val.ovlan) << tp->vnic_shift;
+ }
+ if (tp->tos_shift >= 0 && f->fs.mask.tos)
+ ntuple |= (u64)f->fs.val.tos << tp->tos_shift;
+
+ return ntuple;
+}
+
+/**
+ * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_req) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
+ abort_req->rsvd0 = cpu_to_be32(0);
+ abort_req->rsvd1 = 0;
+ abort_req->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_req + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
+ */
+static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
+ unsigned int tid)
+{
+ struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
+ struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
+
+ txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(0));
+ txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ sc->len = cpu_to_be32(sizeof(*abort_rpl) -
+ sizeof(struct work_request_hdr));
+ OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ abort_rpl->rsvd0 = cpu_to_be32(0);
+ abort_rpl->rsvd1 = 0;
+ abort_rpl->cmd = CPL_ABORT_NO_RST;
+ sc = (struct ulptx_idata *)(abort_rpl + 1);
+ sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ sc->len = cpu_to_be32(0);
+}
+
+/**
+ * Delete the specified hash filter.
+ */
+static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
+ unsigned int filter_id,
+ struct filter_ctx *ctx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ if (filter_id > adapter->tids.ntids)
+ return -E2BIG;
+
+ f = lookup_tid(t, filter_id);
+ if (!f) {
+ dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
+ __func__, filter_id);
+ return -EINVAL;
+ }
+
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ unsigned int wrlen;
+ struct rte_mbuf *mbuf;
+ struct work_request_hdr *wr;
+ struct ulptx_idata *aligner;
+ struct cpl_set_tcb_field *req;
+ struct cpl_abort_req *abort_req;
+ struct cpl_abort_rpl *abort_rpl;
+
+ f->ctx = ctx;
+ f->pending = 1;
+
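+ /* The delete is shipped as one ULP_TX work request carrying three
+ * CPLs back to back: SET_TCB_FIELD (to redirect RSS replies to the
+ * FW event queue), ABORT_REQ and ABORT_RPL, rounded up to a
+ * 16-byte multiple.
+ */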
+ wrlen = cxgbe_roundup(sizeof(*wr) +
+ (sizeof(*req) + sizeof(*aligner)) +
+ sizeof(*abort_req) + sizeof(*abort_rpl),
+ 16);
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ dev_err(adapter, "%s: could not allocate skb ..\n",
+ __func__);
+ goto out_err;
+ }
+
+ mbuf->data_len = wrlen;
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
+ INIT_ULPTX_WR(req, wrlen, 0, 0);
+ wr = (struct work_request_hdr *)req;
+ wr++;
+ req = (struct cpl_set_tcb_field *)wr;
+ mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
+ V_TCB_RSS_INFO(M_TCB_RSS_INFO),
+ V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
+ 0, 1);
+ aligner = (struct ulptx_idata *)(req + 1);
+ abort_req = (struct cpl_abort_req *)(aligner + 1);
+ mk_abort_req_ulp(abort_req, f->tid);
+ abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
+ mk_abort_rpl_ulp(abort_rpl, f->tid);
+ t4_mgmt_tx(ctrlq, mbuf);
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/**
+ * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
+ */
+static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req6 *req = NULL;
+ u64 local_lo, local_hi, peer_lo, peer_hi;
+ u32 *lip = (u32 *)f->fs.val.lip;
+ u32 *fip = (u32 *)f->fs.val.fip;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ local_hi = ((u64)lip[1]) << 32 | lip[0];
+ local_lo = ((u64)lip[3]) << 32 | lip[2];
+ peer_hi = ((u64)fip[1]) << 32 | fip[0];
+ peer_lo = ((u64)fip[3]) << 32 | fip[2];
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip_hi = local_hi;
+ req->local_ip_lo = local_lo;
+ req->peer_ip_hi = peer_hi;
+ req->peer_ip_lo = peer_lo;
+ req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_DELACK(f->fs.hitcnts) |
+ V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_SACK_EN(f->fs.swapmac) |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
+ */
+static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
+ unsigned int qid_filterid, struct adapter *adap)
+{
+ struct cpl_t6_act_open_req *req = NULL;
+
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T6:
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
+
+ INIT_TP_WR(req, 0);
+ break;
+ default:
+ dev_err(adap, "%s: unsupported chip type!\n", __func__);
+ return;
+ }
+
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ qid_filterid));
+ req->local_port = cpu_to_be16(f->fs.val.lport);
+ req->peer_port = cpu_to_be16(f->fs.val.fport);
+ req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
+ f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
+ req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
+ f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+ req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_DELACK(f->fs.hitcnts) |
+ V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
+ V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+ << 1) |
+ V_TX_CHAN(f->fs.eport) |
+ V_ULP_MODE(ULP_MODE_NONE) |
+ F_TCAM_BYPASS | F_NON_OFFLOAD);
+ req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+ req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+ V_RSS_QUEUE(f->fs.iq) |
+ F_T5_OPT_2_VALID |
+ F_RX_CHANNEL |
+ V_SACK_EN(f->fs.swapmac) |
+ V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+ (f->fs.dirsteer << 1)) |
+ V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
+}
+
+/**
+ * Set the specified hash filter.
+ */
+static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ struct tid_info *t = &adapter->tids;
+ struct filter_entry *f;
+ struct rte_mbuf *mbuf;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int iq;
+ int atid, size;
+ int ret = 0;
+
+ ret = cxgbe_validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ iq = get_filter_steerq(dev, fs);
+
+ ctrlq = &adapter->sge.ctrlq[pi->port_id];
+
+ f = t4_os_alloc(sizeof(*f));
+ if (!f)
+ goto out_err;
+
+ f->fs = *fs;
+ f->ctx = ctx;
+ f->dev = dev;
+ f->fs.iq = iq;
+
+ /*
+ * If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) {
+ /* allocate L2T entry for new filter */
+ f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
+ if (!f->l2t) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+ }
+
+ /* If the new filter requires Source MAC rewriting then we need to
+ * allocate a SMT entry for the filter
+ */
+ if (f->fs.newsmac) {
+ f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
+ if (!f->smt) {
+ ret = -EAGAIN;
+ goto out_err;
+ }
+ }
+
+ atid = cxgbe_alloc_atid(t, f);
+ if (atid < 0)
+ goto out_err;
+
+ if (f->fs.type == FILTER_TYPE_IPV6) {
+ /* IPv6 hash filter */
+ f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+ if (!f->clipt)
+ goto free_atid;
+
+ size = sizeof(struct cpl_t6_act_open_req6);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_clip;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
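+ /* The TID argument packs the reply queue and the allocated atid:
+ * bits 14 and above select the FW event queue that receives the
+ * ACT_OPEN reply, the low 14 bits carry the atid.
+ */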
+ mk_act_open_req6(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ } else {
+ /* IPv4 hash filter */
+ size = sizeof(struct cpl_t6_act_open_req);
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto free_atid;
+ }
+
+ mbuf->data_len = size;
+ mbuf->pkt_len = mbuf->data_len;
+
+ mk_act_open_req(f, mbuf,
+ ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+ adapter);
+ }
+
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+free_clip:
+ cxgbe_clip_release(f->dev, f->clipt);
+free_atid:
+ cxgbe_free_atid(t, atid);
+
+out_err:
+ t4_os_free(f);
+ return ret;
+}
+
+/**
+ * Clear a filter and release any of its resources that we own. This also
+ * clears the filter's "pending" status.
+ */
+static void clear_filter(struct filter_entry *f)
+{
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ /*
+ * The zeroing of the filter rule below clears the filter valid,
+ * pending, locked flags etc. so it's all we need for
+ * this operation.
+ */
+ memset(f, 0, sizeof(*f));
+}
+
+/**
+ * t4_mk_filtdelwr - create a delete filter WR
+ * @adap: adapter context
+ * @ftid: the filter ID
+ * @wr: the filter work request to populate
+ * @qid: ingress queue to receive the delete notification
+ *
+ * Creates a filter work request to delete the supplied filter. If @qid is
+ * negative the delete notification is suppressed.
+ */
+static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
+ struct fw_filter2_wr *wr, int qid)
+{
+ memset(wr, 0, sizeof(*wr));
+ if (adap->params.filter2_wr_support)
+ wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
+ else
+ wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
+ wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
+ V_FW_FILTER_WR_NOREPLY(qid < 0));
+ wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
+ if (qid >= 0)
+ wr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
+}
+
+/**
+ * Create FW work request to delete the filter at a specified index
+ */
+static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter2_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf)
+ return -ENOMEM;
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
+ t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+}
+
+static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
+{
+ struct adapter *adapter = ethdev2adap(dev);
+ struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
+ struct rte_mbuf *mbuf;
+ struct fw_filter2_wr *fwr;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ int ret;
+
+ /*
+ * If the new filter requires loopback Destination MAC and/or VLAN
+ * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
+ * the filter.
+ */
+ if (f->fs.newvlan || f->fs.newdmac) {
+ /* allocate L2T entry for new filter */
+ f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
+ f->fs.eport, f->fs.dmac);
+
+ if (!f->l2t)
+ return -ENOMEM;
+ }
+
+ /* If the new filter requires Source MAC rewriting then we need to
+ * allocate a SMT entry for the filter
+ */
+ if (f->fs.newsmac) {
+ f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
+ if (!f->smt) {
+ if (f->l2t) {
+ cxgbe_l2t_release(f->l2t);
+ f->l2t = NULL;
+ }
+ return -ENOMEM;
+ }
+ }
+
+ ctrlq = &adapter->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mbuf->data_len = sizeof(*fwr);
+ mbuf->pkt_len = mbuf->data_len;
+
+ fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
+ memset(fwr, 0, sizeof(*fwr));
+
+ /*
+ * Construct the work request to set the filter.
+ */
+ if (adapter->params.filter2_wr_support)
+ fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
+ else
+ fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
+ fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
+ fwr->tid_to_iq =
+ cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
+ V_FW_FILTER_WR_RQTYPE(f->fs.type) |
+ V_FW_FILTER_WR_NOREPLY(0) |
+ V_FW_FILTER_WR_IQ(f->fs.iq));
+ fwr->del_filter_to_l2tix =
+ cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
+ V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
+ V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
+ V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
+ V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
+ V_FW_FILTER_WR_INSVLAN
+ (f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_RMVLAN
+ (f->fs.newvlan == VLAN_REMOVE ||
+ f->fs.newvlan == VLAN_REWRITE) |
+ V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
+ V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
+ V_FW_FILTER_WR_PRIO(f->fs.prio) |
+ V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
+ fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
+ fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
+ fwr->frag_to_ovlan_vldm =
+ (V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
+ V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
+ V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
+ fwr->smac_sel = f->smt ? f->smt->hw_idx : 0;
+ fwr->rx_chan_rx_rpl_iq =
+ cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
+ V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
+ ));
+ fwr->maci_to_matchtypem =
+ cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
+ V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
+ V_FW_FILTER_WR_PORT(f->fs.val.iport) |
+ V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
+ fwr->ptcl = f->fs.val.proto;
+ fwr->ptclm = f->fs.mask.proto;
+ fwr->ttyp = f->fs.val.tos;
+ fwr->ttypm = f->fs.mask.tos;
+ fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
+ fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
+ fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
+ fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
+ rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
+ rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
+ rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
+ rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
+ fwr->lp = cpu_to_be16(f->fs.val.lport);
+ fwr->lpm = cpu_to_be16(f->fs.mask.lport);
+ fwr->fp = cpu_to_be16(f->fs.val.fport);
+ fwr->fpm = cpu_to_be16(f->fs.mask.fport);
+
+ if (adapter->params.filter2_wr_support) {
+ fwr->filter_type_swapmac =
+ V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
+ fwr->natmode_to_ulp_type =
+ V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
+ ULP_MODE_TCPDDP :
+ ULP_MODE_NONE) |
+ V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
+ memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
+ memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
+ fwr->newlport = cpu_to_be16(f->fs.nat_lport);
+ fwr->newfport = cpu_to_be16(f->fs.nat_fport);
+ }
+
+ /*
+ * Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+ */
+ f->pending = 1;
+ t4_mgmt_tx(ctrlq, mbuf);
+ return 0;
+
+out:
+ return ret;
+}
+
+/**
+ * Set the corresponding entries in the bitmap.
+ */
+static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
+{
+ u32 i;
+
+ t4_os_lock(&t->ftid_lock);
+ if (rte_bitmap_get(t->ftid_bmap, fidx)) {
+ t4_os_unlock(&t->ftid_lock);
+ return -EBUSY;
+ }
+
+ for (i = fidx; i < fidx + nentries; i++)
+ rte_bitmap_set(t->ftid_bmap, i);
+ t4_os_unlock(&t->ftid_lock);
+ return 0;
+}
+
+/**
+ * Clear the corresponding entries in the bitmap.
+ */
+static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
+{
+ u32 i;
+
+ t4_os_lock(&t->ftid_lock);
+ for (i = fidx; i < fidx + nentries; i++)
+ rte_bitmap_clear(t->ftid_bmap, i);
+ t4_os_unlock(&t->ftid_lock);
+}
+
+/**
+ * Check a delete filter request for validity and send it to the hardware.
+ * Return 0 on success, an error number otherwise. We attach any provided
+ * filter operation context to the internal filter specification in order to
+ * facilitate signaling completion of the operation.
+ */
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ u8 nentries;
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_del_hash_filter(dev, filter_id, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+	/*
+	 * Ensure the IPv6 filter id is aligned on a 2-slot boundary for T6,
+	 * and on a 4-slot boundary for cards below T6.
+	 */
+ if (fs->type == FILTER_TYPE_IPV6) {
+ if (chip_ver < CHELSIO_T6)
+ filter_id &= ~(0x3);
+ else
+ filter_id &= ~(0x1);
+ }
+
+ nentries = cxgbe_filter_slots(adapter, fs->type);
+ ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
+ if (!ret) {
+		dev_warn(adapter, "%s: could not find filter entry: %u\n",
+			 __func__, filter_id);
+ return -EINVAL;
+ }
+
+ f = &adapter->tids.ftid_tab[filter_id];
+ ret = writable_filter(f);
+ if (ret)
+ return ret;
+
+ if (f->valid) {
+ f->ctx = ctx;
+ cxgbe_clear_ftid(&adapter->tids,
+ f->tid - adapter->tids.ftid_base,
+ nentries);
+ return del_filter_wr(dev, filter_id);
+ }
+
+ /*
+ * If the caller has passed in a Completion Context then we need to
+ * mark it as a successful completion so they don't stall waiting
+ * for it.
+ */
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+
+ return 0;
+}
+
+/**
+ * Check a Chelsio Filter Request for validity, convert it into our internal
+ * format and send it to the hardware. Return 0 on success, an error number
+ * otherwise. We attach any provided filter operation context to the internal
+ * filter specification in order to facilitate signaling completion of the
+ * operation.
+ */
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+ struct adapter *adapter = pi->adapter;
+ u8 nentries, bitoff[16] = {0};
+ struct filter_entry *f;
+ unsigned int chip_ver;
+ unsigned int fidx, iq;
+ u32 iconf;
+ int ret;
+
+ if (is_hashfilter(adapter) && fs->cap)
+ return cxgbe_set_hash_filter(dev, fs, ctx);
+
+ if (filter_id >= adapter->tids.nftids)
+ return -ERANGE;
+
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+
+ ret = cxgbe_validate_filter(adapter, fs);
+ if (ret)
+ return ret;
+
+ /*
+ * IPv6 filters occupy four slots and must be aligned on four-slot
+ * boundaries for T5. On T6, IPv6 filters occupy two-slots and
+ * must be aligned on two-slot boundaries.
+ *
+ * IPv4 filters only occupy a single slot and have no alignment
+ * requirements.
+ */
+ fidx = filter_id;
+ if (fs->type == FILTER_TYPE_IPV6) {
+ if (chip_ver < CHELSIO_T6)
+ fidx &= ~(0x3);
+ else
+ fidx &= ~(0x1);
+ }
+
+ if (fidx != filter_id)
+ return -EINVAL;
+
+ nentries = cxgbe_filter_slots(adapter, fs->type);
+ ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
+ if (ret)
+ return -EBUSY;
+
+ iq = get_filter_steerq(dev, fs);
+
+ /*
+ * Check to make sure that provided filter index is not
+ * already in use by someone else
+ */
+ f = &adapter->tids.ftid_tab[filter_id];
+ if (f->valid)
+ return -EBUSY;
+
+ fidx = adapter->tids.ftid_base + filter_id;
+ ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
+ if (ret)
+ return ret;
+
+ /*
+ * Check to make sure the filter requested is writable ...
+ */
+ ret = writable_filter(f);
+ if (ret) {
+ /* Clear the bits we have set above */
+ cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
+ return ret;
+ }
+
+ /*
+ * Allocate a clip table entry only if we have non-zero IPv6 address
+ */
+ if (chip_ver > CHELSIO_T5 && fs->type &&
+ memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
+ f->clipt = cxgbe_clip_alloc(dev, (u32 *)&fs->val.lip);
+ if (!f->clipt)
+ goto free_tid;
+ }
+
+ /*
+ * Convert the filter specification into our internal format.
+ * We copy the PF/VF specification into the Outer VLAN field
+ * here so the rest of the code -- including the interface to
+ * the firmware -- doesn't have to constantly do these checks.
+ */
+ f->fs = *fs;
+ f->fs.iq = iq;
+ f->dev = dev;
+
+ iconf = adapter->params.tp.ingress_config;
+
+ /* Either PFVF or OVLAN can be active, but not both
+ * So, if PFVF is enabled, then overwrite the OVLAN
+ * fields with PFVF fields before writing the spec
+ * to hardware.
+ */
+ if (iconf & F_VNIC) {
+ f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf;
+ f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf;
+ f->fs.val.ovlan_vld = fs->val.pfvf_vld;
+ f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ }
+
+ /*
+ * Attempt to set the filter. If we don't succeed, we clear
+ * it and return the failure.
+ */
+ f->ctx = ctx;
+ f->tid = fidx; /* Save the actual tid */
+ ret = set_filter_wr(dev, filter_id);
+ if (ret)
+ goto free_tid;
+
+ return ret;
+
+free_tid:
+ cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
+ clear_filter(f);
+ return ret;
+}
+
+/**
+ * Handle a Hash filter write reply.
+ */
+void cxgbe_hash_filter_rpl(struct adapter *adap,
+ const struct cpl_act_open_rpl *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+ unsigned int ftid = G_TID_TID(G_AOPEN_ATID
+ (be32_to_cpu(rpl->atid_status)));
+ unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+
+ f = lookup_atid(t, ftid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %d\n",
+ __func__, ftid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ switch (status) {
+ case CPL_ERR_NONE: {
+ f->tid = tid;
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+
+ cxgbe_insert_tid(t, f, f->tid, 0);
+ cxgbe_free_atid(t, ftid);
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ if (f->fs.hitcnts)
+ set_tcb_field(adap, tid,
+ W_TCB_TIMESTAMP,
+ V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
+ V_TCB_T_RTT_TS_RECENT_AGE
+ (M_TCB_T_RTT_TS_RECENT_AGE),
+ V_TCB_TIMESTAMP(0ULL) |
+ V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
+ 1);
+ if (f->fs.newdmac)
+ set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1);
+ if (f->fs.newvlan == VLAN_INSERT ||
+ f->fs.newvlan == VLAN_REWRITE)
+ set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
+ if (f->fs.newsmac) {
+ set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1);
+ set_tcb_field(adap, tid, W_TCB_SMAC_SEL,
+ V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
+ V_TCB_SMAC_SEL(f->smt->hw_idx), 1);
+ }
+ break;
+ }
+ default:
+ dev_warn(adap, "%s: filter creation failed with status = %u\n",
+ __func__, status);
+
+ if (ctx) {
+ if (status == CPL_ERR_TCAM_FULL)
+ ctx->result = -EAGAIN;
+ else
+ ctx->result = -EINVAL;
+ }
+
+ cxgbe_free_atid(t, ftid);
+ t4_os_free(f);
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+}
+
+/**
+ * Handle a LE-TCAM filter write/deletion reply.
+ */
+void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
+{
+ struct filter_entry *f = NULL;
+ unsigned int tid = GET_TID(rpl);
+ int idx, max_fidx = adap->tids.nftids;
+
+ /* Get the corresponding filter entry for this tid */
+ if (adap->tids.ftid_tab) {
+ /* Check this in normal filter region */
+ idx = tid - adap->tids.ftid_base;
+		if (idx < 0 || idx >= max_fidx)
+ return;
+
+ f = &adap->tids.ftid_tab[idx];
+ if (f->tid != tid)
+ return;
+ }
+
+ /* We found the filter entry for this tid */
+ if (f) {
+ unsigned int ret = G_COOKIE(rpl->cookie);
+ struct filter_ctx *ctx;
+
+ /*
+ * Pull off any filter operation context attached to the
+ * filter.
+ */
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ if (ret == FW_FILTER_WR_FLT_ADDED) {
+ f->pending = 0; /* asynchronous setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->tid = f->tid;
+ ctx->result = 0;
+ }
+ } else if (ret == FW_FILTER_WR_FLT_DELETED) {
+ /*
+ * Clear the filter when we get confirmation from the
+ * hardware that the filter has been deleted.
+ */
+ clear_filter(f);
+ if (ctx)
+ ctx->result = 0;
+ } else {
+ /*
+ * Something went wrong. Issue a warning about the
+ * problem and clear everything out.
+ */
+ dev_warn(adap, "filter %u setup failed with error %u\n",
+ idx, ret);
+ clear_filter(f);
+ if (ctx)
+ ctx->result = -EINVAL;
+ }
+
+ if (ctx)
+ t4_complete(&ctx->completion);
+ }
+}
+
+/*
+ * Retrieve the packet count for the specified filter.
+ */
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte)
+{
+ struct filter_entry *f;
+ unsigned int tcb_base, tcbaddr;
+ int ret;
+
+ tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
+ if (is_hashfilter(adapter) && hash) {
+ if (fidx < adapter->tids.ntids) {
+ f = adapter->tids.tid_tab[fidx];
+ if (!f)
+ return -EINVAL;
+
+ if (is_t5(adapter->params.chip)) {
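+				/* T5 hashfilters don't support hitcounts */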
+ *c = 0;
+ return 0;
+ }
+ tcbaddr = tcb_base + (fidx * TCB_SIZE);
+ goto get_count;
+ } else {
+ return -ERANGE;
+ }
+ } else {
+ if (fidx >= adapter->tids.nftids)
+ return -ERANGE;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ if (!f->valid)
+ return -EINVAL;
+
+ tcbaddr = tcb_base + f->tid * TCB_SIZE;
+ }
+
+get_count:
+ if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
+		/*
+		 * For T5 and T6, the Filter Packet Hit Count is maintained
+		 * as a 32-bit Big Endian value in the TCB field {timestamp}.
+		 * Oddly, instead of showing up at byte offset 20
+		 * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually shows
+		 * up at byte offset 24, hence word_offset 6 in the packet
+		 * count read below. The 64-bit byte count is read at
+		 * word_offset 4 (byte offset 16).
+		 */
+ if (get_byte) {
+ unsigned int word_offset = 4;
+ __be64 be64_byte_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be64_byte_count),
+ &be64_byte_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = be64_to_cpu(be64_byte_count);
+ } else {
+ unsigned int word_offset = 6;
+ __be32 be32_count;
+
+ t4_os_lock(&adapter->win0_lock);
+ ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
+ tcbaddr +
+ (word_offset * sizeof(__be32)),
+ sizeof(be32_count), &be32_count,
+ T4_MEMORY_READ);
+ t4_os_unlock(&adapter->win0_lock);
+ if (ret < 0)
+ return ret;
+ *c = (u64)be32_to_cpu(be32_count);
+ }
+ }
+ return 0;
+}
+
+/*
+ * Clear the packet count for the specified filter.
+ */
+int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
+ int hash, bool clear_byte)
+{
+ u64 tcb_mask = 0, tcb_val = 0;
+ struct filter_entry *f = NULL;
+ u16 tcb_word = 0;
+
+ if (is_hashfilter(adapter) && hash) {
+ if (fidx >= adapter->tids.ntids)
+ return -ERANGE;
+
+ /* No hitcounts supported for T5 hashfilters */
+ if (is_t5(adapter->params.chip))
+ return 0;
+
+ f = adapter->tids.tid_tab[fidx];
+ } else {
+ if (fidx >= adapter->tids.nftids)
+ return -ERANGE;
+
+ f = &adapter->tids.ftid_tab[fidx];
+ }
+
+ if (!f || !f->valid)
+ return -EINVAL;
+
+ tcb_word = W_TCB_TIMESTAMP;
+ tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
+ tcb_val = V_TCB_TIMESTAMP(0ULL);
+
+ set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
+
+ if (clear_byte) {
+ tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
+ tcb_mask =
+ V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
+ V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
+ tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
+ V_TCB_T_RTSEQ_RECENT(0ULL);
+
+ set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
+ }
+
+ return 0;
+}
+
+/**
+ * Handle a Hash filter delete reply.
+ */
+void cxgbe_hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl)
+{
+ struct tid_info *t = &adap->tids;
+ struct filter_entry *f;
+ struct filter_ctx *ctx = NULL;
+ unsigned int tid = GET_TID(rpl);
+
+ f = lookup_tid(t, tid);
+ if (!f) {
+ dev_warn(adap, "%s: could not find filter entry: %u\n",
+ __func__, tid);
+ return;
+ }
+
+ ctx = f->ctx;
+ f->ctx = NULL;
+
+ f->valid = 0;
+
+ if (f->clipt)
+ cxgbe_clip_release(f->dev, f->clipt);
+
+ cxgbe_remove_tid(t, 0, tid, 0);
+ t4_os_free(f);
+
+ if (ctx) {
+ ctx->result = 0;
+ t4_complete(&ctx->completion);
+ }
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h
new file mode 100644
index 000000000..e79c052de
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_filter.h
@@ -0,0 +1,276 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_FILTER_H_
+#define _CXGBE_FILTER_H_
+
+#include "base/t4_msg.h"
+/*
+ * Defined bit width of user definable filter tuples
+ */
+#define ETHTYPE_BITWIDTH 16
+#define FRAG_BITWIDTH 1
+#define MACIDX_BITWIDTH 9
+#define FCOE_BITWIDTH 1
+#define IPORT_BITWIDTH 3
+#define MATCHTYPE_BITWIDTH 3
+#define PROTO_BITWIDTH 8
+#define TOS_BITWIDTH 8
+#define PF_BITWIDTH 3
+#define VF_BITWIDTH 13
+#define IVLAN_BITWIDTH 16
+#define OVLAN_BITWIDTH 16
+
+/*
+ * Filter matching rules. These consist of a set of ingress packet field
+ * (value, mask) tuples. The associated ingress packet field matches the
+ * tuple when ((field & mask) == value). (Thus a wildcard "don't care" field
+ * rule can be constructed by specifying a tuple of (0, 0).) A filter rule
+ * matches an ingress packet when all of the individual field
+ * matching rules are true.
+ *
+ * Partial field masks are always valid, however, while it may be easy to
+ * understand their meanings for some fields (e.g. IP address to match a
+ * subnet), for others making sensible partial masks is less intuitive (e.g.
+ * MPS match type) ...
+ */
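+/*
+ * For example (illustrative), a rule matching TCP traffic from the
+ * 10.1.0.0/16 subnet only needs two field tuples set:
+ *
+ *	fs->val.proto  = IPPROTO_TCP;	fs->mask.proto  = 0xff;
+ *	fs->val.fip[0] = 10;		fs->mask.fip[0] = 0xff;
+ *	fs->val.fip[1] = 1;		fs->mask.fip[1] = 0xff;
+ *
+ * with every other field left as a (0, 0) wildcard.
+ */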
+struct ch_filter_tuple {
+ /*
+ * Compressed header matching field rules. The TP_VLAN_PRI_MAP
+ * register selects which of these fields will participate in the
+ * filter match rules -- up to a maximum of 36 bits. Because
+ * TP_VLAN_PRI_MAP is a global register, all filters must use the same
+ * set of fields.
+ */
+ uint32_t ethtype:ETHTYPE_BITWIDTH; /* Ethernet type */
+ uint32_t frag:FRAG_BITWIDTH; /* IP fragmentation header */
+ uint32_t ivlan_vld:1; /* inner VLAN valid */
+ uint32_t ovlan_vld:1; /* outer VLAN valid */
+ uint32_t pfvf_vld:1; /* PF/VF valid */
+ uint32_t macidx:MACIDX_BITWIDTH; /* exact match MAC index */
+ uint32_t fcoe:FCOE_BITWIDTH; /* FCoE packet */
+ uint32_t iport:IPORT_BITWIDTH; /* ingress port */
+ uint32_t matchtype:MATCHTYPE_BITWIDTH; /* MPS match type */
+ uint32_t proto:PROTO_BITWIDTH; /* protocol type */
+ uint32_t tos:TOS_BITWIDTH; /* TOS/Traffic Type */
+ uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */
+ uint32_t vf:VF_BITWIDTH; /* PCI-E VF ID */
+ uint32_t ivlan:IVLAN_BITWIDTH; /* inner VLAN */
+ uint32_t ovlan:OVLAN_BITWIDTH; /* outer VLAN */
+
+ /*
+ * Uncompressed header matching field rules. These are always
+ * available for field rules.
+ */
+ uint8_t lip[16]; /* local IP address (IPv4 in [3:0]) */
+ uint8_t fip[16]; /* foreign IP address (IPv4 in [3:0]) */
+ uint16_t lport; /* local port */
+ uint16_t fport; /* foreign port */
+
+ /* reservations for future additions */
+ uint8_t rsvd[12];
+};
+
+/*
+ * Filter specification
+ */
+struct ch_filter_specification {
+ void *private;
+ /* Administrative fields for filter. */
+ uint32_t hitcnts:1; /* count filter hits in TCB */
+ uint32_t prio:1; /* filter has priority over active/server */
+
+ /*
+ * Fundamental filter typing. This is the one element of filter
+ * matching that doesn't exist as a (value, mask) tuple.
+ */
+ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */
+ uint32_t cap:1; /* 0 => LE-TCAM, 1 => Hash */
+
+ /*
+ * Packet dispatch information. Ingress packets which match the
+ * filter rules will be dropped, passed to the host or switched back
+ * out as egress packets.
+ */
+ uint32_t action:2; /* drop, pass, switch */
+
+ uint32_t dirsteer:1; /* 0 => RSS, 1 => steer to iq */
+ uint32_t iq:10; /* ingress queue */
+
+ uint32_t eport:2; /* egress port to switch packet out */
+ uint32_t newsmac:1; /* rewrite source MAC address */
+ uint32_t newdmac:1; /* rewrite destination MAC address */
+ uint32_t swapmac:1; /* swap SMAC/DMAC for loopback packet */
+ uint32_t newvlan:2; /* rewrite VLAN Tag */
+ uint8_t smac[RTE_ETHER_ADDR_LEN]; /* new source MAC address */
+ uint8_t dmac[RTE_ETHER_ADDR_LEN]; /* new destination MAC address */
+ uint16_t vlan; /* VLAN Tag to insert */
+
+ /*
+ * Switch proxy/rewrite fields. An ingress packet which matches a
+ * filter with "switch" set will be looped back out as an egress
+ * packet -- potentially with some header rewriting.
+ */
+ uint32_t nat_mode:3; /* specify NAT operation mode */
+
+ uint8_t nat_lip[16]; /* local IP to use after NAT'ing */
+ uint8_t nat_fip[16]; /* foreign IP to use after NAT'ing */
+ uint16_t nat_lport; /* local port number to use after NAT'ing */
+ uint16_t nat_fport; /* foreign port number to use after NAT'ing */
+
+ /* Filter rule value/mask pairs. */
+ struct ch_filter_tuple val;
+ struct ch_filter_tuple mask;
+};
+
+enum {
+ FILTER_PASS = 0, /* default */
+ FILTER_DROP,
+ FILTER_SWITCH
+};
+
+enum {
+ VLAN_REMOVE = 1,
+ VLAN_INSERT,
+ VLAN_REWRITE
+};
+
+enum {
+ NAT_MODE_NONE = 0, /* No NAT performed */
+ NAT_MODE_DIP, /* NAT on Dst IP */
+ NAT_MODE_DIP_DP, /* NAT on Dst IP, Dst Port */
+ NAT_MODE_DIP_DP_SIP, /* NAT on Dst IP, Dst Port and Src IP */
+ NAT_MODE_DIP_DP_SP, /* NAT on Dst IP, Dst Port and Src Port */
+ NAT_MODE_SIP_SP, /* NAT on Src IP and Src Port */
+ NAT_MODE_DIP_SIP_SP, /* NAT on Dst IP, Src IP and Src Port */
+ NAT_MODE_ALL /* NAT on entire 4-tuple */
+};
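+
+/*
+ * The NAT mode is not set directly by users; it is derived from the
+ * combination of rewrite actions in a flow. See ch_rte_parse_nat() in
+ * cxgbe_flow.c for the mapping from the {src_ip, dst_ip, src_port,
+ * dst_port} rewrite bitmask to these modes.
+ */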
+
+enum filter_type {
+ FILTER_TYPE_IPV4 = 0,
+ FILTER_TYPE_IPV6,
+};
+
+struct t4_completion {
+ unsigned int done; /* completion done (0 - No, 1 - Yes) */
+ rte_spinlock_t lock; /* completion lock */
+};
+
+/*
+ * Filter operation context to allow callers to wait for
+ * an asynchronous completion.
+ */
+struct filter_ctx {
+ struct t4_completion completion; /* completion rendezvous */
+ int result; /* result of operation */
+ u32 tid; /* to store tid of hash filter */
+};
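+
+/*
+ * Typical usage (see __cxgbe_flow_create() in cxgbe_flow.c): the caller
+ * initializes the completion, issues the filter work request, and then
+ * polls the firmware event queue for the reply, e.g.:
+ *
+ *	struct filter_ctx ctx;
+ *
+ *	t4_init_completion(&ctx.completion);
+ *	err = cxgbe_set_filter(dev, fidx, fs, &ctx);
+ *	if (!err)
+ *		err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ *						CXGBE_FLOW_POLL_MS,
+ *						CXGBE_FLOW_POLL_CNT,
+ *						&ctx.completion);
+ */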
+
+/*
+ * Host shadow copy of ingress filter entry. This is in host native format
+ * and doesn't match the ordering or bit order, etc. of the hardware or the
+ * firmware command.
+ */
+struct filter_entry {
+ /*
+ * Administrative fields for filter.
+ */
+ u32 valid:1; /* filter allocated and valid */
+ u32 locked:1; /* filter is administratively locked */
+ u32 pending:1; /* filter action is pending FW reply */
+ struct filter_ctx *ctx; /* caller's completion hook */
+ struct clip_entry *clipt; /* CLIP Table entry for IPv6 */
+ struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
+ struct smt_entry *smt; /* Source Mac Table entry for smac */
+ struct rte_eth_dev *dev; /* Port's rte eth device */
+ void *private; /* For use by apps using filter_entry */
+
+ /* This will store the actual tid */
+ u32 tid;
+
+ /*
+ * The filter itself.
+ */
+ struct ch_filter_specification fs;
+};
+
+#define FILTER_ID_MAX (~0U)
+
+struct tid_info;
+struct adapter;
+
+/**
+ * Find the first clear bit in the bitmap; returns 'size' if every bit
+ * in the first 'size' positions is already set.
+ */
+static inline unsigned int cxgbe_find_first_zero_bit(struct rte_bitmap *bmap,
+ unsigned int size)
+{
+ unsigned int idx;
+
+ for (idx = 0; idx < size; idx++)
+ if (!rte_bitmap_get(bmap, idx))
+ break;
+
+ return idx;
+}
+
+/**
+ * Find a free region of 'num' consecutive entries. The search advances
+ * in strides of 'num', so only 'num'-aligned regions are considered
+ * (e.g. with size = 8 and num = 4, only slots [0..3] and [4..7] are
+ * checked). Returns 'size' if no aligned free region exists.
+ */
+static inline unsigned int
+cxgbe_bitmap_find_free_region(struct rte_bitmap *bmap, unsigned int size,
+ unsigned int num)
+{
+ unsigned int idx, j, free = 0;
+
+ if (num > size)
+ return size;
+
+ for (idx = 0; idx < size; idx += num) {
+ for (j = 0; j < num; j++) {
+ if (!rte_bitmap_get(bmap, idx + j)) {
+ free++;
+ } else {
+ free = 0;
+ break;
+ }
+ }
+
+ /* Found the Region */
+ if (free == num)
+ break;
+
+ /* Reached the end and still no region found */
+ if ((idx + num) > size) {
+ idx = size;
+ break;
+ }
+ }
+
+ return idx;
+}
+
+u8 cxgbe_filter_slots(struct adapter *adap, u8 family);
+bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries);
+void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
+int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
+ struct ch_filter_specification *fs,
+ struct filter_ctx *ctx);
+int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries);
+int cxgbe_init_hash_filter(struct adapter *adap);
+void cxgbe_hash_filter_rpl(struct adapter *adap,
+ const struct cpl_act_open_rpl *rpl);
+void cxgbe_hash_del_filter_rpl(struct adapter *adap,
+ const struct cpl_abort_rpl_rss *rpl);
+int cxgbe_validate_filter(struct adapter *adap,
+ struct ch_filter_specification *fs);
+int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
+ u64 *c, int hash, bool get_byte);
+int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
+ int hash, bool clear_byte);
+#endif /* _CXGBE_FILTER_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c
new file mode 100644
index 000000000..166c39ba5
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.c
@@ -0,0 +1,1458 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#include "base/common.h"
+#include "cxgbe_flow.h"
+
+#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
+do { \
+ if ((fs)->mask.elem && ((fs)->val.elem != (__v))) \
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, \
+ NULL, "Redefined match item with" \
+ " different values found"); \
+ (fs)->val.elem = (__v); \
+ (fs)->mask.elem = (__m); \
+} while (0)
+
+#define __CXGBE_FILL_FS_MEMCPY(__v, __m, fs, elem) \
+do { \
+ memcpy(&(fs)->val.elem, &(__v), sizeof(__v)); \
+ memcpy(&(fs)->mask.elem, &(__m), sizeof(__m)); \
+} while (0)
+
+#define CXGBE_FILL_FS(v, m, elem) \
+ __CXGBE_FILL_FS(v, m, fs, elem, e)
+
+#define CXGBE_FILL_FS_MEMCPY(v, m, elem) \
+ __CXGBE_FILL_FS_MEMCPY(v, m, fs, elem)
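+
+/*
+ * Example (illustrative): CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto)
+ * requests an exact match on the IP protocol field, and bails out with
+ * EINVAL if an earlier pattern item already set a conflicting value.
+ */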
+
+static int
+cxgbe_validate_item(const struct rte_flow_item *i, struct rte_flow_error *e)
+{
+ /* rte_flow specification does not allow it. */
+ if (!i->spec && (i->mask || i->last))
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last or mask given without spec");
+ /*
+ * We don't support it.
+ * Although, we can support values in last as 0's or last == spec.
+ * But this will not provide user with any additional functionality
+ * and will only increase the complexity for us.
+ */
+ if (i->last)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "last is not supported by chelsio pmd");
+ return 0;
+}
+
+/**
+ * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
+ * there's only 40-bits available to store match fields.
+ * So, to save space, optimize filter spec for some common
+ * known fields that hardware can parse against incoming
+ * packets automatically.
+ */
+static void
+cxgbe_tweak_filter_spec(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ /* Save 16-bit ethertype field space, by setting corresponding
+ * 1-bit flags in the filter spec for common known ethertypes.
+ * When hardware sees these flags, it automatically infers and
+ * matches incoming packets against the corresponding ethertype.
+ */
+ if (fs->mask.ethtype == 0xffff) {
+ switch (fs->val.ethtype) {
+ case RTE_ETHER_TYPE_IPV4:
+ if (adap->params.tp.ethertype_shift < 0) {
+ fs->type = FILTER_TYPE_IPV4;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ if (adap->params.tp.ethertype_shift < 0) {
+ fs->type = FILTER_TYPE_IPV6;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ case RTE_ETHER_TYPE_VLAN:
+ if (adap->params.tp.ethertype_shift < 0 &&
+ adap->params.tp.vlan_shift >= 0) {
+ fs->val.ivlan_vld = 1;
+ fs->mask.ivlan_vld = 1;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ case RTE_ETHER_TYPE_QINQ:
+ if (adap->params.tp.ethertype_shift < 0 &&
+ adap->params.tp.vnic_shift >= 0) {
+ fs->val.ovlan_vld = 1;
+ fs->mask.ovlan_vld = 1;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
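+
+/*
+ * For example (illustrative), an exact-match ethertype of 0x0800 on a
+ * configuration without an ethertype slot in the compressed tuple is
+ * folded into fs->type = FILTER_TYPE_IPV4 above, freeing 16 bits of
+ * match budget at no loss of precision.
+ */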
+
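+/**
+ * Decide whether a filter can go into the hash (exact-match) region.
+ * Eligibility requires exact-match (all-ones) masks on both IP
+ * addresses and both ports, and the combined compressed-tuple mask
+ * must equal the configured hash_filter_mask; otherwise the filter
+ * stays in the LE-TCAM region (fs->cap remains 0).
+ */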
+static void
+cxgbe_fill_filter_region(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ struct tp_params *tp = &adap->params.tp;
+ u64 hash_filter_mask = tp->hash_filter_mask;
+ u64 ntuple_mask = 0;
+
+ fs->cap = 0;
+
+ if (!is_hashfilter(adap))
+ return;
+
+ if (fs->type) {
+ uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff};
+ uint8_t bitoff[16] = {0};
+
+ if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, biton, sizeof(biton)))
+ return;
+ } else {
+ uint32_t biton = 0xffffffff;
+ uint32_t bitoff = 0x0U;
+
+ if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, &biton, sizeof(biton)))
+ return;
+ }
+
+ if (!fs->val.lport || fs->mask.lport != 0xffff)
+ return;
+ if (!fs->val.fport || fs->mask.fport != 0xffff)
+ return;
+
+ if (tp->protocol_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+ if (tp->ethertype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+ if (tp->port_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+ if (tp->macmatch_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
+ if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
+ ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
+ tp->vlan_shift;
+ if (tp->vnic_shift >= 0) {
+ if (fs->mask.ovlan_vld)
+ ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
+ fs->mask.ovlan) << tp->vnic_shift;
+ else if (fs->mask.pfvf_vld)
+ ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
+ fs->mask.pf << 13 |
+ fs->mask.vf) << tp->vnic_shift;
+ }
+ if (tp->tos_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
+
+ if (ntuple_mask != hash_filter_mask)
+ return;
+
+ fs->cap = 1; /* use hash region */
+}
+
+static int
+ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *umask = item->mask;
+ const struct rte_flow_item_eth *mask;
+
+ /* If user has not given any mask, then use chelsio supported mask. */
+ mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
+
+ if (!spec)
+ return 0;
+
+	/* we don't support SRC_MAC filtering */
+ if (!rte_is_zero_ether_addr(&mask->src))
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "src mac filtering not supported");
+
+ if (!rte_is_zero_ether_addr(&mask->dst)) {
+ const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
+ const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
+ struct rte_flow *flow = (struct rte_flow *)fs->private;
+ struct port_info *pi = (struct port_info *)
+ (flow->dev->data->dev_private);
+ int idx;
+
+ idx = cxgbe_mpstcam_alloc(pi, addr, m);
+ if (idx <= 0)
+ return rte_flow_error_set(e, idx,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "unable to allocate mac"
+ " entry in h/w");
+ CXGBE_FILL_FS(idx, 0x1ff, macidx);
+ }
+
+ CXGBE_FILL_FS(be16_to_cpu(spec->type),
+ be16_to_cpu(mask->type), ethtype);
+
+ return 0;
+}
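+
+/*
+ * Note: a destination MAC match allocates an MPS TCAM entry via
+ * cxgbe_mpstcam_alloc() above; __cxgbe_flow_destroy() releases the
+ * entry again when the flow is torn down.
+ */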
+
+static int
+ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_phy_port *val = item->spec;
+ const struct rte_flow_item_phy_port *umask = item->mask;
+ const struct rte_flow_item_phy_port *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+
+ if (!val)
+ return 0; /* Wildcard, match all physical ports */
+
+ if (val->index > 0x7)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "port index up to 0x7 is supported");
+
+ CXGBE_FILL_FS(val->index, mask->index, iport);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *umask = item->mask;
+ const struct rte_flow_item_vlan *mask;
+
+ /* If user has not given any mask, then use chelsio supported mask. */
+ mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;
+
+ if (!fs->mask.ethtype)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Can't parse VLAN item without knowing ethertype");
+
+ /* If ethertype is already set and is not VLAN (0x8100) or
+ * QINQ(0x88A8), then don't proceed further. Otherwise,
+ * reset the outer ethertype, so that it can be replaced by
+ * innermost ethertype. Note that hardware will automatically
+ * match against VLAN or QINQ packets, based on 'ivlan_vld' or
+ * 'ovlan_vld' bit set in Chelsio filter spec, respectively.
+ */
+	if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
+	    fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
+		return rte_flow_error_set(e, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "Ethertype must be 0x8100 or 0x88a8");
+
+ if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
+ CXGBE_FILL_FS(1, 1, ovlan_vld);
+ if (spec) {
+ CXGBE_FILL_FS(be16_to_cpu(spec->tci),
+ be16_to_cpu(mask->tci), ovlan);
+
+ fs->mask.ethtype = 0;
+ fs->val.ethtype = 0;
+ }
+ } else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
+ CXGBE_FILL_FS(1, 1, ivlan_vld);
+ if (spec) {
+ CXGBE_FILL_FS(be16_to_cpu(spec->tci),
+ be16_to_cpu(mask->tci), ivlan);
+
+ fs->mask.ethtype = 0;
+ fs->val.ethtype = 0;
+ }
+ }
+
+ if (spec)
+ CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
+ be16_to_cpu(mask->inner_type), ethtype);
+
+ return 0;
+}
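+
+/*
+ * For example (illustrative), the pattern ETH(type=0x8100) /
+ * VLAN(tci=4, inner_type=0x0800) ends up in the filter spec as
+ * ivlan_vld = 1, ivlan = 4 and ethtype = 0x0800: hardware matches the
+ * VLAN presence via 'ivlan_vld' while the spec carries the innermost
+ * ethertype.
+ */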
+
+static int
+ch_rte_parsetype_pf(const void *dmask __rte_unused,
+ const struct rte_flow_item *item __rte_unused,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e __rte_unused)
+{
+ struct rte_flow *flow = (struct rte_flow *)fs->private;
+ struct rte_eth_dev *dev = flow->dev;
+ struct adapter *adap = ethdev2adap(dev);
+
+ CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+ CXGBE_FILL_FS(adap->pf, 0x7, pf);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_vf *umask = item->mask;
+ const struct rte_flow_item_vf *val = item->spec;
+ const struct rte_flow_item_vf *mask;
+
+ /* If user has not given any mask, then use chelsio supported mask. */
+ mask = umask ? umask : (const struct rte_flow_item_vf *)dmask;
+
+ CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+ if (!val)
+		return 0; /* Wildcard, match all VFs */
+
+ if (val->id > UCHAR_MAX)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VF ID > MAX(255)");
+
+ CXGBE_FILL_FS(val->id, mask->id, vf);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_udp *val = item->spec;
+ const struct rte_flow_item_udp *umask = item->mask;
+ const struct rte_flow_item_udp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_udp *)dmask;
+
+ if (mask->hdr.dgram_len || mask->hdr.dgram_cksum)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "udp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_UDP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_tcp(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_tcp *val = item->spec;
+ const struct rte_flow_item_tcp *umask = item->mask;
+ const struct rte_flow_item_tcp *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_tcp *)dmask;
+
+ if (mask->hdr.sent_seq || mask->hdr.recv_ack || mask->hdr.data_off ||
+ mask->hdr.tcp_flags || mask->hdr.rx_win || mask->hdr.cksum ||
+ mask->hdr.tcp_urp)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "tcp: only src/dst port supported");
+
+ CXGBE_FILL_FS(IPPROTO_TCP, 0xff, proto);
+ if (!val)
+ return 0;
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.src_port),
+ be16_to_cpu(mask->hdr.src_port), fport);
+ CXGBE_FILL_FS(be16_to_cpu(val->hdr.dst_port),
+ be16_to_cpu(mask->hdr.dst_port), lport);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv4 *val = item->spec;
+ const struct rte_flow_item_ipv4 *umask = item->mask;
+ const struct rte_flow_item_ipv4 *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
+
+ if (mask->hdr.time_to_live)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "ttl is not supported");
+
+ if (fs->mask.ethtype &&
+ (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Couldn't find IPv4 ethertype");
+ fs->type = FILTER_TYPE_IPV4;
+ if (!val)
+ return 0; /* ipv4 wild card */
+
+ CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+ CXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos);
+
+ return 0;
+}
+
+static int
+ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_ipv6 *val = item->spec;
+ const struct rte_flow_item_ipv6 *umask = item->mask;
+ const struct rte_flow_item_ipv6 *mask;
+ u32 vtc_flow, vtc_flow_mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
+
+ vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);
+
+ if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
+ mask->hdr.payload_len || mask->hdr.hop_limits)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "flow/hop are not supported");
+
+ if (fs->mask.ethtype &&
+ (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Couldn't find IPv6 ethertype");
+ fs->type = FILTER_TYPE_IPV6;
+ if (!val)
+ return 0; /* ipv6 wild card */
+
+ CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
+
+ vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
+ CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT,
+ (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT,
+ tos);
+
+ CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
+ CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_attr(struct rte_flow *flow, const struct rte_flow_attr *attr,
+ struct rte_flow_error *e)
+{
+ if (attr->egress)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "attribute:<egress> is"
+					  " not supported!");
+ if (attr->group > 0)
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR,
+ attr, "group parameter is"
+ " not supported.");
+
+ flow->fidx = attr->priority ? attr->priority - 1 : FILTER_ID_MAX;
+
+ return 0;
+}
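+
+/*
+ * Note: attr->priority doubles as an explicit filter index request:
+ * priority N selects LE-TCAM slot N - 1, while priority 0 (the
+ * default) leaves fidx at the FILTER_ID_MAX sentinel, which
+ * cxgbe_get_fidx() later resolves to the next free slot.
+ */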
+
+static inline int check_rxq(struct rte_eth_dev *dev, uint16_t rxq)
+{
+ struct port_info *pi = ethdev2pinfo(dev);
+
+	if (rxq >= pi->n_rx_qsets)
+ return -EINVAL;
+ return 0;
+}
+
+static int cxgbe_validate_fidxondel(struct filter_entry *f, unsigned int fidx)
+{
+ struct adapter *adap = ethdev2adap(f->dev);
+ struct ch_filter_specification fs = f->fs;
+ u8 nentries;
+
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "invalid flow index %d.\n", fidx);
+ return -EINVAL;
+ }
+
+ nentries = cxgbe_filter_slots(adap, fs.type);
+ if (!cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
+ dev_err(adap, "Already free fidx:%d f:%p\n", fidx, f);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_validate_fidxonadd(struct ch_filter_specification *fs,
+ struct adapter *adap, unsigned int fidx)
+{
+ u8 nentries;
+
+ nentries = cxgbe_filter_slots(adap, fs->type);
+ if (cxgbe_is_filter_set(&adap->tids, fidx, nentries)) {
+ dev_err(adap, "filter index: %d is busy.\n", fidx);
+ return -EBUSY;
+ }
+
+ if (fidx >= adap->tids.nftids) {
+ dev_err(adap, "filter index (%u) >= max(%u)\n",
+ fidx, adap->tids.nftids);
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_verify_fidx(struct rte_flow *flow, unsigned int fidx, uint8_t del)
+{
+ if (flow->fs.cap)
+ return 0; /* Hash filters */
+ return del ? cxgbe_validate_fidxondel(flow->f, fidx) :
+ cxgbe_validate_fidxonadd(&flow->fs,
+ ethdev2adap(flow->dev), fidx);
+}
+
+static int cxgbe_get_fidx(struct rte_flow *flow, unsigned int *fidx)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(flow->dev);
+
+ /* For tcam get the next available slot, if default value specified */
+ if (flow->fidx == FILTER_ID_MAX) {
+ u8 nentries;
+ int idx;
+
+ nentries = cxgbe_filter_slots(adap, fs->type);
+ idx = cxgbe_alloc_ftid(adap, nentries);
+ if (idx < 0) {
+ dev_err(adap, "unable to get a filter index in tcam\n");
+ return -ENOMEM;
+ }
+ *fidx = (unsigned int)idx;
+ } else {
+ *fidx = flow->fidx;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_get_flow_item_index(const struct rte_flow_item items[], u32 type)
+{
+ const struct rte_flow_item *i;
+ int j, index = -ENOENT;
+
+ for (i = items, j = 0; i->type != RTE_FLOW_ITEM_TYPE_END; i++, j++) {
+ if (i->type == type) {
+ index = j;
+ break;
+ }
+ }
+
+ return index;
+}
+
+static int
+ch_rte_parse_nat(uint8_t nmode, struct ch_filter_specification *fs)
+{
+ /* nmode:
+ * BIT_0 = [src_ip], BIT_1 = [dst_ip]
+ * BIT_2 = [src_port], BIT_3 = [dst_port]
+ *
+	 * Only the cases below are supported, as per our spec.
+ */
+ switch (nmode) {
+ case 0: /* 0000b */
+ fs->nat_mode = NAT_MODE_NONE;
+ break;
+ case 2: /* 0010b */
+ fs->nat_mode = NAT_MODE_DIP;
+ break;
+ case 5: /* 0101b */
+ fs->nat_mode = NAT_MODE_SIP_SP;
+ break;
+ case 7: /* 0111b */
+ fs->nat_mode = NAT_MODE_DIP_SIP_SP;
+ break;
+ case 10: /* 1010b */
+ fs->nat_mode = NAT_MODE_DIP_DP;
+ break;
+ case 11: /* 1011b */
+ fs->nat_mode = NAT_MODE_DIP_DP_SIP;
+ break;
+ case 14: /* 1110b */
+ fs->nat_mode = NAT_MODE_DIP_DP_SP;
+ break;
+ case 15: /* 1111b */
+ fs->nat_mode = NAT_MODE_ALL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
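+
+/*
+ * For example (illustrative), a flow carrying the SET_IPV4_SRC and
+ * SET_TP_SRC actions accumulates nmode = 0101b, which maps to
+ * NAT_MODE_SIP_SP (rewrite source IP and source port) above.
+ */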
+
+static int
+ch_rte_parse_atype_switch(const struct rte_flow_action *a,
+ const struct rte_flow_item items[],
+ uint8_t *nmode,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_action_of_set_vlan_vid *vlanid;
+ const struct rte_flow_action_of_set_vlan_pcp *vlanpcp;
+ const struct rte_flow_action_of_push_vlan *pushvlan;
+ const struct rte_flow_action_set_ipv4 *ipv4;
+ const struct rte_flow_action_set_ipv6 *ipv6;
+ const struct rte_flow_action_set_tp *tp_port;
+ const struct rte_flow_action_phy_port *port;
+ const struct rte_flow_action_set_mac *mac;
+ int item_index;
+ u16 tmp_vlan;
+
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ vlanid = (const struct rte_flow_action_of_set_vlan_vid *)
+ a->conf;
+ /* If explicitly asked to push a new VLAN header,
+ * then don't set rewrite mode. Otherwise, the
+ * incoming VLAN packets will get their VLAN fields
+ * rewritten, instead of adding an additional outer
+ * VLAN header.
+ */
+ if (fs->newvlan != VLAN_INSERT)
+ fs->newvlan = VLAN_REWRITE;
+ tmp_vlan = fs->vlan & 0xe000;
+ fs->vlan = (be16_to_cpu(vlanid->vlan_vid) & 0xfff) | tmp_vlan;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ vlanpcp = (const struct rte_flow_action_of_set_vlan_pcp *)
+ a->conf;
+ /* If explicitly asked to push a new VLAN header,
+ * then don't set rewrite mode. Otherwise, the
+ * incoming VLAN packets will get their VLAN fields
+ * rewritten, instead of adding an additional outer
+ * VLAN header.
+ */
+ if (fs->newvlan != VLAN_INSERT)
+ fs->newvlan = VLAN_REWRITE;
+ tmp_vlan = fs->vlan & 0xfff;
+ fs->vlan = (vlanpcp->vlan_pcp << 13) | tmp_vlan;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ pushvlan = (const struct rte_flow_action_of_push_vlan *)
+ a->conf;
+ if (be16_to_cpu(pushvlan->ethertype) != RTE_ETHER_TYPE_VLAN)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "only ethertype 0x8100 "
+ "supported for push vlan.");
+ fs->newvlan = VLAN_INSERT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ fs->newvlan = VLAN_REMOVE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ port = (const struct rte_flow_action_phy_port *)a->conf;
+ fs->eport = port->index;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_IPV4);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_IPV4 "
+ "found.");
+
+ ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
+ memcpy(fs->nat_fip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
+ *nmode |= 1 << 0;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_IPV4);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_IPV4 "
+ "found.");
+
+ ipv4 = (const struct rte_flow_action_set_ipv4 *)a->conf;
+ memcpy(fs->nat_lip, &ipv4->ipv4_addr, sizeof(ipv4->ipv4_addr));
+ *nmode |= 1 << 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_IPV6);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_IPV6 "
+ "found.");
+
+ ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
+ memcpy(fs->nat_fip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+ *nmode |= 1 << 0;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_IPV6);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_IPV6 "
+ "found.");
+
+ ipv6 = (const struct rte_flow_action_set_ipv6 *)a->conf;
+ memcpy(fs->nat_lip, ipv6->ipv6_addr, sizeof(ipv6->ipv6_addr));
+ *nmode |= 1 << 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_TCP);
+ if (item_index < 0) {
+ item_index =
+ cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_UDP);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_TCP or "
+ "RTE_FLOW_ITEM_TYPE_UDP found");
+ }
+
+ tp_port = (const struct rte_flow_action_set_tp *)a->conf;
+ fs->nat_fport = be16_to_cpu(tp_port->port);
+ *nmode |= 1 << 2;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_TCP);
+ if (item_index < 0) {
+ item_index =
+ cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_UDP);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_TCP or "
+ "RTE_FLOW_ITEM_TYPE_UDP found");
+ }
+
+ tp_port = (const struct rte_flow_action_set_tp *)a->conf;
+ fs->nat_lport = be16_to_cpu(tp_port->port);
+ *nmode |= 1 << 3;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_ETH);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_ETH "
+ "found");
+ fs->swapmac = 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_ETH);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_ETH "
+ "found");
+ mac = (const struct rte_flow_action_set_mac *)a->conf;
+
+ fs->newsmac = 1;
+ memcpy(fs->smac, mac->mac_addr, sizeof(fs->smac));
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_ETH);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_ETH found");
+ mac = (const struct rte_flow_action_set_mac *)a->conf;
+
+ fs->newdmac = 1;
+ memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
+ break;
+ default:
+ /* We are not supposed to come here */
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Action not supported");
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_rtef_parse_actions(struct rte_flow *flow,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ uint8_t nmode = 0, nat_ipv4 = 0, nat_ipv6 = 0;
+ uint8_t vlan_set_vid = 0, vlan_set_pcp = 0;
+ const struct rte_flow_action_queue *q;
+ const struct rte_flow_action *a;
+ char abit = 0;
+ int ret;
+
+ for (a = action; a->type != RTE_FLOW_ACTION_TYPE_END; a++) {
+ switch (a->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ continue;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ q = (const struct rte_flow_action_queue *)a->conf;
+ if (!q)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "specify rx queue index");
+ if (check_rxq(flow->dev, q->index))
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, q,
+ "Invalid rx queue");
+ if (abit++)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "specify only 1 pass/drop");
+ fs->action = FILTER_PASS;
+ fs->dirsteer = 1;
+ fs->iq = q->index;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ fs->hitcnts = 1;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ vlan_set_vid++;
+ goto action_switch;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ vlan_set_pcp++;
+ goto action_switch;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ case RTE_FLOW_ACTION_TYPE_PHY_PORT:
+ case RTE_FLOW_ACTION_TYPE_MAC_SWAP:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ nat_ipv4++;
+ goto action_switch;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ nat_ipv6++;
+ goto action_switch;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+action_switch:
+ /* We allow multiple switch actions, but switch is
+ * not compatible with either queue or drop
+ */
+ if (abit++ && fs->action != FILTER_SWITCH)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "overlapping action specified");
+ if (nat_ipv4 && nat_ipv6)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Can't have one address ipv4 and the"
+ " other ipv6");
+
+ ret = ch_rte_parse_atype_switch(a, items, &nmode, fs,
+ e);
+ if (ret)
+ return ret;
+ fs->action = FILTER_SWITCH;
+ break;
+ default:
+ /* Not supported action : return error */
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ a, "Action not supported");
+ }
+ }
+
+ if (fs->newvlan == VLAN_REWRITE && (!vlan_set_vid || !vlan_set_pcp))
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "Both OF_SET_VLAN_VID and "
+ "OF_SET_VLAN_PCP must be specified");
+
+ if (ch_rte_parse_nat(nmode, fs))
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+					  "invalid settings for switch action");
+ return 0;
+}
+
+static struct chrte_fparse parseitem[] = {
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .fptr = ch_rte_parsetype_eth,
+ .dmask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0xffff,
+ }
+ },
+
+ [RTE_FLOW_ITEM_TYPE_PHY_PORT] = {
+ .fptr = ch_rte_parsetype_port,
+ .dmask = &(const struct rte_flow_item_phy_port){
+ .index = 0x7,
+ }
+ },
+
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .fptr = ch_rte_parsetype_vlan,
+ .dmask = &(const struct rte_flow_item_vlan){
+ .tci = 0xffff,
+ .inner_type = 0xffff,
+ }
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .fptr = ch_rte_parsetype_ipv4,
+ .dmask = &(const struct rte_flow_item_ipv4) {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ },
+ },
+ },
+
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .fptr = ch_rte_parsetype_ipv6,
+ .dmask = &(const struct rte_flow_item_ipv6) {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xff000000),
+ },
+ },
+ },
+
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .fptr = ch_rte_parsetype_udp,
+ .dmask = &rte_flow_item_udp_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .fptr = ch_rte_parsetype_tcp,
+ .dmask = &rte_flow_item_tcp_mask,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_PF] = {
+ .fptr = ch_rte_parsetype_pf,
+ .dmask = NULL,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_VF] = {
+ .fptr = ch_rte_parsetype_vf,
+ .dmask = &(const struct rte_flow_item_vf){
+ .id = 0xffffffff,
+ }
+ },
+};
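+
+/*
+ * parseitem[] is indexed directly by RTE_FLOW_ITEM_TYPE_* values via
+ * designated initializers; cxgbe_rtef_parse_items() below rejects any
+ * item type >= ARRAY_SIZE(parseitem) before dereferencing the table.
+ */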
+
+static int
+cxgbe_rtef_parse_items(struct rte_flow *flow,
+ const struct rte_flow_item items[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ const struct rte_flow_item *i;
+ char repeat[ARRAY_SIZE(parseitem)] = {0};
+
+ for (i = items; i->type != RTE_FLOW_ITEM_TYPE_END; i++) {
+ struct chrte_fparse *idx;
+ int ret;
+
+ if (i->type >= ARRAY_SIZE(parseitem))
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ i, "Item not supported");
+
+ switch (i->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ continue;
+ default:
+ /* check if item is repeated */
+ if (repeat[i->type] &&
+ i->type != RTE_FLOW_ITEM_TYPE_VLAN)
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+					"parse items cannot be repeated (except void/vlan)");
+
+ repeat[i->type] = 1;
+
+ /* validate the item */
+ ret = cxgbe_validate_item(i, e);
+ if (ret)
+ return ret;
+
+ idx = &flow->item_parser[i->type];
+ if (!idx || !idx->fptr) {
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, i,
+ "Item not supported");
+ } else {
+ ret = idx->fptr(idx->dmask, i, &flow->fs, e);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ cxgbe_fill_filter_region(adap, &flow->fs);
+ cxgbe_tweak_filter_spec(adap, &flow->fs);
+
+ return 0;
+}
+
+static int
+cxgbe_flow_parse(struct rte_flow *flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ int ret;
+ /* parse user request into ch_filter_specification */
+ ret = cxgbe_rtef_parse_attr(flow, attr, e);
+ if (ret)
+ return ret;
+ ret = cxgbe_rtef_parse_items(flow, item, e);
+ if (ret)
+ return ret;
+ return cxgbe_rtef_parse_actions(flow, item, action, e);
+}
+
+static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct ch_filter_specification *fs = &flow->fs;
+ struct adapter *adap = ethdev2adap(dev);
+ struct tid_info *t = &adap->tids;
+ struct filter_ctx ctx;
+ unsigned int fidx;
+ int err;
+
+ if (cxgbe_get_fidx(flow, &fidx))
+ return -ENOMEM;
+ if (cxgbe_verify_fidx(flow, fidx, 0))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ /* go create the filter */
+ err = cxgbe_set_filter(dev, fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while creating filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_MS,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter set operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while creating the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+	if (fs->cap) { /* Hash filter: save fw-assigned tid for later destroy */
+ flow->fidx = ctx.tid;
+ flow->f = lookup_tid(t, ctx.tid);
+ } else {
+ flow->fidx = fidx;
+ flow->f = &adap->tids.ftid_tab[fidx];
+ }
+
+ return 0;
+}
+
+static struct rte_flow *
+cxgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct rte_flow *flow;
+ int ret;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow) {
+ rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to allocate memory for"
+ " filter_entry");
+ return NULL;
+ }
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+ flow->fs.private = (void *)flow;
+
+ if (cxgbe_flow_parse(flow, attr, item, action, e)) {
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ t4_os_lock(&adap->flow_lock);
+ /* go, interact with cxgbe_filter */
+ ret = __cxgbe_flow_create(dev, flow);
+ t4_os_unlock(&adap->flow_lock);
+ if (ret) {
+ rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Unable to create flow rule");
+ t4_os_free(flow);
+ return NULL;
+ }
+
+ flow->f->private = flow; /* Will be used during flush */
+
+ return flow;
+}
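+
+/*
+ * Note: cxgbe_flow_create() is reached through the generic rte_flow API
+ * once the cxgbe_flow_ops table at the bottom of this file has been
+ * registered via the driver's filter-control hook, e.g. (illustrative):
+ *
+ *	struct rte_flow_error err;
+ *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
+ *						actions, &err);
+ */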
+
+static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct filter_entry *f = flow->f;
+ struct ch_filter_specification *fs;
+ struct filter_ctx ctx;
+ int err;
+
+ fs = &f->fs;
+ if (cxgbe_verify_fidx(flow, flow->fidx, 1))
+ return -1;
+
+ t4_init_completion(&ctx.completion);
+ err = cxgbe_del_filter(dev, flow->fidx, fs, &ctx);
+ if (err) {
+ dev_err(adap, "Error %d while deleting filter.\n", err);
+ return err;
+ }
+
+ /* Poll the FW for reply */
+ err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
+ CXGBE_FLOW_POLL_MS,
+ CXGBE_FLOW_POLL_CNT,
+ &ctx.completion);
+ if (err) {
+ dev_err(adap, "Filter delete operation timed out (%d)\n", err);
+ return err;
+ }
+ if (ctx.result) {
+ dev_err(adap, "Hardware error %d while deleting the filter.\n",
+ ctx.result);
+ return ctx.result;
+ }
+
+ fs = &flow->fs;
+ if (fs->mask.macidx) {
+ struct port_info *pi = (struct port_info *)
+ (dev->data->dev_private);
+ int ret;
+
+ ret = cxgbe_mpstcam_remove(pi, fs->val.macidx);
+ if (!ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ int ret;
+
+ t4_os_lock(&adap->flow_lock);
+ ret = __cxgbe_flow_destroy(dev, flow);
+ t4_os_unlock(&adap->flow_lock);
+ if (ret)
+ return rte_flow_error_set(e, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+ flow, "error destroying filter.");
+ t4_os_free(flow);
+ return 0;
+}
+
+static int __cxgbe_flow_query(struct rte_flow *flow, u64 *count,
+ u64 *byte_count)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ struct ch_filter_specification fs = flow->f->fs;
+ unsigned int fidx = flow->fidx;
+ int ret = 0;
+
+ ret = cxgbe_get_filter_count(adap, fidx, count, fs.cap, 0);
+ if (ret)
+ return ret;
+ return cxgbe_get_filter_count(adap, fidx, byte_count, fs.cap, 1);
+}
+
+static int
+cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *action, void *data,
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(flow->dev);
+ struct ch_filter_specification fs;
+ struct rte_flow_query_count *c;
+ struct filter_entry *f;
+ int ret;
+
+ RTE_SET_USED(dev);
+
+ f = flow->f;
+ fs = f->fs;
+
+ if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
+ return rte_flow_error_set(e, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "only count supported for query");
+
+	/*
+	 * This is a valid operation, since we are allowed to do chelsio
+	 * specific operations on the rte side of our code, but not
+	 * vice-versa.
+	 *
+	 * So, fs can be queried/modified here, BUT rte_flow_query_count
+	 * cannot be worked on by the lower layer, since we want to keep
+	 * it rte_flow agnostic.
+	 */
+ if (!fs.hitcnts)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ &fs, "filter hit counters were not"
+ " enabled during filter creation");
+
+ c = (struct rte_flow_query_count *)data;
+
+ t4_os_lock(&adap->flow_lock);
+ ret = __cxgbe_flow_query(flow, &c->hits, &c->bytes);
+ if (ret) {
+ rte_flow_error_set(e, -ret, RTE_FLOW_ERROR_TYPE_ACTION,
+ f, "cxgbe pmd failed to perform query");
+ goto out;
+ }
+
+ /* Query was successful */
+ c->bytes_set = 1;
+ c->hits_set = 1;
+ if (c->reset)
+ cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
+
+out:
+ t4_os_unlock(&adap->flow_lock);
+ return ret;
+}
+
+static int
+cxgbe_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item item[],
+ const struct rte_flow_action action[],
+ struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct rte_flow *flow;
+ unsigned int fidx;
+ int ret = 0;
+
+ flow = t4_os_alloc(sizeof(struct rte_flow));
+ if (!flow)
+ return rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "Unable to allocate memory for filter_entry");
+
+ flow->item_parser = parseitem;
+ flow->dev = dev;
+
+ ret = cxgbe_flow_parse(flow, attr, item, action, e);
+ if (ret) {
+ t4_os_free(flow);
+ return ret;
+ }
+
+ if (cxgbe_validate_filter(adap, &flow->fs)) {
+ t4_os_free(flow);
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "validation failed. Check f/w config file.");
+ }
+
+ t4_os_lock(&adap->flow_lock);
+ if (cxgbe_get_fidx(flow, &fidx)) {
+ ret = rte_flow_error_set(e, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no memory in tcam.");
+ goto out;
+ }
+
+ if (cxgbe_verify_fidx(flow, fidx, 0)) {
+ ret = rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "validation failed");
+ goto out;
+ }
+
+out:
+ t4_os_unlock(&adap->flow_lock);
+ t4_os_free(flow);
+ return ret;
+}
+
+/*
+ * @ret : == 0 filter destroyed successfully
+ * < 0 error destroying filter
+ * == 1 filter not active / not found
+ */
+static int
+cxgbe_check_n_destroy(struct filter_entry *f, struct rte_eth_dev *dev)
+{
+ if (f && (f->valid || f->pending) &&
+ f->dev == dev && /* Only if user has asked for this port */
+ f->private) /* We (rte_flow) created this filter */
+ return __cxgbe_flow_destroy(dev, (struct rte_flow *)f->private);
+ return 1;
+}
+
+static int cxgbe_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *e)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ unsigned int i;
+ int ret = 0;
+
+ t4_os_lock(&adap->flow_lock);
+ if (adap->tids.ftid_tab) {
+ struct filter_entry *f = &adap->tids.ftid_tab[0];
+
+ for (i = 0; i < adap->tids.nftids; i++, f++) {
+ ret = cxgbe_check_n_destroy(f, dev);
+ if (ret < 0) {
+ rte_flow_error_set(e, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ f->private,
+ "error destroying TCAM "
+ "filter.");
+ goto out;
+ }
+ }
+ }
+
+ if (is_hashfilter(adap) && adap->tids.tid_tab) {
+ struct filter_entry *f;
+
+ /* tid_tab[] holds ntids entries, so the last valid index is ntids - 1 */
+ for (i = adap->tids.hash_base; i < adap->tids.ntids; i++) {
+ f = (struct filter_entry *)adap->tids.tid_tab[i];
+
+ ret = cxgbe_check_n_destroy(f, dev);
+ if (ret < 0) {
+ rte_flow_error_set(e, ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ f->private,
+ "error destroying HASH "
+ "filter.");
+ goto out;
+ }
+ }
+ }
+
+out:
+ t4_os_unlock(&adap->flow_lock);
+ return ret >= 0 ? 0 : ret;
+}
+
+static const struct rte_flow_ops cxgbe_flow_ops = {
+ .validate = cxgbe_flow_validate,
+ .create = cxgbe_flow_create,
+ .destroy = cxgbe_flow_destroy,
+ .flush = cxgbe_flow_flush,
+ .query = cxgbe_flow_query,
+ .isolate = NULL,
+};
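+
+/*
+ * Applications reach these callbacks through the generic rte_flow API:
+ * e.g. rte_flow_create(port_id, &attr, pattern, actions, &err) ends up
+ * in cxgbe_flow_create() once cxgbe_dev_filter_ctrl() below has handed
+ * this ops table to the ethdev layer.
+ */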
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ RTE_SET_USED(dev);
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &cxgbe_flow_ops;
+ break;
+ default:
+ ret = -ENOTSUP;
+ break;
+ }
+ return ret;
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h
new file mode 100644
index 000000000..ec8e47aeb
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_flow.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#ifndef _CXGBE_FLOW_H_
+#define _CXGBE_FLOW_H_
+
+#include <rte_flow_driver.h>
+#include "cxgbe_filter.h"
+#include "mps_tcam.h"
+#include "cxgbe.h"
+
+/* Max poll time is 100 * 100msec = 10 sec */
+#define CXGBE_FLOW_POLL_MS 100 /* 100 milliseconds */
+#define CXGBE_FLOW_POLL_CNT 100 /* Max number of times to poll */
+
+struct chrte_fparse {
+ int (*fptr)(const void *mask, /* currently supported mask */
+ const struct rte_flow_item *item, /* user input */
+ struct ch_filter_specification *fs, /* filter spec to fill in */
+ struct rte_flow_error *e);
+ const void *dmask; /* default mask supported by Chelsio */
+};
+
+struct rte_flow {
+ struct filter_entry *f;
+ struct ch_filter_specification fs; /* temp, to create filter */
+ struct chrte_fparse *item_parser;
+ /*
+ * filter_entry doesn't store the user priority.
+ * After the filter is created, this holds the flow index (fidx)
+ * for both hash and TCAM filters.
+ */
+ unsigned int fidx;
+ struct rte_eth_dev *dev;
+};
+
+int
+cxgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+
+#endif /* _CXGBE_FLOW_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c
new file mode 100644
index 000000000..a541d95cc
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_main.c
@@ -0,0 +1,2238 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+
+#include "base/common.h"
+#include "base/t4_regs.h"
+#include "base/t4_msg.h"
+#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+#include "clip_tbl.h"
+#include "l2t.h"
+#include "smt.h"
+#include "mps_tcam.h"
+
+static const u16 cxgbe_filter_mode_features[] = {
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE |
+ F_PROTOCOL | F_PORT),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE |
+ F_PROTOCOL | F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS |
+ F_PORT),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PORT |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_TOS |
+ F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VLAN | F_PORT |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_MACMATCH | F_VNIC_ID | F_PORT |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_PROTOCOL | F_TOS |
+ F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID |
+ F_PORT),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VLAN | F_PORT |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_PROTOCOL | F_VNIC_ID | F_PORT |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VLAN | F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_FCOE),
+ (F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT |
+ F_FCOE),
+ (F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VLAN | F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE),
+ (F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VLAN | F_FCOE),
+ (F_FRAGMENTATION | F_PROTOCOL | F_TOS | F_VNIC_ID | F_FCOE),
+ (F_FRAGMENTATION | F_VLAN | F_VNIC_ID | F_PORT | F_FCOE),
+ (F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_PROTOCOL | F_PORT |
+ F_FCOE),
+ (F_MPSHITTYPE | F_MACMATCH | F_ETHERTYPE | F_TOS | F_PORT | F_FCOE),
+ (F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VLAN | F_PORT),
+ (F_MPSHITTYPE | F_MACMATCH | F_PROTOCOL | F_VNIC_ID | F_PORT),
+ (F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VLAN | F_PORT),
+ (F_MPSHITTYPE | F_MACMATCH | F_TOS | F_VNIC_ID | F_PORT),
+ (F_MPSHITTYPE | F_ETHERTYPE | F_VLAN | F_PORT | F_FCOE),
+ (F_MPSHITTYPE | F_ETHERTYPE | F_VNIC_ID | F_PORT | F_FCOE),
+ (F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VLAN | F_PORT | F_FCOE),
+ (F_MPSHITTYPE | F_PROTOCOL | F_TOS | F_VNIC_ID | F_PORT | F_FCOE),
+ (F_MPSHITTYPE | F_VLAN | F_VNIC_ID | F_PORT),
+};
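+
+/*
+ * Each entry above is one supported combination of header fields that
+ * the hardware can match on concurrently.
+ * cxgbe_get_filter_mode_from_devargs() scans this table to validate
+ * (or closest-match) the combination requested via the
+ * filtermode/filtermask devargs.
+ */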
+
+/**
+ * Allocate a chunk of memory. The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+ return rte_zmalloc(NULL, size, 0);
+}
+
+/**
+ * Free memory allocated through t4_alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+ rte_free(addr);
+}
+
+/*
+ * Response queue handler for the FW event queue.
+ */
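+/*
+ * Control-plane replies land here: filter and TCB write replies,
+ * hash-filter open/abort replies, and L2T/SMT write replies are all
+ * dispatched to their handlers below.
+ */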
+static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+ __rte_unused const struct pkt_gl *gl)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ rsp++; /* skip RSS header */
+
+ /*
+ * FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
+ */
+ if (unlikely(opcode == CPL_FW4_MSG &&
+ ((const struct cpl_fw4_msg *)rsp)->type ==
+ FW_TYPE_RSSCPL)) {
+ rsp++;
+ opcode = ((const struct rss_header *)rsp)->opcode;
+ rsp++;
+ if (opcode != CPL_SGE_EGR_UPDATE) {
+ dev_err(q->adapter, "unexpected FW4/CPL %#x on FW event queue\n",
+ opcode);
+ goto out;
+ }
+ }
+
+ if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
+ /* do nothing */
+ } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
+ const struct cpl_fw6_msg *msg = (const void *)rsp;
+
+ t4_handle_fw_rpl(q->adapter, msg->data);
+ } else if (opcode == CPL_ABORT_RPL_RSS) {
+ const struct cpl_abort_rpl_rss *p = (const void *)rsp;
+
+ cxgbe_hash_del_filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_SET_TCB_RPL) {
+ const struct cpl_set_tcb_rpl *p = (const void *)rsp;
+
+ cxgbe_filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_ACT_OPEN_RPL) {
+ const struct cpl_act_open_rpl *p = (const void *)rsp;
+
+ cxgbe_hash_filter_rpl(q->adapter, p);
+ } else if (opcode == CPL_L2T_WRITE_RPL) {
+ const struct cpl_l2t_write_rpl *p = (const void *)rsp;
+
+ cxgbe_do_l2t_write_rpl(q->adapter, p);
+ } else if (opcode == CPL_SMT_WRITE_RPL) {
+ const struct cpl_smt_write_rpl *p = (const void *)rsp;
+
+ cxgbe_do_smt_write_rpl(q->adapter, p);
+ } else {
+ dev_err(q->adapter, "unexpected CPL %#x on FW event queue\n",
+ opcode);
+ }
+out:
+ return 0;
+}
+
+/**
+ * Setup sge control queues to pass control information.
+ */
+int cxgbe_setup_sge_ctrl_txq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err = 0, i = 0;
+
+ for_each_port(adapter, i) {
+ struct port_info *pi = adap2pinfo(adapter, i);
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct sge_ctrl_txq *q = &s->ctrlq[i];
+
+ q->q.size = 1024;
+ err = t4_sge_alloc_ctrl_txq(adapter, q,
+ adapter->eth_dev, i,
+ s->fw_evtq.cntxt_id,
+ rte_socket_id());
+ if (err) {
+ dev_err(adapter, "Failed to alloc ctrl txq. Err: %d",
+ err);
+ goto out;
+ }
+ snprintf(name, sizeof(name), "%s_ctrl_pool_%d",
+ pi->eth_dev->device->driver->name,
+ pi->eth_dev->data->port_id);
+ q->mb_pool = rte_pktmbuf_pool_create(name, s->ctrlq[i].q.size,
+ RTE_CACHE_LINE_SIZE,
+ RTE_MBUF_PRIV_ALIGN,
+ RTE_MBUF_DEFAULT_BUF_SIZE,
+ SOCKET_ID_ANY);
+ if (!q->mb_pool) {
+ err = -rte_errno;
+ dev_err(adapter,
+ "Can't create ctrl pool for port %d. Err: %d\n",
+ pi->eth_dev->data->port_id, err);
+ goto out;
+ }
+ }
+ return 0;
+out:
+ t4_free_sge_resources(adapter);
+ return err;
+}
+
+/**
+ * cxgbe_poll_for_completion: Poll rxq for completion
+ * @q: rxq to poll
+ * @ms: milliseconds to delay
+ * @cnt: number of times to poll
+ * @c: completion to check for 'done' status
+ *
+ * Polls the rxq for replies until the completion is done or the
+ * count expires.
+ */
+int cxgbe_poll_for_completion(struct sge_rspq *q, unsigned int ms,
+ unsigned int cnt, struct t4_completion *c)
+{
+ unsigned int i;
+ unsigned int work_done, budget = 32;
+
+ if (!c)
+ return -EINVAL;
+
+ for (i = 0; i < cnt; i++) {
+ cxgbe_poll(q, NULL, budget, &work_done);
+ t4_os_lock(&c->lock);
+ if (c->done) {
+ t4_os_unlock(&c->lock);
+ return 0;
+ }
+ t4_os_unlock(&c->lock);
+ rte_delay_ms(ms);
+ }
+ return -ETIMEDOUT;
+}
+
+int cxgbe_setup_sge_fwevtq(struct adapter *adapter)
+{
+ struct sge *s = &adapter->sge;
+ int err = 0;
+ int msi_idx = 0;
+
+ err = t4_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->eth_dev,
+ msi_idx, NULL, fwevtq_handler, -1, NULL, 0,
+ rte_socket_id());
+ return err;
+}
+
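+/*
+ * Linear nearest-match helpers: return the index of the SGE holdoff
+ * timer (or packet-count threshold) value closest to the request.
+ */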
+static int closest_timer(const struct sge *s, int time)
+{
+ unsigned int i, match = 0;
+ int delta, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static int closest_thres(const struct sge *s, int thres)
+{
+ unsigned int i, match = 0;
+ int delta, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = thres - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+/**
+ * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
+ * @q: the Rx queue
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Sets an Rx queue's interrupt hold-off time and packet count. At least
+ * one of the two needs to be enabled for the queue to generate interrupts.
+ */
+int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
+ unsigned int cnt)
+{
+ struct adapter *adap = q->adapter;
+ unsigned int timer_val;
+
+ if (cnt) {
+ int err;
+ u32 v, new_idx;
+
+ new_idx = closest_thres(&adap->sge, cnt);
+ if (q->desc && q->pktcnt_idx != new_idx) {
+ /* the queue has already been created, update it */
+ v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ V_FW_PARAMS_PARAM_X(
+ FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+ V_FW_PARAMS_PARAM_YZ(q->cntxt_id);
+ err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &v, &new_idx);
+ if (err)
+ return err;
+ }
+ q->pktcnt_idx = new_idx;
+ }
+
+ timer_val = (us == 0) ? X_TIMERREG_RESTART_COUNTER :
+ closest_timer(&adap->sge, us);
+
+ if ((us | cnt) == 0)
+ q->intr_params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
+ else
+ q->intr_params = V_QINTR_TIMER_IDX(timer_val) |
+ V_QINTR_CNT_EN(cnt > 0);
+ return 0;
+}
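+
+/*
+ * For example, init_rspq() below uses cxgb4_set_rspq_intr_params(q, 5, 32)
+ * for Ethernet Rx queues: an interrupt is generated after a ~5us holdoff
+ * or once 32 packets accumulate, whichever happens first.
+ */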
+
+/**
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgbe_alloc_atid(struct tid_info *t, void *data)
+{
+ int atid = -1;
+
+ t4_os_lock(&t->atid_lock);
+ if (t->afree) {
+ union aopen_entry *p = t->afree;
+
+ atid = p - t->atid_tab;
+ t->afree = p->next;
+ p->data = data;
+ t->atids_in_use++;
+ }
+ t4_os_unlock(&t->atid_lock);
+ return atid;
+}
+
+/**
+ * Release an active-open TID.
+ */
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
+{
+ union aopen_entry *p = &t->atid_tab[atid];
+
+ t4_os_lock(&t->atid_lock);
+ p->next = t->afree;
+ t->afree = p;
+ t->atids_in_use--;
+ t4_os_unlock(&t->atid_lock);
+}
+
+/**
+ * Populate a TID_RELEASE WR. The caller must properly size the mbuf.
+ */
+static void mk_tid_release(struct rte_mbuf *mbuf, unsigned int tid)
+{
+ struct cpl_tid_release *req;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_tid_release *);
+ INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid);
+}
+
+/**
+ * Release a TID and inform HW. If we are unable to allocate the release
+ * message, the hardware notification is skipped.
+ */
+void cxgbe_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
+ unsigned short family)
+{
+ struct rte_mbuf *mbuf;
+ struct adapter *adap = container_of(t, struct adapter, tids);
+
+ WARN_ON(tid >= t->ntids);
+
+ if (t->tid_tab[tid]) {
+ t->tid_tab[tid] = NULL;
+ rte_atomic32_dec(&t->conns_in_use);
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_dec(&t->tids_in_use);
+ }
+ }
+
+ mbuf = rte_pktmbuf_alloc((&adap->sge.ctrlq[chan])->mb_pool);
+ if (mbuf) {
+ mbuf->data_len = sizeof(struct cpl_tid_release);
+ mbuf->pkt_len = mbuf->data_len;
+ mk_tid_release(mbuf, tid);
+ t4_mgmt_tx(&adap->sge.ctrlq[chan], mbuf);
+ }
+}
+
+/**
+ * Insert a TID.
+ */
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family)
+{
+ t->tid_tab[tid] = data;
+ if (t->hash_base && tid >= t->hash_base) {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->hash_tids_in_use);
+ } else {
+ if (family == FILTER_TYPE_IPV4)
+ rte_atomic32_inc(&t->tids_in_use);
+ }
+
+ rte_atomic32_inc(&t->conns_in_use);
+}
+
+/**
+ * Free TID tables.
+ */
+static void tid_free(struct tid_info *t)
+{
+ if (t->tid_tab) {
+ if (t->ftid_bmap)
+ rte_bitmap_free(t->ftid_bmap);
+
+ if (t->ftid_bmap_array)
+ t4_os_free(t->ftid_bmap_array);
+
+ t4_os_free(t->tid_tab);
+ }
+
+ memset(t, 0, sizeof(struct tid_info));
+}
+
+/**
+ * Allocate and initialize the TID tables. Returns 0 on success.
+ */
+static int tid_init(struct tid_info *t)
+{
+ size_t size;
+ unsigned int ftid_bmap_size;
+ unsigned int natids = t->natids;
+ unsigned int max_ftids = t->nftids;
+
+ ftid_bmap_size = rte_bitmap_get_memory_footprint(t->nftids);
+ size = t->ntids * sizeof(*t->tid_tab) +
+ max_ftids * sizeof(*t->ftid_tab) +
+ natids * sizeof(*t->atid_tab);
+
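+ /*
+ * One zeroed allocation is carved into three consecutive tables:
+ * [ tid_tab (ntids) | atid_tab (natids) | ftid_tab (nftids) ]
+ */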
+ t->tid_tab = t4_os_alloc(size);
+ if (!t->tid_tab)
+ return -ENOMEM;
+
+ t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+ t->ftid_tab = (struct filter_entry *)&t->atid_tab[t->natids];
+ t->ftid_bmap_array = t4_os_alloc(ftid_bmap_size);
+ if (!t->ftid_bmap_array) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ t4_os_lock_init(&t->atid_lock);
+ t4_os_lock_init(&t->ftid_lock);
+
+ t->afree = NULL;
+ t->atids_in_use = 0;
+ rte_atomic32_init(&t->tids_in_use);
+ rte_atomic32_set(&t->tids_in_use, 0);
+ rte_atomic32_init(&t->conns_in_use);
+ rte_atomic32_set(&t->conns_in_use, 0);
+
+ /* Set up the free list for atid_tab. */
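+ /*
+ * Entry i is chained to entry i + 1; the last entry's 'next' stays
+ * NULL from the zeroed allocation, so afree walks atid_tab in order.
+ */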
+ if (natids) {
+ while (--natids)
+ t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+ t->afree = t->atid_tab;
+ }
+
+ t->ftid_bmap = rte_bitmap_init(t->nftids, t->ftid_bmap_array,
+ ftid_bmap_size);
+ if (!t->ftid_bmap) {
+ tid_free(t);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static inline bool is_x_1g_port(const struct link_config *lc)
+{
+ return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
+}
+
+static inline bool is_x_10g_port(const struct link_config *lc)
+{
+ unsigned int speeds, high_speeds;
+
+ speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
+ high_speeds = speeds &
+ ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
+
+ return high_speeds != 0;
+}
+
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt,
+ unsigned int size, unsigned int iqe_size)
+{
+ q->adapter = adap;
+ cxgb4_set_rspq_intr_params(q, us, cnt);
+ q->iqe_len = iqe_size;
+ q->size = size;
+}
+
+int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ unsigned int max_queues = s->max_ethqsets / adap->params.nports;
+
+ if ((eth_dev->data->nb_rx_queues < 1) ||
+ (eth_dev->data->nb_tx_queues < 1))
+ return -EINVAL;
+
+ if ((eth_dev->data->nb_rx_queues > max_queues) ||
+ (eth_dev->data->nb_tx_queues > max_queues))
+ return -EINVAL;
+
+ if (eth_dev->data->nb_rx_queues > pi->rss_size)
+ return -EINVAL;
+
+ /* We must reconfigure RSS, since the queue config has changed */
+ pi->flags &= ~PORT_RSS_DONE;
+
+ pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
+ pi->n_tx_qsets = eth_dev->data->nb_tx_queues;
+
+ return 0;
+}
+
+void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ unsigned int i, nb_ports = 0, qidx = 0;
+ unsigned int q_per_port = 0;
+
+ if (!(adap->flags & CFG_QUEUES)) {
+ for_each_port(adap, i) {
+ struct port_info *tpi = adap2pinfo(adap, i);
+
+ nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
+ is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
+ }
+
+ /*
+ * By default, allocate up to one queue set per lcore for each 1G/10G port.
+ */
+ if (nb_ports)
+ q_per_port = (s->max_ethqsets -
+ (adap->params.nports - nb_ports)) /
+ nb_ports;
+
+ if (q_per_port > rte_lcore_count())
+ q_per_port = rte_lcore_count();
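+
+ /* e.g. 2 x 10G ports, max_ethqsets = 64, 8 lcores: min(32, 8) = 8 */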
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->first_qset = qidx;
+
+ /* Initially n_rx_qsets == n_tx_qsets */
+ pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
+ is_x_1g_port(&pi->link_cfg)) ?
+ q_per_port : 1;
+ pi->n_tx_qsets = pi->n_rx_qsets;
+
+ if (pi->n_rx_qsets > pi->rss_size)
+ pi->n_rx_qsets = pi->rss_size;
+
+ qidx += pi->n_rx_qsets;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+ struct sge_eth_rxq *r = &s->ethrxq[i];
+
+ init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
+ r->usembufs = 1;
+ r->fl.size = (r->usembufs ? 1024 : 72);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+ s->ethtxq[i].q.size = 1024;
+
+ init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
+ adap->flags |= CFG_QUEUES;
+ }
+}
+
+void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
+{
+ t4_get_port_stats_offset(pi->adapter, pi->tx_chan, stats,
+ &pi->stats_base);
+}
+
+void cxgbe_stats_reset(struct port_info *pi)
+{
+ t4_clr_port_stats(pi->adapter, pi->tx_chan);
+}
+
+static void setup_memwin(struct adapter *adap)
+{
+ u32 mem_win0_base;
+
+ /* For T5, only relative offset inside the PCIe BAR is passed */
+ mem_win0_base = MEMWIN0_BASE;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ MEMWIN_NIC),
+ mem_win0_base | V_BIR(0) |
+ V_WINDOW(ilog2(MEMWIN0_APERTURE) - X_WINDOW_SHIFT));
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
+ MEMWIN_NIC));
+}
+
+int cxgbe_init_rss(struct adapter *adap)
+{
+ unsigned int i;
+
+ if (is_pf4(adap)) {
+ int err;
+
+ err = t4_init_rss_mode(adap, adap->mbox);
+ if (err)
+ return err;
+ }
+
+ for_each_port(adap, i) {
+ struct port_info *pi = adap2pinfo(adap, i);
+
+ pi->rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
+ if (!pi->rss)
+ return -ENOMEM;
+
+ pi->rss_hf = CXGBE_RSS_HF_ALL;
+ }
+ return 0;
+}
+
+/**
+ * Dump basic information about the adapter.
+ */
+void cxgbe_print_adapter_info(struct adapter *adap)
+{
+ /**
+ * Hardware/Firmware/etc. Version/Revision IDs.
+ */
+ t4_dump_version_info(adap);
+}
+
+void cxgbe_print_port_info(struct adapter *adap)
+{
+ int i;
+ char buf[80];
+ struct rte_pci_addr *loc = &adap->pdev->addr;
+
+ for_each_port(adap, i) {
+ const struct port_info *pi = adap2pinfo(adap, i);
+ char *bufp = buf;
+
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
+ bufp += sprintf(bufp, "100M/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
+ bufp += sprintf(bufp, "1G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
+ bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
+ bufp += sprintf(bufp, "25G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
+ bufp += sprintf(bufp, "40G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
+ bufp += sprintf(bufp, "50G/");
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
+ bufp += sprintf(bufp, "100G/");
+ if (bufp != buf)
+ --bufp;
+ sprintf(bufp, "BASE-%s",
+ t4_get_port_type_description(
+ (enum fw_port_type)pi->port_type));
+
+ dev_info(adap,
+ " " PCI_PRI_FMT " Chelsio rev %d %s %s\n",
+ loc->domain, loc->bus, loc->devid, loc->function,
+ CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
+ (adap->flags & USING_MSIX) ? " MSI-X" :
+ (adap->flags & USING_MSI) ? " MSI" : "");
+ }
+}
+
+static int check_devargs_handler(const char *key, const char *value, void *p)
+{
+ if (!strncmp(key, CXGBE_DEVARG_CMN_KEEP_OVLAN, strlen(key)) ||
+ !strncmp(key, CXGBE_DEVARG_CMN_TX_MODE_LATENCY, strlen(key)) ||
+ !strncmp(key, CXGBE_DEVARG_VF_FORCE_LINK_UP, strlen(key))) {
+ if (!strncmp(value, "1", 1)) {
+ bool *dst_val = (bool *)p;
+
+ *dst_val = true;
+ }
+ }
+
+ if (!strncmp(key, CXGBE_DEVARG_PF_FILTER_MODE, strlen(key)) ||
+ !strncmp(key, CXGBE_DEVARG_PF_FILTER_MASK, strlen(key))) {
+ u32 *dst_val = (u32 *)p;
+ char *endptr = NULL;
+ u32 arg_val;
+
+ errno = 0; /* strtoul() only sets errno on failure, so clear stale values */
+ arg_val = strtoul(value, &endptr, 16);
+ if (errno || endptr == value)
+ return -EINVAL;
+
+ *dst_val = arg_val;
+ }
+
+ return 0;
+}
+
+static int cxgbe_get_devargs(struct rte_devargs *devargs, const char *key,
+ void *p)
+{
+ struct rte_kvargs *kvlist;
+ int ret = 0;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key))
+ goto out;
+
+ ret = rte_kvargs_process(kvlist, key, check_devargs_handler, p);
+
+out:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+static void cxgbe_get_devargs_int(struct adapter *adap, bool *dst,
+ const char *key, bool default_value)
+{
+ struct rte_pci_device *pdev = adap->pdev;
+ int ret;
+ bool devarg_value = default_value;
+
+ *dst = default_value;
+ if (!pdev)
+ return;
+
+ ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value);
+ if (ret)
+ return;
+
+ *dst = devarg_value;
+}
+
+static void cxgbe_get_devargs_u32(struct adapter *adap, u32 *dst,
+ const char *key, u32 default_value)
+{
+ struct rte_pci_device *pdev = adap->pdev;
+ u32 devarg_value = default_value;
+ int ret;
+
+ *dst = default_value;
+ if (!pdev)
+ return;
+
+ ret = cxgbe_get_devargs(pdev->device.devargs, key, &devarg_value);
+ if (ret)
+ return;
+
+ *dst = devarg_value;
+}
+
+void cxgbe_process_devargs(struct adapter *adap)
+{
+ cxgbe_get_devargs_int(adap, &adap->devargs.keep_ovlan,
+ CXGBE_DEVARG_CMN_KEEP_OVLAN, false);
+ cxgbe_get_devargs_int(adap, &adap->devargs.tx_mode_latency,
+ CXGBE_DEVARG_CMN_TX_MODE_LATENCY, false);
+ cxgbe_get_devargs_int(adap, &adap->devargs.force_link_up,
+ CXGBE_DEVARG_VF_FORCE_LINK_UP, false);
+ cxgbe_get_devargs_u32(adap, &adap->devargs.filtermode,
+ CXGBE_DEVARG_PF_FILTER_MODE, 0);
+ cxgbe_get_devargs_u32(adap, &adap->devargs.filtermask,
+ CXGBE_DEVARG_PF_FILTER_MASK, 0);
+}
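+
+/*
+ * Illustrative usage (address and hex values are examples only): these
+ * devargs ride on the EAL PCI device option, e.g.
+ * -w 02:00.4,keep_ovlan=1,filtermode=0x88,filtermask=0x80
+ * The boolean keys take 0/1; filtermode/filtermask are parsed as hex
+ * in check_devargs_handler() above.
+ */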
+
+static void configure_vlan_types(struct adapter *adapter)
+{
+ int i;
+
+ for_each_port(adapter, i) {
+ /* OVLAN Type 0x88a8 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN0),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x88a8));
+ /* OVLAN Type 0x9100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_OVLAN_REG(i, A_RX_OVLAN1),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(M_OVLAN_ETYPE),
+ V_OVLAN_MASK(M_OVLAN_MASK) |
+ V_OVLAN_ETYPE(0x9100));
+
+ /* IVLAN 0X8100 */
+ t4_set_reg_field(adapter, MPS_PORT_RX_IVLAN(i),
+ V_IVLAN_ETYPE(M_IVLAN_ETYPE),
+ V_IVLAN_ETYPE(0x8100));
+
+ t4_set_reg_field(adapter, MPS_PORT_RX_CTL(i),
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_IVLAN_EN,
+ F_OVLAN_EN0 | F_OVLAN_EN1 |
+ F_IVLAN_EN);
+ }
+
+ t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG, V_RM_OVLAN(1),
+ V_RM_OVLAN(!adapter->devargs.keep_ovlan));
+}
+
+static int cxgbe_get_filter_vnic_mode_from_devargs(u32 val)
+{
+ u32 vnic_mode;
+
+ vnic_mode = val & (CXGBE_DEVARGS_FILTER_MODE_PF_VF |
+ CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER);
+ if (vnic_mode) {
+ switch (vnic_mode) {
+ case CXGBE_DEVARGS_FILTER_MODE_VLAN_OUTER:
+ return CXGBE_FILTER_VNIC_MODE_OVLAN;
+ case CXGBE_DEVARGS_FILTER_MODE_PF_VF:
+ return CXGBE_FILTER_VNIC_MODE_PFVF;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return CXGBE_FILTER_VNIC_MODE_NONE;
+}
+
+static int cxgbe_get_filter_mode_from_devargs(u32 val, bool closest_match)
+{
+ int vnic_mode, fmode = 0;
+ bool found = false;
+ u8 i;
+
+ if (val >= CXGBE_DEVARGS_FILTER_MODE_MAX) {
+ pr_err("Unsupported flags set in filter mode. Must be < 0x%x\n",
+ CXGBE_DEVARGS_FILTER_MODE_MAX);
+ return -ERANGE;
+ }
+
+ vnic_mode = cxgbe_get_filter_vnic_mode_from_devargs(val);
+ if (vnic_mode < 0) {
+ pr_err("Unsupported Vnic-mode, more than 1 Vnic-mode selected\n");
+ return vnic_mode;
+ }
+
+ if (vnic_mode)
+ fmode |= F_VNIC_ID;
+ if (val & CXGBE_DEVARGS_FILTER_MODE_PHYSICAL_PORT)
+ fmode |= F_PORT;
+ if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_DSTMAC)
+ fmode |= F_MACMATCH;
+ if (val & CXGBE_DEVARGS_FILTER_MODE_ETHERNET_ETHTYPE)
+ fmode |= F_ETHERTYPE;
+ if (val & CXGBE_DEVARGS_FILTER_MODE_VLAN_INNER)
+ fmode |= F_VLAN;
+ if (val & CXGBE_DEVARGS_FILTER_MODE_IP_TOS)
+ fmode |= F_TOS;
+ if (val & CXGBE_DEVARGS_FILTER_MODE_IP_PROTOCOL)
+ fmode |= F_PROTOCOL;
+
+ for (i = 0; i < ARRAY_SIZE(cxgbe_filter_mode_features); i++) {
+ if ((cxgbe_filter_mode_features[i] & fmode) == fmode) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
+
+ return closest_match ? cxgbe_filter_mode_features[i] : fmode;
+}
+
+static int configure_filter_mode_mask(struct adapter *adap)
+{
+ u32 params[2], val[2], nparams = 0;
+ int ret;
+
+ if (!adap->devargs.filtermode && !adap->devargs.filtermask)
+ return 0;
+
+ if (!adap->devargs.filtermode || !adap->devargs.filtermask) {
+ pr_err("Unsupported, Provide both filtermode and filtermask devargs\n");
+ return -EINVAL;
+ }
+
+ if (adap->devargs.filtermask & ~adap->devargs.filtermode) {
+ pr_err("Unsupported, filtermask (0x%x) must be subset of filtermode (0x%x)\n",
+ adap->devargs.filtermask, adap->devargs.filtermode);
+
+ return -EINVAL;
+ }
+
+ params[0] = CXGBE_FW_PARAM_DEV(FILTER) |
+ V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK);
+
+ ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermode,
+ true);
+ if (ret < 0) {
+ pr_err("Unsupported filtermode devargs combination:0x%x\n",
+ adap->devargs.filtermode);
+ return ret;
+ }
+
+ val[0] = V_FW_PARAMS_PARAM_FILTER_MODE(ret);
+
+ ret = cxgbe_get_filter_mode_from_devargs(adap->devargs.filtermask,
+ false);
+ if (ret < 0) {
+ pr_err("Unsupported filtermask devargs combination:0x%x\n",
+ adap->devargs.filtermask);
+ return ret;
+ }
+
+ val[0] |= V_FW_PARAMS_PARAM_FILTER_MASK(ret);
+
+ nparams++;
+
+ ret = cxgbe_get_filter_vnic_mode_from_devargs(adap->devargs.filtermode);
+ if (ret < 0)
+ return ret;
+
+ if (ret) {
+ params[1] = CXGBE_FW_PARAM_DEV(FILTER) |
+ V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_VNIC_MODE);
+
+ val[1] = ret - 1;
+
+ nparams++;
+ }
+
+ return t4_set_params(adap, adap->mbox, adap->pf, 0, nparams,
+ params, val);
+}
+
+static void configure_pcie_ext_tag(struct adapter *adapter)
+{
+ u16 v;
+ int pos = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
+
+ if (pos <= 0)
+ return;
+
+ t4_os_pci_read_cfg2(adapter, pos + PCI_EXP_DEVCTL, &v);
+ v |= PCI_EXP_DEVCTL_EXT_TAG;
+ t4_os_pci_write_cfg2(adapter, pos + PCI_EXP_DEVCTL, v);
+ if (is_t6(adapter->params.chip)) {
+ t4_set_reg_field(adapter, A_PCIE_CFG2,
+ V_T6_TOTMAXTAG(M_T6_TOTMAXTAG),
+ V_T6_TOTMAXTAG(7));
+ t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
+ V_T6_MINTAG(M_T6_MINTAG),
+ V_T6_MINTAG(8));
+ } else {
+ t4_set_reg_field(adapter, A_PCIE_CFG2,
+ V_TOTMAXTAG(M_TOTMAXTAG),
+ V_TOTMAXTAG(3));
+ t4_set_reg_field(adapter, A_PCIE_CMD_CFG,
+ V_MINTAG(M_MINTAG),
+ V_MINTAG(8));
+ }
+}
+
+/* Figure out how many Queue Sets we can support */
+void cxgbe_configure_max_ethqsets(struct adapter *adapter)
+{
+ unsigned int ethqsets;
+
+ /*
+ * We need to reserve an Ingress Queue for the Asynchronous Firmware
+ * Event Queue.
+ *
+ * For each Queue Set, we'll need the ability to allocate two Egress
+ * Contexts -- one for the Ingress Queue Free List and one for the TX
+ * Ethernet Queue.
+ */
+ if (is_pf4(adapter)) {
+ struct pf_resources *pfres = &adapter->params.pfres;
+
+ ethqsets = pfres->niqflint - 1;
+ if (pfres->neq < ethqsets * 2)
+ ethqsets = pfres->neq / 2;
+ } else {
+ struct vf_resources *vfres = &adapter->params.vfres;
+
+ ethqsets = vfres->niqflint - 1;
+ if (vfres->nethctrl != ethqsets)
+ ethqsets = min(vfres->nethctrl, ethqsets);
+ if (vfres->neq < ethqsets * 2)
+ ethqsets = vfres->neq / 2;
+ }
+
+ if (ethqsets > MAX_ETH_QSETS)
+ ethqsets = MAX_ETH_QSETS;
+ adapter->sge.max_ethqsets = ethqsets;
+}
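+
+/*
+ * Worked example (hypothetical provisioning): a PF granted niqflint = 65
+ * and neq = 128 ends up with min(65 - 1, 128 / 2) = 64 queue sets,
+ * further capped at MAX_ETH_QSETS.
+ */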
+
+/*
+ * Tweak configuration based on system architecture, etc. Most of these have
+ * defaults assigned to them by Firmware Configuration Files (if we're using
+ * them) but need to be explicitly set if we're using hard-coded
+ * initialization. So these are essentially common tweaks/settings for
+ * Configuration Files and hard-coded initialization ...
+ */
+static int adap_init0_tweaks(struct adapter *adapter)
+{
+ u8 rx_dma_offset;
+
+ /*
+ * Fix up various Host-Dependent Parameters like Page Size, Cache
+ * Line Size, etc. The firmware default is for a 4KB Page Size and
+ * 64B Cache Line Size ...
+ */
+ t4_fixup_host_params_compat(adapter, CXGBE_PAGE_SIZE, L1_CACHE_BYTES,
+ T5_LAST_REV);
+
+ /*
+ * Keep the chip default offset (zero) at which Ingress packets are
+ * delivered into our DMA buffers
+ */
+ rx_dma_offset = 0;
+ t4_set_reg_field(adapter, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
+ V_PKTSHIFT(rx_dma_offset));
+
+ t4_set_reg_field(adapter, A_SGE_FLM_CFG,
+ V_CREDITCNT(M_CREDITCNT) | M_CREDITCNTPACKING,
+ V_CREDITCNT(3) | V_CREDITCNTPACKING(1));
+
+ t4_set_reg_field(adapter, A_SGE_INGRESS_RX_THRESHOLD,
+ V_THRESHOLD_3(M_THRESHOLD_3), V_THRESHOLD_3(32U));
+
+ t4_set_reg_field(adapter, A_SGE_CONTROL2, V_IDMAARBROUNDROBIN(1U),
+ V_IDMAARBROUNDROBIN(1U));
+
+ /*
+ * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
+ * adds the pseudo header itself.
+ */
+ t4_tp_wr_bits_indirect(adapter, A_TP_INGRESS_CONFIG,
+ F_CSUM_HAS_PSEUDO_HDR, 0);
+
+ return 0;
+}
+
+/*
+ * Attempt to initialize the adapter via a Firmware Configuration File.
+ */
+static int adap_init0_config(struct adapter *adapter, int reset)
+{
+ struct fw_caps_config_cmd caps_cmd;
+ unsigned long mtype = 0, maddr = 0;
+ u32 finiver, finicsum, cfcsum;
+ int ret;
+ int config_issued = 0;
+ int cfg_addr;
+ char config_name[20];
+
+ /*
+ * Reset device if necessary.
+ */
+ if (reset) {
+ ret = t4_fw_reset(adapter, adapter->mbox,
+ F_PIORSTMODE | F_PIORST);
+ if (ret < 0) {
+ dev_warn(adapter, "Firmware reset failed, error %d\n",
+ -ret);
+ goto bye;
+ }
+ }
+
+ cfg_addr = t4_flash_cfg_addr(adapter);
+ if (cfg_addr < 0) {
+ ret = cfg_addr;
+ dev_warn(adapter, "Finding address for firmware config file in flash failed, error %d\n",
+ -ret);
+ goto bye;
+ }
+
+ strcpy(config_name, "On Flash");
+ mtype = FW_MEMTYPE_CF_FLASH;
+ maddr = cfg_addr;
+
+ /*
+ * Issue a Capability Configuration command to the firmware to get it
+ * to parse the Configuration File. We don't use t4_fw_config_file()
+ * because we want the ability to modify various features after we've
+ * processed the configuration file ...
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 =
+ cpu_to_be32(F_FW_CAPS_CONFIG_CMD_CFVALID |
+ V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+ V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ /*
+ * If the CAPS_CONFIG failed with an ENOENT (for a Firmware
+ * Configuration File in FLASH), our last gasp effort is to use the
+ * Firmware Configuration File which is embedded in the firmware. A
+ * very few early versions of the firmware didn't have one embedded
+ * but we can ignore those.
+ */
+ if (ret == -ENOENT) {
+ dev_info(adapter, "%s: Going for embedded config in firmware..\n",
+ __func__);
+
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write =
+ cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = cpu_to_be32(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
+ sizeof(caps_cmd), &caps_cmd);
+ strcpy(config_name, "Firmware Default");
+ }
+
+ config_issued = 1;
+ if (ret < 0)
+ goto bye;
+
+ finiver = be32_to_cpu(caps_cmd.finiver);
+ finicsum = be32_to_cpu(caps_cmd.finicsum);
+ cfcsum = be32_to_cpu(caps_cmd.cfcsum);
+ if (finicsum != cfcsum)
+ dev_warn(adapter, "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
+ finicsum, cfcsum);
+
+ /*
+ * If we're a pure NIC driver then disable all offloading facilities.
+ * This will allow the firmware to optimize aspects of the hardware
+ * configuration which will result in improved performance.
+ */
+ caps_cmd.niccaps &= cpu_to_be16(~FW_CAPS_CONFIG_NIC_ETHOFLD);
+ caps_cmd.toecaps = 0;
+ caps_cmd.iscsicaps = 0;
+ caps_cmd.rdmacaps = 0;
+ caps_cmd.fcoecaps = 0;
+
+ /*
+ * And now tell the firmware to use the configuration we just loaded.
+ */
+ caps_cmd.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
+ NULL);
+ if (ret < 0) {
+ dev_warn(adapter, "Unable to finalize Firmware Capabilities %d\n",
+ -ret);
+ goto bye;
+ }
+
+ /*
+ * Tweak configuration based on system architecture, etc.
+ */
+ ret = adap_init0_tweaks(adapter);
+ if (ret < 0) {
+ dev_warn(adapter, "Unable to do init0-tweaks %d\n", -ret);
+ goto bye;
+ }
+
+ /*
+ * And finally tell the firmware to initialize itself using the
+ * parameters from the Configuration File.
+ */
+ ret = t4_fw_initialize(adapter, adapter->mbox);
+ if (ret < 0) {
+ dev_warn(adapter, "Initializing Firmware failed, error %d\n",
+ -ret);
+ goto bye;
+ }
+
+ /*
+ * Return successfully and note that we're operating with parameters
+ * not supplied by the driver, rather than from hard-wired
+ * initialization constants buried in the driver.
+ */
+ dev_info(adapter,
+ "Successfully configured using Firmware Configuration File \"%s\", version %#x, computed checksum %#x\n",
+ config_name, finiver, cfcsum);
+
+ return 0;
+
+ /*
+ * Something bad happened. Return the error ... (If the "error"
+ * is that there's no Configuration File on the adapter we don't
+ * want to issue a warning since this is fairly common.)
+ */
+bye:
+ if (config_issued && ret != -ENOENT)
+ dev_warn(adapter, "\"%s\" configuration file error %d\n",
+ config_name, -ret);
+
+ dev_debug(adapter, "%s: returning ret = %d ..\n", __func__, ret);
+ return ret;
+}
+
+static int adap_init0(struct adapter *adap)
+{
+ struct fw_caps_config_cmd caps_cmd;
+ int ret = 0;
+ u32 v, port_vec;
+ enum dev_state state;
+ u32 params[7], val[7];
+ int reset = 1;
+ int mbox = adap->mbox;
+
+ /*
+ * Contact FW, advertising Master capability.
+ */
+ ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
+ if (ret < 0) {
+ dev_err(adap, "%s: could not connect to FW, error %d\n",
+ __func__, -ret);
+ goto bye;
+ }
+
+ CXGBE_DEBUG_MBOX(adap, "%s: adap->mbox = %d; ret = %d\n", __func__,
+ adap->mbox, ret);
+
+ if (ret == mbox)
+ adap->flags |= MASTER_PF;
+
+ if (state == DEV_STATE_INIT) {
+ /*
+ * Force halt and reset FW because a previous instance may have
+ * exited abnormally without properly shutting down
+ */
+ ret = t4_fw_halt(adap, adap->mbox, reset);
+ if (ret < 0) {
+ dev_err(adap, "Failed to halt. Exit.\n");
+ goto bye;
+ }
+
+ ret = t4_fw_restart(adap, adap->mbox, reset);
+ if (ret < 0) {
+ dev_err(adap, "Failed to restart. Exit.\n");
+ goto bye;
+ }
+ state = (enum dev_state)((unsigned)state & ~DEV_STATE_INIT);
+ }
+
+ t4_get_version_info(adap);
+
+ ret = t4_get_core_clock(adap, &adap->params.vpd);
+ if (ret < 0) {
+ dev_err(adap, "%s: could not get core clock, error %d\n",
+ __func__, -ret);
+ goto bye;
+ }
+
+ /*
+ * If the firmware is initialized already (and we're not forcing a
+ * master initialization), note that we're living with existing
+ * adapter parameters. Otherwise, it's time to try initializing the
+ * adapter ...
+ */
+ if (state == DEV_STATE_INIT) {
+ dev_info(adap, "Coming up as %s: Adapter already initialized\n",
+ adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
+ } else {
+ dev_info(adap, "Coming up as MASTER: Initializing adapter\n");
+
+ ret = adap_init0_config(adap, reset);
+ if (ret == -ENOENT) {
+ dev_err(adap,
+ "No Configuration File present on adapter. Using hard-wired configuration parameters.\n");
+ goto bye;
+ }
+ }
+ if (ret < 0) {
+ dev_err(adap, "could not initialize adapter, error %d\n", -ret);
+ goto bye;
+ }
+
+ /* Now that we've successfully configured and initialized the adapter
+ * (or found it already initialized), we can ask the Firmware what
+ * resources it has provisioned for us.
+ */
+ ret = t4_get_pfres(adap);
+ if (ret) {
+ dev_err(adap,
+ "Unable to retrieve resource provisioning info\n");
+ goto bye;
+ }
+
+ /* Find out what ports are available to us. */
+ v = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
+ if (ret < 0) {
+ dev_err(adap, "%s: failure in t4_query_params; error = %d\n",
+ __func__, ret);
+ goto bye;
+ }
+
+ adap->params.nports = hweight32(port_vec);
+ adap->params.portvec = port_vec;
+
+ dev_debug(adap, "%s: adap->params.nports = %u\n", __func__,
+ adap->params.nports);
+
+ /*
+ * Give the SGE code a chance to pull in anything that it needs ...
+ * Note that this must be called after we retrieve our VPD parameters
+ * in order to know how to convert core ticks to seconds, etc.
+ */
+ ret = t4_sge_init(adap);
+ if (ret < 0) {
+ dev_err(adap, "t4_sge_init failed with error %d\n",
+ -ret);
+ goto bye;
+ }
+
+ /*
+ * Grab some of our basic fundamental operating parameters.
+ */
+ params[0] = CXGBE_FW_PARAM_PFVF(L2T_START);
+ params[1] = CXGBE_FW_PARAM_PFVF(L2T_END);
+ params[2] = CXGBE_FW_PARAM_PFVF(FILTER_START);
+ params[3] = CXGBE_FW_PARAM_PFVF(FILTER_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 4, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->l2t_start = val[0];
+ adap->l2t_end = val[1];
+ adap->tids.ftid_base = val[2];
+ adap->tids.nftids = val[3] - val[2] + 1;
+
+ params[0] = CXGBE_FW_PARAM_PFVF(CLIP_START);
+ params[1] = CXGBE_FW_PARAM_PFVF(CLIP_END);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
+ if (ret < 0)
+ goto bye;
+ adap->clipt_start = val[0];
+ adap->clipt_end = val[1];
+
+ /*
+ * Get device capabilities so we can determine what resources we need
+ * to manage.
+ */
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
+ &caps_cmd);
+ if (ret < 0)
+ goto bye;
+
+ if ((caps_cmd.niccaps & cpu_to_be16(FW_CAPS_CONFIG_NIC_HASHFILTER)) &&
+ is_t6(adap->params.chip)) {
+ if (cxgbe_init_hash_filter(adap) < 0)
+ goto bye;
+ }
+
+ /* See if FW supports FW_FILTER2 work request */
+ if (is_t4(adap->params.chip)) {
+ adap->params.filter2_wr_support = 0;
+ } else {
+ params[0] = CXGBE_FW_PARAM_DEV(FILTER2_WR);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
+ }
+
+ /* Check if FW supports returning vin.
+ * If this is not supported, driver will interpret
+ * these values from viid.
+ */
+ params[0] = CXGBE_FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
+
+ /* query tid-related parameters */
+ params[0] = CXGBE_FW_PARAM_DEV(NTID);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
+ params, val);
+ if (ret < 0)
+ goto bye;
+ adap->tids.ntids = val[0];
+ adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ params[0] = CXGBE_FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
+ val[0] = 1;
+ (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
+
+ /*
+ * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+ * capability. Earlier versions of the firmware didn't have the
+ * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
+ * permission to use ULPTX MEMWRITE DSGL.
+ */
+ if (is_t4(adap->params.chip)) {
+ adap->params.ulptx_memwrite_dsgl = false;
+ } else {
+ params[0] = CXGBE_FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
+ 1, params, val);
+ adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+ }
+
+ /* Query for max number of packets that can be coalesced for Tx */
+ params[0] = CXGBE_FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
+ if (!ret && val[0] > 0)
+ adap->params.max_tx_coalesce_num = val[0];
+ else
+ adap->params.max_tx_coalesce_num = ETH_COALESCE_PKT_NUM;
+
+ /*
+ * The MTU/MSS Table is initialized by now, so load their values. If
+ * we're initializing the adapter, then we'll make any modifications
+ * we want to the MTU/MSS Table and also initialize the congestion
+ * parameters.
+ */
+ t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+ if (state != DEV_STATE_INIT) {
+ int i;
+
+ /*
+ * The default MTU Table contains values 1492 and 1500.
+ * However, for TCP, it's better to have two values which are
+ * a multiple of 8 +/- 4 bytes apart near this popular MTU.
+ * This allows us to have a TCP Data Payload which is a
+ * multiple of 8 regardless of what combination of TCP Options
+ * are in use (always a multiple of 4 bytes) which is
+ * important for performance reasons. For instance, if no
+ * options are in use, then we have a 20-byte IP header and a
+ * 20-byte TCP header. In this case, a 1500-byte MSS would
+ * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
+ * which is not a multiple of 8. So using an MSS of 1488 in
+ * this case results in a TCP Data Payload of 1448 bytes which
+ * is a multiple of 8. On the other hand, if 12-byte TCP Time
+ * Stamps have been negotiated, then an MTU of 1500 bytes
+ * results in a TCP Data Payload of 1448 bytes which, as
+ * above, is a multiple of 8 bytes ...
+ */
+ for (i = 0; i < NMTUS; i++)
+ if (adap->params.mtus[i] == 1492) {
+ adap->params.mtus[i] = 1488;
+ break;
+ }
+
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ }
+ t4_init_sge_params(adap);
+ ret = configure_filter_mode_mask(adap);
+ if (ret < 0)
+ goto bye;
+ t4_init_tp_params(adap);
+ configure_pcie_ext_tag(adap);
+ configure_vlan_types(adap);
+ cxgbe_configure_max_ethqsets(adap);
+
+ adap->params.drv_memwin = MEMWIN_NIC;
+ adap->flags |= FW_OK;
+ dev_debug(adap, "%s: returning zero..\n", __func__);
+ return 0;
+
+ /*
+ * Something bad happened. If a command timed out or failed with EIO,
+ * the FW is not operating within its spec or something catastrophic
+ * happened to the HW/FW; stop issuing commands.
+ */
+bye:
+ if (ret != -ETIMEDOUT && ret != -EIO)
+ t4_fw_bye(adap, adap->mbox);
+ return ret;
+}
+
+/**
+ * t4_os_portmod_changed - handle port module changes
+ * @adap: the adapter associated with the module change
+ * @port_id: the port index whose module status has changed
+ *
+ * This is the OS-dependent handler for port module changes. It is
+ * invoked when a port module is removed or inserted for any OS-specific
+ * processing.
+ */
+void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+{
+ static const char * const mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+ };
+
+ const struct port_info *pi = adap2pinfo(adap, port_id);
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ dev_info(adap, "Port%d: port module unplugged\n", pi->port_id);
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ dev_info(adap, "Port%d: %s port module inserted\n", pi->port_id,
+ mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ dev_info(adap, "Port%d: unsupported port module inserted\n",
+ pi->port_id);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ dev_info(adap, "Port%d: unknown port module inserted\n",
+ pi->port_id);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ dev_info(adap, "Port%d: transceiver module error\n",
+ pi->port_id);
+ else
+ dev_info(adap, "Port%d: unknown module type %d inserted\n",
+ pi->port_id, pi->mod_type);
+}
+
+bool cxgbe_force_linkup(struct adapter *adap)
+{
+ if (is_pf4(adap))
+ return false; /* force_linkup not required for pf driver */
+
+ return adap->devargs.force_link_up;
+}
+
+/**
+ * link_start - enable a port
+ * @dev: the port to enable
+ *
+ * Performs the MAC and PHY actions needed to enable a port.
+ */
+int cxgbe_link_start(struct port_info *pi)
+{
+ struct adapter *adapter = pi->adapter;
+ u64 conf_offloads;
+ unsigned int mtu;
+ int ret;
+
+ mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+ (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
+
+ conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;
+
+ /*
+ * We do not set address filters and promiscuity here, the stack does
+ * that step explicitly.
+ */
+ ret = t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, -1, -1, -1,
+ !!(conf_offloads & DEV_RX_OFFLOAD_VLAN_STRIP),
+ true);
+ if (ret == 0) {
+ ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt,
+ (u8 *)&pi->eth_dev->data->mac_addrs[0]);
+ if (ret >= 0) {
+ pi->xact_addr_filt = ret;
+ ret = 0;
+ }
+ }
+ if (ret == 0 && is_pf4(adapter))
+ ret = t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
+ &pi->link_cfg);
+ if (ret == 0) {
+ /*
+ * Enabling a Virtual Interface can result in an interrupt
+ * during the processing of the VI Enable command and, in some
+ * paths, result in an attempt to issue another command in the
+ * interrupt context. Thus, we disable interrupts during the
+ * course of the VI Enable command ...
+ */
+ ret = t4_enable_vi_params(adapter, adapter->mbox, pi->viid,
+ true, true, false);
+ }
+
+ if (ret == 0 && cxgbe_force_linkup(adapter))
+ pi->eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ return ret;
+}
+
+/**
+ * cxgbe_write_rss_conf - flash the RSS configuration for a given port
+ * @pi: the port
+ * @rss_hf: Hash configuration to apply
+ */
+int cxgbe_write_rss_conf(const struct port_info *pi, uint64_t rss_hf)
+{
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+ u64 flags = 0;
+ u16 rss;
+ int err;
+
+ /* Should never be called before setting up sge eth rx queues */
+ if (!(adapter->flags & FULL_INIT_DONE)) {
+ dev_err(adap, "%s No RXQs available on port %d\n",
+ __func__, pi->port_id);
+ return -EINVAL;
+ }
+
+ /* Don't allow unsupported hash functions */
+ if (rss_hf & ~CXGBE_RSS_HF_ALL)
+ return -EINVAL;
+
+ if (rss_hf & CXGBE_RSS_HF_IPV4_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_TCP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN;
+
+ if (rss_hf & CXGBE_RSS_HF_UDP_IPV6_MASK)
+ flags |= F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
+ F_FW_RSS_VI_CONFIG_CMD_UDPEN;
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rss = rxq[0].rspq.abs_id;
+
+ /* If Tunnel All Lookup isn't specified in the global RSS
+ * Configuration, then we need to specify a default Ingress
+ * Queue for any ingress packets which aren't hashed. We'll
+ * use our first ingress queue ...
+ */
+ err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
+ flags, rss);
+ return err;
+}
+
+/**
+ * cxgbe_write_rss - write the RSS table for a given port
+ * @pi: the port
+ * @queues: array of queue indices for RSS
+ *
+ * Sets up the portion of the HW RSS table for the port's VI to distribute
+ * packets to the Rx queues in @queues.
+ */
+int cxgbe_write_rss(const struct port_info *pi, const u16 *queues)
+{
+ u16 *rss;
+ int i, err;
+ struct adapter *adapter = pi->adapter;
+ const struct sge_eth_rxq *rxq;
+
+ /* Should never be called before setting up sge eth rx queues */
+ BUG_ON(!(adapter->flags & FULL_INIT_DONE));
+
+ rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
+ if (!rss)
+ return -ENOMEM;
+
+ /* map the queue indices to queue ids */
+ for (i = 0; i < pi->rss_size; i++, queues++)
+ rss[i] = rxq[*queues].rspq.abs_id;
+
+ err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
+ pi->rss_size, rss, pi->rss_size);
+ rte_free(rss);
+ return err;
+}
+
+/**
+ * cxgbe_setup_rss - configure RSS for a port
+ * @pi: the port
+ *
+ * Sets up RSS to distribute packets to multiple receive queues. We
+ * configure the RSS CPU lookup table to distribute to the number of HW
+ * receive queues, and the response queue lookup table to narrow that
+ * down to the response queues actually configured for each port.
+ * We always configure the RSS mapping for all ports since the mapping
+ * table has plenty of entries.
+ */
+int cxgbe_setup_rss(struct port_info *pi)
+{
+ int j, err;
+ struct adapter *adapter = pi->adapter;
+
+ dev_debug(adapter, "%s: pi->rss_size = %u; pi->n_rx_qsets = %u\n",
+ __func__, pi->rss_size, pi->n_rx_qsets);
+
+ if (!(pi->flags & PORT_RSS_DONE)) {
+ if (adapter->flags & FULL_INIT_DONE) {
+ /* Fill default values with equal distribution */
+ for (j = 0; j < pi->rss_size; j++)
+ pi->rss[j] = j % pi->n_rx_qsets;
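+ /* e.g. rss_size = 64, n_rx_qsets = 4: 0,1,2,3,0,1,2,3,... */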
+
+ err = cxgbe_write_rss(pi, pi->rss);
+ if (err)
+ return err;
+
+ err = cxgbe_write_rss_conf(pi, pi->rss_hf);
+ if (err)
+ return err;
+ pi->flags |= PORT_RSS_DONE;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Enable interrupt generation for a given Rx response queue.
+ */
+static void enable_rx(struct adapter *adap, struct sge_rspq *q)
+{
+ /* 0-increment GTS to start the timer and enable interrupts */
+ t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
+ V_SEINTARM(q->intr_params) |
+ V_INGRESSQID(q->cntxt_id));
+}
+
+void cxgbe_enable_rx_queues(struct port_info *pi)
+{
+ struct adapter *adap = pi->adapter;
+ struct sge *s = &adap->sge;
+ unsigned int i;
+
+ for (i = 0; i < pi->n_rx_qsets; i++)
+ enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
+}
+
+/**
+ * fw_caps_to_speed_caps - translate Firmware Port Caps to Speed Caps.
+ * @port_type: Firmware Port Type
+ * @fw_caps: Firmware Port Capabilities
+ * @speed_caps: Device Info Speed Capabilities
+ *
+ * Translate a Firmware Port Capabilities specification to Device Info
+ * Speed Capabilities.
+ */
+static void fw_caps_to_speed_caps(enum fw_port_type port_type,
+ unsigned int fw_caps,
+ u32 *speed_caps)
+{
+#define SET_SPEED(__speed_name) \
+ do { \
+ *speed_caps |= ETH_LINK_ ## __speed_name; \
+ } while (0)
+
+#define FW_CAPS_TO_SPEED(__fw_name) \
+ do { \
+ if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
+ SET_SPEED(__fw_name); \
+ } while (0)
+
+ switch (port_type) {
+ case FW_PORT_TYPE_BT_SGMII:
+ case FW_PORT_TYPE_BT_XFI:
+ case FW_PORT_TYPE_BT_XAUI:
+ FW_CAPS_TO_SPEED(SPEED_100M);
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_KX4:
+ case FW_PORT_TYPE_KX:
+ case FW_PORT_TYPE_FIBER_XFI:
+ case FW_PORT_TYPE_FIBER_XAUI:
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_QSA:
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_KR:
+ SET_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_BP_AP:
+ case FW_PORT_TYPE_BP4_AP:
+ SET_SPEED(SPEED_1G);
+ SET_SPEED(SPEED_10G);
+ break;
+
+ case FW_PORT_TYPE_BP40_BA:
+ case FW_PORT_TYPE_QSFP:
+ SET_SPEED(SPEED_40G);
+ break;
+
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_SFP28:
+ case FW_PORT_TYPE_KR_SFP28:
+ FW_CAPS_TO_SPEED(SPEED_1G);
+ FW_CAPS_TO_SPEED(SPEED_10G);
+ FW_CAPS_TO_SPEED(SPEED_25G);
+ break;
+
+ case FW_PORT_TYPE_CR2_QSFP:
+ SET_SPEED(SPEED_50G);
+ break;
+
+ case FW_PORT_TYPE_KR4_100G:
+ case FW_PORT_TYPE_CR4_QSFP:
+ FW_CAPS_TO_SPEED(SPEED_25G);
+ FW_CAPS_TO_SPEED(SPEED_40G);
+ FW_CAPS_TO_SPEED(SPEED_50G);
+ FW_CAPS_TO_SPEED(SPEED_100G);
+ break;
+
+ default:
+ break;
+ }
+
+#undef FW_CAPS_TO_SPEED
+#undef SET_SPEED
+}
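+
+/*
+ * For instance, FW_CAPS_TO_SPEED(SPEED_25G) above expands to:
+ *
+ *	if (fw_caps & FW_PORT_CAP32_SPEED_25G)
+ *		*speed_caps |= ETH_LINK_SPEED_25G;
+ */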
+
+/**
+ * cxgbe_get_speed_caps - Fetch supported speed capabilities
+ * @pi: Underlying port's info
+ * @speed_caps: Device Info speed capabilities
+ *
+ * Fetch supported speed capabilities of the underlying port.
+ */
+void cxgbe_get_speed_caps(struct port_info *pi, u32 *speed_caps)
+{
+ *speed_caps = 0;
+
+ fw_caps_to_speed_caps(pi->port_type, pi->link_cfg.pcaps,
+ speed_caps);
+
+ if (!(pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG))
+ *speed_caps |= ETH_LINK_SPEED_FIXED;
+}
+
+/**
+ * cxgbe_set_link_status - Set device link up or down.
+ * @pi: Underlying port's info
+ * @status: 0 - down, 1 - up
+ *
+ * Set the device link up or down.
+ */
+int cxgbe_set_link_status(struct port_info *pi, bool status)
+{
+ struct adapter *adapter = pi->adapter;
+ int err = 0;
+
+ err = t4_enable_vi(adapter, adapter->mbox, pi->viid, status, status);
+ if (err) {
+		dev_err(adapter, "%s: t4_enable_vi failed: %d\n", __func__, err);
+ return err;
+ }
+
+ if (!status)
+ t4_reset_link_config(adapter, pi->pidx);
+
+ return 0;
+}
+
+/**
+ * cxgb_up - enable the adapter
+ * @adap: adapter being enabled
+ *
+ * Called when the first port is enabled, this function performs the
+ * actions necessary to make an adapter operational, such as completing
+ * the initialization of HW modules, and enabling interrupts.
+ */
+int cxgbe_up(struct adapter *adap)
+{
+ enable_rx(adap, &adap->sge.fw_evtq);
+ t4_sge_tx_monitor_start(adap);
+ if (is_pf4(adap))
+ t4_intr_enable(adap);
+ adap->flags |= FULL_INIT_DONE;
+
+ /* TODO: deadman watchdog ?? */
+ return 0;
+}
+
+/*
+ * Close the port
+ */
+int cxgbe_down(struct port_info *pi)
+{
+ return cxgbe_set_link_status(pi, false);
+}
+
+/*
+ * Release resources when all the ports have been stopped.
+ */
+void cxgbe_close(struct adapter *adapter)
+{
+ struct port_info *pi;
+ int i;
+
+ if (adapter->flags & FULL_INIT_DONE) {
+ tid_free(&adapter->tids);
+ t4_cleanup_mpstcam(adapter);
+ t4_cleanup_clip_tbl(adapter);
+ t4_cleanup_l2t(adapter);
+ t4_cleanup_smt(adapter);
+ if (is_pf4(adapter))
+ t4_intr_disable(adapter);
+ t4_sge_tx_monitor_stop(adapter);
+ t4_free_sge_resources(adapter);
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox,
+ adapter->pf, 0, pi->viid);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
+ adapter->flags &= ~FULL_INIT_DONE;
+ }
+
+ if (is_pf4(adapter) && (adapter->flags & FW_OK))
+ t4_fw_bye(adapter, adapter->mbox);
+}
+
+static void adap_smt_index(struct adapter *adapter, u32 *smt_start_idx,
+ u32 *smt_size)
+{
+ u32 params[2], smt_val[2];
+ int ret;
+
+ params[0] = CXGBE_FW_PARAM_PFVF(GET_SMT_START);
+ params[1] = CXGBE_FW_PARAM_PFVF(GET_SMT_SIZE);
+
+ ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
+ 2, params, smt_val);
+
+	/* If the FW doesn't recognize this command, fall back to the
+	 * default setting: start index 0 and size SMT_SIZE (256).
+	 */
+ if (ret < 0) {
+ *smt_start_idx = 0;
+ *smt_size = SMT_SIZE;
+ } else {
+ *smt_start_idx = smt_val[0];
+		/* The SMT size can be zero if nsmt is not yet configured
+		 * in the config file or is set to zero; in that case,
+		 * assign all the remaining entries to this PF.
+		 */
+ if (!smt_val[1])
+ *smt_size = SMT_SIZE - *smt_start_idx;
+ else
+ *smt_size = smt_val[1];
+ }
+}
+
+int cxgbe_probe(struct adapter *adapter)
+{
+ u32 smt_start_idx, smt_size;
+ struct port_info *pi;
+ int func, i;
+ int err = 0;
+ u32 whoami;
+ int chip;
+
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ chip = t4_get_chip_type(adapter,
+ CHELSIO_PCI_ID_VER(adapter->pdev->id.device_id));
+ if (chip < 0)
+ return chip;
+
+ func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
+ G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
+
+ adapter->mbox = func;
+ adapter->pf = func;
+
+ t4_os_lock_init(&adapter->mbox_lock);
+ TAILQ_INIT(&adapter->mbox_list);
+ t4_os_lock_init(&adapter->win0_lock);
+
+ err = t4_prep_adapter(adapter);
+ if (err)
+ return err;
+
+ setup_memwin(adapter);
+ err = adap_init0(adapter);
+ if (err) {
+ dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ if (!is_t4(adapter->params.chip)) {
+ /*
+ * The userspace doorbell BAR is split evenly into doorbell
+ * regions, each associated with an egress queue. If this
+ * per-queue region is large enough (at least UDBS_SEG_SIZE)
+ * then it can be used to submit a tx work request with an
+ * implied doorbell. Enable write combining on the BAR if
+ * there is room for such work requests.
+ */
+ int s_qpp, qpp, num_seg;
+
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) *
+ adapter->pf);
+ qpp = 1 << ((t4_read_reg(adapter,
+ A_SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+ num_seg = CXGBE_PAGE_SIZE / UDBS_SEG_SIZE;
+ if (qpp > num_seg)
+ dev_warn(adapter, "Incorrect SGE EGRESS QUEUES_PER_PAGE configuration, continuing in debug mode\n");
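+
+		/* Illustrative numbers (assuming 4KB pages and 128-byte
+		 * doorbell segments): num_seg = 4096 / 128 = 32, so up to
+		 * 32 egress queues per page still leave each queue a full
+		 * doorbell segment for implied-doorbell Tx work requests.
+		 */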
+
+ adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ if (!adapter->bar2) {
+ dev_err(adapter, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ goto out_free;
+ }
+ t4_write_reg(adapter, A_SGE_STAT_CFG, V_STATSOURCE_T5(7) |
+ V_STATMODE(0));
+ }
+
+ for_each_port(adapter, i) {
+ const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
+
+ if (i == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = adapter->eth_dev;
+ goto allocate_mac;
+ }
+
+ /*
+ * now do all data allocation - for eth_dev structure,
+ * and internal (private) data for the remaining ports
+ */
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev)
+ goto out_free;
+
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private)
+ goto out_free;
+
+allocate_mac:
+ pi = eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = i;
+ pi->pidx = i;
+
+ pi->eth_dev->device = &adapter->pdev->device;
+ pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
+ pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
+ pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
+
+ pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
+ RTE_ETHER_ADDR_LEN, 0);
+ if (!pi->eth_dev->data->mac_addrs) {
+ dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
+ __func__);
+ err = -1;
+ goto out_free;
+ }
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
+ }
+
+ if (adapter->flags & FW_OK) {
+ err = t4_port_init(adapter, adapter->mbox, adapter->pf, 0);
+ if (err) {
+ dev_err(adapter, "%s: t4_port_init failed with err %d\n",
+ __func__, err);
+ goto out_free;
+ }
+ }
+
+ cxgbe_cfg_queues(adapter->eth_dev);
+
+ cxgbe_print_adapter_info(adapter);
+ cxgbe_print_port_info(adapter);
+
+ adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
+ adapter->clipt_end);
+ if (!adapter->clipt) {
+ /* We tolerate a lack of clip_table, giving up some
+ * functionality
+ */
+ dev_warn(adapter, "could not allocate CLIP. Continuing\n");
+ }
+
+ adap_smt_index(adapter, &smt_start_idx, &smt_size);
+ adapter->smt = t4_init_smt(smt_start_idx, smt_size);
+ if (!adapter->smt)
+ dev_warn(adapter, "could not allocate SMT, continuing\n");
+
+ adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
+ if (!adapter->l2t) {
+ /* We tolerate a lack of L2T, giving up some functionality */
+ dev_warn(adapter, "could not allocate L2T. Continuing\n");
+ }
+
+ if (tid_init(&adapter->tids) < 0) {
+ /* Disable filtering support */
+ dev_warn(adapter, "could not allocate TID table, "
+ "filter support disabled. Continuing\n");
+ }
+
+ t4_os_lock_init(&adapter->flow_lock);
+
+ adapter->mpstcam = t4_init_mpstcam(adapter);
+ if (!adapter->mpstcam)
+ dev_warn(adapter, "could not allocate mps tcam table."
+ " Continuing\n");
+
+ if (is_hashfilter(adapter)) {
+ if (t4_read_reg(adapter, A_LE_DB_CONFIG) & F_HASHEN) {
+ u32 hash_base, hash_reg;
+
+ hash_reg = A_LE_DB_TID_HASHBASE;
+ hash_base = t4_read_reg(adapter, hash_reg);
+ adapter->tids.hash_base = hash_base / 4;
+ }
+ } else {
+ /* Disable hash filtering support */
+ dev_warn(adapter,
+ "Maskless filter support disabled. Continuing\n");
+ }
+
+ err = cxgbe_init_rss(adapter);
+ if (err)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
+
+ if (adapter->flags & FW_OK)
+ t4_fw_bye(adapter, adapter->mbox);
+ return -err;
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h
new file mode 100644
index 000000000..50931ed04
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_ofld.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_OFLD_H_
+#define _CXGBE_OFLD_H_
+
+#include <rte_bitmap.h>
+
+#include "cxgbe_filter.h"
+
+#define INIT_TP_WR(w, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_TP_WR) | \
+ V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+ (w)->wr.wr_mid = cpu_to_be32( \
+ V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+#define INIT_TP_WR_MIT_CPL(w, cpl, tid) do { \
+ INIT_TP_WR(w, tid); \
+ OPCODE_TID(w) = cpu_to_be32(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
+#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
+ (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) | \
+ V_FW_WR_ATOMIC(atomic)); \
+ (w)->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
+ V_FW_WR_FLOWID(tid)); \
+ (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
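+
+/*
+ * In the macros above, firmware work request lengths are expressed in
+ * 16-byte units, e.g. a 40-byte request yields
+ * V_FW_WR_LEN16(DIV_ROUND_UP(40, 16)) == V_FW_WR_LEN16(3).
+ */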
+
+/*
+ * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+union aopen_entry {
+ void *data;
+ union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc of filter TID.
+ * The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+ void **tid_tab;
+ unsigned int ntids;
+ struct filter_entry *ftid_tab; /* Normal filters */
+ union aopen_entry *atid_tab;
+ struct rte_bitmap *ftid_bmap;
+ uint8_t *ftid_bmap_array;
+ unsigned int nftids, natids;
+ unsigned int ftid_base, hash_base;
+
+ union aopen_entry *afree;
+ unsigned int atids_in_use;
+
+ /* TIDs in the TCAM */
+ rte_atomic32_t tids_in_use;
+ /* TIDs in the HASH */
+ rte_atomic32_t hash_tids_in_use;
+ rte_atomic32_t conns_in_use;
+
+ rte_spinlock_t atid_lock __rte_cache_aligned;
+ rte_spinlock_t ftid_lock;
+};
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+ return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+ return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
+int cxgbe_alloc_atid(struct tid_info *t, void *data);
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid);
+void cxgbe_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid,
+ unsigned short family);
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+ unsigned short family);
+
+#endif /* _CXGBE_OFLD_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h
new file mode 100644
index 000000000..0b7c52aec
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbe_pfvf.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_PFVF_H_
+#define _CXGBE_PFVF_H_
+
+#define CXGBE_FW_PARAM_DEV(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define CXGBE_FW_PARAM_PFVF(param) \
+ (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
+ V_FW_PARAMS_PARAM_Y(0) | \
+ V_FW_PARAMS_PARAM_Z(0))
+
+void cxgbe_dev_rx_queue_release(void *q);
+void cxgbe_dev_tx_queue_release(void *q);
+void cxgbe_dev_stop(struct rte_eth_dev *eth_dev);
+void cxgbe_dev_close(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *device_info);
+int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev);
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr);
+int cxgbe_dev_configure(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+int cxgbe_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
+			     uint16_t rx_queue_id);
+int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id);
+int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id);
+int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu);
+int cxgbe_dev_start(struct rte_eth_dev *eth_dev);
+int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete);
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev);
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev);
+uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+const uint32_t *cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev);
+#endif /* _CXGBE_PFVF_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c
new file mode 100644
index 000000000..4165ba986
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_ethdev.c
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+
+#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+
+/*
+ * Macros needed to support the PCI Device ID Table ...
+ */
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
+ static const struct rte_pci_id cxgb4vf_pci_tbl[] = {
+#define CH_PCI_DEVICE_ID_FUNCTION 0x8
+
+#define PCI_VENDOR_ID_CHELSIO 0x1425
+
+#define CH_PCI_ID_TABLE_ENTRY(devid) \
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CHELSIO, (devid)) }
+
+#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
+ { .vendor_id = 0, } \
+ }
+
+/*
+ * ... and the PCI ID Table itself ...
+ */
+#include "base/t4_pci_id_tbl.h"
+
+/*
+ * Get port statistics.
+ */
+static int cxgbevf_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *eth_stats)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ struct sge *s = &adapter->sge;
+ struct port_stats ps;
+ unsigned int i;
+
+ cxgbevf_stats_get(pi, &ps);
+
+ /* RX Stats */
+ eth_stats->ierrors = ps.rx_len_err;
+
+ /* TX Stats */
+ eth_stats->opackets = ps.tx_bcast_frames + ps.tx_mcast_frames +
+ ps.tx_ucast_frames;
+ eth_stats->obytes = ps.tx_octets;
+ eth_stats->oerrors = ps.tx_drop;
+
+ for (i = 0; i < pi->n_rx_qsets; i++) {
+ struct sge_eth_rxq *rxq =
+ &s->ethrxq[pi->first_qset + i];
+
+ eth_stats->q_ipackets[i] = rxq->stats.pkts;
+ eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
+ eth_stats->ipackets += eth_stats->q_ipackets[i];
+ eth_stats->ibytes += eth_stats->q_ibytes[i];
+ }
+
+ for (i = 0; i < pi->n_tx_qsets; i++) {
+ struct sge_eth_txq *txq =
+ &s->ethtxq[pi->first_qset + i];
+
+ eth_stats->q_opackets[i] = txq->stats.pkts;
+ eth_stats->q_obytes[i] = txq->stats.tx_bytes;
+ }
+ return 0;
+}
+
+static const struct eth_dev_ops cxgbevf_eth_dev_ops = {
+ .dev_start = cxgbe_dev_start,
+ .dev_stop = cxgbe_dev_stop,
+ .dev_close = cxgbe_dev_close,
+ .promiscuous_enable = cxgbe_dev_promiscuous_enable,
+ .promiscuous_disable = cxgbe_dev_promiscuous_disable,
+ .allmulticast_enable = cxgbe_dev_allmulticast_enable,
+ .allmulticast_disable = cxgbe_dev_allmulticast_disable,
+ .dev_configure = cxgbe_dev_configure,
+ .dev_infos_get = cxgbe_dev_info_get,
+ .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
+ .link_update = cxgbe_dev_link_update,
+ .dev_set_link_up = cxgbe_dev_set_link_up,
+ .dev_set_link_down = cxgbe_dev_set_link_down,
+ .mtu_set = cxgbe_dev_mtu_set,
+ .tx_queue_setup = cxgbe_dev_tx_queue_setup,
+ .tx_queue_start = cxgbe_dev_tx_queue_start,
+ .tx_queue_stop = cxgbe_dev_tx_queue_stop,
+ .tx_queue_release = cxgbe_dev_tx_queue_release,
+ .rx_queue_setup = cxgbe_dev_rx_queue_setup,
+ .rx_queue_start = cxgbe_dev_rx_queue_start,
+ .rx_queue_stop = cxgbe_dev_rx_queue_stop,
+ .rx_queue_release = cxgbe_dev_rx_queue_release,
+ .stats_get = cxgbevf_dev_stats_get,
+ .mac_addr_set = cxgbe_mac_addr_set,
+};
+
+/*
+ * Initialize driver
+ * It returns 0 on success.
+ */
+static int eth_cxgbevf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct adapter *adapter = NULL;
+ int err = 0;
+
+ CXGBE_FUNC_TRACE();
+
+ eth_dev->dev_ops = &cxgbevf_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &cxgbe_recv_pkts;
+ eth_dev->tx_pkt_burst = &cxgbe_xmit_pkts;
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ /* for secondary processes, we attach to ethdevs allocated by primary
+ * and do minimal initialization.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ int i;
+
+ for (i = 1; i < MAX_NPORTS; i++) {
+ struct rte_eth_dev *rest_eth_dev;
+ char namei[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(namei, sizeof(namei), "%s_%d",
+ pci_dev->device.name, i);
+ rest_eth_dev = rte_eth_dev_attach_secondary(namei);
+ if (rest_eth_dev) {
+ rest_eth_dev->device = &pci_dev->device;
+ rest_eth_dev->dev_ops =
+ eth_dev->dev_ops;
+ rest_eth_dev->rx_pkt_burst =
+ eth_dev->rx_pkt_burst;
+ rest_eth_dev->tx_pkt_burst =
+ eth_dev->tx_pkt_burst;
+ rte_eth_dev_probing_finish(rest_eth_dev);
+ }
+ }
+ return 0;
+ }
+
+ snprintf(name, sizeof(name), "cxgbevfadapter%d",
+ eth_dev->data->port_id);
+ adapter = rte_zmalloc(name, sizeof(*adapter), 0);
+ if (!adapter)
+ return -1;
+
+ adapter->use_unpacked_mode = 1;
+ adapter->regs = (void *)pci_dev->mem_resource[0].addr;
+ if (!adapter->regs) {
+ dev_err(adapter, "%s: cannot map device registers\n", __func__);
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->pdev = pci_dev;
+ adapter->eth_dev = eth_dev;
+ pi->adapter = adapter;
+
+ cxgbe_process_devargs(adapter);
+
+ err = cxgbevf_probe(adapter);
+ if (err) {
+ dev_err(adapter, "%s: cxgbevf probe failed with err %d\n",
+ __func__, err);
+ goto out_free_adapter;
+ }
+
+ return 0;
+
+out_free_adapter:
+ rte_free(adapter);
+ return err;
+}
+
+static int eth_cxgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct port_info *pi = eth_dev->data->dev_private;
+ struct adapter *adap = pi->adapter;
+
+ /* Free up other ports and all resources */
+ cxgbe_close(adap);
+ return 0;
+}
+
+static int eth_cxgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct port_info),
+ eth_cxgbevf_dev_init);
+}
+
+static int eth_cxgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbevf_dev_uninit);
+}
+
+static struct rte_pci_driver rte_cxgbevf_pmd = {
+ .id_table = cxgb4vf_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_cxgbevf_pci_probe,
+ .remove = eth_cxgbevf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_cxgbevf, rte_cxgbevf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_cxgbevf, cxgb4vf_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_cxgbevf,
+ CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
+ CXGBE_DEVARG_VF_FORCE_LINK_UP "=<0|1> ");
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c
new file mode 100644
index 000000000..66fb92375
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/cxgbevf_main.c
@@ -0,0 +1,302 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+
+#include "base/common.h"
+#include "base/t4_regs.h"
+#include "base/t4_msg.h"
+#include "cxgbe.h"
+#include "cxgbe_pfvf.h"
+#include "mps_tcam.h"
+
+/*
+ * Figure out how many Ports and Queue Sets we can support. This depends on
+ * knowing our Virtual Function Resources and may be called a second time if
+ * we fall back from MSI-X to MSI Interrupt Mode.
+ */
+static void size_nports_qsets(struct adapter *adapter)
+{
+ struct vf_resources *vfres = &adapter->params.vfres;
+ unsigned int pmask_nports;
+
+ /*
+ * The number of "ports" which we support is equal to the number of
+ * Virtual Interfaces with which we've been provisioned.
+ */
+ adapter->params.nports = vfres->nvi;
+ if (adapter->params.nports > MAX_NPORTS) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
+ " allowed virtual interfaces\n", MAX_NPORTS,
+ adapter->params.nports);
+ adapter->params.nports = MAX_NPORTS;
+ }
+
+ /*
+ * We may have been provisioned with more VIs than the number of
+ * ports we're allowed to access (our Port Access Rights Mask).
+ * This is obviously a configuration conflict but we don't want to
+ * do anything silly just because of that.
+ */
+ pmask_nports = hweight32(adapter->params.vfres.pmask);
+ if (pmask_nports < adapter->params.nports) {
+		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
+ " virtual interfaces; limited by Port Access Rights"
+ " mask %#x\n", pmask_nports, adapter->params.nports,
+ adapter->params.vfres.pmask);
+ adapter->params.nports = pmask_nports;
+ }
+
+ cxgbe_configure_max_ethqsets(adapter);
+ if (adapter->sge.max_ethqsets < adapter->params.nports) {
+ dev_warn(adapter->pdev_dev, "only using %d of %d available"
+ " virtual interfaces (too few Queue Sets)\n",
+ adapter->sge.max_ethqsets, adapter->params.nports);
+ adapter->params.nports = adapter->sge.max_ethqsets;
+ }
+}
+
+void cxgbevf_stats_get(struct port_info *pi, struct port_stats *stats)
+{
+ t4vf_get_port_stats(pi->adapter, pi->pidx, stats);
+}
+
+static int adap_init0vf(struct adapter *adapter)
+{
+ u32 param, val = 0;
+ int err;
+
+ err = t4vf_fw_reset(adapter);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Grab basic operational parameters. These will predominantly have
+ * been set up by the Physical Function Driver or will be hard coded
+ * into the adapter. We just have to live with them ... Note that
+ * we _must_ get our VPD parameters before our SGE parameters because
+ * we need to know the adapter's core clock from the VPD in order to
+ * properly decode the SGE Timer Values.
+ */
+ err = t4vf_get_dev_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " device parameters: err=%d\n", err);
+ return err;
+ }
+
+ err = t4vf_get_vpd_params(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " VPD parameters: err=%d\n", err);
+ return err;
+ }
+
+ adapter->pf = t4vf_get_pf_from_vf(adapter);
+ err = t4vf_sge_init(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "error in sge init\n");
+ return err;
+ }
+
+ err = t4vf_get_rss_glb_config(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to retrieve adapter"
+ " RSS parameters: err=%d\n", err);
+ return err;
+ }
+ if (adapter->params.rss.mode !=
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ dev_err(adapter->pdev_dev, "unable to operate with global RSS"
+ " mode %d\n", adapter->params.rss.mode);
+ return -EINVAL;
+ }
+
+ /* If we're running on newer firmware, let it know that we're
+ * prepared to deal with encapsulated CPL messages. Older
+ * firmware won't understand this and we'll just get
+ * unencapsulated messages ...
+ */
+ param = CXGBE_FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
+ val = 1;
+ t4vf_set_params(adapter, 1, &param, &val);
+
+ /* Query for max number of packets that can be coalesced for Tx */
+ param = CXGBE_FW_PARAM_PFVF(MAX_PKTS_PER_ETH_TX_PKTS_WR);
+ err = t4vf_query_params(adapter, 1, &param, &val);
+ if (!err && val > 0)
+ adapter->params.max_tx_coalesce_num = val;
+ else
+ adapter->params.max_tx_coalesce_num = ETH_COALESCE_VF_PKT_NUM;
+
+ /*
+ * Grab our Virtual Interface resource allocation, extract the
+ * features that we're interested in and do a bit of sanity testing on
+ * what we discover.
+ */
+ err = t4vf_get_vfres(adapter);
+ if (err) {
+ dev_err(adapter->pdev_dev, "unable to get virtual interface"
+ " resources: err=%d\n", err);
+ return err;
+ }
+
+ /*
+ * Check for various parameter sanity issues.
+ */
+ if (adapter->params.vfres.pmask == 0) {
+		dev_err(adapter->pdev_dev, "no port access configured/"
+			"usable!\n");
+ return -EINVAL;
+ }
+ if (adapter->params.vfres.nvi == 0) {
+ dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
+ "usable!\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Initialize nports and max_ethqsets now that we have our Virtual
+ * Function Resources.
+ */
+ size_nports_qsets(adapter);
+ adapter->flags |= FW_OK;
+ return 0;
+}
+
+int cxgbevf_probe(struct adapter *adapter)
+{
+ struct port_info *pi;
+ unsigned int pmask;
+ int err = 0;
+ int i;
+
+ t4_os_lock_init(&adapter->mbox_lock);
+ TAILQ_INIT(&adapter->mbox_list);
+ err = t4vf_prep_adapter(adapter);
+ if (err)
+ return err;
+
+ if (!is_t4(adapter->params.chip)) {
+ adapter->bar2 = (void *)adapter->pdev->mem_resource[2].addr;
+ if (!adapter->bar2) {
+ dev_err(adapter, "cannot map device bar2 region\n");
+ err = -ENOMEM;
+ return err;
+ }
+ }
+
+ err = adap_init0vf(adapter);
+ if (err) {
+ dev_err(adapter, "%s: Adapter initialization failed, error %d\n",
+ __func__, err);
+ goto out_free;
+ }
+
+ pmask = adapter->params.vfres.pmask;
+ for_each_port(adapter, i) {
+ const unsigned int numa_node = rte_socket_id();
+ char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_eth_dev *eth_dev;
+ int port_id;
+
+ if (pmask == 0)
+ break;
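+		/* Pick the next port we have access rights to, e.g.
+		 * pmask = 0xa grants ports 1 and 3, so ffs() yields
+		 * port_id 1 and then 3 as each bit is cleared below.
+		 */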
+ port_id = ffs(pmask) - 1;
+ pmask &= ~(1 << port_id);
+
+ snprintf(name, sizeof(name), "%s_%d",
+ adapter->pdev->device.name, i);
+
+ if (i == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = adapter->eth_dev;
+ goto allocate_mac;
+ }
+
+ /*
+ * now do all data allocation - for eth_dev structure,
+ * and internal (private) data for the remaining ports
+ */
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+ eth_dev->data->dev_private =
+ rte_zmalloc_socket(name, sizeof(struct port_info),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!eth_dev->data->dev_private)
+ goto out_free;
+
+allocate_mac:
+ pi = eth_dev->data->dev_private;
+ adapter->port[i] = pi;
+ pi->eth_dev = eth_dev;
+ pi->adapter = adapter;
+ pi->xact_addr_filt = -1;
+ pi->port_id = port_id;
+ pi->pidx = i;
+
+ pi->eth_dev->device = &adapter->pdev->device;
+ pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
+ pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
+ pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
+ pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
+ RTE_ETHER_ADDR_LEN, 0);
+ if (!pi->eth_dev->data->mac_addrs) {
+ dev_err(adapter, "%s: Mem allocation failed for storing mac addr, aborting\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ if (i > 0) {
+ /* First port will be notified by upper layer */
+ rte_eth_dev_probing_finish(eth_dev);
+ }
+ }
+
+ if (adapter->flags & FW_OK) {
+ err = t4vf_port_init(adapter);
+ if (err) {
+ dev_err(adapter, "%s: t4_port_init failed with err %d\n",
+ __func__, err);
+ goto out_free;
+ }
+ }
+
+ cxgbe_cfg_queues(adapter->eth_dev);
+ cxgbe_print_adapter_info(adapter);
+ cxgbe_print_port_info(adapter);
+
+ adapter->mpstcam = t4_init_mpstcam(adapter);
+ if (!adapter->mpstcam)
+ dev_warn(adapter,
+ "VF could not allocate mps tcam table. Continuing\n");
+
+ err = cxgbe_init_rss(adapter);
+ if (err)
+ goto out_free;
+ return 0;
+
+out_free:
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ if (pi->viid != 0)
+ t4_free_vi(adapter, adapter->mbox, adapter->pf,
+ 0, pi->viid);
+ rte_eth_dev_release_port(pi->eth_dev);
+ }
+ return -err;
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/l2t.c b/src/spdk/dpdk/drivers/net/cxgbe/l2t.c
new file mode 100644
index 000000000..f9d651fe0
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/l2t.c
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include "base/common.h"
+#include "l2t.h"
+
+/**
+ * cxgbe_l2t_release - Release associated L2T entry
+ * @e: L2T entry to release
+ *
+ * Drops a reference to an L2T entry by decrementing its ref count.
+ */
+void cxgbe_l2t_release(struct l2t_entry *e)
+{
+ if (rte_atomic32_read(&e->refcnt) != 0)
+ rte_atomic32_dec(&e->refcnt);
+}
+
+/**
+ * Process a CPL_L2T_WRITE_RPL. Note that the TID in the reply is really
+ * the L2T index it refers to.
+ */
+void cxgbe_do_l2t_write_rpl(struct adapter *adap,
+ const struct cpl_l2t_write_rpl *rpl)
+{
+ struct l2t_data *d = adap->l2t;
+ unsigned int tid = GET_TID(rpl);
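+	/* L2T_SIZE is 4096 (bit 12), so the modulo below also strips the
+	 * F_SYNC_WR flag (bit 12) from the reply TID, leaving the bare
+	 * L2T index.
+	 */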
+ unsigned int l2t_idx = tid % L2T_SIZE;
+
+ if (unlikely(rpl->status != CPL_ERR_NONE)) {
+ dev_err(adap,
+ "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+ rpl->status, l2t_idx);
+ return;
+ }
+
+ if (tid & F_SYNC_WR) {
+ struct l2t_entry *e = &d->l2tab[l2t_idx - d->l2t_start];
+
+ t4_os_lock(&e->lock);
+ if (e->state != L2T_STATE_SWITCHING)
+ e->state = L2T_STATE_VALID;
+ t4_os_unlock(&e->lock);
+ }
+}
+
+/**
+ * Write an L2T entry. Must be called with the entry locked.
+ * The write may be synchronous or asynchronous.
+ */
+static int write_l2e(struct rte_eth_dev *dev, struct l2t_entry *e, int sync,
+ bool loopback, bool arpmiss)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct l2t_data *d = adap->l2t;
+ struct rte_mbuf *mbuf;
+ struct cpl_l2t_write_req *req;
+ struct sge_ctrl_txq *ctrlq;
+ unsigned int l2t_idx = e->idx + d->l2t_start;
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+
+ ctrlq = &adap->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf)
+ return -ENOMEM;
+
+ mbuf->data_len = sizeof(*req);
+ mbuf->pkt_len = mbuf->data_len;
+
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_l2t_write_req *);
+ INIT_TP_WR(req, 0);
+
+ OPCODE_TID(req) =
+ cpu_to_be32(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
+ l2t_idx | V_SYNC_WR(sync) |
+ V_TID_QID(adap->sge.fw_evtq.abs_id)));
+ req->params = cpu_to_be16(V_L2T_W_PORT(e->lport) |
+ V_L2T_W_LPBK(loopback) |
+ V_L2T_W_ARPMISS(arpmiss) |
+ V_L2T_W_NOREPLY(!sync));
+ req->l2t_idx = cpu_to_be16(l2t_idx);
+ req->vlan = cpu_to_be16(e->vlan);
+ rte_memcpy(req->dst_mac, e->dmac, RTE_ETHER_ADDR_LEN);
+
+ if (loopback)
+ memset(req->dst_mac, 0, RTE_ETHER_ADDR_LEN);
+
+ t4_mgmt_tx(ctrlq, mbuf);
+
+ if (sync && e->state != L2T_STATE_SWITCHING)
+ e->state = L2T_STATE_SYNC_WRITE;
+
+ return 0;
+}
+
+/**
+ * find_or_alloc_l2e - Find/Allocate a free L2T entry
+ * @d: L2T table
+ * @vlan: VLAN id to compare/add
+ * @port: port id to compare/add
+ * @dmac: Destination MAC address to compare/add
+ * Returns pointer to the L2T entry found/created
+ *
+ * Finds/Allocates an L2T entry to be used by switching rule of a filter.
+ */
+static struct l2t_entry *find_or_alloc_l2e(struct l2t_data *d, u16 vlan,
+ u8 port, u8 *dmac)
+{
+ struct l2t_entry *end, *e;
+ struct l2t_entry *first_free = NULL;
+
+ for (e = &d->l2tab[0], end = &d->l2tab[d->l2t_size]; e != end; ++e) {
+ if (rte_atomic32_read(&e->refcnt) == 0) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (e->state == L2T_STATE_SWITCHING) {
+ if ((!memcmp(e->dmac, dmac, RTE_ETHER_ADDR_LEN)) &&
+ e->vlan == vlan && e->lport == port)
+ goto exists;
+ }
+ }
+ }
+
+ if (first_free) {
+ e = first_free;
+ goto found;
+ }
+
+ return NULL;
+
+found:
+ e->state = L2T_STATE_UNUSED;
+
+exists:
+ return e;
+}
+
+static struct l2t_entry *t4_l2t_alloc_switching(struct rte_eth_dev *dev,
+ u16 vlan, u8 port,
+ u8 *eth_addr)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct l2t_data *d = adap->l2t;
+ struct l2t_entry *e;
+ int ret = 0;
+
+ t4_os_write_lock(&d->lock);
+ e = find_or_alloc_l2e(d, vlan, port, eth_addr);
+ if (e) {
+ t4_os_lock(&e->lock);
+ if (!rte_atomic32_read(&e->refcnt)) {
+ e->state = L2T_STATE_SWITCHING;
+ e->vlan = vlan;
+ e->lport = port;
+ rte_memcpy(e->dmac, eth_addr, RTE_ETHER_ADDR_LEN);
+ rte_atomic32_set(&e->refcnt, 1);
+ ret = write_l2e(dev, e, 0, !L2T_LPBK, !L2T_ARPMISS);
+ if (ret < 0)
+ dev_debug(adap, "Failed to write L2T entry: %d",
+ ret);
+ } else {
+ rte_atomic32_inc(&e->refcnt);
+ }
+ t4_os_unlock(&e->lock);
+ }
+ t4_os_write_unlock(&d->lock);
+
+ return ret ? NULL : e;
+}
+
+/**
+ * cxgbe_l2t_alloc_switching - Allocate a L2T entry for switching rule
+ * @dev: rte_eth_dev pointer
+ * @vlan: VLAN Id
+ * @port: Associated port
+ * @dmac: Destination MAC address to add to L2T
+ * Returns pointer to the allocated l2t entry
+ *
+ * Allocates a L2T entry for use by switching rule of a filter
+ */
+struct l2t_entry *cxgbe_l2t_alloc_switching(struct rte_eth_dev *dev, u16 vlan,
+ u8 port, u8 *dmac)
+{
+ return t4_l2t_alloc_switching(dev, vlan, port, dmac);
+}
+
+/**
+ * Initialize L2 Table
+ */
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end)
+{
+ unsigned int l2t_size;
+ unsigned int i;
+ struct l2t_data *d;
+
+ if (l2t_start >= l2t_end || l2t_end >= L2T_SIZE)
+ return NULL;
+ l2t_size = l2t_end - l2t_start + 1;
+
+ d = t4_os_alloc(sizeof(*d) + l2t_size * sizeof(struct l2t_entry));
+ if (!d)
+ return NULL;
+
+ d->l2t_start = l2t_start;
+ d->l2t_size = l2t_size;
+
+ t4_os_rwlock_init(&d->lock);
+
+ for (i = 0; i < d->l2t_size; ++i) {
+ d->l2tab[i].idx = i;
+ d->l2tab[i].state = L2T_STATE_UNUSED;
+ t4_os_lock_init(&d->l2tab[i].lock);
+ rte_atomic32_set(&d->l2tab[i].refcnt, 0);
+ }
+
+ return d;
+}
+
+/**
+ * Cleanup L2 Table
+ */
+void t4_cleanup_l2t(struct adapter *adap)
+{
+ if (adap->l2t)
+ t4_os_free(adap->l2t);
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/l2t.h b/src/spdk/dpdk/drivers/net/cxgbe/l2t.h
new file mode 100644
index 000000000..2c489e4aa
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/l2t.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+#ifndef _CXGBE_L2T_H_
+#define _CXGBE_L2T_H_
+
+#include "base/t4_msg.h"
+
+enum {
+ L2T_SIZE = 4096 /* # of L2T entries */
+};
+
+enum {
+ L2T_STATE_VALID, /* entry is up to date */
+ L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+
+ /* when state is one of the below the entry is not hashed */
+ L2T_STATE_SWITCHING, /* entry is being used by a switching filter */
+ L2T_STATE_UNUSED /* entry not in use */
+};
+
+/*
+ * State for the corresponding entry of the HW L2 table.
+ */
+struct l2t_entry {
+ u16 state; /* entry state */
+ u16 idx; /* entry index within in-memory table */
+	u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
+ u8 lport; /* destination port */
+ u8 dmac[RTE_ETHER_ADDR_LEN]; /* destination MAC address */
+ rte_spinlock_t lock; /* entry lock */
+ rte_atomic32_t refcnt; /* entry reference count */
+};
+
+struct l2t_data {
+ unsigned int l2t_start; /* start index of our piece of the L2T */
+ unsigned int l2t_size; /* number of entries in l2tab */
+ rte_rwlock_t lock; /* table rw lock */
+ struct l2t_entry l2tab[0]; /* MUST BE LAST */
+};
+
+#define L2T_LPBK true
+#define L2T_ARPMISS true
+
+/* identifies sync vs async L2T_WRITE_REQs */
+#define S_SYNC_WR 12
+#define V_SYNC_WR(x) ((x) << S_SYNC_WR)
+#define F_SYNC_WR V_SYNC_WR(1)
+
+struct l2t_data *t4_init_l2t(unsigned int l2t_start, unsigned int l2t_end);
+void t4_cleanup_l2t(struct adapter *adap);
+struct l2t_entry *cxgbe_l2t_alloc_switching(struct rte_eth_dev *dev, u16 vlan,
+ u8 port, u8 *dmac);
+void cxgbe_l2t_release(struct l2t_entry *e);
+void cxgbe_do_l2t_write_rpl(struct adapter *p,
+ const struct cpl_l2t_write_rpl *rpl);
+#endif /* _CXGBE_L2T_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/meson.build b/src/spdk/dpdk/drivers/net/cxgbe/meson.build
new file mode 100644
index 000000000..3992aba44
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+sources = files('cxgbe_ethdev.c',
+ 'cxgbe_main.c',
+ 'cxgbevf_ethdev.c',
+ 'cxgbevf_main.c',
+ 'sge.c',
+ 'cxgbe_filter.c',
+ 'cxgbe_flow.c',
+ 'clip_tbl.c',
+ 'mps_tcam.c',
+ 'l2t.c',
+ 'smt.c',
+ 'base/t4_hw.c',
+ 'base/t4vf_hw.c')
+includes += include_directories('base')
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.c b/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.c
new file mode 100644
index 000000000..5302d1343
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.c
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include "mps_tcam.h"
+
+static inline bool
+match_entry(struct mps_tcam_entry *entry, const u8 *eth_addr, const u8 *mask)
+{
+ if (!memcmp(eth_addr, entry->eth_addr, RTE_ETHER_ADDR_LEN) &&
+ !memcmp(mask, entry->mask, RTE_ETHER_ADDR_LEN))
+ return true;
+ return false;
+}
+
+static int cxgbe_update_free_idx(struct mpstcam_table *t)
+{
+ struct mps_tcam_entry *entry = t->entry;
+ u16 i, next = t->free_idx + 1;
+
+ if (entry[t->free_idx].state == MPS_ENTRY_UNUSED)
+		/* Already pointing at a free entry */
+ return 0;
+
+	/* Loop until we wrap back around to the index we started from. */
+ for (i = next; i != t->free_idx; i++) {
+ if (i == t->size)
+ /* rollback and search free entry from start */
+ i = 0;
+
+ if (entry[i].state == MPS_ENTRY_UNUSED) {
+ t->free_idx = i;
+ return 0;
+ }
+ }
+
+ return -1; /* table is full */
+}
+
+static struct mps_tcam_entry *
+cxgbe_mpstcam_lookup(struct mpstcam_table *t, const u8 *eth_addr,
+ const u8 *mask)
+{
+ struct mps_tcam_entry *entry = t->entry;
+ int i;
+
+ if (!entry)
+ return NULL;
+
+ for (i = 0; i < t->size; i++) {
+ if (entry[i].state == MPS_ENTRY_UNUSED)
+ continue; /* entry is not being used */
+ if (match_entry(&entry[i], eth_addr, mask))
+ return &entry[i];
+ }
+
+ return NULL;
+}
+
+int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *eth_addr,
+ const u8 *mask)
+{
+ struct adapter *adap = pi->adapter;
+ struct mpstcam_table *mpstcam = adap->mpstcam;
+ struct mps_tcam_entry *entry;
+ int ret;
+
+ if (!adap->mpstcam) {
+ dev_err(adap, "mpstcam table is not available\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If entry already present, return it. */
+ t4_os_write_lock(&mpstcam->lock);
+ entry = cxgbe_mpstcam_lookup(adap->mpstcam, eth_addr, mask);
+ if (entry) {
+ rte_atomic32_add(&entry->refcnt, 1);
+ t4_os_write_unlock(&mpstcam->lock);
+ return entry->idx;
+ }
+
+ if (mpstcam->full) {
+ t4_os_write_unlock(&mpstcam->lock);
+ dev_err(adap, "mps-tcam table is full\n");
+ return -ENOMEM;
+ }
+
+ ret = t4_alloc_raw_mac_filt(adap, pi->viid, eth_addr, mask,
+ mpstcam->free_idx, 0, pi->port_id, false);
+ if (ret <= 0) {
+ t4_os_write_unlock(&mpstcam->lock);
+ return ret;
+ }
+
+ /* Fill in the new values */
+ entry = &mpstcam->entry[ret];
+ memcpy(entry->eth_addr, eth_addr, RTE_ETHER_ADDR_LEN);
+ memcpy(entry->mask, mask, RTE_ETHER_ADDR_LEN);
+ rte_atomic32_set(&entry->refcnt, 1);
+ entry->state = MPS_ENTRY_USED;
+
+ if (cxgbe_update_free_idx(mpstcam))
+ mpstcam->full = true;
+
+ t4_os_write_unlock(&mpstcam->lock);
+ return ret;
+}
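+
+/*
+ * Note: entries returned by cxgbe_mpstcam_alloc() are shared by reference
+ * count; a second request for the same address/mask bumps the refcnt and
+ * returns the existing index, and cxgbe_mpstcam_remove() only frees the
+ * hardware filter once the last reference is dropped.
+ */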
+
+int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr)
+{
+ struct adapter *adap = pi->adapter;
+ struct mpstcam_table *mpstcam = adap->mpstcam;
+ struct mps_tcam_entry *entry;
+
+ if (!mpstcam)
+ return -EOPNOTSUPP;
+ t4_os_write_lock(&mpstcam->lock);
+ if (idx != -1 && idx >= mpstcam->size) {
+ t4_os_write_unlock(&mpstcam->lock);
+ return -EINVAL;
+ }
+ if (idx >= 0) {
+ entry = &mpstcam->entry[idx];
+ /* user wants to modify an existing entry.
+ * verify if entry exists
+ */
+ if (entry->state != MPS_ENTRY_USED) {
+ t4_os_write_unlock(&mpstcam->lock);
+ return -EINVAL;
+ }
+ }
+
+ idx = t4_change_mac(adap, adap->mbox, pi->viid, idx, addr, true, true);
+ if (idx < 0) {
+ t4_os_write_unlock(&mpstcam->lock);
+ return idx;
+ }
+
+ /* idx can now be different from what user provided */
+ entry = &mpstcam->entry[idx];
+ memcpy(entry->eth_addr, addr, RTE_ETHER_ADDR_LEN);
+ /* NOTE: we have considered the case that idx returned by t4_change_mac
+ * will be different from the user provided value only if user
+ * provided value is -1
+ */
+ if (entry->state == MPS_ENTRY_UNUSED) {
+ rte_atomic32_set(&entry->refcnt, 1);
+ entry->state = MPS_ENTRY_USED;
+ }
+
+ if (cxgbe_update_free_idx(mpstcam))
+ mpstcam->full = true;
+
+ t4_os_write_unlock(&mpstcam->lock);
+ return idx;
+}
+
+/**
+ * Hold the appropriate locks while calling this.
+ */
+static inline void reset_mpstcam_entry(struct mps_tcam_entry *entry)
+{
+ memset(entry->eth_addr, 0, RTE_ETHER_ADDR_LEN);
+ memset(entry->mask, 0, RTE_ETHER_ADDR_LEN);
+ rte_atomic32_clear(&entry->refcnt);
+ entry->state = MPS_ENTRY_UNUSED;
+}
+
+/**
+ * ret < 0: fatal error
+ * ret = 0: entry removed in h/w
+ * ret > 0: updated refcount.
+ */
+int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx)
+{
+ struct adapter *adap = pi->adapter;
+ struct mpstcam_table *t = adap->mpstcam;
+ struct mps_tcam_entry *entry;
+ int ret;
+
+ if (!t)
+ return -EOPNOTSUPP;
+ t4_os_write_lock(&t->lock);
+ entry = &t->entry[idx];
+ if (entry->state == MPS_ENTRY_UNUSED) {
+ t4_os_write_unlock(&t->lock);
+ return -EINVAL;
+ }
+
+ if (rte_atomic32_read(&entry->refcnt) == 1)
+ ret = t4_free_raw_mac_filt(adap, pi->viid, entry->eth_addr,
+ entry->mask, idx, 1, pi->port_id,
+ false);
+ else
+ ret = rte_atomic32_sub_return(&entry->refcnt, 1);
+
+ if (ret == 0) {
+ reset_mpstcam_entry(entry);
+		t->full = false; /* We have at least 1 free entry */
+ cxgbe_update_free_idx(t);
+ }
+
+ t4_os_write_unlock(&t->lock);
+ return ret;
+}
+
+struct mpstcam_table *t4_init_mpstcam(struct adapter *adap)
+{
+ struct mpstcam_table *t;
+ int i;
+ u16 size = adap->params.arch.mps_tcam_size;
+
+ t = t4_os_alloc(sizeof(*t) + size * sizeof(struct mps_tcam_entry));
+ if (!t)
+ return NULL;
+
+ t4_os_rwlock_init(&t->lock);
+ t->full = false;
+ t->size = size;
+
+ for (i = 0; i < size; i++) {
+ reset_mpstcam_entry(&t->entry[i]);
+ t->entry[i].mpstcam = t;
+ t->entry[i].idx = i;
+ }
+
+	/* The first entry is reserved for use by the chip; its USED state
+	 * is only cleared when the table is freed in t4_cleanup_mpstcam().
+	 */
+ t->entry[0].state = MPS_ENTRY_USED;
+ t->free_idx = 1;
+
+ return t;
+}
+
+void t4_cleanup_mpstcam(struct adapter *adap)
+{
+ if (adap->mpstcam)
+ t4_os_free(adap->mpstcam);
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.h b/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.h
new file mode 100644
index 000000000..3d1e8d3db
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/mps_tcam.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#ifndef _CXGBE_MPSTCAM_H_
+#define _CXGBE_MPSTCAM_H_
+
+#include "base/common.h"
+
+enum {
+	MPS_ENTRY_UNUSED,	/* Keep this first so a memset to 0 yields
+				 * the correct state. Other states, e.g.
+				 * MPS_ENTRY_BUSY, can be added in the future
+				 * to reduce contention while mailboxing the
+				 * request to f/w, or to denote attributes
+				 * for a specific entry.
+				 */
+ MPS_ENTRY_USED,
+};
+
+struct mps_tcam_entry {
+ u8 state;
+ u16 idx;
+
+ /* add data here which uniquely defines an entry */
+ u8 eth_addr[RTE_ETHER_ADDR_LEN];
+ u8 mask[RTE_ETHER_ADDR_LEN];
+
+ struct mpstcam_table *mpstcam; /* backptr */
+ rte_atomic32_t refcnt;
+};
+
+struct mpstcam_table {
+ u16 size;
+ rte_rwlock_t lock;
+ u16 free_idx; /* next free index */
+ bool full; /* since free index can be present
+ * anywhere in the table, size and
+ * free_idx cannot alone determine
+ * if the table is full
+ */
+ struct mps_tcam_entry entry[0];
+};
+
+struct mpstcam_table *t4_init_mpstcam(struct adapter *adap);
+void t4_cleanup_mpstcam(struct adapter *adap);
+int cxgbe_mpstcam_alloc(struct port_info *pi, const u8 *mac, const u8 *mask);
+int cxgbe_mpstcam_remove(struct port_info *pi, u16 idx);
+int cxgbe_mpstcam_modify(struct port_info *pi, int idx, const u8 *addr);
+
+#endif /* _CXGBE_MPSTCAM_H_ */
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map b/src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map
new file mode 100644
index 000000000..f9f17e4f6
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/rte_pmd_cxgbe_version.map
@@ -0,0 +1,3 @@
+DPDK_20.0 {
+ local: *;
+};
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/sge.c b/src/spdk/dpdk/drivers/net/cxgbe/sge.c
new file mode 100644
index 000000000..aba85a209
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/sge.c
@@ -0,0 +1,2658 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+
+#include "base/common.h"
+#include "base/t4_regs.h"
+#include "base/t4_msg.h"
+#include "cxgbe.h"
+
+static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+ struct sge_eth_txq *txq);
+
+/*
+ * Max number of Rx buffers we replenish at a time.
+ */
+#define MAX_RX_REFILL 64U
+
+#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 256
+
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+/*
+ * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
+ * per mbuf buffer). We currently only support two sizes for 1500- and
+ * 9000-byte MTUs. We could easily support more but there doesn't seem to be
+ * much need for that ...
+ */
+#define FL_MTU_SMALL 1500
+#define FL_MTU_LARGE 9000
+
+static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
+ unsigned int mtu)
+{
+ struct sge *s = &adapter->sge;
+
+ return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,
+ s->fl_align);
+}
+
+#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
+#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
+
+/*
+ * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
+ * these to specify the buffer size as an index into the SGE Free List Buffer
+ * Size register array. We also use bit 4, when the buffer has been unmapped
+ * for DMA, but this is of course never sent to the hardware and is only used
+ * to prevent double unmappings. All of the above requires that the Free List
+ * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
+ * 32-byte aligned or a power of 2 greater in alignment. Since the SGE's
+ * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
+ */
+enum {
+ RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
+	RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
+ RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
+
+ /*
+ * XXX We shouldn't depend on being able to use these indices.
+ * XXX Especially when some other Master PF has initialized the
+ * XXX adapter or we use the Firmware Configuration File. We
+ * XXX should really search through the Host Buffer Size register
+ * XXX array for the appropriately sized buffer indices.
+ */
+ RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
+	RX_LARGE_PG_BUF = 0x1, /* large page buffer */
+
+ RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
+ RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
+};
+
+/**
+ * txq_avail - return the number of available slots in a Tx queue
+ * @q: the Tx queue
+ *
+ * Returns the number of descriptors in a Tx queue available to write new
+ * packets.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+ return q->size - 1 - q->in_use;
+}
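+
+/*
+ * Note on txq_avail(): one descriptor is deliberately kept unused (hence
+ * "size - 1") so the producer never catches up with the consumer, and
+ * pidx == cidx always unambiguously means an empty ring.
+ */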
+
+static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
+{
+ struct rte_mbuf *m = mbuf;
+
+ for (; m; m = m->next, addr++) {
+ *addr = m->buf_iova + rte_pktmbuf_headroom(m);
+ if (*addr == 0)
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ return -ENOMEM;
+}
+
+/**
+ * free_tx_desc - reclaims Tx descriptors and their buffers
+ * @q: the Tx queue to reclaim descriptors from
+ * @n: the number of descriptors to reclaim
+ *
+ * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ * Tx buffers. Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct sge_txq *q, unsigned int n)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = 0;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->mbuf) { /* an SGL is present */
+ rte_pktmbuf_free(d->mbuf);
+ d->mbuf = NULL;
+ }
+ if (d->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < d->coalesce.idx; i++) {
+ rte_pktmbuf_free(d->coalesce.mbuf[i]);
+ d->coalesce.mbuf[i] = NULL;
+ }
+ d->coalesce.idx = 0;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
+ }
+}
+
+static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
+{
+ struct tx_sw_desc *d;
+ unsigned int cidx = q->cidx;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->mbuf) { /* an SGL is present */
+ rte_pktmbuf_free(d->mbuf);
+ d->mbuf = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ }
+ q->cidx = cidx;
+}
+
+/**
+ * fl_cap - return the capacity of a free-buffer list
+ * @fl: the FL
+ *
+ * Returns the capacity of a free-buffer list. The capacity is less than
+ * the size because one descriptor needs to be left unpopulated, otherwise
+ * HW will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+ return fl->size - 8; /* 1 descriptor = 8 buffers */
+}
+
+/**
+ * fl_starving - return whether a Free List is starving.
+ * @adapter: pointer to the adapter
+ * @fl: the Free List
+ *
+ * Tests specified Free List to see whether the number of buffers
+ * available to the hardware has fallen below our "starvation"
+ * threshold.
+ */
+static inline bool fl_starving(const struct adapter *adapter,
+ const struct sge_fl *fl)
+{
+ const struct sge *s = &adapter->sge;
+
+ return fl->avail - fl->pend_cred <= s->fl_starve_thres;
+}
+
+static inline unsigned int get_buf_size(struct adapter *adapter,
+ const struct rx_sw_desc *d)
+{
+ unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
+ unsigned int buf_size = 0;
+
+ switch (rx_buf_size_idx) {
+ case RX_SMALL_MTU_BUF:
+ buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
+ break;
+
+ case RX_LARGE_MTU_BUF:
+ buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
+ break;
+
+ default:
+ BUG_ON(1);
+ /* NOT REACHED */
+ }
+
+ return buf_size;
+}
+
+/**
+ * free_rx_bufs - free the Rx buffers on an SGE free list
+ * @q: the SGE free list to free buffers from
+ * @n: how many buffers to free
+ *
+ * Release the next @n buffers on an SGE free-buffer Rx queue. The
+ * buffers must be made inaccessible to HW before calling this function.
+ */
+static void free_rx_bufs(struct sge_fl *q, int n)
+{
+ unsigned int cidx = q->cidx;
+ struct rx_sw_desc *d;
+
+ d = &q->sdesc[cidx];
+ while (n--) {
+ if (d->buf) {
+ rte_pktmbuf_free(d->buf);
+ d->buf = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
+ cidx = 0;
+ d = q->sdesc;
+ }
+ q->avail--;
+ }
+ q->cidx = cidx;
+}
+
+/**
+ * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
+ * @q: the SGE free list
+ *
+ * Unmap the current buffer on an SGE free-buffer Rx queue. The
+ * buffer must be made inaccessible to HW before calling this function.
+ *
+ * This is similar to @free_rx_bufs above but does not free the buffer.
+ * Note that the FL still loses any further access to the buffer.
+ */
+static void unmap_rx_buf(struct sge_fl *q)
+{
+ if (++q->cidx == q->size)
+ q->cidx = 0;
+ q->avail--;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
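+	/* Doorbell credits are posted in whole descriptors of 8 buffers:
+	 * e.g. pend_cred = 70 posts a PIDX increment of 70 / 8 = 8
+	 * descriptors (64 buffers), and the "&= 7" below keeps the
+	 * remaining 6 buffers pending for the next ring.
+	 */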
+ if (q->pend_cred >= 64) {
+ u32 val = adap->params.arch.sge_fl_db;
+
+ if (is_t4(adap->params.chip))
+ val |= V_PIDX(q->pend_cred / 8);
+ else
+ val |= V_PIDX_T5(q->pend_cred / 8);
+
+ /*
+ * Make sure all memory writes to the Free List queue are
+ * committed before we tell the hardware about them.
+ */
+ wmb();
+
+ /*
+ * If we don't have access to the new User Doorbell (T5+), use
+ * the old doorbell mechanism; otherwise use the new BAR2
+ * mechanism.
+ */
+ if (unlikely(!q->bar2_addr)) {
+ u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_KDOORBELL;
+
+ t4_write_reg_relaxed(adap, reg,
+ val | V_QID(q->cntxt_id));
+ } else {
+ writel_relaxed(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr +
+ SGE_UDB_KDOORBELL));
+
+ /*
+ * This Write memory Barrier will force the write to
+ * the User Doorbell area to be flushed.
+ */
+ wmb();
+ }
+ q->pend_cred &= 7;
+ }
+}
+
+static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
+ dma_addr_t mapping)
+{
+ sd->buf = buf;
+ sd->dma_addr = mapping; /* includes size low bits */
+}
+
+/**
+ * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
+ * @adap: the adapter
+ * @q: the ring to refill
+ * @n: the number of new buffers to allocate
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers
+ * allocated from the queue's mbuf pool. The caller must ensure that
+ * @n does not exceed the queue's capacity. If afterwards the queue is
+ * found critically low mark it as starving in the bitmap of starving FLs.
+ *
+ * Returns the number of buffers allocated.
+ */
+static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
+ int n)
+{
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
+ unsigned int cred = q->avail;
+ __be64 *d = &q->desc[q->pidx];
+ struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+ unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
+ struct rte_mbuf *buf_bulk[n];
+ int ret, i;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
+ mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
+ if (jumbo_en &&
+ ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
+ buf_size_idx = RX_LARGE_MTU_BUF;
+
+ ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
+ if (unlikely(ret != 0)) {
+ dev_debug(adap, "%s: failed to allocate fl entries in bulk\n",
+ __func__);
+ q->alloc_failed++;
+ rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
+ goto out;
+ }
+
+ for (i = 0; i < n; i++) {
+ struct rte_mbuf *mbuf = buf_bulk[i];
+ dma_addr_t mapping;
+
+ if (!mbuf) {
+ dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
+ q->alloc_failed++;
+ rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
+ goto out;
+ }
+
+ rte_mbuf_refcnt_set(mbuf, 1);
+ mbuf->data_off =
+ (uint16_t)((char *)
+ RTE_PTR_ALIGN((char *)mbuf->buf_addr +
+ RTE_PKTMBUF_HEADROOM,
+ adap->sge.fl_align) -
+ (char *)mbuf->buf_addr);
+ mbuf->next = NULL;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->rspq.port_id;
+
+ mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova +
+ mbuf->data_off,
+ adap->sge.fl_align);
+ mapping |= buf_size_idx;
+ *d++ = cpu_to_be64(mapping);
+ set_rx_sw_desc(sd, mbuf, mapping);
+ sd++;
+
+ q->avail++;
+ if (++q->pidx == q->size) {
+ q->pidx = 0;
+ sd = q->sdesc;
+ d = q->desc;
+ }
+ }
+
+out: cred = q->avail - cred;
+ q->pend_cred += cred;
+ ring_fl_db(adap, q);
+
+ if (unlikely(fl_starving(adap, q))) {
+ /*
+ * Make sure data has been written to free list
+ */
+ wmb();
+ q->low++;
+ }
+
+ return cred;
+}
+
+/**
+ * refill_fl - refill an SGE Rx buffer ring with mbufs
+ * @adap: the adapter
+ * @q: the ring to refill
+ * @n: the number of new buffers to allocate
+ *
+ * (Re)populate an SGE free-buffer queue with up to @n new packet buffers
+ * allocated from the Rx queue's mempool. The caller must assure that
+ * @n does not exceed the queue's capacity. Returns the number of buffers
+ * allocated.
+ */
+static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
+{
+ return refill_fl_usembufs(adap, q, n);
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+ refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
+}
+
+/*
+ * Return the number of reclaimable descriptors in a Tx queue.
+ */
+static inline int reclaimable(const struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+
+ hw_cidx -= q->cidx;
+ if (hw_cidx < 0)
+ return hw_cidx + q->size;
+ return hw_cidx;
+}
+
+/**
+ * reclaim_completed_tx - reclaims completed Tx descriptors
+ * @q: the Tx queue to reclaim completed descriptors from
+ *
+ * Reclaims Tx descriptors that the SGE has indicated it has processed.
+ */
+void reclaim_completed_tx(struct sge_txq *q)
+{
+ unsigned int avail = reclaimable(q);
+
+ do {
+ /* reclaim as much as possible */
+ reclaim_tx_desc(q, avail);
+ q->in_use -= avail;
+ avail = reclaimable(q);
+ } while (avail);
+}
+
+/**
+ * sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ *
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+ /*
+ * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
+ * addresses. The DSGL Work Request starts off with a 32-bit DSGL
+ * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
+ * repeated sequences of { Length[i], Length[i+1], Address[i],
+ * Address[i+1] } (this ensures that all addresses are on 64-bit
+ * boundaries). If N is even, then Length[N+1] should be set to 0 and
+ * Address[N+1] is omitted.
+ *
+ * The following calculation incorporates all of the above. It's
+ * somewhat hard to follow but, briefly: the "+2" accounts for the
+ * first two flits which include the DSGL header, Length0 and
+ * Address0; the "(3*(n-1))/2" covers the main body of list entries
+ * (3 flits for every complete pair of the remaining entries, plus
+ * one flit when a single entry is left over); and the "+((n-1)&1)"
+ * adds the second flit that an odd leftover entry needs ...
+ */
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
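+
+/*
+ * A worked check of the arithmetic above, as a sketch under the
+ * hypothetical CXGBE_SGE_SELFTEST guard: 2 flits for the header plus
+ * Length0/Address0, 3 flits for each complete pair of remaining
+ * entries, and 2 more (a Length/pad flit and an Address flit) when a
+ * single entry is left over.
+ */
+#ifdef CXGBE_SGE_SELFTEST
+static void sgl_len_selftest(void)
+{
+ unsigned int n;
+
+ for (n = 1; n <= 8; n++) {
+ /* entries beyond Length0/Address0 */
+ unsigned int r = n - 1;
+ unsigned int expect = 2 + 3 * (r / 2) + ((r & 1) ? 2 : 0);
+
+ RTE_ASSERT(sgl_len(n) == expect);
+ }
+}
+#endif /* CXGBE_SGE_SELFTEST */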
+
+/**
+ * flits_to_desc - returns the num of Tx descriptors for the given flits
+ * @n: the number of flits
+ *
+ * Returns the number of Tx descriptors needed for the supplied number
+ * of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+ return DIV_ROUND_UP(n, 8);
+}
+
+/**
+ * is_eth_imm - can an Ethernet packet be sent as immediate data?
+ * @m: the packet
+ *
+ * Returns whether an Ethernet packet is small enough to fit as
+ * immediate data; a non-zero return value is the CPL header length needed.
+ */
+static inline int is_eth_imm(const struct rte_mbuf *m)
+{
+ unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+
+ return 0;
+}
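+
+/*
+ * Sketch of the immediate-data decision above (hypothetical
+ * CXGBE_SGE_SELFTEST guard): a small non-TSO frame reports the CPL
+ * header length it needs, while anything that cannot fit within
+ * MAX_IMM_TX_PKT_LEN reports 0 and takes the SGL path instead.
+ */
+#ifdef CXGBE_SGE_SELFTEST
+static void is_eth_imm_selftest(void)
+{
+ struct rte_mbuf m;
+
+ memset(&m, 0, sizeof(m));
+ m.pkt_len = 64; /* small non-TSO frame: sent as immediate data */
+ RTE_ASSERT(is_eth_imm(&m) == sizeof(struct cpl_tx_pkt));
+
+ m.pkt_len = 9000; /* too large: must be described by an SGL */
+ RTE_ASSERT(is_eth_imm(&m) == 0);
+}
+#endif /* CXGBE_SGE_SELFTEST */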
+
+/**
+ * calc_tx_flits - calculate the number of flits for a packet Tx WR
+ * @m: the packet
+ * @adap: adapter structure pointer
+ *
+ * Returns the number of flits needed for a Tx WR for the given Ethernet
+ * packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct rte_mbuf *m,
+ struct adapter *adap)
+{
+ size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) :
+ sizeof(struct fw_eth_tx_pkt_vm_wr);
+ unsigned int flits;
+ int hdrlen;
+
+ /*
+ * If the mbuf is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+ * TX Packet header plus the mbuf data in the Work Request.
+ */
+
+ hdrlen = is_eth_imm(m);
+ if (hdrlen)
+ return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64));
+
+ /*
+ * Otherwise, we're going to have to construct a Scatter gather list
+ * of the mbuf body and fragments. We also include the flits necessary
+ * for the TX Packet Work Request and CPL. We always have a firmware
+ * Write Header (incorporated as part of the cpl_tx_pkt_lso and
+ * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
+ * message or, if we're doing a Large Send Offload, an LSO CPL message
+ * with an embedded TX Packet Write CPL message.
+ */
+ flits = sgl_len(m->nb_segs);
+ if (m->tso_segsz)
+ flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ else
+ flits += (wr_size +
+ sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ return flits;
+}
+
+/**
+ * write_sgl - populate a scatter/gather list for a packet
+ * @mbuf: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @start: start offset into mbuf main-body data to include in the SGL
+ * @addr: address of mapped region
+ *
+ * Generates a scatter/gather list for the buffers that make up a packet.
+ * The caller must provide adequate space for the SGL that will be written.
+ * The SGL includes all of the packet's page fragments and the data in its
+ * main body except for the first @start bytes. @sgl must be 16-byte
+ * aligned and within a Tx descriptor with available space. @end points
+ * right after the end of the SGL but does not account for any potential
+ * wrap around, i.e., @end > @sgl.
+ */
+static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr)
+{
+ unsigned int i, len;
+ struct ulptx_sge_pair *to;
+ struct rte_mbuf *m = mbuf;
+ unsigned int nfrags = m->nb_segs;
+ struct ulptx_sge_pair buf[nfrags / 2];
+
+ len = m->data_len - start;
+ sgl->len0 = htonl(len);
+ sgl->addr0 = rte_cpu_to_be_64(addr[0]);
+
+ sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ V_ULPTX_NSGE(nfrags));
+ if (likely(--nfrags == 0))
+ return;
+ /*
+ * Most of the complexity below deals with the possibility we hit the
+ * end of the queue in the middle of writing the SGL. For this case
+ * only we create the SGL in a temporary buffer and then copy it.
+ */
+ to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+ for (i = 0; nfrags >= 2; nfrags -= 2, to++) {
+ m = m->next;
+ to->len[0] = rte_cpu_to_be_32(m->data_len);
+ to->addr[0] = rte_cpu_to_be_64(addr[++i]);
+ m = m->next;
+ to->len[1] = rte_cpu_to_be_32(m->data_len);
+ to->addr[1] = rte_cpu_to_be_64(addr[++i]);
+ }
+ if (nfrags) {
+ m = m->next;
+ to->len[0] = rte_cpu_to_be_32(m->data_len);
+ to->len[1] = rte_cpu_to_be_32(0);
+ to->addr[0] = rte_cpu_to_be_64(addr[i + 1]);
+ }
+ if (unlikely((u8 *)end > (u8 *)q->stat)) {
+ unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat,
+ (u8 *)sgl->sge);
+ unsigned int part1;
+
+ if (likely(part0))
+ memcpy(sgl->sge, buf, part0);
+ part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat);
+ rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
+ end = RTE_PTR_ADD((void *)q->desc, part1);
+ }
+ if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
+ *(u64 *)end = 0;
+}
+
+#define IDXDIFF(head, tail, wrap) \
+ ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
+
+#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
+#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
+
+#define PIDXDIFF(head, tail, wrap) \
+ ((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))
+#define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size)
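+
+/*
+ * The macros above give the forward distance from one ring index to
+ * another, wrapping at the ring size. A minimal sketch under the
+ * hypothetical CXGBE_SGE_SELFTEST guard:
+ */
+#ifdef CXGBE_SGE_SELFTEST
+static void idxdiff_selftest(void)
+{
+ RTE_ASSERT(IDXDIFF(5, 2, 8) == 3); /* head ahead of tail */
+ RTE_ASSERT(IDXDIFF(2, 6, 8) == 4); /* head wrapped past the end */
+ RTE_ASSERT(IDXDIFF(4, 4, 8) == 0); /* equal indices: nothing pending */
+}
+#endif /* CXGBE_SGE_SELFTEST */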
+
+/**
+ * ring_tx_db - ring a Tx queue's doorbell
+ * @adap: the adapter
+ * @q: the Tx queue
+ *
+ * Ring the doorbell for a Tx queue, handing HW all descriptors queued
+ * since the last doorbell write.
+ */
+static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
+{
+ int n = Q_IDXDIFF(q, dbidx);
+
+ /*
+ * Make sure that all writes to the TX Descriptors are committed
+ * before we tell the hardware about them.
+ */
+ rte_wmb();
+
+ /*
+ * If we don't have access to the new User Doorbell (T5+), use the old
+ * doorbell mechanism; otherwise use the new BAR2 mechanism.
+ */
+ if (unlikely(!q->bar2_addr)) {
+ u32 val = V_PIDX(n);
+
+ /*
+ * For T4 we need to participate in the Doorbell Recovery
+ * mechanism.
+ */
+ if (!q->db_disabled)
+ t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ V_QID(q->cntxt_id) | val);
+ else
+ q->db_pidx_inc += n;
+ q->db_pidx = q->pidx;
+ } else {
+ u32 val = V_PIDX_T5(n);
+
+ /*
+ * T4 and later chips share the same PIDX field offset within
+ * the doorbell, but T5 and later shrank the field in order to
+ * gain a bit for Doorbell Priority. The field was absurdly
+ * large in the first place (14 bits) so we just use the T5
+ * and later limits and warn if a Queue ID is too large.
+ */
+ WARN_ON(val & F_DBPRIO);
+
+ writel(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL));
+
+ /*
+ * This Write Memory Barrier will force the write to the User
+ * Doorbell area to be flushed. This is needed to prevent
+ * writes on different CPUs for the same queue from hitting
+ * the adapter out of order. This is required when some Work
+ * Requests take the Write Combine Gather Buffer path (user
+ * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
+ * take the traditional path where we simply increment the
+ * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
+ * hardware DMA read the actual Work Request.
+ */
+ rte_wmb();
+ }
+ q->dbidx = q->pidx;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
+{
+ int csum_type;
+
+ if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ switch (m->ol_flags & PKT_TX_L4_MASK) {
+ case PKT_TX_TCP_CKSUM:
+ csum_type = TX_CSUM_TCPIP;
+ break;
+ case PKT_TX_UDP_CKSUM:
+ csum_type = TX_CSUM_UDPIP;
+ break;
+ default:
+ goto nocsum;
+ }
+ } else {
+ goto nocsum;
+ }
+
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
+ int eth_hdr_len = m->l2_len;
+
+ if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+ hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len);
+ else
+ hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len);
+ return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len;
+ }
+nocsum:
+ /*
+ * unknown protocol, disable HW csum
+ * and hope a bad packet is detected
+ */
+ return F_TXPKT_L4CSUM_DIS;
+}
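+
+/*
+ * Sketch of the flag-to-control-bits mapping above (hypothetical
+ * CXGBE_SGE_SELFTEST guard; assumes CHELSIO_CHIP_CODE() from
+ * t4_chip_type.h builds a T5 chip code). With no offload flags the
+ * function falls back to disabling HW L4 checksums.
+ */
+#ifdef CXGBE_SGE_SELFTEST
+static void hwcsum_selftest(void)
+{
+ enum chip_type t5 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
+ struct rte_mbuf m;
+
+ memset(&m, 0, sizeof(m));
+ RTE_ASSERT(hwcsum(t5, &m) == F_TXPKT_L4CSUM_DIS);
+
+ m.ol_flags = PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
+ m.l2_len = RTE_ETHER_HDR_LEN;
+ m.l3_len = 20; /* plain IPv4 header */
+ RTE_ASSERT(hwcsum(t5, &m) ==
+ (V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
+ V_TXPKT_IPHDR_LEN(20) |
+ V_TXPKT_ETHHDR_LEN(RTE_ETHER_HDR_LEN)));
+}
+#endif /* CXGBE_SGE_SELFTEST */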
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+ q->in_use += n;
+ q->pidx += n;
+ if (q->pidx >= q->size)
+ q->pidx -= q->size;
+}
+
+#define MAX_COALESCE_LEN 64000
+
+static inline int wraps_around(struct sge_txq *q, int ndesc)
+{
+ return (q->pidx + ndesc) > q->size ? 1 : 0;
+}
+
+static void tx_timer_cb(void *data)
+{
+ struct adapter *adap = (struct adapter *)data;
+ struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
+ int i;
+ unsigned int coal_idx;
+
+ /* monitor any pending tx */
+ for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
+ if (t4_os_trylock(&txq->txq_lock)) {
+ coal_idx = txq->q.coalesce.idx;
+ if (coal_idx) {
+ if (coal_idx == txq->q.last_coal_idx &&
+ txq->q.pidx == txq->q.last_pidx) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ } else {
+ txq->q.last_coal_idx = coal_idx;
+ txq->q.last_pidx = txq->q.pidx;
+ }
+ }
+ t4_os_unlock(&txq->txq_lock);
+ }
+ }
+ rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
+}
+
+/**
+ * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR
+ * @adap: adapter structure
+ * @txq: tx queue
+ *
+ * Writes the fields of the packets WR header and sends it to the hardware.
+ */
+static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
+ struct sge_eth_txq *txq)
+{
+ struct fw_eth_tx_pkts_vm_wr *vmwr;
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
+ struct fw_eth_tx_pkts_wr *wr;
+ struct sge_txq *q = &txq->q;
+ unsigned int ndesc;
+ u32 wr_mid;
+
+ /* fill the pkts WR header */
+ wr = (void *)&q->desc[q->pidx];
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ vmwr = (void *)&q->desc[q->pidx];
+
+ wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
+ ndesc = flits_to_desc(q->coalesce.flits);
+ wr->equiq_to_len16 = htonl(wr_mid);
+ wr->plen = cpu_to_be16(q->coalesce.len);
+ wr->npkt = q->coalesce.idx;
+ wr->r3 = 0;
+ if (is_pf4(adap)) {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ wr->type = q->coalesce.type;
+ } else {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
+ vmwr->r4 = 0;
+ memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst,
+ fw_hdr_copy_len);
+ }
+
+ /* zero out coalesce structure members */
+ memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce));
+
+ txq_advance(q, ndesc);
+ txq->stats.coal_wr++;
+ txq->stats.coal_pkts += wr->npkt;
+
+ if (Q_IDXDIFF(q, equeidx) >= q->size / 2) {
+ q->equeidx = q->pidx;
+ wr_mid |= F_FW_WR_EQUEQ;
+ wr->equiq_to_len16 = htonl(wr_mid);
+ }
+ ring_tx_db(adap, q);
+}
+
+/**
+ * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
+ * @txq: tx queue where the mbuf is sent
+ * @mbuf: mbuf to be sent
+ * @nflits: return value for number of flits needed
+ * @adap: adapter structure
+ *
+ * This function decides if a packet should be coalesced or not.
+ */
+static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
+ struct rte_mbuf *mbuf,
+ unsigned int *nflits,
+ struct adapter *adap)
+{
+ struct fw_eth_tx_pkts_vm_wr *wr;
+ const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci));
+ struct sge_txq *q = &txq->q;
+ unsigned int flits, ndesc;
+ unsigned char type = 0;
+ int credits, wr_size;
+
+ /* use coal WR type 1 when no frags are present */
+ type = (mbuf->nb_segs == 1) ? 1 : 0;
+ if (!is_pf4(adap)) {
+ if (!type)
+ return 0;
+
+ if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *),
+ fw_hdr_copy_len))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ }
+
+ if (unlikely(type != q->coalesce.type && q->coalesce.idx))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+
+ /* calculate the number of flits required for coalescing this packet
+ * without the 2 flits of the WR header. These are added further down
+ * if we are just starting a new PKTS WR. sgl_len doesn't account for
+ * the possible 16-byte alignment of ULP TX commands, so we do it here.
+ */
+ flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U;
+ if (type == 0)
+ flits += (sizeof(struct ulp_txpkt) +
+ sizeof(struct ulptx_idata)) / sizeof(__be64);
+ flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64);
+ *nflits = flits;
+
+ /* If coalescing is on, the mbuf is added to a pkts WR */
+ if (q->coalesce.idx) {
+ ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
+ credits = txq_avail(q) - ndesc;
+
+ /* If we are wrapping around or this is the last mbuf, then send
+ * the already-coalesced mbufs and let the non-coalesce pass
+ * handle this mbuf.
+ */
+ if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ return 0;
+ }
+
+ /* If the max coalesce len or the max WR len is reached
+ * ship the WR and keep coalescing on.
+ */
+ if (unlikely((q->coalesce.len + mbuf->pkt_len >
+ MAX_COALESCE_LEN) ||
+ (q->coalesce.flits + flits >
+ q->coalesce.max))) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ goto new;
+ }
+ return 1;
+ }
+
+new:
+ /* start a new pkts WR, the WR header is not filled below */
+ wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) :
+ sizeof(struct fw_eth_tx_pkts_vm_wr);
+ flits += wr_size / sizeof(__be64);
+ ndesc = flits_to_desc(q->coalesce.flits + flits);
+ credits = txq_avail(q) - ndesc;
+
+ if (unlikely(credits < 0 || wraps_around(q, ndesc)))
+ return 0;
+ q->coalesce.flits += wr_size / sizeof(__be64);
+ q->coalesce.type = type;
+ q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
+ q->coalesce.flits * sizeof(__be64);
+ if (!is_pf4(adap))
+ memcpy((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
+ return 1;
+}
+
+/**
+ * tx_do_packet_coalesce - add an mbuf to a coalesce WR
+ * @txq: sge_eth_txq used to send the mbuf
+ * @mbuf: mbuf to be sent
+ * @flits: flits needed for this mbuf
+ * @adap: adapter structure
+ * @pi: port_info structure
+ * @addr: mapped address of the mbuf
+ *
+ * Adds an mbuf to be sent as part of a coalesce WR by filling a
+ * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and
+ * ulp_tx_sc_dsgl command.
+ */
+static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
+ struct rte_mbuf *mbuf,
+ int flits, struct adapter *adap,
+ const struct port_info *pi,
+ dma_addr_t *addr, uint16_t nb_pkts)
+{
+ u64 cntrl, *end;
+ struct sge_txq *q = &txq->q;
+ struct ulp_txpkt *mc;
+ struct ulptx_idata *sc_imm;
+ struct cpl_tx_pkt_core *cpl;
+ struct tx_sw_desc *sd;
+ unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
+
+ if (q->coalesce.type == 0) {
+ mc = (struct ulp_txpkt *)q->coalesce.ptr;
+ mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) |
+ F_ULP_TXPKT_RO);
+ mc->len = htonl(DIV_ROUND_UP(flits, 2));
+ sc_imm = (struct ulptx_idata *)(mc + 1);
+ sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ F_ULP_TX_SC_MORE);
+ sc_imm->len = htonl(sizeof(*cpl));
+ end = (u64 *)mc + flits;
+ cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1);
+ } else {
+ end = (u64 *)q->coalesce.ptr + flits;
+ cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr;
+ }
+
+ /* update coalesce structure for this txq */
+ q->coalesce.flits += flits;
+ q->coalesce.ptr += flits * sizeof(__be64);
+ q->coalesce.len += mbuf->pkt_len;
+
+ /* fill the cpl message; this must be kept in sync with the
+ * equivalent code in t4_eth_xmit
+ */
+ if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+ cntrl = hwcsum(adap->params.chip, mbuf) |
+ F_TXPKT_IPCSUM_DIS;
+ txq->stats.tx_cso++;
+ } else {
+ cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
+ }
+
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ txq->stats.vlan_ins++;
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
+ }
+
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
+ cpl->pack = htons(0);
+ cpl->len = htons(len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+ write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
+ txq->stats.pkts++;
+ txq->stats.tx_bytes += len;
+
+ sd = &q->sdesc[q->pidx + (idx >> 1)];
+ if (!(idx & 1)) {
+ if (sd->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < sd->coalesce.idx; i++) {
+ rte_pktmbuf_free(sd->coalesce.mbuf[i]);
+ sd->coalesce.mbuf[i] = NULL;
+ }
+ }
+ }
+
+ /* Store pointers to the mbuf and the sgl used in free_tx_desc.
+ * Each tx desc can hold two pointers, corresponding to the value
+ * of ETH_COALESCE_PKT_PER_DESC.
+ */
+ sd->coalesce.mbuf[idx & 1] = mbuf;
+ sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
+ sd->coalesce.idx = (idx & 1) + 1;
+
+ /* Send the coalesced work request, only if max reached. However,
+ * if lower latency is preferred over throughput, then don't wait
+ * for coalescing the next Tx burst and send the packets now.
+ */
+ q->coalesce.idx++;
+ if (q->coalesce.idx == adap->params.max_tx_coalesce_num ||
+ (adap->devargs.tx_mode_latency && q->coalesce.idx >= nb_pkts))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+
+ return 0;
+}
+
+/**
+ * t4_eth_xmit - add a packet to an Ethernet Tx queue
+ * @txq: the egress queue
+ * @mbuf: the packet
+ *
+ * Add a packet to an SGE Ethernet Tx queue.
+ */
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ uint16_t nb_pkts)
+{
+ const struct port_info *pi;
+ struct cpl_tx_pkt_lso_core *lso;
+ struct adapter *adap;
+ struct rte_mbuf *m = mbuf;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct fw_eth_tx_pkt_vm_wr *vmwr;
+ struct cpl_tx_pkt_core *cpl;
+ struct tx_sw_desc *d;
+ dma_addr_t addr[m->nb_segs];
+ unsigned int flits, ndesc, cflits;
+ int l3hdr_len, l4hdr_len, eth_xtra_len;
+ int len, last_desc;
+ int credits;
+ u32 wr_mid;
+ u64 cntrl, *end;
+ bool v6;
+ u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ /* Reject xmit if queue is stopped */
+ if (unlikely(txq->flags & EQ_STOPPED))
+ return -(EBUSY);
+
+ /*
+ * The chip min packet length is 10 octets but play safe and reject
+ * anything shorter than an Ethernet header.
+ */
+ if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {
+out_free:
+ rte_pktmbuf_free(m);
+ return 0;
+ }
+
+ if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
+ (unlikely(m->pkt_len > max_pkt_len)))
+ goto out_free;
+
+ pi = txq->data->dev_private;
+ adap = pi->adapter;
+
+ cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
+ /* align the end of coalesce WR to a 512 byte boundary */
+ txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
+
+ if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
+ m->pkt_len > RTE_ETHER_MAX_LEN)) {
+ if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
+ if (unlikely(map_mbuf(mbuf, addr) < 0)) {
+ dev_warn(adap, "%s: mapping err for coalesce\n",
+ __func__);
+ txq->stats.mapping_err++;
+ goto out_free;
+ }
+ return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
+ pi, addr, nb_pkts);
+ } else {
+ return -EBUSY;
+ }
+ }
+
+ if (txq->q.coalesce.idx)
+ ship_tx_pkt_coalesce_wr(adap, txq);
+
+ flits = calc_tx_flits(m, adap);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&txq->q) - ndesc;
+
+ if (unlikely(credits < 0)) {
+ dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
+ __func__, txq->q.cntxt_id, credits);
+ return -EBUSY;
+ }
+
+ if (unlikely(map_mbuf(m, addr) < 0)) {
+ txq->stats.mapping_err++;
+ goto out_free;
+ }
+
+ wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+ if (Q_IDXDIFF(&txq->q, equeidx) >= 64) {
+ txq->q.equeidx = txq->q.pidx;
+ wr_mid |= F_FW_WR_EQUEQ;
+ }
+
+ wr = (void *)&txq->q.desc[txq->q.pidx];
+ vmwr = (void *)&txq->q.desc[txq->q.pidx];
+ wr->equiq_to_len16 = htonl(wr_mid);
+ if (is_pf4(adap)) {
+ wr->r3 = rte_cpu_to_be_64(0);
+ end = (u64 *)wr + flits;
+ } else {
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
+
+ vmwr->r3[0] = rte_cpu_to_be_32(0);
+ vmwr->r3[1] = rte_cpu_to_be_32(0);
+ memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
+ fw_hdr_copy_len);
+ end = (u64 *)vmwr + flits;
+ }
+
+ len = 0;
+ len += sizeof(*cpl);
+
+ /* Coalescing skipped and we send through normal path */
+ if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
+ V_FW_WR_IMMDLEN(len));
+ if (is_pf4(adap))
+ cpl = (void *)(wr + 1);
+ else
+ cpl = (void *)(vmwr + 1);
+ if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ cntrl = hwcsum(adap->params.chip, m) |
+ F_TXPKT_IPCSUM_DIS;
+ txq->stats.tx_cso++;
+ }
+ } else {
+ if (is_pf4(adap))
+ lso = (void *)(wr + 1);
+ else
+ lso = (void *)(vmwr + 1);
+ v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
+ l3hdr_len = m->l3_len;
+ l4hdr_len = m->l4_len;
+ eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
+ len += sizeof(*lso);
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
+ V_FW_WR_IMMDLEN(len));
+ lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ V_LSO_IPV6(v6) |
+ V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+ V_LSO_IPHDR_LEN(l3hdr_len / 4) |
+ V_LSO_TCPHDR_LEN(l4hdr_len / 4));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(m->tso_segsz);
+ lso->seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->len = htonl(m->pkt_len);
+ else
+ lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
+ cpl = (void *)(lso + 1);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+ else
+ cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 :
+ TX_CSUM_TCPIP) |
+ V_TXPKT_IPHDR_LEN(l3hdr_len);
+ txq->stats.tso++;
+ txq->stats.tx_cso += m->tso_segsz;
+ }
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ txq->stats.vlan_ins++;
+ cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
+ }
+
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) |
+ V_TXPKT_PF(0));
+
+ cpl->pack = htons(0);
+ cpl->len = htons(m->pkt_len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ txq->stats.pkts++;
+ txq->stats.tx_bytes += m->pkt_len;
+ last_desc = txq->q.pidx + ndesc - 1;
+ if (last_desc >= (int)txq->q.size)
+ last_desc -= txq->q.size;
+
+ d = &txq->q.sdesc[last_desc];
+ if (d->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < d->coalesce.idx; i++) {
+ rte_pktmbuf_free(d->coalesce.mbuf[i]);
+ d->coalesce.mbuf[i] = NULL;
+ }
+ d->coalesce.idx = 0;
+ }
+ write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
+ addr);
+ txq->q.sdesc[last_desc].mbuf = m;
+ txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+ txq_advance(&txq->q, ndesc);
+ ring_tx_db(adap, &txq->q);
+ return 0;
+}
+
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any mbufs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @mbuf: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct rte_mbuf *mbuf)
+{
+ return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * inline_tx_mbuf - inline a packet's data into TX descriptors
+ * @q: the TX queue where the packet will be inlined
+ * @from: pointer to data portion of packet
+ * @to: pointer after cpl where data has to be inlined
+ * @len: length of data to inline
+ *
+ * Inline a packet's contents directly to TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
+ int len)
+{
+ int left = RTE_PTR_DIFF(q->stat, *to);
+
+ if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
+ rte_memcpy(*to, from, len);
+ *to = RTE_PTR_ADD(*to, len);
+ } else {
+ rte_memcpy(*to, from, left);
+ from = RTE_PTR_ADD(from, left);
+ left = len - left;
+ rte_memcpy((void *)q->desc, from, left);
+ *to = RTE_PTR_ADD((void *)q->desc, left);
+ }
+}
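+
+/*
+ * A toy exercise of the wrap-around path above (hypothetical
+ * CXGBE_SGE_SELFTEST guard): a two-descriptor ring whose status page
+ * sits right after the descriptors, with the copy starting 8 bytes
+ * before the end so it must wrap back to the ring base.
+ */
+#ifdef CXGBE_SGE_SELFTEST
+static void inline_tx_mbuf_selftest(void)
+{
+ struct tx_desc ring[2];
+ struct sge_txq q;
+ char src[24];
+ caddr_t dst;
+
+ memset(&q, 0, sizeof(q));
+ memset(src, 0xab, sizeof(src));
+ q.desc = ring;
+ q.size = 2;
+ q.stat = (void *)&ring[q.size]; /* status page right after the ring */
+
+ dst = RTE_PTR_SUB((void *)q.stat, 8);
+ inline_tx_mbuf(&q, src, &dst, sizeof(src));
+ /* 8 bytes landed at the end, the remaining 16 at the ring base */
+ RTE_ASSERT(dst == RTE_PTR_ADD((void *)q.desc, sizeof(src) - 8));
+}
+#endif /* CXGBE_SGE_SELFTEST */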
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @mbuf: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ unsigned int ndesc;
+ struct fw_wr_hdr *wr;
+ caddr_t dst;
+
+ if (unlikely(!is_imm(mbuf))) {
+ WARN_ON(1);
+ rte_pktmbuf_free(mbuf);
+ return -1;
+ }
+
+ reclaim_completed_tx_imm(&q->q);
+ ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
+ t4_os_lock(&q->ctrlq_lock);
+
+ q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
+ if (unlikely(q->full)) {
+ t4_os_unlock(&q->ctrlq_lock);
+ return -1;
+ }
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ dst = (void *)wr;
+ inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
+ &dst, mbuf->data_len);
+
+ txq_advance(&q->q, ndesc);
+ if (unlikely(txq_avail(&q->q) < 64))
+ wr->lo |= htonl(F_FW_WR_EQUEQ);
+
+ q->txp++;
+
+ ring_tx_db(q->adapter, &q->q);
+ t4_os_unlock(&q->ctrlq_lock);
+
+ rte_pktmbuf_free(mbuf);
+ return 0;
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @q: the control queue
+ * @mbuf: the packet containing the management message
+ *
+ * Send a management message through control queue.
+ */
+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ return ctrl_xmit(q, mbuf);
+}
+
+/**
+ * alloc_ring - allocate resources for an SGE descriptor ring
+ * @nelem: the number of descriptors
+ * @elem_size: the size of each descriptor
+ * @sw_size: the size of the SW state associated with each ring element
+ * @phys: the physical address of the allocated ring
+ * @metadata: address of the array holding the SW state for the ring
+ * @stat_size: extra space in HW ring for status information
+ * @queue_id: the queue index
+ * @socket_id: preferred socket for memory allocations
+ * @z_name: memzone name for the HW ring
+ * @z_name_sw: memzone name for the SW ring state
+ *
+ * Allocates resources for an SGE descriptor ring, such as Tx queues,
+ * free buffer lists, or response queues. Each SGE ring requires
+ * space for its HW descriptors plus, optionally, space for the SW state
+ * associated with each HW entry (the metadata). The function returns
+ * three values: the virtual address for the HW ring (the return value
+ * of the function), the bus address of the HW ring, and the address
+ * of the SW ring.
+ */
+static void *alloc_ring(size_t nelem, size_t elem_size,
+ size_t sw_size, dma_addr_t *phys, void *metadata,
+ size_t stat_size, __rte_unused uint16_t queue_id,
+ int socket_id, const char *z_name,
+ const char *z_name_sw)
+{
+ size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
+ const struct rte_memzone *tz;
+ void *s = NULL;
+
+ dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
+ "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
+ " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
+ stat_size, queue_id, socket_id, z_name, z_name_sw);
+
+ tz = rte_memzone_lookup(z_name);
+ if (tz) {
+ dev_debug(adapter, "%s: tz exists...returning existing..\n",
+ __func__);
+ goto alloc_sw_ring;
+ }
+
+ /*
+ * Allocate TX/RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ tz = rte_memzone_reserve_aligned(z_name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, 4096);
+ if (!tz)
+ return NULL;
+
+alloc_sw_ring:
+ memset(tz->addr, 0, len);
+ if (sw_size) {
+ s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
+ RTE_CACHE_LINE_SIZE, socket_id);
+
+ if (!s) {
+ dev_err(adapter, "%s: failed to get sw_ring memory\n",
+ __func__);
+ return NULL;
+ }
+ }
+ if (metadata)
+ *(void **)metadata = s;
+
+ *phys = (uint64_t)tz->iova;
+ return tz->addr;
+}
+
+#define CXGB4_MSG_AN ((void *)1)
+
+/**
+ * rspq_next - advance to the next entry in a response queue
+ * @q: the queue
+ *
+ * Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *q)
+{
+ q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
+ if (unlikely(++q->cidx == q->size)) {
+ q->cidx = 0;
+ q->gen ^= 1;
+ q->cur_desc = q->desc;
+ }
+}
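+
+/*
+ * A toy walk of the response-queue cursor above (hypothetical
+ * CXGBE_SGE_SELFTEST guard): on a two-entry queue with 64-byte IQEs,
+ * the second advance wraps the cidx back to 0, flips the generation
+ * bit and resets the cursor to the base of the ring.
+ */
+#ifdef CXGBE_SGE_SELFTEST
+static void rspq_next_selftest(void)
+{
+ __be64 iqe[2 * 8]; /* two 64-byte entries */
+ struct sge_rspq q;
+
+ memset(&q, 0, sizeof(q));
+ q.desc = iqe;
+ q.cur_desc = iqe;
+ q.iqe_len = 64;
+ q.size = 2;
+ q.gen = 1;
+
+ rspq_next(&q); /* advance to the second entry */
+ RTE_ASSERT(q.cidx == 1 && q.gen == 1);
+ rspq_next(&q); /* wrap: back to entry 0, generation flips */
+ RTE_ASSERT(q.cidx == 0 && q.gen == 0 && q.cur_desc == q.desc);
+}
+#endif /* CXGBE_SGE_SELFTEST */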
+
+static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
+ uint64_t ol_flags)
+{
+ pkt->packet_type |= ptype;
+ pkt->ol_flags |= ol_flags;
+}
+
+static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
+ const struct cpl_rx_pkt *cpl,
+ struct rte_mbuf *pkt)
+{
+ bool csum_ok;
+ u16 err_vec;
+
+ if (adap->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
+ else
+ err_vec = ntohs(cpl->err_vec);
+
+ csum_ok = cpl->csum_calc && !err_vec;
+
+ if (cpl->vlan_ex)
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ else
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
+
+ if (cpl->l2info & htonl(F_RXF_IP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_IP6))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+
+ if (cpl->l2info & htonl(F_RXF_TCP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_UDP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+}
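+
+/*
+ * Sketch of the mapping above (hypothetical CXGBE_SGE_SELFTEST guard):
+ * a checksummed IPv4/TCP CPL with no VLAN tag yields the Ethernet,
+ * IPv4 and TCP packet types plus good IP and L4 checksum flags. The
+ * zeroed stub adapter means no T6-style err_vec compression.
+ */
+#ifdef CXGBE_SGE_SELFTEST
+static void fill_mbuf_info_selftest(void)
+{
+ static struct adapter adap; /* zeroed stub adapter */
+ struct cpl_rx_pkt cpl;
+ struct rte_mbuf pkt;
+
+ memset(&cpl, 0, sizeof(cpl));
+ memset(&pkt, 0, sizeof(pkt));
+ cpl.csum_calc = 1;
+ cpl.l2info = htonl(F_RXF_IP | F_RXF_TCP);
+
+ cxgbe_fill_mbuf_info(&adap, &cpl, &pkt);
+ RTE_ASSERT(pkt.packet_type == (RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP));
+ RTE_ASSERT(pkt.ol_flags == (PKT_RX_IP_CKSUM_GOOD |
+ PKT_RX_L4_CKSUM_GOOD));
+}
+#endif /* CXGBE_SGE_SELFTEST */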
+
+/**
+ * process_responses - process responses from an SGE response queue
+ * @q: the ingress queue to process
+ * @budget: how many responses can be processed in this round
+ * @rx_pkts: array in which to store the received packets
+ *
+ * Process responses from an SGE response queue up to the supplied budget.
+ * Responses include received packets as well as control messages from FW
+ * or HW.
+ *
+ * Additionally choose the interrupt holdoff time for the next interrupt
+ * on this queue. If the system is under memory shortage, use a fairly
+ * long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *q, int budget,
+ struct rte_mbuf **rx_pkts)
+{
+ int ret = 0, rsp_type;
+ int budget_left = budget;
+ const struct rsp_ctrl *rc;
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+ while (likely(budget_left)) {
+ if (q->cidx == ntohs(q->stat->pidx))
+ break;
+
+ rc = (const struct rsp_ctrl *)
+ ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));
+
+ /*
+ * Ensure response has been read
+ */
+ rmb();
+ rsp_type = G_RSPD_TYPE(rc->u.type_gen);
+
+ if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
+ struct sge *s = &q->adapter->sge;
+ unsigned int stat_pidx;
+ int stat_pidx_diff;
+
+ stat_pidx = ntohs(q->stat->pidx);
+ stat_pidx_diff = P_IDXDIFF(q, stat_pidx);
+ while (stat_pidx_diff && budget_left) {
+ const struct rx_sw_desc *rsd =
+ &rxq->fl.sdesc[rxq->fl.cidx];
+ const struct rss_header *rss_hdr =
+ (const void *)q->cur_desc;
+ const struct cpl_rx_pkt *cpl =
+ (const void *)&q->cur_desc[1];
+ struct rte_mbuf *pkt, *npkt;
+ u32 len, bufsz;
+
+ rc = (const struct rsp_ctrl *)
+ ((const char *)q->cur_desc +
+ (q->iqe_len - sizeof(*rc)));
+
+ rsp_type = G_RSPD_TYPE(rc->u.type_gen);
+ if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF))
+ break;
+
+ len = ntohl(rc->pldbuflen_qid);
+ BUG_ON(!(len & F_RSPD_NEWBUF));
+ pkt = rsd->buf;
+ npkt = pkt;
+ len = G_RSPD_LEN(len);
+ pkt->pkt_len = len;
+
+ /* Chain additional mbufs until len bytes are consumed */
+ while (len) {
+ struct rte_mbuf *new_pkt = rsd->buf;
+
+ bufsz = min(get_buf_size(q->adapter,
+ rsd), len);
+ new_pkt->data_len = bufsz;
+ unmap_rx_buf(&rxq->fl);
+ len -= bufsz;
+ npkt->next = new_pkt;
+ npkt = new_pkt;
+ pkt->nb_segs++;
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ }
+ npkt->next = NULL;
+ pkt->nb_segs--;
+
+ cxgbe_fill_mbuf_info(q->adapter, cpl, pkt);
+
+ if (!rss_hdr->filter_tid &&
+ rss_hdr->hash_type) {
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->hash.rss =
+ ntohl(rss_hdr->hash_val);
+ }
+
+ if (cpl->vlan_ex)
+ pkt->vlan_tci = ntohs(cpl->vlan);
+
+ rte_pktmbuf_adj(pkt, s->pktshift);
+ rxq->stats.pkts++;
+ rxq->stats.rx_bytes += pkt->pkt_len;
+ rx_pkts[budget - budget_left] = pkt;
+
+ rspq_next(q);
+ budget_left--;
+ stat_pidx_diff--;
+ }
+ continue;
+ } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
+ ret = q->handler(q, q->cur_desc, NULL);
+ } else {
+ ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
+ }
+
+ if (unlikely(ret)) {
+ /* couldn't process descriptor, back off for recovery */
+ q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+ break;
+ }
+
+ rspq_next(q);
+ budget_left--;
+ }
+
+ /*
+ * If this is a Response Queue with an associated Free List and
+ * there's room for another chunk of new Free List buffer pointers,
+ * refill the Free List.
+ */
+
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
+
+ return budget - budget_left;
+}
+
+int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
+ unsigned int budget, unsigned int *work_done)
+{
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ unsigned int cidx_inc;
+ unsigned int params;
+ u32 val;
+
+ *work_done = process_responses(q, budget, rx_pkts);
+
+ if (*work_done) {
+ cidx_inc = R_IDXDIFF(q, gts_idx);
+
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
+
+ params = q->intr_params;
+ q->next_intr_params = params;
+ val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
+
+ if (unlikely(!q->bar2_addr)) {
+ u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_GTS;
+
+ t4_write_reg(q->adapter, reg,
+ val | V_INGRESSQID((u32)q->cntxt_id));
+ } else {
+ writel(val | V_INGRESSQID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS));
+ /* This Write memory Barrier will force the
+ * write to the User Doorbell area to be
+ * flushed.
+ */
+ wmb();
+ }
+ q->gts_idx = q->cidx;
+ }
+ return 0;
+}
+
+/**
+ * bar2_address - return the BAR2 address for an SGE Queue's Registers
+ * @adapter: the adapter
+ * @qid: the SGE Queue ID
+ * @qtype: the SGE Queue Type (Egress or Ingress)
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 address for the SGE Queue Registers associated with
+ * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
+ * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
+ * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
+ * Registers are supported (e.g. the Write Combining Doorbell Buffer).
+ */
+static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ unsigned int *pbar2_qid)
+{
+ u64 bar2_qoffset;
+ int ret;
+
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
+ if (ret)
+ return NULL;
+
+ return adapter->bar2 + bar2_qoffset;
+}
+
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
+{
+ struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
+ unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+
+ return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
+ rq->cntxt_id, fl_id, 0xffff);
+}
+
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
+{
+ struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
+ unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+
+ return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
+ rq->cntxt_id, fl_id, 0xffff);
+}
+
+/*
+ * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
+ * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
+ */
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+ struct rte_eth_dev *eth_dev, int intr_idx,
+ struct sge_fl *fl, rspq_handler_t hnd, int cong,
+ struct rte_mempool *mp, int queue_id, int socket_id)
+{
+ int ret, flsz = 0;
+ struct fw_iq_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = eth_dev->data->dev_private;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+ unsigned int nb_refill;
+ u8 pciechan;
+
+ /* Size needs to be a multiple of 16, including the status entry. */
+ iq->size = cxgbe_roundup(iq->size, 16);
+
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id,
+ fwevtq ? "fwq_ring" : "rx_ring");
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
+ queue_id, socket_id, z_name, z_name_sw);
+ if (!iq->desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
+ V_FW_IQ_CMD_VFN(0));
+ if (cong >= 0)
+ c.iqns_to_fl0congen =
+ htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ V_FW_IQ_CMD_IQTYPE(cong ?
+ FW_IQ_IQTYPE_NIC :
+ FW_IQ_IQTYPE_OFLD) |
+ F_FW_IQ_CMD_IQRO);
+ } else {
+ pciechan = pi->port_id;
+ }
+
+ c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
+ (sizeof(c) / 16));
+ c.type_to_iqandstindex =
+ htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+ V_FW_IQ_CMD_IQASYNCH(fwevtq) |
+ V_FW_IQ_CMD_VIID(pi->viid) |
+ V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
+ V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) |
+ V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
+ -intr_idx - 1));
+ c.iqdroprss_to_iqesize =
+ htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
+ F_FW_IQ_CMD_IQGTSMODE |
+ V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
+ V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
+ c.iqsize = htons(iq->size);
+ c.iqaddr = cpu_to_be64(iq->phys_addr);
+
+ if (fl) {
+ struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
+ fl);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ /*
+ * Allocate the ring for the hardware free list (with space
+ * for its status page) along with the associated software
+ * descriptor ring. The free list size needs to be a multiple
+ * of the Egress Queue Unit and at least 2 Egress Units larger
+ * than the SGE's Egress Congestion Threshold
+ * (fl_starve_thres - 1).
+ */
+ if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
+ fl->size = s->fl_starve_thres - 1 + 2 * 8;
+ fl->size = cxgbe_roundup(fl->size, 8);
+
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id,
+ fwevtq ? "fwq_ring" : "fl_ring");
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ fl->desc = alloc_ring(fl->size, sizeof(__be64),
+ sizeof(struct rx_sw_desc),
+ &fl->addr, &fl->sdesc, s->stat_len,
+ queue_id, socket_id, z_name, z_name_sw);
+
+ if (!fl->desc)
+ goto fl_nomem;
+
+ flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
+ c.iqns_to_fl0congen |=
+ htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ (unlikely(rxq->usembufs) ?
+ 0 : F_FW_IQ_CMD_FL0PACKEN) |
+ F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
+ F_FW_IQ_CMD_FL0PADEN);
+ if (is_pf4(adap) && cong >= 0)
+ c.iqns_to_fl0congen |=
+ htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
+ F_FW_IQ_CMD_FL0CONGCIF |
+ F_FW_IQ_CMD_FL0CONGEN);
+
+ /* In T6, for egress queue type FL there is internal overhead
+ * of 16B for the header going into the FLM module.
+ * Hence the maximum allowed burst size is 448 bytes.
+ */
+ c.fl0dcaen_to_fl0cidxfthresh =
+ htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMIN_128B :
+ X_FETCHBURSTMIN_64B) |
+ V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMAX_512B :
+ X_FETCHBURSTMAX_256B));
+ c.fl0size = htons(flsz);
+ c.fl0addr = cpu_to_be64(fl->addr);
+ }
+
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
+ if (ret)
+ goto err;
+
+ iq->cur_desc = iq->desc;
+ iq->cidx = 0;
+ iq->gts_idx = 0;
+ iq->gen = 1;
+ iq->next_intr_params = iq->intr_params;
+ iq->cntxt_id = ntohs(c.iqid);
+ iq->abs_id = ntohs(c.physiqid);
+ iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
+ &iq->bar2_qid);
+ iq->size--; /* subtract status entry */
+ iq->stat = (void *)&iq->desc[iq->size * 8];
+ iq->eth_dev = eth_dev;
+ iq->handler = hnd;
+ iq->port_id = pi->pidx;
+ iq->mb_pool = mp;
+
+ /* set offset to -1 to distinguish ingress queues without FL */
+ iq->offset = fl ? 0 : -1;
+
+ if (fl) {
+ fl->cntxt_id = ntohs(c.fl0id);
+ fl->avail = 0;
+ fl->pend_cred = 0;
+ fl->pidx = 0;
+ fl->cidx = 0;
+ fl->alloc_failed = 0;
+
+ /*
+ * Note, we must initialize the BAR2 Free List User Doorbell
+ * information before refilling the Free List!
+ */
+ fl->bar2_addr = bar2_address(adap, fl->cntxt_id,
+ T4_BAR2_QTYPE_EGRESS,
+ &fl->bar2_qid);
+
+ nb_refill = refill_fl(adap, fl, fl_cap(fl));
+ if (nb_refill != fl_cap(fl)) {
+ ret = -ENOMEM;
+ dev_err(adap, "%s: mbuf alloc failed with error: %d\n",
+ __func__, ret);
+ goto refill_fl_err;
+ }
+ }
+
+ /*
+ * For T5 and later we attempt to set up the Congestion Manager values
+ * of the new RX Ethernet Queue. This should really be handled by
+ * firmware because it's more complex than any host driver wants to
+ * get involved with and it's different per chip and this is almost
+ * certainly wrong. Firmware would be wrong as well, but it would be
+ * a lot easier to fix in one place ... For now we do something very
+ * simple (and hopefully less wrong).
+ */
+ if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
+ u32 param, val;
+ int i;
+
+ param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
+ V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
+ if (cong == 0) {
+ val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE);
+ } else {
+ val = V_CONMCTXT_CNGTPMODE(
+ X_CONMCTXT_CNGTPMODE_CHANNEL);
+ for (i = 0; i < 4; i++) {
+ if (cong & (1 << i))
+ val |= V_CONMCTXT_CNGCHMAP(1 <<
+ (i << 2));
+ }
+ }
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &param, &val);
+ if (ret)
+ dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
+ iq->cntxt_id, -ret);
+ }
+
+ return 0;
+
+refill_fl_err:
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
+ iq->cntxt_id, fl->cntxt_id, 0xffff);
+fl_nomem:
+ ret = -ENOMEM;
+err:
+ iq->cntxt_id = 0;
+ iq->abs_id = 0;
+ if (iq->desc)
+ iq->desc = NULL;
+
+ if (fl && fl->desc) {
+ rte_free(fl->sdesc);
+ fl->cntxt_id = 0;
+ fl->sdesc = NULL;
+ fl->desc = NULL;
+ }
+ return ret;
+}
+
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,
+ unsigned int abs_id)
+{
+ q->cntxt_id = id;
+ q->abs_id = abs_id;
+ q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
+ &q->bar2_qid);
+ q->cidx = 0;
+ q->pidx = 0;
+ q->dbidx = 0;
+ q->in_use = 0;
+ q->equeidx = 0;
+ q->coalesce.idx = 0;
+ q->coalesce.len = 0;
+ q->coalesce.flits = 0;
+ q->last_coal_idx = 0;
+ q->last_pidx = 0;
+ q->stat = (void *)&q->desc[q->size];
+}
+
+int t4_sge_eth_txq_start(struct sge_eth_txq *txq)
+{
+ /*
+ * TODO: For flow-control, queue may be stopped waiting to reclaim
+ * credits.
+ * Ensure queue is in EQ_STOPPED state before starting it.
+ */
+ if (!(txq->flags & EQ_STOPPED))
+ return -(EBUSY);
+
+ txq->flags &= ~EQ_STOPPED;
+
+ return 0;
+}
+
+int t4_sge_eth_txq_stop(struct sge_eth_txq *txq)
+{
+ txq->flags |= EQ_STOPPED;
+
+ return 0;
+}
+
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id)
+{
+ int ret, nentries;
+ struct fw_eq_eth_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = eth_dev->data->dev_private;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+ u8 pciechan;
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id, "tx_ring");
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc), &txq->q.phys_addr,
+ &txq->q.sdesc, s->stat_len, queue_id,
+ socket_id, z_name, z_name_sw);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |
+ V_FW_EQ_ETH_CMD_VFN(0));
+ } else {
+ pciechan = pi->port_id;
+ }
+
+ c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
+ F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
+ c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
+ V_FW_EQ_ETH_CMD_VIID(pi->viid));
+ c.fetchszm_to_iqid =
+ htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |
+ F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
+ c.dcaen_to_eqsize =
+ htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_ETH_CMD_EQSIZE(nentries));
+ c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
+
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
+ if (ret) {
+ rte_free(txq->q.sdesc);
+ txq->q.sdesc = NULL;
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),
+ G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));
+ txq->stats.tso = 0;
+ txq->stats.pkts = 0;
+ txq->stats.tx_cso = 0;
+ txq->stats.coal_wr = 0;
+ txq->stats.vlan_ins = 0;
+ txq->stats.tx_bytes = 0;
+ txq->stats.coal_pkts = 0;
+ txq->stats.mapping_err = 0;
+ txq->flags |= EQ_STOPPED;
+ txq->eth_dev = eth_dev;
+ txq->data = eth_dev->data;
+ t4_os_lock_init(&txq->txq_lock);
+ return 0;
+}
+
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id)
+{
+ int ret, nentries;
+ struct fw_eq_ctrl_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = eth_dev->data->dev_private;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id, "ctrl_tx_ring");
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+ 0, &txq->q.phys_addr,
+ NULL, 0, queue_id,
+ socket_id, z_name, z_name_sw);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
+ V_FW_EQ_CTRL_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
+ F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
+ c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
+ c.physeqid_pkd = htonl(0);
+ c.fetchszm_to_iqid =
+ htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
+ c.dcaen_to_eqsize =
+ htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret) {
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),
+ G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd)));
+ txq->adapter = adap;
+ txq->full = 0;
+ return 0;
+}
+
+static void free_txq(struct sge_txq *q)
+{
+ q->cntxt_id = 0;
+ q->sdesc = NULL;
+ q->desc = NULL;
+}
+
+static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+ struct sge_fl *fl)
+{
+ unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
+
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
+ rq->cntxt_id, fl_id, 0xffff);
+ rq->cntxt_id = 0;
+ rq->abs_id = 0;
+ rq->desc = NULL;
+
+ if (fl) {
+ free_rx_bufs(fl, fl->avail);
+ rte_free(fl->sdesc);
+ fl->sdesc = NULL;
+ fl->cntxt_id = 0;
+ fl->desc = NULL;
+ }
+}
+
+/*
+ * Clear all queues of the port
+ *
+ * Note: This function must only be called after the rx and tx paths
+ * of the port have been disabled.
+ */
+void t4_sge_eth_clear_queues(struct port_info *pi)
+{
+ int i;
+ struct adapter *adap = pi->adapter;
+ struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
+ struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+
+ for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
+ if (rxq->rspq.desc)
+ t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ }
+ for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
+ if (txq->q.desc) {
+ struct sge_txq *q = &txq->q;
+
+ t4_sge_eth_txq_stop(txq);
+ reclaim_completed_tx(q);
+ free_tx_desc(q, q->size);
+ q->equeidx = q->pidx;
+ }
+ }
+}
+
+void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
+{
+ if (rxq->rspq.desc) {
+ t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
+ }
+}
+
+void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq)
+{
+ if (txq->q.desc) {
+ t4_sge_eth_txq_stop(txq);
+ reclaim_completed_tx(&txq->q);
+ t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id);
+ free_tx_desc(&txq->q, txq->q.size);
+ rte_free(txq->q.sdesc);
+ free_txq(&txq->q);
+ }
+}
+
+void t4_sge_tx_monitor_start(struct adapter *adap)
+{
+ rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
+}
+
+void t4_sge_tx_monitor_stop(struct adapter *adap)
+{
+ rte_eal_alarm_cancel(tx_timer_cb, (void *)adap);
+}
+
+/**
+ * t4_free_sge_resources - free SGE resources
+ * @adap: the adapter
+ *
+ * Frees resources used by the SGE queue sets.
+ */
+void t4_free_sge_resources(struct adapter *adap)
+{
+ unsigned int i;
+ struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
+ struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
+
+ /* clean up Ethernet Tx/Rx queues */
+ for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) {
+ /* Free only the queues allocated */
+ if (rxq->rspq.desc) {
+ t4_sge_eth_rxq_release(adap, rxq);
+ rxq->rspq.eth_dev = NULL;
+ }
+ if (txq->q.desc) {
+ t4_sge_eth_txq_release(adap, txq);
+ txq->eth_dev = NULL;
+ }
+ }
+
+ /* clean up control Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+ if (cq->q.desc) {
+ reclaim_completed_tx_imm(&cq->q);
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+ cq->q.cntxt_id);
+ free_txq(&cq->q);
+ }
+ }
+
+ if (adap->sge.fw_evtq.desc)
+ free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+}
+
+/**
+ * t4_sge_init - initialize SGE
+ * @adap: the adapter
+ *
+ * Performs SGE initialization needed every time after a chip reset.
+ * We do not initialize any of the queues here; instead the driver
+ * top-level must request those individually.
+ *
+ * Called in two different modes:
+ *
+ * 1. Perform actual hardware initialization and record hard-coded
+ * parameters which were used. This gets used when we're the
+ * Master PF and the Firmware Configuration File support didn't
+ * work for some reason.
+ *
+ * 2. We're not the Master PF or initialization was performed with
+ * a Firmware Configuration File. In this case we need to grab
+ * any of the SGE operating parameters that we need to have in
+ * order to do our job and make sure we can live with them ...
+ */
+static int t4_sge_init_soft(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
+ u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+ u32 ingress_rx_threshold;
+
+ /*
+ * Verify that CPL messages are going to the Ingress Queue for
+ * process_responses() and that only packet data is going to the
+ * Free Lists.
+ */
+ if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) !=
+ V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ dev_err(adap, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Validate the Host Buffer Register Array indices that we want to
+ * use ...
+ *
+ * XXX Note that we should really read through the Host Buffer Size
+ * XXX register array and find the indices of the Buffer Sizes which
+ * XXX meet our needs!
+ */
+#define READ_FL_BUF(x) \
+ t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))
+
+ fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
+ fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
+ fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
+ fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+
+ /*
+ * We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+#undef READ_FL_BUF
+
+ /*
+ * The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
+ dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
+ fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
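+	/* fl_pg_order is the extra power-of-2 factor of the large buffer
+	 * over the host page size, e.g. a 64KB large buffer with 4KB pages
+	 * gives ilog2(65536) - 12 = 4.
+	 */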
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+
+ if (adap->use_unpacked_mode) {
+ int err = 0;
+
+ if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) {
+ dev_err(adap, "bad SGE FL small MTU %d\n",
+ fl_small_mtu);
+ err = -EINVAL;
+ }
+ if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
+ dev_err(adap, "bad SGE FL large MTU %d\n",
+ fl_large_mtu);
+ err = -EINVAL;
+ }
+ if (err)
+ return err;
+ }
+
+ /*
+ * Retrieve our RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1);
+ timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3);
+ timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5);
+ s->timer_val[0] = core_ticks_to_us(adap,
+ G_TIMERVALUE0(timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ G_TIMERVALUE1(timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ G_TIMERVALUE2(timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ G_TIMERVALUE3(timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ G_TIMERVALUE4(timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ G_TIMERVALUE5(timer_value_4_and_5));
+
+ ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD);
+ s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold);
+ s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold);
+ s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold);
+ s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold);
+
+ return 0;
+}
+
+int t4_sge_init(struct adapter *adap)
+{
+ struct sge *s = &adap->sge;
+ u32 sge_control, sge_conm_ctrl;
+ int ret, egress_threshold;
+
+ /*
+ * Ingress Padding Boundary and Egress Status Page Size are set up by
+ * t4_fixup_host_params().
+ */
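+	/* Cache the chip-wide settings needed on the fast path: pktshift is
+	 * the padding the chip places ahead of each ingress packet, stat_len
+	 * the egress status page size selected by F_EGRSTATUSPAGESIZE, and
+	 * fl_align the required Free List buffer alignment.
+	 */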
+ sge_control = t4_read_reg(adap, A_SGE_CONTROL);
+ s->pktshift = G_PKTSHIFT(sge_control);
+ s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
+ s->fl_align = t4_fl_pkt_align(adap);
+ ret = t4_sge_init_soft(adap);
+ if (ret < 0) {
+ dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
+ __func__, -ret);
+ return ret;
+ }
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.) For T4,
+ * there was only a single field to control this. For T5 there's the
+ * original field which now only applies to Unpacked Mode Free List
+ * buffers and a new field which only applies to Packed Mode Free List
+ * buffers.
+ */
+ sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL);
+ if (is_t4(adap->params.chip) || adap->use_unpacked_mode)
+ egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
+ else
+ egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
+ s->fl_starve_thres = 2 * egress_threshold + 1;
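+	/* The doubling converts from the hardware's 2-pointer units, e.g. an
+	 * EGRTHRESHOLD of 64 marks congestion at 128 Free List entries, so
+	 * refilling kicks in once a FL drops to 129 buffers or fewer.
+	 */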
+
+ return 0;
+}
+
+int t4vf_sge_init(struct adapter *adap)
+{
+ struct sge_params *sge_params = &adap->params.sge;
+ u32 sge_ingress_queues_per_page;
+ u32 sge_egress_queues_per_page;
+ u32 sge_control, sge_control2;
+ u32 fl_small_pg, fl_large_pg;
+ u32 sge_ingress_rx_threshold;
+ u32 sge_timer_value_0_and_1;
+ u32 sge_timer_value_2_and_3;
+ u32 sge_timer_value_4_and_5;
+ u32 sge_congestion_control;
+ struct sge *s = &adap->sge;
+ unsigned int s_hps, s_qpp;
+ u32 sge_host_page_size;
+ u32 params[7], vals[7];
+ int v;
+
+	/* Query the basic SGE parameters from firmware: a VF has no direct
+	 * access to these registers, so FW_PARAMS_MNEM_REG asks firmware to
+	 * read them on our behalf, with the register offset as parameter ID.
+	 */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
+ params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
+ params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
+ params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
+ v = t4vf_query_params(adap, 7, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+
+ sge_control = vals[0];
+ sge_host_page_size = vals[1];
+ fl_small_pg = vals[2];
+ fl_large_pg = vals[3];
+ sge_timer_value_0_and_1 = vals[4];
+ sge_timer_value_2_and_3 = vals[5];
+ sge_timer_value_4_and_5 = vals[6];
+
+ /*
+ * Start by vetting the basic SGE parameters which have been set up by
+ * the Physical Function Driver.
+ */
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
+		dev_err(adap, "bad SGE FL buffer sizes [%d, %d]\n",
+ fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
+
+ if ((sge_control & F_RXPKTCPLMODE) !=
+ V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+		dev_err(adap, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+	/* Grab the ingress packing boundary from SGE_CONTROL2 (T5 and later). */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
+ v = t4vf_query_params(adap, 1, params, vals);
+ if (v != FW_SUCCESS) {
+		dev_err(adap,
+			"Unable to get SGE Control2; probably old firmware.\n");
+ return v;
+ }
+ sge_control2 = vals[0];
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ sge_ingress_rx_threshold = vals[0];
+ sge_congestion_control = vals[1];
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_warn(adap, "Unable to get VF SGE Queues/Page; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_egress_queues_per_page = vals[0];
+ sge_ingress_queues_per_page = vals[1];
+
+ /*
+	 * We need the Host Page Size and Queues/Page for our VF. These are
+	 * based on the PF from which we're instantiated and are indexed in
+	 * the registers we just read.
+ */
+ s_hps = (S_HOSTPAGESIZEPF0 +
+ (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);
+ sge_params->hps =
+ ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);
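+	/* The HOSTPAGESIZEPF fields hold log2(page size) - 10 as programmed
+	 * by the PF driver, e.g. 4KB host pages encode as 2.
+	 */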
+
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);
+ sge_params->eq_qpp =
+ ((sge_egress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+ sge_params->iq_qpp =
+ ((sge_ingress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
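+	/* The Queues/Page fields are log2 encoded: a value of n means 2^n
+	 * queue doorbells share one host page.
+	 */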
+
+ /*
+ * Now translate the queried parameters into our internal forms.
+ */
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+ s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)
+ ? 128 : 64);
+ s->pktshift = G_PKTSHIFT(sge_control);
+ s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T5:
+ s->fl_starve_thres =
+ G_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ case CHELSIO_T6:
+ default:
+ s->fl_starve_thres =
+ G_T6_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ }
+ s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
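+	/* Same 2 * n + 1 conversion from 2-pointer units as the PF path in
+	 * t4_sge_init().
+	 */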
+
+ /*
+ * Save RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ s->timer_val[0] = core_ticks_to_us(adap,
+ G_TIMERVALUE0(sge_timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ G_TIMERVALUE1(sge_timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ G_TIMERVALUE2(sge_timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ G_TIMERVALUE3(sge_timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ G_TIMERVALUE4(sge_timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ G_TIMERVALUE5(sge_timer_value_4_and_5));
+ s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);
+ s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);
+ s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);
+ s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);
+ return 0;
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/smt.c b/src/spdk/dpdk/drivers/net/cxgbe/smt.c
new file mode 100644
index 000000000..e8f38676e
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/smt.c
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Chelsio Communications.
+ * All rights reserved.
+ */
+
+#include "base/common.h"
+#include "smt.h"
+
+void cxgbe_do_smt_write_rpl(struct adapter *adap,
+ const struct cpl_smt_write_rpl *rpl)
+{
+ unsigned int smtidx = G_TID_TID(GET_TID(rpl));
+ struct smt_data *s = adap->smt;
+
+ if (unlikely(rpl->status != CPL_ERR_NONE)) {
+ struct smt_entry *e = &s->smtab[smtidx];
+
+ dev_err(adap,
+ "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
+ rpl->status, smtidx);
+ t4_os_lock(&e->lock);
+ e->state = SMT_STATE_ERROR;
+ t4_os_unlock(&e->lock);
+ }
+}
+
+static int write_smt_entry(struct rte_eth_dev *dev, struct smt_entry *e)
+{
+ unsigned int port_id = ethdev2pinfo(dev)->port_id;
+ struct adapter *adap = ethdev2adap(dev);
+ struct cpl_t6_smt_write_req *t6req;
+ struct smt_data *s = adap->smt;
+ struct cpl_smt_write_req *req;
+ struct sge_ctrl_txq *ctrlq;
+ struct rte_mbuf *mbuf;
+ u8 row;
+
+ ctrlq = &adap->sge.ctrlq[port_id];
+ mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+ if (!mbuf)
+ return -ENOMEM;
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
+ mbuf->data_len = sizeof(*req);
+ mbuf->pkt_len = mbuf->data_len;
+
+ /* Source MAC Table (SMT) contains 256 SMAC entries
+ * organized in 128 rows of 2 entries each.
+ */
+ req = rte_pktmbuf_mtod(mbuf, struct cpl_smt_write_req *);
+ INIT_TP_WR(req, 0);
+
+ /* Each row contains an SMAC pair.
+ * LSB selects the SMAC entry within a row
+ */
+ if (e->idx & 1) {
+ req->pfvf1 = 0x0;
+ rte_memcpy(req->src_mac1, e->src_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ /* fill pfvf0/src_mac0 with entry
+ * at prev index from smt-tab.
+ */
+ req->pfvf0 = 0x0;
+ rte_memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
+ RTE_ETHER_ADDR_LEN);
+ } else {
+ req->pfvf0 = 0x0;
+ rte_memcpy(req->src_mac0, e->src_mac,
+ RTE_ETHER_ADDR_LEN);
+
+ /* fill pfvf1/src_mac1 with entry
+ * at next index from smt-tab
+ */
+ req->pfvf1 = 0x0;
+ rte_memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
+ RTE_ETHER_ADDR_LEN);
+ }
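+		/* Rows hold SMAC pairs: hw_idx 2n and 2n + 1 share row n,
+		 * e.g. hw_idx 5 lands in row 2, entry 1.
+		 */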
+ row = (e->hw_idx >> 1);
+ } else {
+ mbuf->data_len = sizeof(*t6req);
+ mbuf->pkt_len = mbuf->data_len;
+
+ /* Source MAC Table (SMT) contains 256 SMAC entries */
+ t6req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_smt_write_req *);
+ INIT_TP_WR(t6req, 0);
+
+ /* fill pfvf0/src_mac0 from smt-tab */
+ t6req->pfvf0 = 0x0;
+ rte_memcpy(t6req->src_mac0, s->smtab[e->idx].src_mac,
+ RTE_ETHER_ADDR_LEN);
+ row = e->hw_idx;
+ req = (struct cpl_smt_write_req *)t6req;
+ }
+
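+	/* Steer the CPL_SMT_WRITE_RPL to the firmware event queue so the
+	 * completion is handled by cxgbe_do_smt_write_rpl().
+	 */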
+ OPCODE_TID(req) =
+ cpu_to_be32(MK_OPCODE_TID(CPL_SMT_WRITE_REQ,
+ e->hw_idx |
+ V_TID_QID(adap->sge.fw_evtq.abs_id)));
+
+ req->params = cpu_to_be32(V_SMTW_NORPL(0) |
+ V_SMTW_IDX(row) |
+ V_SMTW_OVLAN_IDX(0));
+ t4_mgmt_tx(ctrlq, mbuf);
+
+ return 0;
+}
+
+/**
+ * find_or_alloc_smte - Find/Allocate a free SMT entry
+ * @s: SMT table
+ * @smac: Source MAC address to compare/add
+ * Returns pointer to the SMT entry found/created
+ *
+ * Finds/Allocates an SMT entry to be used by switching rule of a filter.
+ */
+static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
+{
+ struct smt_entry *e, *end, *first_free = NULL;
+
+ for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
+ if (!rte_atomic32_read(&e->refcnt)) {
+ if (!first_free)
+ first_free = e;
+ } else {
+ if (e->state == SMT_STATE_SWITCHING) {
+ /* This entry is actually in use. See if we can
+ * re-use it ?
+ */
+ if (!memcmp(e->src_mac, smac,
+ RTE_ETHER_ADDR_LEN))
+ goto found;
+ }
+ }
+ }
+
+ if (!first_free)
+ return NULL;
+
+ e = first_free;
+ e->state = SMT_STATE_UNUSED;
+
+found:
+ return e;
+}
+
+static struct smt_entry *t4_smt_alloc_switching(struct rte_eth_dev *dev,
+ u16 pfvf, u8 *smac)
+{
+ struct adapter *adap = ethdev2adap(dev);
+ struct smt_data *s = adap->smt;
+ struct smt_entry *e;
+ int ret;
+
+ t4_os_write_lock(&s->lock);
+ e = find_or_alloc_smte(s, smac);
+ if (e) {
+ t4_os_lock(&e->lock);
+ if (!rte_atomic32_read(&e->refcnt)) {
+ e->pfvf = pfvf;
+ rte_memcpy(e->src_mac, smac, RTE_ETHER_ADDR_LEN);
+ ret = write_smt_entry(dev, e);
+ if (ret) {
+ e->pfvf = 0;
+ memset(e->src_mac, 0, RTE_ETHER_ADDR_LEN);
+ t4_os_unlock(&e->lock);
+ e = NULL;
+ goto out_write_unlock;
+ }
+ e->state = SMT_STATE_SWITCHING;
+ rte_atomic32_set(&e->refcnt, 1);
+ } else {
+ rte_atomic32_inc(&e->refcnt);
+ }
+ t4_os_unlock(&e->lock);
+ }
+
+out_write_unlock:
+ t4_os_write_unlock(&s->lock);
+ return e;
+}
+
+/**
+ * cxgbe_smt_alloc_switching - Allocate an SMT entry for switching rule
+ * @dev: rte_eth_dev pointer
+ * @smac: MAC address to add to SMT
+ * Returns pointer to the SMT entry created
+ *
+ * Allocates an SMT entry to be used by switching rule of a filter.
+ */
+struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac)
+{
+ return t4_smt_alloc_switching(dev, 0x0, smac);
+}
+
+/**
+ * Initialize Source MAC Table
+ */
+struct smt_data *t4_init_smt(u32 smt_start_idx, u32 smt_size)
+{
+ struct smt_data *s;
+ u32 i;
+
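+	/* A single allocation covers the header plus the trailing smtab[]
+	 * array of smt_size entries.
+	 */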
+ s = t4_alloc_mem(sizeof(*s) + smt_size * sizeof(struct smt_entry));
+ if (!s)
+ return NULL;
+
+ s->smt_start = smt_start_idx;
+ s->smt_size = smt_size;
+ t4_os_rwlock_init(&s->lock);
+
+ for (i = 0; i < s->smt_size; ++i) {
+ s->smtab[i].idx = i;
+ s->smtab[i].hw_idx = smt_start_idx + i;
+ s->smtab[i].state = SMT_STATE_UNUSED;
+ memset(&s->smtab[i].src_mac, 0, RTE_ETHER_ADDR_LEN);
+ t4_os_lock_init(&s->smtab[i].lock);
+ rte_atomic32_set(&s->smtab[i].refcnt, 0);
+ }
+ return s;
+}
+
+/**
+ * Cleanup Source MAC Table
+ */
+void t4_cleanup_smt(struct adapter *adap)
+{
+ if (adap->smt)
+ t4_os_free(adap->smt);
+}
diff --git a/src/spdk/dpdk/drivers/net/cxgbe/smt.h b/src/spdk/dpdk/drivers/net/cxgbe/smt.h
new file mode 100644
index 000000000..be1fab8ba
--- /dev/null
+++ b/src/spdk/dpdk/drivers/net/cxgbe/smt.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2020 Chelsio Communications.
+ * All rights reserved.
+ */
+#ifndef __CXGBE_SMT_H_
+#define __CXGBE_SMT_H_
+
+#include "base/t4_msg.h"
+
+enum {
+ SMT_STATE_SWITCHING,
+ SMT_STATE_UNUSED,
+ SMT_STATE_ERROR
+};
+
+enum {
+ SMT_SIZE = 256
+};
+
+struct smt_entry {
+ u16 state;
+ u16 idx;
+ u16 pfvf;
+ u16 hw_idx;
+ u8 src_mac[RTE_ETHER_ADDR_LEN];
+ rte_atomic32_t refcnt;
+ rte_spinlock_t lock;
+};
+
+struct smt_data {
+ unsigned int smt_size;
+ unsigned int smt_start;
+ rte_rwlock_t lock;
+ struct smt_entry smtab[0];
+};
+
+struct smt_data *t4_init_smt(u32 smt_start_idx, u32 smt_size);
+void t4_cleanup_smt(struct adapter *adap);
+void cxgbe_do_smt_write_rpl(struct adapter *adap,
+ const struct cpl_smt_write_rpl *rpl);
+struct smt_entry *cxgbe_smt_alloc_switching(struct rte_eth_dev *dev, u8 *smac);
+
+#endif /* __CXGBE_SMT_H_ */
+