author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/infiniband/hw/ocrdma
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/infiniband/hw/ocrdma')
-rw-r--r--  drivers/infiniband/hw/ocrdma/Kconfig        |    9
-rw-r--r--  drivers/infiniband/hw/ocrdma/Makefile       |    6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h       |  607
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.c    |  268
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_ah.h    |   64
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c    | 3240
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h    |  159
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c  |  432
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h   | 2240
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.c |  838
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.h |   74
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 2970
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.h |  107
13 files changed, 11014 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/ocrdma/Kconfig b/drivers/infiniband/hw/ocrdma/Kconfig
new file mode 100644
index 000000000..54bd70bc4
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_OCRDMA
+ tristate "Emulex One Connect HCA support"
+ depends on ETHERNET && NETDEVICES && PCI && INET && (IPV6 || IPV6=n)
+ select NET_VENDOR_EMULEX
+ select BE2NET
+ help
+ This driver provides low-level InfiniBand over Ethernet
+ support for Emulex One Connect host channel adapters (HCAs).
diff --git a/drivers/infiniband/hw/ocrdma/Makefile b/drivers/infiniband/hw/ocrdma/Makefile
new file mode 100644
index 000000000..14fba9502
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/emulex/benet
+
+obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma.o
+
+ocrdma-y := ocrdma_main.o ocrdma_verbs.o ocrdma_hw.o ocrdma_ah.o ocrdma_stats.o
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
new file mode 100644
index 000000000..5eb61c110
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -0,0 +1,607 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef __OCRDMA_H__
+#define __OCRDMA_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+
+#include <be_roce.h>
+#include "ocrdma_sli.h"
+
+#define OCRDMA_ROCE_DRV_VERSION "11.0.0.0"
+
+#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
+#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
+
+#define OC_NAME_SH OCRDMA_NODE_DESC "(Skyhawk)"
+#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)"
+
+#define OC_SKH_DEVICE_PF 0x720
+#define OC_SKH_DEVICE_VF 0x728
+#define OCRDMA_MAX_AH 512
+
+#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
+
+#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
+#define EQ_INTR_PER_SEC_THRSH_HI 150000
+#define EQ_INTR_PER_SEC_THRSH_LOW 100000
+#define EQ_AIC_MAX_EQD 20
+#define EQ_AIC_MIN_EQD 0
+
+void ocrdma_eqd_set_task(struct work_struct *work);
+
+struct ocrdma_dev_attr {
+ u8 fw_ver[32];
+ u32 vendor_id;
+ u32 device_id;
+ u16 max_pd;
+ u16 max_dpp_pds;
+ u16 max_cq;
+ u16 max_cqe;
+ u16 max_qp;
+ u16 max_wqe;
+ u16 max_rqe;
+ u16 max_srq;
+ u32 max_inline_data;
+ int max_send_sge;
+ int max_recv_sge;
+ int max_srq_sge;
+ int max_rdma_sge;
+ int max_mr;
+ u64 max_mr_size;
+ u32 max_num_mr_pbl;
+ int max_mw;
+ int max_map_per_fmr;
+ int max_pages_per_frmr;
+ u16 max_ord_per_qp;
+ u16 max_ird_per_qp;
+
+ int device_cap_flags;
+ u8 cq_overflow_detect;
+ u8 srq_supported;
+
+ u32 wqe_size;
+ u32 rqe_size;
+ u32 ird_page_size;
+ u8 local_ca_ack_delay;
+ u8 ird;
+ u8 num_ird_pages;
+ u8 udp_encap;
+};
+
+struct ocrdma_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+};
+
+struct ocrdma_pbl {
+ void *va;
+ dma_addr_t pa;
+};
+
+struct ocrdma_queue_info {
+ void *va;
+ dma_addr_t dma;
+ u32 size;
+ u16 len;
+ u16 entry_size; /* Size of an element in the queue */
+ u16 id; /* qid, where to ring the doorbell. */
+ u16 head, tail;
+ bool created;
+};
+
+struct ocrdma_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
+ u32 prev_eqd;
+ u64 eq_intr_cnt;
+ u64 prev_eq_intr_cnt;
+};
+
+struct ocrdma_eq {
+ struct ocrdma_queue_info q;
+ u32 vector;
+ int cq_cnt;
+ struct ocrdma_dev *dev;
+ char irq_name[32];
+ struct ocrdma_aic_obj aic_obj;
+};
+
+struct ocrdma_mq {
+ struct ocrdma_queue_info sq;
+ struct ocrdma_queue_info cq;
+ bool rearm_cq;
+};
+
+struct mqe_ctx {
+ struct mutex lock; /* for serializing mailbox commands on MQ */
+ wait_queue_head_t cmd_wait;
+ u32 tag;
+ u16 cqe_status;
+ u16 ext_status;
+ bool cmd_done;
+ bool fw_error_state;
+};
+
+struct ocrdma_hw_mr {
+ u32 lkey;
+ u8 fr_mr;
+ u8 remote_atomic;
+ u8 remote_rd;
+ u8 remote_wr;
+ u8 local_rd;
+ u8 local_wr;
+ u8 mw_bind;
+ u8 rsvd;
+ u64 len;
+ struct ocrdma_pbl *pbl_table;
+ u32 num_pbls;
+ u32 num_pbes;
+ u32 pbl_size;
+ u32 pbe_size;
+ u64 va;
+};
+
+struct ocrdma_mr {
+ struct ib_mr ibmr;
+ struct ib_umem *umem;
+ struct ocrdma_hw_mr hwmr;
+ u64 *pages;
+ u32 npages;
+};
+
+struct ocrdma_stats {
+ u8 type;
+ struct ocrdma_dev *dev;
+};
+
+struct ocrdma_pd_resource_mgr {
+ u32 pd_norm_start;
+ u16 pd_norm_count;
+ u16 pd_norm_thrsh;
+ u16 max_normal_pd;
+ u32 pd_dpp_start;
+ u16 pd_dpp_count;
+ u16 pd_dpp_thrsh;
+ u16 max_dpp_pd;
+ u16 dpp_page_index;
+ unsigned long *pd_norm_bitmap;
+ unsigned long *pd_dpp_bitmap;
+ bool pd_prealloc_valid;
+};
+
+struct stats_mem {
+ struct ocrdma_mqe mqe;
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+ char *debugfs_mem;
+};
+
+struct phy_info {
+ u16 auto_speeds_supported;
+ u16 fixed_speeds_supported;
+ u16 phy_type;
+ u16 interface_type;
+};
+
+enum ocrdma_flags {
+ OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01
+};
+
+struct ocrdma_dev {
+ struct ib_device ibdev;
+ struct ocrdma_dev_attr attr;
+
+ struct mutex dev_lock; /* provides synchronized access to device data */
+ spinlock_t flush_q_lock ____cacheline_aligned;
+
+ struct ocrdma_cq **cq_tbl;
+ struct ocrdma_qp **qp_tbl;
+
+ struct ocrdma_eq *eq_tbl;
+ int eq_cnt;
+ struct delayed_work eqd_work;
+ u16 base_eqid;
+ u16 max_eq;
+
+ /* provides synchronization to the sgid table for
+ * updating gid entries triggered by the notifier.
+ */
+ spinlock_t sgid_lock;
+
+ int gsi_qp_created;
+ struct ocrdma_cq *gsi_sqcq;
+ struct ocrdma_cq *gsi_rqcq;
+
+ struct {
+ struct ocrdma_av *va;
+ dma_addr_t pa;
+ u32 size;
+ u32 num_ah;
+ /* provide synchronization for av
+ * entry allocations.
+ */
+ spinlock_t lock;
+ u32 ahid;
+ struct ocrdma_pbl pbl;
+ } av_tbl;
+
+ void *mbx_cmd;
+ struct ocrdma_mq mq;
+ struct mqe_ctx mqe_ctx;
+
+ struct be_dev_info nic_info;
+ struct phy_info phy;
+ char model_number[32];
+ u32 hba_port_num;
+
+ struct list_head entry;
+ int id;
+ u64 *stag_arr;
+ u8 sl; /* service level */
+ bool pfc_state;
+ atomic_t update_sl;
+ u16 pvid;
+ u32 asic_id;
+ u32 flags;
+
+ ulong last_stats_time;
+ struct mutex stats_lock; /* provide synch for debugfs operations */
+ struct stats_mem stats_mem;
+ struct ocrdma_stats rsrc_stats;
+ struct ocrdma_stats rx_stats;
+ struct ocrdma_stats wqe_stats;
+ struct ocrdma_stats tx_stats;
+ struct ocrdma_stats db_err_stats;
+ struct ocrdma_stats tx_qp_err_stats;
+ struct ocrdma_stats rx_qp_err_stats;
+ struct ocrdma_stats tx_dbg_stats;
+ struct ocrdma_stats rx_dbg_stats;
+ struct ocrdma_stats driver_stats;
+ struct ocrdma_stats reset_stats;
+ struct dentry *dir;
+ atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
+ atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
+ struct ocrdma_pd_resource_mgr *pd_mgr;
+};
+
+struct ocrdma_cq {
+ struct ib_cq ibcq;
+ struct ocrdma_cqe *va;
+ u32 phase;
+ u32 getp; /* pointer to pending wrs to
+ * return to the stack; wraps around
+ * at max_hw_cqe
+ */
+ u32 max_hw_cqe;
+ bool phase_change;
+ spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
+ * to cq polling
+ */
+ /* synchronizes cq completion handlers invoked from multiple contexts */
+ spinlock_t comp_handler_lock ____cacheline_aligned;
+ u16 id;
+ u16 eqn;
+
+ struct ocrdma_ucontext *ucontext;
+ dma_addr_t pa;
+ u32 len;
+ u32 cqe_cnt;
+
+ /* head of all qp's sq and rq for which cqes need to be flushed
+ * by the software.
+ */
+ struct list_head sq_head, rq_head;
+};
+
+struct ocrdma_pd {
+ struct ib_pd ibpd;
+ struct ocrdma_ucontext *uctx;
+ u32 id;
+ int num_dpp_qp;
+ u32 dpp_page;
+ bool dpp_enabled;
+};
+
+struct ocrdma_ah {
+ struct ib_ah ibah;
+ struct ocrdma_av *av;
+ u16 sgid_index;
+ u32 id;
+ u8 hdr_type;
+};
+
+struct ocrdma_qp_hwq_info {
+ u8 *va; /* virtual address */
+ u32 max_sges;
+ u32 head, tail;
+ u32 entry_size;
+ u32 max_cnt;
+ u32 max_wqe_idx;
+ u16 dbid; /* qid, where to ring the doorbell. */
+ u32 len;
+ dma_addr_t pa;
+};
+
+struct ocrdma_srq {
+ struct ib_srq ibsrq;
+ u8 __iomem *db;
+ struct ocrdma_qp_hwq_info rq;
+ u64 *rqe_wr_id_tbl;
+ u32 *idx_bit_fields;
+ u32 bit_fields_len;
+
+ /* provides synchronization for multiple contexts posting rqes */
+ spinlock_t q_lock ____cacheline_aligned;
+
+ struct ocrdma_pd *pd;
+ u32 id;
+};
+
+struct ocrdma_qp {
+ struct ib_qp ibqp;
+
+ u8 __iomem *sq_db;
+ struct ocrdma_qp_hwq_info sq;
+ struct {
+ uint64_t wrid;
+ uint16_t dpp_wqe_idx;
+ uint16_t dpp_wqe;
+ uint8_t signaled;
+ uint8_t rsvd[3];
+ } *wqe_wr_id_tbl;
+ u32 max_inline_data;
+
+ /* provides synchronization for multiple contexts posting wqes and rqes */
+ spinlock_t q_lock ____cacheline_aligned;
+ struct ocrdma_cq *sq_cq;
+ /* list maintained per CQ to flush SQ errors */
+ struct list_head sq_entry;
+
+ u8 __iomem *rq_db;
+ struct ocrdma_qp_hwq_info rq;
+ u64 *rqe_wr_id_tbl;
+ struct ocrdma_cq *rq_cq;
+ struct ocrdma_srq *srq;
+ /* list maintained per CQ to flush RQ errors */
+ struct list_head rq_entry;
+
+ enum ocrdma_qp_state state; /* QP state */
+ int cap_flags;
+ u32 max_ord, max_ird;
+
+ u32 id;
+ struct ocrdma_pd *pd;
+
+ enum ib_qp_type qp_type;
+
+ int sgid_idx;
+ u32 qkey;
+ bool dpp_enabled;
+ u8 *ird_q_va;
+ bool signaled;
+};
+
+struct ocrdma_ucontext {
+ struct ib_ucontext ibucontext;
+
+ struct list_head mm_head;
+ struct mutex mm_list_lock; /* protects list entries of mm type */
+ struct ocrdma_pd *cntxt_pd;
+ int pd_in_use;
+
+ struct {
+ u32 *va;
+ dma_addr_t pa;
+ u32 len;
+ } ah_tbl;
+};
+
+struct ocrdma_mm {
+ struct {
+ u64 phy_addr;
+ unsigned long len;
+ } key;
+ struct list_head entry;
+};
+
+static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct ocrdma_dev, ibdev);
+}
+
+static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
+ *ibucontext)
+{
+ return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
+}
+
+static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct ocrdma_pd, ibpd);
+}
+
+static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct ocrdma_cq, ibcq);
+}
+
+static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct ocrdma_qp, ibqp);
+}
+
+static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct ocrdma_mr, ibmr);
+}
+
+static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct ocrdma_ah, ibah);
+}
+
+static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct ocrdma_srq, ibsrq);
+}
+
+static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
+{
+ int cqe_valid;
+ cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
+ return (cqe_valid == cq->phase);
+}
+
+static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
+{
+ return (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_QTYPE) ? 0 : 1;
+}
+
+static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
+{
+ return (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_INVALIDATE) ? 1 : 0;
+}
+
+static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
+{
+ return (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_IMM) ? 1 : 0;
+}
+
+static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
+{
+ return (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
+}
+
+static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
+ struct rdma_ah_attr *ah_attr, u8 *mac_addr)
+{
+ struct in6_addr in6;
+
+ memcpy(&in6, rdma_ah_read_grh(ah_attr)->dgid.raw, sizeof(in6));
+ if (rdma_is_multicast_addr(&in6))
+ rdma_get_mcast_mac(&in6, mac_addr);
+ else if (rdma_link_local_addr(&in6))
+ rdma_get_ll_mac(&in6, mac_addr);
+ else
+ memcpy(mac_addr, ah_attr->roce.dmac, ETH_ALEN);
+ return 0;
+}
+
+static inline char *hca_name(struct ocrdma_dev *dev)
+{
+ switch (dev->nic_info.pdev->device) {
+ case OC_SKH_DEVICE_PF:
+ case OC_SKH_DEVICE_VF:
+ return OC_NAME_SH;
+ default:
+ return OC_NAME_UNKNOWN;
+ }
+}
+
+static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
+ int eqid)
+{
+ int indx;
+
+ for (indx = 0; indx < dev->eq_cnt; indx++) {
+ if (dev->eq_tbl[indx].q.id == eqid)
+ return indx;
+ }
+
+ return -EINVAL;
+}
+
+static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
+{
+ if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
+ pci_read_config_dword(
+ dev->nic_info.pdev,
+ OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id);
+ }
+
+ return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
+ OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
+}
+
+static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio)
+{
+ return *(pfc + prio);
+}
+
+static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio)
+{
+ return *(app_prio + prio);
+}
+
+static inline u8 ocrdma_is_enabled_and_synced(u32 state)
+{ /* May also be used to interpret TC-state, QCN-state
+ * Appl-state and Logical-link-state in future.
+ */
+ return (state & OCRDMA_STATE_FLAG_ENABLED) &&
+ (state & OCRDMA_STATE_FLAG_SYNC);
+}
+
+static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
+{
+ return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
+}
+
+static inline bool ocrdma_is_udp_encap_supported(struct ocrdma_dev *dev)
+{
+ return (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV4) ||
+ (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV6);
+}
+
+#endif
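The get_ocrdma_*() accessors above (get_ocrdma_dev(), get_ocrdma_cq(), get_ocrdma_qp() and friends) all rely on the same idiom: each driver object embeds its ib_* core structure, and container_of() recovers the enclosing driver struct from the core pointer the verbs layer hands back. A minimal stand-alone sketch of that idiom follows; the names (core_cq, my_cq, to_my_cq) are hypothetical and are not part of this patch.

#include <stddef.h>

/* container_of() reduced to its offsetof form; everything here is an
 * illustrative model, not driver code.
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct core_cq { int cqe; };                      /* stands in for struct ib_cq */
struct my_cq   { struct core_cq ibcq; int id; };  /* stands in for struct ocrdma_cq */

static inline struct my_cq *to_my_cq(struct core_cq *ibcq)
{
	/* walk back from the embedded member to the enclosing structure */
	return container_of(ibcq, struct my_cq, ibcq);
}

Given only &some_cq.ibcq, to_my_cq() returns the surrounding my_cq, which mirrors how get_ocrdma_cq() maps an ib_cq back to its ocrdma_cq.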
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
new file mode 100644
index 000000000..88c459283
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -0,0 +1,268 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+
+#include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_cache.h>
+
+#include "ocrdma.h"
+#include "ocrdma_verbs.h"
+#include "ocrdma_ah.h"
+#include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
+
+#define OCRDMA_VID_PCP_SHIFT 0xD
+
+static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
+{
+ switch (hdr_type) {
+ case OCRDMA_L3_TYPE_IB_GRH:
+ return (u16)ETH_P_IBOE;
+ case OCRDMA_L3_TYPE_IPV4:
+ return (u16)0x0800;
+ case OCRDMA_L3_TYPE_IPV6:
+ return (u16)0x86dd;
+ default:
+ pr_err("ocrdma%d: Invalid network header\n", devid);
+ return 0;
+ }
+}
+
+static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
+ struct rdma_ah_attr *attr, const union ib_gid *sgid,
+ int pdid, bool *isvlan, u16 vlan_tag)
+{
+ int status;
+ struct ocrdma_eth_vlan eth;
+ struct ocrdma_grh grh;
+ int eth_sz;
+ u16 proto_num = 0;
+ u8 nxthdr = 0x11;
+ struct iphdr ipv4;
+ const struct ib_global_route *ib_grh;
+ union {
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
+
+ memset(&eth, 0, sizeof(eth));
+ memset(&grh, 0, sizeof(grh));
+
+ /* Protocol Number */
+ proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
+ if (!proto_num)
+ return -EINVAL;
+ nxthdr = (proto_num == ETH_P_IBOE) ? 0x1b : 0x11;
+ /* VLAN */
+ if (!vlan_tag || (vlan_tag > 0xFFF))
+ vlan_tag = dev->pvid;
+ if (vlan_tag || dev->pfc_state) {
+ if (!vlan_tag) {
+ pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
+ dev->id);
+ pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
+ dev->id);
+ }
+ eth.eth_type = cpu_to_be16(0x8100);
+ eth.roce_eth_type = cpu_to_be16(proto_num);
+ vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
+ eth.vlan_tag = cpu_to_be16(vlan_tag);
+ eth_sz = sizeof(struct ocrdma_eth_vlan);
+ *isvlan = true;
+ } else {
+ eth.eth_type = cpu_to_be16(proto_num);
+ eth_sz = sizeof(struct ocrdma_eth_basic);
+ }
+ /* MAC */
+ memcpy(&eth.smac[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
+ status = ocrdma_resolve_dmac(dev, attr, &eth.dmac[0]);
+ if (status)
+ return status;
+ ib_grh = rdma_ah_read_grh(attr);
+ ah->sgid_index = ib_grh->sgid_index;
+ /* Eth HDR */
+ memcpy(&ah->av->eth_hdr, &eth, eth_sz);
+ if (ah->hdr_type == RDMA_NETWORK_IPV4) {
+ *((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
+ ib_grh->traffic_class);
+ ipv4.id = cpu_to_be16(pdid);
+ ipv4.frag_off = htons(IP_DF);
+ ipv4.tot_len = htons(0);
+ ipv4.ttl = ib_grh->hop_limit;
+ ipv4.protocol = nxthdr;
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
+ ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
+ rdma_gid2ip((struct sockaddr*)&dgid_addr, &ib_grh->dgid);
+ ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
+ memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
+ } else {
+ memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+ grh.tclass_flow = cpu_to_be32((6 << 28) |
+ (ib_grh->traffic_class << 24) |
+ ib_grh->flow_label);
+ memcpy(&grh.dgid[0], ib_grh->dgid.raw,
+ sizeof(ib_grh->dgid.raw));
+ grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
+ (nxthdr << 8) |
+ ib_grh->hop_limit);
+ memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+ }
+ if (*isvlan)
+ ah->av->valid |= OCRDMA_AV_VLAN_VALID;
+ ah->av->valid = cpu_to_le32(ah->av->valid);
+ return status;
+}
+
+int ocrdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ u32 *ahid_addr;
+ int status;
+ struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
+ bool isvlan = false;
+ u16 vlan_tag = 0xffff;
+ const struct ib_gid_attr *sgid_attr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibah->pd);
+ struct rdma_ah_attr *attr = init_attr->ah_attr;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
+
+ if ((attr->type != RDMA_AH_ATTR_TYPE_ROCE) ||
+ !(rdma_ah_get_ah_flags(attr) & IB_AH_GRH))
+ return -EINVAL;
+
+ if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+ ocrdma_init_service_level(dev);
+
+ sgid_attr = attr->grh.sgid_attr;
+ status = rdma_read_gid_l2_fields(sgid_attr, &vlan_tag, NULL);
+ if (status)
+ return status;
+
+ status = ocrdma_alloc_av(dev, ah);
+ if (status)
+ goto av_err;
+
+ /* Get network header type for this GID */
+ ah->hdr_type = rdma_gid_attr_network_type(sgid_attr);
+
+ status = set_av_attr(dev, ah, attr, &sgid_attr->gid, pd->id,
+ &isvlan, vlan_tag);
+ if (status)
+ goto av_conf_err;
+
+ /* if pd is for the user process, pass the ah_id to user space */
+ if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
+ ahid_addr = pd->uctx->ah_tbl.va + rdma_ah_get_dlid(attr);
+ *ahid_addr = 0;
+ *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+ if (ocrdma_is_udp_encap_supported(dev)) {
+ *ahid_addr |= ((u32)ah->hdr_type &
+ OCRDMA_AH_L3_TYPE_MASK) <<
+ OCRDMA_AH_L3_TYPE_SHIFT;
+ }
+ if (isvlan)
+ *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
+ OCRDMA_AH_VLAN_VALID_SHIFT);
+ }
+
+ return 0;
+
+av_conf_err:
+ ocrdma_free_av(dev, ah);
+av_err:
+ return status;
+}
+
+int ocrdma_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
+
+ ocrdma_free_av(dev, ah);
+ return 0;
+}
+
+int ocrdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
+{
+ struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
+ struct ocrdma_av *av = ah->av;
+ struct ocrdma_grh *grh;
+
+ attr->type = ibah->type;
+ if (ah->av->valid & OCRDMA_AV_VALID) {
+ grh = (struct ocrdma_grh *)((u8 *)ah->av +
+ sizeof(struct ocrdma_eth_vlan));
+ rdma_ah_set_sl(attr, be16_to_cpu(av->eth_hdr.vlan_tag) >> 13);
+ } else {
+ grh = (struct ocrdma_grh *)((u8 *)ah->av +
+ sizeof(struct ocrdma_eth_basic));
+ rdma_ah_set_sl(attr, 0);
+ }
+ rdma_ah_set_grh(attr, NULL,
+ be32_to_cpu(grh->tclass_flow) & 0xffffffff,
+ ah->sgid_index,
+ be32_to_cpu(grh->pdid_hoplimit) & 0xff,
+ be32_to_cpu(grh->tclass_flow) >> 24);
+ rdma_ah_set_dgid_raw(attr, &grh->dgid[0]);
+ return 0;
+}
+
+int ocrdma_process_mad(struct ib_device *ibdev, int process_mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ int status = IB_MAD_RESULT_SUCCESS;
+ struct ocrdma_dev *dev;
+
+ if (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
+ dev = get_ocrdma_dev(ibdev);
+ ocrdma_pma_counters(dev, out);
+ status |= IB_MAD_RESULT_REPLY;
+ }
+
+ return status;
+}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
new file mode 100644
index 000000000..2626679df
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -0,0 +1,64 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef __OCRDMA_AH_H__
+#define __OCRDMA_AH_H__
+
+enum {
+ OCRDMA_AH_ID_MASK = 0x3FF,
+ OCRDMA_AH_VLAN_VALID_MASK = 0x01,
+ OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F,
+ OCRDMA_AH_L3_TYPE_MASK = 0x03,
+ OCRDMA_AH_L3_TYPE_SHIFT = 0x1D /* 29 bits */
+};
+
+int ocrdma_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int ocrdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int ocrdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
+
+int ocrdma_process_mad(struct ib_device *dev, int process_mad_flags,
+ u32 port_num, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const struct ib_mad *in,
+ struct ib_mad *out, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
+#endif /* __OCRDMA_AH_H__ */
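The masks and shifts above describe the 32-bit word that ocrdma_create_ah() (earlier in this diff) writes into the user context's ah_tbl: the low 10 bits carry the AH id, bits 29-30 the L3 header type, and bit 31 the VLAN-valid flag. A hedged decode sketch of that layout follows; decode_ahid() is a hypothetical illustration, not a helper provided by the driver or its user-space library.

#include <stdint.h>
#include <stdio.h>

/* Illustrative decode of the packed AH word using the enum values above. */
static void decode_ahid(uint32_t word)
{
	uint32_t ah_id      = word & 0x3FF;           /* OCRDMA_AH_ID_MASK */
	uint32_t l3_type    = (word >> 0x1D) & 0x03;  /* OCRDMA_AH_L3_TYPE_* */
	uint32_t vlan_valid = (word >> 0x1F) & 0x01;  /* OCRDMA_AH_VLAN_VALID_* */

	printf("ah_id=%u l3_type=%u vlan_valid=%u\n", ah_id, l3_type, vlan_valid);
}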
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
new file mode 100644
index 000000000..56f06c68f
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -0,0 +1,3240 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_ether.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
+
+#include "ocrdma.h"
+#include "ocrdma_hw.h"
+#include "ocrdma_verbs.h"
+#include "ocrdma_ah.h"
+
+enum mbx_status {
+ OCRDMA_MBX_STATUS_FAILED = 1,
+ OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,
+ OCRDMA_MBX_STATUS_OOR = 100,
+ OCRDMA_MBX_STATUS_INVALID_PD = 101,
+ OCRDMA_MBX_STATUS_PD_INUSE = 102,
+ OCRDMA_MBX_STATUS_INVALID_CQ = 103,
+ OCRDMA_MBX_STATUS_INVALID_QP = 104,
+ OCRDMA_MBX_STATUS_INVALID_LKEY = 105,
+ OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,
+ OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,
+ OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
+ OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
+ OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,
+ OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
+ OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,
+ OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,
+ OCRDMA_MBX_STATUS_MW_BOUND = 114,
+ OCRDMA_MBX_STATUS_INVALID_VA = 115,
+ OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,
+ OCRDMA_MBX_STATUS_INVALID_FBO = 117,
+ OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,
+ OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,
+ OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
+ OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
+ OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,
+ OCRDMA_MBX_STATUS_SRQ_ERROR = 133,
+ OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,
+ OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,
+ OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,
+ OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
+ OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
+ OCRDMA_MBX_STATUS_QP_BOUND = 130,
+ OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,
+ OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,
+ OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
+ OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,
+ OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,
+ OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144
+};
+
+enum additional_status {
+ OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22
+};
+
+enum cqe_status {
+ OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,
+ OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,
+ OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,
+ OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,
+ OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5
+};
+
+static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
+{
+ return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
+}
+
+static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
+{
+ eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
+}
+
+static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
+{
+ struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
+ (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
+
+ if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
+ return NULL;
+ return cqe;
+}
+
+static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
+{
+ dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
+}
+
+static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
+{
+ return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
+}
+
+static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
+{
+ dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
+}
+
+static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
+{
+ return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
+}
+
+enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
+{
+ switch (qps) {
+ case OCRDMA_QPS_RST:
+ return IB_QPS_RESET;
+ case OCRDMA_QPS_INIT:
+ return IB_QPS_INIT;
+ case OCRDMA_QPS_RTR:
+ return IB_QPS_RTR;
+ case OCRDMA_QPS_RTS:
+ return IB_QPS_RTS;
+ case OCRDMA_QPS_SQD:
+ case OCRDMA_QPS_SQ_DRAINING:
+ return IB_QPS_SQD;
+ case OCRDMA_QPS_SQE:
+ return IB_QPS_SQE;
+ case OCRDMA_QPS_ERR:
+ return IB_QPS_ERR;
+ }
+ return IB_QPS_ERR;
+}
+
+static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
+{
+ switch (qps) {
+ case IB_QPS_RESET:
+ return OCRDMA_QPS_RST;
+ case IB_QPS_INIT:
+ return OCRDMA_QPS_INIT;
+ case IB_QPS_RTR:
+ return OCRDMA_QPS_RTR;
+ case IB_QPS_RTS:
+ return OCRDMA_QPS_RTS;
+ case IB_QPS_SQD:
+ return OCRDMA_QPS_SQD;
+ case IB_QPS_SQE:
+ return OCRDMA_QPS_SQE;
+ case IB_QPS_ERR:
+ return OCRDMA_QPS_ERR;
+ }
+ return OCRDMA_QPS_ERR;
+}
+
+static int ocrdma_get_mbx_errno(u32 status)
+{
+ int err_num;
+ u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
+ OCRDMA_MBX_RSP_STATUS_SHIFT;
+ u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
+ OCRDMA_MBX_RSP_ASTATUS_SHIFT;
+
+ switch (mbox_status) {
+ case OCRDMA_MBX_STATUS_OOR:
+ case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:
+ err_num = -EAGAIN;
+ break;
+
+ case OCRDMA_MBX_STATUS_INVALID_PD:
+ case OCRDMA_MBX_STATUS_INVALID_CQ:
+ case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
+ case OCRDMA_MBX_STATUS_INVALID_QP:
+ case OCRDMA_MBX_STATUS_INVALID_CHANGE:
+ case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
+ case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
+ case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
+ case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
+ case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
+ case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
+ case OCRDMA_MBX_STATUS_INVALID_LKEY:
+ case OCRDMA_MBX_STATUS_INVALID_VA:
+ case OCRDMA_MBX_STATUS_INVALID_LENGTH:
+ case OCRDMA_MBX_STATUS_INVALID_FBO:
+ case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
+ case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
+ case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
+ case OCRDMA_MBX_STATUS_SRQ_ERROR:
+ case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:
+ err_num = -EINVAL;
+ break;
+
+ case OCRDMA_MBX_STATUS_PD_INUSE:
+ case OCRDMA_MBX_STATUS_QP_BOUND:
+ case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
+ case OCRDMA_MBX_STATUS_MW_BOUND:
+ err_num = -EBUSY;
+ break;
+
+ case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
+ case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
+ case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
+ case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
+ case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
+ case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
+ case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
+ case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
+ case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:
+ err_num = -ENOBUFS;
+ break;
+
+ case OCRDMA_MBX_STATUS_FAILED:
+ switch (add_status) {
+ case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
+ err_num = -EAGAIN;
+ break;
+ default:
+ err_num = -EFAULT;
+ }
+ break;
+ default:
+ err_num = -EFAULT;
+ }
+ return err_num;
+}
+
+char *port_speed_string(struct ocrdma_dev *dev)
+{
+ char *str = "";
+ u16 speeds_supported;
+
+ speeds_supported = dev->phy.fixed_speeds_supported |
+ dev->phy.auto_speeds_supported;
+ if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
+ str = "40Gbps ";
+ else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
+ str = "10Gbps ";
+ else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
+ str = "1Gbps ";
+
+ return str;
+}
+
+static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
+{
+ int err_num = -EINVAL;
+
+ switch (cqe_status) {
+ case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
+ err_num = -EPERM;
+ break;
+ case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
+ err_num = -EINVAL;
+ break;
+ case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
+ case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
+ err_num = -EINVAL;
+ break;
+ case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
+ default:
+ err_num = -EINVAL;
+ break;
+ }
+ return err_num;
+}
+
+void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
+ bool solicited, u16 cqe_popped)
+{
+ u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;
+
+ val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
+ OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);
+
+ if (armed)
+ val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
+ if (solicited)
+ val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
+ val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
+ iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
+}
+
+static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
+{
+ u32 val = 0;
+
+ val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
+ val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
+ iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
+}
+
+static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
+ bool arm, bool clear_int, u16 num_eqe)
+{
+ u32 val = 0;
+
+ val |= eq_id & OCRDMA_EQ_ID_MASK;
+ val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
+ if (arm)
+ val |= (1 << OCRDMA_REARM_SHIFT);
+ if (clear_int)
+ val |= (1 << OCRDMA_EQ_CLR_SHIFT);
+ val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
+ val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
+ iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
+}
+
+static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
+ u8 opcode, u8 subsys, u32 cmd_len)
+{
+ cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
+ cmd_hdr->timeout = 20; /* seconds */
+ cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
+}
+
+static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
+{
+ struct ocrdma_mqe *mqe;
+
+ mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
+ if (!mqe)
+ return NULL;
+ mqe->hdr.spcl_sge_cnt_emb |=
+ (OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
+ OCRDMA_MQE_HDR_EMB_MASK;
+ mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);
+
+ ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
+ mqe->hdr.pyld_len);
+ return mqe;
+}
+
+static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
+{
+ dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
+}
+
+static int ocrdma_alloc_q(struct ocrdma_dev *dev,
+ struct ocrdma_queue_info *q, u16 len, u16 entry_size)
+{
+ memset(q, 0, sizeof(*q));
+ q->len = len;
+ q->entry_size = entry_size;
+ q->size = len * entry_size;
+ q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma,
+ GFP_KERNEL);
+ if (!q->va)
+ return -ENOMEM;
+ return 0;
+}
+
+static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
+ dma_addr_t host_pa, int hw_page_size)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ q_pa[i].lo = (u32) (host_pa & 0xffffffff);
+ q_pa[i].hi = (u32) upper_32_bits(host_pa);
+ host_pa += hw_page_size;
+ }
+}
+
+static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
+ struct ocrdma_queue_info *q, int queue_type)
+{
+ u8 opcode = 0;
+ int status;
+ struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;
+
+ switch (queue_type) {
+ case QTYPE_MCCQ:
+ opcode = OCRDMA_CMD_DELETE_MQ;
+ break;
+ case QTYPE_CQ:
+ opcode = OCRDMA_CMD_DELETE_CQ;
+ break;
+ case QTYPE_EQ:
+ opcode = OCRDMA_CMD_DELETE_EQ;
+ break;
+ default:
+ BUG();
+ }
+ memset(cmd, 0, sizeof(*cmd));
+ ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+ cmd->id = q->id;
+
+ status = be_roce_mcc_cmd(dev->nic_info.netdev,
+ cmd, sizeof(*cmd), NULL, NULL);
+ if (!status)
+ q->created = false;
+ return status;
+}
+
+static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+{
+ int status;
+ struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
+ struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;
+
+ memset(cmd, 0, sizeof(*cmd));
+ ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
+ sizeof(*cmd));
+
+ cmd->req.rsvd_version = 2;
+ cmd->num_pages = 4;
+ cmd->valid = OCRDMA_CREATE_EQ_VALID;
+ cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
+
+ ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
+ PAGE_SIZE_4K);
+ status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
+ NULL);
+ if (!status) {
+ eq->q.id = rsp->vector_eqid & 0xffff;
+ eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
+ eq->q.created = true;
+ }
+ return status;
+}
+
+static int ocrdma_create_eq(struct ocrdma_dev *dev,
+ struct ocrdma_eq *eq, u16 q_len)
+{
+ int status;
+
+ status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
+ sizeof(struct ocrdma_eqe));
+ if (status)
+ return status;
+
+ status = ocrdma_mbx_create_eq(dev, eq);
+ if (status)
+ goto mbx_err;
+ eq->dev = dev;
+ ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
+
+ return 0;
+mbx_err:
+ ocrdma_free_q(dev, &eq->q);
+ return status;
+}
+
+int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+{
+ int irq;
+
+ if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
+ irq = dev->nic_info.pdev->irq;
+ else
+ irq = dev->nic_info.msix.vector_list[eq->vector];
+ return irq;
+}
+
+static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+{
+ if (eq->q.created) {
+ ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
+ ocrdma_free_q(dev, &eq->q);
+ }
+}
+
+static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
+{
+ int irq;
+
+ /* disarm the EQ so that interrupts are not generated
+ * while it is being freed and the EQ delete is in progress.
+ */
+ ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);
+
+ irq = ocrdma_get_irq(dev, eq);
+ free_irq(irq, eq);
+ _ocrdma_destroy_eq(dev, eq);
+}
+
+static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->eq_cnt; i++)
+ ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
+}
+
+static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
+ struct ocrdma_queue_info *cq,
+ struct ocrdma_queue_info *eq)
+{
+ struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
+ struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
+ int status;
+
+ memset(cmd, 0, sizeof(*cmd));
+ ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+ cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+ cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
+ OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
+ cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
+
+ cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
+ cmd->eqn = eq->id;
+ cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);
+
+ ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
+ cq->dma, PAGE_SIZE_4K);
+ status = be_roce_mcc_cmd(dev->nic_info.netdev,
+ cmd, sizeof(*cmd), NULL, NULL);
+ if (!status) {
+ cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
+ cq->created = true;
+ }
+ return status;
+}
+
+static u32 ocrdma_encoded_q_len(int q_len)
+{
+ u32 len_encoded = fls(q_len); /* log2(len) + 1 */
+
+ if (len_encoded == 16)
+ len_encoded = 0;
+ return len_encoded;
+}
+
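As an aside, ocrdma_encoded_q_len() just above maps a power-of-two queue length to the log2-style count field used by the create commands, with an fls() result of 16 wrapping to 0. A small stand-alone illustration follows; the portable fls() stand-in and the sample lengths are assumptions for the example, not driver code.

#include <stdio.h>

/* fls() returns the 1-based position of the highest set bit; this portable
 * stand-in mimics it for the example only.
 */
static unsigned int fls_stand_in(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int lens[] = { 16, 128, 32768 };

	for (int i = 0; i < 3; i++) {
		unsigned int enc = fls_stand_in(lens[i]);

		if (enc == 16)   /* same wrap-to-zero rule as ocrdma_encoded_q_len() */
			enc = 0;
		printf("q_len=%u -> encoded=%u\n", lens[i], enc);
	}
	return 0;   /* prints 16 -> 5, 128 -> 8, 32768 -> 0 */
}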
+static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
+ struct ocrdma_queue_info *mq,
+ struct ocrdma_queue_info *cq)
+{
+ int num_pages, status;
+ struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
+ struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
+ struct ocrdma_pa *pa;
+
+ memset(cmd, 0, sizeof(*cmd));
+ num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
+
+ ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+ cmd->req.rsvd_version = 1;
+ cmd->cqid_pages = num_pages;
+ cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
+ cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
+
+ cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
+ cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
+ /* Request link events on this MQ. */
+ cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);
+
+ cmd->async_cqid_ringsize = cq->id;
+ cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
+ OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
+ cmd->valid = OCRDMA_CREATE_MQ_VALID;
+ pa = &cmd->pa[0];
+
+ ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
+ status = be_roce_mcc_cmd(dev->nic_info.netdev,
+ cmd, sizeof(*cmd), NULL, NULL);
+ if (!status) {
+ mq->id = rsp->id;
+ mq->created = true;
+ }
+ return status;
+}
+
+static int ocrdma_create_mq(struct ocrdma_dev *dev)
+{
+ int status;
+
+ /* Alloc completion queue for Mailbox queue */
+ status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
+ sizeof(struct ocrdma_mcqe));
+ if (status)
+ goto alloc_err;
+
+ dev->eq_tbl[0].cq_cnt++;
+ status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
+ if (status)
+ goto mbx_cq_free;
+
+ memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
+ init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
+ mutex_init(&dev->mqe_ctx.lock);
+
+ /* Alloc Mailbox queue */
+ status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
+ sizeof(struct ocrdma_mqe));
+ if (status)
+ goto mbx_cq_destroy;
+ status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
+ if (status)
+ goto mbx_q_free;
+ ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
+ return 0;
+
+mbx_q_free:
+ ocrdma_free_q(dev, &dev->mq.sq);
+mbx_cq_destroy:
+ ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
+mbx_cq_free:
+ ocrdma_free_q(dev, &dev->mq.cq);
+alloc_err:
+ return status;
+}
+
+static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
+{
+ struct ocrdma_queue_info *mbxq, *cq;
+
+ /* mqe_ctx lock synchronizes with any other pending cmds. */
+ mutex_lock(&dev->mqe_ctx.lock);
+ mbxq = &dev->mq.sq;
+ if (mbxq->created) {
+ ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
+ ocrdma_free_q(dev, mbxq);
+ }
+ mutex_unlock(&dev->mqe_ctx.lock);
+
+ cq = &dev->mq.cq;
+ if (cq->created) {
+ ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
+ ocrdma_free_q(dev, cq);
+ }
+}
+
+static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
+ struct ocrdma_qp *qp)
+{
+ enum ib_qp_state new_ib_qps = IB_QPS_ERR;
+ enum ib_qp_state old_ib_qps;
+
+ if (qp == NULL)
+ BUG();
+ ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
+}
+
+static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
+ struct ocrdma_ae_mcqe *cqe)
+{
+ struct ocrdma_qp *qp = NULL;
+ struct ocrdma_cq *cq = NULL;
+ struct ib_event ib_evt;
+ int cq_event = 0;
+ int qp_event = 1;
+ int srq_event = 0;
+ int dev_event = 0;
+ int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
+ OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
+ u16 qpid = cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK;
+ u16 cqid = cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK;
+
+ /*
+ * Some FW versions return wrong qp or cq ids in CQEs.
+ * Check whether the IDs are valid.
+ */
+
+ if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) {
+ if (qpid < dev->attr.max_qp)
+ qp = dev->qp_tbl[qpid];
+ if (qp == NULL) {
+ pr_err("ocrdma%d:Async event - qpid %u is not valid\n",
+ dev->id, qpid);
+ return;
+ }
+ }
+
+ if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) {
+ if (cqid < dev->attr.max_cq)
+ cq = dev->cq_tbl[cqid];
+ if (cq == NULL) {
+ pr_err("ocrdma%d:Async event - cqid %u is not valid\n",
+ dev->id, cqid);
+ return;
+ }
+ }
+
+ memset(&ib_evt, 0, sizeof(ib_evt));
+
+ ib_evt.device = &dev->ibdev;
+
+ switch (type) {
+ case OCRDMA_CQ_ERROR:
+ ib_evt.element.cq = &cq->ibcq;
+ ib_evt.event = IB_EVENT_CQ_ERR;
+ cq_event = 1;
+ qp_event = 0;
+ break;
+ case OCRDMA_CQ_OVERRUN_ERROR:
+ ib_evt.element.cq = &cq->ibcq;
+ ib_evt.event = IB_EVENT_CQ_ERR;
+ cq_event = 1;
+ qp_event = 0;
+ break;
+ case OCRDMA_CQ_QPCAT_ERROR:
+ ib_evt.element.qp = &qp->ibqp;
+ ib_evt.event = IB_EVENT_QP_FATAL;
+ ocrdma_process_qpcat_error(dev, qp);
+ break;
+ case OCRDMA_QP_ACCESS_ERROR:
+ ib_evt.element.qp = &qp->ibqp;
+ ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ case OCRDMA_QP_COMM_EST_EVENT:
+ ib_evt.element.qp = &qp->ibqp;
+ ib_evt.event = IB_EVENT_COMM_EST;
+ break;
+ case OCRDMA_SQ_DRAINED_EVENT:
+ ib_evt.element.qp = &qp->ibqp;
+ ib_evt.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case OCRDMA_DEVICE_FATAL_EVENT:
+ ib_evt.element.port_num = 1;
+ ib_evt.event = IB_EVENT_DEVICE_FATAL;
+ qp_event = 0;
+ dev_event = 1;
+ break;
+ case OCRDMA_SRQCAT_ERROR:
+ ib_evt.element.srq = &qp->srq->ibsrq;
+ ib_evt.event = IB_EVENT_SRQ_ERR;
+ srq_event = 1;
+ qp_event = 0;
+ break;
+ case OCRDMA_SRQ_LIMIT_EVENT:
+ ib_evt.element.srq = &qp->srq->ibsrq;
+ ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ srq_event = 1;
+ qp_event = 0;
+ break;
+ case OCRDMA_QP_LAST_WQE_EVENT:
+ ib_evt.element.qp = &qp->ibqp;
+ ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ default:
+ cq_event = 0;
+ qp_event = 0;
+ srq_event = 0;
+ dev_event = 0;
+ pr_err("%s() unknown type=0x%x\n", __func__, type);
+ break;
+ }
+
+ if (type < OCRDMA_MAX_ASYNC_ERRORS)
+ atomic_inc(&dev->async_err_stats[type]);
+
+ if (qp_event) {
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
+ } else if (cq_event) {
+ if (cq->ibcq.event_handler)
+ cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
+ } else if (srq_event) {
+ if (qp->srq->ibsrq.event_handler)
+ qp->srq->ibsrq.event_handler(&ib_evt,
+ qp->srq->ibsrq.
+ srq_context);
+ } else if (dev_event) {
+ dev_err(&dev->ibdev.dev, "Fatal event received\n");
+ ib_dispatch_event(&ib_evt);
+ }
+
+}
+
+static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
+ struct ocrdma_ae_mcqe *cqe)
+{
+ struct ocrdma_ae_pvid_mcqe *evt;
+ int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
+ OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
+
+ switch (type) {
+ case OCRDMA_ASYNC_EVENT_PVID_STATE:
+ evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
+ if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
+ OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
+ dev->pvid = ((evt->tag_enabled &
+ OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
+ OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
+ break;
+
+ case OCRDMA_ASYNC_EVENT_COS_VALUE:
+ atomic_set(&dev->update_sl, 1);
+ break;
+ default:
+ /* Events we are not interested in. */
+ break;
+ }
+}
+
+static void ocrdma_process_link_state(struct ocrdma_dev *dev,
+ struct ocrdma_ae_mcqe *cqe)
+{
+ struct ocrdma_ae_lnkst_mcqe *evt;
+ u8 lstate;
+
+ evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
+ lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);
+
+ if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
+ return;
+
+ if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
+ ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
+}
+
+static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
+{
+ /* async CQE processing */
+ struct ocrdma_ae_mcqe *cqe = ae_cqe;
+ u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
+ OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
+ switch (evt_code) {
+ case OCRDMA_ASYNC_LINK_EVE_CODE:
+ ocrdma_process_link_state(dev, cqe);
+ break;
+ case OCRDMA_ASYNC_RDMA_EVE_CODE:
+ ocrdma_dispatch_ibevent(dev, cqe);
+ break;
+ case OCRDMA_ASYNC_GRP5_EVE_CODE:
+ ocrdma_process_grp5_aync(dev, cqe);
+ break;
+ default:
+ pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
+ dev->id, evt_code);
+ }
+}
+
+static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
+{
+ if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
+ dev->mqe_ctx.cqe_status = (cqe->status &
+ OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
+ dev->mqe_ctx.ext_status =
+ (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
+ >> OCRDMA_MCQE_ESTATUS_SHIFT;
+ dev->mqe_ctx.cmd_done = true;
+ wake_up(&dev->mqe_ctx.cmd_wait);
+ } else
+ pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
+ __func__, cqe->tag_lo, dev->mqe_ctx.tag);
+}
+
+static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
+{
+ u16 cqe_popped = 0;
+ struct ocrdma_mcqe *cqe;
+
+ while (1) {
+ cqe = ocrdma_get_mcqe(dev);
+ if (cqe == NULL)
+ break;
+ ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
+ cqe_popped += 1;
+ if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
+ ocrdma_process_acqe(dev, cqe);
+ else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
+ ocrdma_process_mcqe(dev, cqe);
+ memset(cqe, 0, sizeof(struct ocrdma_mcqe));
+ ocrdma_mcq_inc_tail(dev);
+ }
+ ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
+ return 0;
+}
+
+static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+ struct ocrdma_cq *cq, bool sq)
+{
+ struct ocrdma_qp *qp;
+ struct list_head *cur;
+ struct ocrdma_cq *bcq = NULL;
+ struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head);
+
+ list_for_each(cur, head) {
+ if (sq)
+ qp = list_entry(cur, struct ocrdma_qp, sq_entry);
+ else
+ qp = list_entry(cur, struct ocrdma_qp, rq_entry);
+
+ if (qp->srq)
+ continue;
+ /* if wq and rq share the same cq, then comp_handler
+ * is already invoked.
+ */
+ if (qp->sq_cq == qp->rq_cq)
+ continue;
+ /* if completion came on sq, rq's cq is buddy cq.
+ * if completion came on rq, sq's cq is buddy cq.
+ */
+ if (qp->sq_cq == cq)
+ bcq = qp->rq_cq;
+ else
+ bcq = qp->sq_cq;
+ return bcq;
+ }
+ return NULL;
+}
+
+static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+ struct ocrdma_cq *cq)
+{
+ unsigned long flags;
+ struct ocrdma_cq *bcq = NULL;
+
+ /* Go through the list of QPs in error state which are using this CQ
+ * and invoke their callback handlers to trigger CQE processing for
+ * error/flushed CQEs. It is rare to find more than a few entries in
+ * this list, as most consumers stop after getting an error CQE.
+ * The list is traversed only once, when a matching buddy cq is found for a QP.
+ */
+ spin_lock_irqsave(&dev->flush_q_lock, flags);
+ /* Check if buddy CQ is present.
+ * true - Check for SQ CQ
+ * false - Check for RQ CQ
+ */
+ bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
+ if (bcq == NULL)
+ bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
+ spin_unlock_irqrestore(&dev->flush_q_lock, flags);
+
+ /* if there is valid buddy cq, look for its completion handler */
+ if (bcq && bcq->ibcq.comp_handler) {
+ spin_lock_irqsave(&bcq->comp_handler_lock, flags);
+ (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
+ spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
+ }
+}
+
+static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
+{
+ unsigned long flags;
+ struct ocrdma_cq *cq;
+
+ if (cq_idx >= OCRDMA_MAX_CQ)
+ BUG();
+
+ cq = dev->cq_tbl[cq_idx];
+ if (cq == NULL)
+ return;
+
+ if (cq->ibcq.comp_handler) {
+ spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
+ spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+ }
+ ocrdma_qp_buddy_cq_handler(dev, cq);
+}
+
+static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
+{
+ /* process the MQ-CQE. */
+ if (cq_id == dev->mq.cq.id)
+ ocrdma_mq_cq_handler(dev, cq_id);
+ else
+ ocrdma_qp_cq_handler(dev, cq_id);
+}
+
+static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
+{
+ struct ocrdma_eq *eq = handle;
+ struct ocrdma_dev *dev = eq->dev;
+ struct ocrdma_eqe eqe;
+ struct ocrdma_eqe *ptr;
+ u16 cq_id;
+ u8 mcode;
+ int budget = eq->cq_cnt;
+
+ do {
+ ptr = ocrdma_get_eqe(eq);
+ eqe = *ptr;
+ ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
+ mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
+ >> OCRDMA_EQE_MAJOR_CODE_SHIFT;
+ if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
+ pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
+ eq->q.id, eqe.id_valid);
+ if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
+ break;
+
+ ptr->id_valid = 0;
+ /* ring eq doorbell as soon as it's consumed. */
+ ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
+		/* check whether it is a CQE or not. */
+ if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
+ cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
+ ocrdma_cq_handler(dev, cq_id);
+ }
+ ocrdma_eq_inc_tail(eq);
+
+ /* There can be a stale EQE after the last bound CQ is
+ * destroyed. EQE valid and budget == 0 implies this.
+ */
+ if (budget)
+ budget--;
+
+ } while (budget);
+
+ eq->aic_obj.eq_intr_cnt++;
+ ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
+ return IRQ_HANDLED;
+}
+
+static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
+{
+ struct ocrdma_mqe *mqe;
+
+ dev->mqe_ctx.tag = dev->mq.sq.head;
+ dev->mqe_ctx.cmd_done = false;
+ mqe = ocrdma_get_mqe(dev);
+ cmd->hdr.tag_lo = dev->mq.sq.head;
+ ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
+ /* make sure descriptor is written before ringing doorbell */
+ wmb();
+ ocrdma_mq_inc_head(dev);
+ ocrdma_ring_mq_db(dev);
+}
+
+static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
+{
+ long status;
+ /* 30 sec timeout */
+ status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
+ (dev->mqe_ctx.cmd_done != false),
+ msecs_to_jiffies(30000));
+ if (status)
+ return 0;
+ else {
+ dev->mqe_ctx.fw_error_state = true;
+ pr_err("%s(%d) mailbox timeout: fw not responding\n",
+ __func__, dev->id);
+ return -1;
+ }
+}
+
+/* issue a mailbox command on the MQ */
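+/* Flow, as implemented below: the command is serialized under mqe_ctx.lock,
+ * posted to the MQ, and the caller sleeps for up to 30 seconds waiting for
+ * the MCQE completion; the response MQE is then copied back over the request
+ * buffer and decoded in place.
+ */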
+static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
+{
+ int status = 0;
+ u16 cqe_status, ext_status;
+ struct ocrdma_mqe *rsp_mqe;
+ struct ocrdma_mbx_rsp *rsp = NULL;
+
+ mutex_lock(&dev->mqe_ctx.lock);
+ if (dev->mqe_ctx.fw_error_state)
+ goto mbx_err;
+ ocrdma_post_mqe(dev, mqe);
+ status = ocrdma_wait_mqe_cmpl(dev);
+ if (status)
+ goto mbx_err;
+ cqe_status = dev->mqe_ctx.cqe_status;
+ ext_status = dev->mqe_ctx.ext_status;
+ rsp_mqe = ocrdma_get_mqe_rsp(dev);
+ ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
+ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
+ OCRDMA_MQE_HDR_EMB_SHIFT)
+ rsp = &mqe->u.rsp;
+
+ if (cqe_status || ext_status) {
+ pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
+ __func__, cqe_status, ext_status);
+ if (rsp) {
+ /* This is for embedded cmds. */
+ pr_err("opcode=0x%x, subsystem=0x%x\n",
+ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+ OCRDMA_MBX_RSP_OPCODE_SHIFT,
+ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
+ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
+ }
+ status = ocrdma_get_mbx_cqe_errno(cqe_status);
+ goto mbx_err;
+ }
+	/* For non-embedded, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
+ if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
+ status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
+mbx_err:
+ mutex_unlock(&dev->mqe_ctx.lock);
+ return status;
+}
+
+static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
+ void *payload_va)
+{
+ int status;
+ struct ocrdma_mbx_rsp *rsp = payload_va;
+
+ if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
+ OCRDMA_MQE_HDR_EMB_SHIFT)
+ BUG();
+
+ status = ocrdma_mbx_cmd(dev, mqe);
+ if (!status)
+		/* For non-embedded, only CQE failures are handled in
+		 * ocrdma_mbx_cmd. We need to check for RSP errors.
+		 */
+ if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
+ status = ocrdma_get_mbx_errno(rsp->status);
+
+ if (status)
+ pr_err("opcode=0x%x, subsystem=0x%x\n",
+ (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
+ OCRDMA_MBX_RSP_OPCODE_SHIFT,
+ (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
+ OCRDMA_MBX_RSP_SUBSYS_SHIFT);
+ return status;
+}
+
+static void ocrdma_get_attr(struct ocrdma_dev *dev,
+ struct ocrdma_dev_attr *attr,
+ struct ocrdma_mbx_query_config *rsp)
+{
+ attr->max_pd =
+ (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+ attr->udp_encap = (rsp->max_pd_ca_ack_delay &
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT;
+ attr->max_dpp_pds =
+ (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
+ attr->max_qp =
+ (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
+ attr->max_srq =
+ (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
+ attr->max_send_sge = ((rsp->max_recv_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
+ attr->max_recv_sge = (rsp->max_recv_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT;
+ attr->max_srq_sge = (rsp->max_srq_rqe_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
+ attr->max_rdma_sge = (rsp->max_wr_rd_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT;
+ attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
+ OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
+ attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
+ OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
+ attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
+ OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
+ attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
+ OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
+ attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
+ OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
+ attr->max_mw = rsp->max_mw;
+ attr->max_mr = rsp->max_mr;
+ attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
+ rsp->max_mr_size_lo;
+ attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
+ attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
+ attr->max_cqe = rsp->max_cq_cqes_per_cq &
+ OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
+ attr->max_cq = (rsp->max_cq_cqes_per_cq &
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
+ attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
+ OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
+ OCRDMA_WQE_STRIDE;
+ attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
+ OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
+ OCRDMA_WQE_STRIDE;
+ attr->max_inline_data =
+ attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
+ sizeof(struct ocrdma_sge));
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ attr->ird = 1;
+ attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
+ attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
+ }
+ dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
+ OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
+ dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
+}
+
+static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
+ struct ocrdma_fw_conf_rsp *conf)
+{
+ u32 fn_mode;
+
+ fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
+ if (fn_mode != OCRDMA_FN_MODE_RDMA)
+ return -EINVAL;
+ dev->base_eqid = conf->base_eqid;
+ dev->max_eq = conf->max_eq;
+ return 0;
+}
+
+/* can be issued only at init time. */
+static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mqe *cmd;
+ struct ocrdma_fw_ver_rsp *rsp;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
+ if (!cmd)
+ return -ENOMEM;
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_GET_FW_VER,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_fw_ver_rsp *)cmd;
+ memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
+ memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
+ sizeof(rsp->running_ver));
+ ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+/* can be issued only at init time. */
+static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mqe *cmd;
+ struct ocrdma_fw_conf_rsp *rsp;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
+ if (!cmd)
+ return -ENOMEM;
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_GET_FW_CONFIG,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_fw_conf_rsp *)cmd;
+ status = ocrdma_check_fw_config(dev, rsp);
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
+{
+ struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
+ struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
+ struct ocrdma_rdma_stats_resp *old_stats;
+ int status;
+
+ old_stats = kmalloc(sizeof(*old_stats), GFP_KERNEL);
+ if (old_stats == NULL)
+ return -ENOMEM;
+
+ memset(mqe, 0, sizeof(*mqe));
+ mqe->hdr.pyld_len = dev->stats_mem.size;
+ mqe->hdr.spcl_sge_cnt_emb |=
+ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
+ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
+ mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;
+
+ /* Cache the old stats */
+ memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
+ memset(req, 0, dev->stats_mem.size);
+
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
+ OCRDMA_CMD_GET_RDMA_STATS,
+ OCRDMA_SUBSYS_ROCE,
+ dev->stats_mem.size);
+ if (reset)
+ req->reset_stats = reset;
+
+ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
+ if (status)
+ /* Copy from cache, if mbox fails */
+ memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
+ else
+ ocrdma_le32_to_cpu(req, dev->stats_mem.size);
+
+ kfree(old_stats);
+ return status;
+}
+
+static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_dma_mem dma;
+ struct ocrdma_mqe *mqe;
+ struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
+ struct mgmt_hba_attribs *hba_attribs;
+
+ mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
+ if (!mqe)
+ return status;
+
+ dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
+ dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
+ dma.size, &dma.pa, GFP_KERNEL);
+ if (!dma.va)
+ goto free_mqe;
+
+ mqe->hdr.pyld_len = dma.size;
+ mqe->hdr.spcl_sge_cnt_emb |=
+ (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
+ mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
+ mqe->u.nonemb_req.sge[0].len = dma.size;
+
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
+ OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
+ OCRDMA_SUBSYS_COMMON,
+ dma.size);
+
+ status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
+ if (!status) {
+ ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
+ hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;
+
+ dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
+ OCRDMA_HBA_ATTRB_PTNUM_MASK)
+ >> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
+ strscpy(dev->model_number,
+ hba_attribs->controller_model_number,
+ sizeof(dev->model_number));
+ }
+ dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);
+free_mqe:
+ kfree(mqe);
+ return status;
+}
+
+static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mbx_query_config *rsp;
+ struct ocrdma_mqe *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
+ if (!cmd)
+ return status;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_mbx_query_config *)cmd;
+ ocrdma_get_attr(dev, &dev->attr, rsp);
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
+ u8 *lnk_state)
+{
+ int status = -ENOMEM;
+ struct ocrdma_get_link_speed_rsp *rsp;
+ struct ocrdma_mqe *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
+ sizeof(*cmd));
+ if (!cmd)
+ return status;
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+ ((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
+ if (lnk_speed)
+ *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
+ >> OCRDMA_PHY_PS_SHIFT;
+ if (lnk_state)
+ *lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);
+
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mqe *cmd;
+ struct ocrdma_get_phy_info_rsp *rsp;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));
+ if (!cmd)
+ return status;
+
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,
+ sizeof(*cmd));
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
+ dev->phy.phy_type =
+ (rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
+ dev->phy.interface_type =
+ (rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
+ >> OCRDMA_IF_TYPE_SHIFT;
+ dev->phy.auto_speeds_supported =
+ (rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
+ dev->phy.fixed_speeds_supported =
+ (rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
+ >> OCRDMA_FSPEED_SUPP_SHIFT;
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+ int status = -ENOMEM;
+ struct ocrdma_alloc_pd *cmd;
+ struct ocrdma_alloc_pd_rsp *rsp;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
+ if (!cmd)
+ return status;
+ if (pd->dpp_enabled)
+ cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
+ pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
+ if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
+ pd->dpp_enabled = true;
+ pd->dpp_page = rsp->dpp_page_pdid >>
+ OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+ } else {
+ pd->dpp_enabled = false;
+ pd->num_dpp_qp = 0;
+ }
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+ int status = -ENOMEM;
+ struct ocrdma_dealloc_pd *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
+ if (!cmd)
+ return status;
+ cmd->id = pd->id;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ kfree(cmd);
+ return status;
+}
+
+
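+/* Preallocate PD ranges so later PD allocations can be served from the
+ * local bitmaps instead of a mailbox round trip: the DPP-capable PDs are
+ * reserved first (when the device exposes any), followed by the remaining
+ * normal PDs. The PD resource manager is enabled only if at least one of
+ * the two bitmaps was obtained.
+ */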
+static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
+{
+ int status = -ENOMEM;
+ struct ocrdma_alloc_pd_range *cmd;
+ struct ocrdma_alloc_pd_range_rsp *rsp;
+
+	/* Preallocate the DPP PDs */
+ if (dev->attr.max_dpp_pds) {
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
+ sizeof(*cmd));
+ if (!cmd)
+ return -ENOMEM;
+ cmd->pd_count = dev->attr.max_dpp_pds;
+ cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+ if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
+ rsp->pd_count) {
+ dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+ OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+ dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+ OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+ dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+ dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
+ }
+ kfree(cmd);
+ }
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+ if (!status && rsp->pd_count) {
+ dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
+ OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+ dev->pd_mgr->max_normal_pd = rsp->pd_count;
+ dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
+ }
+ kfree(cmd);
+
+ if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
+ /* Enable PD resource manager */
+ dev->pd_mgr->pd_prealloc_valid = true;
+ return 0;
+ }
+ return status;
+}
+
+static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
+{
+ struct ocrdma_dealloc_pd_range *cmd;
+
+ /* return normal PDs to firmware */
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
+ if (!cmd)
+ goto mbx_err;
+
+ if (dev->pd_mgr->max_normal_pd) {
+ cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
+ cmd->pd_count = dev->pd_mgr->max_normal_pd;
+ ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ }
+
+ if (dev->pd_mgr->max_dpp_pd) {
+ kfree(cmd);
+ /* return DPP PDs to firmware */
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
+ sizeof(*cmd));
+ if (!cmd)
+ goto mbx_err;
+
+ cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
+ cmd->pd_count = dev->pd_mgr->max_dpp_pd;
+ ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ }
+mbx_err:
+ kfree(cmd);
+}
+
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
+{
+ int status;
+
+ dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
+ GFP_KERNEL);
+ if (!dev->pd_mgr)
+ return;
+
+ status = ocrdma_mbx_alloc_pd_range(dev);
+ if (status) {
+ pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
+ __func__, dev->id);
+ }
+}
+
+static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
+{
+ ocrdma_mbx_dealloc_pd_range(dev);
+ bitmap_free(dev->pd_mgr->pd_norm_bitmap);
+ bitmap_free(dev->pd_mgr->pd_dpp_bitmap);
+ kfree(dev->pd_mgr);
+}
+
+static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
+ int *num_pages, int *page_size)
+{
+ int i;
+ int mem_size;
+
+ *num_entries = roundup_pow_of_two(*num_entries);
+ mem_size = *num_entries * entry_size;
+	/* find the lowest possible multiplier */
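+	/* Illustrative walk-through (request values assumed): asking for
+	 * 1000 entries of 32 bytes is first rounded up to 1024 entries
+	 * (32768 bytes); the loop then picks the smallest supported queue
+	 * size (OCRDMA_Q_PAGE_BASE_SIZE << i) that can hold it, the page
+	 * size becomes 1/OCRDMA_MAX_Q_PAGES of that queue size, the memory
+	 * is rounded up to a whole number of such pages, and *num_entries
+	 * is recomputed so the caller sees the capacity actually allocated.
+	 */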
+ for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
+ if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
+ break;
+ }
+ if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
+ return -EINVAL;
+ mem_size = roundup(mem_size,
+ ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
+ *num_pages =
+ mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
+ *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
+ *num_entries = mem_size / entry_size;
+ return 0;
+}
+
+static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
+{
+ int i;
+ int status = -ENOMEM;
+ int max_ah;
+ struct ocrdma_create_ah_tbl *cmd;
+ struct ocrdma_create_ah_tbl_rsp *rsp;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ dma_addr_t pa;
+ struct ocrdma_pbe *pbes;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
+ if (!cmd)
+ return status;
+
+ max_ah = OCRDMA_MAX_AH;
+ dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;
+
+ /* number of PBEs in PBL */
+ cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
+ OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
+ OCRDMA_CREATE_AH_NUM_PAGES_MASK;
+
+ /* page size */
+ for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
+ if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
+ break;
+ }
+ cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
+ OCRDMA_CREATE_AH_PAGE_SIZE_MASK;
+
+ /* ah_entry size */
+ cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
+ OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
+ OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;
+
+ dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &dev->av_tbl.pbl.pa,
+ GFP_KERNEL);
+ if (dev->av_tbl.pbl.va == NULL)
+ goto mem_err;
+
+ dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
+ &pa, GFP_KERNEL);
+ if (dev->av_tbl.va == NULL)
+ goto mem_err_ah;
+ dev->av_tbl.pa = pa;
+ dev->av_tbl.num_ah = max_ah;
+
+ pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
+ for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
+ pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
+ pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
+ pa += PAGE_SIZE;
+ }
+ cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
+ cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
+ dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
+ kfree(cmd);
+ return 0;
+
+mbx_err:
+ dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
+ dev->av_tbl.pa);
+ dev->av_tbl.va = NULL;
+mem_err_ah:
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
+ dev->av_tbl.pbl.pa);
+ dev->av_tbl.pbl.va = NULL;
+ dev->av_tbl.size = 0;
+mem_err:
+ kfree(cmd);
+ return status;
+}
+
+static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
+{
+ struct ocrdma_delete_ah_tbl *cmd;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+
+ if (dev->av_tbl.va == NULL)
+ return;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
+ if (!cmd)
+ return;
+ cmd->ahid = dev->av_tbl.ahid;
+
+ ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
+ dev->av_tbl.pa);
+ dev->av_tbl.va = NULL;
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
+ dev->av_tbl.pbl.pa);
+ kfree(cmd);
+}
+
+/* Multiple CQs share an EQ. This routine returns the least used EQ to
+ * associate with a CQ, distributing the interrupt processing and CPU load
+ * across the EQs, their vectors, and hence the CPUs servicing them.
+ */
+static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
+{
+ int i, selected_eq = 0, cq_cnt = 0;
+ u16 eq_id;
+
+ mutex_lock(&dev->dev_lock);
+ cq_cnt = dev->eq_tbl[0].cq_cnt;
+ eq_id = dev->eq_tbl[0].q.id;
+	/* find the EQ that has the least number of
+	 * CQs associated with it.
+	 */
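+	/* Illustrative example (counts assumed): with three EQs currently
+	 * carrying 4, 2 and 3 CQs, the scan below selects the second EQ,
+	 * bumps its cq_cnt to 3 and returns its queue id.
+	 */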
+ for (i = 0; i < dev->eq_cnt; i++) {
+ if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
+ cq_cnt = dev->eq_tbl[i].cq_cnt;
+ eq_id = dev->eq_tbl[i].q.id;
+ selected_eq = i;
+ }
+ }
+ dev->eq_tbl[selected_eq].cq_cnt += 1;
+ mutex_unlock(&dev->dev_lock);
+ return eq_id;
+}
+
+static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
+{
+ int i;
+
+ mutex_lock(&dev->dev_lock);
+ i = ocrdma_get_eq_table_index(dev, eq_id);
+ if (i == -EINVAL)
+ BUG();
+ dev->eq_tbl[i].cq_cnt -= 1;
+ mutex_unlock(&dev->dev_lock);
+}
+
+int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+ int entries, int dpp_cq, u16 pd_id)
+{
+	int status = -ENOMEM;
+	int max_hw_cqe;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ struct ocrdma_create_cq *cmd;
+ struct ocrdma_create_cq_rsp *rsp;
+ u32 hw_pages, cqe_size, page_size, cqe_count;
+
+ if (entries > dev->attr.max_cqe) {
+ pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
+ __func__, dev->id, dev->attr.max_cqe, entries);
+ return -EINVAL;
+ }
+ if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
+ return -EINVAL;
+
+ if (dpp_cq) {
+ cq->max_hw_cqe = 1;
+ max_hw_cqe = 1;
+ cqe_size = OCRDMA_DPP_CQE_SIZE;
+ hw_pages = 1;
+ } else {
+ cq->max_hw_cqe = dev->attr.max_cqe;
+ max_hw_cqe = dev->attr.max_cqe;
+ cqe_size = sizeof(struct ocrdma_cqe);
+ hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
+ }
+
+ cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
+ if (!cmd)
+ return -ENOMEM;
+ ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+ cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
+ if (!cq->va) {
+ status = -ENOMEM;
+ goto mem_err;
+ }
+ page_size = cq->len / hw_pages;
+ cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
+ OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
+ cmd->cmd.pgsz_pgcnt |= hw_pages;
+ cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
+
+ cq->eqn = ocrdma_bind_eq(dev);
+ cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
+ cqe_count = cq->len / cqe_size;
+ cq->cqe_cnt = cqe_count;
+ if (cqe_count > 1024) {
+ /* Set cnt to 3 to indicate more than 1024 cq entries */
+ cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
+ } else {
+ u8 count = 0;
+ switch (cqe_count) {
+ case 256:
+ count = 0;
+ break;
+ case 512:
+ count = 1;
+ break;
+ case 1024:
+ count = 2;
+ break;
+ default:
+ goto mbx_err;
+ }
+ cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
+ }
+ /* shared eq between all the consumer cqs. */
+ cmd->cmd.eqn = cq->eqn;
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ if (dpp_cq)
+ cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
+ OCRDMA_CREATE_CQ_TYPE_SHIFT;
+ cq->phase_change = false;
+ cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
+ } else {
+ cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
+ cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
+ cq->phase_change = true;
+ }
+
+ /* pd_id valid only for v3 */
+ cmd->cmd.pdid_cqecnt |= (pd_id <<
+ OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
+ ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_create_cq_rsp *)cmd;
+ cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
+ kfree(cmd);
+ return 0;
+mbx_err:
+ ocrdma_unbind_eq(dev, cq->eqn);
+ dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
+mem_err:
+ kfree(cmd);
+ return status;
+}
+
+void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
+{
+ struct ocrdma_destroy_cq *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
+ if (!cmd)
+ return;
+ ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+ cmd->bypass_flush_qid |=
+ (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
+ OCRDMA_DESTROY_CQ_QID_MASK;
+
+ ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ ocrdma_unbind_eq(dev, cq->eqn);
+ dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
+ kfree(cmd);
+}
+
+int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
+ u32 pdid, int addr_check)
+{
+ int status = -ENOMEM;
+ struct ocrdma_alloc_lkey *cmd;
+ struct ocrdma_alloc_lkey_rsp *rsp;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
+ if (!cmd)
+ return status;
+ cmd->pdid = pdid;
+ cmd->pbl_sz_flags |= addr_check;
+ cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
+ cmd->pbl_sz_flags |=
+ (hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
+ cmd->pbl_sz_flags |=
+ (hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
+ cmd->pbl_sz_flags |=
+ (hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
+ cmd->pbl_sz_flags |=
+ (hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
+ cmd->pbl_sz_flags |=
+ (hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
+ hwmr->lkey = rsp->lrkey;
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
+{
+ int status;
+ struct ocrdma_dealloc_lkey *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
+ if (!cmd)
+ return -ENOMEM;
+ cmd->lkey = lkey;
+ cmd->rsvd_frmr = fr_mr ? 1 : 0;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+
+ kfree(cmd);
+ return status;
+}
+
+static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
+ u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
+{
+ int status = -ENOMEM;
+ int i;
+ struct ocrdma_reg_nsmr *cmd;
+ struct ocrdma_reg_nsmr_rsp *rsp;
+ u64 fbo = hwmr->va & (hwmr->pbe_size - 1);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
+ if (!cmd)
+ return -ENOMEM;
+ cmd->num_pbl_pdid =
+ pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
+ cmd->fr_mr = hwmr->fr_mr;
+
+ cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
+ OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
+ cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
+ OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
+ cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
+ OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
+ cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
+ OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
+ cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
+ OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
+ cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);
+
+ cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
+ cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
+ OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
+ cmd->totlen_low = hwmr->len;
+ cmd->totlen_high = upper_32_bits(hwmr->len);
+ cmd->fbo_low = lower_32_bits(fbo);
+ cmd->fbo_high = upper_32_bits(fbo);
+ cmd->va_loaddr = (u32) hwmr->va;
+ cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
+
+ for (i = 0; i < pbl_cnt; i++) {
+ cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
+ cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
+ }
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
+ hwmr->lkey = rsp->lrkey;
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
+ struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
+ u32 pbl_offset, u32 last)
+{
+ int status;
+ int i;
+ struct ocrdma_reg_nsmr_cont *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
+ if (!cmd)
+ return -ENOMEM;
+ cmd->lrkey = hwmr->lkey;
+ cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
+ (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
+ cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;
+
+ for (i = 0; i < pbl_cnt; i++) {
+ cmd->pbl[i].lo =
+ (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
+ cmd->pbl[i].hi =
+ upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
+ }
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+
+ kfree(cmd);
+ return status;
+}
+
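+/* Illustrative example (PBL counts assumed): registering an MR that needs
+ * 40 PBLs with a per-command limit of 16 issues one REGISTER_NSMR command
+ * covering PBLs 0-15, followed by REGISTER_NSMR_CONT commands for 16-31
+ * and 32-39, with the "last" flag set only on the final command.
+ */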
+int ocrdma_reg_mr(struct ocrdma_dev *dev,
+ struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
+{
+ int status;
+ u32 last = 0;
+ u32 cur_pbl_cnt, pbl_offset;
+ u32 pending_pbl_cnt = hwmr->num_pbls;
+
+ pbl_offset = 0;
+ cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
+ if (cur_pbl_cnt == pending_pbl_cnt)
+ last = 1;
+
+ status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
+ cur_pbl_cnt, hwmr->pbe_size, last);
+ if (status) {
+ pr_err("%s() status=%d\n", __func__, status);
+ return status;
+ }
+	/* if there are no more PBLs to register, then exit. */
+ if (last)
+ return 0;
+
+ while (!last) {
+ pbl_offset += cur_pbl_cnt;
+ pending_pbl_cnt -= cur_pbl_cnt;
+ cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
+		/* if we reach the end of the PBLs, we need to set the last
+		 * bit, indicating no more PBLs to register for this memory key.
+		 */
+ if (cur_pbl_cnt == pending_pbl_cnt)
+ last = 1;
+
+ status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
+ pbl_offset, last);
+ if (status)
+ break;
+ }
+ if (status)
+ pr_err("%s() err. status=%d\n", __func__, status);
+
+ return status;
+}
+
+bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
+{
+ struct ocrdma_qp *tmp;
+ bool found = false;
+ list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
+ if (qp == tmp) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+}
+
+bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
+{
+ struct ocrdma_qp *tmp;
+ bool found = false;
+ list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
+ if (qp == tmp) {
+ found = true;
+ break;
+ }
+ }
+ return found;
+}
+
+void ocrdma_flush_qp(struct ocrdma_qp *qp)
+{
+ bool found;
+ unsigned long flags;
+ struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+
+ spin_lock_irqsave(&dev->flush_q_lock, flags);
+ found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
+ if (!found)
+ list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
+ if (!qp->srq) {
+ found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
+ if (!found)
+ list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
+ }
+ spin_unlock_irqrestore(&dev->flush_q_lock, flags);
+}
+
+static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
+{
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+}
+
+int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
+ enum ib_qp_state *old_ib_state)
+{
+ unsigned long flags;
+ enum ocrdma_qp_state new_state;
+ new_state = get_ocrdma_qp_state(new_ib_state);
+
+ /* sync with wqe and rqe posting */
+ spin_lock_irqsave(&qp->q_lock, flags);
+
+ if (old_ib_state)
+ *old_ib_state = get_ibqp_state(qp->state);
+ if (new_state == qp->state) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ return 1;
+ }
+
+
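+	/* For example, a transition to ERR puts the QP on its CQs' flush
+	 * lists (ocrdma_flush_qp) so pending work receives flush completions,
+	 * while a transition back to INIT resets the SQ/RQ ring pointers
+	 * and removes the QP from those lists.
+	 */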
+ if (new_state == OCRDMA_QPS_INIT) {
+ ocrdma_init_hwq_ptr(qp);
+ ocrdma_del_flush_qp(qp);
+ } else if (new_state == OCRDMA_QPS_ERR) {
+ ocrdma_flush_qp(qp);
+ }
+
+ qp->state = new_state;
+
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ return 0;
+}
+
+static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
+{
+ u32 flags = 0;
+ if (qp->cap_flags & OCRDMA_QP_INB_RD)
+ flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
+ if (qp->cap_flags & OCRDMA_QP_INB_WR)
+ flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
+ if (qp->cap_flags & OCRDMA_QP_MW_BIND)
+ flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
+ if (qp->cap_flags & OCRDMA_QP_LKEY0)
+ flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
+ if (qp->cap_flags & OCRDMA_QP_FAST_REG)
+ flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
+ return flags;
+}
+
+static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
+ struct ib_qp_init_attr *attrs,
+ struct ocrdma_qp *qp)
+{
+ int status;
+ u32 len, hw_pages, hw_page_size;
+ dma_addr_t pa;
+ struct ocrdma_pd *pd = qp->pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ u32 max_wqe_allocated;
+ u32 max_sges = attrs->cap.max_send_sge;
+
+ /* QP1 may exceed 127 */
+ max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,
+ dev->attr.max_wqe);
+
+ status = ocrdma_build_q_conf(&max_wqe_allocated,
+ dev->attr.wqe_size, &hw_pages, &hw_page_size);
+ if (status) {
+ pr_err("%s() req. max_send_wr=0x%x\n", __func__,
+ max_wqe_allocated);
+ return -EINVAL;
+ }
+ qp->sq.max_cnt = max_wqe_allocated;
+ len = (hw_pages * hw_page_size);
+
+ qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ if (!qp->sq.va)
+ return -EINVAL;
+ qp->sq.len = len;
+ qp->sq.pa = pa;
+ qp->sq.entry_size = dev->attr.wqe_size;
+ ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
+
+ cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
+ << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
+ cmd->num_wq_rq_pages |= (hw_pages <<
+ OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
+ cmd->max_sge_send_write |= (max_sges <<
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
+ cmd->max_sge_send_write |= (max_sges <<
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
+ cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
+ OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
+ cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
+ OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;
+ return 0;
+}
+
+static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
+ struct ib_qp_init_attr *attrs,
+ struct ocrdma_qp *qp)
+{
+ int status;
+ u32 len, hw_pages, hw_page_size;
+ dma_addr_t pa = 0;
+ struct ocrdma_pd *pd = qp->pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
+
+ status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
+ &hw_pages, &hw_page_size);
+ if (status) {
+ pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
+ attrs->cap.max_recv_wr + 1);
+ return status;
+ }
+ qp->rq.max_cnt = max_rqe_allocated;
+ len = (hw_pages * hw_page_size);
+
+ qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ if (!qp->rq.va)
+ return -ENOMEM;
+ qp->rq.pa = pa;
+ qp->rq.len = len;
+ qp->rq.entry_size = dev->attr.rqe_size;
+
+ ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
+ cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
+ OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
+ cmd->num_wq_rq_pages |=
+ (hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
+ cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
+ cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
+ OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
+ cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
+ OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;
+ return 0;
+}
+
+static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
+ struct ocrdma_pd *pd,
+ struct ocrdma_qp *qp,
+ u8 enable_dpp_cq, u16 dpp_cq_id)
+{
+ pd->num_dpp_qp--;
+ qp->dpp_enabled = true;
+ cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
+ if (!enable_dpp_cq)
+ return;
+ cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
+ cmd->dpp_credits_cqid = dpp_cq_id;
+ cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
+ OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
+}
+
+static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
+ struct ocrdma_qp *qp)
+{
+ struct ocrdma_pd *pd = qp->pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ dma_addr_t pa = 0;
+ int ird_page_size = dev->attr.ird_page_size;
+ int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
+ struct ocrdma_hdr_wqe *rqe;
+ int i = 0;
+
+ if (dev->attr.ird == 0)
+ return 0;
+
+ qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len, &pa,
+ GFP_KERNEL);
+ if (!qp->ird_q_va)
+ return -ENOMEM;
+ ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
+ pa, ird_page_size);
+ for (; i < ird_q_len / dev->attr.rqe_size; i++) {
+ rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
+ (i * dev->attr.rqe_size));
+ rqe->cw = 0;
+ rqe->cw |= 2;
+ rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+ rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
+ rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
+ }
+ return 0;
+}
+
+static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
+ struct ocrdma_qp *qp,
+ struct ib_qp_init_attr *attrs,
+ u16 *dpp_offset, u16 *dpp_credit_lmt)
+{
+ u32 max_wqe_allocated, max_rqe_allocated;
+ qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
+ qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
+ qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
+ qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
+ qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
+ qp->dpp_enabled = false;
+ if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
+ qp->dpp_enabled = true;
+ *dpp_credit_lmt = (rsp->dpp_response &
+ OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
+ OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
+ *dpp_offset = (rsp->dpp_response &
+ OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
+ OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;
+ }
+ max_wqe_allocated =
+ rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
+ max_wqe_allocated = 1 << max_wqe_allocated;
+ max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
+
+ qp->sq.max_cnt = max_wqe_allocated;
+ qp->sq.max_wqe_idx = max_wqe_allocated - 1;
+
+ if (!attrs->srq) {
+ qp->rq.max_cnt = max_rqe_allocated;
+ qp->rq.max_wqe_idx = max_rqe_allocated - 1;
+ }
+}
+
+int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
+ u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
+ u16 *dpp_credit_lmt)
+{
+ int status = -ENOMEM;
+ u32 flags = 0;
+ struct ocrdma_pd *pd = qp->pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ struct ocrdma_cq *cq;
+ struct ocrdma_create_qp_req *cmd;
+ struct ocrdma_create_qp_rsp *rsp;
+ int qptype;
+
+ switch (attrs->qp_type) {
+ case IB_QPT_GSI:
+ qptype = OCRDMA_QPT_GSI;
+ break;
+ case IB_QPT_RC:
+ qptype = OCRDMA_QPT_RC;
+ break;
+ case IB_QPT_UD:
+ qptype = OCRDMA_QPT_UD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
+ if (!cmd)
+ return status;
+ cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_QPT_MASK;
+ status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
+ if (status)
+ goto sq_err;
+
+ if (attrs->srq) {
+ struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
+ cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
+ cmd->rq_addr[0].lo = srq->id;
+ qp->srq = srq;
+ } else {
+ status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
+ if (status)
+ goto rq_err;
+ }
+
+ status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
+ if (status)
+ goto mbx_err;
+
+ cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_PD_ID_MASK;
+
+ flags = ocrdma_set_create_qp_mbx_access_flags(qp);
+
+ cmd->max_sge_recv_flags |= flags;
+ cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
+ OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
+ cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
+ OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
+ cq = get_ocrdma_cq(attrs->send_cq);
+ cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
+ qp->sq_cq = cq;
+ cq = get_ocrdma_cq(attrs->recv_cq);
+ cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
+ OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
+ qp->rq_cq = cq;
+
+ if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
+ (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
+ ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
+ dpp_cq_id);
+ }
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_create_qp_rsp *)cmd;
+ ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
+ qp->state = OCRDMA_QPS_RST;
+ kfree(cmd);
+ return 0;
+mbx_err:
+ if (qp->rq.va)
+ dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
+rq_err:
+ pr_err("%s(%d) rq_err\n", __func__, dev->id);
+ dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
+sq_err:
+ pr_err("%s(%d) sq_err\n", __func__, dev->id);
+ kfree(cmd);
+ return status;
+}
+
+int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
+ struct ocrdma_qp_params *param)
+{
+ int status = -ENOMEM;
+ struct ocrdma_query_qp *cmd;
+ struct ocrdma_query_qp_rsp *rsp;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
+ if (!cmd)
+ return status;
+ cmd->qp_id = qp->id;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_query_qp_rsp *)cmd;
+ memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+static int ocrdma_set_av_params(struct ocrdma_qp *qp,
+ struct ocrdma_modify_qp *cmd,
+ struct ib_qp_attr *attrs,
+ int attr_mask)
+{
+ int status;
+ struct rdma_ah_attr *ah_attr = &attrs->ah_attr;
+ const struct ib_gid_attr *sgid_attr;
+ u16 vlan_id = 0xFFFF;
+ u8 mac_addr[6], hdr_type;
+ union {
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
+ struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+ const struct ib_global_route *grh;
+
+ if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) == 0)
+ return -EINVAL;
+ grh = rdma_ah_read_grh(ah_attr);
+ if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+ ocrdma_init_service_level(dev);
+ cmd->params.tclass_sq_psn |=
+ (grh->traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
+ cmd->params.rnt_rc_sl_fl |=
+ (grh->flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
+ cmd->params.rnt_rc_sl_fl |= (rdma_ah_get_sl(ah_attr) <<
+ OCRDMA_QP_PARAMS_SL_SHIFT);
+ cmd->params.hop_lmt_rq_psn |=
+ (grh->hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
+ cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
+
+ /* GIDs */
+ memcpy(&cmd->params.dgid[0], &grh->dgid.raw[0],
+ sizeof(cmd->params.dgid));
+
+ sgid_attr = ah_attr->grh.sgid_attr;
+ status = rdma_read_gid_l2_fields(sgid_attr, &vlan_id, &mac_addr[0]);
+ if (status)
+ return status;
+
+ qp->sgid_idx = grh->sgid_index;
+ memcpy(&cmd->params.sgid[0], &sgid_attr->gid.raw[0],
+ sizeof(cmd->params.sgid));
+ status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
+ if (status)
+ return status;
+
+ cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
+ (mac_addr[2] << 16) | (mac_addr[3] << 24);
+
+ hdr_type = rdma_gid_attr_network_type(sgid_attr);
+ if (hdr_type == RDMA_NETWORK_IPV4) {
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &grh->dgid);
+ memcpy(&cmd->params.dgid[0],
+ &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+ memcpy(&cmd->params.sgid[0],
+ &sgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+ }
+ /* convert them to LE format. */
+ ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
+ ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
+ cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
+
+ if (vlan_id == 0xFFFF)
+ vlan_id = 0;
+ if (vlan_id || dev->pfc_state) {
+ if (!vlan_id) {
+ pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
+ dev->id);
+ pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
+ dev->id);
+ }
+ cmd->params.vlan_dmac_b4_to_b5 |=
+ vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
+ cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
+ cmd->params.rnt_rc_sl_fl |=
+ (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+ }
+ cmd->params.max_sge_recv_flags |= ((hdr_type <<
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) &
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK);
+ return 0;
+}
+
+static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
+ struct ocrdma_modify_qp *cmd,
+ struct ib_qp_attr *attrs, int attr_mask)
+{
+ int status = 0;
+ struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
+ OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
+ cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
+ }
+ if (attr_mask & IB_QP_QKEY) {
+ qp->qkey = attrs->qkey;
+ cmd->params.qkey = attrs->qkey;
+ cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
+ }
+ if (attr_mask & IB_QP_AV) {
+ status = ocrdma_set_av_params(qp, cmd, attrs, attr_mask);
+ if (status)
+ return status;
+ } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
+ /* set the default mac address for UD, GSI QPs */
+ cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+ (dev->nic_info.mac_addr[1] << 8) |
+ (dev->nic_info.mac_addr[2] << 16) |
+ (dev->nic_info.mac_addr[3] << 24);
+ cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+ (dev->nic_info.mac_addr[5] << 8);
+ }
+ if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
+ attrs->en_sqd_async_notify) {
+ cmd->params.max_sge_recv_flags |=
+ OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
+ cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
+ }
+ if (attr_mask & IB_QP_DEST_QPN) {
+ cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
+ OCRDMA_QP_PARAMS_DEST_QPN_MASK);
+ cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
+ }
+ if (attr_mask & IB_QP_PATH_MTU) {
+ if (attrs->path_mtu < IB_MTU_512 ||
+ attrs->path_mtu > IB_MTU_4096) {
+ pr_err("ocrdma%d: IB MTU %d is not supported\n",
+ dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
+ status = -EINVAL;
+ goto pmtu_err;
+ }
+ cmd->params.path_mtu_pkey_indx |=
+ (ib_mtu_enum_to_int(attrs->path_mtu) <<
+ OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
+ OCRDMA_QP_PARAMS_PATH_MTU_MASK;
+ cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
+ }
+ if (attr_mask & IB_QP_TIMEOUT) {
+ cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
+ OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
+ cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
+ }
+ if (attr_mask & IB_QP_RETRY_CNT) {
+ cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
+ OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
+ OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
+ cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
+ }
+ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+ cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
+ OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
+ OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
+ cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
+ }
+ if (attr_mask & IB_QP_RNR_RETRY) {
+ cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
+ OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
+ & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
+ cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
+ }
+ if (attr_mask & IB_QP_SQ_PSN) {
+ cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
+ cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
+ }
+ if (attr_mask & IB_QP_RQ_PSN) {
+ cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
+ cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
+ }
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
+ status = -EINVAL;
+ goto pmtu_err;
+ }
+ qp->max_ord = attrs->max_rd_atomic;
+ cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
+ }
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
+ status = -EINVAL;
+ goto pmtu_err;
+ }
+ qp->max_ird = attrs->max_dest_rd_atomic;
+ cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
+ }
+ cmd->params.max_ord_ird = (qp->max_ord <<
+ OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
+ (qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
+pmtu_err:
+ return status;
+}
+
+int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
+ struct ib_qp_attr *attrs, int attr_mask)
+{
+ int status = -ENOMEM;
+ struct ocrdma_modify_qp *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
+ if (!cmd)
+ return status;
+
+ cmd->params.id = qp->id;
+ cmd->flags = 0;
+ if (attr_mask & IB_QP_STATE) {
+ cmd->params.max_sge_recv_flags |=
+ (get_ocrdma_qp_state(attrs->qp_state) <<
+ OCRDMA_QP_PARAMS_STATE_SHIFT) &
+ OCRDMA_QP_PARAMS_STATE_MASK;
+ cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
+ } else {
+ cmd->params.max_sge_recv_flags |=
+ (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
+ OCRDMA_QP_PARAMS_STATE_MASK;
+ }
+
+ status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
+ if (status)
+ goto mbx_err;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
+int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
+{
+ int status = -ENOMEM;
+ struct ocrdma_destroy_qp *cmd;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
+ if (!cmd)
+ return status;
+ cmd->qp_id = qp->id;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+mbx_err:
+ kfree(cmd);
+ if (qp->sq.va)
+ dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
+ if (!qp->srq && qp->rq.va)
+ dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
+ if (qp->dpp_enabled)
+ qp->pd->num_dpp_qp++;
+ return status;
+}
+
+int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
+ struct ib_srq_init_attr *srq_attr,
+ struct ocrdma_pd *pd)
+{
+ int status = -ENOMEM;
+ int hw_pages, hw_page_size;
+ int len;
+ struct ocrdma_create_srq_rsp *rsp;
+ struct ocrdma_create_srq *cmd;
+ dma_addr_t pa;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ u32 max_rqe_allocated;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+ if (!cmd)
+ return status;
+
+ cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
+ max_rqe_allocated = srq_attr->attr.max_wr + 1;
+ status = ocrdma_build_q_conf(&max_rqe_allocated,
+ dev->attr.rqe_size,
+ &hw_pages, &hw_page_size);
+ if (status) {
+ pr_err("%s() req. max_wr=0x%x\n", __func__,
+ srq_attr->attr.max_wr);
+ status = -EINVAL;
+ goto ret;
+ }
+ len = hw_pages * hw_page_size;
+ srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
+ if (!srq->rq.va) {
+ status = -ENOMEM;
+ goto ret;
+ }
+ ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
+
+ srq->rq.entry_size = dev->attr.rqe_size;
+ srq->rq.pa = pa;
+ srq->rq.len = len;
+ srq->rq.max_cnt = max_rqe_allocated;
+
+ cmd->max_sge_rqe = ilog2(max_rqe_allocated);
+ cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
+ OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
+
+ cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
+ << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
+ cmd->pages_rqe_sz |= (dev->attr.rqe_size
+ << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
+ & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
+ cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+ rsp = (struct ocrdma_create_srq_rsp *)cmd;
+ srq->id = rsp->id;
+ srq->rq.dbid = rsp->id;
+ max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
+ OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
+ OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
+ max_rqe_allocated = (1 << max_rqe_allocated);
+ srq->rq.max_cnt = max_rqe_allocated;
+ srq->rq.max_wqe_idx = max_rqe_allocated - 1;
+ srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
+ OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
+ OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
+ goto ret;
+mbx_err:
+ dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
+ret:
+ kfree(cmd);
+ return status;
+}
+
+int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
+{
+ int status = -ENOMEM;
+ struct ocrdma_modify_srq *cmd;
+ struct ocrdma_pd *pd = srq->pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
+ if (!cmd)
+ return status;
+ cmd->id = srq->id;
+ cmd->limit_max_rqe |= srq_attr->srq_limit <<
+ OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ kfree(cmd);
+ return status;
+}
+
+int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
+{
+ int status = -ENOMEM;
+ struct ocrdma_query_srq *cmd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
+ if (!cmd)
+ return status;
+ cmd->id = srq->rq.dbid;
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status == 0) {
+ struct ocrdma_query_srq_rsp *rsp =
+ (struct ocrdma_query_srq_rsp *)cmd;
+ srq_attr->max_sge =
+ rsp->srq_lmt_max_sge &
+ OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
+ srq_attr->max_wr =
+ rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
+ srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
+ OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
+ }
+ kfree(cmd);
+ return status;
+}
+
+void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
+{
+ struct ocrdma_destroy_srq *cmd;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
+ if (!cmd)
+ return;
+ cmd->id = srq->id;
+ ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (srq->rq.va)
+ dma_free_coherent(&pdev->dev, srq->rq.len,
+ srq->rq.va, srq->rq.pa);
+ kfree(cmd);
+}
+
+static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
+ struct ocrdma_dcbx_cfg *dcbxcfg)
+{
+ int status;
+ dma_addr_t pa;
+ struct ocrdma_mqe cmd;
+
+ struct ocrdma_get_dcbx_cfg_req *req = NULL;
+ struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;
+
+ memset(&cmd, 0, sizeof(struct ocrdma_mqe));
+	cmd.hdr.pyld_len = max_t(u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
+ sizeof(struct ocrdma_get_dcbx_cfg_req));
+ req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
+ if (!req) {
+ status = -ENOMEM;
+ goto mem_err;
+ }
+
+ cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
+ OCRDMA_MQE_HDR_SGE_CNT_MASK;
+ mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
+ mqe_sge->pa_hi = (u32) upper_32_bits(pa);
+ mqe_sge->len = cmd.hdr.pyld_len;
+
+ ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
+ OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
+ req->param_type = ptype;
+
+ status = ocrdma_mbx_cmd(dev, &cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
+ ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
+ memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));
+
+mbx_err:
+ dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
+mem_err:
+ return status;
+}
+
+#define OCRDMA_MAX_SERVICE_LEVEL_INDEX 0x08
+#define OCRDMA_DEFAULT_SERVICE_LEVEL 0x05
+
+static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
+ struct ocrdma_dcbx_cfg *dcbxcfg,
+ u8 *srvc_lvl)
+{
+ int status = -EINVAL, indx, slindx;
+ int ventry_cnt;
+ struct ocrdma_app_parameter *app_param;
+ u8 valid, proto_sel;
+ u8 app_prio, pfc_prio;
+ u16 proto;
+
+ if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
+ pr_info("%s ocrdma%d DCBX is disabled\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ goto out;
+ }
+
+ if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
+ pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id,
+ (ptype > 0 ? "operational" : "admin"),
+ (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
+ "enabled" : "disabled",
+ (dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
+ "" : ", not sync'ed");
+ goto out;
+ } else {
+ pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ }
+
+ ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
+ OCRDMA_DCBX_APP_ENTRY_SHIFT)
+ & OCRDMA_DCBX_STATE_MASK;
+
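+	/*
+	 * Walk the advertised application parameters: for the RoCE
+	 * ethertype (ETH_P_IBOE with L2 protocol selection), pick the
+	 * first service level index that is set in both the application
+	 * priority map and the PFC priority map.
+	 */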
+ for (indx = 0; indx < ventry_cnt; indx++) {
+ app_param = &dcbxcfg->app_param[indx];
+ valid = (app_param->valid_proto_app >>
+ OCRDMA_APP_PARAM_VALID_SHIFT)
+ & OCRDMA_APP_PARAM_VALID_MASK;
+ proto_sel = (app_param->valid_proto_app
+ >> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
+ & OCRDMA_APP_PARAM_PROTO_SEL_MASK;
+ proto = app_param->valid_proto_app &
+ OCRDMA_APP_PARAM_APP_PROTO_MASK;
+
+		if (valid && proto == ETH_P_IBOE &&
+		    proto_sel == OCRDMA_PROTO_SELECT_L2) {
+ for (slindx = 0; slindx <
+ OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
+ app_prio = ocrdma_get_app_prio(
+ (u8 *)app_param->app_prio,
+ slindx);
+ pfc_prio = ocrdma_get_pfc_prio(
+ (u8 *)dcbxcfg->pfc_prio,
+ slindx);
+
+ if (app_prio && pfc_prio) {
+ *srvc_lvl = slindx;
+ status = 0;
+ goto out;
+ }
+ }
+ if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
+ pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
+ dev_name(&dev->nic_info.pdev->dev),
+ dev->id, proto);
+ }
+ }
+ }
+
+out:
+ return status;
+}
+
+void ocrdma_init_service_level(struct ocrdma_dev *dev)
+{
+ int status = 0, indx;
+ struct ocrdma_dcbx_cfg dcbxcfg;
+ u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
+ int ptype = OCRDMA_PARAMETER_TYPE_OPER;
+
+ for (indx = 0; indx < 2; indx++) {
+ status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
+ if (status) {
+ pr_err("%s(): status=%d\n", __func__, status);
+ ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
+ continue;
+ }
+
+ status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
+ &dcbxcfg, &srvc_lvl);
+ if (status) {
+ ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
+ continue;
+ }
+
+ break;
+ }
+
+ if (status)
+ pr_info("%s ocrdma%d service level default\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ else
+ pr_info("%s ocrdma%d service level %d\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id,
+ srvc_lvl);
+
+ dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
+ dev->sl = srvc_lvl;
+}
+
+int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
+{
+ int i;
+ int status = -EINVAL;
+ struct ocrdma_av *av;
+ unsigned long flags;
+
+ av = dev->av_tbl.va;
+ spin_lock_irqsave(&dev->av_tbl.lock, flags);
+ for (i = 0; i < dev->av_tbl.num_ah; i++) {
+ if (av->valid == 0) {
+ av->valid = OCRDMA_AV_VALID;
+ ah->av = av;
+ ah->id = i;
+ status = 0;
+ break;
+ }
+ av++;
+ }
+ if (i == dev->av_tbl.num_ah)
+ status = -EAGAIN;
+ spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
+ return status;
+}
+
+void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&dev->av_tbl.lock, flags);
+ ah->av->valid = 0;
+ spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
+}
+
+static int ocrdma_create_eqs(struct ocrdma_dev *dev)
+{
+ int num_eq, i, status = 0;
+ int irq;
+ unsigned long flags = 0;
+
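+	/*
+	 * In MSI-X mode create one EQ per available vector, capped at the
+	 * number of online CPUs; legacy INTx mode falls back to a single
+	 * shared EQ.
+	 */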
+ num_eq = dev->nic_info.msix.num_vectors -
+ dev->nic_info.msix.start_vector;
+ if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
+ num_eq = 1;
+ flags = IRQF_SHARED;
+ } else {
+ num_eq = min_t(u32, num_eq, num_online_cpus());
+ }
+
+ if (!num_eq)
+ return -EINVAL;
+
+ dev->eq_tbl = kcalloc(num_eq, sizeof(struct ocrdma_eq), GFP_KERNEL);
+ if (!dev->eq_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < num_eq; i++) {
+ status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
+ OCRDMA_EQ_LEN);
+ if (status) {
+ status = -EINVAL;
+ break;
+ }
+ sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
+ dev->id, i);
+ irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
+ status = request_irq(irq, ocrdma_irq_handler, flags,
+ dev->eq_tbl[i].irq_name,
+ &dev->eq_tbl[i]);
+ if (status)
+ goto done;
+ dev->eq_cnt += 1;
+ }
+ /* one eq is sufficient for data path to work */
+ return 0;
+done:
+ ocrdma_destroy_eqs(dev);
+ return status;
+}
+
+static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+ int num)
+{
+ int i, status;
+ struct ocrdma_modify_eqd_req *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
+ if (!cmd)
+ return -ENOMEM;
+
+ ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+ cmd->cmd.num_eq = num;
+ for (i = 0; i < num; i++) {
+ cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
+ cmd->cmd.set_eqd[i].phase = 0;
+ cmd->cmd.set_eqd[i].delay_multiplier =
+ (eq[i].aic_obj.prev_eqd * 65)/100;
+ }
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+
+ kfree(cmd);
+ return status;
+}
+
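+/*
+ * The MODIFY_EQ_DELAY mailbox command carries at most 8 set_eqd entries
+ * (see struct ocrdma_modify_eqd_cmd), so issue the command in batches of
+ * up to 8 EQs.
+ */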
+static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+ int num)
+{
+ int num_eqs, i = 0;
+ if (num > 8) {
+ while (num) {
+ num_eqs = min(num, 8);
+ ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
+ i += num_eqs;
+ num -= num_eqs;
+ }
+ } else {
+ ocrdma_mbx_modify_eqd(dev, eq, num);
+ }
+ return 0;
+}
+
+void ocrdma_eqd_set_task(struct work_struct *work)
+{
+ struct ocrdma_dev *dev =
+ container_of(work, struct ocrdma_dev, eqd_work.work);
+ struct ocrdma_eq *eq = NULL;
+ int i, num = 0;
+ u64 eq_intr;
+
+ for (i = 0; i < dev->eq_cnt; i++) {
+ eq = &dev->eq_tbl[i];
+ if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
+ eq_intr = eq->aic_obj.eq_intr_cnt -
+ eq->aic_obj.prev_eq_intr_cnt;
+ if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
+ (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
+ eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
+ num++;
+ } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
+ (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
+ eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
+ num++;
+ }
+ }
+ eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
+ }
+
+ if (num)
+ ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+ schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
+}
+
+int ocrdma_init_hw(struct ocrdma_dev *dev)
+{
+ int status;
+
+ /* create the eqs */
+ status = ocrdma_create_eqs(dev);
+ if (status)
+ goto qpeq_err;
+ status = ocrdma_create_mq(dev);
+ if (status)
+ goto mq_err;
+ status = ocrdma_mbx_query_fw_config(dev);
+ if (status)
+ goto conf_err;
+ status = ocrdma_mbx_query_dev(dev);
+ if (status)
+ goto conf_err;
+ status = ocrdma_mbx_query_fw_ver(dev);
+ if (status)
+ goto conf_err;
+ status = ocrdma_mbx_create_ah_tbl(dev);
+ if (status)
+ goto conf_err;
+ status = ocrdma_mbx_get_phy_info(dev);
+ if (status)
+ goto info_attrb_err;
+ status = ocrdma_mbx_get_ctrl_attribs(dev);
+ if (status)
+ goto info_attrb_err;
+
+ return 0;
+
+info_attrb_err:
+ ocrdma_mbx_delete_ah_tbl(dev);
+conf_err:
+ ocrdma_destroy_mq(dev);
+mq_err:
+ ocrdma_destroy_eqs(dev);
+qpeq_err:
+ pr_err("%s() status=%d\n", __func__, status);
+ return status;
+}
+
+void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
+{
+ ocrdma_free_pd_pool(dev);
+ ocrdma_mbx_delete_ah_tbl(dev);
+
+ /* cleanup the control path */
+ ocrdma_destroy_mq(dev);
+
+ /* cleanup the eqs */
+ ocrdma_destroy_eqs(dev);
+}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
new file mode 100644
index 000000000..12c23a765
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -0,0 +1,159 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef __OCRDMA_HW_H__
+#define __OCRDMA_HW_H__
+
+#include "ocrdma_sli.h"
+
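+/*
+ * Mailbox payloads are little-endian on the wire: on little-endian hosts
+ * the in-place converters below are no-ops and the copy variants reduce
+ * to memcpy(); on big-endian hosts every 32-bit word is byte-swapped.
+ */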
+static inline void ocrdma_cpu_to_le32(void *dst, u32 len)
+{
+#ifdef __BIG_ENDIAN
+ int i = 0;
+ u32 *src_ptr = dst;
+ u32 *dst_ptr = dst;
+ for (; i < (len / 4); i++)
+ *(dst_ptr + i) = cpu_to_le32p(src_ptr + i);
+#endif
+}
+
+static inline void ocrdma_le32_to_cpu(void *dst, u32 len)
+{
+#ifdef __BIG_ENDIAN
+ int i = 0;
+ u32 *src_ptr = dst;
+ u32 *dst_ptr = dst;
+ for (; i < (len / sizeof(u32)); i++)
+ *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i));
+#endif
+}
+
+static inline void ocrdma_copy_cpu_to_le32(void *dst, void *src, u32 len)
+{
+#ifdef __BIG_ENDIAN
+ int i = 0;
+ u32 *src_ptr = src;
+ u32 *dst_ptr = dst;
+ for (; i < (len / sizeof(u32)); i++)
+ *(dst_ptr + i) = cpu_to_le32p(src_ptr + i);
+#else
+ memcpy(dst, src, len);
+#endif
+}
+
+static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len)
+{
+#ifdef __BIG_ENDIAN
+ int i = 0;
+ u32 *src_ptr = src;
+ u32 *dst_ptr = dst;
+ for (; i < len / sizeof(u32); i++)
+ *(dst_ptr + i) = le32_to_cpu(*(src_ptr + i));
+#else
+ memcpy(dst, src, len);
+#endif
+}
+
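+/*
+ * Each protection domain indexes its own doorbell page, e.g. with a 4K
+ * doorbell page size pdid 3 maps to unmapped_db + 12K.
+ */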
+static inline u64 ocrdma_get_db_addr(struct ocrdma_dev *dev, u32 pdid)
+{
+ return dev->nic_info.unmapped_db + (pdid * dev->nic_info.db_page_size);
+}
+
+int ocrdma_init_hw(struct ocrdma_dev *);
+void ocrdma_cleanup_hw(struct ocrdma_dev *);
+
+enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps);
+void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
+ bool solicited, u16 cqe_popped);
+
+/* verbs specific mailbox commands */
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
+ u8 *lnk_st);
+int ocrdma_query_config(struct ocrdma_dev *,
+ struct ocrdma_mbx_query_config *config);
+
+int ocrdma_mbx_alloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
+int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *, struct ocrdma_pd *);
+
+int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
+ u32 pd_id, int addr_check);
+int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);
+
+int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
+ u32 pd_id, int acc);
+int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
+ int entries, int dpp_cq, u16 pd_id);
+void ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq);
+
+int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
+ u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
+ u16 *dpp_credit_lmt);
+int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
+ struct ib_qp_attr *attrs, int attr_mask);
+int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
+ struct ocrdma_qp_params *param);
+int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);
+int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
+ struct ib_srq_init_attr *,
+ struct ocrdma_pd *);
+int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
+int ocrdma_mbx_query_srq(struct ocrdma_srq *, struct ib_srq_attr *);
+void ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq);
+
+int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
+void ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah);
+
+int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
+ enum ib_qp_state *old_ib_state);
+bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
+bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
+void ocrdma_flush_qp(struct ocrdma_qp *);
+int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
+
+int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
+char *port_speed_string(struct ocrdma_dev *dev);
+void ocrdma_init_service_level(struct ocrdma_dev *);
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
+void ocrdma_free_pd_range(struct ocrdma_dev *dev);
+void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate);
+
+#endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
new file mode 100644
index 000000000..5d4b3bc16
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -0,0 +1,432 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
+
+#include <linux/netdevice.h>
+#include <net/addrconf.h>
+
+#include "ocrdma.h"
+#include "ocrdma_verbs.h"
+#include "ocrdma_ah.h"
+#include "be_roce.h"
+#include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
+#include <rdma/ocrdma-abi.h>
+
+MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
+MODULE_AUTHOR("Emulex Corporation");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
+ u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int ocrdma_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ struct ocrdma_dev *dev;
+ int err;
+
+ dev = get_ocrdma_dev(ibdev);
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ if (ocrdma_is_udp_encap_supported(dev))
+ immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static void get_dev_fw_str(struct ib_device *device, char *str)
+{
+ struct ocrdma_dev *dev = get_ocrdma_dev(device);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", &dev->attr.fw_ver[0]);
+}
+
+/* OCRDMA sysfs interface */
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev =
+ rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
+
+ return sysfs_emit(buf, "0x%x\n", dev->nic_info.pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev =
+ rdma_device_to_drv_device(device, struct ocrdma_dev, ibdev);
+
+ return sysfs_emit(buf, "%s\n", &dev->model_number[0]);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *ocrdma_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group ocrdma_attr_group = {
+ .attrs = ocrdma_attributes,
+};
+
+static const struct ib_device_ops ocrdma_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_OCRDMA,
+ .uverbs_abi_ver = OCRDMA_ABI_VERSION,
+
+ .alloc_mr = ocrdma_alloc_mr,
+ .alloc_pd = ocrdma_alloc_pd,
+ .alloc_ucontext = ocrdma_alloc_ucontext,
+ .create_ah = ocrdma_create_ah,
+ .create_cq = ocrdma_create_cq,
+ .create_qp = ocrdma_create_qp,
+ .create_user_ah = ocrdma_create_ah,
+ .dealloc_pd = ocrdma_dealloc_pd,
+ .dealloc_ucontext = ocrdma_dealloc_ucontext,
+ .dereg_mr = ocrdma_dereg_mr,
+ .destroy_ah = ocrdma_destroy_ah,
+ .destroy_cq = ocrdma_destroy_cq,
+ .destroy_qp = ocrdma_destroy_qp,
+ .device_group = &ocrdma_attr_group,
+ .get_dev_fw_str = get_dev_fw_str,
+ .get_dma_mr = ocrdma_get_dma_mr,
+ .get_link_layer = ocrdma_link_layer,
+ .get_port_immutable = ocrdma_port_immutable,
+ .map_mr_sg = ocrdma_map_mr_sg,
+ .mmap = ocrdma_mmap,
+ .modify_qp = ocrdma_modify_qp,
+ .poll_cq = ocrdma_poll_cq,
+ .post_recv = ocrdma_post_recv,
+ .post_send = ocrdma_post_send,
+ .process_mad = ocrdma_process_mad,
+ .query_ah = ocrdma_query_ah,
+ .query_device = ocrdma_query_device,
+ .query_pkey = ocrdma_query_pkey,
+ .query_port = ocrdma_query_port,
+ .query_qp = ocrdma_query_qp,
+ .reg_user_mr = ocrdma_reg_user_mr,
+ .req_notify_cq = ocrdma_arm_cq,
+ .resize_cq = ocrdma_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, ocrdma_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, ocrdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, ocrdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, ocrdma_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, ocrdma_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops ocrdma_dev_srq_ops = {
+ .create_srq = ocrdma_create_srq,
+ .destroy_srq = ocrdma_destroy_srq,
+ .modify_srq = ocrdma_modify_srq,
+ .post_srq_recv = ocrdma_post_srq_recv,
+ .query_srq = ocrdma_query_srq,
+
+ INIT_RDMA_OBJ_SIZE(ib_srq, ocrdma_srq, ibsrq),
+};
+
+static int ocrdma_register_device(struct ocrdma_dev *dev)
+{
+ int ret;
+
+ addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+ dev->nic_info.mac_addr);
+ BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
+ memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
+ sizeof(OCRDMA_NODE_DESC));
+
+ dev->ibdev.node_type = RDMA_NODE_IB_CA;
+ dev->ibdev.phys_port_cnt = 1;
+ dev->ibdev.num_comp_vectors = dev->eq_cnt;
+
+ /* mandatory to support user space verbs consumer. */
+ dev->ibdev.dev.parent = &dev->nic_info.pdev->dev;
+
+ ib_set_device_ops(&dev->ibdev, &ocrdma_dev_ops);
+
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R)
+ ib_set_device_ops(&dev->ibdev, &ocrdma_dev_srq_ops);
+
+ ret = ib_device_set_netdev(&dev->ibdev, dev->nic_info.netdev, 1);
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(&dev->nic_info.pdev->dev, UINT_MAX);
+ return ib_register_device(&dev->ibdev, "ocrdma%d",
+ &dev->nic_info.pdev->dev);
+}
+
+static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
+{
+ mutex_init(&dev->dev_lock);
+ dev->cq_tbl = kcalloc(OCRDMA_MAX_CQ, sizeof(struct ocrdma_cq *),
+ GFP_KERNEL);
+ if (!dev->cq_tbl)
+ goto alloc_err;
+
+ if (dev->attr.max_qp) {
+ dev->qp_tbl = kcalloc(OCRDMA_MAX_QP,
+ sizeof(struct ocrdma_qp *),
+ GFP_KERNEL);
+ if (!dev->qp_tbl)
+ goto alloc_err;
+ }
+
+ dev->stag_arr = kcalloc(OCRDMA_MAX_STAG, sizeof(u64), GFP_KERNEL);
+ if (dev->stag_arr == NULL)
+ goto alloc_err;
+
+ ocrdma_alloc_pd_pool(dev);
+
+ if (!ocrdma_alloc_stats_resources(dev)) {
+ pr_err("%s: stats resource allocation failed\n", __func__);
+ goto alloc_err;
+ }
+
+ spin_lock_init(&dev->av_tbl.lock);
+ spin_lock_init(&dev->flush_q_lock);
+ return 0;
+alloc_err:
+ pr_err("%s(%d) error.\n", __func__, dev->id);
+ return -ENOMEM;
+}
+
+static void ocrdma_free_resources(struct ocrdma_dev *dev)
+{
+ ocrdma_release_stats_resources(dev);
+ kfree(dev->stag_arr);
+ kfree(dev->qp_tbl);
+ kfree(dev->cq_tbl);
+}
+
+static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
+{
+ int status = 0;
+ u8 lstate = 0;
+ struct ocrdma_dev *dev;
+
+ dev = ib_alloc_device(ocrdma_dev, ibdev);
+ if (!dev) {
+ pr_err("Unable to allocate ib device\n");
+ return NULL;
+ }
+
+ dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
+ if (!dev->mbx_cmd)
+ goto init_err;
+
+ memcpy(&dev->nic_info, dev_info, sizeof(*dev_info));
+ dev->id = PCI_FUNC(dev->nic_info.pdev->devfn);
+ status = ocrdma_init_hw(dev);
+ if (status)
+ goto init_err;
+
+ status = ocrdma_alloc_resources(dev);
+ if (status)
+ goto alloc_err;
+
+ ocrdma_init_service_level(dev);
+ status = ocrdma_register_device(dev);
+ if (status)
+ goto alloc_err;
+
+ /* Query Link state and update */
+ status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
+ if (!status)
+ ocrdma_update_link_state(dev, lstate);
+
+ /* Init stats */
+ ocrdma_add_port_stats(dev);
+ /* Interrupt Moderation */
+ INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
+ schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
+
+ pr_info("%s %s: %s \"%s\" port %d\n",
+ dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
+ port_speed_string(dev), dev->model_number,
+ dev->hba_port_num);
+ pr_info("%s ocrdma%d driver loaded successfully\n",
+ dev_name(&dev->nic_info.pdev->dev), dev->id);
+ return dev;
+
+alloc_err:
+ ocrdma_free_resources(dev);
+ ocrdma_cleanup_hw(dev);
+init_err:
+ kfree(dev->mbx_cmd);
+ ib_dealloc_device(&dev->ibdev);
+ pr_err("%s() leaving. ret=%d\n", __func__, status);
+ return NULL;
+}
+
+static void ocrdma_remove_free(struct ocrdma_dev *dev)
+{
+
+ kfree(dev->mbx_cmd);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static void ocrdma_remove(struct ocrdma_dev *dev)
+{
+ /* first unregister with stack to stop all the active traffic
+ * of the registered clients.
+ */
+ cancel_delayed_work_sync(&dev->eqd_work);
+ ib_unregister_device(&dev->ibdev);
+
+ ocrdma_rem_port_stats(dev);
+ ocrdma_free_resources(dev);
+ ocrdma_cleanup_hw(dev);
+ ocrdma_remove_free(dev);
+}
+
+static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
+{
+ struct ib_event port_event;
+
+ port_event.event = IB_EVENT_PORT_ACTIVE;
+ port_event.element.port_num = 1;
+ port_event.device = &dev->ibdev;
+ ib_dispatch_event(&port_event);
+ return 0;
+}
+
+static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
+{
+ struct ib_event err_event;
+
+ err_event.event = IB_EVENT_PORT_ERR;
+ err_event.element.port_num = 1;
+ err_event.device = &dev->ibdev;
+ ib_dispatch_event(&err_event);
+ return 0;
+}
+
+static void ocrdma_shutdown(struct ocrdma_dev *dev)
+{
+ ocrdma_dispatch_port_error(dev);
+ ocrdma_remove(dev);
+}
+
+/* Event handling via the NIC driver ensures that all NIC-specific
+ * initialization is done before the RoCE driver notifies an
+ * event to the stack.
+ */
+static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
+{
+ switch (event) {
+ case BE_DEV_SHUTDOWN:
+ ocrdma_shutdown(dev);
+ break;
+ default:
+ break;
+ }
+}
+
+void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate)
+{
+ if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) {
+ dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT;
+ if (!lstate)
+ return;
+ }
+
+ if (!lstate)
+ ocrdma_dispatch_port_error(dev);
+ else
+ ocrdma_dispatch_port_active(dev);
+}
+
+static struct ocrdma_driver ocrdma_drv = {
+ .name = "ocrdma_driver",
+ .add = ocrdma_add,
+ .remove = ocrdma_remove,
+ .state_change_handler = ocrdma_event_handler,
+ .be_abi_version = OCRDMA_BE_ROCE_ABI_VERSION,
+};
+
+static int __init ocrdma_init_module(void)
+{
+ int status;
+
+ ocrdma_init_debugfs();
+
+ status = be_roce_register_driver(&ocrdma_drv);
+ if (status)
+ goto err_be_reg;
+
+ return 0;
+
+err_be_reg:
+
+ return status;
+}
+
+static void __exit ocrdma_exit_module(void)
+{
+ be_roce_unregister_driver(&ocrdma_drv);
+ ocrdma_rem_debugfs();
+}
+
+module_init(ocrdma_init_module);
+module_exit(ocrdma_exit_module);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
new file mode 100644
index 000000000..c2e0d0fa4
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -0,0 +1,2240 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef __OCRDMA_SLI_H__
+#define __OCRDMA_SLI_H__
+
+enum {
+ OCRDMA_ASIC_GEN_SKH_R = 0x04,
+ OCRDMA_ASIC_GEN_LANCER = 0x0B
+};
+
+enum {
+ OCRDMA_ASIC_REV_A0 = 0x00,
+ OCRDMA_ASIC_REV_B0 = 0x10,
+ OCRDMA_ASIC_REV_C0 = 0x20
+};
+
+#define OCRDMA_SUBSYS_ROCE 10
+enum {
+ OCRDMA_CMD_QUERY_CONFIG = 1,
+ OCRDMA_CMD_ALLOC_PD = 2,
+ OCRDMA_CMD_DEALLOC_PD = 3,
+
+ OCRDMA_CMD_CREATE_AH_TBL = 4,
+ OCRDMA_CMD_DELETE_AH_TBL = 5,
+
+ OCRDMA_CMD_CREATE_QP = 6,
+ OCRDMA_CMD_QUERY_QP = 7,
+	OCRDMA_CMD_MODIFY_QP = 8,
+ OCRDMA_CMD_DELETE_QP = 9,
+
+ OCRDMA_CMD_RSVD1 = 10,
+ OCRDMA_CMD_ALLOC_LKEY = 11,
+ OCRDMA_CMD_DEALLOC_LKEY = 12,
+ OCRDMA_CMD_REGISTER_NSMR = 13,
+ OCRDMA_CMD_REREGISTER_NSMR = 14,
+ OCRDMA_CMD_REGISTER_NSMR_CONT = 15,
+ OCRDMA_CMD_QUERY_NSMR = 16,
+ OCRDMA_CMD_ALLOC_MW = 17,
+ OCRDMA_CMD_QUERY_MW = 18,
+
+ OCRDMA_CMD_CREATE_SRQ = 19,
+ OCRDMA_CMD_QUERY_SRQ = 20,
+ OCRDMA_CMD_MODIFY_SRQ = 21,
+ OCRDMA_CMD_DELETE_SRQ = 22,
+
+ OCRDMA_CMD_ATTACH_MCAST = 23,
+ OCRDMA_CMD_DETACH_MCAST = 24,
+
+ OCRDMA_CMD_CREATE_RBQ = 25,
+ OCRDMA_CMD_DESTROY_RBQ = 26,
+
+ OCRDMA_CMD_GET_RDMA_STATS = 27,
+ OCRDMA_CMD_ALLOC_PD_RANGE = 28,
+ OCRDMA_CMD_DEALLOC_PD_RANGE = 29,
+
+ OCRDMA_CMD_MAX
+};
+
+#define OCRDMA_SUBSYS_COMMON 1
+enum {
+ OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1 = 5,
+ OCRDMA_CMD_CREATE_CQ = 12,
+ OCRDMA_CMD_CREATE_EQ = 13,
+ OCRDMA_CMD_CREATE_MQ = 21,
+ OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32,
+ OCRDMA_CMD_GET_FW_VER = 35,
+ OCRDMA_CMD_MODIFY_EQ_DELAY = 41,
+ OCRDMA_CMD_DELETE_MQ = 53,
+ OCRDMA_CMD_DELETE_CQ = 54,
+ OCRDMA_CMD_DELETE_EQ = 55,
+ OCRDMA_CMD_GET_FW_CONFIG = 58,
+ OCRDMA_CMD_CREATE_MQ_EXT = 90,
+ OCRDMA_CMD_PHY_DETAILS = 102
+};
+
+enum {
+ QTYPE_EQ = 1,
+ QTYPE_CQ = 2,
+ QTYPE_MCCQ = 3
+};
+
+#define OCRDMA_MAX_SGID 16
+
+#define OCRDMA_MAX_QP 2048
+#define OCRDMA_MAX_CQ 2048
+#define OCRDMA_MAX_STAG 16384
+
+enum {
+ OCRDMA_DB_RQ_OFFSET = 0xE0,
+ OCRDMA_DB_GEN2_RQ_OFFSET = 0x100,
+ OCRDMA_DB_SQ_OFFSET = 0x60,
+ OCRDMA_DB_GEN2_SQ_OFFSET = 0x1C0,
+ OCRDMA_DB_SRQ_OFFSET = OCRDMA_DB_RQ_OFFSET,
+ OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET,
+ OCRDMA_DB_CQ_OFFSET = 0x120,
+ OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET,
+ OCRDMA_DB_MQ_OFFSET = 0x140,
+
+ OCRDMA_DB_SQ_SHIFT = 16,
+ OCRDMA_DB_RQ_SHIFT = 24
+};
+
+enum {
+ OCRDMA_L3_TYPE_IB_GRH = 0x00,
+ OCRDMA_L3_TYPE_IPV4 = 0x01,
+ OCRDMA_L3_TYPE_IPV6 = 0x02
+};
+
+#define OCRDMA_DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
+#define OCRDMA_DB_CQ_RING_ID_EXT_MASK	0x0C00	/* qid bits 10-11 -> db bits 11-12 */
+/* shift that places qid's two msbits (10-11) at doorbell bits 11-12 */
+#define OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT	0x1
+#define OCRDMA_DB_CQ_NUM_POPPED_SHIFT 16 /* bits 16 - 28 */
+/* Rearm bit */
+#define OCRDMA_DB_CQ_REARM_SHIFT 29 /* bit 29 */
+/* solicited bit */
+#define OCRDMA_DB_CQ_SOLICIT_SHIFT 31 /* bit 31 */
+
+#define OCRDMA_EQ_ID_MASK 0x1FF /* bits 0 - 8 */
+#define OCRDMA_EQ_ID_EXT_MASK 0x3e00 /* bits 9-13 */
+#define OCRDMA_EQ_ID_EXT_MASK_SHIFT 2 /* qid bits 9-13 at 11-15 */
+
+/* Clear the interrupt for this eq */
+#define OCRDMA_EQ_CLR_SHIFT 9 /* bit 9 */
+/* Must be 1 */
+#define OCRDMA_EQ_TYPE_SHIFT 10 /* bit 10 */
+/* Number of event entries processed */
+#define OCRDMA_NUM_EQE_SHIFT 16 /* bits 16 - 28 */
+/* Rearm bit */
+#define OCRDMA_REARM_SHIFT 29 /* bit 29 */
+
+#define OCRDMA_MQ_ID_MASK 0x7FF /* bits 0 - 10 */
+/* Number of entries posted */
+#define OCRDMA_MQ_NUM_MQE_SHIFT 16 /* bits 16 - 29 */
+
+#define OCRDMA_MIN_HPAGE_SIZE 4096
+
+#define OCRDMA_MIN_Q_PAGE_SIZE 4096
+#define OCRDMA_MAX_Q_PAGES 8
+
+#define OCRDMA_SLI_ASIC_ID_OFFSET 0x9C
+#define OCRDMA_SLI_ASIC_REV_MASK 0x000000FF
+#define OCRDMA_SLI_ASIC_GEN_NUM_MASK 0x0000FF00
+#define OCRDMA_SLI_ASIC_GEN_NUM_SHIFT 0x08
+/*
+ * 0: 4K Bytes
+ * 1: 8K Bytes
+ * 2: 16K Bytes
+ * 3: 32K Bytes
+ * 4: 64K Bytes
+ * 5: 128K Bytes
+ * 6: 256K Bytes
+ * 7: 512K Bytes
+ */
+#define OCRDMA_MAX_Q_PAGE_SIZE_CNT 8
+#define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES)
+
+#define MAX_OCRDMA_QP_PAGES 8
+#define OCRDMA_MAX_WQE_MEM_SIZE (MAX_OCRDMA_QP_PAGES * OCRDMA_MIN_HQ_PAGE_SIZE)
+
+#define OCRDMA_CREATE_CQ_MAX_PAGES 4
+#define OCRDMA_DPP_CQE_SIZE 4
+
+#define OCRDMA_GEN2_MAX_CQE 1024
+#define OCRDMA_GEN2_CQ_PAGE_SIZE 4096
+#define OCRDMA_GEN2_WQE_SIZE 256
+#define OCRDMA_MAX_CQE 4095
+#define OCRDMA_CQ_PAGE_SIZE 16384
+#define OCRDMA_WQE_SIZE 128
+#define OCRDMA_WQE_STRIDE 8
+#define OCRDMA_WQE_ALIGN_BYTES 16
+
+#define MAX_OCRDMA_SRQ_PAGES MAX_OCRDMA_QP_PAGES
+
+enum {
+ OCRDMA_MCH_OPCODE_SHIFT = 0,
+ OCRDMA_MCH_OPCODE_MASK = 0xFF,
+ OCRDMA_MCH_SUBSYS_SHIFT = 8,
+ OCRDMA_MCH_SUBSYS_MASK = 0xFF00
+};
+
+/* mailbox cmd header */
+struct ocrdma_mbx_hdr {
+ u32 subsys_op;
+ u32 timeout; /* in seconds */
+ u32 cmd_len;
+ u32 rsvd_version;
+};
+
+enum {
+ OCRDMA_MBX_RSP_OPCODE_SHIFT = 0,
+ OCRDMA_MBX_RSP_OPCODE_MASK = 0xFF,
+ OCRDMA_MBX_RSP_SUBSYS_SHIFT = 8,
+ OCRDMA_MBX_RSP_SUBSYS_MASK = 0xFF << OCRDMA_MBX_RSP_SUBSYS_SHIFT,
+
+ OCRDMA_MBX_RSP_STATUS_SHIFT = 0,
+ OCRDMA_MBX_RSP_STATUS_MASK = 0xFF,
+ OCRDMA_MBX_RSP_ASTATUS_SHIFT = 8,
+ OCRDMA_MBX_RSP_ASTATUS_MASK = 0xFF << OCRDMA_MBX_RSP_ASTATUS_SHIFT
+};
+
+/* mailbox cmd response */
+struct ocrdma_mbx_rsp {
+ u32 subsys_op;
+ u32 status;
+ u32 rsp_len;
+ u32 add_rsp_len;
+};
+
+enum {
+ OCRDMA_MQE_EMBEDDED = 1,
+ OCRDMA_MQE_NONEMBEDDED = 0
+};
+
+struct ocrdma_mqe_sge {
+ u32 pa_lo;
+ u32 pa_hi;
+ u32 len;
+};
+
+enum {
+ OCRDMA_MQE_HDR_EMB_SHIFT = 0,
+ OCRDMA_MQE_HDR_EMB_MASK = BIT(0),
+ OCRDMA_MQE_HDR_SGE_CNT_SHIFT = 3,
+ OCRDMA_MQE_HDR_SGE_CNT_MASK = 0x1F << OCRDMA_MQE_HDR_SGE_CNT_SHIFT,
+ OCRDMA_MQE_HDR_SPECIAL_SHIFT = 24,
+ OCRDMA_MQE_HDR_SPECIAL_MASK = 0xFF << OCRDMA_MQE_HDR_SPECIAL_SHIFT
+};
+
+struct ocrdma_mqe_hdr {
+ u32 spcl_sge_cnt_emb;
+ u32 pyld_len;
+ u32 tag_lo;
+ u32 tag_hi;
+ u32 rsvd3;
+};
+
+struct ocrdma_mqe_emb_cmd {
+ struct ocrdma_mbx_hdr mch;
+ u8 pyld[220];
+};
+
+struct ocrdma_mqe {
+ struct ocrdma_mqe_hdr hdr;
+ union {
+ struct ocrdma_mqe_emb_cmd emb_req;
+ struct {
+ struct ocrdma_mqe_sge sge[19];
+ } nonemb_req;
+ u8 cmd[236];
+ struct ocrdma_mbx_rsp rsp;
+ } u;
+};
+
+#define OCRDMA_EQ_LEN 4096
+#define OCRDMA_MQ_CQ_LEN 256
+#define OCRDMA_MQ_LEN 128
+
+#define PAGE_SHIFT_4K 12
+#define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K)
+
+/* Returns number of pages spanned by the data starting at the given addr */
+#define PAGES_4K_SPANNED(_address, size) \
+ ((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
+ (size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
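+/*
+ * e.g. 16 bytes starting at offset 0xFFC of a 4K page cross a page
+ * boundary: (0xFFC + 16 + 0xFFF) >> 12 == 2 pages.
+ */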
+
+struct ocrdma_delete_q_req {
+ struct ocrdma_mbx_hdr req;
+ u32 id;
+};
+
+struct ocrdma_pa {
+ u32 lo;
+ u32 hi;
+};
+
+#define MAX_OCRDMA_EQ_PAGES 8
+struct ocrdma_create_eq_req {
+ struct ocrdma_mbx_hdr req;
+ u32 num_pages;
+ u32 valid;
+ u32 cnt;
+ u32 delay;
+ u32 rsvd;
+ struct ocrdma_pa pa[MAX_OCRDMA_EQ_PAGES];
+};
+
+enum {
+ OCRDMA_CREATE_EQ_VALID = BIT(29),
+ OCRDMA_CREATE_EQ_CNT_SHIFT = 26,
+ OCRDMA_CREATE_CQ_DELAY_SHIFT = 13,
+};
+
+struct ocrdma_create_eq_rsp {
+ struct ocrdma_mbx_rsp rsp;
+ u32 vector_eqid;
+};
+
+#define OCRDMA_EQ_MINOR_OTHER 0x1
+
+struct ocrmda_set_eqd {
+ u32 eq_id;
+ u32 phase;
+ u32 delay_multiplier;
+};
+
+struct ocrdma_modify_eqd_cmd {
+ struct ocrdma_mbx_hdr req;
+ u32 num_eq;
+ struct ocrmda_set_eqd set_eqd[8];
+} __packed;
+
+struct ocrdma_modify_eqd_req {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_modify_eqd_cmd cmd;
+};
+
+
+struct ocrdma_modify_eq_delay_rsp {
+ struct ocrdma_mbx_rsp hdr;
+ u32 rsvd0;
+} __packed;
+
+enum {
+ OCRDMA_MCQE_STATUS_SHIFT = 0,
+ OCRDMA_MCQE_STATUS_MASK = 0xFFFF,
+ OCRDMA_MCQE_ESTATUS_SHIFT = 16,
+ OCRDMA_MCQE_ESTATUS_MASK = 0xFFFF << OCRDMA_MCQE_ESTATUS_SHIFT,
+ OCRDMA_MCQE_CONS_SHIFT = 27,
+ OCRDMA_MCQE_CONS_MASK = BIT(27),
+ OCRDMA_MCQE_CMPL_SHIFT = 28,
+ OCRDMA_MCQE_CMPL_MASK = BIT(28),
+ OCRDMA_MCQE_AE_SHIFT = 30,
+ OCRDMA_MCQE_AE_MASK = BIT(30),
+ OCRDMA_MCQE_VALID_SHIFT = 31,
+ OCRDMA_MCQE_VALID_MASK = BIT(31)
+};
+
+struct ocrdma_mcqe {
+ u32 status;
+ u32 tag_lo;
+ u32 tag_hi;
+ u32 valid_ae_cmpl_cons;
+};
+
+enum {
+ OCRDMA_AE_MCQE_QPVALID = BIT(31),
+ OCRDMA_AE_MCQE_QPID_MASK = 0xFFFF,
+
+ OCRDMA_AE_MCQE_CQVALID = BIT(31),
+ OCRDMA_AE_MCQE_CQID_MASK = 0xFFFF,
+ OCRDMA_AE_MCQE_VALID = BIT(31),
+ OCRDMA_AE_MCQE_AE = BIT(30),
+ OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT = 16,
+ OCRDMA_AE_MCQE_EVENT_TYPE_MASK =
+ 0xFF << OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT,
+ OCRDMA_AE_MCQE_EVENT_CODE_SHIFT = 8,
+ OCRDMA_AE_MCQE_EVENT_CODE_MASK =
+ 0xFF << OCRDMA_AE_MCQE_EVENT_CODE_SHIFT
+};
+struct ocrdma_ae_mcqe {
+ u32 qpvalid_qpid;
+ u32 cqvalid_cqid;
+ u32 evt_tag;
+ u32 valid_ae_event;
+};
+
+enum {
+ OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT = 0,
+ OCRDMA_AE_PVID_MCQE_ENABLED_MASK = 0xFF,
+ OCRDMA_AE_PVID_MCQE_TAG_SHIFT = 16,
+ OCRDMA_AE_PVID_MCQE_TAG_MASK = 0xFFFF << OCRDMA_AE_PVID_MCQE_TAG_SHIFT
+};
+
+struct ocrdma_ae_pvid_mcqe {
+ u32 tag_enabled;
+ u32 event_tag;
+ u32 rsvd1;
+ u32 rsvd2;
+};
+
+enum {
+ OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT = 16,
+ OCRDMA_AE_MPA_MCQE_REQ_ID_MASK = 0xFFFF <<
+ OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT,
+
+ OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT = 8,
+ OCRDMA_AE_MPA_MCQE_EVENT_CODE_MASK = 0xFF <<
+ OCRDMA_AE_MPA_MCQE_EVENT_CODE_SHIFT,
+ OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT = 16,
+ OCRDMA_AE_MPA_MCQE_EVENT_TYPE_MASK = 0xFF <<
+ OCRDMA_AE_MPA_MCQE_EVENT_TYPE_SHIFT,
+ OCRDMA_AE_MPA_MCQE_EVENT_AE_SHIFT = 30,
+ OCRDMA_AE_MPA_MCQE_EVENT_AE_MASK = BIT(30),
+ OCRDMA_AE_MPA_MCQE_EVENT_VALID_SHIFT = 31,
+ OCRDMA_AE_MPA_MCQE_EVENT_VALID_MASK = BIT(31)
+};
+
+struct ocrdma_ae_mpa_mcqe {
+ u32 req_id;
+ u32 w1;
+ u32 w2;
+ u32 valid_ae_event;
+};
+
+enum {
+ OCRDMA_AE_QP_MCQE_NEW_QP_STATE_SHIFT = 0,
+ OCRDMA_AE_QP_MCQE_NEW_QP_STATE_MASK = 0xFFFF,
+ OCRDMA_AE_QP_MCQE_QP_ID_SHIFT = 16,
+ OCRDMA_AE_QP_MCQE_QP_ID_MASK = 0xFFFF <<
+ OCRDMA_AE_QP_MCQE_QP_ID_SHIFT,
+
+ OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT = 8,
+ OCRDMA_AE_QP_MCQE_EVENT_CODE_MASK = 0xFF <<
+ OCRDMA_AE_QP_MCQE_EVENT_CODE_SHIFT,
+ OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT = 16,
+ OCRDMA_AE_QP_MCQE_EVENT_TYPE_MASK = 0xFF <<
+ OCRDMA_AE_QP_MCQE_EVENT_TYPE_SHIFT,
+ OCRDMA_AE_QP_MCQE_EVENT_AE_SHIFT = 30,
+ OCRDMA_AE_QP_MCQE_EVENT_AE_MASK = BIT(30),
+ OCRDMA_AE_QP_MCQE_EVENT_VALID_SHIFT = 31,
+ OCRDMA_AE_QP_MCQE_EVENT_VALID_MASK = BIT(31)
+};
+
+struct ocrdma_ae_qp_mcqe {
+ u32 qp_id_state;
+ u32 w1;
+ u32 w2;
+ u32 valid_ae_event;
+};
+
+enum ocrdma_async_event_code {
+ OCRDMA_ASYNC_LINK_EVE_CODE = 0x01,
+ OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05,
+ OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14
+};
+
+enum ocrdma_async_grp5_events {
+ OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
+ OCRDMA_ASYNC_EVENT_COS_VALUE = 0x02,
+ OCRDMA_ASYNC_EVENT_PVID_STATE = 0x03
+};
+
+enum OCRDMA_ASYNC_EVENT_TYPE {
+ OCRDMA_CQ_ERROR = 0x00,
+ OCRDMA_CQ_OVERRUN_ERROR = 0x01,
+ OCRDMA_CQ_QPCAT_ERROR = 0x02,
+ OCRDMA_QP_ACCESS_ERROR = 0x03,
+ OCRDMA_QP_COMM_EST_EVENT = 0x04,
+ OCRDMA_SQ_DRAINED_EVENT = 0x05,
+ OCRDMA_DEVICE_FATAL_EVENT = 0x08,
+ OCRDMA_SRQCAT_ERROR = 0x0E,
+ OCRDMA_SRQ_LIMIT_EVENT = 0x0F,
+ OCRDMA_QP_LAST_WQE_EVENT = 0x10,
+
+ OCRDMA_MAX_ASYNC_ERRORS
+};
+
+struct ocrdma_ae_lnkst_mcqe {
+ u32 speed_state_ptn;
+ u32 qos_reason_falut;
+ u32 evt_tag;
+ u32 valid_ae_event;
+};
+
+enum {
+ OCRDMA_AE_LSC_PORT_NUM_MASK = 0x3F,
+ OCRDMA_AE_LSC_PT_SHIFT = 0x06,
+ OCRDMA_AE_LSC_PT_MASK = (0x03 <<
+ OCRDMA_AE_LSC_PT_SHIFT),
+ OCRDMA_AE_LSC_LS_SHIFT = 0x08,
+ OCRDMA_AE_LSC_LS_MASK = (0xFF <<
+ OCRDMA_AE_LSC_LS_SHIFT),
+ OCRDMA_AE_LSC_LD_SHIFT = 0x10,
+ OCRDMA_AE_LSC_LD_MASK = (0xFF <<
+ OCRDMA_AE_LSC_LD_SHIFT),
+ OCRDMA_AE_LSC_PPS_SHIFT = 0x18,
+ OCRDMA_AE_LSC_PPS_MASK = (0xFF <<
+ OCRDMA_AE_LSC_PPS_SHIFT),
+ OCRDMA_AE_LSC_PPF_MASK = 0xFF,
+ OCRDMA_AE_LSC_ER_SHIFT = 0x08,
+ OCRDMA_AE_LSC_ER_MASK = (0xFF <<
+ OCRDMA_AE_LSC_ER_SHIFT),
+ OCRDMA_AE_LSC_QOS_SHIFT = 0x10,
+ OCRDMA_AE_LSC_QOS_MASK = (0xFFFF <<
+ OCRDMA_AE_LSC_QOS_SHIFT)
+};
+
+enum {
+ OCRDMA_AE_LSC_PLINK_DOWN = 0x00,
+ OCRDMA_AE_LSC_PLINK_UP = 0x01,
+ OCRDMA_AE_LSC_LLINK_DOWN = 0x02,
+ OCRDMA_AE_LSC_LLINK_MASK = 0x02,
+ OCRDMA_AE_LSC_LLINK_UP = 0x03
+};
+
+/* mailbox command request and responses */
+enum {
+ OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2,
+ OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK = BIT(2),
+ OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT = 3,
+ OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK = BIT(3),
+ OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT = 8,
+ OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK = 0xFFFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT,
+
+ OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT,
+ OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT = 8,
+ OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK = 0xFF <<
+ OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT,
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT = 3,
+ OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK = 0x18,
+ OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_RECV_SGE_SHIFT,
+
+ OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
+ OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT,
+
+ OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET = 24,
+ OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK = 0xFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK = 0xFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_CQES_OFFSET,
+
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_RPIR_QPS_OFFSET,
+
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_CREDITS_OFFSET,
+
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_DPP_QPS_OFFSET,
+
+ OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
+
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_OFFSET,
+
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_RQE_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_SHIFT = 0,
+ OCRDMA_MBX_QUERY_CFG_MAX_RD_SGE_MASK = 0xFFFF,
+};
+
+struct ocrdma_mbx_query_config {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+ u32 qp_srq_cq_ird_ord;
+ u32 max_pd_ca_ack_delay;
+ u32 max_recv_send_sge;
+ u32 max_ird_ord_per_qp;
+ u32 max_shared_ird_ord;
+ u32 max_mr;
+ u32 max_mr_size_hi;
+ u32 max_mr_size_lo;
+ u32 max_num_mr_pbl;
+ u32 max_mw;
+ u32 max_fmr;
+ u32 max_pages_per_frmr;
+ u32 max_mcast_group;
+ u32 max_mcast_qp_attach;
+ u32 max_total_mcast_qp_attach;
+ u32 wqe_rqe_stride_max_dpp_cqs;
+ u32 max_srq_rpir_qps;
+ u32 max_dpp_pds_credits;
+ u32 max_dpp_credits_pds_per_pd;
+ u32 max_wqes_rqes_per_q;
+ u32 max_cq_cqes_per_cq;
+ u32 max_srq_rqe_sge;
+ u32 max_wr_rd_sge;
+ u32 ird_pgsz_num_pages;
+};
+
+struct ocrdma_fw_ver_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u8 running_ver[32];
+};
+
+struct ocrdma_fw_conf_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 config_num;
+ u32 asic_revision;
+ u32 phy_port;
+ u32 fn_mode;
+ struct {
+ u32 mode;
+ u32 nic_wqid_base;
+ u32 nic_wq_tot;
+ u32 prot_wqid_base;
+ u32 prot_wq_tot;
+ u32 prot_rqid_base;
+ u32 prot_rqid_tot;
+ u32 rsvd[6];
+ } ulp[2];
+ u32 fn_capabilities;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 base_eqid;
+ u32 max_eq;
+
+};
+
+enum {
+ OCRDMA_FN_MODE_RDMA = 0x4
+};
+
+enum {
+ OCRDMA_IF_TYPE_MASK = 0xFFFF0000,
+ OCRDMA_IF_TYPE_SHIFT = 0x10,
+ OCRDMA_PHY_TYPE_MASK = 0x0000FFFF,
+ OCRDMA_FUTURE_DETAILS_MASK = 0xFFFF0000,
+ OCRDMA_FUTURE_DETAILS_SHIFT = 0x10,
+ OCRDMA_EX_PHY_DETAILS_MASK = 0x0000FFFF,
+ OCRDMA_FSPEED_SUPP_MASK = 0xFFFF0000,
+ OCRDMA_FSPEED_SUPP_SHIFT = 0x10,
+ OCRDMA_ASPEED_SUPP_MASK = 0x0000FFFF
+};
+
+struct ocrdma_get_phy_info_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 ityp_ptyp;
+ u32 misc_params;
+ u32 ftrdtl_exphydtl;
+ u32 fspeed_aspeed;
+ u32 future_use[2];
+};
+
+enum {
+ OCRDMA_PHY_SPEED_ZERO = 0x0,
+ OCRDMA_PHY_SPEED_10MBPS = 0x1,
+ OCRDMA_PHY_SPEED_100MBPS = 0x2,
+ OCRDMA_PHY_SPEED_1GBPS = 0x4,
+ OCRDMA_PHY_SPEED_10GBPS = 0x8,
+ OCRDMA_PHY_SPEED_40GBPS = 0x20
+};
+
+enum {
+ OCRDMA_PORT_NUM_MASK = 0x3F,
+ OCRDMA_PT_MASK = 0xC0,
+ OCRDMA_PT_SHIFT = 0x6,
+ OCRDMA_LINK_DUP_MASK = 0x0000FF00,
+ OCRDMA_LINK_DUP_SHIFT = 0x8,
+ OCRDMA_PHY_PS_MASK = 0x00FF0000,
+ OCRDMA_PHY_PS_SHIFT = 0x10,
+ OCRDMA_PHY_PFLT_MASK = 0xFF000000,
+ OCRDMA_PHY_PFLT_SHIFT = 0x18,
+ OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
+ OCRDMA_QOS_LNKSP_SHIFT = 0x10,
+ OCRDMA_LINK_ST_MASK = 0x01,
+ OCRDMA_PLFC_MASK = 0x00000400,
+ OCRDMA_PLFC_SHIFT = 0x8,
+ OCRDMA_PLRFC_MASK = 0x00000200,
+ OCRDMA_PLRFC_SHIFT = 0x8,
+ OCRDMA_PLTFC_MASK = 0x00000100,
+ OCRDMA_PLTFC_SHIFT = 0x8
+};
+
+struct ocrdma_get_link_speed_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 pflt_pps_ld_pnum;
+ u32 qos_lsp;
+ u32 res_lnk_st;
+};
+
+enum {
+ OCRDMA_PHYS_LINK_SPEED_ZERO = 0x0,
+ OCRDMA_PHYS_LINK_SPEED_10MBPS = 0x1,
+ OCRDMA_PHYS_LINK_SPEED_100MBPS = 0x2,
+ OCRDMA_PHYS_LINK_SPEED_1GBPS = 0x3,
+ OCRDMA_PHYS_LINK_SPEED_10GBPS = 0x4,
+ OCRDMA_PHYS_LINK_SPEED_20GBPS = 0x5,
+ OCRDMA_PHYS_LINK_SPEED_25GBPS = 0x6,
+ OCRDMA_PHYS_LINK_SPEED_40GBPS = 0x7,
+ OCRDMA_PHYS_LINK_SPEED_100GBPS = 0x8
+};
+
+enum {
+ OCRDMA_CREATE_CQ_VER2 = 2,
+ OCRDMA_CREATE_CQ_VER3 = 3,
+
+ OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF,
+ OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16,
+ OCRDMA_CREATE_CQ_PAGE_SIZE_MASK = 0xFF,
+
+ OCRDMA_CREATE_CQ_COALESCWM_SHIFT = 12,
+ OCRDMA_CREATE_CQ_COALESCWM_MASK = BIT(13) | BIT(12),
+ OCRDMA_CREATE_CQ_FLAGS_NODELAY = BIT(14),
+ OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID = BIT(15),
+
+ OCRDMA_CREATE_CQ_EQ_ID_MASK = 0xFFFF,
+ OCRDMA_CREATE_CQ_CQE_COUNT_MASK = 0xFFFF
+};
+
+enum {
+ OCRDMA_CREATE_CQ_VER0 = 0,
+ OCRDMA_CREATE_CQ_DPP = 1,
+ OCRDMA_CREATE_CQ_TYPE_SHIFT = 24,
+ OCRDMA_CREATE_CQ_EQID_SHIFT = 22,
+
+ OCRDMA_CREATE_CQ_CNT_SHIFT = 27,
+ OCRDMA_CREATE_CQ_FLAGS_VALID = BIT(29),
+ OCRDMA_CREATE_CQ_FLAGS_EVENTABLE = BIT(31),
+ OCRDMA_CREATE_CQ_DEF_FLAGS = OCRDMA_CREATE_CQ_FLAGS_VALID |
+ OCRDMA_CREATE_CQ_FLAGS_EVENTABLE |
+ OCRDMA_CREATE_CQ_FLAGS_NODELAY
+};
+
+struct ocrdma_create_cq_cmd {
+ struct ocrdma_mbx_hdr req;
+ u32 pgsz_pgcnt;
+ u32 ev_cnt_flags;
+ u32 eqn;
+ u32 pdid_cqecnt;
+ u32 rsvd6;
+ struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES];
+};
+
+struct ocrdma_create_cq {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_create_cq_cmd cmd;
+};
+
+enum {
+ OCRDMA_CREATE_CQ_CMD_PDID_SHIFT = 0x10
+};
+
+enum {
+ OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF
+};
+
+struct ocrdma_create_cq_cmd_rsp {
+ struct ocrdma_mbx_rsp rsp;
+ u32 cq_id;
+};
+
+struct ocrdma_create_cq_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_create_cq_cmd_rsp rsp;
+};
+
+enum {
+ OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT = 22,
+ OCRDMA_CREATE_MQ_CQ_ID_SHIFT = 16,
+ OCRDMA_CREATE_MQ_RING_SIZE_SHIFT = 16,
+ OCRDMA_CREATE_MQ_VALID = BIT(31),
+ OCRDMA_CREATE_MQ_ASYNC_CQ_VALID = BIT(0)
+};
+
+struct ocrdma_create_mq_req {
+ struct ocrdma_mbx_hdr req;
+ u32 cqid_pages;
+ u32 async_event_bitmap;
+ u32 async_cqid_ringsize;
+ u32 valid;
+ u32 async_cqid_valid;
+ u32 rsvd;
+ struct ocrdma_pa pa[8];
+};
+
+struct ocrdma_create_mq_rsp {
+ struct ocrdma_mbx_rsp rsp;
+ u32 id;
+};
+
+enum {
+ OCRDMA_DESTROY_CQ_QID_SHIFT = 0,
+ OCRDMA_DESTROY_CQ_QID_MASK = 0xFFFF,
+ OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT = 16,
+ OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_MASK = 0xFFFF <<
+ OCRDMA_DESTROY_CQ_QID_BYPASS_FLUSH_SHIFT
+};
+
+struct ocrdma_destroy_cq {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+ u32 bypass_flush_qid;
+};
+
+struct ocrdma_destroy_cq_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+};
+
+enum {
+ OCRDMA_QPT_GSI = 1,
+ OCRDMA_QPT_RC = 2,
+ OCRDMA_QPT_UD = 4,
+};
+
+enum {
+ OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT = 0,
+ OCRDMA_CREATE_QP_REQ_PD_ID_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT = 16,
+ OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT = 19,
+ OCRDMA_CREATE_QP_REQ_QPT_SHIFT = 29,
+ OCRDMA_CREATE_QP_REQ_QPT_MASK = BIT(31) | BIT(30) | BIT(29),
+
+ OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT = 0,
+ OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT = 16,
+ OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT,
+
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT = 0,
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT = 16,
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT,
+
+ OCRDMA_CREATE_QP_REQ_FMR_EN_SHIFT = 0,
+ OCRDMA_CREATE_QP_REQ_FMR_EN_MASK = BIT(0),
+ OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_SHIFT = 1,
+ OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK = BIT(1),
+ OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_SHIFT = 2,
+ OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK = BIT(2),
+ OCRDMA_CREATE_QP_REQ_INB_WREN_SHIFT = 3,
+ OCRDMA_CREATE_QP_REQ_INB_WREN_MASK = BIT(3),
+ OCRDMA_CREATE_QP_REQ_INB_RDEN_SHIFT = 4,
+ OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK = BIT(4),
+ OCRDMA_CREATE_QP_REQ_USE_SRQ_SHIFT = 5,
+ OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK = BIT(5),
+ OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_SHIFT = 6,
+ OCRDMA_CREATE_QP_REQ_ENABLE_RPIR_MASK = BIT(6),
+ OCRDMA_CREATE_QP_REQ_ENABLE_DPP_SHIFT = 7,
+ OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK = BIT(7),
+ OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_SHIFT = 8,
+ OCRDMA_CREATE_QP_REQ_ENABLE_DPP_CQ_MASK = BIT(8),
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT = 16,
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT,
+
+ OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT = 0,
+ OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT = 16,
+ OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT,
+
+ OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT = 0,
+ OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT = 16,
+ OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT,
+
+ OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT = 0,
+ OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT = 16,
+ OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT,
+
+ OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT = 0,
+ OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT = 16,
+ OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT,
+
+ OCRDMA_CREATE_QP_REQ_DPP_CQPID_SHIFT = 0,
+ OCRDMA_CREATE_QP_REQ_DPP_CQPID_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT = 16,
+ OCRDMA_CREATE_QP_REQ_DPP_CREDIT_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT
+};
+
+enum {
+ OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT = 16,
+ OCRDMA_CREATE_QP_RSP_DPP_PAGE_SHIFT = 1
+};
+
+#define MAX_OCRDMA_IRD_PAGES 4
+
+enum ocrdma_qp_flags {
+ OCRDMA_QP_MW_BIND = 1,
+ OCRDMA_QP_LKEY0 = (1 << 1),
+ OCRDMA_QP_FAST_REG = (1 << 2),
+ OCRDMA_QP_INB_RD = (1 << 6),
+ OCRDMA_QP_INB_WR = (1 << 7),
+};
+
+enum ocrdma_qp_state {
+ OCRDMA_QPS_RST = 0,
+ OCRDMA_QPS_INIT = 1,
+ OCRDMA_QPS_RTR = 2,
+ OCRDMA_QPS_RTS = 3,
+ OCRDMA_QPS_SQE = 4,
+ OCRDMA_QPS_SQ_DRAINING = 5,
+ OCRDMA_QPS_ERR = 6,
+ OCRDMA_QPS_SQD = 7
+};
+
+struct ocrdma_create_qp_req {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+ u32 type_pgsz_pdn;
+ u32 max_wqe_rqe;
+ u32 max_sge_send_write;
+ u32 max_sge_recv_flags;
+ u32 max_ord_ird;
+ u32 num_wq_rq_pages;
+ u32 wqe_rqe_size;
+ u32 wq_rq_cqid;
+ struct ocrdma_pa wq_addr[MAX_OCRDMA_QP_PAGES];
+ struct ocrdma_pa rq_addr[MAX_OCRDMA_QP_PAGES];
+ u32 dpp_credits_cqid;
+ u32 rpir_lkey;
+ struct ocrdma_pa ird_addr[MAX_OCRDMA_IRD_PAGES];
+};
+
+enum {
+ OCRDMA_CREATE_QP_RSP_QP_ID_SHIFT = 0,
+ OCRDMA_CREATE_QP_RSP_QP_ID_MASK = 0xFFFF,
+
+ OCRDMA_CREATE_QP_RSP_MAX_RQE_SHIFT = 0,
+ OCRDMA_CREATE_QP_RSP_MAX_RQE_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT = 16,
+ OCRDMA_CREATE_QP_RSP_MAX_WQE_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT,
+
+ OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_SHIFT = 0,
+ OCRDMA_CREATE_QP_RSP_MAX_SGE_WRITE_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT = 16,
+ OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_RSP_MAX_SGE_SEND_SHIFT,
+
+ OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT = 16,
+ OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_RSP_MAX_SGE_RECV_SHIFT,
+
+ OCRDMA_CREATE_QP_RSP_MAX_IRD_SHIFT = 0,
+ OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT = 16,
+ OCRDMA_CREATE_QP_RSP_MAX_ORD_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT,
+
+ OCRDMA_CREATE_QP_RSP_RQ_ID_SHIFT = 0,
+ OCRDMA_CREATE_QP_RSP_RQ_ID_MASK = 0xFFFF,
+ OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT = 16,
+ OCRDMA_CREATE_QP_RSP_SQ_ID_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT,
+
+ OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK = BIT(0),
+ OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT = 1,
+ OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK = 0x7FFF <<
+ OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT,
+ OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT = 16,
+ OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK = 0xFFFF <<
+ OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT,
+};
+
+struct ocrdma_create_qp_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 qp_id;
+ u32 max_wqe_rqe;
+ u32 max_sge_send_write;
+ u32 max_sge_recv;
+ u32 max_ord_ird;
+ u32 sq_rq_id;
+ u32 dpp_response;
+};
+
+struct ocrdma_destroy_qp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+ u32 qp_id;
+};
+
+struct ocrdma_destroy_qp_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+};
+
+enum {
+ OCRDMA_MODIFY_QP_ID_SHIFT = 0,
+ OCRDMA_MODIFY_QP_ID_MASK = 0xFFFF,
+
+ OCRDMA_QP_PARA_QPS_VALID = BIT(0),
+ OCRDMA_QP_PARA_SQD_ASYNC_VALID = BIT(1),
+ OCRDMA_QP_PARA_PKEY_VALID = BIT(2),
+ OCRDMA_QP_PARA_QKEY_VALID = BIT(3),
+ OCRDMA_QP_PARA_PMTU_VALID = BIT(4),
+ OCRDMA_QP_PARA_ACK_TO_VALID = BIT(5),
+ OCRDMA_QP_PARA_RETRY_CNT_VALID = BIT(6),
+ OCRDMA_QP_PARA_RRC_VALID = BIT(7),
+ OCRDMA_QP_PARA_RQPSN_VALID = BIT(8),
+ OCRDMA_QP_PARA_MAX_IRD_VALID = BIT(9),
+ OCRDMA_QP_PARA_MAX_ORD_VALID = BIT(10),
+ OCRDMA_QP_PARA_RNT_VALID = BIT(11),
+ OCRDMA_QP_PARA_SQPSN_VALID = BIT(12),
+ OCRDMA_QP_PARA_DST_QPN_VALID = BIT(13),
+ OCRDMA_QP_PARA_MAX_WQE_VALID = BIT(14),
+ OCRDMA_QP_PARA_MAX_RQE_VALID = BIT(15),
+ OCRDMA_QP_PARA_SGE_SEND_VALID = BIT(16),
+ OCRDMA_QP_PARA_SGE_RECV_VALID = BIT(17),
+ OCRDMA_QP_PARA_SGE_WR_VALID = BIT(18),
+ OCRDMA_QP_PARA_INB_RDEN_VALID = BIT(19),
+ OCRDMA_QP_PARA_INB_WREN_VALID = BIT(20),
+ OCRDMA_QP_PARA_FLOW_LBL_VALID = BIT(21),
+ OCRDMA_QP_PARA_BIND_EN_VALID = BIT(22),
+ OCRDMA_QP_PARA_ZLKEY_EN_VALID = BIT(23),
+ OCRDMA_QP_PARA_FMR_EN_VALID = BIT(24),
+ OCRDMA_QP_PARA_INBAT_EN_VALID = BIT(25),
+ OCRDMA_QP_PARA_VLAN_EN_VALID = BIT(26),
+
+ OCRDMA_MODIFY_QP_FLAGS_RD = BIT(0),
+ OCRDMA_MODIFY_QP_FLAGS_WR = BIT(1),
+ OCRDMA_MODIFY_QP_FLAGS_SEND = BIT(2),
+ OCRDMA_MODIFY_QP_FLAGS_ATOMIC = BIT(3)
+};
+
+enum {
+ OCRDMA_QP_PARAMS_SRQ_ID_SHIFT = 0,
+ OCRDMA_QP_PARAMS_SRQ_ID_MASK = 0xFFFF,
+
+ OCRDMA_QP_PARAMS_MAX_RQE_SHIFT = 0,
+ OCRDMA_QP_PARAMS_MAX_RQE_MASK = 0xFFFF,
+ OCRDMA_QP_PARAMS_MAX_WQE_SHIFT = 16,
+ OCRDMA_QP_PARAMS_MAX_WQE_MASK = 0xFFFF <<
+ OCRDMA_QP_PARAMS_MAX_WQE_SHIFT,
+
+ OCRDMA_QP_PARAMS_MAX_SGE_WRITE_SHIFT = 0,
+ OCRDMA_QP_PARAMS_MAX_SGE_WRITE_MASK = 0xFFFF,
+ OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT = 16,
+ OCRDMA_QP_PARAMS_MAX_SGE_SEND_MASK = 0xFFFF <<
+ OCRDMA_QP_PARAMS_MAX_SGE_SEND_SHIFT,
+
+ OCRDMA_QP_PARAMS_FLAGS_FMR_EN = BIT(0),
+ OCRDMA_QP_PARAMS_FLAGS_LKEY_0_EN = BIT(1),
+ OCRDMA_QP_PARAMS_FLAGS_BIND_MW_EN = BIT(2),
+ OCRDMA_QP_PARAMS_FLAGS_INBWR_EN = BIT(3),
+ OCRDMA_QP_PARAMS_FLAGS_INBRD_EN = BIT(4),
+ OCRDMA_QP_PARAMS_STATE_SHIFT = 5,
+ OCRDMA_QP_PARAMS_STATE_MASK = BIT(5) | BIT(6) | BIT(7),
+ OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC = BIT(8),
+ OCRDMA_QP_PARAMS_FLAGS_INB_ATEN = BIT(9),
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT = 11,
+ OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK = BIT(11) | BIT(12) | BIT(13),
+ OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT = 16,
+ OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK = 0xFFFF <<
+ OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT,
+
+ OCRDMA_QP_PARAMS_MAX_IRD_SHIFT = 0,
+ OCRDMA_QP_PARAMS_MAX_IRD_MASK = 0xFFFF,
+ OCRDMA_QP_PARAMS_MAX_ORD_SHIFT = 16,
+ OCRDMA_QP_PARAMS_MAX_ORD_MASK = 0xFFFF <<
+ OCRDMA_QP_PARAMS_MAX_ORD_SHIFT,
+
+ OCRDMA_QP_PARAMS_RQ_CQID_SHIFT = 0,
+ OCRDMA_QP_PARAMS_RQ_CQID_MASK = 0xFFFF,
+ OCRDMA_QP_PARAMS_WQ_CQID_SHIFT = 16,
+ OCRDMA_QP_PARAMS_WQ_CQID_MASK = 0xFFFF <<
+ OCRDMA_QP_PARAMS_WQ_CQID_SHIFT,
+
+ OCRDMA_QP_PARAMS_RQ_PSN_SHIFT = 0,
+ OCRDMA_QP_PARAMS_RQ_PSN_MASK = 0xFFFFFF,
+ OCRDMA_QP_PARAMS_HOP_LMT_SHIFT = 24,
+ OCRDMA_QP_PARAMS_HOP_LMT_MASK = 0xFF <<
+ OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
+
+ OCRDMA_QP_PARAMS_SQ_PSN_SHIFT = 0,
+ OCRDMA_QP_PARAMS_SQ_PSN_MASK = 0xFFFFFF,
+ OCRDMA_QP_PARAMS_TCLASS_SHIFT = 24,
+ OCRDMA_QP_PARAMS_TCLASS_MASK = 0xFF <<
+ OCRDMA_QP_PARAMS_TCLASS_SHIFT,
+
+ OCRDMA_QP_PARAMS_DEST_QPN_SHIFT = 0,
+ OCRDMA_QP_PARAMS_DEST_QPN_MASK = 0xFFFFFF,
+ OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT = 24,
+ OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK = 0x7 <<
+ OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT,
+ OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT = 27,
+ OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK = 0x1F <<
+ OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT,
+
+ OCRDMA_QP_PARAMS_PKEY_IDNEX_SHIFT = 0,
+ OCRDMA_QP_PARAMS_PKEY_INDEX_MASK = 0xFFFF,
+ OCRDMA_QP_PARAMS_PATH_MTU_SHIFT = 18,
+ OCRDMA_QP_PARAMS_PATH_MTU_MASK = 0x3FFF <<
+ OCRDMA_QP_PARAMS_PATH_MTU_SHIFT,
+
+ OCRDMA_QP_PARAMS_FLOW_LABEL_SHIFT = 0,
+ OCRDMA_QP_PARAMS_FLOW_LABEL_MASK = 0xFFFFF,
+ OCRDMA_QP_PARAMS_SL_SHIFT = 20,
+ OCRDMA_QP_PARAMS_SL_MASK = 0xF <<
+ OCRDMA_QP_PARAMS_SL_SHIFT,
+ OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT = 24,
+ OCRDMA_QP_PARAMS_RETRY_CNT_MASK = 0x7 <<
+ OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT,
+ OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT = 27,
+ OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK = 0x1F <<
+ OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT,
+
+ OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_SHIFT = 0,
+ OCRDMA_QP_PARAMS_DMAC_B4_TO_B5_MASK = 0xFFFF,
+ OCRDMA_QP_PARAMS_VLAN_SHIFT = 16,
+ OCRDMA_QP_PARAMS_VLAN_MASK = 0xFFFF <<
+ OCRDMA_QP_PARAMS_VLAN_SHIFT
+};
+
+struct ocrdma_qp_params {
+ u32 id;
+ u32 max_wqe_rqe;
+ u32 max_sge_send_write;
+ u32 max_sge_recv_flags;
+ u32 max_ord_ird;
+ u32 wq_rq_cqid;
+ u32 hop_lmt_rq_psn;
+ u32 tclass_sq_psn;
+ u32 ack_to_rnr_rtc_dest_qpn;
+ u32 path_mtu_pkey_indx;
+ u32 rnt_rc_sl_fl;
+ u8 sgid[16];
+ u8 dgid[16];
+ u32 dmac_b0_to_b3;
+ u32 vlan_dmac_b4_to_b5;
+ u32 qkey;
+};
+
+struct ocrdma_modify_qp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+ struct ocrdma_qp_params params;
+ u32 flags;
+ u32 rdma_flags;
+ u32 num_outstanding_atomic_rd;
+};
+
+enum {
+ OCRDMA_MODIFY_QP_RSP_MAX_RQE_SHIFT = 0,
+ OCRDMA_MODIFY_QP_RSP_MAX_RQE_MASK = 0xFFFF,
+ OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT = 16,
+ OCRDMA_MODIFY_QP_RSP_MAX_WQE_MASK = 0xFFFF <<
+ OCRDMA_MODIFY_QP_RSP_MAX_WQE_SHIFT,
+
+ OCRDMA_MODIFY_QP_RSP_MAX_IRD_SHIFT = 0,
+ OCRDMA_MODIFY_QP_RSP_MAX_IRD_MASK = 0xFFFF,
+ OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT = 16,
+ OCRDMA_MODIFY_QP_RSP_MAX_ORD_MASK = 0xFFFF <<
+ OCRDMA_MODIFY_QP_RSP_MAX_ORD_SHIFT
+};
+
+struct ocrdma_modify_qp_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 max_wqe_rqe;
+ u32 max_ord_ird;
+};
+
+struct ocrdma_query_qp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
+#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF
+ u32 qp_id;
+};
+
+struct ocrdma_query_qp_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+ struct ocrdma_qp_params params;
+ u32 dpp_credits_cqid;
+ u32 rbq_id;
+};
+
+enum {
+ OCRDMA_CREATE_SRQ_PD_ID_SHIFT = 0,
+ OCRDMA_CREATE_SRQ_PD_ID_MASK = 0xFFFF,
+ OCRDMA_CREATE_SRQ_PG_SZ_SHIFT = 16,
+ OCRDMA_CREATE_SRQ_PG_SZ_MASK = 0x3 <<
+ OCRDMA_CREATE_SRQ_PG_SZ_SHIFT,
+
+ OCRDMA_CREATE_SRQ_MAX_RQE_SHIFT = 0,
+ OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT = 16,
+ OCRDMA_CREATE_SRQ_MAX_SGE_RECV_MASK = 0xFFFF <<
+ OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT,
+
+ OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT = 0,
+ OCRDMA_CREATE_SRQ_RQE_SIZE_MASK = 0xFFFF,
+ OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT = 16,
+ OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_MASK = 0xFFFF <<
+ OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT
+};
+
+struct ocrdma_create_srq {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+ u32 pgsz_pdid;
+ u32 max_sge_rqe;
+ u32 pages_rqe_sz;
+ struct ocrdma_pa rq_addr[MAX_OCRDMA_SRQ_PAGES];
+};
+
+enum {
+ OCRDMA_CREATE_SRQ_RSP_SRQ_ID_SHIFT = 0,
+ OCRDMA_CREATE_SRQ_RSP_SRQ_ID_MASK = 0xFFFFFF,
+
+ OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT = 0,
+ OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK = 0xFFFF,
+ OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT = 16,
+ OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK = 0xFFFF <<
+ OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT
+};
+
+struct ocrdma_create_srq_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 id;
+ u32 max_sge_rqe_allocated;
+};
+
+enum {
+ OCRDMA_MODIFY_SRQ_ID_SHIFT = 0,
+ OCRDMA_MODIFY_SRQ_ID_MASK = 0xFFFFFF,
+
+ OCRDMA_MODIFY_SRQ_MAX_RQE_SHIFT = 0,
+ OCRDMA_MODIFY_SRQ_MAX_RQE_MASK = 0xFFFF,
+ OCRDMA_MODIFY_SRQ_LIMIT_SHIFT = 16,
+ OCRDMA_MODIFY_SRQ__LIMIT_MASK = 0xFFFF <<
+ OCRDMA_MODIFY_SRQ_LIMIT_SHIFT
+};
+
+struct ocrdma_modify_srq {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rep;
+
+ u32 id;
+ u32 limit_max_rqe;
+};
+
+enum {
+ OCRDMA_QUERY_SRQ_ID_SHIFT = 0,
+ OCRDMA_QUERY_SRQ_ID_MASK = 0xFFFFFF
+};
+
+struct ocrdma_query_srq {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp req;
+
+ u32 id;
+};
+
+enum {
+ OCRDMA_QUERY_SRQ_RSP_PD_ID_SHIFT = 0,
+ OCRDMA_QUERY_SRQ_RSP_PD_ID_MASK = 0xFFFF,
+ OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT = 16,
+ OCRDMA_QUERY_SRQ_RSP_MAX_RQE_MASK = 0xFFFF <<
+ OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT,
+
+ OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_SHIFT = 0,
+ OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK = 0xFFFF,
+ OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT = 16,
+ OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_MASK = 0xFFFF <<
+ OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT
+};
+
+struct ocrdma_query_srq_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp req;
+
+ u32 max_rqe_pdid;
+ u32 srq_lmt_max_sge;
+};
+
+enum {
+ OCRDMA_DESTROY_SRQ_ID_SHIFT = 0,
+ OCRDMA_DESTROY_SRQ_ID_MASK = 0xFFFFFF
+};
+
+struct ocrdma_destroy_srq {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp req;
+
+ u32 id;
+};
+
+enum {
+ OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16),
+ OCRDMA_DPP_PAGE_SIZE = 4096
+};
+
+struct ocrdma_alloc_pd {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+ u32 enable_dpp_rsvd;
+};
+
+enum {
+ OCRDMA_ALLOC_PD_RSP_DPP = BIT(16),
+ OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT = 20,
+ OCRDMA_ALLOC_PD_RSP_PDID_MASK = 0xFFFF,
+};
+
+struct ocrdma_alloc_pd_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+ u32 dpp_page_pdid;
+};
+
+struct ocrdma_dealloc_pd {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+ u32 id;
+};
+
+struct ocrdma_dealloc_pd_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+};
+
+struct ocrdma_alloc_pd_range {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+ u32 enable_dpp_rsvd;
+ u32 pd_count;
+};
+
+struct ocrdma_alloc_pd_range_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+ u32 dpp_page_pdid;
+ u32 pd_count;
+};
+
+enum {
+ OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF,
+};
+
+struct ocrdma_dealloc_pd_range {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+ u32 start_pd_id;
+ u32 pd_count;
+};
+
+struct ocrdma_dealloc_pd_range_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+ u32 rsvd;
+};
+
+enum {
+ OCRDMA_ADDR_CHECK_ENABLE = 1,
+ OCRDMA_ADDR_CHECK_DISABLE = 0
+};
+
+enum {
+ OCRDMA_ALLOC_LKEY_PD_ID_SHIFT = 0,
+ OCRDMA_ALLOC_LKEY_PD_ID_MASK = 0xFFFF,
+
+ OCRDMA_ALLOC_LKEY_ADDR_CHECK_SHIFT = 0,
+ OCRDMA_ALLOC_LKEY_ADDR_CHECK_MASK = BIT(0),
+ OCRDMA_ALLOC_LKEY_FMR_SHIFT = 1,
+ OCRDMA_ALLOC_LKEY_FMR_MASK = BIT(1),
+ OCRDMA_ALLOC_LKEY_REMOTE_INV_SHIFT = 2,
+ OCRDMA_ALLOC_LKEY_REMOTE_INV_MASK = BIT(2),
+ OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT = 3,
+ OCRDMA_ALLOC_LKEY_REMOTE_WR_MASK = BIT(3),
+ OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT = 4,
+ OCRDMA_ALLOC_LKEY_REMOTE_RD_MASK = BIT(4),
+ OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT = 5,
+ OCRDMA_ALLOC_LKEY_LOCAL_WR_MASK = BIT(5),
+ OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_MASK = BIT(6),
+ OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT = 6,
+ OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT = 16,
+ OCRDMA_ALLOC_LKEY_PBL_SIZE_MASK = 0xFFFF <<
+ OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT
+};
+
+struct ocrdma_alloc_lkey {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+ u32 pdid;
+ u32 pbl_sz_flags;
+};
+
+struct ocrdma_alloc_lkey_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 lrkey;
+ u32 num_pbl_rsvd;
+};
+
+struct ocrdma_dealloc_lkey {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+ u32 lkey;
+ u32 rsvd_frmr;
+};
+
+struct ocrdma_dealloc_lkey_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+};
+
+#define MAX_OCRDMA_NSMR_PBL (u32)22
+#define MAX_OCRDMA_PBL_SIZE 65536
+#define MAX_OCRDMA_PBL_PER_LKEY 32767
+
+enum {
+ OCRDMA_REG_NSMR_LRKEY_INDEX_SHIFT = 0,
+ OCRDMA_REG_NSMR_LRKEY_INDEX_MASK = 0xFFFFFF,
+ OCRDMA_REG_NSMR_LRKEY_SHIFT = 24,
+ OCRDMA_REG_NSMR_LRKEY_MASK = 0xFF <<
+ OCRDMA_REG_NSMR_LRKEY_SHIFT,
+
+ OCRDMA_REG_NSMR_PD_ID_SHIFT = 0,
+ OCRDMA_REG_NSMR_PD_ID_MASK = 0xFFFF,
+ OCRDMA_REG_NSMR_NUM_PBL_SHIFT = 16,
+ OCRDMA_REG_NSMR_NUM_PBL_MASK = 0xFFFF <<
+ OCRDMA_REG_NSMR_NUM_PBL_SHIFT,
+
+ OCRDMA_REG_NSMR_PBE_SIZE_SHIFT = 0,
+ OCRDMA_REG_NSMR_PBE_SIZE_MASK = 0xFFFF,
+ OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT = 16,
+ OCRDMA_REG_NSMR_HPAGE_SIZE_MASK = 0xFF <<
+ OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT,
+ OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT = 24,
+ OCRDMA_REG_NSMR_BIND_MEMWIN_MASK = BIT(24),
+ OCRDMA_REG_NSMR_ZB_SHIFT = 25,
+ OCRDMA_REG_NSMR_ZB_SHIFT_MASK = BIT(25),
+ OCRDMA_REG_NSMR_REMOTE_INV_SHIFT = 26,
+ OCRDMA_REG_NSMR_REMOTE_INV_MASK = BIT(26),
+ OCRDMA_REG_NSMR_REMOTE_WR_SHIFT = 27,
+ OCRDMA_REG_NSMR_REMOTE_WR_MASK = BIT(27),
+ OCRDMA_REG_NSMR_REMOTE_RD_SHIFT = 28,
+ OCRDMA_REG_NSMR_REMOTE_RD_MASK = BIT(28),
+ OCRDMA_REG_NSMR_LOCAL_WR_SHIFT = 29,
+ OCRDMA_REG_NSMR_LOCAL_WR_MASK = BIT(29),
+ OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT = 30,
+ OCRDMA_REG_NSMR_REMOTE_ATOMIC_MASK = BIT(30),
+ OCRDMA_REG_NSMR_LAST_SHIFT = 31,
+ OCRDMA_REG_NSMR_LAST_MASK = BIT(31)
+};
+
+struct ocrdma_reg_nsmr {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr cmd;
+
+ u32 fr_mr;
+ u32 num_pbl_pdid;
+ u32 flags_hpage_pbe_sz;
+ u32 totlen_low;
+ u32 totlen_high;
+ u32 fbo_low;
+ u32 fbo_high;
+ u32 va_loaddr;
+ u32 va_hiaddr;
+ struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
+};
+
+enum {
+ OCRDMA_REG_NSMR_CONT_PBL_SHIFT = 0,
+ OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK = 0xFFFF,
+ OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT = 16,
+ OCRDMA_REG_NSMR_CONT_NUM_PBL_MASK = 0xFFFF <<
+ OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT,
+
+ OCRDMA_REG_NSMR_CONT_LAST_SHIFT = 31,
+ OCRDMA_REG_NSMR_CONT_LAST_MASK = BIT(31)
+};
+
+struct ocrdma_reg_nsmr_cont {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr cmd;
+
+ u32 lrkey;
+ u32 num_pbl_offset;
+ u32 last;
+
+ struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
+};
+
+struct ocrdma_pbe {
+ u32 pa_hi;
+ u32 pa_lo;
+};
+
+enum {
+ OCRDMA_REG_NSMR_RSP_NUM_PBL_SHIFT = 16,
+ OCRDMA_REG_NSMR_RSP_NUM_PBL_MASK = 0xFFFF0000
+};
+struct ocrdma_reg_nsmr_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 lrkey;
+ u32 num_pbl;
+};
+
+enum {
+ OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_SHIFT = 0,
+ OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_MASK = 0xFFFFFF,
+ OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT = 24,
+ OCRDMA_REG_NSMR_CONT_RSP_LRKEY_MASK = 0xFF <<
+ OCRDMA_REG_NSMR_CONT_RSP_LRKEY_SHIFT,
+
+ OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT = 16,
+ OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_MASK = 0xFFFF <<
+ OCRDMA_REG_NSMR_CONT_RSP_NUM_PBL_SHIFT
+};
+
+struct ocrdma_reg_nsmr_cont_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 lrkey_key_index;
+ u32 num_pbl;
+};
+
+enum {
+ OCRDMA_ALLOC_MW_PD_ID_SHIFT = 0,
+ OCRDMA_ALLOC_MW_PD_ID_MASK = 0xFFFF
+};
+
+struct ocrdma_alloc_mw {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+ u32 pdid;
+};
+
+enum {
+ OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_SHIFT = 0,
+ OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_MASK = 0xFFFFFF
+};
+
+struct ocrdma_alloc_mw_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u32 lrkey_index;
+};
+
+struct ocrdma_attach_mcast {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+ u32 qp_id;
+ u8 mgid[16];
+ u32 mac_b0_to_b3;
+ u32 vlan_mac_b4_to_b5;
+};
+
+struct ocrdma_attach_mcast_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+};
+
+struct ocrdma_detach_mcast {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+ u32 qp_id;
+ u8 mgid[16];
+ u32 mac_b0_to_b3;
+ u32 vlan_mac_b4_to_b5;
+};
+
+struct ocrdma_detach_mcast_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+};
+
+enum {
+ OCRDMA_CREATE_AH_NUM_PAGES_SHIFT = 19,
+ OCRDMA_CREATE_AH_NUM_PAGES_MASK = 0xF <<
+ OCRDMA_CREATE_AH_NUM_PAGES_SHIFT,
+
+ OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT = 16,
+ OCRDMA_CREATE_AH_PAGE_SIZE_MASK = 0x7 <<
+ OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT,
+
+ OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT = 23,
+ OCRDMA_CREATE_AH_ENTRY_SIZE_MASK = 0x1FF <<
+ OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT,
+};
+
+#define OCRDMA_AH_TBL_PAGES 8
+
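+/* Mailbox command to create the address handle (AH) table; ah_conf packs
+ * the page count, page size and AH entry size using the
+ * OCRDMA_CREATE_AH_* fields defined above.
+ */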
+struct ocrdma_create_ah_tbl {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+
+ u32 ah_conf;
+ struct ocrdma_pa tbl_addr[OCRDMA_AH_TBL_PAGES];
+};
+
+struct ocrdma_create_ah_tbl_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+ u32 ahid;
+};
+
+struct ocrdma_delete_ah_tbl {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_hdr req;
+ u32 ahid;
+};
+
+struct ocrdma_delete_ah_tbl_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+};
+
+enum {
+ OCRDMA_EQE_VALID_SHIFT = 0,
+ OCRDMA_EQE_VALID_MASK = BIT(0),
+ OCRDMA_EQE_MAJOR_CODE_MASK = 0x0E,
+ OCRDMA_EQE_MAJOR_CODE_SHIFT = 0x01,
+ OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE,
+ OCRDMA_EQE_RESOURCE_ID_SHIFT = 16,
+ OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF <<
+ OCRDMA_EQE_RESOURCE_ID_SHIFT,
+};
+
+enum major_code {
+ OCRDMA_MAJOR_CODE_COMPLETION = 0x00,
+ OCRDMA_MAJOR_CODE_SENTINAL = 0x01
+};
+
+struct ocrdma_eqe {
+ u32 id_valid;
+};
+
+enum OCRDMA_CQE_STATUS {
+ OCRDMA_CQE_SUCCESS = 0,
+ OCRDMA_CQE_LOC_LEN_ERR,
+ OCRDMA_CQE_LOC_QP_OP_ERR,
+ OCRDMA_CQE_LOC_EEC_OP_ERR,
+ OCRDMA_CQE_LOC_PROT_ERR,
+ OCRDMA_CQE_WR_FLUSH_ERR,
+ OCRDMA_CQE_MW_BIND_ERR,
+ OCRDMA_CQE_BAD_RESP_ERR,
+ OCRDMA_CQE_LOC_ACCESS_ERR,
+ OCRDMA_CQE_REM_INV_REQ_ERR,
+ OCRDMA_CQE_REM_ACCESS_ERR,
+ OCRDMA_CQE_REM_OP_ERR,
+ OCRDMA_CQE_RETRY_EXC_ERR,
+ OCRDMA_CQE_RNR_RETRY_EXC_ERR,
+ OCRDMA_CQE_LOC_RDD_VIOL_ERR,
+ OCRDMA_CQE_REM_INV_RD_REQ_ERR,
+ OCRDMA_CQE_REM_ABORT_ERR,
+ OCRDMA_CQE_INV_EECN_ERR,
+ OCRDMA_CQE_INV_EEC_STATE_ERR,
+ OCRDMA_CQE_FATAL_ERR,
+ OCRDMA_CQE_RESP_TIMEOUT_ERR,
+ OCRDMA_CQE_GENERAL_ERR,
+
+ OCRDMA_MAX_CQE_ERR
+};
+
+enum {
+ /* w0 */
+ OCRDMA_CQE_WQEIDX_SHIFT = 0,
+ OCRDMA_CQE_WQEIDX_MASK = 0xFFFF,
+
+ /* w1 */
+ OCRDMA_CQE_UD_XFER_LEN_SHIFT = 16,
+ OCRDMA_CQE_UD_XFER_LEN_MASK = 0x1FFF,
+ OCRDMA_CQE_PKEY_SHIFT = 0,
+ OCRDMA_CQE_PKEY_MASK = 0xFFFF,
+ OCRDMA_CQE_UD_L3TYPE_SHIFT = 29,
+ OCRDMA_CQE_UD_L3TYPE_MASK = 0x07,
+
+ /* w2 */
+ OCRDMA_CQE_QPN_SHIFT = 0,
+ OCRDMA_CQE_QPN_MASK = 0x0000FFFF,
+
+ OCRDMA_CQE_BUFTAG_SHIFT = 16,
+ OCRDMA_CQE_BUFTAG_MASK = 0xFFFF << OCRDMA_CQE_BUFTAG_SHIFT,
+
+ /* w3 */
+ OCRDMA_CQE_UD_STATUS_SHIFT = 24,
+ OCRDMA_CQE_UD_STATUS_MASK = 0x7 << OCRDMA_CQE_UD_STATUS_SHIFT,
+ OCRDMA_CQE_STATUS_SHIFT = 16,
+ OCRDMA_CQE_STATUS_MASK = 0xFF << OCRDMA_CQE_STATUS_SHIFT,
+ OCRDMA_CQE_VALID = BIT(31),
+ OCRDMA_CQE_INVALIDATE = BIT(30),
+ OCRDMA_CQE_QTYPE = BIT(29),
+ OCRDMA_CQE_IMM = BIT(28),
+ OCRDMA_CQE_WRITE_IMM = BIT(27),
+ OCRDMA_CQE_QTYPE_SQ = 0,
+ OCRDMA_CQE_QTYPE_RQ = 1,
+ OCRDMA_CQE_SRCQP_MASK = 0xFFFFFF
+};
+
+struct ocrdma_cqe {
+ union {
+ /* w0 to w2 */
+ struct {
+ u32 wqeidx;
+ u32 bytes_xfered;
+ u32 qpn;
+ } wq;
+ struct {
+ u32 lkey_immdt;
+ u32 rxlen;
+ u32 buftag_qpn;
+ } rq;
+ struct {
+ u32 lkey_immdt;
+ u32 rxlen_pkey;
+ u32 buftag_qpn;
+ } ud;
+ struct {
+ u32 word_0;
+ u32 word_1;
+ u32 qpn;
+ } cmn;
+ };
+ u32 flags_status_srcqpn; /* w3 */
+};
+
+struct ocrdma_sge {
+ u32 addr_hi;
+ u32 addr_lo;
+ u32 lrkey;
+ u32 len;
+};
+
+enum {
+ OCRDMA_FLAG_SIG = 0x1,
+ OCRDMA_FLAG_INV = 0x2,
+ OCRDMA_FLAG_FENCE_L = 0x4,
+ OCRDMA_FLAG_FENCE_R = 0x8,
+ OCRDMA_FLAG_SOLICIT = 0x10,
+ OCRDMA_FLAG_IMM = 0x20,
+ OCRDMA_FLAG_AH_VLAN_PR = 0x40,
+
+ /* Stag flags */
+ OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1,
+ OCRDMA_LKEY_FLAG_REMOTE_RD = 0x2,
+ OCRDMA_LKEY_FLAG_REMOTE_WR = 0x4,
+ OCRDMA_LKEY_FLAG_VATO = 0x8,
+};
+
+enum OCRDMA_WQE_OPCODE {
+ OCRDMA_WRITE = 0x06,
+ OCRDMA_READ = 0x0C,
+ OCRDMA_RESV0 = 0x02,
+ OCRDMA_SEND = 0x00,
+ OCRDMA_CMP_SWP = 0x14,
+ OCRDMA_BIND_MW = 0x10,
+ OCRDMA_FR_MR = 0x11,
+ OCRDMA_RESV1 = 0x0A,
+ OCRDMA_LKEY_INV = 0x15,
+ OCRDMA_FETCH_ADD = 0x13,
+ OCRDMA_POST_RQ = 0x12
+};
+
+enum {
+ OCRDMA_TYPE_INLINE = 0x0,
+ OCRDMA_TYPE_LKEY = 0x1,
+};
+
+enum {
+ OCRDMA_WQE_OPCODE_SHIFT = 0,
+ OCRDMA_WQE_OPCODE_MASK = 0x0000001F,
+ OCRDMA_WQE_FLAGS_SHIFT = 5,
+ OCRDMA_WQE_TYPE_SHIFT = 16,
+ OCRDMA_WQE_TYPE_MASK = 0x00030000,
+ OCRDMA_WQE_SIZE_SHIFT = 18,
+ OCRDMA_WQE_SIZE_MASK = 0xFF,
+ OCRDMA_WQE_NXT_WQE_SIZE_SHIFT = 25,
+
+ OCRDMA_WQE_LKEY_FLAGS_SHIFT = 0,
+ OCRDMA_WQE_LKEY_FLAGS_MASK = 0xF
+};
+
+/* header WQE for all the SQ and RQ operations */
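+/* The control word (cw) packs the opcode, flags, SGE type and WQE size
+ * using the OCRDMA_WQE_* shifts and masks defined above.
+ */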
+struct ocrdma_hdr_wqe {
+ u32 cw;
+ union {
+ u32 rsvd_tag;
+ u32 rsvd_lkey_flags;
+ };
+ union {
+ u32 immdt;
+ u32 lkey;
+ };
+ u32 total_len;
+};
+
+struct ocrdma_ewqe_ud_hdr {
+ u32 rsvd_dest_qpn;
+ u32 qkey;
+ u32 rsvd_ahid;
+ u32 hdr_type;
+};
+
+/* extended wqe that follows the hdr_wqe for Fast Memory Registration (FRMR) */
+struct ocrdma_ewqe_fr {
+ u32 va_hi;
+ u32 va_lo;
+ u32 fbo_hi;
+ u32 fbo_lo;
+ u32 size_sge;
+ u32 num_sges;
+ u32 rsvd;
+ u32 rsvd2;
+};
+
+struct ocrdma_eth_basic {
+ u8 dmac[6];
+ u8 smac[6];
+ __be16 eth_type;
+} __packed;
+
+struct ocrdma_eth_vlan {
+ u8 dmac[6];
+ u8 smac[6];
+ __be16 eth_type;
+ __be16 vlan_tag;
+ __be16 roce_eth_type;
+} __packed;
+
+struct ocrdma_grh {
+ __be32 tclass_flow;
+ __be32 pdid_hoplimit;
+ u8 sgid[16];
+ u8 dgid[16];
+ u16 rsvd;
+} __packed;
+
+#define OCRDMA_AV_VALID BIT(7)
+#define OCRDMA_AV_VLAN_VALID BIT(1)
+
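+/* Address vector entry written into the AH table: L2 (VLAN) header,
+ * GRH and a validity byte (OCRDMA_AV_VALID / OCRDMA_AV_VLAN_VALID).
+ */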
+struct ocrdma_av {
+ struct ocrdma_eth_vlan eth_hdr;
+ struct ocrdma_grh grh;
+ u32 valid;
+} __packed;
+
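+/* Resource usage counters returned by the RDMA stats mailbox command;
+ * the response below carries one copy of active counts and one copy of
+ * threshold values (see struct ocrdma_rdma_stats_resp).
+ */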
+struct ocrdma_rsrc_stats {
+ u32 dpp_pds;
+ u32 non_dpp_pds;
+ u32 rc_dpp_qps;
+ u32 uc_dpp_qps;
+ u32 ud_dpp_qps;
+ u32 rc_non_dpp_qps;
+ u32 rsvd;
+ u32 uc_non_dpp_qps;
+ u32 ud_non_dpp_qps;
+ u32 rsvd1;
+ u32 srqs;
+ u32 rbqs;
+ u32 r64K_nsmr;
+ u32 r64K_to_2M_nsmr;
+ u32 r2M_to_44M_nsmr;
+ u32 r44M_to_1G_nsmr;
+ u32 r1G_to_4G_nsmr;
+ u32 nsmr_count_4G_to_32G;
+ u32 r32G_to_64G_nsmr;
+ u32 r64G_to_128G_nsmr;
+ u32 r128G_to_higher_nsmr;
+ u32 embedded_nsmr;
+ u32 frmr;
+ u32 prefetch_qps;
+ u32 ondemand_qps;
+ u32 phy_mr;
+ u32 mw;
+ u32 rsvd2[7];
+};
+
+struct ocrdma_db_err_stats {
+ u32 sq_doorbell_errors;
+ u32 cq_doorbell_errors;
+ u32 rq_srq_doorbell_errors;
+ u32 cq_overflow_errors;
+ u32 rsvd[4];
+};
+
+struct ocrdma_wqe_stats {
+ u32 large_send_rc_wqes_lo;
+ u32 large_send_rc_wqes_hi;
+ u32 large_write_rc_wqes_lo;
+ u32 large_write_rc_wqes_hi;
+ u32 rsvd[4];
+ u32 read_wqes_lo;
+ u32 read_wqes_hi;
+ u32 frmr_wqes_lo;
+ u32 frmr_wqes_hi;
+ u32 mw_bind_wqes_lo;
+ u32 mw_bind_wqes_hi;
+ u32 invalidate_wqes_lo;
+ u32 invalidate_wqes_hi;
+ u32 rsvd1[2];
+ u32 dpp_wqe_drops;
+ u32 rsvd2[5];
+};
+
+struct ocrdma_tx_stats {
+ u32 send_pkts_lo;
+ u32 send_pkts_hi;
+ u32 write_pkts_lo;
+ u32 write_pkts_hi;
+ u32 read_pkts_lo;
+ u32 read_pkts_hi;
+ u32 read_rsp_pkts_lo;
+ u32 read_rsp_pkts_hi;
+ u32 ack_pkts_lo;
+ u32 ack_pkts_hi;
+ u32 send_bytes_lo;
+ u32 send_bytes_hi;
+ u32 write_bytes_lo;
+ u32 write_bytes_hi;
+ u32 read_req_bytes_lo;
+ u32 read_req_bytes_hi;
+ u32 read_rsp_bytes_lo;
+ u32 read_rsp_bytes_hi;
+ u32 ack_timeouts;
+ u32 rsvd[5];
+};
+
+struct ocrdma_tx_qp_err_stats {
+ u32 local_length_errors;
+ u32 local_protection_errors;
+ u32 local_qp_operation_errors;
+ u32 retry_count_exceeded_errors;
+ u32 rnr_retry_count_exceeded_errors;
+ u32 rsvd[3];
+};
+
+struct ocrdma_rx_stats {
+ u32 roce_frame_bytes_lo;
+ u32 roce_frame_bytes_hi;
+ u32 roce_frame_icrc_drops;
+ u32 roce_frame_payload_len_drops;
+ u32 ud_drops;
+ u32 qp1_drops;
+ u32 psn_error_request_packets;
+ u32 psn_error_resp_packets;
+ u32 rnr_nak_timeouts;
+ u32 rnr_nak_receives;
+ u32 roce_frame_rxmt_drops;
+ u32 nak_count_psn_sequence_errors;
+ u32 rc_drop_count_lookup_errors;
+ u32 rq_rnr_naks;
+ u32 srq_rnr_naks;
+ u32 roce_frames_lo;
+ u32 roce_frames_hi;
+ u32 rsvd;
+};
+
+struct ocrdma_rx_qp_err_stats {
+ u32 nak_invalid_request_errors;
+ u32 nak_remote_operation_errors;
+ u32 nak_count_remote_access_errors;
+ u32 local_length_errors;
+ u32 local_protection_errors;
+ u32 local_qp_operation_errors;
+ u32 rsvd[2];
+};
+
+struct ocrdma_tx_dbg_stats {
+ u32 data[100];
+};
+
+struct ocrdma_rx_dbg_stats {
+ u32 data[200];
+};
+
+struct ocrdma_rdma_stats_req {
+ struct ocrdma_mbx_hdr hdr;
+ u8 reset_stats;
+ u8 rsvd[3];
+} __packed;
+
+struct ocrdma_rdma_stats_resp {
+ struct ocrdma_mbx_hdr hdr;
+ struct ocrdma_rsrc_stats act_rsrc_stats;
+ struct ocrdma_rsrc_stats th_rsrc_stats;
+ struct ocrdma_db_err_stats db_err_stats;
+ struct ocrdma_wqe_stats wqe_stats;
+ struct ocrdma_tx_stats tx_stats;
+ struct ocrdma_tx_qp_err_stats tx_qp_err_stats;
+ struct ocrdma_rx_stats rx_stats;
+ struct ocrdma_rx_qp_err_stats rx_qp_err_stats;
+ struct ocrdma_tx_dbg_stats tx_dbg_stats;
+ struct ocrdma_rx_dbg_stats rx_dbg_stats;
+} __packed;
+
+enum {
+ OCRDMA_HBA_ATTRB_EPROM_VER_LO_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_EPROM_VER_HI_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_EPROM_VER_HI_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_CDBLEN_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_ASIC_REV_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_ASIC_REV_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_GUID0_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_GUID0_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_GUID13_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_GUID14_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_GUID14_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_GUID15_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_GUID15_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PCNT_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_PCNT_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_LDTOUT_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_ISCSI_VER_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_ISCSI_VER_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_MFUNC_DEV_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_MFUNC_DEV_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_CV_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_HBA_ST_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_HBA_ST_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_MAX_DOMS_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_MAX_DOMS_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PTNUM_MASK = 0x3F000000,
+ OCRDMA_HBA_ATTRB_PTNUM_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_PT_MASK = 0xC0000000,
+ OCRDMA_HBA_ATTRB_PT_SHIFT = 0x1E,
+ OCRDMA_HBA_ATTRB_ISCSI_FET_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_ASIC_GEN_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_ASIC_GEN_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_PCI_VID_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_PCI_DID_MASK = 0xFFFF0000,
+ OCRDMA_HBA_ATTRB_PCI_DID_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PCI_SVID_MASK = 0xFFFF,
+ OCRDMA_HBA_ATTRB_PCI_SSID_MASK = 0xFFFF0000,
+ OCRDMA_HBA_ATTRB_PCI_SSID_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_PCI_BUSNUM_MASK = 0xFF,
+ OCRDMA_HBA_ATTRB_PCI_DEVNUM_MASK = 0xFF00,
+ OCRDMA_HBA_ATTRB_PCI_DEVNUM_SHIFT = 0x08,
+ OCRDMA_HBA_ATTRB_PCI_FUNCNUM_MASK = 0xFF0000,
+ OCRDMA_HBA_ATTRB_PCI_FUNCNUM_SHIFT = 0x10,
+ OCRDMA_HBA_ATTRB_IF_TYPE_MASK = 0xFF000000,
+ OCRDMA_HBA_ATTRB_IF_TYPE_SHIFT = 0x18,
+ OCRDMA_HBA_ATTRB_NETFIL_MASK = 0xFF
+};
+
+struct mgmt_hba_attribs {
+ u8 flashrom_version_string[32];
+ u8 manufacturer_name[32];
+ u32 supported_modes;
+ u32 rsvd_eprom_verhi_verlo;
+ u32 mbx_ds_ver;
+ u32 epfw_ds_ver;
+ u8 ncsi_ver_string[12];
+ u32 default_extended_timeout;
+ u8 controller_model_number[32];
+ u8 controller_description[64];
+ u8 controller_serial_number[32];
+ u8 ip_version_string[32];
+ u8 firmware_version_string[32];
+ u8 bios_version_string[32];
+ u8 redboot_version_string[32];
+ u8 driver_version_string[32];
+ u8 fw_on_flash_version_string[32];
+ u32 functionalities_supported;
+ u32 guid0_asicrev_cdblen;
+ u8 generational_guid[12];
+ u32 portcnt_guid15;
+ u32 mfuncdev_iscsi_ldtout;
+ u32 ptpnum_maxdoms_hbast_cv;
+ u32 firmware_post_status;
+ u32 hba_mtu[8];
+ u32 res_asicgen_iscsi_feaures;
+ u32 rsvd1[3];
+};
+
+struct mgmt_controller_attrib {
+ struct mgmt_hba_attribs hba_attribs;
+ u32 pci_did_vid;
+ u32 pci_ssid_svid;
+ u32 ityp_fnum_devnum_bnum;
+ u32 uid_hi;
+ u32 uid_lo;
+ u32 res_nnetfil;
+ u32 rsvd0[4];
+};
+
+struct ocrdma_get_ctrl_attribs_rsp {
+ struct ocrdma_mbx_hdr hdr;
+ struct mgmt_controller_attrib ctrl_attribs;
+};
+
+#define OCRDMA_SUBSYS_DCBX 0x10
+
+enum OCRDMA_DCBX_OPCODE {
+ OCRDMA_CMD_GET_DCBX_CONFIG = 0x01
+};
+
+enum OCRDMA_DCBX_PARAM_TYPE {
+ OCRDMA_PARAMETER_TYPE_ADMIN = 0x00,
+ OCRDMA_PARAMETER_TYPE_OPER = 0x01,
+ OCRDMA_PARAMETER_TYPE_PEER = 0x02
+};
+
+enum OCRDMA_DCBX_PROTO {
+ OCRDMA_PROTO_SELECT_L2 = 0x00,
+ OCRDMA_PROTO_SELECT_L4 = 0x01
+};
+
+enum OCRDMA_DCBX_APP_PARAM {
+ OCRDMA_APP_PARAM_APP_PROTO_MASK = 0xFFFF,
+ OCRDMA_APP_PARAM_PROTO_SEL_MASK = 0xFF,
+ OCRDMA_APP_PARAM_PROTO_SEL_SHIFT = 0x10,
+ OCRDMA_APP_PARAM_VALID_MASK = 0xFF,
+ OCRDMA_APP_PARAM_VALID_SHIFT = 0x18
+};
+
+enum OCRDMA_DCBX_STATE_FLAGS {
+ OCRDMA_STATE_FLAG_ENABLED = 0x01,
+ OCRDMA_STATE_FLAG_ADDVERTISED = 0x02,
+ OCRDMA_STATE_FLAG_WILLING = 0x04,
+ OCRDMA_STATE_FLAG_SYNC = 0x08,
+ OCRDMA_STATE_FLAG_UNSUPPORTED = 0x40000000,
+ OCRDMA_STATE_FLAG_NEG_FAILD = 0x80000000
+};
+
+enum OCRDMA_TCV_AEV_OPV_ST {
+ OCRDMA_DCBX_TC_SUPPORT_MASK = 0xFF,
+ OCRDMA_DCBX_TC_SUPPORT_SHIFT = 0x18,
+ OCRDMA_DCBX_APP_ENTRY_SHIFT = 0x10,
+ OCRDMA_DCBX_OP_PARAM_SHIFT = 0x08,
+ OCRDMA_DCBX_STATE_MASK = 0xFF
+};
+
+struct ocrdma_app_parameter {
+ u32 valid_proto_app;
+ u32 oui;
+ u32 app_prio[2];
+};
+
+struct ocrdma_dcbx_cfg {
+ u32 tcv_aev_opv_st;
+ u32 tc_state;
+ u32 pfc_state;
+ u32 qcn_state;
+ u32 appl_state;
+ u32 ll_state;
+ u32 tc_bw[2];
+ u32 tc_prio[8];
+ u32 pfc_prio[2];
+ struct ocrdma_app_parameter app_param[15];
+};
+
+struct ocrdma_get_dcbx_cfg_req {
+ struct ocrdma_mbx_hdr hdr;
+ u32 param_type;
+} __packed;
+
+struct ocrdma_get_dcbx_cfg_rsp {
+ struct ocrdma_mbx_rsp hdr;
+ struct ocrdma_dcbx_cfg cfg;
+} __packed;
+
+#endif /* __OCRDMA_SLI_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
new file mode 100644
index 000000000..5f831e3bd
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -0,0 +1,838 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <rdma/ib_addr.h>
+#include <rdma/ib_pma.h>
+#include "ocrdma_stats.h"
+
+static struct dentry *ocrdma_dbgfs_dir;
+
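+/* Append a "name: count" line to the debugfs stats buffer. Returns the
+ * number of bytes added, or 0 if the line would overflow
+ * OCRDMA_MAX_DBGFS_MEM.
+ */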
+static int ocrdma_add_stat(char *start, char *pcur,
+ char *name, u64 count)
+{
+ char buff[128] = {0};
+ int cpy_len = 0;
+
+ snprintf(buff, 128, "%s: %llu\n", name, count);
+ cpy_len = strlen(buff);
+
+ if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
+ pr_err("%s: No space in stats buff\n", __func__);
+ return 0;
+ }
+
+ memcpy(pcur, buff, cpy_len);
+ return cpy_len;
+}
+
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
+{
+ struct stats_mem *mem = &dev->stats_mem;
+
+ mutex_init(&dev->stats_lock);
+ /* Alloc mbox command mem */
+ mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
+ sizeof(struct ocrdma_rdma_stats_resp));
+
+ mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va) {
+ pr_err("%s: stats mbox allocation failed\n", __func__);
+ return false;
+ }
+
+ /* Alloc debugfs mem */
+ mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
+ if (!mem->debugfs_mem)
+ return false;
+
+ return true;
+}
+
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
+{
+ struct stats_mem *mem = &dev->stats_mem;
+
+ if (mem->va)
+ dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
+ mem->va, mem->pa);
+ mem->va = NULL;
+ kfree(mem->debugfs_mem);
+}
+
+static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
+ (u64)rsrc_stats->dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
+ (u64)rsrc_stats->non_dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
+ (u64)rsrc_stats->rc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
+ (u64)rsrc_stats->uc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
+ (u64)rsrc_stats->ud_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
+ (u64)rsrc_stats->rc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
+ (u64)rsrc_stats->uc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
+ (u64)rsrc_stats->ud_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
+ (u64)rsrc_stats->srqs);
+ pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
+ (u64)rsrc_stats->rbqs);
+ pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
+ (u64)rsrc_stats->r64K_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
+ (u64)rsrc_stats->r64K_to_2M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
+ (u64)rsrc_stats->r2M_to_44M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
+ (u64)rsrc_stats->r44M_to_1G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
+ (u64)rsrc_stats->r1G_to_4G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
+ (u64)rsrc_stats->nsmr_count_4G_to_32G);
+ pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
+ (u64)rsrc_stats->r32G_to_64G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
+ (u64)rsrc_stats->r64G_to_128G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
+ (u64)rsrc_stats->r128G_to_higher_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
+ (u64)rsrc_stats->embedded_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
+ (u64)rsrc_stats->frmr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
+ (u64)rsrc_stats->prefetch_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
+ (u64)rsrc_stats->ondemand_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
+ (u64)rsrc_stats->phy_mr);
+ pcur += ocrdma_add_stat(stats, pcur, "active_mw",
+ (u64)rsrc_stats->mw);
+
+ /* Print the threshold stats */
+ rsrc_stats = &rdma_stats->th_rsrc_stats;
+
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
+ (u64)rsrc_stats->dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
+ (u64)rsrc_stats->non_dpp_pds);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
+ (u64)rsrc_stats->rc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
+ (u64)rsrc_stats->uc_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
+ (u64)rsrc_stats->ud_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
+ (u64)rsrc_stats->rc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
+ (u64)rsrc_stats->uc_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
+ (u64)rsrc_stats->ud_non_dpp_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
+ (u64)rsrc_stats->srqs);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
+ (u64)rsrc_stats->rbqs);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
+ (u64)rsrc_stats->r64K_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
+ (u64)rsrc_stats->r64K_to_2M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
+ (u64)rsrc_stats->r2M_to_44M_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
+ (u64)rsrc_stats->r44M_to_1G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
+ (u64)rsrc_stats->r1G_to_4G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
+ (u64)rsrc_stats->nsmr_count_4G_to_32G);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
+ (u64)rsrc_stats->r32G_to_64G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
+ (u64)rsrc_stats->r64G_to_128G_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
+ (u64)rsrc_stats->r128G_to_higher_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
+ (u64)rsrc_stats->embedded_nsmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
+ (u64)rsrc_stats->frmr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
+ (u64)rsrc_stats->prefetch_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
+ (u64)rsrc_stats->ondemand_qps);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
+ (u64)rsrc_stats->phy_mr);
+ pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
+ (u64)rsrc_stats->mw);
+ return stats;
+}
+
+static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat
+ (stats, pcur, "roce_frame_bytes",
+ convert_to_64bit(rx_stats->roce_frame_bytes_lo,
+ rx_stats->roce_frame_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
+ (u64)rx_stats->roce_frame_icrc_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
+ (u64)rx_stats->roce_frame_payload_len_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
+ (u64)rx_stats->ud_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
+ (u64)rx_stats->qp1_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
+ (u64)rx_stats->psn_error_request_packets);
+ pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
+ (u64)rx_stats->psn_error_resp_packets);
+ pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
+ (u64)rx_stats->rnr_nak_timeouts);
+ pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
+ (u64)rx_stats->rnr_nak_receives);
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
+ (u64)rx_stats->roce_frame_rxmt_drops);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
+ (u64)rx_stats->nak_count_psn_sequence_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
+ (u64)rx_stats->rc_drop_count_lookup_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
+ (u64)rx_stats->rq_rnr_naks);
+ pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
+ (u64)rx_stats->srq_rnr_naks);
+ pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
+ convert_to_64bit(rx_stats->roce_frames_lo,
+ rx_stats->roce_frames_hi));
+
+ return stats;
+}
+
+static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
+{
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+ return convert_to_64bit(rx_stats->roce_frames_lo,
+ rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
+ + (u64)rx_stats->roce_frame_payload_len_drops;
+}
+
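+/* The IB PMA PortCounters data counters are expressed in units of 32-bit
+ * words, hence the byte counts below are divided by four.
+ */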
+static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
+{
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+ return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
+ rx_stats->roce_frame_bytes_hi))/4;
+}
+
+static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
+ convert_to_64bit(tx_stats->send_pkts_lo,
+ tx_stats->send_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
+ convert_to_64bit(tx_stats->write_pkts_lo,
+ tx_stats->write_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
+ convert_to_64bit(tx_stats->read_pkts_lo,
+ tx_stats->read_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
+ convert_to_64bit(tx_stats->read_rsp_pkts_lo,
+ tx_stats->read_rsp_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
+ convert_to_64bit(tx_stats->ack_pkts_lo,
+ tx_stats->ack_pkts_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
+ convert_to_64bit(tx_stats->send_bytes_lo,
+ tx_stats->send_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
+ convert_to_64bit(tx_stats->write_bytes_lo,
+ tx_stats->write_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
+ convert_to_64bit(tx_stats->read_req_bytes_lo,
+ tx_stats->read_req_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
+ convert_to_64bit(tx_stats->read_rsp_bytes_lo,
+ tx_stats->read_rsp_bytes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
+ (u64)tx_stats->ack_timeouts);
+
+ return stats;
+}
+
+static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
+{
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+ return (convert_to_64bit(tx_stats->send_pkts_lo,
+ tx_stats->send_pkts_hi) +
+ convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
+ convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
+ convert_to_64bit(tx_stats->read_rsp_pkts_lo,
+ tx_stats->read_rsp_pkts_hi) +
+ convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
+}
+
+static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
+{
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+ return (convert_to_64bit(tx_stats->send_bytes_lo,
+ tx_stats->send_bytes_hi) +
+ convert_to_64bit(tx_stats->write_bytes_lo,
+ tx_stats->write_bytes_hi) +
+ convert_to_64bit(tx_stats->read_req_bytes_lo,
+ tx_stats->read_req_bytes_hi) +
+ convert_to_64bit(tx_stats->read_rsp_bytes_lo,
+ tx_stats->read_rsp_bytes_hi))/4;
+}
+
+static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
+ convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
+ wqe_stats->large_send_rc_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
+ convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
+ wqe_stats->large_write_rc_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
+ convert_to_64bit(wqe_stats->read_wqes_lo,
+ wqe_stats->read_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
+ convert_to_64bit(wqe_stats->frmr_wqes_lo,
+ wqe_stats->frmr_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
+ convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
+ wqe_stats->mw_bind_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
+ convert_to_64bit(wqe_stats->invalidate_wqes_lo,
+ wqe_stats->invalidate_wqes_hi));
+ pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
+ (u64)wqe_stats->dpp_wqe_drops);
+ return stats;
+}
+
+static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
+ (u64)db_err_stats->sq_doorbell_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
+ (u64)db_err_stats->cq_doorbell_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
+ (u64)db_err_stats->rq_srq_doorbell_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
+ (u64)db_err_stats->cq_overflow_errors);
+ return stats;
+}
+
+static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
+ &rdma_stats->rx_qp_err_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_request_errors",
+ (u64)rx_qp_err_stats->nak_invalid_request_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
+ (u64)rx_qp_err_stats->nak_remote_operation_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
+ (u64)rx_qp_err_stats->nak_count_remote_access_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
+ (u64)rx_qp_err_stats->local_length_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
+ (u64)rx_qp_err_stats->local_protection_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
+ (u64)rx_qp_err_stats->local_qp_operation_errors);
+ return stats;
+}
+
+static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
+ &rdma_stats->tx_qp_err_stats;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
+ (u64)tx_qp_err_stats->local_length_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
+ (u64)tx_qp_err_stats->local_protection_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
+ (u64)tx_qp_err_stats->local_qp_operation_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
+ (u64)tx_qp_err_stats->retry_count_exceeded_errors);
+ pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
+ (u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);
+ return stats;
+}
+
+static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
+{
+ int i;
+ char *pstats = dev->stats_mem.debugfs_mem;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_tx_dbg_stats *tx_dbg_stats =
+ &rdma_stats->tx_dbg_stats;
+
+ memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ for (i = 0; i < 100; i++)
+ pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
+ tx_dbg_stats->data[i]);
+
+ return dev->stats_mem.debugfs_mem;
+}
+
+static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
+{
+ int i;
+ char *pstats = dev->stats_mem.debugfs_mem;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rx_dbg_stats *rx_dbg_stats =
+ &rdma_stats->rx_dbg_stats;
+
+ memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ for (i = 0; i < 200; i++)
+ pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
+ rx_dbg_stats->data[i]);
+
+ return dev->stats_mem.debugfs_mem;
+}
+
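+/* Driver-maintained counters: async event and CQE completion error
+ * statistics accumulated by the driver itself (not firmware counters).
+ */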
+static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
+{
+ char *stats = dev->stats_mem.debugfs_mem, *pcur;
+
+ memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+ pcur = stats;
+ pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
+ (u64)(dev->async_err_stats
+ [OCRDMA_CQ_ERROR].counter));
+ pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
+ (u64)dev->async_err_stats
+ [OCRDMA_CQ_OVERRUN_ERROR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
+ (u64)dev->async_err_stats
+ [OCRDMA_CQ_QPCAT_ERROR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
+ (u64)dev->async_err_stats
+ [OCRDMA_QP_ACCESS_ERROR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
+ (u64)dev->async_err_stats
+ [OCRDMA_QP_COMM_EST_EVENT].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
+ (u64)dev->async_err_stats
+ [OCRDMA_SQ_DRAINED_EVENT].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
+ (u64)dev->async_err_stats
+ [OCRDMA_DEVICE_FATAL_EVENT].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
+ (u64)dev->async_err_stats
+ [OCRDMA_SRQCAT_ERROR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
+ (u64)dev->async_err_stats
+ [OCRDMA_SRQ_LIMIT_EVENT].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
+ (u64)dev->async_err_stats
+ [OCRDMA_QP_LAST_WQE_EVENT].counter);
+
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_LOC_LEN_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_LOC_QP_OP_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_LOC_PROT_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_WR_FLUSH_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_MW_BIND_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_BAD_RESP_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_LOC_ACCESS_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_REM_INV_REQ_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_REM_ACCESS_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_REM_OP_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_RETRY_EXC_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_REM_ABORT_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_INV_EECN_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_FATAL_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
+ pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
+ (u64)dev->cqe_err_stats
+ [OCRDMA_CQE_GENERAL_ERR].counter);
+ return stats;
+}
+
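+/* Refresh the cached firmware statistics via the stats mailbox command,
+ * at most once per second; PD counts are taken from the driver's PD
+ * resource manager when pre-allocation is in use.
+ */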
+static void ocrdma_update_stats(struct ocrdma_dev *dev)
+{
+ ulong now = jiffies, secs;
+ int status;
+ struct ocrdma_rdma_stats_resp *rdma_stats =
+ (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+ struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
+
+ secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
+ if (secs) {
+ /* update */
+ status = ocrdma_mbx_rdma_stats(dev, false);
+ if (status)
+ pr_err("%s: stats mbox failed with status = %d\n",
+ __func__, status);
+ /* Update PD counters from PD resource manager */
+ if (dev->pd_mgr->pd_prealloc_valid) {
+ rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
+ rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
+ /* Threshold stats */
+ rsrc_stats = &rdma_stats->th_rsrc_stats;
+ rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
+ rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
+ }
+ dev->last_stats_time = jiffies;
+ }
+}
+
+static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ char tmp_str[32];
+ long reset;
+ int status;
+ struct ocrdma_stats *pstats = filp->private_data;
+ struct ocrdma_dev *dev = pstats->dev;
+
+ if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
+ goto err;
+
+ if (copy_from_user(tmp_str, buffer, count))
+ goto err;
+
+ tmp_str[count-1] = '\0';
+ if (kstrtol(tmp_str, 10, &reset))
+ goto err;
+
+ switch (pstats->type) {
+ case OCRDMA_RESET_STATS:
+ if (reset) {
+ status = ocrdma_mbx_rdma_stats(dev, true);
+ if (status) {
+ pr_err("Failed to reset stats = %d\n", status);
+ goto err;
+ }
+ }
+ break;
+ default:
+ goto err;
+ }
+
+ return count;
+err:
+ return -EFAULT;
+}
+
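+/* Fill the standard IB PMA PortCounters attribute (starting at byte
+ * offset 40 of the MAD data) from the latest hardware statistics.
+ */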
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad)
+{
+ struct ib_pma_portcounters *pma_cnt;
+
+ pma_cnt = (void *)(out_mad->data + 40);
+ ocrdma_update_stats(dev);
+
+ pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
+ pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
+ pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
+ pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
+}
+
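+/* debugfs read handler: regenerates the requested stats text under
+ * stats_lock on every read; partial (non-zero offset) reads return 0.
+ */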
+static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
+ size_t usr_buf_len, loff_t *ppos)
+{
+ struct ocrdma_stats *pstats = filp->private_data;
+ struct ocrdma_dev *dev = pstats->dev;
+ ssize_t status = 0;
+ char *data = NULL;
+
+ /* No partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ mutex_lock(&dev->stats_lock);
+
+ ocrdma_update_stats(dev);
+
+ switch (pstats->type) {
+ case OCRDMA_RSRC_STATS:
+ data = ocrdma_resource_stats(dev);
+ break;
+ case OCRDMA_RXSTATS:
+ data = ocrdma_rx_stats(dev);
+ break;
+ case OCRDMA_WQESTATS:
+ data = ocrdma_wqe_stats(dev);
+ break;
+ case OCRDMA_TXSTATS:
+ data = ocrdma_tx_stats(dev);
+ break;
+ case OCRDMA_DB_ERRSTATS:
+ data = ocrdma_db_errstats(dev);
+ break;
+ case OCRDMA_RXQP_ERRSTATS:
+ data = ocrdma_rxqp_errstats(dev);
+ break;
+ case OCRDMA_TXQP_ERRSTATS:
+ data = ocrdma_txqp_errstats(dev);
+ break;
+ case OCRDMA_TX_DBG_STATS:
+ data = ocrdma_tx_dbg_stats(dev);
+ break;
+ case OCRDMA_RX_DBG_STATS:
+ data = ocrdma_rx_dbg_stats(dev);
+ break;
+ case OCRDMA_DRV_STATS:
+ data = ocrdma_driver_dbg_stats(dev);
+ break;
+
+ default:
+ status = -EFAULT;
+ goto exit;
+ }
+
+ if (usr_buf_len < strlen(data)) {
+ status = -ENOSPC;
+ goto exit;
+ }
+
+ status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
+ strlen(data));
+exit:
+ mutex_unlock(&dev->stats_lock);
+ return status;
+}
+
+static const struct file_operations ocrdma_dbg_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ocrdma_dbgfs_ops_read,
+ .write = ocrdma_dbgfs_ops_write,
+};
+
+void ocrdma_add_port_stats(struct ocrdma_dev *dev)
+{
+ const struct pci_dev *pdev = dev->nic_info.pdev;
+
+ if (!ocrdma_dbgfs_dir)
+ return;
+
+ /* Create the per-port stats base dir */
+ dev->dir = debugfs_create_dir(pci_name(pdev), ocrdma_dbgfs_dir);
+
+ dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
+ dev->rsrc_stats.dev = dev;
+ debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
+ &dev->rsrc_stats, &ocrdma_dbg_ops);
+
+ dev->rx_stats.type = OCRDMA_RXSTATS;
+ dev->rx_stats.dev = dev;
+ debugfs_create_file("rx_stats", S_IRUSR, dev->dir, &dev->rx_stats,
+ &ocrdma_dbg_ops);
+
+ dev->wqe_stats.type = OCRDMA_WQESTATS;
+ dev->wqe_stats.dev = dev;
+ debugfs_create_file("wqe_stats", S_IRUSR, dev->dir, &dev->wqe_stats,
+ &ocrdma_dbg_ops);
+
+ dev->tx_stats.type = OCRDMA_TXSTATS;
+ dev->tx_stats.dev = dev;
+ debugfs_create_file("tx_stats", S_IRUSR, dev->dir, &dev->tx_stats,
+ &ocrdma_dbg_ops);
+
+ dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
+ dev->db_err_stats.dev = dev;
+ debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
+ &dev->db_err_stats, &ocrdma_dbg_ops);
+
+ dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
+ dev->tx_qp_err_stats.dev = dev;
+ debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->tx_qp_err_stats, &ocrdma_dbg_ops);
+
+ dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
+ dev->rx_qp_err_stats.dev = dev;
+ debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
+ &dev->rx_qp_err_stats, &ocrdma_dbg_ops);
+
+ dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
+ dev->tx_dbg_stats.dev = dev;
+ debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->tx_dbg_stats, &ocrdma_dbg_ops);
+
+ dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
+ dev->rx_dbg_stats.dev = dev;
+ debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
+ &dev->rx_dbg_stats, &ocrdma_dbg_ops);
+
+ dev->driver_stats.type = OCRDMA_DRV_STATS;
+ dev->driver_stats.dev = dev;
+ debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
+ &dev->driver_stats, &ocrdma_dbg_ops);
+
+ dev->reset_stats.type = OCRDMA_RESET_STATS;
+ dev->reset_stats.dev = dev;
+ debugfs_create_file("reset_stats", 0200, dev->dir, &dev->reset_stats,
+ &ocrdma_dbg_ops);
+}
+
+void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
+{
+ debugfs_remove_recursive(dev->dir);
+}
+
+void ocrdma_init_debugfs(void)
+{
+ /* Create base dir in debugfs root dir */
+ ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
+}
+
+void ocrdma_rem_debugfs(void)
+{
+ debugfs_remove_recursive(ocrdma_dbgfs_dir);
+}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
new file mode 100644
index 000000000..98feca26a
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -0,0 +1,74 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef __OCRDMA_STATS_H__
+#define __OCRDMA_STATS_H__
+
+#include <linux/debugfs.h>
+#include "ocrdma.h"
+#include "ocrdma_hw.h"
+
+#define OCRDMA_MAX_DBGFS_MEM 4096
+
+enum OCRDMA_STATS_TYPE {
+ OCRDMA_RSRC_STATS,
+ OCRDMA_RXSTATS,
+ OCRDMA_WQESTATS,
+ OCRDMA_TXSTATS,
+ OCRDMA_DB_ERRSTATS,
+ OCRDMA_RXQP_ERRSTATS,
+ OCRDMA_TXQP_ERRSTATS,
+ OCRDMA_TX_DBG_STATS,
+ OCRDMA_RX_DBG_STATS,
+ OCRDMA_DRV_STATS,
+ OCRDMA_RESET_STATS
+};
+
+void ocrdma_rem_debugfs(void);
+void ocrdma_init_debugfs(void);
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
+void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
+void ocrdma_add_port_stats(struct ocrdma_dev *dev);
+void ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad);
+
+#endif /* __OCRDMA_STATS_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
new file mode 100644
index 000000000..dd4021b11
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -0,0 +1,2970 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "ocrdma.h"
+#include "ocrdma_hw.h"
+#include "ocrdma_verbs.h"
+#include <rdma/ocrdma-abi.h>
+
+int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey)
+{
+ if (index > 0)
+ return -EINVAL;
+
+ *pkey = 0xffff;
+ return 0;
+}
+
+int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
+ struct ib_udata *uhw)
+{
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+
+ if (uhw->inlen || uhw->outlen)
+ return -EINVAL;
+
+ memset(attr, 0, sizeof *attr);
+ memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
+ min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ dev->nic_info.mac_addr);
+ attr->max_mr_size = dev->attr.max_mr_size;
+ attr->page_size_cap = 0xffff000;
+ attr->vendor_id = dev->nic_info.pdev->vendor;
+ attr->vendor_part_id = dev->nic_info.pdev->device;
+ attr->hw_ver = dev->asic_id;
+ attr->max_qp = dev->attr.max_qp;
+ attr->max_ah = OCRDMA_MAX_AH;
+ attr->max_qp_wr = dev->attr.max_wqe;
+
+ attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
+ IB_DEVICE_RC_RNR_NAK_GEN |
+ IB_DEVICE_SHUTDOWN_PORT |
+ IB_DEVICE_SYS_IMAGE_GUID |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
+ attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ attr->max_send_sge = dev->attr.max_send_sge;
+ attr->max_recv_sge = dev->attr.max_recv_sge;
+ attr->max_sge_rd = dev->attr.max_rdma_sge;
+ attr->max_cq = dev->attr.max_cq;
+ attr->max_cqe = dev->attr.max_cqe;
+ attr->max_mr = dev->attr.max_mr;
+ attr->max_mw = dev->attr.max_mw;
+ attr->max_pd = dev->attr.max_pd;
+ attr->atomic_cap = 0;
+ attr->max_qp_rd_atom =
+ min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
+ attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
+ attr->max_srq = dev->attr.max_srq;
+ attr->max_srq_sge = dev->attr.max_srq_sge;
+ attr->max_srq_wr = dev->attr.max_rqe;
+ attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
+ attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
+ attr->max_pkeys = 1;
+ return 0;
+}
+
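+/* Translate the PHY link speed reported by firmware into an IB speed and
+ * width; unknown or zero speeds fall back to SDR x1.
+ */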
+static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
+ u16 *ib_speed, u8 *ib_width)
+{
+ int status;
+ u8 speed;
+
+ status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
+ if (status)
+ speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
+
+ switch (speed) {
+ case OCRDMA_PHYS_LINK_SPEED_1GBPS:
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_10GBPS:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_20GBPS:
+ *ib_speed = IB_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_40GBPS:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+}
+
+int ocrdma_query_port(struct ib_device *ibdev,
+ u32 port, struct ib_port_attr *props)
+{
+ enum ib_port_state port_state;
+ struct ocrdma_dev *dev;
+ struct net_device *netdev;
+
+ /* props being zeroed by the caller, avoid zeroing it here */
+ dev = get_ocrdma_dev(ibdev);
+ netdev = dev->nic_info.netdev;
+ if (netif_running(netdev) && netif_oper_up(netdev)) {
+ port_state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ port_state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+ props->max_mtu = IB_MTU_4096;
+ props->active_mtu = iboe_get_mtu(netdev->mtu);
+ props->lid = 0;
+ props->lmc = 0;
+ props->sm_lid = 0;
+ props->sm_sl = 0;
+ props->state = port_state;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_DEVICE_MGMT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP;
+ props->ip_gids = true;
+ props->gid_tbl_len = OCRDMA_MAX_SGID;
+ props->pkey_tbl_len = 1;
+ props->bad_pkey_cntr = 0;
+ props->qkey_viol_cntr = 0;
+ get_link_speed_and_width(dev, &props->active_speed,
+ &props->active_width);
+ props->max_msg_sz = 0x80000000;
+ props->max_vl_num = 4;
+ return 0;
+}
+
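+/* Per-ucontext list of physical ranges (doorbell, DPP and queue pages) that
+ * userspace is allowed to mmap; ocrdma_mmap() only remaps ranges found here.
+ */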
+static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ struct ocrdma_mm *mm;
+
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
+ if (mm == NULL)
+ return -ENOMEM;
+ mm->key.phy_addr = phy_addr;
+ mm->key.len = len;
+ INIT_LIST_HEAD(&mm->entry);
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_add_tail(&mm->entry, &uctx->mm_head);
+ mutex_unlock(&uctx->mm_list_lock);
+ return 0;
+}
+
+static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ struct ocrdma_mm *mm, *tmp;
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+ if (len != mm->key.len && phy_addr != mm->key.phy_addr)
+ continue;
+
+ list_del(&mm->entry);
+ kfree(mm);
+ break;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+}
+
+static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
+ unsigned long len)
+{
+ bool found = false;
+ struct ocrdma_mm *mm;
+
+ mutex_lock(&uctx->mm_list_lock);
+ list_for_each_entry(mm, &uctx->mm_head, entry) {
+ if (len != mm->key.len && phy_addr != mm->key.phy_addr)
+ continue;
+
+ found = true;
+ break;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+ return found;
+}
+
+
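+/* Claim the first free slot in the preallocated PD pool bitmap (DPP or
+ * normal) and track the usage high-water mark; caller holds dev_lock.
+ */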
+static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
+{
+ u16 pd_bitmap_idx = 0;
+ unsigned long *pd_bitmap;
+
+ if (dpp_pool) {
+ pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
+ pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+ dev->pd_mgr->max_dpp_pd);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
+ dev->pd_mgr->pd_dpp_count++;
+ if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
+ dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
+ } else {
+ pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
+ pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+ dev->pd_mgr->max_normal_pd);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
+ dev->pd_mgr->pd_norm_count++;
+ if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
+ dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
+ }
+ return pd_bitmap_idx;
+}
+
+static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
+ bool dpp_pool)
+{
+ u16 pd_count;
+ u16 pd_bit_index;
+
+ pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
+ dev->pd_mgr->pd_norm_count;
+ if (pd_count == 0)
+ return -EINVAL;
+
+ if (dpp_pool) {
+ pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
+ if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
+ return -EINVAL;
+ } else {
+ __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
+ dev->pd_mgr->pd_dpp_count--;
+ }
+ } else {
+ pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
+ if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
+ return -EINVAL;
+ } else {
+ __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
+ dev->pd_mgr->pd_norm_count--;
+ }
+ }
+
+ return 0;
+}
+
+static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+ bool dpp_pool)
+{
+ int status;
+
+ mutex_lock(&dev->dev_lock);
+ status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
+ mutex_unlock(&dev->dev_lock);
+ return status;
+}
+
+static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+ u16 pd_idx = 0;
+ int status = 0;
+
+ mutex_lock(&dev->dev_lock);
+ if (pd->dpp_enabled) {
+ /* try allocating a DPP PD; if unavailable, fall back to a normal PD */
+ if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
+ pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
+ pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
+ pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
+ } else if (dev->pd_mgr->pd_norm_count <
+ dev->pd_mgr->max_normal_pd) {
+ pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+ pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+ pd->dpp_enabled = false;
+ } else {
+ status = -EINVAL;
+ }
+ } else {
+ if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
+ pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+ pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+ } else {
+ status = -EINVAL;
+ }
+ }
+ mutex_unlock(&dev->dev_lock);
+ return status;
+}
+
+/*
+ * NOTE:
+ *
+ * ocrdma_ucontext must be used here because this function is also
+ * called from ocrdma_alloc_ucontext, where ib_udata does not carry
+ * a valid ib_ucontext pointer. ib_uverbs_get_context does not call
+ * the uobj_{alloc|get_xxx} helpers which store the ib_ucontext in
+ * the uverbs_attr_bundle wrapping the ib_udata, so ib_udata does
+ * NOT imply a valid ib_ucontext here!
+ */
+static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ int status;
+
+ if (udata && uctx && dev->attr.max_dpp_pds) {
+ pd->dpp_enabled =
+ ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
+ pd->num_dpp_qp =
+ pd->dpp_enabled ? (dev->nic_info.db_page_size /
+ dev->attr.wqe_size) : 0;
+ }
+
+ if (dev->pd_mgr->pd_prealloc_valid)
+ return ocrdma_get_pd_num(dev, pd);
+
+retry:
+ status = ocrdma_mbx_alloc_pd(dev, pd);
+ if (status) {
+ if (pd->dpp_enabled) {
+ pd->dpp_enabled = false;
+ pd->num_dpp_qp = 0;
+ goto retry;
+ }
+ return status;
+ }
+
+ return 0;
+}
+
+static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
+ struct ocrdma_pd *pd)
+{
+ return (uctx->cntxt_pd == pd);
+}
+
+static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
+ struct ocrdma_pd *pd)
+{
+ if (dev->pd_mgr->pd_prealloc_valid)
+ ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
+ else
+ ocrdma_mbx_dealloc_pd(dev, pd);
+}
+
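+/* Every ucontext owns a dedicated PD (cntxt_pd); CQ doorbell pages are
+ * mapped through it, and the first PD allocated on the context may reuse
+ * it (see ocrdma_get_ucontext_pd()).
+ */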
+static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ struct ib_device *ibdev = &dev->ibdev;
+ struct ib_pd *pd;
+ int status;
+
+ pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
+ if (!pd)
+ return -ENOMEM;
+
+ pd->device = ibdev;
+ uctx->cntxt_pd = get_ocrdma_pd(pd);
+
+ status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
+ if (status) {
+ kfree(uctx->cntxt_pd);
+ goto err;
+ }
+
+ uctx->cntxt_pd->uctx = uctx;
+ uctx->cntxt_pd->ibpd.device = &dev->ibdev;
+err:
+ return status;
+}
+
+static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ struct ocrdma_pd *pd = uctx->cntxt_pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+
+ if (uctx->pd_in_use) {
+ pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
+ __func__, dev->id, pd->id);
+ }
+ uctx->cntxt_pd = NULL;
+ _ocrdma_dealloc_pd(dev, pd);
+ kfree(pd);
+}
+
+static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ struct ocrdma_pd *pd = NULL;
+
+ mutex_lock(&uctx->mm_list_lock);
+ if (!uctx->pd_in_use) {
+ uctx->pd_in_use = true;
+ pd = uctx->cntxt_pd;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+
+ return pd;
+}
+
+static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ mutex_lock(&uctx->mm_list_lock);
+ uctx->pd_in_use = false;
+ mutex_unlock(&uctx->mm_list_lock);
+}
+
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
+{
+ struct ib_device *ibdev = uctx->device;
+ int status;
+ struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
+ struct ocrdma_alloc_ucontext_resp resp = {};
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);
+
+ if (!udata)
+ return -EFAULT;
+ INIT_LIST_HEAD(&ctx->mm_head);
+ mutex_init(&ctx->mm_list_lock);
+
+ ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
+ &ctx->ah_tbl.pa, GFP_KERNEL);
+ if (!ctx->ah_tbl.va)
+ return -ENOMEM;
+
+ ctx->ah_tbl.len = map_len;
+
+ resp.ah_tbl_len = ctx->ah_tbl.len;
+ resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);
+
+ status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
+ if (status)
+ goto map_err;
+
+ status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
+ if (status)
+ goto pd_err;
+
+ resp.dev_id = dev->id;
+ resp.max_inline_data = dev->attr.max_inline_data;
+ resp.wqe_size = dev->attr.wqe_size;
+ resp.rqe_size = dev->attr.rqe_size;
+ resp.dpp_wqe_size = dev->attr.wqe_size;
+
+ memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
+ status = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (status)
+ goto cpy_err;
+ return 0;
+
+cpy_err:
+ ocrdma_dealloc_ucontext_pd(ctx);
+pd_err:
+ ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
+map_err:
+ dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
+ ctx->ah_tbl.pa);
+ return status;
+}
+
+void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct ocrdma_mm *mm, *tmp;
+ struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
+ struct pci_dev *pdev = dev->nic_info.pdev;
+
+ ocrdma_dealloc_ucontext_pd(uctx);
+
+ ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
+ dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
+ uctx->ah_tbl.pa);
+
+ list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
+ list_del(&mm->entry);
+ kfree(mm);
+ }
+}
+
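+/* Map a range previously registered via ocrdma_add_mmap(): doorbell pages
+ * are mapped uncached, DPP pages write-combined, and queue memory as
+ * normal cacheable pages.
+ */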
+int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
+ struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
+ unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
+ u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
+ unsigned long len = (vma->vm_end - vma->vm_start);
+ int status;
+ bool found;
+
+ if (vma->vm_start & (PAGE_SIZE - 1))
+ return -EINVAL;
+ found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
+ if (!found)
+ return -EINVAL;
+
+ if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
+ dev->nic_info.db_total_size)) &&
+ (len <= dev->nic_info.db_page_size)) {
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ len, vma->vm_page_prot);
+ } else if (dev->nic_info.dpp_unmapped_len &&
+ (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
+ (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
+ dev->nic_info.dpp_unmapped_len)) &&
+ (len <= dev->nic_info.dpp_unmapped_len)) {
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ len, vma->vm_page_prot);
+ } else {
+ status = remap_pfn_range(vma, vma->vm_start,
+ vma->vm_pgoff, len, vma->vm_page_prot);
+ }
+ return status;
+}
+
+static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
+ struct ib_udata *udata)
+{
+ int status;
+ u64 db_page_addr;
+ u64 dpp_page_addr = 0;
+ u32 db_page_size;
+ struct ocrdma_alloc_pd_uresp rsp;
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.id = pd->id;
+ rsp.dpp_enabled = pd->dpp_enabled;
+ db_page_addr = ocrdma_get_db_addr(dev, pd->id);
+ db_page_size = dev->nic_info.db_page_size;
+
+ status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
+ if (status)
+ return status;
+
+ if (pd->dpp_enabled) {
+ dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
+ (pd->id * PAGE_SIZE);
+ status = ocrdma_add_mmap(uctx, dpp_page_addr,
+ PAGE_SIZE);
+ if (status)
+ goto dpp_map_err;
+ rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
+ rsp.dpp_page_addr_lo = dpp_page_addr;
+ }
+
+ status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
+ if (status)
+ goto ucopy_err;
+
+ pd->uctx = uctx;
+ return 0;
+
+ucopy_err:
+ if (pd->dpp_enabled)
+ ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
+dpp_map_err:
+ ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
+ return status;
+}
+
+int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ib_device *ibdev = ibpd->device;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+ struct ocrdma_pd *pd;
+ int status;
+ u8 is_uctx_pd = false;
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
+
+ if (udata) {
+ pd = ocrdma_get_ucontext_pd(uctx);
+ if (pd) {
+ is_uctx_pd = true;
+ goto pd_mapping;
+ }
+ }
+
+ pd = get_ocrdma_pd(ibpd);
+ status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
+ if (status)
+ goto exit;
+
+pd_mapping:
+ if (udata) {
+ status = ocrdma_copy_pd_uresp(dev, pd, udata);
+ if (status)
+ goto err;
+ }
+ return 0;
+
+err:
+ if (is_uctx_pd)
+ ocrdma_release_ucontext_pd(uctx);
+ else
+ _ocrdma_dealloc_pd(dev, pd);
+exit:
+ return status;
+}
+
+int ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ struct ocrdma_ucontext *uctx = NULL;
+ u64 usr_db;
+
+ uctx = pd->uctx;
+ if (uctx) {
+ u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
+ (pd->id * PAGE_SIZE);
+ if (pd->dpp_enabled)
+ ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
+ usr_db = ocrdma_get_db_addr(dev, pd->id);
+ ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
+
+ if (is_ucontext_pd(uctx, pd)) {
+ ocrdma_release_ucontext_pd(uctx);
+ return 0;
+ }
+ }
+ _ocrdma_dealloc_pd(dev, pd);
+ return 0;
+}
+
+static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+ u32 pdid, int acc, u32 num_pbls, u32 addr_check)
+{
+ int status;
+
+ mr->hwmr.fr_mr = 0;
+ mr->hwmr.local_rd = 1;
+ mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
+ mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hwmr.num_pbls = num_pbls;
+
+ status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
+ if (status)
+ return status;
+
+ mr->ibmr.lkey = mr->hwmr.lkey;
+ if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
+ mr->ibmr.rkey = mr->hwmr.lkey;
+ return 0;
+}
+
+struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
+{
+ int status;
+ struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
+ pr_err("%s err, invalid access rights\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
+ OCRDMA_ADDR_CHECK_DISABLE);
+ if (status) {
+ kfree(mr);
+ return ERR_PTR(status);
+ }
+
+ return &mr->ibmr;
+}
+
+static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
+ struct ocrdma_hw_mr *mr)
+{
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ int i = 0;
+
+ if (mr->pbl_table) {
+ for (i = 0; i < mr->num_pbls; i++) {
+ if (!mr->pbl_table[i].va)
+ continue;
+ dma_free_coherent(&pdev->dev, mr->pbl_size,
+ mr->pbl_table[i].va,
+ mr->pbl_table[i].pa);
+ }
+ kfree(mr->pbl_table);
+ mr->pbl_table = NULL;
+ }
+}
+
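+/* Pick the smallest PBL size (power-of-two multiple of OCRDMA_MIN_HPAGE_SIZE)
+ * that keeps the PBL count needed for num_pbes within the device limit.
+ */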
+static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+ u32 num_pbes)
+{
+ u32 num_pbls = 0;
+ u32 idx = 0;
+ int status = 0;
+ u32 pbl_size;
+
+ do {
+ pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
+ if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
+ status = -EFAULT;
+ break;
+ }
+ num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
+ num_pbls = num_pbls / (pbl_size / sizeof(u64));
+ idx++;
+ } while (num_pbls >= dev->attr.max_num_mr_pbl);
+
+ mr->hwmr.num_pbes = num_pbes;
+ mr->hwmr.num_pbls = num_pbls;
+ mr->hwmr.pbl_size = pbl_size;
+ return status;
+}
+
+static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
+{
+ int status = 0;
+ int i;
+ u32 dma_len = mr->pbl_size;
+ struct pci_dev *pdev = dev->nic_info.pdev;
+ void *va;
+ dma_addr_t pa;
+
+ mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
+ GFP_KERNEL);
+
+ if (!mr->pbl_table)
+ return -ENOMEM;
+
+ for (i = 0; i < mr->num_pbls; i++) {
+ va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
+ if (!va) {
+ ocrdma_free_mr_pbl_tbl(dev, mr);
+ status = -ENOMEM;
+ break;
+ }
+ mr->pbl_table[i].va = va;
+ mr->pbl_table[i].pa = pa;
+ }
+ return status;
+}
+
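+/* Walk the umem in PAGE_SIZE blocks and store each DMA address as a
+ * little-endian PBE, moving to the next PBL whenever one fills up.
+ */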
+static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr)
+{
+ struct ocrdma_pbe *pbe;
+ struct ib_block_iter biter;
+ struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
+ int pbe_cnt;
+ u64 pg_addr;
+
+ if (!mr->hwmr.num_pbes)
+ return;
+
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ pbe_cnt = 0;
+
+ rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {
+ /* store the page address in pbe */
+ pg_addr = rdma_block_iter_dma_address(&biter);
+ pbe->pa_lo = cpu_to_le32(pg_addr);
+ pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
+ pbe_cnt += 1;
+ pbe++;
+
+ /* if the given pbl is full of stored pbes,
+ * move to the next pbl.
+ */
+ if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ pbe_cnt = 0;
+ }
+ }
+}
+
+struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
+ u64 usr_addr, int acc, struct ib_udata *udata)
+{
+ int status = -ENOMEM;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd;
+
+ pd = get_ocrdma_pd(ibpd);
+
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(status);
+ mr->umem = ib_umem_get(ibpd->device, start, len, acc);
+ if (IS_ERR(mr->umem)) {
+ status = -EFAULT;
+ goto umem_err;
+ }
+ status = ocrdma_get_pbl_info(
+ dev, mr, ib_umem_num_dma_blocks(mr->umem, PAGE_SIZE));
+ if (status)
+ goto umem_err;
+
+ mr->hwmr.pbe_size = PAGE_SIZE;
+ mr->hwmr.va = usr_addr;
+ mr->hwmr.len = len;
+ mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hwmr.local_rd = 1;
+ mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
+ if (status)
+ goto umem_err;
+ build_user_pbes(dev, mr);
+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
+ if (status)
+ goto mbx_err;
+ mr->ibmr.lkey = mr->hwmr.lkey;
+ if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
+ mr->ibmr.rkey = mr->hwmr.lkey;
+
+ return &mr->ibmr;
+
+mbx_err:
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+umem_err:
+ kfree(mr);
+ return ERR_PTR(status);
+}
+
+int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
+{
+ struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
+
+ (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+
+ kfree(mr->pages);
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+
+ /* it could be user registered memory. */
+ ib_umem_release(mr->umem);
+ kfree(mr);
+
+ /* Don't stop cleanup, in case FW is unresponsive */
+ if (dev->mqe_ctx.fw_error_state) {
+ pr_err("%s(%d) fw not responding.\n",
+ __func__, dev->id);
+ }
+ return 0;
+}
+
+static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+ struct ib_udata *udata)
+{
+ int status;
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
+ struct ocrdma_create_cq_uresp uresp;
+
+ /* this must be user flow! */
+ if (!udata)
+ return -EINVAL;
+
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.cq_id = cq->id;
+ uresp.page_size = PAGE_ALIGN(cq->len);
+ uresp.num_pages = 1;
+ uresp.max_hw_cqe = cq->max_hw_cqe;
+ uresp.page_addr[0] = virt_to_phys(cq->va);
+ uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
+ uresp.db_page_size = dev->nic_info.db_page_size;
+ uresp.phase_change = cq->phase_change ? 1 : 0;
+ status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (status) {
+ pr_err("%s(%d) copy error cqid=0x%x.\n",
+ __func__, dev->id, cq->id);
+ goto err;
+ }
+ status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
+ if (status)
+ goto err;
+ status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
+ if (status) {
+ ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
+ goto err;
+ }
+ cq->ucontext = uctx;
+err:
+ return status;
+}
+
+int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ib_device *ibdev = ibcq->device;
+ int entries = attr->cqe;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+ struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
+ udata, struct ocrdma_ucontext, ibucontext);
+ u16 pd_id = 0;
+ int status;
+ struct ocrdma_create_cq_ureq ureq;
+
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
+ if (udata) {
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+ return -EFAULT;
+ } else
+ ureq.dpp_cq = 0;
+
+ spin_lock_init(&cq->cq_lock);
+ spin_lock_init(&cq->comp_handler_lock);
+ INIT_LIST_HEAD(&cq->sq_head);
+ INIT_LIST_HEAD(&cq->rq_head);
+
+ if (udata)
+ pd_id = uctx->cntxt_pd->id;
+
+ status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
+ if (status)
+ return status;
+
+ if (udata) {
+ status = ocrdma_copy_cq_uresp(dev, cq, udata);
+ if (status)
+ goto ctx_err;
+ }
+ cq->phase = OCRDMA_CQE_VALID;
+ dev->cq_tbl[cq->id] = cq;
+ return 0;
+
+ctx_err:
+ ocrdma_mbx_destroy_cq(dev, cq);
+ return status;
+}
+
+int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
+ struct ib_udata *udata)
+{
+ int status = 0;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+
+ if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
+ status = -EINVAL;
+ return status;
+ }
+ ibcq->cqe = new_cnt;
+ return status;
+}
+
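+/* Count the CQEs still marked valid and ring the CQ doorbell for them so
+ * that nothing is left outstanding before the CQ is destroyed.
+ */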
+static void ocrdma_flush_cq(struct ocrdma_cq *cq)
+{
+ int cqe_cnt;
+ int valid_count = 0;
+ unsigned long flags;
+
+ struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
+ struct ocrdma_cqe *cqe = NULL;
+
+ cqe = cq->va;
+ cqe_cnt = cq->cqe_cnt;
+
+ /* The last irq might have scheduled a polling thread;
+ * sync up with it before hard flushing.
+ */
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ while (cqe_cnt) {
+ if (is_cqe_valid(cq, cqe))
+ valid_count++;
+ cqe++;
+ cqe_cnt--;
+ }
+ ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+}
+
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_eq *eq = NULL;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
+ int pdid = 0;
+ u32 irq, indx;
+
+ dev->cq_tbl[cq->id] = NULL;
+ indx = ocrdma_get_eq_table_index(dev, cq->eqn);
+
+ eq = &dev->eq_tbl[indx];
+ irq = ocrdma_get_irq(dev, eq);
+ synchronize_irq(irq);
+ ocrdma_flush_cq(cq);
+
+ ocrdma_mbx_destroy_cq(dev, cq);
+ if (cq->ucontext) {
+ pdid = cq->ucontext->cntxt_pd->id;
+ ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
+ PAGE_ALIGN(cq->len));
+ ocrdma_del_mmap(cq->ucontext,
+ ocrdma_get_db_addr(dev, pdid),
+ dev->nic_info.db_page_size);
+ }
+ return 0;
+}
+
+static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
+{
+ int status = -EINVAL;
+
+ if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
+ dev->qp_tbl[qp->id] = qp;
+ status = 0;
+ }
+ return status;
+}
+
+static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
+{
+ dev->qp_tbl[qp->id] = NULL;
+}
+
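+/* Validate QP create attributes against device limits and enforce the GSI
+ * QP rules: kernel-only, a single instance, and CQs not shared with
+ * consumer QPs.
+ */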
+static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
+ struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ if ((attrs->qp_type != IB_QPT_GSI) &&
+ (attrs->qp_type != IB_QPT_RC) &&
+ (attrs->qp_type != IB_QPT_UC) &&
+ (attrs->qp_type != IB_QPT_UD)) {
+ pr_err("%s(%d) unsupported qp type=0x%x requested\n",
+ __func__, dev->id, attrs->qp_type);
+ return -EOPNOTSUPP;
+ }
+ /* Skip the check for QP1 to support CM size of 128 */
+ if ((attrs->qp_type != IB_QPT_GSI) &&
+ (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
+ pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_send_wr);
+ pr_err("%s(%d) supported send_wr=0x%x\n",
+ __func__, dev->id, dev->attr.max_wqe);
+ return -EINVAL;
+ }
+ if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
+ pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_recv_wr);
+ pr_err("%s(%d) supported recv_wr=0x%x\n",
+ __func__, dev->id, dev->attr.max_rqe);
+ return -EINVAL;
+ }
+ if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
+ pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_inline_data);
+ pr_err("%s(%d) supported inline data size=0x%x\n",
+ __func__, dev->id, dev->attr.max_inline_data);
+ return -EINVAL;
+ }
+ if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
+ pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_send_sge);
+ pr_err("%s(%d) supported send_sge=0x%x\n",
+ __func__, dev->id, dev->attr.max_send_sge);
+ return -EINVAL;
+ }
+ if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
+ pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
+ __func__, dev->id, attrs->cap.max_recv_sge);
+ pr_err("%s(%d) supported recv_sge=0x%x\n",
+ __func__, dev->id, dev->attr.max_recv_sge);
+ return -EINVAL;
+ }
+ /* unprivileged user space cannot create special QP */
+ if (udata && attrs->qp_type == IB_QPT_GSI) {
+ pr_err
+ ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
+ __func__, dev->id, attrs->qp_type);
+ return -EINVAL;
+ }
+ /* allow creating only one GSI type of QP */
+ if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
+ pr_err("%s(%d) GSI special QPs already created.\n",
+ __func__, dev->id);
+ return -EINVAL;
+ }
+ /* verify consumer QPs are not trying to use GSI QP's CQ */
+ if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
+ if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
+ (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
+ pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
+ __func__, dev->id);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
+ struct ib_udata *udata, int dpp_offset,
+ int dpp_credit_lmt, int srq)
+{
+ int status;
+ u64 usr_db;
+ struct ocrdma_create_qp_uresp uresp;
+ struct ocrdma_pd *pd = qp->pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+
+ memset(&uresp, 0, sizeof(uresp));
+ usr_db = dev->nic_info.unmapped_db +
+ (pd->id * dev->nic_info.db_page_size);
+ uresp.qp_id = qp->id;
+ uresp.sq_dbid = qp->sq.dbid;
+ uresp.num_sq_pages = 1;
+ uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
+ uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
+ uresp.num_wqe_allocated = qp->sq.max_cnt;
+ if (!srq) {
+ uresp.rq_dbid = qp->rq.dbid;
+ uresp.num_rq_pages = 1;
+ uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
+ uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
+ uresp.num_rqe_allocated = qp->rq.max_cnt;
+ }
+ uresp.db_page_addr = usr_db;
+ uresp.db_page_size = dev->nic_info.db_page_size;
+ uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
+ uresp.db_shift = OCRDMA_DB_RQ_SHIFT;
+
+ if (qp->dpp_enabled) {
+ uresp.dpp_credit = dpp_credit_lmt;
+ uresp.dpp_offset = dpp_offset;
+ }
+ status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (status) {
+ pr_err("%s(%d) user copy error.\n", __func__, dev->id);
+ goto err;
+ }
+ status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
+ uresp.sq_page_size);
+ if (status)
+ goto err;
+
+ if (!srq) {
+ status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
+ uresp.rq_page_size);
+ if (status)
+ goto rq_map_err;
+ }
+ return status;
+rq_map_err:
+ ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
+err:
+ return status;
+}
+
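+/* Compute the SQ/RQ doorbell addresses within the PD's doorbell page; the
+ * register offsets differ between ASIC generations.
+ */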
+static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
+ struct ocrdma_pd *pd)
+{
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ qp->sq_db = dev->nic_info.db +
+ (pd->id * dev->nic_info.db_page_size) +
+ OCRDMA_DB_GEN2_SQ_OFFSET;
+ qp->rq_db = dev->nic_info.db +
+ (pd->id * dev->nic_info.db_page_size) +
+ OCRDMA_DB_GEN2_RQ_OFFSET;
+ } else {
+ qp->sq_db = dev->nic_info.db +
+ (pd->id * dev->nic_info.db_page_size) +
+ OCRDMA_DB_SQ_OFFSET;
+ qp->rq_db = dev->nic_info.db +
+ (pd->id * dev->nic_info.db_page_size) +
+ OCRDMA_DB_RQ_OFFSET;
+ }
+}
+
+static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
+{
+ qp->wqe_wr_id_tbl =
+ kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
+ GFP_KERNEL);
+ if (qp->wqe_wr_id_tbl == NULL)
+ return -ENOMEM;
+ qp->rqe_wr_id_tbl =
+ kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
+ if (qp->rqe_wr_id_tbl == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
+ struct ocrdma_pd *pd,
+ struct ib_qp_init_attr *attrs)
+{
+ qp->pd = pd;
+ spin_lock_init(&qp->q_lock);
+ INIT_LIST_HEAD(&qp->sq_entry);
+ INIT_LIST_HEAD(&qp->rq_entry);
+
+ qp->qp_type = attrs->qp_type;
+ qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
+ qp->max_inline_data = attrs->cap.max_inline_data;
+ qp->sq.max_sges = attrs->cap.max_send_sge;
+ qp->rq.max_sges = attrs->cap.max_recv_sge;
+ qp->state = OCRDMA_QPS_RST;
+ qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+}
+
+static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
+ struct ib_qp_init_attr *attrs)
+{
+ if (attrs->qp_type == IB_QPT_GSI) {
+ dev->gsi_qp_created = 1;
+ dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
+ dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
+ }
+}
+
+int ocrdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata)
+{
+ int status;
+ struct ib_pd *ibpd = ibqp->pd;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
+ struct ocrdma_create_qp_ureq ureq;
+ u16 dpp_credit_lmt, dpp_offset;
+
+ if (attrs->create_flags)
+ return -EOPNOTSUPP;
+
+ status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
+ if (status)
+ goto gen_err;
+
+ memset(&ureq, 0, sizeof(ureq));
+ if (udata) {
+ if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
+ return -EFAULT;
+ }
+ ocrdma_set_qp_init_params(qp, pd, attrs);
+ if (udata == NULL)
+ qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
+ OCRDMA_QP_FAST_REG);
+
+ mutex_lock(&dev->dev_lock);
+ status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
+ ureq.dpp_cq_id,
+ &dpp_offset, &dpp_credit_lmt);
+ if (status)
+ goto mbx_err;
+
+ /* user space QPs' wr_id tables are managed in the library */
+ if (udata == NULL) {
+ status = ocrdma_alloc_wr_id_tbl(qp);
+ if (status)
+ goto map_err;
+ }
+
+ status = ocrdma_add_qpn_map(dev, qp);
+ if (status)
+ goto map_err;
+ ocrdma_set_qp_db(dev, qp, pd);
+ if (udata) {
+ status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
+ dpp_credit_lmt,
+ (attrs->srq != NULL));
+ if (status)
+ goto cpy_err;
+ }
+ ocrdma_store_gsi_qp_cq(dev, attrs);
+ qp->ibqp.qp_num = qp->id;
+ mutex_unlock(&dev->dev_lock);
+ return 0;
+
+cpy_err:
+ ocrdma_del_qpn_map(dev, qp);
+map_err:
+ ocrdma_mbx_destroy_qp(dev, qp);
+mbx_err:
+ mutex_unlock(&dev->dev_lock);
+ kfree(qp->wqe_wr_id_tbl);
+ kfree(qp->rqe_wr_id_tbl);
+ pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
+gen_err:
+ return status;
+}
+
+int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ int status = 0;
+ struct ocrdma_qp *qp;
+ struct ocrdma_dev *dev;
+ enum ib_qp_state old_qps;
+
+ qp = get_ocrdma_qp(ibqp);
+ dev = get_ocrdma_dev(ibqp->device);
+ if (attr_mask & IB_QP_STATE)
+ status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
+ /* if the new and previous states are the same, the hw doesn't
+ * need to know about it.
+ */
+ if (status < 0)
+ return status;
+ return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
+}
+
+int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ unsigned long flags;
+ int status = -EINVAL;
+ struct ocrdma_qp *qp;
+ struct ocrdma_dev *dev;
+ enum ib_qp_state old_qps, new_qps;
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ qp = get_ocrdma_qp(ibqp);
+ dev = get_ocrdma_dev(ibqp->device);
+
+ /* synchronize with multiple contexts trying to change or retrieve the qp state */
+ mutex_lock(&dev->dev_lock);
+ /* synchronize with wqe/rqe posting and cqe processing contexts */
+ spin_lock_irqsave(&qp->q_lock, flags);
+ old_qps = get_ibqp_state(qp->state);
+ if (attr_mask & IB_QP_STATE)
+ new_qps = attr->qp_state;
+ else
+ new_qps = old_qps;
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+
+ if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
+ pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
+ "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
+ __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
+ old_qps, new_qps);
+ goto param_err;
+ }
+
+ status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
+ if (status > 0)
+ status = 0;
+param_err:
+ mutex_unlock(&dev->dev_lock);
+ return status;
+}
+
+static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
+{
+ switch (mtu) {
+ case 256:
+ return IB_MTU_256;
+ case 512:
+ return IB_MTU_512;
+ case 1024:
+ return IB_MTU_1024;
+ case 2048:
+ return IB_MTU_2048;
+ case 4096:
+ return IB_MTU_4096;
+ default:
+ return IB_MTU_1024;
+ }
+}
+
+static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
+{
+ int ib_qp_acc_flags = 0;
+
+ if (qp_cap_flags & OCRDMA_QP_INB_WR)
+ ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (qp_cap_flags & OCRDMA_QP_INB_RD)
+ ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ return ib_qp_acc_flags;
+}
+
+int ocrdma_query_qp(struct ib_qp *ibqp,
+ struct ib_qp_attr *qp_attr,
+ int attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ int status;
+ u32 qp_state;
+ struct ocrdma_qp_params params;
+ struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
+
+ memset(&params, 0, sizeof(params));
+ mutex_lock(&dev->dev_lock);
+ status = ocrdma_mbx_query_qp(dev, qp, &params);
+ mutex_unlock(&dev->dev_lock);
+ if (status)
+ goto mbx_err;
+ if (qp->qp_type == IB_QPT_UD)
+ qp_attr->qkey = params.qkey;
+ qp_attr->path_mtu =
+ ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
+ OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
+ OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
+ qp_attr->path_mig_state = IB_MIG_MIGRATED;
+ qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
+ qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
+ qp_attr->dest_qp_num =
+ params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
+
+ qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
+ qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
+ qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
+ qp_attr->cap.max_send_sge = qp->sq.max_sges;
+ qp_attr->cap.max_recv_sge = qp->rq.max_sges;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_init_attr->cap = qp_attr->cap;
+ qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
+
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
+ params.rnt_rc_sl_fl &
+ OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
+ qp->sgid_idx,
+ (params.hop_lmt_rq_psn &
+ OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
+ OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
+ (params.tclass_sq_psn &
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_SHIFT);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);
+
+ rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
+ rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
+ OCRDMA_QP_PARAMS_SL_MASK) >>
+ OCRDMA_QP_PARAMS_SL_SHIFT);
+ qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
+ OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
+ OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
+ qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
+ OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
+ OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
+ qp_attr->retry_cnt =
+ (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
+ OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
+ qp_attr->min_rnr_timer = 0;
+ qp_attr->pkey_index = 0;
+ qp_attr->port_num = 1;
+ rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
+ rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
+ qp_attr->alt_pkey_index = 0;
+ qp_attr->alt_port_num = 0;
+ qp_attr->alt_timeout = 0;
+ memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
+ qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
+ OCRDMA_QP_PARAMS_STATE_SHIFT;
+ qp_attr->qp_state = get_ibqp_state(qp_state);
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
+ qp_attr->max_dest_rd_atomic =
+ params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
+ qp_attr->max_rd_atomic =
+ params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
+ qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
+ OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
+ /* Sync driver QP state with FW */
+ ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
+mbx_err:
+ return status;
+}
+
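+/* Helpers for the circular hardware queues: head/tail indices are masked
+ * with max_wqe_idx, and SRQ RQE slots are tracked in the idx_bit_fields
+ * bitmap.
+ */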
+static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
+{
+ unsigned int i = idx / 32;
+ u32 mask = (1U << (idx % 32));
+
+ srq->idx_bit_fields[i] ^= mask;
+}
+
+static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
+{
+ return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
+}
+
+static int is_hw_sq_empty(struct ocrdma_qp *qp)
+{
+ return (qp->sq.tail == qp->sq.head);
+}
+
+static int is_hw_rq_empty(struct ocrdma_qp *qp)
+{
+ return (qp->rq.tail == qp->rq.head);
+}
+
+static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
+{
+ return q->va + (q->head * q->entry_size);
+}
+
+static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
+ u32 idx)
+{
+ return q->va + (idx * q->entry_size);
+}
+
+static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
+{
+ q->head = (q->head + 1) & q->max_wqe_idx;
+}
+
+static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
+{
+ q->tail = (q->tail + 1) & q->max_wqe_idx;
+}
+
+/* discard the cqe for a given QP */
+static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
+{
+ unsigned long cq_flags;
+ unsigned long flags;
+ int discard_cnt = 0;
+ u32 cur_getp, stop_getp;
+ struct ocrdma_cqe *cqe;
+ u32 qpn = 0, wqe_idx = 0;
+
+ spin_lock_irqsave(&cq->cq_lock, cq_flags);
+
+ /* traverse the CQEs in the hw CQ,
+ * find the CQEs matching the given qp and
+ * mark them discarded by clearing the qpn.
+ * The doorbell is rung in poll_cq() as we
+ * don't complete CQEs out of order.
+ */
+
+ cur_getp = cq->getp;
+ /* find up to where we reap the cq. */
+ stop_getp = cur_getp;
+ do {
+ if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
+ break;
+
+ cqe = cq->va + cur_getp;
+ /* exit once (a) the whole hw cq has been reaped, or
+ * (b) the qp's sq/rq becomes empty.
+ */
+ qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
+ /* skip previously discarded cqes (qpn == 0) and cqes of other qps */
+ if (qpn == 0 || qpn != qp->id)
+ goto skip_cqe;
+
+ if (is_cqe_for_sq(cqe)) {
+ ocrdma_hwq_inc_tail(&qp->sq);
+ } else {
+ if (qp->srq) {
+ wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+ OCRDMA_CQE_BUFTAG_SHIFT) &
+ qp->srq->rq.max_wqe_idx;
+ BUG_ON(wqe_idx < 1);
+ spin_lock_irqsave(&qp->srq->q_lock, flags);
+ ocrdma_hwq_inc_tail(&qp->srq->rq);
+ ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
+ spin_unlock_irqrestore(&qp->srq->q_lock, flags);
+
+ } else {
+ ocrdma_hwq_inc_tail(&qp->rq);
+ }
+ }
+ /* mark cqe discarded so that it is not picked up later
+ * in the poll_cq().
+ */
+ discard_cnt += 1;
+ cqe->cmn.qpn = 0;
+skip_cqe:
+ cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
+ } while (cur_getp != stop_getp);
+ spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
+}
+
+void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
+{
+ int found = false;
+ unsigned long flags;
+ struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+ /* sync with any active CQ poll */
+
+ spin_lock_irqsave(&dev->flush_q_lock, flags);
+ found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
+ if (found)
+ list_del(&qp->sq_entry);
+ if (!qp->srq) {
+ found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
+ if (found)
+ list_del(&qp->rq_entry);
+ }
+ spin_unlock_irqrestore(&dev->flush_q_lock, flags);
+}
+
+int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct ocrdma_pd *pd;
+ struct ocrdma_qp *qp;
+ struct ocrdma_dev *dev;
+ struct ib_qp_attr attrs;
+ int attr_mask;
+ unsigned long flags;
+
+ qp = get_ocrdma_qp(ibqp);
+ dev = get_ocrdma_dev(ibqp->device);
+
+ pd = qp->pd;
+
+ /* change the QP state to ERROR */
+ if (qp->state != OCRDMA_QPS_RST) {
+ attrs.qp_state = IB_QPS_ERR;
+ attr_mask = IB_QP_STATE;
+ _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
+ }
+ /* ensure that CQEs for a newly created QP (whose id may be the same
+ * as that of the QP just being destroyed) don't get discarded until
+ * the old CQEs are discarded.
+ */
+ mutex_lock(&dev->dev_lock);
+ (void) ocrdma_mbx_destroy_qp(dev, qp);
+
+ /*
+ * acquire CQ lock while destroy is in progress, in order to
+ * protect against processing in-flight CQEs for this QP.
+ */
+ spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
+ if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
+ spin_lock(&qp->rq_cq->cq_lock);
+ ocrdma_del_qpn_map(dev, qp);
+ spin_unlock(&qp->rq_cq->cq_lock);
+ } else {
+ ocrdma_del_qpn_map(dev, qp);
+ }
+ spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
+
+ if (!pd->uctx) {
+ ocrdma_discard_cqes(qp, qp->sq_cq);
+ ocrdma_discard_cqes(qp, qp->rq_cq);
+ }
+ mutex_unlock(&dev->dev_lock);
+
+ if (pd->uctx) {
+ ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
+ PAGE_ALIGN(qp->sq.len));
+ if (!qp->srq)
+ ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
+ PAGE_ALIGN(qp->rq.len));
+ }
+
+ ocrdma_del_flush_qp(qp);
+
+ kfree(qp->wqe_wr_id_tbl);
+ kfree(qp->rqe_wr_id_tbl);
+ return 0;
+}
+
+static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
+ struct ib_udata *udata)
+{
+ int status;
+ struct ocrdma_create_srq_uresp uresp;
+
+ memset(&uresp, 0, sizeof(uresp));
+ uresp.rq_dbid = srq->rq.dbid;
+ uresp.num_rq_pages = 1;
+ uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
+ uresp.rq_page_size = srq->rq.len;
+ uresp.db_page_addr = dev->nic_info.unmapped_db +
+ (srq->pd->id * dev->nic_info.db_page_size);
+ uresp.db_page_size = dev->nic_info.db_page_size;
+ uresp.num_rqe_allocated = srq->rq.max_cnt;
+ if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
+ uresp.db_shift = 24;
+ } else {
+ uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
+ uresp.db_shift = 16;
+ }
+
+ status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+ if (status)
+ return status;
+ status = ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
+ uresp.rq_page_size);
+ if (status)
+ return status;
+ return status;
+}
+
+int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ int status;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
+ struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
+
+ if (init_attr->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
+ if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
+ return -EINVAL;
+ if (init_attr->attr.max_wr > dev->attr.max_rqe)
+ return -EINVAL;
+
+ spin_lock_init(&srq->q_lock);
+ srq->pd = pd;
+ srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
+ status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
+ if (status)
+ return status;
+
+ if (!udata) {
+ srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
+ GFP_KERNEL);
+ if (!srq->rqe_wr_id_tbl) {
+ status = -ENOMEM;
+ goto arm_err;
+ }
+
+ srq->bit_fields_len = (srq->rq.max_cnt / 32) +
+ (srq->rq.max_cnt % 32 ? 1 : 0);
+ srq->idx_bit_fields =
+ kmalloc_array(srq->bit_fields_len, sizeof(u32),
+ GFP_KERNEL);
+ if (!srq->idx_bit_fields) {
+ status = -ENOMEM;
+ goto arm_err;
+ }
+ memset(srq->idx_bit_fields, 0xff,
+ srq->bit_fields_len * sizeof(u32));
+ }
+
+ if (init_attr->attr.srq_limit) {
+ status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
+ if (status)
+ goto arm_err;
+ }
+
+ if (udata) {
+ status = ocrdma_copy_srq_uresp(dev, srq, udata);
+ if (status)
+ goto arm_err;
+ }
+
+ return 0;
+
+arm_err:
+ ocrdma_mbx_destroy_srq(dev, srq);
+ kfree(srq->rqe_wr_id_tbl);
+ kfree(srq->idx_bit_fields);
+ return status;
+}
+
+int ocrdma_modify_srq(struct ib_srq *ibsrq,
+ struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ int status;
+ struct ocrdma_srq *srq;
+
+ srq = get_ocrdma_srq(ibsrq);
+ if (srq_attr_mask & IB_SRQ_MAX_WR)
+ status = -EINVAL;
+ else
+ status = ocrdma_mbx_modify_srq(srq, srq_attr);
+ return status;
+}
+
+int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct ocrdma_srq *srq;
+
+ srq = get_ocrdma_srq(ibsrq);
+ return ocrdma_mbx_query_srq(srq, srq_attr);
+}
+
+int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct ocrdma_srq *srq;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
+
+ srq = get_ocrdma_srq(ibsrq);
+
+ ocrdma_mbx_destroy_srq(dev, srq);
+
+ if (srq->pd->uctx)
+ ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
+ PAGE_ALIGN(srq->rq.len));
+
+ kfree(srq->idx_bit_fields);
+ kfree(srq->rqe_wr_id_tbl);
+ return 0;
+}
+
+/* unprivileged verbs and their support functions. */
+static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
+ struct ocrdma_hdr_wqe *hdr,
+ const struct ib_send_wr *wr)
+{
+ struct ocrdma_ewqe_ud_hdr *ud_hdr =
+ (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
+ struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
+
+ ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
+ if (qp->qp_type == IB_QPT_GSI)
+ ud_hdr->qkey = qp->qkey;
+ else
+ ud_hdr->qkey = ud_wr(wr)->remote_qkey;
+ ud_hdr->rsvd_ahid = ah->id;
+ ud_hdr->hdr_type = ah->hdr_type;
+ if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
+ hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
+}
+
+static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
+ struct ocrdma_sge *sge, int num_sge,
+ struct ib_sge *sg_list)
+{
+ int i;
+
+ for (i = 0; i < num_sge; i++) {
+ sge[i].lrkey = sg_list[i].lkey;
+ sge[i].addr_lo = sg_list[i].addr;
+ sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
+ sge[i].len = sg_list[i].length;
+ hdr->total_len += sg_list[i].length;
+ }
+ if (num_sge == 0)
+ memset(sge, 0, sizeof(*sge));
+}
+
+static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
+{
+ uint32_t total_len = 0, i;
+
+ for (i = 0; i < num_sge; i++)
+ total_len += sg_list[i].length;
+ return total_len;
+}
+
+
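+/* For IB_SEND_INLINE on non-UD QPs, copy the payload straight into the WQE
+ * (bounded by max_inline_data); otherwise build a regular SGE list. The
+ * final WQE size is encoded into hdr->cw.
+ */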
+static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
+ struct ocrdma_hdr_wqe *hdr,
+ struct ocrdma_sge *sge,
+ const struct ib_send_wr *wr, u32 wqe_size)
+{
+ int i;
+ char *dpp_addr;
+
+ if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
+ hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
+ if (unlikely(hdr->total_len > qp->max_inline_data)) {
+ pr_err("%s() supported_len=0x%x,\n"
+ " unsupported len req=0x%x\n", __func__,
+ qp->max_inline_data, hdr->total_len);
+ return -EINVAL;
+ }
+ dpp_addr = (char *)sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(dpp_addr,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ dpp_addr += wr->sg_list[i].length;
+ }
+
+ wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
+ if (hdr->total_len == 0)
+ wqe_size += sizeof(struct ocrdma_sge);
+ hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
+ } else {
+ ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
+ if (wr->num_sge)
+ wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
+ else
+ wqe_size += sizeof(struct ocrdma_sge);
+ hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+ }
+ hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
+ return 0;
+}
+
+static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+ const struct ib_send_wr *wr)
+{
+ struct ocrdma_sge *sge;
+ u32 wqe_size = sizeof(*hdr);
+
+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
+ ocrdma_build_ud_hdr(qp, hdr, wr);
+ sge = (struct ocrdma_sge *)(hdr + 2);
+ wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
+ } else {
+ sge = (struct ocrdma_sge *)(hdr + 1);
+ }
+
+ return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
+}
+
+static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+ const struct ib_send_wr *wr)
+{
+ int status;
+ struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
+ struct ocrdma_sge *sge = ext_rw + 1;
+ u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
+
+ status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
+ if (status)
+ return status;
+ ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
+ ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
+ ext_rw->lrkey = rdma_wr(wr)->rkey;
+ ext_rw->len = hdr->total_len;
+ return 0;
+}
+
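+/* An RDMA READ WQE uses the same layout as a WRITE: the descriptor
+ * after the header holds the remote address/rkey, followed by the
+ * local SGEs that receive the data. Reads never use inline data.
+ */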
+static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+ const struct ib_send_wr *wr)
+{
+ struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
+ struct ocrdma_sge *sge = ext_rw + 1;
+ u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
+ sizeof(struct ocrdma_hdr_wqe);
+
+ ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
+ hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
+ hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
+ hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+
+ ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
+ ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
+ ext_rw->lrkey = rdma_wr(wr)->rkey;
+ ext_rw->len = hdr->total_len;
+}
+
+static int get_encoded_page_size(int pg_sz)
+{
+ /* Max size is 256M, i.e. 4096 << 16 */
+ int i = 0;
+ for (; i < 17; i++)
+ if (pg_sz == (4096 << i))
+ break;
+ return i;
+}
+
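+/* Build a fast-register WQE: encode the access flags, key, length,
+ * IOVA and first-byte offset, then write one 64-bit PBE per page of
+ * the MR into the (possibly chained) PBL table.
+ */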
+static int ocrdma_build_reg(struct ocrdma_qp *qp,
+ struct ocrdma_hdr_wqe *hdr,
+ const struct ib_reg_wr *wr)
+{
+ u64 fbo;
+ struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
+ struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
+ struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
+ struct ocrdma_pbe *pbe;
+ u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
+ int num_pbes = 0, i;
+
+ wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
+
+ hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
+ hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
+
+ if (wr->access & IB_ACCESS_LOCAL_WRITE)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
+ if (wr->access & IB_ACCESS_REMOTE_WRITE)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
+ if (wr->access & IB_ACCESS_REMOTE_READ)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
+ hdr->lkey = wr->key;
+ hdr->total_len = mr->ibmr.length;
+
+ fbo = mr->ibmr.iova - mr->pages[0];
+
+ fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
+ fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
+ fast_reg->fbo_hi = upper_32_bits(fbo);
+ fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
+ fast_reg->num_sges = mr->npages;
+ fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
+
+ pbe = pbl_tbl->va;
+ for (i = 0; i < mr->npages; i++) {
+ u64 buf_addr = mr->pages[i];
+
+ pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
+ pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
+ num_pbes += 1;
+ pbe++;
+
+ /* if the current pbl is full of pbes,
+ * move on to the next pbl.
+ */
+ if (num_pbes == (mr->hwmr.pbl_size/sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ }
+ }
+
+ return 0;
+}
+
+static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
+{
+ u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
+
+ iowrite32(val, qp->sq_db);
+}
+
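+/* Post a chain of send WRs. After validating the QP state, each WR is
+ * checked for ring space, built into a WQE for its opcode, recorded in
+ * the wr_id table, converted to little endian and made visible to the
+ * adapter with a write barrier before the SQ doorbell is rung.
+ */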
+int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ int status = 0;
+ struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+ struct ocrdma_hdr_wqe *hdr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+ if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+
+ while (wr) {
+ if (qp->qp_type == IB_QPT_UD &&
+ (wr->opcode != IB_WR_SEND &&
+ wr->opcode != IB_WR_SEND_WITH_IMM)) {
+ *bad_wr = wr;
+ status = -EINVAL;
+ break;
+ }
+ if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
+ wr->num_sge > qp->sq.max_sges) {
+ *bad_wr = wr;
+ status = -ENOMEM;
+ break;
+ }
+ hdr = ocrdma_hwq_head(&qp->sq);
+ hdr->cw = 0;
+ if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
+ hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
+ if (wr->send_flags & IB_SEND_FENCE)
+ hdr->cw |=
+ (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ hdr->cw |=
+ (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
+ hdr->total_len = 0;
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
+ hdr->immdt = ntohl(wr->ex.imm_data);
+ fallthrough;
+ case IB_WR_SEND:
+ hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
+ status = ocrdma_build_send(qp, hdr, wr);
+ break;
+ case IB_WR_SEND_WITH_INV:
+ hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
+ hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
+ hdr->lkey = wr->ex.invalidate_rkey;
+ status = ocrdma_build_send(qp, hdr, wr);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
+ hdr->immdt = ntohl(wr->ex.imm_data);
+ fallthrough;
+ case IB_WR_RDMA_WRITE:
+ hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
+ status = ocrdma_build_write(qp, hdr, wr);
+ break;
+ case IB_WR_RDMA_READ:
+ ocrdma_build_read(qp, hdr, wr);
+ break;
+ case IB_WR_LOCAL_INV:
+ hdr->cw |=
+ (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
+ hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
+ sizeof(struct ocrdma_sge)) /
+ OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
+ hdr->lkey = wr->ex.invalidate_rkey;
+ break;
+ case IB_WR_REG_MR:
+ status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
+ break;
+ default:
+ status = -EINVAL;
+ break;
+ }
+ if (status) {
+ *bad_wr = wr;
+ break;
+ }
+ if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
+ qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
+ else
+ qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
+ qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
+ ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
+ OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
+ /* make sure wqe is written before adapter can access it */
+ wmb();
+ /* inform hw to start processing it */
+ ocrdma_ring_sq_db(qp);
+
+ /* update pointer, counter for next wr */
+ ocrdma_hwq_inc_head(&qp->sq);
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ return status;
+}
+
+static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
+{
+ u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
+
+ iowrite32(val, qp->rq_db);
+}
+
+static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
+ const struct ib_recv_wr *wr, u16 tag)
+{
+ u32 wqe_size = 0;
+ struct ocrdma_sge *sge;
+ if (wr->num_sge)
+ wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
+ else
+ wqe_size = sizeof(*sge) + sizeof(*rqe);
+
+ rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
+ OCRDMA_WQE_SIZE_SHIFT);
+ rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
+ rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+ rqe->total_len = 0;
+ rqe->rsvd_tag = tag;
+ sge = (struct ocrdma_sge *)(rqe + 1);
+ ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
+ ocrdma_cpu_to_le32(rqe, wqe_size);
+}
+
+int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ int status = 0;
+ unsigned long flags;
+ struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
+ struct ocrdma_hdr_wqe *rqe;
+
+ spin_lock_irqsave(&qp->q_lock, flags);
+ if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ *bad_wr = wr;
+ return -EINVAL;
+ }
+ while (wr) {
+ if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
+ wr->num_sge > qp->rq.max_sges) {
+ *bad_wr = wr;
+ status = -ENOMEM;
+ break;
+ }
+ rqe = ocrdma_hwq_head(&qp->rq);
+ ocrdma_build_rqe(rqe, wr, 0);
+
+ qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
+ /* make sure rqe is written before adapter can access it */
+ wmb();
+
+ /* inform hw to start processing it */
+ ocrdma_ring_rq_db(qp);
+
+ /* update pointer, counter for next wr */
+ ocrdma_hwq_inc_head(&qp->rq);
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&qp->q_lock, flags);
+ return status;
+}
+
+/* CQEs for an SRQ's RQEs can potentially arrive out of order.
+ * The index gives the entry in the shadow table where the wr_id is
+ * stored; the tag/index is returned in the CQE so that the
+ * corresponding RQE can be referenced back later.
+ */
+static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
+{
+ int row = 0;
+ int indx = 0;
+
+ for (row = 0; row < srq->bit_fields_len; row++) {
+ if (srq->idx_bit_fields[row]) {
+ indx = ffs(srq->idx_bit_fields[row]);
+ indx = (row * 32) + (indx - 1);
+ BUG_ON(indx >= srq->rq.max_cnt);
+ ocrdma_srq_toggle_bit(srq, indx);
+ break;
+ }
+ }
+
+ BUG_ON(row == srq->bit_fields_len);
+ return indx + 1; /* Use from index 1 */
+}
+
+static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
+{
+ u32 val = srq->rq.dbid | (1 << 16);
+
+ iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
+}
+
+int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ int status = 0;
+ unsigned long flags;
+ struct ocrdma_srq *srq;
+ struct ocrdma_hdr_wqe *rqe;
+ u16 tag;
+
+ srq = get_ocrdma_srq(ibsrq);
+
+ spin_lock_irqsave(&srq->q_lock, flags);
+ while (wr) {
+ if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
+ wr->num_sge > srq->rq.max_sges) {
+ status = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+ tag = ocrdma_srq_get_idx(srq);
+ rqe = ocrdma_hwq_head(&srq->rq);
+ ocrdma_build_rqe(rqe, wr, tag);
+
+ srq->rqe_wr_id_tbl[tag] = wr->wr_id;
+ /* make sure rqe is written before adapter can perform DMA */
+ wmb();
+ /* inform hw to start processing it */
+ ocrdma_ring_srq_db(srq);
+ /* update pointer, counter for next wr */
+ ocrdma_hwq_inc_head(&srq->rq);
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&srq->q_lock, flags);
+ return status;
+}
+
+static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
+{
+ enum ib_wc_status ibwc_status;
+
+ switch (status) {
+ case OCRDMA_CQE_GENERAL_ERR:
+ ibwc_status = IB_WC_GENERAL_ERR;
+ break;
+ case OCRDMA_CQE_LOC_LEN_ERR:
+ ibwc_status = IB_WC_LOC_LEN_ERR;
+ break;
+ case OCRDMA_CQE_LOC_QP_OP_ERR:
+ ibwc_status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case OCRDMA_CQE_LOC_EEC_OP_ERR:
+ ibwc_status = IB_WC_LOC_EEC_OP_ERR;
+ break;
+ case OCRDMA_CQE_LOC_PROT_ERR:
+ ibwc_status = IB_WC_LOC_PROT_ERR;
+ break;
+ case OCRDMA_CQE_WR_FLUSH_ERR:
+ ibwc_status = IB_WC_WR_FLUSH_ERR;
+ break;
+ case OCRDMA_CQE_MW_BIND_ERR:
+ ibwc_status = IB_WC_MW_BIND_ERR;
+ break;
+ case OCRDMA_CQE_BAD_RESP_ERR:
+ ibwc_status = IB_WC_BAD_RESP_ERR;
+ break;
+ case OCRDMA_CQE_LOC_ACCESS_ERR:
+ ibwc_status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case OCRDMA_CQE_REM_INV_REQ_ERR:
+ ibwc_status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case OCRDMA_CQE_REM_ACCESS_ERR:
+ ibwc_status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case OCRDMA_CQE_REM_OP_ERR:
+ ibwc_status = IB_WC_REM_OP_ERR;
+ break;
+ case OCRDMA_CQE_RETRY_EXC_ERR:
+ ibwc_status = IB_WC_RETRY_EXC_ERR;
+ break;
+ case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
+ ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
+ break;
+ case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
+ ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
+ break;
+ case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
+ ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
+ break;
+ case OCRDMA_CQE_REM_ABORT_ERR:
+ ibwc_status = IB_WC_REM_ABORT_ERR;
+ break;
+ case OCRDMA_CQE_INV_EECN_ERR:
+ ibwc_status = IB_WC_INV_EECN_ERR;
+ break;
+ case OCRDMA_CQE_INV_EEC_STATE_ERR:
+ ibwc_status = IB_WC_INV_EEC_STATE_ERR;
+ break;
+ case OCRDMA_CQE_FATAL_ERR:
+ ibwc_status = IB_WC_FATAL_ERR;
+ break;
+ case OCRDMA_CQE_RESP_TIMEOUT_ERR:
+ ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
+ break;
+ default:
+ ibwc_status = IB_WC_GENERAL_ERR;
+ break;
+ }
+ return ibwc_status;
+}
+
+static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
+ u32 wqe_idx)
+{
+ struct ocrdma_hdr_wqe *hdr;
+ struct ocrdma_sge *rw;
+ int opcode;
+
+ hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
+
+ ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
+ /* Undo the hdr->cw swap */
+ opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
+ switch (opcode) {
+ case OCRDMA_WRITE:
+ ibwc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case OCRDMA_READ:
+ rw = (struct ocrdma_sge *)(hdr + 1);
+ ibwc->opcode = IB_WC_RDMA_READ;
+ ibwc->byte_len = rw->len;
+ break;
+ case OCRDMA_SEND:
+ ibwc->opcode = IB_WC_SEND;
+ break;
+ case OCRDMA_FR_MR:
+ ibwc->opcode = IB_WC_REG_MR;
+ break;
+ case OCRDMA_LKEY_INV:
+ ibwc->opcode = IB_WC_LOCAL_INV;
+ break;
+ default:
+ ibwc->status = IB_WC_GENERAL_ERR;
+ pr_err("%s() invalid opcode received = 0x%x\n",
+ __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
+ break;
+ }
+}
+
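+/* Overwrite the status field of a CQE with WR_FLUSH_ERR. UD receive
+ * CQEs keep their status in a different field, so the UD mask/shift
+ * is used for RQ CQEs of UD and GSI QPs.
+ */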
+static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
+ struct ocrdma_cqe *cqe)
+{
+ if (is_cqe_for_sq(cqe)) {
+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+ cqe->flags_status_srcqpn) &
+ ~OCRDMA_CQE_STATUS_MASK);
+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+ cqe->flags_status_srcqpn) |
+ (OCRDMA_CQE_WR_FLUSH_ERR <<
+ OCRDMA_CQE_STATUS_SHIFT));
+ } else {
+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+ cqe->flags_status_srcqpn) &
+ ~OCRDMA_CQE_UD_STATUS_MASK);
+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+ cqe->flags_status_srcqpn) |
+ (OCRDMA_CQE_WR_FLUSH_ERR <<
+ OCRDMA_CQE_UD_STATUS_SHIFT));
+ } else {
+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+ cqe->flags_status_srcqpn) &
+ ~OCRDMA_CQE_STATUS_MASK);
+ cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
+ cqe->flags_status_srcqpn) |
+ (OCRDMA_CQE_WR_FLUSH_ERR <<
+ OCRDMA_CQE_STATUS_SHIFT));
+ }
+ }
+}
+
+static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
+ struct ocrdma_qp *qp, int status)
+{
+ bool expand = false;
+
+ ibwc->byte_len = 0;
+ ibwc->qp = &qp->ibqp;
+ ibwc->status = ocrdma_to_ibwc_err(status);
+
+ ocrdma_flush_qp(qp);
+ ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
+
+ /* If wqes/rqes are still pending for which cqes must be
+ * returned, trigger expansion of this cqe into flush cqes.
+ */
+ if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
+ expand = true;
+ ocrdma_set_cqe_status_flushed(qp, cqe);
+ }
+ return expand;
+}
+
+static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
+ struct ocrdma_qp *qp, int status)
+{
+ ibwc->opcode = IB_WC_RECV;
+ ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
+ ocrdma_hwq_inc_tail(&qp->rq);
+
+ return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
+}
+
+static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
+ struct ocrdma_qp *qp, int status)
+{
+ ocrdma_update_wc(qp, ibwc, qp->sq.tail);
+ ocrdma_hwq_inc_tail(&qp->sq);
+
+ return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
+}
+
+
+static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
+ struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
+ bool *polled, bool *stop)
+{
+ bool expand;
+ struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+ int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+ if (status < OCRDMA_MAX_CQE_ERR)
+ atomic_inc(&dev->cqe_err_stats[status]);
+
+ /* When the hw sq is empty but the rq is not, keep the cqe
+ * so that the cq event is raised again.
+ */
+ if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
+ /* When the same cq serves both the rq and the sq, it is
+ * safe to return flush cqes for the RQEs.
+ */
+ if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
+ *polled = true;
+ status = OCRDMA_CQE_WR_FLUSH_ERR;
+ expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+ } else {
+ /* Stop processing further cqes; this cqe is needed to
+ * trigger the cq event on the RQ's buddy cq.
+ * When the QP is destroyed, this cqe will be removed
+ * from the cq's hardware queue.
+ */
+ *polled = false;
+ *stop = true;
+ expand = false;
+ }
+ } else if (is_hw_sq_empty(qp)) {
+ /* Do nothing */
+ expand = false;
+ *polled = false;
+ *stop = false;
+ } else {
+ *polled = true;
+ expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
+ }
+ return expand;
+}
+
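+/* A successful send CQE may be coalesced: it completes every WQE from
+ * the current tail up to the index reported in the CQE. Unsignaled
+ * WQEs are retired without producing a work completion, and polling
+ * expands until the tail catches up with the reported index.
+ */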
+static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
+ struct ocrdma_cqe *cqe,
+ struct ib_wc *ibwc, bool *polled)
+{
+ bool expand = false;
+ int tail = qp->sq.tail;
+ u32 wqe_idx;
+
+ if (!qp->wqe_wr_id_tbl[tail].signaled) {
+ *polled = false; /* WC cannot be consumed yet */
+ } else {
+ ibwc->status = IB_WC_SUCCESS;
+ ibwc->wc_flags = 0;
+ ibwc->qp = &qp->ibqp;
+ ocrdma_update_wc(qp, ibwc, tail);
+ *polled = true;
+ }
+ wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
+ OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
+ if (tail != wqe_idx)
+ expand = true; /* Coalesced CQE can't be consumed yet */
+
+ ocrdma_hwq_inc_tail(&qp->sq);
+ return expand;
+}
+
+static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
+ struct ib_wc *ibwc, bool *polled, bool *stop)
+{
+ int status;
+ bool expand;
+
+ status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+
+ if (status == OCRDMA_CQE_SUCCESS)
+ expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
+ else
+ expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
+ return expand;
+}
+
+static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
+ struct ocrdma_cqe *cqe)
+{
+ int status;
+ u16 hdr_type = 0;
+
+ status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
+ ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_SRCQP_MASK;
+ ibwc->pkey_index = 0;
+ ibwc->wc_flags = IB_WC_GRH;
+ ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
+ OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
+ OCRDMA_CQE_UD_XFER_LEN_MASK;
+
+ if (ocrdma_is_udp_encap_supported(dev)) {
+ hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
+ OCRDMA_CQE_UD_L3TYPE_SHIFT) &
+ OCRDMA_CQE_UD_L3TYPE_MASK;
+ ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ ibwc->network_hdr_type = hdr_type;
+ }
+
+ return status;
+}
+
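+/* For SRQ completions, the tag carried in the CQE indexes the shadow
+ * wr_id table. Return the wr_id, release the tag back to the bitmap
+ * and advance the SRQ tail.
+ */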
+static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
+ struct ocrdma_cqe *cqe,
+ struct ocrdma_qp *qp)
+{
+ unsigned long flags;
+ struct ocrdma_srq *srq;
+ u32 wqe_idx;
+
+ srq = get_ocrdma_srq(qp->ibqp.srq);
+ wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+ OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
+ BUG_ON(wqe_idx < 1);
+
+ ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
+ spin_lock_irqsave(&srq->q_lock, flags);
+ ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
+ spin_unlock_irqrestore(&srq->q_lock, flags);
+ ocrdma_hwq_inc_tail(&srq->rq);
+}
+
+static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
+ struct ib_wc *ibwc, bool *polled, bool *stop,
+ int status)
+{
+ bool expand;
+ struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+
+ if (status < OCRDMA_MAX_CQE_ERR)
+ atomic_inc(&dev->cqe_err_stats[status]);
+
+ /* When the hw rq is empty but the sq is not, keep the cqe
+ * so that the cq event is raised again.
+ */
+ if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
+ if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
+ *polled = true;
+ status = OCRDMA_CQE_WR_FLUSH_ERR;
+ expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
+ } else {
+ *polled = false;
+ *stop = true;
+ expand = false;
+ }
+ } else if (is_hw_rq_empty(qp)) {
+ /* Do nothing */
+ expand = false;
+ *polled = false;
+ *stop = false;
+ } else {
+ *polled = true;
+ expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+ }
+ return expand;
+}
+
+static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
+ struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
+{
+ struct ocrdma_dev *dev;
+
+ dev = get_ocrdma_dev(qp->ibqp.device);
+ ibwc->opcode = IB_WC_RECV;
+ ibwc->qp = &qp->ibqp;
+ ibwc->status = IB_WC_SUCCESS;
+
+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
+ ocrdma_update_ud_rcqe(dev, ibwc, cqe);
+ else
+ ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
+
+ if (is_cqe_imm(cqe)) {
+ ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
+ ibwc->wc_flags |= IB_WC_WITH_IMM;
+ } else if (is_cqe_wr_imm(cqe)) {
+ ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
+ ibwc->wc_flags |= IB_WC_WITH_IMM;
+ } else if (is_cqe_invalidated(cqe)) {
+ ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
+ ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+ if (qp->ibqp.srq) {
+ ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
+ } else {
+ ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
+ ocrdma_hwq_inc_tail(&qp->rq);
+ }
+}
+
+static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
+ struct ib_wc *ibwc, bool *polled, bool *stop)
+{
+ int status;
+ bool expand = false;
+
+ ibwc->wc_flags = 0;
+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
+ status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_UD_STATUS_MASK) >>
+ OCRDMA_CQE_UD_STATUS_SHIFT;
+ } else {
+ status = (le32_to_cpu(cqe->flags_status_srcqpn) &
+ OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+ }
+
+ if (status == OCRDMA_CQE_SUCCESS) {
+ *polled = true;
+ ocrdma_poll_success_rcqe(qp, cqe, ibwc);
+ } else {
+ expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
+ status);
+ }
+ return expand;
+}
+
+static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
+ u16 cur_getp)
+{
+ if (cq->phase_change) {
+ if (cur_getp == 0)
+ cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
+ } else {
+ /* clear valid bit */
+ cqe->flags_status_srcqpn = 0;
+ }
+}
+
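+/* Walk the hardware CQ from the current get pointer, handing each
+ * valid CQE to the SQ or RQ poller. "expand" re-polls the same CQE
+ * for coalesced/flushed completions, "stop" leaves it in place for
+ * the buddy CQ. Consumed CQEs are acknowledged via the CQ doorbell.
+ */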
+static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
+ struct ib_wc *ibwc)
+{
+ u16 qpn = 0;
+ int i = 0;
+ bool expand = false;
+ int polled_hw_cqes = 0;
+ struct ocrdma_qp *qp = NULL;
+ struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
+ struct ocrdma_cqe *cqe;
+ u16 cur_getp;
+ bool polled = false;
+ bool stop = false;
+
+ cur_getp = cq->getp;
+ while (num_entries) {
+ cqe = cq->va + cur_getp;
+ /* check whether valid cqe or not */
+ if (!is_cqe_valid(cq, cqe))
+ break;
+ qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
+ /* ignore discarded cqe */
+ if (qpn == 0)
+ goto skip_cqe;
+ qp = dev->qp_tbl[qpn];
+ BUG_ON(qp == NULL);
+
+ if (is_cqe_for_sq(cqe)) {
+ expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
+ &stop);
+ } else {
+ expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
+ &stop);
+ }
+ if (expand)
+ goto expand_cqe;
+ if (stop)
+ goto stop_cqe;
+ /* clear qpn to avoid duplicate processing by discard_cqe() */
+ cqe->cmn.qpn = 0;
+skip_cqe:
+ polled_hw_cqes += 1;
+ cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
+ ocrdma_change_cq_phase(cq, cqe, cur_getp);
+expand_cqe:
+ if (polled) {
+ num_entries -= 1;
+ i += 1;
+ ibwc = ibwc + 1;
+ polled = false;
+ }
+ }
+stop_cqe:
+ cq->getp = cur_getp;
+
+ if (polled_hw_cqes)
+ ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
+
+ return i;
+}
+
+/* insert error cqe if the QP's SQ or RQ's CQ matches the CQ under poll. */
+static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
+ struct ocrdma_qp *qp, struct ib_wc *ibwc)
+{
+ int err_cqes = 0;
+
+ while (num_entries) {
+ if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
+ break;
+ if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
+ ocrdma_update_wc(qp, ibwc, qp->sq.tail);
+ ocrdma_hwq_inc_tail(&qp->sq);
+ } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
+ ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
+ ocrdma_hwq_inc_tail(&qp->rq);
+ } else {
+ return err_cqes;
+ }
+ ibwc->byte_len = 0;
+ ibwc->status = IB_WC_WR_FLUSH_ERR;
+ ibwc = ibwc + 1;
+ err_cqes += 1;
+ num_entries -= 1;
+ }
+ return err_cqes;
+}
+
+int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ int cqes_to_poll = num_entries;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
+ int num_os_cqe = 0, err_cqes = 0;
+ struct ocrdma_qp *qp;
+ unsigned long flags;
+
+ /* poll cqes from adapter CQ */
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ cqes_to_poll -= num_os_cqe;
+
+ if (cqes_to_poll) {
+ wc = wc + num_os_cqe;
+ /* The adapter returns a single error cqe when a qp moves to
+ * the error state, so insert error cqes with wc_status
+ * WR_FLUSH_ERR for the pending WQEs and RQEs of every QP
+ * whose SQ or RQ uses this CQ.
+ */
+ spin_lock_irqsave(&dev->flush_q_lock, flags);
+ list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+ if (cqes_to_poll == 0)
+ break;
+ err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
+ cqes_to_poll -= err_cqes;
+ num_os_cqe += err_cqes;
+ wc = wc + err_cqes;
+ }
+ spin_unlock_irqrestore(&dev->flush_q_lock, flags);
+ }
+ return num_os_cqe;
+}
+
+int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
+{
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
+ u16 cq_id;
+ unsigned long flags;
+ bool arm_needed = false, sol_needed = false;
+
+ cq_id = cq->id;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
+ arm_needed = true;
+ if (cq_flags & IB_CQ_SOLICITED)
+ sol_needed = true;
+
+ ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return 0;
+}
+
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ int status;
+ struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ if (max_num_sg > dev->attr.max_pages_per_frmr)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
+ if (!mr->pages) {
+ status = -ENOMEM;
+ goto pl_err;
+ }
+
+ status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
+ if (status)
+ goto pbl_err;
+ mr->hwmr.fr_mr = 1;
+ mr->hwmr.remote_rd = 0;
+ mr->hwmr.remote_wr = 0;
+ mr->hwmr.local_rd = 0;
+ mr->hwmr.local_wr = 0;
+ mr->hwmr.mw_bind = 0;
+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
+ if (status)
+ goto pbl_err;
+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
+ if (status)
+ goto mbx_err;
+ mr->ibmr.rkey = mr->hwmr.lkey;
+ mr->ibmr.lkey = mr->hwmr.lkey;
+ dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
+ (unsigned long) mr;
+ return &mr->ibmr;
+mbx_err:
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+pbl_err:
+ kfree(mr->pages);
+pl_err:
+ kfree(mr);
+ return ERR_PTR(-ENOMEM);
+}
+
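+/* Callback for ib_sg_to_pages(): collect one page address per call
+ * into mr->pages, bounded by the number of PBEs reserved for the MR.
+ */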
+static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
+
+ if (unlikely(mr->npages == mr->hwmr.num_pbes))
+ return -ENOMEM;
+
+ mr->pages[mr->npages++] = addr;
+
+ return 0;
+}
+
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);
+
+ mr->npages = 0;
+
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
+}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
new file mode 100644
index 000000000..f860b7fce
--- /dev/null
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -0,0 +1,107 @@
+/* This file is part of the Emulex RoCE Device Driver for
+ * RoCE (RDMA over Converged Ethernet) adapters.
+ * Copyright (C) 2012-2015 Emulex. All rights reserved.
+ * EMULEX and SLI are trademarks of Emulex.
+ * www.emulex.com
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License (GPL) Version 2, available from the file COPYING in the main
+ * directory of this source tree, or the BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * linux-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+#ifndef __OCRDMA_VERBS_H__
+#define __OCRDMA_VERBS_H__
+
+int ocrdma_post_send(struct ib_qp *, const struct ib_send_wr *,
+ const struct ib_send_wr **bad_wr);
+int ocrdma_post_recv(struct ib_qp *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_wr);
+
+int ocrdma_poll_cq(struct ib_cq *, int num_entries, struct ib_wc *wc);
+int ocrdma_arm_cq(struct ib_cq *, enum ib_cq_notify_flags flags);
+
+int ocrdma_query_device(struct ib_device *, struct ib_device_attr *props,
+ struct ib_udata *uhw);
+int ocrdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
+
+enum rdma_protocol_type
+ocrdma_query_protocol(struct ib_device *device, u32 port_num);
+
+int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
+
+int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
+void ocrdma_dealloc_ucontext(struct ib_ucontext *uctx);
+
+int ocrdma_mmap(struct ib_ucontext *, struct vm_area_struct *vma);
+
+int ocrdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+int ocrdma_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
+
+int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
+int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
+int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+
+int ocrdma_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
+ struct ib_udata *udata);
+int _ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+ int attr_mask);
+int ocrdma_modify_qp(struct ib_qp *, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int ocrdma_query_qp(struct ib_qp *,
+ struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *);
+int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
+
+int ocrdma_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attr,
+ struct ib_udata *udata);
+int ocrdma_modify_srq(struct ib_srq *, struct ib_srq_attr *,
+ enum ib_srq_attr_mask, struct ib_udata *);
+int ocrdma_query_srq(struct ib_srq *, struct ib_srq_attr *);
+int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata);
+int ocrdma_post_srq_recv(struct ib_srq *, const struct ib_recv_wr *,
+ const struct ib_recv_wr **bad_recv_wr);
+
+int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *, int acc);
+struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
+ u64 virt, int acc, struct ib_udata *);
+struct ib_mr *ocrdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+
+#endif /* __OCRDMA_VERBS_H__ */