author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/infiniband/hw/irdma
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/infiniband/hw/irdma')
-rw-r--r-- drivers/infiniband/hw/irdma/Kconfig | 12
-rw-r--r-- drivers/infiniband/hw/irdma/Makefile | 27
-rw-r--r-- drivers/infiniband/hw/irdma/cm.c | 4432
-rw-r--r-- drivers/infiniband/hw/irdma/cm.h | 416
-rw-r--r-- drivers/infiniband/hw/irdma/ctrl.c | 5489
-rw-r--r-- drivers/infiniband/hw/irdma/defs.h | 1157
-rw-r--r-- drivers/infiniband/hw/irdma/hmc.c | 697
-rw-r--r-- drivers/infiniband/hw/irdma/hmc.h | 169
-rw-r--r-- drivers/infiniband/hw/irdma/hw.c | 2751
-rw-r--r-- drivers/infiniband/hw/irdma/i40iw_hw.c | 259
-rw-r--r-- drivers/infiniband/hw/irdma/i40iw_hw.h | 160
-rw-r--r-- drivers/infiniband/hw/irdma/i40iw_if.c | 217
-rw-r--r-- drivers/infiniband/hw/irdma/icrdma_hw.c | 202
-rw-r--r-- drivers/infiniband/hw/irdma/icrdma_hw.h | 72
-rw-r--r-- drivers/infiniband/hw/irdma/irdma.h | 156
-rw-r--r-- drivers/infiniband/hw/irdma/main.c | 369
-rw-r--r-- drivers/infiniband/hw/irdma/main.h | 557
-rw-r--r-- drivers/infiniband/hw/irdma/osdep.h | 82
-rw-r--r-- drivers/infiniband/hw/irdma/pble.c | 509
-rw-r--r-- drivers/infiniband/hw/irdma/pble.h | 132
-rw-r--r-- drivers/infiniband/hw/irdma/protos.h | 100
-rw-r--r-- drivers/infiniband/hw/irdma/puda.c | 1739
-rw-r--r-- drivers/infiniband/hw/irdma/puda.h | 185
-rw-r--r-- drivers/infiniband/hw/irdma/trace.c | 112
-rw-r--r-- drivers/infiniband/hw/irdma/trace.h | 3
-rw-r--r-- drivers/infiniband/hw/irdma/trace_cm.h | 460
-rw-r--r-- drivers/infiniband/hw/irdma/type.h | 1494
-rw-r--r-- drivers/infiniband/hw/irdma/uda.c | 265
-rw-r--r-- drivers/infiniband/hw/irdma/uda.h | 87
-rw-r--r-- drivers/infiniband/hw/irdma/uda_d.h | 128
-rw-r--r-- drivers/infiniband/hw/irdma/uk.c | 1646
-rw-r--r-- drivers/infiniband/hw/irdma/user.h | 413
-rw-r--r-- drivers/infiniband/hw/irdma/utils.c | 2545
-rw-r--r-- drivers/infiniband/hw/irdma/verbs.c | 4728
-rw-r--r-- drivers/infiniband/hw/irdma/verbs.h | 300
-rw-r--r-- drivers/infiniband/hw/irdma/ws.c | 406
-rw-r--r-- drivers/infiniband/hw/irdma/ws.h | 41
37 files changed, 32517 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig
new file mode 100644
index 0000000000..b6f9c41bca
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_IRDMA
+ tristate "Intel(R) Ethernet Protocol Driver for RDMA"
+ depends on INET
+ depends on IPV6 || !IPV6
+ depends on PCI
+ depends on ICE && I40E
+ select GENERIC_ALLOCATOR
+ select AUXILIARY_BUS
+ help
+ This is an Intel(R) Ethernet Protocol Driver for RDMA that
+ supports E810 (iWARP/RoCE) and X722 (iWARP) network devices.
diff --git a/drivers/infiniband/hw/irdma/Makefile b/drivers/infiniband/hw/irdma/Makefile
new file mode 100644
index 0000000000..48c3854235
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+# Copyright (c) 2019, Intel Corporation.
+
+#
+# Makefile for the Intel(R) Ethernet Connection RDMA Linux Driver
+#
+
+obj-$(CONFIG_INFINIBAND_IRDMA) += irdma.o
+
+irdma-objs := cm.o \
+ ctrl.o \
+ hmc.o \
+ hw.o \
+ i40iw_hw.o \
+ i40iw_if.o \
+ icrdma_hw.o \
+ main.o \
+ pble.o \
+ puda.o \
+ trace.o \
+ uda.o \
+ uk.o \
+ utils.o \
+ verbs.o \
+ ws.o \
+
+CFLAGS_trace.o = -I$(src)
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
new file mode 100644
index 0000000000..42d1e97710
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -0,0 +1,4432 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+#include "trace.h"
+
+static void irdma_cm_post_event(struct irdma_cm_event *event);
+static void irdma_disconnect_worker(struct work_struct *work);
+
+/**
+ * irdma_free_sqbuf - put back puda buffer if refcount is 0
+ * @vsi: The VSI structure of the device
+ * @bufp: puda buffer to free
+ */
+void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp)
+{
+ struct irdma_puda_buf *buf = bufp;
+ struct irdma_puda_rsrc *ilq = vsi->ilq;
+
+ if (refcount_dec_and_test(&buf->refcount))
+ irdma_puda_ret_bufpool(ilq, buf);
+}
+
+/**
+ * irdma_record_ird_ord - Record IRD/ORD passed in
+ * @cm_node: connection's node
+ * @conn_ird: connection IRD
+ * @conn_ord: connection ORD
+ */
+static void irdma_record_ird_ord(struct irdma_cm_node *cm_node, u32 conn_ird,
+ u32 conn_ord)
+{
+ if (conn_ird > cm_node->dev->hw_attrs.max_hw_ird)
+ conn_ird = cm_node->dev->hw_attrs.max_hw_ird;
+
+ if (conn_ord > cm_node->dev->hw_attrs.max_hw_ord)
+ conn_ord = cm_node->dev->hw_attrs.max_hw_ord;
+ else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
+ conn_ord = 1;
+ cm_node->ird_size = conn_ird;
+ cm_node->ord_size = conn_ord;
+}
+
+/**
+ * irdma_copy_ip_ntohl - copy IP address from network to host
+ * @dst: IP address in host order
+ * @src: IP address in network order (big endian)
+ */
+void irdma_copy_ip_ntohl(u32 *dst, __be32 *src)
+{
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst++ = ntohl(*src++);
+ *dst = ntohl(*src);
+}
+
+/**
+ * irdma_copy_ip_htonl - copy IP address from host to network order
+ * @dst: IP address in network order (big endian)
+ * @src: IP address in host order
+ */
+void irdma_copy_ip_htonl(__be32 *dst, u32 *src)
+{
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst++ = htonl(*src++);
+ *dst = htonl(*src);
+}
+
+/**
+ * irdma_get_addr_info
+ * @cm_node: contains ip/tcp info
+ * @cm_info: to get a copy of the cm_node ip/tcp info
+ */
+static void irdma_get_addr_info(struct irdma_cm_node *cm_node,
+ struct irdma_cm_info *cm_info)
+{
+ memset(cm_info, 0, sizeof(*cm_info));
+ cm_info->ipv4 = cm_node->ipv4;
+ cm_info->vlan_id = cm_node->vlan_id;
+ memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
+ memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
+ cm_info->loc_port = cm_node->loc_port;
+ cm_info->rem_port = cm_node->rem_port;
+}
+
+/**
+ * irdma_fill_sockaddr4 - fill in addr info for IPv4 connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void irdma_fill_sockaddr4(struct irdma_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+ laddr->sin_family = AF_INET;
+ raddr->sin_family = AF_INET;
+
+ laddr->sin_port = htons(cm_node->loc_port);
+ raddr->sin_port = htons(cm_node->rem_port);
+
+ laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
+ raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
+}
+
+/**
+ * irdma_fill_sockaddr6 - fill in addr info for IPv6 connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void irdma_fill_sockaddr6(struct irdma_cm_node *cm_node,
+ struct iw_cm_event *event)
+{
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+ struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
+
+ laddr6->sin6_family = AF_INET6;
+ raddr6->sin6_family = AF_INET6;
+
+ laddr6->sin6_port = htons(cm_node->loc_port);
+ raddr6->sin6_port = htons(cm_node->rem_port);
+
+ irdma_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ irdma_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+}
+
+/**
+ * irdma_get_cmevent_info - for cm event upcall
+ * @cm_node: connection's node
+ * @cm_id: upper layers cm struct for the event
+ * @event: upper layer's cm event
+ */
+static inline void irdma_get_cmevent_info(struct irdma_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ struct iw_cm_event *event)
+{
+ memcpy(&event->local_addr, &cm_id->m_local_addr,
+ sizeof(event->local_addr));
+ memcpy(&event->remote_addr, &cm_id->m_remote_addr,
+ sizeof(event->remote_addr));
+ if (cm_node) {
+ event->private_data = cm_node->pdata_buf;
+ event->private_data_len = (u8)cm_node->pdata.size;
+ event->ird = cm_node->ird_size;
+ event->ord = cm_node->ord_size;
+ }
+}
+
+/**
+ * irdma_send_cm_event - upcall cm's event handler
+ * @cm_node: connection's node
+ * @cm_id: upper layer's cm info struct
+ * @type: Event type to indicate
+ * @status: status for the event type
+ */
+static int irdma_send_cm_event(struct irdma_cm_node *cm_node,
+ struct iw_cm_id *cm_id,
+ enum iw_cm_event_type type, int status)
+{
+ struct iw_cm_event event = {};
+
+ event.event = type;
+ event.status = status;
+ trace_irdma_send_cm_event(cm_node, cm_id, type, status,
+ __builtin_return_address(0));
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node %p cm_id=%p state=%d accel=%d event_type=%d status=%d\n",
+ cm_node, cm_id, cm_node->accelerated, cm_node->state, type,
+ status);
+
+ switch (type) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ if (cm_node->ipv4)
+ irdma_fill_sockaddr4(cm_node, &event);
+ else
+ irdma_fill_sockaddr6(cm_node, &event);
+ event.provider_data = cm_node;
+ event.private_data = cm_node->pdata_buf;
+ event.private_data_len = (u8)cm_node->pdata.size;
+ event.ird = cm_node->ird_size;
+ break;
+ case IW_CM_EVENT_CONNECT_REPLY:
+ irdma_get_cmevent_info(cm_node, cm_id, &event);
+ break;
+ case IW_CM_EVENT_ESTABLISHED:
+ event.ird = cm_node->ird_size;
+ event.ord = cm_node->ord_size;
+ break;
+ case IW_CM_EVENT_DISCONNECT:
+ case IW_CM_EVENT_CLOSE:
+ /* Wait if we are in RTS but haven't issued the iwcm event upcall */
+ if (!cm_node->accelerated)
+ wait_for_completion(&cm_node->establish_comp);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return cm_id->event_handler(cm_id, &event);
+}
+
+/**
+ * irdma_timer_list_prep - add connection nodes to a list to perform timer tasks
+ * @cm_core: cm's core
+ * @timer_list: a timer list to which cm_node will be selected
+ */
+static void irdma_timer_list_prep(struct irdma_cm_core *cm_core,
+ struct list_head *timer_list)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
+ hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if ((cm_node->close_entry || cm_node->send_entry) &&
+ refcount_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->timer_entry, timer_list);
+ }
+}
+
+/**
+ * irdma_create_event - create cm event
+ * @cm_node: connection's node
+ * @type: Event type to generate
+ */
+static struct irdma_cm_event *irdma_create_event(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type)
+{
+ struct irdma_cm_event *event;
+
+ if (!cm_node->cm_id)
+ return NULL;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+
+ if (!event)
+ return NULL;
+
+ event->type = type;
+ event->cm_node = cm_node;
+ memcpy(event->cm_info.rem_addr, cm_node->rem_addr,
+ sizeof(event->cm_info.rem_addr));
+ memcpy(event->cm_info.loc_addr, cm_node->loc_addr,
+ sizeof(event->cm_info.loc_addr));
+ event->cm_info.rem_port = cm_node->rem_port;
+ event->cm_info.loc_port = cm_node->loc_port;
+ event->cm_info.cm_id = cm_node->cm_id;
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: node=%p event=%p type=%u dst=%pI4 src=%pI4\n", cm_node,
+ event, type, event->cm_info.loc_addr,
+ event->cm_info.rem_addr);
+ trace_irdma_create_event(cm_node, type, __builtin_return_address(0));
+ irdma_cm_post_event(event);
+
+ return event;
+}
+
+/**
+ * irdma_free_retrans_entry - free send entry
+ * @cm_node: connection's node
+ */
+static void irdma_free_retrans_entry(struct irdma_cm_node *cm_node)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+ struct irdma_timer_entry *send_entry;
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ return;
+
+ cm_node->send_entry = NULL;
+ irdma_free_sqbuf(&iwdev->vsi, send_entry->sqbuf);
+ kfree(send_entry);
+ refcount_dec(&cm_node->refcnt);
+}
+
+/**
+ * irdma_cleanup_retrans_entry - free send entry with lock
+ * @cm_node: connection's node
+ */
+static void irdma_cleanup_retrans_entry(struct irdma_cm_node *cm_node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ irdma_free_retrans_entry(cm_node);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+}
+
+/**
+ * irdma_form_ah_cm_frame - get a free packet and build frame with address handle
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct irdma_puda_buf *irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ u8 *buf;
+ struct tcphdr *tcph;
+ u16 pktsize;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+
+ if (!cm_node->ah || !cm_node->ah->ah_info.ah_valid) {
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: AH invalid\n");
+ return NULL;
+ }
+
+ sqbuf = irdma_puda_get_bufpool(vsi->ilq);
+ if (!sqbuf) {
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: SQ buf NULL\n");
+ return NULL;
+ }
+
+ sqbuf->ah_id = cm_node->ah->ah_info.ah_idx;
+ buf = sqbuf->mem.va;
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata)
+ pd_len = pdata->size;
+
+ pktsize = sizeof(*tcph) + opts_len + hdr_len + pd_len;
+
+ memset(buf, 0, sizeof(*tcph));
+
+ sqbuf->totallen = pktsize;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = cm_node;
+
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else {
+ tcph->ack_seq = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->fin = 1;
+ }
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
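+ /* doff is the TCP header length in 32-bit words; options are rounded up to a 4-byte multiple */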
+ tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->doff << 2;
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
+
+ refcount_set(&sqbuf->refcount, 1);
+
+ print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, sqbuf->mem.va, sqbuf->totallen, false);
+
+ return sqbuf;
+}
+
+/**
+ * irdma_form_uda_cm_frame - get a free packet and build a full TCP/IP frame
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer mpa header
+ * @pdata: pointer to private data
+ * @flags: indicates FIN or ACK
+ */
+static struct irdma_puda_buf *irdma_form_uda_cm_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ u8 *buf;
+
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct ethhdr *ethh;
+ u16 pktsize;
+ u16 eth_hlen = ETH_HLEN;
+ u32 opts_len = 0;
+ u32 pd_len = 0;
+ u32 hdr_len = 0;
+
+ u16 vtag;
+
+ sqbuf = irdma_puda_get_bufpool(vsi->ilq);
+ if (!sqbuf)
+ return NULL;
+
+ buf = sqbuf->mem.va;
+
+ if (options)
+ opts_len = (u32)options->size;
+
+ if (hdr)
+ hdr_len = hdr->size;
+
+ if (pdata)
+ pd_len = pdata->size;
+
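+ /* an 802.1Q VLAN tag adds 4 bytes to the Ethernet header */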
+ if (cm_node->vlan_id < VLAN_N_VID)
+ eth_hlen += 4;
+
+ if (cm_node->ipv4)
+ pktsize = sizeof(*iph) + sizeof(*tcph);
+ else
+ pktsize = sizeof(*ip6h) + sizeof(*tcph);
+ pktsize += opts_len + hdr_len + pd_len;
+
+ memset(buf, 0, eth_hlen + pktsize);
+
+ sqbuf->totallen = pktsize + eth_hlen;
+ sqbuf->maclen = eth_hlen;
+ sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+ sqbuf->scratch = cm_node;
+
+ ethh = (struct ethhdr *)buf;
+ buf += eth_hlen;
+
+ if (cm_node->do_lpb)
+ sqbuf->do_lpb = true;
+
+ if (cm_node->ipv4) {
+ sqbuf->ipv4 = true;
+
+ iph = (struct iphdr *)buf;
+ buf += sizeof(*iph);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto =
+ htons(ETH_P_8021Q);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
+
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto =
+ htons(ETH_P_IP);
+ } else {
+ ethh->h_proto = htons(ETH_P_IP);
+ }
+
+ iph->version = IPVERSION;
+ iph->ihl = 5; /* 5 * 4-byte words, IP header len */
+ iph->tos = cm_node->tos;
+ iph->tot_len = htons(pktsize);
+ iph->id = htons(++cm_node->tcp_cntxt.loc_id);
+
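+ /* 0x4000 in frag_off sets the Don't Fragment (DF) bit */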
+ iph->frag_off = htons(0x4000);
+ iph->ttl = 0x40;
+ iph->protocol = IPPROTO_TCP;
+ iph->saddr = htonl(cm_node->loc_addr[0]);
+ iph->daddr = htonl(cm_node->rem_addr[0]);
+ } else {
+ sqbuf->ipv4 = false;
+ ip6h = (struct ipv6hdr *)buf;
+ buf += sizeof(*ip6h);
+ tcph = (struct tcphdr *)buf;
+ buf += sizeof(*tcph);
+
+ ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+ ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ((struct vlan_ethhdr *)ethh)->h_vlan_proto =
+ htons(ETH_P_8021Q);
+ vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id;
+ ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
+ ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto =
+ htons(ETH_P_IPV6);
+ } else {
+ ethh->h_proto = htons(ETH_P_IPV6);
+ }
+ ip6h->version = 6;
+ ip6h->priority = cm_node->tos >> 4;
+ ip6h->flow_lbl[0] = cm_node->tos << 4;
+ ip6h->flow_lbl[1] = 0;
+ ip6h->flow_lbl[2] = 0;
+ ip6h->payload_len = htons(pktsize - sizeof(*ip6h));
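+ /* next header protocol 6 is TCP */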
+ ip6h->nexthdr = 6;
+ ip6h->hop_limit = 128;
+ irdma_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
+ cm_node->loc_addr);
+ irdma_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
+ cm_node->rem_addr);
+ }
+
+ tcph->source = htons(cm_node->loc_port);
+ tcph->dest = htons(cm_node->rem_port);
+ tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+ if (flags & SET_ACK) {
+ cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+ tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+ tcph->ack = 1;
+ } else {
+ tcph->ack_seq = 0;
+ }
+
+ if (flags & SET_SYN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->syn = 1;
+ } else {
+ cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+ }
+
+ if (flags & SET_FIN) {
+ cm_node->tcp_cntxt.loc_seq_num++;
+ tcph->fin = 1;
+ }
+
+ if (flags & SET_RST)
+ tcph->rst = 1;
+
+ tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+ sqbuf->tcphlen = tcph->doff << 2;
+ tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+ tcph->urg_ptr = 0;
+
+ if (opts_len) {
+ memcpy(buf, options->addr, opts_len);
+ buf += opts_len;
+ }
+
+ if (hdr_len) {
+ memcpy(buf, hdr->addr, hdr_len);
+ buf += hdr_len;
+ }
+
+ if (pdata && pdata->addr)
+ memcpy(buf, pdata->addr, pdata->size);
+
+ refcount_set(&sqbuf->refcount, 1);
+
+ print_hex_dump_debug("ILQ: TRANSMIT ILQ BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, sqbuf->mem.va, sqbuf->totallen, false);
+ return sqbuf;
+}
+
+/**
+ * irdma_send_reset - Send RST packet
+ * @cm_node: connection's node
+ */
+int irdma_send_reset(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+ int flags = SET_RST | SET_ACK;
+
+ trace_irdma_send_reset(cm_node, 0, __builtin_return_address(0));
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ flags);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: caller: %pS cm_node %p cm_id=%p accel=%d state=%d rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ __builtin_return_address(0), cm_node, cm_node->cm_id,
+ cm_node->accelerated, cm_node->state, cm_node->rem_port,
+ cm_node->loc_port, cm_node->rem_addr, cm_node->loc_addr);
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 0,
+ 1);
+}
+
+/**
+ * irdma_active_open_err - send event for active side cm error
+ * @cm_node: connection's node
+ * @reset: Flag to send reset or not
+ */
+static void irdma_active_open_err(struct irdma_cm_node *cm_node, bool reset)
+{
+ trace_irdma_active_open_err(cm_node, reset,
+ __builtin_return_address(0));
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_connect_errs++;
+ if (reset) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node=%p state=%d\n", cm_node,
+ cm_node->state);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ }
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+}
+
+/**
+ * irdma_passive_open_err - handle passive side cm error
+ * @cm_node: connection's node
+ * @reset: send reset or just free cm_node
+ */
+static void irdma_passive_open_err(struct irdma_cm_node *cm_node, bool reset)
+{
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->cm_core->stats_passive_errs++;
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: cm_node=%p state =%d\n",
+ cm_node, cm_node->state);
+ trace_irdma_passive_open_err(cm_node, reset,
+ __builtin_return_address(0));
+ if (reset)
+ irdma_send_reset(cm_node);
+ else
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * irdma_event_connect_error - to create connect error event
+ * @event: cm information for connect event
+ */
+static void irdma_event_connect_error(struct irdma_cm_event *event)
+{
+ struct irdma_qp *iwqp;
+ struct iw_cm_id *cm_id;
+
+ cm_id = event->cm_node->cm_id;
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+
+ if (!iwqp || !iwqp->iwdev)
+ return;
+
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+ -ECONNRESET);
+ irdma_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * irdma_process_options - process options from TCP header
+ * @cm_node: connection's node
+ * @optionsloc: point to start of options
+ * @optionsize: size of all options
+ * @syn_pkt: flag if syn packet
+ */
+static int irdma_process_options(struct irdma_cm_node *cm_node, u8 *optionsloc,
+ u32 optionsize, u32 syn_pkt)
+{
+ u32 tmp;
+ u32 offset = 0;
+ union all_known_options *all_options;
+ char got_mss_option = 0;
+
+ while (offset < optionsize) {
+ all_options = (union all_known_options *)(optionsloc + offset);
+ switch (all_options->base.optionnum) {
+ case OPTION_NUM_EOL:
+ offset = optionsize;
+ break;
+ case OPTION_NUM_NONE:
+ offset += 1;
+ continue;
+ case OPTION_NUM_MSS:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: MSS Length: %d Offset: %d Size: %d\n",
+ all_options->mss.len, offset, optionsize);
+ got_mss_option = 1;
+ if (all_options->mss.len != 4)
+ return -EINVAL;
+ tmp = ntohs(all_options->mss.mss);
+ if ((cm_node->ipv4 &&
+ (tmp + IRDMA_MTU_TO_MSS_IPV4) < IRDMA_MIN_MTU_IPV4) ||
+ (!cm_node->ipv4 &&
+ (tmp + IRDMA_MTU_TO_MSS_IPV6) < IRDMA_MIN_MTU_IPV6))
+ return -EINVAL;
+ if (tmp < cm_node->tcp_cntxt.mss)
+ cm_node->tcp_cntxt.mss = tmp;
+ break;
+ case OPTION_NUM_WINDOW_SCALE:
+ cm_node->tcp_cntxt.snd_wscale =
+ all_options->windowscale.shiftcount;
+ break;
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Unsupported TCP Option: %x\n",
+ all_options->base.optionnum);
+ break;
+ }
+ offset += all_options->base.len;
+ }
+ if (!got_mss_option && syn_pkt)
+ cm_node->tcp_cntxt.mss = IRDMA_CM_DEFAULT_MSS;
+
+ return 0;
+}
+
+/**
+ * irdma_handle_tcp_options - setup TCP context info after parsing TCP options
+ * @cm_node: connection's node
+ * @tcph: pointer tcp header
+ * @optionsize: size of options rcvd
+ * @passive: active or passive flag
+ */
+static int irdma_handle_tcp_options(struct irdma_cm_node *cm_node,
+ struct tcphdr *tcph, int optionsize,
+ int passive)
+{
+ u8 *optionsloc = (u8 *)&tcph[1];
+ int ret;
+
+ if (optionsize) {
+ ret = irdma_process_options(cm_node, optionsloc, optionsize,
+ (u32)tcph->syn);
+ if (ret) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Node %p, Sending Reset\n", cm_node);
+ if (passive)
+ irdma_passive_open_err(cm_node, true);
+ else
+ irdma_active_open_err(cm_node, true);
+ return ret;
+ }
+ }
+
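+ /* scale the peer's advertised window by its window-scale option to get the real send window */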
+ cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window)
+ << cm_node->tcp_cntxt.snd_wscale;
+
+ if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
+ cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+
+ return 0;
+}
+
+/**
+ * irdma_build_mpa_v1 - build a MPA V1 frame
+ * @cm_node: connection's node
+ * @start_addr: address where to build frame
+ * @mpa_key: to do read0 or write0
+ */
+static void irdma_build_mpa_v1(struct irdma_cm_node *cm_node, void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v1 *mpa_frame = start_addr;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
+ break;
+ case MPA_KEY_REPLY:
+ memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+ break;
+ default:
+ break;
+ }
+ mpa_frame->flags = IETF_MPA_FLAGS_CRC;
+ mpa_frame->rev = cm_node->mpa_frame_rev;
+ mpa_frame->priv_data_len = htons(cm_node->pdata.size);
+}
+
+/**
+ * irdma_build_mpa_v2 - build a MPA V2 frame
+ * @cm_node: connection's node
+ * @start_addr: buffer start address
+ * @mpa_key: to do read0 or write0
+ */
+static void irdma_build_mpa_v2(struct irdma_cm_node *cm_node, void *start_addr,
+ u8 mpa_key)
+{
+ struct ietf_mpa_v2 *mpa_frame = start_addr;
+ struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+ u16 ctrl_ird, ctrl_ord;
+
+ /* initialize the upper 5 bytes of the frame */
+ irdma_build_mpa_v1(cm_node, start_addr, mpa_key);
+ mpa_frame->flags |= IETF_MPA_V2_FLAG;
+ if (cm_node->iwdev->iw_ooo) {
+ mpa_frame->flags |= IETF_MPA_FLAGS_MARKERS;
+ cm_node->rcv_mark_en = true;
+ }
+ mpa_frame->priv_data_len = cpu_to_be16(be16_to_cpu(mpa_frame->priv_data_len) +
+ IETF_RTR_MSG_SIZE);
+
+ /* initialize RTR msg */
+ if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+ ctrl_ird = IETF_NO_IRD_ORD;
+ ctrl_ord = IETF_NO_IRD_ORD;
+ } else {
+ ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD :
+ cm_node->ird_size;
+ ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+ IETF_NO_IRD_ORD :
+ cm_node->ord_size;
+ }
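+ /* peer-to-peer mode is always advertised in the RTR message */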
+ ctrl_ird |= IETF_PEER_TO_PEER;
+
+ switch (mpa_key) {
+ case MPA_KEY_REQUEST:
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ case MPA_KEY_REPLY:
+ switch (cm_node->send_rdma0_op) {
+ case SEND_RDMA_WRITE_ZERO:
+ ctrl_ord |= IETF_RDMA0_WRITE;
+ break;
+ case SEND_RDMA_READ_ZERO:
+ ctrl_ord |= IETF_RDMA0_READ;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ rtr_msg->ctrl_ird = htons(ctrl_ird);
+ rtr_msg->ctrl_ord = htons(ctrl_ord);
+}
+
+/**
+ * irdma_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
+ * @cm_node: connection's node
+ * @mpa: MPA data buffer
+ * @mpa_key: to do read0 or write0
+ */
+static int irdma_cm_build_mpa_frame(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *mpa, u8 mpa_key)
+{
+ int hdr_len = 0;
+
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V1:
+ hdr_len = sizeof(struct ietf_mpa_v1);
+ irdma_build_mpa_v1(cm_node, mpa->addr, mpa_key);
+ break;
+ case IETF_MPA_V2:
+ hdr_len = sizeof(struct ietf_mpa_v2);
+ irdma_build_mpa_v2(cm_node, mpa->addr, mpa_key);
+ break;
+ default:
+ break;
+ }
+
+ return hdr_len;
+}
+
+/**
+ * irdma_send_mpa_request - active node send mpa request to passive node
+ * @cm_node: connection's node
+ */
+static int irdma_send_mpa_request(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
+ cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REQUEST);
+ if (!cm_node->mpa_hdr.size) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: mpa size = %d\n", cm_node->mpa_hdr.size);
+ return -EINVAL;
+ }
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
+ &cm_node->mpa_hdr,
+ &cm_node->pdata, SET_ACK);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_send_mpa_reject - send an MPA reject frame
+ * @cm_node: connection's node
+ * @pdata: reject data for connection
+ * @plen: length of reject data
+ */
+static int irdma_send_mpa_reject(struct irdma_cm_node *cm_node,
+ const void *pdata, u8 plen)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_mpa_priv_info priv_info;
+
+ cm_node->mpa_hdr.addr = &cm_node->mpa_v2_frame;
+ cm_node->mpa_hdr.size = irdma_cm_build_mpa_frame(cm_node,
+ &cm_node->mpa_hdr,
+ MPA_KEY_REPLY);
+
+ cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
+ priv_info.addr = pdata;
+ priv_info.size = plen;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL,
+ &cm_node->mpa_hdr, &priv_info,
+ SET_ACK | SET_FIN);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ cm_node->state = IRDMA_CM_STATE_FIN_WAIT1;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_negotiate_mpa_v2_ird_ord - negotiate MPAv2 IRD/ORD
+ * @cm_node: connection's node
+ * @buf: Data pointer
+ */
+static int irdma_negotiate_mpa_v2_ird_ord(struct irdma_cm_node *cm_node,
+ u8 *buf)
+{
+ struct ietf_mpa_v2 *mpa_v2_frame;
+ struct ietf_rtr_msg *rtr_msg;
+ u16 ird_size;
+ u16 ord_size;
+ u16 ctrl_ord;
+ u16 ctrl_ird;
+
+ mpa_v2_frame = (struct ietf_mpa_v2 *)buf;
+ rtr_msg = &mpa_v2_frame->rtr_msg;
+
+ /* parse rtr message */
+ ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+ ctrl_ird = ntohs(rtr_msg->ctrl_ird);
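+ /* the low bits of ctrl_ird/ctrl_ord carry the IRD/ORD values; the upper bits are control flags */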
+ ird_size = ctrl_ird & IETF_NO_IRD_ORD;
+ ord_size = ctrl_ord & IETF_NO_IRD_ORD;
+
+ if (!(ctrl_ird & IETF_PEER_TO_PEER))
+ return -EOPNOTSUPP;
+
+ if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
+ cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
+ goto negotiate_done;
+ }
+
+ if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ /* responder */
+ if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
+ cm_node->ird_size = 1;
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+ } else {
+ /* initiator */
+ if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
+ /* Remote peer doesn't support RDMA0_READ */
+ return -EOPNOTSUPP;
+
+ if (cm_node->ord_size > ird_size)
+ cm_node->ord_size = ird_size;
+
+ if (cm_node->ird_size < ord_size)
+ /* no resources available */
+ return -EINVAL;
+ }
+
+negotiate_done:
+ if (ctrl_ord & IETF_RDMA0_READ)
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ else if (ctrl_ord & IETF_RDMA0_WRITE)
+ cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
+ else
+ /* Not supported RDMA0 operation */
+ return -EOPNOTSUPP;
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: MPAV2 Negotiated ORD: %d, IRD: %d\n",
+ cm_node->ord_size, cm_node->ird_size);
+ trace_irdma_negotiate_mpa_v2(cm_node);
+ return 0;
+}
+
+/**
+ * irdma_parse_mpa - process an IETF MPA frame
+ * @cm_node: connection's node
+ * @buf: Data pointer
+ * @type: to return accept or reject
+ * @len: Len of mpa buffer
+ */
+static int irdma_parse_mpa(struct irdma_cm_node *cm_node, u8 *buf, u32 *type,
+ u32 len)
+{
+ struct ietf_mpa_v1 *mpa_frame;
+ int mpa_hdr_len, priv_data_len, ret;
+
+ *type = IRDMA_MPA_REQUEST_ACCEPT;
+
+ if (len < sizeof(struct ietf_mpa_v1)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: ietf buffer small (%x)\n", len);
+ return -EINVAL;
+ }
+
+ mpa_frame = (struct ietf_mpa_v1 *)buf;
+ mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+ priv_data_len = ntohs(mpa_frame->priv_data_len);
+
+ if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: private_data too big %d\n", priv_data_len);
+ return -EOVERFLOW;
+ }
+
+ if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: unsupported mpa rev = %d\n", mpa_frame->rev);
+ return -EINVAL;
+ }
+
+ if (mpa_frame->rev > cm_node->mpa_frame_rev) {
+ ibdev_dbg(&cm_node->iwdev->ibdev, "CM: rev %d\n",
+ mpa_frame->rev);
+ return -EINVAL;
+ }
+
+ cm_node->mpa_frame_rev = mpa_frame->rev;
+ if (cm_node->state != IRDMA_CM_STATE_MPAREQ_SENT) {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ,
+ IETF_MPA_KEY_SIZE)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Unexpected MPA Key received\n");
+ return -EINVAL;
+ }
+ } else {
+ if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP,
+ IETF_MPA_KEY_SIZE)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: Unexpected MPA Key received\n");
+ return -EINVAL;
+ }
+ }
+
+ if (priv_data_len + mpa_hdr_len > len) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: ietf buffer len(%x + %x != %x)\n",
+ priv_data_len, mpa_hdr_len, len);
+ return -EOVERFLOW;
+ }
+
+ if (len > IRDMA_MAX_CM_BUF) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: ietf buffer large len = %d\n", len);
+ return -EOVERFLOW;
+ }
+
+ switch (mpa_frame->rev) {
+ case IETF_MPA_V2:
+ mpa_hdr_len += IETF_RTR_MSG_SIZE;
+ ret = irdma_negotiate_mpa_v2_ird_ord(cm_node, buf);
+ if (ret)
+ return ret;
+ break;
+ case IETF_MPA_V1:
+ default:
+ break;
+ }
+
+ memcpy(cm_node->pdata_buf, buf + mpa_hdr_len, priv_data_len);
+ cm_node->pdata.size = priv_data_len;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
+ *type = IRDMA_MPA_REQUEST_REJECT;
+
+ if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
+ cm_node->snd_mark_en = true;
+
+ return 0;
+}
+
+/**
+ * irdma_schedule_cm_timer - schedule a send or close timer for a cm node
+ * @cm_node: connection's node
+ * @sqbuf: buffer to send
+ * @type: if it is send or close
+ * @send_retrans: if rexmits to be done
+ * @close_when_complete: is cm_node to be removed
+ *
+ * note - cm_node needs to be protected before calling this. Encase in:
+ * irdma_rem_ref_cm_node(cm_core, cm_node);
+ * irdma_schedule_cm_timer(...)
+ * refcount_inc(&cm_node->refcnt);
+ */
+int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *sqbuf,
+ enum irdma_timer_type type, int send_retrans,
+ int close_when_complete)
+{
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ struct irdma_timer_entry *new_send;
+ u32 was_timer_set;
+ unsigned long flags;
+
+ new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+ if (!new_send) {
+ if (type != IRDMA_TIMER_TYPE_CLOSE)
+ irdma_free_sqbuf(vsi, sqbuf);
+ return -ENOMEM;
+ }
+
+ new_send->retrycount = IRDMA_DEFAULT_RETRYS;
+ new_send->retranscount = IRDMA_DEFAULT_RETRANS;
+ new_send->sqbuf = sqbuf;
+ new_send->timetosend = jiffies;
+ new_send->type = type;
+ new_send->send_retrans = send_retrans;
+ new_send->close_when_complete = close_when_complete;
+
+ if (type == IRDMA_TIMER_TYPE_CLOSE) {
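+ /* delay close handling by roughly 100 ms (HZ / 10 jiffies) */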
+ new_send->timetosend += (HZ / 10);
+ if (cm_node->close_entry) {
+ kfree(new_send);
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: already close entry\n");
+ return -EINVAL;
+ }
+
+ cm_node->close_entry = new_send;
+ } else { /* type == IRDMA_TIMER_TYPE_SEND */
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ cm_node->send_entry = new_send;
+ refcount_inc(&cm_node->refcnt);
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ new_send->timetosend = jiffies + IRDMA_RETRY_TIMEOUT;
+
+ refcount_inc(&sqbuf->refcount);
+ irdma_puda_send_buf(vsi->ilq, sqbuf);
+ if (!send_retrans) {
+ irdma_cleanup_retrans_entry(cm_node);
+ if (close_when_complete)
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+ }
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ was_timer_set = timer_pending(&cm_core->tcp_timer);
+
+ if (!was_timer_set) {
+ cm_core->tcp_timer.expires = new_send->timetosend;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_retrans_expired - Could not rexmit the packet
+ * @cm_node: connection's node
+ */
+static void irdma_retrans_expired(struct irdma_cm_node *cm_node)
+{
+ enum irdma_cm_node_state state = cm_node->state;
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ switch (state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_CLOSING:
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_LAST_ACK:
+ irdma_send_reset(cm_node);
+ break;
+ default:
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+ break;
+ }
+}
+
+/**
+ * irdma_handle_close_entry - for handling retry/timeouts
+ * @cm_node: connection's node
+ * @rem_node: flag for remove cm_node
+ */
+static void irdma_handle_close_entry(struct irdma_cm_node *cm_node,
+ u32 rem_node)
+{
+ struct irdma_timer_entry *close_entry = cm_node->close_entry;
+ struct irdma_qp *iwqp;
+ unsigned long flags;
+
+ if (!close_entry)
+ return;
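+ /* close entries store a QP pointer (not a transmit buffer) in sqbuf; NULL here means a TIME_WAIT node */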
+ iwqp = (struct irdma_qp *)close_entry->sqbuf;
+ if (iwqp) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->cm_id) {
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
+ iwqp->hw_iwarp_state = IRDMA_QP_STATE_ERROR;
+ iwqp->last_aeq = IRDMA_AE_RESET_SENT;
+ iwqp->ibqp_state = IB_QPS_ERR;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_cm_disconn(iwqp);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ } else if (rem_node) {
+ /* TIME_WAIT state */
+ irdma_rem_ref_cm_node(cm_node);
+ }
+
+ kfree(close_entry);
+ cm_node->close_entry = NULL;
+}
+
+/**
+ * irdma_cm_timer_tick - system's timer expired callback
+ * @t: Pointer to timer_list
+ */
+static void irdma_cm_timer_tick(struct timer_list *t)
+{
+ unsigned long nexttimeout = jiffies + IRDMA_LONG_TIME;
+ struct irdma_cm_node *cm_node;
+ struct irdma_timer_entry *send_entry, *close_entry;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
+ struct irdma_sc_vsi *vsi;
+ u32 settimer = 0;
+ unsigned long timetosend;
+ unsigned long flags;
+ struct list_head timer_list;
+
+ INIT_LIST_HEAD(&timer_list);
+
+ rcu_read_lock();
+ irdma_timer_list_prep(cm_core, &timer_list);
+ rcu_read_unlock();
+
+ list_for_each_safe (list_node, list_core_temp, &timer_list) {
+ cm_node = container_of(list_node, struct irdma_cm_node,
+ timer_entry);
+ close_entry = cm_node->close_entry;
+
+ if (close_entry) {
+ if (time_after(close_entry->timetosend, jiffies)) {
+ if (nexttimeout > close_entry->timetosend ||
+ !settimer) {
+ nexttimeout = close_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ irdma_handle_close_entry(cm_node, 1);
+ }
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+
+ send_entry = cm_node->send_entry;
+ if (!send_entry)
+ goto done;
+ if (time_after(send_entry->timetosend, jiffies)) {
+ if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
+ if (nexttimeout > send_entry->timetosend ||
+ !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ irdma_free_retrans_entry(cm_node);
+ }
+ goto done;
+ }
+
+ if (cm_node->state == IRDMA_CM_STATE_OFFLOADED ||
+ cm_node->state == IRDMA_CM_STATE_CLOSED) {
+ irdma_free_retrans_entry(cm_node);
+ goto done;
+ }
+
+ if (!send_entry->retranscount || !send_entry->retrycount) {
+ irdma_free_retrans_entry(cm_node);
+
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock,
+ flags);
+ irdma_retrans_expired(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ goto done;
+ }
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+ vsi = &cm_node->iwdev->vsi;
+ if (!cm_node->ack_rcvd) {
+ refcount_inc(&send_entry->sqbuf->refcount);
+ irdma_puda_send_buf(vsi->ilq, send_entry->sqbuf);
+ cm_node->cm_core->stats_pkt_retrans++;
+ }
+
+ spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+ if (send_entry->send_retrans) {
+ send_entry->retranscount--;
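+ /* exponential backoff: the retransmit interval doubles each time, capped at IRDMA_MAX_TIMEOUT */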
+ timetosend = (IRDMA_RETRY_TIMEOUT <<
+ (IRDMA_DEFAULT_RETRANS -
+ send_entry->retranscount));
+
+ send_entry->timetosend = jiffies +
+ min(timetosend, IRDMA_MAX_TIMEOUT);
+ if (nexttimeout > send_entry->timetosend || !settimer) {
+ nexttimeout = send_entry->timetosend;
+ settimer = 1;
+ }
+ } else {
+ int close_when_complete;
+
+ close_when_complete = send_entry->close_when_complete;
+ irdma_free_retrans_entry(cm_node);
+ if (close_when_complete)
+ irdma_rem_ref_cm_node(cm_node);
+ }
+done:
+ spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+ irdma_rem_ref_cm_node(cm_node);
+ }
+
+ if (settimer) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ if (!timer_pending(&cm_core->tcp_timer)) {
+ cm_core->tcp_timer.expires = nexttimeout;
+ add_timer(&cm_core->tcp_timer);
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
+}
+
+/**
+ * irdma_send_syn - send SYN packet
+ * @cm_node: connection's node
+ * @sendack: flag to set ACK bit or not
+ */
+int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack)
+{
+ struct irdma_puda_buf *sqbuf;
+ int flags = SET_SYN;
+ char optionsbuf[sizeof(struct option_mss) +
+ sizeof(struct option_windowscale) +
+ sizeof(struct option_base) + TCP_OPTIONS_PADDING];
+ struct irdma_kmem_info opts;
+ int optionssize = 0;
+ /* Sending MSS option */
+ union all_known_options *options;
+
+ opts.addr = optionsbuf;
+ if (!cm_node)
+ return -EINVAL;
+
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->mss.optionnum = OPTION_NUM_MSS;
+ options->mss.len = sizeof(struct option_mss);
+ options->mss.mss = htons(cm_node->tcp_cntxt.mss);
+ optionssize += sizeof(struct option_mss);
+
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->windowscale.optionnum = OPTION_NUM_WINDOW_SCALE;
+ options->windowscale.len = sizeof(struct option_windowscale);
+ options->windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
+ optionssize += sizeof(struct option_windowscale);
+ options = (union all_known_options *)&optionsbuf[optionssize];
+ options->eol = OPTION_NUM_EOL;
+ optionssize += 1;
+
+ if (sendack)
+ flags |= SET_ACK;
+
+ opts.size = optionssize;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, &opts, NULL, NULL,
+ flags);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_send_ack - Send ACK packet
+ * @cm_node: connection's node
+ */
+void irdma_send_ack(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+ struct irdma_sc_vsi *vsi = &cm_node->iwdev->vsi;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ SET_ACK);
+ if (sqbuf)
+ irdma_puda_send_buf(vsi->ilq, sqbuf);
+}
+
+/**
+ * irdma_send_fin - Send FIN pkt
+ * @cm_node: connection's node
+ */
+static int irdma_send_fin(struct irdma_cm_node *cm_node)
+{
+ struct irdma_puda_buf *sqbuf;
+
+ sqbuf = cm_node->cm_core->form_cm_frame(cm_node, NULL, NULL, NULL,
+ SET_ACK | SET_FIN);
+ if (!sqbuf)
+ return -ENOMEM;
+
+ return irdma_schedule_cm_timer(cm_node, sqbuf, IRDMA_TIMER_TYPE_SEND, 1,
+ 0);
+}
+
+/**
+ * irdma_find_listener - find a cm node listening on this addr-port pair
+ * @cm_core: cm's core
+ * @dst_addr: listener ip addr
+ * @ipv4: flag indicating IPv4 when true
+ * @dst_port: listener tcp port num
+ * @vlan_id: virtual LAN ID
+ * @listener_state: state to match with listen node's
+ */
+static struct irdma_cm_listener *
+irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, bool ipv4,
+ u16 dst_port, u16 vlan_id,
+ enum irdma_cm_listener_state listener_state)
+{
+ struct irdma_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ u32 listen_addr[4];
+ u16 listen_port;
+ unsigned long flags;
+
+ /* walk list and find cm_node associated with this session ID */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry (listen_node, &cm_core->listen_list, list) {
+ memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
+ listen_port = listen_node->loc_port;
+ if (listen_node->ipv4 != ipv4 || listen_port != dst_port ||
+ !(listener_state & listen_node->listener_state))
+ continue;
+ /* compare node pair, return node handle if a match */
+ if (!memcmp(listen_addr, ip_zero, sizeof(listen_addr)) ||
+ (!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) &&
+ vlan_id == listen_node->vlan_id)) {
+ refcount_inc(&listen_node->refcnt);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock,
+ flags);
+ trace_irdma_find_listener(listen_node);
+ return listen_node;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ return NULL;
+}
+
+/**
+ * irdma_del_multiple_qhash - Remove qhash and child listens
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ */
+static int irdma_del_multiple_qhash(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct irdma_cm_listener *child_listen_node;
+ struct list_head *pos, *tpos;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_for_each_safe (pos, tpos,
+ &cm_parent_listen_node->child_listen_list) {
+ child_listen_node = list_entry(pos, struct irdma_cm_listener,
+ child_listen_list);
+ if (child_listen_node->ipv4)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: removing child listen for IP=%pI4, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: removing child listen for IP=%pI6, port=%d, vlan=%d\n",
+ child_listen_node->loc_addr,
+ child_listen_node->loc_port,
+ child_listen_node->vlan_id);
+ trace_irdma_del_multiple_qhash(child_listen_node);
+ list_del(pos);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ cm_info->vlan_id = child_listen_node->vlan_id;
+ if (child_listen_node->qhash_set) {
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ child_listen_node->qhash_set = false;
+ } else {
+ ret = 0;
+ }
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Child listen node freed = %p\n",
+ child_listen_node);
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
+ }
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+
+ return ret;
+}
+
+static u8 irdma_iw_get_vlan_prio(u32 *loc_addr, u8 prio, bool ipv4)
+{
+ struct net_device *ndev = NULL;
+
+ rcu_read_lock();
+ if (ipv4) {
+ ndev = ip_dev_find(&init_net, htonl(loc_addr[0]));
+ } else if (IS_ENABLED(CONFIG_IPV6)) {
+ struct net_device *ip_dev;
+ struct in6_addr laddr6;
+
+ irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, loc_addr);
+
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ ndev = ip_dev;
+ break;
+ }
+ }
+ }
+
+ if (!ndev)
+ goto done;
+ if (is_vlan_dev(ndev))
+ prio = (vlan_dev_get_egress_qos_mask(ndev, prio) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ if (ipv4)
+ dev_put(ndev);
+
+done:
+ rcu_read_unlock();
+
+ return prio;
+}
+
+/**
+ * irdma_get_vlan_mac_ipv6 - Gets the vlan and mac
+ * @addr: local IPv6 address
+ * @vlan_id: vlan id for the given IPv6 address
+ * @mac: mac address for the given IPv6 address
+ *
+ * Returns the vlan id and mac for an IPv6 address.
+ */
+void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+{
+ struct net_device *ip_dev = NULL;
+ struct in6_addr laddr6;
+
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return;
+
+ irdma_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
+ if (vlan_id)
+ *vlan_id = 0xFFFF; /* Match rdma_vlan_dev_vlan_id() */
+ if (mac)
+ eth_zero_addr(mac);
+
+ rcu_read_lock();
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+ if (vlan_id)
+ *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ if (ip_dev->dev_addr && mac)
+ ether_addr_copy(mac, ip_dev->dev_addr);
+ break;
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * irdma_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
+ * @addr: local IPv4 address
+ */
+u16 irdma_get_vlan_ipv4(u32 *addr)
+{
+ struct net_device *netdev;
+ u16 vlan_id = 0xFFFF;
+
+ netdev = ip_dev_find(&init_net, htonl(addr[0]));
+ if (netdev) {
+ vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ dev_put(netdev);
+ }
+
+ return vlan_id;
+}
+
+/**
+ * irdma_add_mqh_6 - Adds multiple qhashes for IPv6
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv6 address
+ * on the adapter and adds the associated qhash filter
+ */
+static int irdma_add_mqh_6(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ struct irdma_cm_listener *child_listen_node;
+ unsigned long flags;
+ int ret = 0;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, ip_dev) {
+ if (!(ip_dev->flags & IFF_UP))
+ continue;
+
+ if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
+ (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
+ ip_dev != iwdev->netdev)
+ continue;
+
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ ibdev_dbg(&iwdev->ibdev, "CM: idev == NULL\n");
+ break;
+ }
+ list_for_each_entry_safe (ifp, tmp, &idev->addr_list, if_list) {
+ ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI6, vlan_id=%d, MAC=%pM\n",
+ &ifp->addr, rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
+ ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+ irdma_copy_ip_ntohl(child_listen_node->loc_addr,
+ ifp->addr.in6_u.u6_addr32);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ if (!iwdev->vsi.dscp_mode)
+ cm_info->user_pri =
+ irdma_iw_get_vlan_prio(child_listen_node->loc_addr,
+ cm_info->user_pri,
+ false);
+
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (ret) {
+ kfree(child_listen_node);
+ continue;
+ }
+
+ trace_irdma_add_mqh_6(iwdev, child_listen_node,
+ ip_dev->dev_addr);
+
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ }
+ }
+exit:
+ rtnl_unlock();
+
+ return ret;
+}
+
+/**
+ * irdma_add_mqh_4 - Adds multiple qhashes for IPv4
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv4 address
+ * on the adapter and adds the associated qhash filter
+ */
+static int irdma_add_mqh_4(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *ip_dev;
+ struct in_device *idev;
+ struct irdma_cm_listener *child_listen_node;
+ unsigned long flags;
+ const struct in_ifaddr *ifa;
+ int ret = 0;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, ip_dev) {
+ if (!(ip_dev->flags & IFF_UP))
+ continue;
+
+ if (((rdma_vlan_dev_vlan_id(ip_dev) >= VLAN_N_VID) ||
+ (rdma_vlan_dev_real_dev(ip_dev) != iwdev->netdev)) &&
+ ip_dev != iwdev->netdev)
+ continue;
+
+ idev = in_dev_get(ip_dev);
+ if (!idev)
+ continue;
+
+ in_dev_for_each_ifa_rtnl(ifa, idev) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_address, rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ ibdev_dbg(&iwdev->ibdev, "CM: Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
+ in_dev_put(idev);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+ memcpy(child_listen_node, cm_parent_listen_node,
+ sizeof(*child_listen_node));
+ child_listen_node->loc_addr[0] =
+ ntohl(ifa->ifa_address);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+ if (!iwdev->vsi.dscp_mode)
+ cm_info->user_pri =
+ irdma_iw_get_vlan_prio(child_listen_node->loc_addr,
+ cm_info->user_pri,
+ true);
+ ret = irdma_manage_qhash(iwdev, cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (ret) {
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core
+ ->stats_listen_nodes_created--;
+ continue;
+ }
+
+ trace_irdma_add_mqh_4(iwdev, child_listen_node,
+ ip_dev->dev_addr);
+
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock,
+ flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ }
+ in_dev_put(idev);
+ }
+exit:
+ rtnl_unlock();
+
+ return ret;
+}
+
+/**
+ * irdma_add_mqh - Adds multiple qhashes
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_listen_node: The parent listen node
+ */
+static int irdma_add_mqh(struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *cm_listen_node)
+{
+ if (cm_info->ipv4)
+ return irdma_add_mqh_4(iwdev, cm_info, cm_listen_node);
+ else
+ return irdma_add_mqh_6(iwdev, cm_info, cm_listen_node);
+}
+
+/**
+ * irdma_reset_list_prep - add connection nodes slated for reset to list
+ * @cm_core: cm's core
+ * @listener: pointer to listener node
+ * @reset_list: a list to which cm_node will be selected
+ */
+static void irdma_reset_list_prep(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ struct list_head *reset_list)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
+ hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if (cm_node->listener == listener &&
+ !cm_node->accelerated &&
+ refcount_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->reset_entry, reset_list);
+ }
+}
+
+/**
+ * irdma_dec_refcnt_listen - delete listener and associated cm nodes
+ * @cm_core: cm's core
+ * @listener: pointer to listener node
+ * @free_hanging_nodes: to free associated cm_nodes
+ * @apbvt_del: flag to delete the apbvt
+ */
+static int irdma_dec_refcnt_listen(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ int free_hanging_nodes, bool apbvt_del)
+{
+ int err;
+ struct list_head *list_pos;
+ struct list_head *list_temp;
+ struct irdma_cm_node *cm_node;
+ struct list_head reset_list;
+ struct irdma_cm_info nfo;
+ enum irdma_cm_node_state old_state;
+ unsigned long flags;
+
+ trace_irdma_dec_refcnt_listen(listener, __builtin_return_address(0));
+ /* free non-accelerated child nodes for this listener */
+ INIT_LIST_HEAD(&reset_list);
+ if (free_hanging_nodes) {
+ rcu_read_lock();
+ irdma_reset_list_prep(cm_core, listener, &reset_list);
+ rcu_read_unlock();
+ }
+
+ list_for_each_safe (list_pos, list_temp, &reset_list) {
+ cm_node = container_of(list_pos, struct irdma_cm_node,
+ reset_entry);
+ if (cm_node->state >= IRDMA_CM_STATE_FIN_WAIT1) {
+ irdma_rem_ref_cm_node(cm_node);
+ continue;
+ }
+
+ irdma_cleanup_retrans_entry(cm_node);
+ err = irdma_send_reset(cm_node);
+ if (err) {
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: send reset failed\n");
+ } else {
+ old_state = cm_node->state;
+ cm_node->state = IRDMA_CM_STATE_LISTENER_DESTROYED;
+ if (old_state != IRDMA_CM_STATE_MPAREQ_RCVD)
+ irdma_rem_ref_cm_node(cm_node);
+ }
+ }
+
+ if (refcount_dec_and_test(&listener->refcnt)) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_del(&listener->list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ if (apbvt_del)
+ irdma_del_apbvt(listener->iwdev,
+ listener->apbvt_entry);
+ memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
+ nfo.loc_port = listener->loc_port;
+ nfo.ipv4 = listener->ipv4;
+ nfo.vlan_id = listener->vlan_id;
+ nfo.user_pri = listener->user_pri;
+ nfo.qh_qpid = listener->iwdev->vsi.ilq->qp_id;
+
+ if (!list_empty(&listener->child_listen_list)) {
+ irdma_del_multiple_qhash(listener->iwdev, &nfo,
+ listener);
+ } else {
+ if (listener->qhash_set)
+ irdma_manage_qhash(listener->iwdev,
+ &nfo,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE,
+ NULL, false);
+ }
+
+ cm_core->stats_listen_destroyed++;
+ cm_core->stats_listen_nodes_destroyed++;
+ ibdev_dbg(&listener->iwdev->ibdev,
+ "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d apbvt_del=%d\n",
+ listener->loc_port, listener->loc_addr, listener,
+ listener->cm_id, listener->qhash_set,
+ listener->vlan_id, apbvt_del);
+ kfree(listener);
+ listener = NULL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * irdma_cm_del_listen - delete a listener
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @apbvt_del: flag to delete apbvt
+ */
+static int irdma_cm_del_listen(struct irdma_cm_core *cm_core,
+ struct irdma_cm_listener *listener,
+ bool apbvt_del)
+{
+ listener->listener_state = IRDMA_CM_LISTENER_PASSIVE_STATE;
+ listener->cm_id = NULL;
+
+ return irdma_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
+}
+
+/**
+ * irdma_addr_resolve_neigh - resolve neighbor address
+ * @iwdev: iwarp device structure
+ * @src_ip: local ip address
+ * @dst_ip: remote ip address
+ * @arpindex: index of an existing ARP entry, if any
+ */
+static int irdma_addr_resolve_neigh(struct irdma_device *iwdev, u32 src_ip,
+ u32 dst_ip, int arpindex)
+{
+ struct rtable *rt;
+ struct neighbour *neigh;
+ int rc = arpindex;
+ __be32 dst_ipaddr = htonl(dst_ip);
+ __be32 src_ipaddr = htonl(src_ip);
+
+ rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
+ if (IS_ERR(rt)) {
+ ibdev_dbg(&iwdev->ibdev, "CM: ip_route_output fail\n");
+ return -EINVAL;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
+ if (!neigh)
+ goto exit;
+
+ if (neigh->nud_state & NUD_VALID)
+ rc = irdma_add_arp(iwdev->rf, &dst_ip, true, neigh->ha);
+ else
+ neigh_event_send(neigh, NULL);
+ neigh_release(neigh);
+exit:
+ ip_rt_put(rt);
+
+ return rc;
+}
+
+/**
+ * irdma_get_dst_ipv6 - get destination cache entry via ipv6 lookup
+ * @src_addr: local ipv6 sock address
+ * @dst_addr: destination ipv6 sock address
+ */
+static struct dst_entry *irdma_get_dst_ipv6(struct sockaddr_in6 *src_addr,
+ struct sockaddr_in6 *dst_addr)
+{
+ struct dst_entry *dst = NULL;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct flowi6 fl6 = {};
+
+ fl6.daddr = dst_addr->sin6_addr;
+ fl6.saddr = src_addr->sin6_addr;
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = dst_addr->sin6_scope_id;
+
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ }
+
+ return dst;
+}
+
+/**
+ * irdma_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
+ * @iwdev: iwarp device structure
+ * @src: local ip address
+ * @dest: remote ip address
+ * @arpindex: index of an existing ARP entry, if any
+ */
+static int irdma_addr_resolve_neigh_ipv6(struct irdma_device *iwdev, u32 *src,
+ u32 *dest, int arpindex)
+{
+ struct neighbour *neigh;
+ int rc = arpindex;
+ struct dst_entry *dst;
+ struct sockaddr_in6 dst_addr = {};
+ struct sockaddr_in6 src_addr = {};
+
+ dst_addr.sin6_family = AF_INET6;
+ irdma_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
+ src_addr.sin6_family = AF_INET6;
+ irdma_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
+ dst = irdma_get_dst_ipv6(&src_addr, &dst_addr);
+ if (!dst || dst->error) {
+ if (dst) {
+ /* read dst->error before dropping our reference to dst */
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: ip6_route_output returned dst->error = %d\n",
+ dst->error);
+ dst_release(dst);
+ }
+ return -EINVAL;
+ }
+
+ neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);
+ if (!neigh)
+ goto exit;
+
+ ibdev_dbg(&iwdev->ibdev, "CM: dst_neigh_lookup MAC=%pM\n",
+ neigh->ha);
+
+ trace_irdma_addr_resolve(iwdev, neigh->ha);
+
+ if (neigh->nud_state & NUD_VALID)
+ rc = irdma_add_arp(iwdev->rf, dest, false, neigh->ha);
+ else
+ neigh_event_send(neigh, NULL);
+ neigh_release(neigh);
+exit:
+ dst_release(dst);
+
+ return rc;
+}
+
+/**
+ * irdma_find_node - find the cm node matching the given quad info
+ * @cm_core: cm's core
+ * @rem_port: remote tcp port num
+ * @rem_addr: remote ip addr
+ * @loc_port: local tcp port num
+ * @loc_addr: local ip addr
+ * @vlan_id: local VLAN ID
+ */
+struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
+ u16 rem_port, u32 *rem_addr, u16 loc_port,
+ u32 *loc_addr, u16 vlan_id)
+{
+ struct irdma_cm_node *cm_node;
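+ /* bucket key packs the remote port in the upper 16 bits and the local port in the lower 16 */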
+ u32 key = (rem_port << 16) | loc_port;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(cm_core->cm_hash_tbl, cm_node, list, key) {
+ if (cm_node->vlan_id == vlan_id &&
+ cm_node->loc_port == loc_port && cm_node->rem_port == rem_port &&
+ !memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
+ !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr))) {
+ if (!refcount_inc_not_zero(&cm_node->refcnt))
+ goto exit;
+ rcu_read_unlock();
+ trace_irdma_find_node(cm_node, 0, NULL);
+ return cm_node;
+ }
+ }
+
+exit:
+ rcu_read_unlock();
+
+ /* no owner node */
+ return NULL;
+}
+
+/**
+ * irdma_add_hte_node - add a cm node to the hash table
+ * @cm_core: cm's core
+ * @cm_node: connection's node
+ */
+static void irdma_add_hte_node(struct irdma_cm_core *cm_core,
+ struct irdma_cm_node *cm_node)
+{
+ unsigned long flags;
+ u32 key = (cm_node->rem_port << 16) | cm_node->loc_port;
+
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ hash_add_rcu(cm_core->cm_hash_tbl, &cm_node->list, key);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+}
+
+/**
+ * irdma_ipv4_is_lpb - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr)
+{
+ return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
+}
+
+/**
+ * irdma_ipv6_is_lpb - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr)
+{
+ struct in6_addr raddr6;
+
+ irdma_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
+
+ return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
+}
+
+/**
+ * irdma_cm_create_ah - create a cm address handle
+ * @cm_node: The connection manager node to create AH for
+ * @wait: true to wait for AH creation to complete
+ */
+static int irdma_cm_create_ah(struct irdma_cm_node *cm_node, bool wait)
+{
+ struct irdma_ah_info ah_info = {};
+ struct irdma_device *iwdev = cm_node->iwdev;
+
+ ether_addr_copy(ah_info.mac_addr, iwdev->netdev->dev_addr);
+
+ ah_info.hop_ttl = 0x40;
+ ah_info.tc_tos = cm_node->tos;
+ ah_info.vsi = &iwdev->vsi;
+
+ if (cm_node->ipv4) {
+ ah_info.ipv4_valid = true;
+ ah_info.dest_ip_addr[0] = cm_node->rem_addr[0];
+ ah_info.src_ip_addr[0] = cm_node->loc_addr[0];
+ ah_info.do_lpbk = irdma_ipv4_is_lpb(ah_info.src_ip_addr[0],
+ ah_info.dest_ip_addr[0]);
+ } else {
+ memcpy(ah_info.dest_ip_addr, cm_node->rem_addr,
+ sizeof(ah_info.dest_ip_addr));
+ memcpy(ah_info.src_ip_addr, cm_node->loc_addr,
+ sizeof(ah_info.src_ip_addr));
+ ah_info.do_lpbk = irdma_ipv6_is_lpb(ah_info.src_ip_addr,
+ ah_info.dest_ip_addr);
+ }
+
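+ /* when a VLAN tag will be inserted, encode the user priority in its PCP bits */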
+ ah_info.vlan_tag = cm_node->vlan_id;
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ ah_info.insert_vlan_tag = 1;
+ ah_info.vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
+ }
+
+ ah_info.dst_arpindex =
+ irdma_arp_table(iwdev->rf, ah_info.dest_ip_addr,
+ ah_info.ipv4_valid, NULL, IRDMA_ARP_RESOLVE);
+
+ if (irdma_puda_create_ah(&iwdev->rf->sc_dev, &ah_info, wait,
+ IRDMA_PUDA_RSRC_TYPE_ILQ, cm_node,
+ &cm_node->ah))
+ return -ENOMEM;
+
+ trace_irdma_create_ah(cm_node);
+ return 0;
+}
+
+/**
+ * irdma_cm_free_ah - free a cm address handle
+ * @cm_node: The connection manager node to free the AH for
+ */
+static void irdma_cm_free_ah(struct irdma_cm_node *cm_node)
+{
+ struct irdma_device *iwdev = cm_node->iwdev;
+
+ trace_irdma_cm_free_ah(cm_node);
+ irdma_puda_free_ah(&iwdev->rf->sc_dev, cm_node->ah);
+ cm_node->ah = NULL;
+}
+
+/**
+ * irdma_make_cm_node - create a new instance of a cm node
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ * @listener: passive connection's listener
+ */
+static struct irdma_cm_node *
+irdma_make_cm_node(struct irdma_cm_core *cm_core, struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_listener *listener)
+{
+ struct irdma_cm_node *cm_node;
+ int oldarpindex;
+ int arpindex;
+ struct net_device *netdev = iwdev->netdev;
+
+ /* create an hte and cm_node for this instance */
+ cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+ if (!cm_node)
+ return NULL;
+
+ /* set our node specific transport info */
+ cm_node->ipv4 = cm_info->ipv4;
+ cm_node->vlan_id = cm_info->vlan_id;
+ if (cm_node->vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ cm_node->vlan_id = 0;
+ cm_node->tos = cm_info->tos;
+ cm_node->user_pri = cm_info->user_pri;
+ if (listener) {
+ if (listener->tos != cm_info->tos)
+ ibdev_warn(&iwdev->ibdev,
+ "application TOS[%d] and remote client TOS[%d] mismatch\n",
+ listener->tos, cm_info->tos);
+ if (iwdev->vsi.dscp_mode) {
+ cm_node->user_pri = listener->user_pri;
+ } else {
+ cm_node->tos = max(listener->tos, cm_info->tos);
+ cm_node->user_pri = rt_tos2priority(cm_node->tos);
+ cm_node->user_pri =
+ irdma_iw_get_vlan_prio(cm_info->loc_addr,
+ cm_node->user_pri,
+ cm_info->ipv4);
+ }
+ ibdev_dbg(&iwdev->ibdev,
+ "DCB: listener: TOS:[%d] UP:[%d]\n", cm_node->tos,
+ cm_node->user_pri);
+ trace_irdma_listener_tos(iwdev, cm_node->tos,
+ cm_node->user_pri);
+ }
+ memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
+ memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
+ cm_node->loc_port = cm_info->loc_port;
+ cm_node->rem_port = cm_info->rem_port;
+
+ cm_node->mpa_frame_rev = IRDMA_CM_DEFAULT_MPA_VER;
+ cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+ cm_node->iwdev = iwdev;
+ cm_node->dev = &iwdev->rf->sc_dev;
+
+ cm_node->ird_size = cm_node->dev->hw_attrs.max_hw_ird;
+ cm_node->ord_size = cm_node->dev->hw_attrs.max_hw_ord;
+
+ cm_node->listener = listener;
+ cm_node->cm_id = cm_info->cm_id;
+ ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
+ spin_lock_init(&cm_node->retrans_list_lock);
+ cm_node->ack_rcvd = false;
+
+ init_completion(&cm_node->establish_comp);
+ refcount_set(&cm_node->refcnt, 1);
+ /* associate our parent CM core */
+ cm_node->cm_core = cm_core;
+ cm_node->tcp_cntxt.loc_id = IRDMA_CM_DEFAULT_LOCAL_ID;
+ cm_node->tcp_cntxt.rcv_wscale = iwdev->rcv_wscale;
+ cm_node->tcp_cntxt.rcv_wnd = iwdev->rcv_wnd >> cm_node->tcp_cntxt.rcv_wscale;
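+ /* use the kernel's secure ISN generation for the initial local sequence number */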
+ if (cm_node->ipv4) {
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]),
+ htonl(cm_node->rem_addr[0]),
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV4;
+ } else if (IS_ENABLED(CONFIG_IPV6)) {
+ __be32 loc[4] = {
+ htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]),
+ htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3])
+ };
+ __be32 rem[4] = {
+ htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]),
+ htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3])
+ };
+ cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem,
+ htons(cm_node->loc_port),
+ htons(cm_node->rem_port));
+ cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6;
+ }
+
+ if ((cm_node->ipv4 &&
+ irdma_ipv4_is_lpb(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+ (!cm_node->ipv4 &&
+ irdma_ipv6_is_lpb(cm_node->loc_addr, cm_node->rem_addr))) {
+ cm_node->do_lpb = true;
+ arpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
+ cm_node->ipv4, NULL,
+ IRDMA_ARP_RESOLVE);
+ } else {
+ oldarpindex = irdma_arp_table(iwdev->rf, cm_node->rem_addr,
+ cm_node->ipv4, NULL,
+ IRDMA_ARP_RESOLVE);
+ if (cm_node->ipv4)
+ arpindex = irdma_addr_resolve_neigh(iwdev,
+ cm_info->loc_addr[0],
+ cm_info->rem_addr[0],
+ oldarpindex);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ arpindex = irdma_addr_resolve_neigh_ipv6(iwdev,
+ cm_info->loc_addr,
+ cm_info->rem_addr,
+ oldarpindex);
+ else
+ arpindex = -EINVAL;
+ }
+
+ if (arpindex < 0)
+ goto err;
+
+ ether_addr_copy(cm_node->rem_mac,
+ iwdev->rf->arp_table[arpindex].mac_addr);
+ irdma_add_hte_node(cm_core, cm_node);
+ cm_core->stats_nodes_created++;
+ return cm_node;
+
+err:
+ kfree(cm_node);
+
+ return NULL;
+}
+
+static void irdma_destroy_connection(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ struct irdma_qp *iwqp;
+ struct irdma_cm_info nfo;
+
+ /* if the node is destroyed before connection was accelerated */
+ if (!cm_node->accelerated && cm_node->accept_pend) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: node destroyed before established\n");
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ }
+ if (cm_node->close_entry)
+ irdma_handle_close_entry(cm_node, 0);
+ if (cm_node->listener) {
+ irdma_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
+ } else {
+ if (cm_node->apbvt_set) {
+ irdma_del_apbvt(cm_node->iwdev, cm_node->apbvt_entry);
+ cm_node->apbvt_set = 0;
+ }
+ irdma_get_addr_info(cm_node, &nfo);
+ if (cm_node->qhash_set) {
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL,
+ false);
+ cm_node->qhash_set = 0;
+ }
+ }
+
+ iwqp = cm_node->iwqp;
+ if (iwqp) {
+ cm_node->cm_id->rem_ref(cm_node->cm_id);
+ cm_node->cm_id = NULL;
+ iwqp->cm_id = NULL;
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ cm_node->iwqp = NULL;
+ } else if (cm_node->qhash_set) {
+ irdma_get_addr_info(cm_node, &nfo);
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+ cm_node->qhash_set = 0;
+ }
+
+ cm_core->cm_free_ah(cm_node);
+}
+
+/**
+ * irdma_rem_ref_cm_node - destroy an instance of a cm node
+ * @cm_node: connection's node
+ */
+void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_core *cm_core = cm_node->cm_core;
+ unsigned long flags;
+
+ trace_irdma_rem_ref_cm_node(cm_node, 0, __builtin_return_address(0));
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+ if (!refcount_dec_and_test(&cm_node->refcnt)) {
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ return;
+ }
+ if (cm_node->iwqp) {
+ cm_node->iwqp->cm_node = NULL;
+ cm_node->iwqp->cm_id = NULL;
+ }
+ hash_del_rcu(&cm_node->list);
+ cm_node->cm_core->stats_nodes_destroyed++;
+
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+ irdma_destroy_connection(cm_node);
+
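+ /* defer the free until RCU readers walking the connection hash table are done */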
+ kfree_rcu(cm_node, rcu_head);
+}
+
+/**
+ * irdma_handle_fin_pkt - FIN packet received
+ * @cm_node: connection's node
+ */
+static void irdma_handle_fin_pkt(struct irdma_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_send_fin(cm_node);
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ irdma_create_event(cm_node, IRDMA_CM_EVENT_ABORTED);
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSING;
+ irdma_send_ack(cm_node);
+ /*
+ * Wait for ACK as this is simultaneous close.
+ * After we receive ACK, do not send anything.
+ * Just remove the node.
+ */
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_TIME_WAIT;
+ irdma_send_ack(cm_node);
+ irdma_schedule_cm_timer(cm_node, NULL, IRDMA_TIMER_TYPE_CLOSE,
+ 1, 0);
+ break;
+ case IRDMA_CM_STATE_TIME_WAIT:
+ cm_node->tcp_cntxt.rcv_nxt++;
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: bad state node state = %d\n", cm_node->state);
+ break;
+ }
+}
+
+/**
+ * irdma_handle_rst_pkt - process received RST packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_rst_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: caller: %pS cm_node=%p state=%d rem_port=0x%04x loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4\n",
+ __builtin_return_address(0), cm_node, cm_node->state,
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr);
+
+ irdma_cleanup_retrans_entry(cm_node);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ switch (cm_node->mpa_frame_rev) {
+ case IETF_MPA_V2:
+ /* Drop down to MPA_V1*/
+ cm_node->mpa_frame_rev = IETF_MPA_V1;
+ /* send a syn and goto syn sent state */
+ cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ if (irdma_send_syn(cm_node, 0))
+ irdma_active_open_err(cm_node, false);
+ break;
+ case IETF_MPA_V1:
+ default:
+ irdma_active_open_err(cm_node, false);
+ break;
+ }
+ break;
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ atomic_inc(&cm_node->passive_state);
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_passive_open_err(cm_node, false);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ irdma_active_open_err(cm_node, false);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_TIME_WAIT:
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_rcv_mpa - Process a received MPA buffer
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_rcv_mpa(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ int err;
+ int datasize = rbuf->datalen;
+ u8 *dataloc = rbuf->data;
+
+ enum irdma_cm_event_type type = IRDMA_CM_EVENT_UNKNOWN;
+ u32 res_type;
+
+ err = irdma_parse_mpa(cm_node, dataloc, &res_type, datasize);
+ if (err) {
+ if (cm_node->state == IRDMA_CM_STATE_MPAREQ_SENT)
+ irdma_active_open_err(cm_node, true);
+ else
+ irdma_passive_open_err(cm_node, true);
+ return;
+ }
+
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_ESTABLISHED:
+ if (res_type == IRDMA_MPA_REQUEST_REJECT)
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: state for reject\n");
+ cm_node->state = IRDMA_CM_STATE_MPAREQ_RCVD;
+ type = IRDMA_CM_EVENT_MPA_REQ;
+ irdma_send_ack(cm_node); /* ACK received MPA request */
+ atomic_set(&cm_node->passive_state,
+ IRDMA_PASSIVE_STATE_INDICATED);
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ irdma_cleanup_retrans_entry(cm_node);
+ if (res_type == IRDMA_MPA_REQUEST_REJECT) {
+ type = IRDMA_CM_EVENT_MPA_REJECT;
+ cm_node->state = IRDMA_CM_STATE_MPAREJ_RCVD;
+ } else {
+ type = IRDMA_CM_EVENT_CONNECTED;
+ cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ }
+ irdma_send_ack(cm_node);
+ break;
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: wrong cm_node state =%d\n", cm_node->state);
+ break;
+ }
+ irdma_create_event(cm_node, type);
+}
+
+/**
+ * irdma_check_syn - Check for error on received syn ack
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ */
+static int irdma_check_syn(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
+{
+ if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
+ irdma_active_open_err(cm_node, true);
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_check_seq - check if sequence numbers are OK
+ * @cm_node: connection's node
+ * @tcph: pointer to tcp header
+ */
+static int irdma_check_seq(struct irdma_cm_node *cm_node, struct tcphdr *tcph)
+{
+ u32 seq;
+ u32 ack_seq;
+ u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
+ u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ u32 rcv_wnd;
+ int err = 0;
+
+ seq = ntohl(tcph->seq);
+ ack_seq = ntohl(tcph->ack_seq);
+ rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ if (ack_seq != loc_seq_num ||
+ !between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
+ err = -1;
+ if (err)
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: seq number err\n");
+
+ return err;
+}
+
+void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node)
+{
+ struct irdma_cm_info nfo;
+
+ irdma_get_addr_info(cm_node, &nfo);
+ nfo.qh_qpid = cm_node->iwdev->vsi.ilq->qp_id;
+ irdma_manage_qhash(cm_node->iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ cm_node, false);
+ cm_node->qhash_set = true;
+}
+
+/**
+ * irdma_handle_syn_pkt - process a received SYN packet (passive side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_syn_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int err;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ /* Rcvd syn on active open connection */
+ irdma_active_open_err(cm_node, 1);
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ /* Passive OPEN */
+ if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+ cm_node->listener->backlog) {
+ cm_node->cm_core->stats_backlog_drops++;
+ irdma_passive_open_err(cm_node, false);
+ break;
+ }
+ err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (err) {
+ irdma_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ err = cm_node->cm_core->cm_create_ah(cm_node, false);
+ if (err) {
+ irdma_passive_open_err(cm_node, false);
+ /* drop pkt */
+ break;
+ }
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ cm_node->accept_pend = 1;
+ atomic_inc(&cm_node->listener->pend_accepts_cnt);
+
+ cm_node->state = IRDMA_CM_STATE_SYN_RCVD;
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ irdma_cleanup_retrans_entry(cm_node);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_CLOSING:
+ case IRDMA_CM_STATE_UNKNOWN:
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_synack_pkt - Process SYN+ACK packet (active side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_handle_synack_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ int err;
+ u32 inc_sequence;
+ int optionsize;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_SENT:
+ irdma_cleanup_retrans_entry(cm_node);
+ /* active open */
+ if (irdma_check_syn(cm_node, tcph)) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: check syn fail\n");
+ return;
+ }
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ /* setup options */
+ err = irdma_handle_tcp_options(cm_node, tcph, optionsize, 0);
+ if (err) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node=%p tcp_options failed\n",
+ cm_node);
+ break;
+ }
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+ irdma_send_ack(cm_node); /* ACK for the syn_ack */
+ err = irdma_send_mpa_request(cm_node);
+ if (err) {
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: cm_node=%p irdma_send_mpa_request failed\n",
+ cm_node);
+ break;
+ }
+ cm_node->state = IRDMA_CM_STATE_MPAREQ_SENT;
+ break;
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ irdma_passive_open_err(cm_node, true);
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+ irdma_cleanup_retrans_entry(cm_node);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_CLOSING:
+ case IRDMA_CM_STATE_UNKNOWN:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_handle_ack_pkt - process packet with ACK
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static int irdma_handle_ack_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 inc_sequence;
+ int ret;
+ int optionsize;
+ u32 datasize = rbuf->datalen;
+
+ optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+
+ if (irdma_check_seq(cm_node, tcph))
+ return -EINVAL;
+
+ inc_sequence = ntohl(tcph->seq);
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ irdma_cleanup_retrans_entry(cm_node);
+ ret = irdma_handle_tcp_options(cm_node, tcph, optionsize, 1);
+ if (ret)
+ return ret;
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ cm_node->state = IRDMA_CM_STATE_ESTABLISHED;
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case IRDMA_CM_STATE_ESTABLISHED:
+ irdma_cleanup_retrans_entry(cm_node);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ }
+ break;
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+ if (datasize) {
+ cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ cm_node->ack_rcvd = false;
+ irdma_handle_rcv_mpa(cm_node, rbuf);
+ } else {
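+ /* bare ACK (no MPA data) received while waiting for the MPA reply */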
+ cm_node->ack_rcvd = true;
+ }
+ break;
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSED:
+ irdma_cleanup_retrans_entry(cm_node);
+ refcount_inc(&cm_node->refcnt);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_CLOSING:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ irdma_cleanup_retrans_entry(cm_node);
+ cm_node->state = IRDMA_CM_STATE_FIN_WAIT2;
+ break;
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_OFFLOADED:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ case IRDMA_CM_STATE_UNKNOWN:
+ default:
+ irdma_cleanup_retrans_entry(cm_node);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_process_pkt - process cm packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void irdma_process_pkt(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *rbuf)
+{
+ enum irdma_tcpip_pkt_type pkt_type = IRDMA_PKT_TYPE_UNKNOWN;
+ struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+ u32 fin_set = 0;
+ int err;
+
+ if (tcph->rst) {
+ pkt_type = IRDMA_PKT_TYPE_RST;
+ } else if (tcph->syn) {
+ pkt_type = IRDMA_PKT_TYPE_SYN;
+ if (tcph->ack)
+ pkt_type = IRDMA_PKT_TYPE_SYNACK;
+ } else if (tcph->ack) {
+ pkt_type = IRDMA_PKT_TYPE_ACK;
+ }
+ if (tcph->fin)
+ fin_set = 1;
+
+ switch (pkt_type) {
+ case IRDMA_PKT_TYPE_SYN:
+ irdma_handle_syn_pkt(cm_node, rbuf);
+ break;
+ case IRDMA_PKT_TYPE_SYNACK:
+ irdma_handle_synack_pkt(cm_node, rbuf);
+ break;
+ case IRDMA_PKT_TYPE_ACK:
+ err = irdma_handle_ack_pkt(cm_node, rbuf);
+ if (fin_set && !err)
+ irdma_handle_fin_pkt(cm_node);
+ break;
+ case IRDMA_PKT_TYPE_RST:
+ irdma_handle_rst_pkt(cm_node, rbuf);
+ break;
+ default:
+ if (fin_set &&
+ (!irdma_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
+ irdma_handle_fin_pkt(cm_node);
+ break;
+ }
+}
+
+/**
+ * irdma_make_listen_node - create a listen node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ */
+static struct irdma_cm_listener *
+irdma_make_listen_node(struct irdma_cm_core *cm_core,
+ struct irdma_device *iwdev,
+ struct irdma_cm_info *cm_info)
+{
+ struct irdma_cm_listener *listener;
+ unsigned long flags;
+
+ /* cannot have multiple matching listeners */
+ listener =
+ irdma_find_listener(cm_core, cm_info->loc_addr, cm_info->ipv4,
+ cm_info->loc_port, cm_info->vlan_id,
+ IRDMA_CM_LISTENER_EITHER_STATE);
+ if (listener &&
+ listener->listener_state == IRDMA_CM_LISTENER_ACTIVE_STATE) {
+ refcount_dec(&listener->refcnt);
+ return NULL;
+ }
+
+ if (!listener) {
+ /* create a CM listen node
+ * 1/2 node to compare incoming traffic to
+ */
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return NULL;
+ cm_core->stats_listen_nodes_created++;
+ memcpy(listener->loc_addr, cm_info->loc_addr,
+ sizeof(listener->loc_addr));
+ listener->loc_port = cm_info->loc_port;
+
+ INIT_LIST_HEAD(&listener->child_listen_list);
+
+ refcount_set(&listener->refcnt, 1);
+ } else {
+ listener->reused_node = 1;
+ }
+
+ listener->cm_id = cm_info->cm_id;
+ listener->ipv4 = cm_info->ipv4;
+ listener->vlan_id = cm_info->vlan_id;
+ atomic_set(&listener->pend_accepts_cnt, 0);
+ listener->cm_core = cm_core;
+ listener->iwdev = iwdev;
+
+ listener->backlog = cm_info->backlog;
+ listener->listener_state = IRDMA_CM_LISTENER_ACTIVE_STATE;
+
+ if (!listener->reused_node) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_add(&listener->list, &cm_core->listen_list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+
+ return listener;
+}
+
+/**
+ * irdma_create_cm_node - make a connection node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @conn_param: connection parameters
+ * @cm_info: quad info for connection
+ * @caller_cm_node: pointer to cm_node structure to return
+ */
+static int irdma_create_cm_node(struct irdma_cm_core *cm_core,
+ struct irdma_device *iwdev,
+ struct iw_cm_conn_param *conn_param,
+ struct irdma_cm_info *cm_info,
+ struct irdma_cm_node **caller_cm_node)
+{
+ struct irdma_cm_node *cm_node;
+ u16 private_data_len = conn_param->private_data_len;
+ const void *private_data = conn_param->private_data;
+
+ /* create a CM connection node */
+ cm_node = irdma_make_cm_node(cm_core, iwdev, cm_info, NULL);
+ if (!cm_node)
+ return -ENOMEM;
+
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ cm_node->tcp_cntxt.rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+
+ irdma_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+
+ cm_node->pdata.size = private_data_len;
+ cm_node->pdata.addr = cm_node->pdata_buf;
+
+ memcpy(cm_node->pdata_buf, private_data, private_data_len);
+ *caller_cm_node = cm_node;
+
+ return 0;
+}
+
+/**
+ * irdma_cm_reject - reject and teardown a connection
+ * @cm_node: connection's node
+ * @pdata: ptr to private data for reject
+ * @plen: size of private data
+ */
+static int irdma_cm_reject(struct irdma_cm_node *cm_node, const void *pdata,
+ u8 plen)
+{
+ int ret;
+ int passive_state;
+
+ if (cm_node->tcp_cntxt.client)
+ return 0;
+
+ irdma_cleanup_retrans_entry(cm_node);
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == IRDMA_SEND_RESET_EVENT) {
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+
+ if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
+ irdma_rem_ref_cm_node(cm_node);
+ return 0;
+ }
+
+ ret = irdma_send_mpa_reject(cm_node, pdata, plen);
+ if (!ret)
+ return 0;
+
+ cm_node->state = IRDMA_CM_STATE_CLOSED;
+ if (irdma_send_reset(cm_node))
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: send reset failed\n");
+
+ return ret;
+}
+
+/**
+ * irdma_cm_close - close of cm connection
+ * @cm_node: connection's node
+ */
+static int irdma_cm_close(struct irdma_cm_node *cm_node)
+{
+ switch (cm_node->state) {
+ case IRDMA_CM_STATE_SYN_RCVD:
+ case IRDMA_CM_STATE_SYN_SENT:
+ case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
+ case IRDMA_CM_STATE_ESTABLISHED:
+ case IRDMA_CM_STATE_ACCEPTING:
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ irdma_cleanup_retrans_entry(cm_node);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_CLOSE_WAIT:
+ cm_node->state = IRDMA_CM_STATE_LAST_ACK;
+ irdma_send_fin(cm_node);
+ break;
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ case IRDMA_CM_STATE_LAST_ACK:
+ case IRDMA_CM_STATE_TIME_WAIT:
+ case IRDMA_CM_STATE_CLOSING:
+ return -EINVAL;
+ case IRDMA_CM_STATE_LISTENING:
+ irdma_cleanup_retrans_entry(cm_node);
+ irdma_send_reset(cm_node);
+ break;
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ case IRDMA_CM_STATE_UNKNOWN:
+ case IRDMA_CM_STATE_INITED:
+ case IRDMA_CM_STATE_CLOSED:
+ case IRDMA_CM_STATE_LISTENER_DESTROYED:
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ case IRDMA_CM_STATE_OFFLOADED:
+ if (cm_node->send_entry)
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: CM send_entry in OFFLOADED state\n");
+ irdma_rem_ref_cm_node(cm_node);
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_receive_ilq - receive an Ethernet packet and process it
+ * through the CM
+ * @vsi: VSI structure of dev
+ * @rbuf: receive buffer
+ */
+void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
+{
+ struct irdma_cm_node *cm_node;
+ struct irdma_cm_listener *listener;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ struct irdma_cm_info cm_info = {};
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct vlan_ethhdr *ethh;
+ u16 vtag;
+
+ /* if vlan, then maclen = 18 else 14 */
+ iph = (struct iphdr *)rbuf->iph;
+ print_hex_dump_debug("ILQ: RECEIVE ILQ BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, rbuf->mem.va, rbuf->totallen, false);
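+ /* GEN_2 hardware reports VLAN info in the buffer descriptor;
+ * GEN_1 must parse the Ethernet header for a tag
+ */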
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (rbuf->vlan_valid) {
+ vtag = rbuf->vlan_id;
+ cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & VLAN_VID_MASK;
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ }
+ } else {
+ ethh = rbuf->mem.va;
+
+ if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
+ vtag = ntohs(ethh->h_vlan_TCI);
+ cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ cm_info.vlan_id = vtag & VLAN_VID_MASK;
+ ibdev_dbg(&cm_core->iwdev->ibdev,
+ "CM: vlan_id=%d\n", cm_info.vlan_id);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ }
+ }
+ tcph = (struct tcphdr *)rbuf->tcph;
+
+ if (rbuf->ipv4) {
+ cm_info.loc_addr[0] = ntohl(iph->daddr);
+ cm_info.rem_addr[0] = ntohl(iph->saddr);
+ cm_info.ipv4 = true;
+ cm_info.tos = iph->tos;
+ } else {
+ ip6h = (struct ipv6hdr *)rbuf->iph;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ ip6h->daddr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(cm_info.rem_addr,
+ ip6h->saddr.in6_u.u6_addr32);
+ cm_info.ipv4 = false;
+ cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
+ }
+ cm_info.loc_port = ntohs(tcph->dest);
+ cm_info.rem_port = ntohs(tcph->source);
+ cm_node = irdma_find_node(cm_core, cm_info.rem_port, cm_info.rem_addr,
+ cm_info.loc_port, cm_info.loc_addr, cm_info.vlan_id);
+
+ if (!cm_node) {
+ /* Only type of packet accepted are for the
+ * PASSIVE open (syn only)
+ */
+ if (!tcph->syn || tcph->ack)
+ return;
+
+ listener = irdma_find_listener(cm_core,
+ cm_info.loc_addr,
+ cm_info.ipv4,
+ cm_info.loc_port,
+ cm_info.vlan_id,
+ IRDMA_CM_LISTENER_ACTIVE_STATE);
+ if (!listener) {
+ cm_info.cm_id = NULL;
+ ibdev_dbg(&cm_core->iwdev->ibdev,
+ "CM: no listener found\n");
+ return;
+ }
+
+ cm_info.cm_id = listener->cm_id;
+ cm_node = irdma_make_cm_node(cm_core, iwdev, &cm_info,
+ listener);
+ if (!cm_node) {
+ ibdev_dbg(&cm_core->iwdev->ibdev,
+ "CM: allocate node failed\n");
+ refcount_dec(&listener->refcnt);
+ return;
+ }
+
+ if (!tcph->rst && !tcph->fin) {
+ cm_node->state = IRDMA_CM_STATE_LISTENING;
+ } else {
+ irdma_rem_ref_cm_node(cm_node);
+ return;
+ }
+
+ refcount_inc(&cm_node->refcnt);
+ } else if (cm_node->state == IRDMA_CM_STATE_OFFLOADED) {
+ irdma_rem_ref_cm_node(cm_node);
+ return;
+ }
+
+ irdma_process_pkt(cm_node, rbuf);
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+static int irdma_add_qh(struct irdma_cm_node *cm_node, bool active)
+{
+ if (!active)
+ irdma_add_conn_est_qh(cm_node);
+ return 0;
+}
+
+static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
+{
+}
+
+/**
+ * irdma_setup_cm_core - setup top level instance of a cm core
+ * @iwdev: iwarp device structure
+ * @rdma_ver: HW version
+ */
+int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+
+ cm_core->iwdev = iwdev;
+ cm_core->dev = &iwdev->rf->sc_dev;
+
+ /* Handles CM event work items sent to the iWARP core */
+ cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
+ if (!cm_core->event_wq)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cm_core->listen_list);
+
+ timer_setup(&cm_core->tcp_timer, irdma_cm_timer_tick, 0);
+
+ spin_lock_init(&cm_core->ht_lock);
+ spin_lock_init(&cm_core->listen_list_lock);
+ spin_lock_init(&cm_core->apbvt_lock);
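+ /* GEN_1 has no AH support for CM frames: use UDA frames and a
+ * conn-established qhash on the passive side; GEN_2 and later
+ * build AH-based CM frames
+ */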
+ switch (rdma_ver) {
+ case IRDMA_GEN_1:
+ cm_core->form_cm_frame = irdma_form_uda_cm_frame;
+ cm_core->cm_create_ah = irdma_add_qh;
+ cm_core->cm_free_ah = irdma_cm_free_ah_nop;
+ break;
+ case IRDMA_GEN_2:
+ default:
+ cm_core->form_cm_frame = irdma_form_ah_cm_frame;
+ cm_core->cm_create_ah = irdma_cm_create_ah;
+ cm_core->cm_free_ah = irdma_cm_free_ah;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_cleanup_cm_core - deallocate a top level instance of a
+ * cm core
+ * @cm_core: cm's core
+ */
+void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core)
+{
+ if (!cm_core)
+ return;
+
+ del_timer_sync(&cm_core->tcp_timer);
+
+ destroy_workqueue(cm_core->event_wq);
+ cm_core->dev->ws_reset(&cm_core->iwdev->vsi);
+}
+
+/**
+ * irdma_init_tcp_ctx - setup qp context
+ * @cm_node: connection's node
+ * @tcp_info: offload info for tcp
+ * @iwqp: associate qp for the connection
+ */
+static void irdma_init_tcp_ctx(struct irdma_cm_node *cm_node,
+ struct irdma_tcp_offload_info *tcp_info,
+ struct irdma_qp *iwqp)
+{
+ tcp_info->ipv4 = cm_node->ipv4;
+ tcp_info->drop_ooo_seg = !iwqp->iwdev->iw_ooo;
+ tcp_info->wscale = true;
+ tcp_info->ignore_tcp_opt = true;
+ tcp_info->ignore_tcp_uns_opt = true;
+ tcp_info->no_nagle = false;
+
+ tcp_info->ttl = IRDMA_DEFAULT_TTL;
+ tcp_info->rtt_var = IRDMA_DEFAULT_RTT_VAR;
+ tcp_info->ss_thresh = IRDMA_DEFAULT_SS_THRESH;
+ tcp_info->rexmit_thresh = IRDMA_DEFAULT_REXMIT_THRESH;
+
+ tcp_info->tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
+ tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->snd_nxt = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+ tcp_info->rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+ tcp_info->snd_max = cm_node->tcp_cntxt.loc_seq_num;
+
+ tcp_info->snd_una = cm_node->tcp_cntxt.loc_seq_num;
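+ /* start with an initial congestion window of two segments */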
+ tcp_info->cwnd = 2 * cm_node->tcp_cntxt.mss;
+ tcp_info->snd_wl1 = cm_node->tcp_cntxt.rcv_nxt;
+ tcp_info->snd_wl2 = cm_node->tcp_cntxt.loc_seq_num;
+ tcp_info->max_snd_window = cm_node->tcp_cntxt.max_snd_wnd;
+ tcp_info->rcv_wnd = cm_node->tcp_cntxt.rcv_wnd
+ << cm_node->tcp_cntxt.rcv_wscale;
+
+ tcp_info->flow_label = 0;
+ tcp_info->snd_mss = (u32)cm_node->tcp_cntxt.mss;
+ tcp_info->tos = cm_node->tos;
+ if (cm_node->vlan_id < VLAN_N_VID) {
+ tcp_info->insert_vlan_tag = true;
+ tcp_info->vlan_tag = cm_node->vlan_id;
+ tcp_info->vlan_tag |= cm_node->user_pri << VLAN_PRIO_SHIFT;
+ }
+ if (cm_node->ipv4) {
+ tcp_info->src_port = cm_node->loc_port;
+ tcp_info->dst_port = cm_node->rem_port;
+
+ tcp_info->dest_ip_addr[3] = cm_node->rem_addr[0];
+ tcp_info->local_ipaddr[3] = cm_node->loc_addr[0];
+ tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf,
+ &tcp_info->dest_ip_addr[3],
+ true, NULL,
+ IRDMA_ARP_RESOLVE);
+ } else {
+ tcp_info->src_port = cm_node->loc_port;
+ tcp_info->dst_port = cm_node->rem_port;
+ memcpy(tcp_info->dest_ip_addr, cm_node->rem_addr,
+ sizeof(tcp_info->dest_ip_addr));
+ memcpy(tcp_info->local_ipaddr, cm_node->loc_addr,
+ sizeof(tcp_info->local_ipaddr));
+
+ tcp_info->arp_idx = (u16)irdma_arp_table(iwqp->iwdev->rf,
+ &tcp_info->dest_ip_addr[0],
+ false, NULL,
+ IRDMA_ARP_RESOLVE);
+ }
+}
+
+/**
+ * irdma_cm_init_tsa_conn - setup qp for RTS
+ * @iwqp: associate qp for the connection
+ * @cm_node: connection's node
+ */
+static void irdma_cm_init_tsa_conn(struct irdma_qp *iwqp,
+ struct irdma_cm_node *cm_node)
+{
+ struct irdma_iwarp_offload_info *iwarp_info;
+ struct irdma_qp_host_ctx_info *ctx_info;
+
+ iwarp_info = &iwqp->iwarp_info;
+ ctx_info = &iwqp->ctx_info;
+
+ ctx_info->tcp_info = &iwqp->tcp_info;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+ iwarp_info->ord_size = cm_node->ord_size;
+ iwarp_info->ird_size = cm_node->ird_size;
+ iwarp_info->rd_en = true;
+ iwarp_info->rdmap_ver = 1;
+ iwarp_info->ddp_ver = 1;
+ iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
+
+ ctx_info->tcp_info_valid = true;
+ ctx_info->iwarp_info_valid = true;
+ ctx_info->user_pri = cm_node->user_pri;
+
+ irdma_init_tcp_ctx(cm_node, &iwqp->tcp_info, iwqp);
+ if (cm_node->snd_mark_en) {
+ iwarp_info->snd_mark_en = true;
+ iwarp_info->snd_mark_offset = (iwqp->tcp_info.snd_nxt & SNDMARKER_SEQNMASK) +
+ cm_node->lsmm_size;
+ }
+
+ cm_node->state = IRDMA_CM_STATE_OFFLOADED;
+ iwqp->tcp_info.tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ iwqp->tcp_info.src_mac_addr_idx = iwqp->iwdev->mac_ip_table_idx;
+
+ if (cm_node->rcv_mark_en) {
+ iwarp_info->rcv_mark_en = true;
+ iwarp_info->align_hdrs = true;
+ }
+
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+
+ /* once tcp_info is set, no need to do it again */
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = false;
+}
+
+/**
+ * irdma_cm_disconn - schedule disconnect work when a connection is being closed
+ * @iwqp: associated qp for the connection
+ */
+void irdma_cm_disconn(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct disconn_work *work;
+ unsigned long flags;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
+ if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) {
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: qp_id %d is already freed\n",
+ iwqp->ibqp.qp_num);
+ kfree(work);
+ return;
+ }
+ irdma_qp_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+
+ work->iwqp = iwqp;
+ INIT_WORK(&work->work, irdma_disconnect_worker);
+ queue_work(iwdev->cleanup_wq, &work->work);
+}
+
+/**
+ * irdma_qp_disconnect - free qp and close cm
+ * @iwqp: associate qp for the connection
+ */
+static void irdma_qp_disconnect(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+
+ iwqp->active_conn = 0;
+ /* close the CM node down if it is still active */
+ ibdev_dbg(&iwdev->ibdev, "CM: Call close API\n");
+ irdma_cm_close(iwqp->cm_node);
+}
+
+/**
+ * irdma_cm_disconn_true - called by worker thread to disconnect qp
+ * @iwqp: associate qp for the connection
+ */
+static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
+{
+ struct iw_cm_id *cm_id;
+ struct irdma_device *iwdev;
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+ u16 last_ae;
+ u8 original_hw_tcp_state;
+ u8 original_ibqp_state;
+ int disconn_status = 0;
+ int issue_disconn = 0;
+ int issue_close = 0;
+ int issue_flush = 0;
+ unsigned long flags;
+ int err;
+
+ iwdev = iwqp->iwdev;
+ spin_lock_irqsave(&iwqp->lock, flags);
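+ /* RoCE QPs have no iWARP CM state to tear down: move the QP to error and signal the event */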
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ struct ib_qp_attr attr;
+
+ if (iwqp->flush_issued || iwqp->sc_qp.qp_uk.destroy_pending) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp_roce(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ irdma_ib_qp_event(iwqp, qp->event_type);
+ return;
+ }
+
+ cm_id = iwqp->cm_id;
+ original_hw_tcp_state = iwqp->hw_tcp_state;
+ original_ibqp_state = iwqp->ibqp_state;
+ last_ae = iwqp->last_aeq;
+
+ if (qp->term_flags) {
+ issue_disconn = 1;
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ irdma_terminate_del_timer(qp);
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ } else if ((original_hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT) ||
+ ((original_ibqp_state == IB_QPS_RTS) &&
+ (last_ae == IRDMA_AE_LLP_CONNECTION_RESET))) {
+ issue_disconn = 1;
+ if (last_ae == IRDMA_AE_LLP_CONNECTION_RESET)
+ disconn_status = -ECONNRESET;
+ }
+
+ if (original_hw_tcp_state == IRDMA_TCP_STATE_CLOSED ||
+ original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
+ last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
+ last_ae == IRDMA_AE_BAD_CLOSE ||
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset || !cm_id) {
+ issue_close = 1;
+ iwqp->cm_id = NULL;
+ qp->term_flags = 0;
+ if (!iwqp->flush_issued) {
+ iwqp->flush_issued = 1;
+ issue_flush = 1;
+ }
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (issue_flush && !iwqp->sc_qp.qp_uk.destroy_pending) {
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ | IRDMA_FLUSH_RQ |
+ IRDMA_FLUSH_WAIT);
+
+ if (qp->term_flags)
+ irdma_ib_qp_event(iwqp, qp->event_type);
+ }
+
+ if (!cm_id || !cm_id->event_handler)
+ return;
+
+ spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+ if (!iwqp->cm_node) {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ return;
+ }
+ refcount_inc(&iwqp->cm_node->refcnt);
+
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+
+ if (issue_disconn) {
+ err = irdma_send_cm_event(iwqp->cm_node, cm_id,
+ IW_CM_EVENT_DISCONNECT,
+ disconn_status);
+ if (err)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: disconnect event failed: - cm_id = %p\n",
+ cm_id);
+ }
+ if (issue_close) {
+ cm_id->provider_data = iwqp;
+ err = irdma_send_cm_event(iwqp->cm_node, cm_id,
+ IW_CM_EVENT_CLOSE, 0);
+ if (err)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: close event failed: - cm_id = %p\n",
+ cm_id);
+ irdma_qp_disconnect(iwqp);
+ }
+ irdma_rem_ref_cm_node(iwqp->cm_node);
+}
+
+/**
+ * irdma_disconnect_worker - worker for connection close
+ * @work: pointer to disconn_work structure
+ */
+static void irdma_disconnect_worker(struct work_struct *work)
+{
+ struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+ struct irdma_qp *iwqp = dwork->iwqp;
+
+ kfree(dwork);
+ irdma_cm_disconn_true(iwqp);
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * irdma_free_lsmm_rsrc - free lsmm memory and deregister
+ * @iwqp: associate qp for the connection
+ */
+void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev;
+
+ iwdev = iwqp->iwdev;
+
+ if (iwqp->ietf_mem.va) {
+ if (iwqp->lsmm_mr)
+ iwdev->ibdev.ops.dereg_mr(iwqp->lsmm_mr, NULL);
+ dma_free_coherent(iwdev->rf->sc_dev.hw->device,
+ iwqp->ietf_mem.size, iwqp->ietf_mem.va,
+ iwqp->ietf_mem.pa);
+ iwqp->ietf_mem.va = NULL;
+ }
+}
+
+/**
+ * irdma_accept - registered call for connection to be accepted
+ * @cm_id: cm information for passive connection
+ * @conn_param: accept parameters
+ */
+int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct irdma_cm_node *cm_node;
+ struct ib_qp_attr attr = {};
+ int passive_state;
+ struct ib_mr *ibmr;
+ struct irdma_pd *iwpd;
+ u16 buf_len = 0;
+ struct irdma_kmem_info accept;
+ u64 tagged_offset;
+ int wait_ret;
+ int ret = 0;
+
+ ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+
+ iwqp = to_iwqp(ibqp);
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->rf->sc_dev;
+ cm_node = cm_id->provider_data;
+
+ if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
+ cm_node->ipv4 = true;
+ cm_node->vlan_id = irdma_get_vlan_ipv4(cm_node->loc_addr);
+ } else {
+ cm_node->ipv4 = false;
+ irdma_get_vlan_mac_ipv6(cm_node->loc_addr, &cm_node->vlan_id,
+ NULL);
+ }
+ ibdev_dbg(&iwdev->ibdev, "CM: Accept vlan_id=%d\n",
+ cm_node->vlan_id);
+
+ trace_irdma_accept(cm_node, 0, NULL);
+
+ if (cm_node->state == IRDMA_CM_STATE_LISTENER_DESTROYED) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ passive_state = atomic_add_return(1, &cm_node->passive_state);
+ if (passive_state == IRDMA_SEND_RESET_EVENT) {
+ ret = -ECONNRESET;
+ goto error;
+ }
+
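+ /* reserve room for the largest MPA IETF frame followed by the private data */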
+ buf_len = conn_param->private_data_len + IRDMA_MAX_IETF_SIZE;
+ iwqp->ietf_mem.size = ALIGN(buf_len, 1);
+ iwqp->ietf_mem.va = dma_alloc_coherent(dev->hw->device,
+ iwqp->ietf_mem.size,
+ &iwqp->ietf_mem.pa, GFP_KERNEL);
+ if (!iwqp->ietf_mem.va) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ cm_node->pdata.size = conn_param->private_data_len;
+ accept.addr = iwqp->ietf_mem.va;
+ accept.size = irdma_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
+ memcpy((u8 *)accept.addr + accept.size, conn_param->private_data,
+ conn_param->private_data_len);
+
+ if (cm_node->dev->ws_add(iwqp->sc_qp.vsi, cm_node->user_pri)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ iwqp->sc_qp.user_pri = cm_node->user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+ iwpd = iwqp->iwpd;
+ tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
+ ibmr = irdma_reg_phys_mr(&iwpd->ibpd, iwqp->ietf_mem.pa, buf_len,
+ IB_ACCESS_LOCAL_WRITE, &tagged_offset);
+ if (IS_ERR(ibmr)) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ibmr->pd = &iwpd->ibpd;
+ ibmr->device = iwpd->ibpd.device;
+ iwqp->lsmm_mr = ibmr;
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
+
+ cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+ irdma_sc_send_lsmm(&iwqp->sc_qp, iwqp->ietf_mem.va, cm_node->lsmm_size,
+ ibmr->lkey);
+
+ if (iwqp->page)
+ kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
+
+ iwqp->cm_id = cm_id;
+ cm_node->cm_id = cm_id;
+
+ cm_id->provider_data = iwqp;
+ iwqp->active_conn = 0;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ irdma_cm_init_tsa_conn(iwqp, cm_node);
+ irdma_qp_add_ref(&iwqp->ibqp);
+ cm_id->add_ref(cm_id);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ cm_node->cm_core->cm_free_ah(cm_node);
+
+ irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
+ wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
+ iwqp->rts_ae_rcvd,
+ IRDMA_MAX_TIMEOUT);
+ if (!wait_ret) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
+ cm_node, cm_node->loc_port,
+ cm_node->rem_port, cm_node->cm_id);
+ ret = -ECONNRESET;
+ goto error;
+ }
+ }
+
+ irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+ cm_node->accelerated = true;
+ complete(&cm_node->establish_comp);
+
+ if (cm_node->accept_pend) {
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+ cm_node->cm_core->stats_accepts++;
+
+ return 0;
+error:
+ irdma_free_lsmm_rsrc(iwqp);
+ irdma_rem_ref_cm_node(cm_node);
+
+ return ret;
+}
+
+/**
+ * irdma_reject - registered call for connection to be rejected
+ * @cm_id: cm information for passive connection
+ * @pdata: private data to be sent
+ * @pdata_len: private data length
+ */
+int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+
+ cm_node = cm_id->provider_data;
+ cm_node->pdata.size = pdata_len;
+
+ trace_irdma_reject(cm_node, 0, NULL);
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ cm_node->cm_core->stats_rejects++;
+
+ if (pdata_len + sizeof(struct ietf_mpa_v2) > IRDMA_MAX_CM_BUF)
+ return -EINVAL;
+
+ return irdma_cm_reject(cm_node, pdata, pdata_len);
+}
+
+/**
+ * irdma_connect - registered call for connection to be established
+ * @cm_id: cm information for the active connection
+ * @conn_param: Information about the connection
+ */
+int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+ struct ib_qp *ibqp;
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+ struct irdma_cm_info cm_info;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct sockaddr_in6 *laddr6;
+ struct sockaddr_in6 *raddr6;
+ int ret = 0;
+
+ ibqp = irdma_get_qp(cm_id->device, conn_param->qpn);
+ if (!ibqp)
+ return -EINVAL;
+ iwqp = to_iwqp(ibqp);
+ if (!iwqp)
+ return -EINVAL;
+ iwdev = iwqp->iwdev;
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+ if (!(laddr->sin_port) || !(raddr->sin_port))
+ return -EINVAL;
+
+ iwqp->active_conn = 1;
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = iwqp;
+
+ /* set up the connection params for the node */
+ if (cm_id->remote_addr.ss_family == AF_INET) {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
+ return -EINVAL;
+
+ cm_info.ipv4 = true;
+ memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
+ memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+ cm_info.rem_port = ntohs(raddr->sin_port);
+ cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
+ return -EINVAL;
+
+ cm_info.ipv4 = false;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(cm_info.rem_addr,
+ raddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ cm_info.rem_port = ntohs(raddr6->sin6_port);
+ irdma_get_vlan_mac_ipv6(cm_info.loc_addr, &cm_info.vlan_id,
+ NULL);
+ }
+ cm_info.cm_id = cm_id;
+ cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
+ cm_info.tos = cm_id->tos;
+ if (iwdev->vsi.dscp_mode) {
+ cm_info.user_pri =
+ iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(cm_info.tos)];
+ } else {
+ cm_info.user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
+ }
+
+ if (iwqp->sc_qp.dev->ws_add(iwqp->sc_qp.vsi, cm_info.user_pri))
+ return -ENOMEM;
+ iwqp->sc_qp.user_pri = cm_info.user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ ibdev_dbg(&iwdev->ibdev, "DCB: TOS:[%d] UP:[%d]\n", cm_id->tos,
+ cm_info.user_pri);
+
+ trace_irdma_dcb_tos(iwdev, cm_id->tos, cm_info.user_pri);
+
+ ret = irdma_create_cm_node(&iwdev->cm_core, iwdev, conn_param, &cm_info,
+ &cm_node);
+ if (ret)
+ return ret;
+ ret = cm_node->cm_core->cm_create_ah(cm_node, true);
+ if (ret)
+ goto err;
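+ /* add a quad hash entry so inbound packets for this connection reach the ILQ before sending the SYN */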
+ if (irdma_manage_qhash(iwdev, &cm_info,
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED,
+ IRDMA_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ cm_node->qhash_set = true;
+
+ cm_node->apbvt_entry = irdma_add_apbvt(iwdev, cm_info.loc_port);
+ if (!cm_node->apbvt_entry) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ cm_node->apbvt_set = true;
+ iwqp->cm_node = cm_node;
+ cm_node->iwqp = iwqp;
+ iwqp->cm_id = cm_id;
+ irdma_qp_add_ref(&iwqp->ibqp);
+ cm_id->add_ref(cm_id);
+
+ if (cm_node->state != IRDMA_CM_STATE_OFFLOADED) {
+ cm_node->state = IRDMA_CM_STATE_SYN_SENT;
+ ret = irdma_send_syn(cm_node, 0);
+ if (ret)
+ goto err;
+ }
+
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: rem_port=0x%04x, loc_port=0x%04x rem_addr=%pI4 loc_addr=%pI4 cm_node=%p cm_id=%p qp_id = %d\n\n",
+ cm_node->rem_port, cm_node->loc_port, cm_node->rem_addr,
+ cm_node->loc_addr, cm_node, cm_id, ibqp->qp_num);
+
+ trace_irdma_connect(cm_node, 0, NULL);
+
+ return 0;
+
+err:
+ if (cm_info.ipv4)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: connect() FAILED: dest addr=%pI4",
+ cm_info.rem_addr);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: connect() FAILED: dest addr=%pI6",
+ cm_info.rem_addr);
+ irdma_rem_ref_cm_node(cm_node);
+ iwdev->cm_core.stats_connect_errs++;
+
+ return ret;
+}
+
+/**
+ * irdma_create_listen - registered call to create a listener
+ * @cm_id: cm information for passive connection
+ * @backlog: maximum number of pending accepts
+ */
+int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+ struct irdma_device *iwdev;
+ struct irdma_cm_listener *cm_listen_node;
+ struct irdma_cm_info cm_info = {};
+ struct sockaddr_in *laddr;
+ struct sockaddr_in6 *laddr6;
+ bool wildcard = false;
+ int err;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (!iwdev)
+ return -EINVAL;
+
+ laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+ laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+ cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
+
+ if (laddr->sin_family == AF_INET) {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV4)
+ return -EINVAL;
+
+ cm_info.ipv4 = true;
+ cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+ cm_info.loc_port = ntohs(laddr->sin_port);
+
+ if (laddr->sin_addr.s_addr != htonl(INADDR_ANY)) {
+ cm_info.vlan_id = irdma_get_vlan_ipv4(cm_info.loc_addr);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ wildcard = true;
+ }
+ } else {
+ if (iwdev->vsi.mtu < IRDMA_MIN_MTU_IPV6)
+ return -EINVAL;
+
+ cm_info.ipv4 = false;
+ irdma_copy_ip_ntohl(cm_info.loc_addr,
+ laddr6->sin6_addr.in6_u.u6_addr32);
+ cm_info.loc_port = ntohs(laddr6->sin6_port);
+ if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY) {
+ irdma_get_vlan_mac_ipv6(cm_info.loc_addr,
+ &cm_info.vlan_id, NULL);
+ } else {
+ cm_info.vlan_id = 0xFFFF;
+ wildcard = true;
+ }
+ }
+
+ if (cm_info.vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ cm_info.vlan_id = 0;
+ cm_info.backlog = backlog;
+ cm_info.cm_id = cm_id;
+
+ trace_irdma_create_listen(iwdev, &cm_info);
+
+ cm_listen_node = irdma_make_listen_node(&iwdev->cm_core, iwdev,
+ &cm_info);
+ if (!cm_listen_node) {
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: cm_listen_node == NULL\n");
+ return -ENOMEM;
+ }
+
+ cm_id->provider_data = cm_listen_node;
+
+ cm_listen_node->tos = cm_id->tos;
+ if (iwdev->vsi.dscp_mode)
+ cm_listen_node->user_pri =
+ iwdev->vsi.dscp_map[irdma_tos2dscp(cm_id->tos)];
+ else
+ cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
+ cm_info.user_pri = cm_listen_node->user_pri;
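+	/*
+	 * Fresh (non-reused) listeners program the hardware quad hash:
+	 * wildcard listeners set up per-address entries via irdma_add_mqh(),
+	 * while address-specific listeners add a single TCP SYN qhash entry.
+	 */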
+ if (!cm_listen_node->reused_node) {
+ if (wildcard) {
+ err = irdma_add_mqh(iwdev, &cm_info, cm_listen_node);
+ if (err)
+ goto error;
+ } else {
+ if (!iwdev->vsi.dscp_mode)
+ cm_listen_node->user_pri =
+ irdma_iw_get_vlan_prio(cm_info.loc_addr,
+ cm_info.user_pri,
+ cm_info.ipv4);
+ cm_info.user_pri = cm_listen_node->user_pri;
+ err = irdma_manage_qhash(iwdev, &cm_info,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ NULL, true);
+ if (err)
+ goto error;
+
+ cm_listen_node->qhash_set = true;
+ }
+
+ cm_listen_node->apbvt_entry = irdma_add_apbvt(iwdev,
+ cm_info.loc_port);
+ if (!cm_listen_node->apbvt_entry)
+ goto error;
+ }
+ cm_id->add_ref(cm_id);
+ cm_listen_node->cm_core->stats_listen_created++;
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: loc_port=0x%04x loc_addr=%pI4 cm_listen_node=%p cm_id=%p qhash_set=%d vlan_id=%d\n",
+ cm_listen_node->loc_port, cm_listen_node->loc_addr,
+ cm_listen_node, cm_listen_node->cm_id,
+ cm_listen_node->qhash_set, cm_listen_node->vlan_id);
+
+ return 0;
+
+error:
+
+ irdma_cm_del_listen(&iwdev->cm_core, cm_listen_node, false);
+
+ return -EINVAL;
+}
+
+/**
+ * irdma_destroy_listen - registered call to destroy listener
+ * @cm_id: cm information for passive connection
+ */
+int irdma_destroy_listen(struct iw_cm_id *cm_id)
+{
+ struct irdma_device *iwdev;
+
+ iwdev = to_iwdev(cm_id->device);
+ if (cm_id->provider_data)
+ irdma_cm_del_listen(&iwdev->cm_core, cm_id->provider_data,
+ true);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: cm_id->provider_data was NULL\n");
+
+ cm_id->rem_ref(cm_id);
+
+ return 0;
+}
+
+/**
+ * irdma_teardown_list_prep - add conn nodes slated for tear down to list
+ * @cm_core: cm's core
+ * @teardown_list: list to which selected cm_nodes are added
+ * @ipaddr: pointer to ip address
+ * @nfo: pointer to cm_info structure instance
+ * @disconnect_all: flag indicating disconnect all QPs
+ */
+static void irdma_teardown_list_prep(struct irdma_cm_core *cm_core,
+ struct list_head *teardown_list,
+ u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all)
+{
+ struct irdma_cm_node *cm_node;
+ int bkt;
+
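+	/* Walked with RCU; the caller holds rcu_read_lock() around this. */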
+ hash_for_each_rcu(cm_core->cm_hash_tbl, bkt, cm_node, list) {
+ if ((disconnect_all ||
+ (nfo->vlan_id == cm_node->vlan_id &&
+ !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) &&
+ refcount_inc_not_zero(&cm_node->refcnt))
+ list_add(&cm_node->teardown_entry, teardown_list);
+ }
+}
+
+/**
+ * irdma_cm_event_connected - handle connected active node
+ * @event: the info for cm_node of connection
+ */
+static void irdma_cm_event_connected(struct irdma_cm_event *event)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_cm_node *cm_node;
+ struct irdma_sc_dev *dev;
+ struct ib_qp_attr attr = {};
+ struct iw_cm_id *cm_id;
+ int status;
+ bool read0;
+ int wait_ret = 0;
+
+ cm_node = event->cm_node;
+ cm_id = cm_node->cm_id;
+ iwqp = cm_id->provider_data;
+ iwdev = iwqp->iwdev;
+ dev = &iwdev->rf->sc_dev;
+ if (iwqp->sc_qp.qp_uk.destroy_pending) {
+ status = -ETIMEDOUT;
+ goto error;
+ }
+
+ irdma_cm_init_tsa_conn(iwqp, cm_node);
+ read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
+ if (iwqp->page)
+ iwqp->sc_qp.qp_uk.sq_base = kmap_local_page(iwqp->page);
+ irdma_sc_send_rtt(&iwqp->sc_qp, read0);
+ if (iwqp->page)
+ kunmap_local(iwqp->sc_qp.qp_uk.sq_base);
+
+ attr.qp_state = IB_QPS_RTS;
+ cm_node->qhash_set = false;
+ irdma_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) {
+ wait_ret = wait_event_interruptible_timeout(iwqp->waitq,
+ iwqp->rts_ae_rcvd,
+ IRDMA_MAX_TIMEOUT);
+ if (!wait_ret)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: Slow Connection: cm_node=%p, loc_port=%d, rem_port=%d, cm_id=%p\n",
+ cm_node, cm_node->loc_port,
+ cm_node->rem_port, cm_node->cm_id);
+ }
+
+ irdma_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
+ cm_node->accelerated = true;
+ complete(&cm_node->establish_comp);
+ cm_node->cm_core->cm_free_ah(cm_node);
+ return;
+
+error:
+ iwqp->cm_id = NULL;
+ cm_id->provider_data = NULL;
+ irdma_send_cm_event(event->cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+ status);
+ irdma_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * irdma_cm_event_reset - handle reset
+ * @event: the info for cm_node of connection
+ */
+static void irdma_cm_event_reset(struct irdma_cm_event *event)
+{
+ struct irdma_cm_node *cm_node = event->cm_node;
+ struct iw_cm_id *cm_id = cm_node->cm_id;
+ struct irdma_qp *iwqp;
+
+ if (!cm_id)
+ return;
+
+ iwqp = cm_id->provider_data;
+ if (!iwqp)
+ return;
+
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: reset event %p - cm_id = %p\n", event->cm_node, cm_id);
+ iwqp->cm_id = NULL;
+
+ irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT,
+ -ECONNRESET);
+ irdma_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
+}
+
+/**
+ * irdma_cm_event_handler - send event to cm upper layer
+ * @work: pointer to the cm event's work_struct
+ */
+static void irdma_cm_event_handler(struct work_struct *work)
+{
+ struct irdma_cm_event *event = container_of(work, struct irdma_cm_event, event_work);
+ struct irdma_cm_node *cm_node;
+
+ if (!event || !event->cm_node || !event->cm_node->cm_core)
+ return;
+
+ cm_node = event->cm_node;
+ trace_irdma_cm_event_handler(cm_node, event->type, NULL);
+
+ switch (event->type) {
+ case IRDMA_CM_EVENT_MPA_REQ:
+ irdma_send_cm_event(cm_node, cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REQUEST, 0);
+ break;
+ case IRDMA_CM_EVENT_RESET:
+ irdma_cm_event_reset(event);
+ break;
+ case IRDMA_CM_EVENT_CONNECTED:
+ if (!event->cm_node->cm_id ||
+ event->cm_node->state != IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_cm_event_connected(event);
+ break;
+ case IRDMA_CM_EVENT_MPA_REJECT:
+ if (!event->cm_node->cm_id ||
+ cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_send_cm_event(cm_node, cm_node->cm_id,
+ IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);
+ break;
+ case IRDMA_CM_EVENT_ABORTED:
+ if (!event->cm_node->cm_id ||
+ event->cm_node->state == IRDMA_CM_STATE_OFFLOADED)
+ break;
+ irdma_event_connect_error(event);
+ break;
+ default:
+ ibdev_dbg(&cm_node->iwdev->ibdev,
+ "CM: bad event type = %d\n", event->type);
+ break;
+ }
+
+ irdma_rem_ref_cm_node(event->cm_node);
+ kfree(event);
+}
+
+/**
+ * irdma_cm_post_event - queue event request for worker thread
+ * @event: cm event to queue for the worker thread
+ */
+static void irdma_cm_post_event(struct irdma_cm_event *event)
+{
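+	/*
+	 * The reference taken below is dropped by irdma_cm_event_handler()
+	 * once the queued work has run.
+	 */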
+ refcount_inc(&event->cm_node->refcnt);
+ INIT_WORK(&event->event_work, irdma_cm_event_handler);
+ queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+}
+
+/**
+ * irdma_cm_teardown_connections - teardown QPs
+ * @iwdev: device pointer
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @nfo: Connection info
+ * @disconnect_all: flag indicating disconnect all QPs
+ *
+ * teardown QPs where source or destination addr matches ip addr
+ */
+void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct list_head *list_core_temp;
+ struct list_head *list_node;
+ struct irdma_cm_node *cm_node;
+ struct list_head teardown_list;
+ struct ib_qp_attr attr;
+
+ INIT_LIST_HEAD(&teardown_list);
+
+ rcu_read_lock();
+ irdma_teardown_list_prep(cm_core, &teardown_list, ipaddr, nfo, disconnect_all);
+ rcu_read_unlock();
+
+ list_for_each_safe (list_node, list_core_temp, &teardown_list) {
+ cm_node = container_of(list_node, struct irdma_cm_node,
+ teardown_entry);
+ attr.qp_state = IB_QPS_ERR;
+ irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+ if (iwdev->rf->reset)
+ irdma_cm_disconn(cm_node->iwqp);
+ irdma_rem_ref_cm_node(cm_node);
+ }
+}
+
+/**
+ * irdma_qhash_ctrl - enable/disable qhash for list
+ * @iwdev: device pointer
+ * @parent_listen_node: parent listen node
+ * @nfo: cm info node
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ *
+ * Enables or disables the qhash for the node in the child
+ * listen list that matches ipaddr. If no matching IP was found
+ * it will allocate and add a new child listen node to the
+ * parent listen node. The listen_list_lock is assumed to be
+ * held when called.
+ */
+static void irdma_qhash_ctrl(struct irdma_device *iwdev,
+ struct irdma_cm_listener *parent_listen_node,
+ struct irdma_cm_info *nfo, u32 *ipaddr, bool ipv4,
+ bool ifup)
+{
+ struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
+ struct irdma_cm_listener *child_listen_node;
+ struct list_head *pos, *tpos;
+ bool node_allocated = false;
+ enum irdma_quad_hash_manage_type op = ifup ?
+ IRDMA_QHASH_MANAGE_TYPE_ADD :
+ IRDMA_QHASH_MANAGE_TYPE_DELETE;
+ int err;
+
+ list_for_each_safe (pos, tpos, child_listen_list) {
+ child_listen_node = list_entry(pos, struct irdma_cm_listener,
+ child_listen_list);
+ if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
+ goto set_qhash;
+ }
+
+ /* if not found then add a child listener if interface is going up */
+ if (!ifup)
+ return;
+ child_listen_node = kmemdup(parent_listen_node,
+ sizeof(*child_listen_node), GFP_ATOMIC);
+ if (!child_listen_node)
+ return;
+
+ node_allocated = true;
+ memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);
+
+set_qhash:
+ memcpy(nfo->loc_addr, child_listen_node->loc_addr,
+ sizeof(nfo->loc_addr));
+ nfo->vlan_id = child_listen_node->vlan_id;
+ err = irdma_manage_qhash(iwdev, nfo, IRDMA_QHASH_TYPE_TCP_SYN, op, NULL,
+ false);
+ if (!err) {
+ child_listen_node->qhash_set = ifup;
+ if (node_allocated)
+ list_add(&child_listen_node->child_listen_list,
+ &parent_listen_node->child_listen_list);
+ } else if (node_allocated) {
+ kfree(child_listen_node);
+ }
+}
+
+/**
+ * irdma_if_notify - process an interface up/down event
+ * @iwdev: device pointer
+ * @netdev: network device structure
+ * @ipaddr: Pointer to IPv4 or IPv6 address
+ * @ipv4: flag indicating IPv4 when true
+ * @ifup: flag indicating interface up when true
+ */
+void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
+ struct irdma_cm_listener *listen_node;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ struct irdma_cm_info nfo = {};
+ u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
+ enum irdma_quad_hash_manage_type op = ifup ?
+ IRDMA_QHASH_MANAGE_TYPE_ADD :
+ IRDMA_QHASH_MANAGE_TYPE_DELETE;
+
+ nfo.vlan_id = vlan_id;
+ nfo.ipv4 = ipv4;
+ nfo.qh_qpid = 1;
+
+ /* Disable or enable qhash for listeners */
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry (listen_node, &cm_core->listen_list, list) {
+ if (vlan_id != listen_node->vlan_id ||
+ (memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) &&
+ memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16)))
+ continue;
+
+ memcpy(nfo.loc_addr, listen_node->loc_addr,
+ sizeof(nfo.loc_addr));
+ nfo.loc_port = listen_node->loc_port;
+ nfo.user_pri = listen_node->user_pri;
+ if (!list_empty(&listen_node->child_listen_list)) {
+ irdma_qhash_ctrl(iwdev, listen_node, &nfo, ipaddr, ipv4,
+ ifup);
+ } else if (memcmp(listen_node->loc_addr, ip_zero,
+ ipv4 ? 4 : 16)) {
+ if (!irdma_manage_qhash(iwdev, &nfo,
+ IRDMA_QHASH_TYPE_TCP_SYN, op,
+ NULL, false))
+ listen_node->qhash_set = ifup;
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ /* disconnect any connected qp's on ifdown */
+ if (!ifup)
+ irdma_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
+}
diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
new file mode 100644
index 0000000000..7feadb3e1e
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/cm.h
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_CM_H
+#define IRDMA_CM_H
+
+#define IRDMA_MPA_REQUEST_ACCEPT 1
+#define IRDMA_MPA_REQUEST_REJECT 2
+
+/* IETF MPA -- defines */
+#define IEFT_MPA_KEY_REQ "MPA ID Req Frame"
+#define IEFT_MPA_KEY_REP "MPA ID Rep Frame"
+#define IETF_MPA_KEY_SIZE 16
+#define IETF_MPA_VER 1
+#define IETF_MAX_PRIV_DATA_LEN 512
+#define IETF_MPA_FRAME_SIZE 20
+#define IETF_RTR_MSG_SIZE 4
+#define IETF_MPA_V2_FLAG 0x10
+#define SNDMARKER_SEQNMASK 0x000001ff
+#define IRDMA_MAX_IETF_SIZE 32
+
+/* IETF RTR MSG Fields */
+#define IETF_PEER_TO_PEER 0x8000
+#define IETF_FLPDU_ZERO_LEN 0x4000
+#define IETF_RDMA0_WRITE 0x8000
+#define IETF_RDMA0_READ 0x4000
+#define IETF_NO_IRD_ORD 0x3fff
+
+#define MAX_PORTS 65536
+
+#define IRDMA_PASSIVE_STATE_INDICATED 0
+#define IRDMA_DO_NOT_SEND_RESET_EVENT 1
+#define IRDMA_SEND_RESET_EVENT 2
+
+#define MAX_IRDMA_IFS 4
+
+#define SET_ACK 1
+#define SET_SYN 2
+#define SET_FIN 4
+#define SET_RST 8
+
+#define TCP_OPTIONS_PADDING 3
+
+#define IRDMA_DEFAULT_RETRYS 64
+#define IRDMA_DEFAULT_RETRANS 32
+#define IRDMA_DEFAULT_TTL 0x40
+#define IRDMA_DEFAULT_RTT_VAR 6
+#define IRDMA_DEFAULT_SS_THRESH 0x3fffffff
+#define IRDMA_DEFAULT_REXMIT_THRESH 8
+
+#define IRDMA_RETRY_TIMEOUT HZ
+#define IRDMA_SHORT_TIME 10
+#define IRDMA_LONG_TIME (2 * HZ)
+#define IRDMA_MAX_TIMEOUT ((unsigned long)(12 * HZ))
+
+#define IRDMA_CM_HASHTABLE_SIZE 1024
+#define IRDMA_CM_TCP_TIMER_INTERVAL 3000
+#define IRDMA_CM_DEFAULT_MTU 1540
+#define IRDMA_CM_DEFAULT_FRAME_CNT 10
+#define IRDMA_CM_THREAD_STACK_SIZE 256
+#define IRDMA_CM_DEFAULT_RCV_WND 64240
+#define IRDMA_CM_DEFAULT_RCV_WND_SCALED 0x3FFFC
+#define IRDMA_CM_DEFAULT_RCV_WND_SCALE 2
+#define IRDMA_CM_DEFAULT_FREE_PKTS 10
+#define IRDMA_CM_FREE_PKT_LO_WATERMARK 2
+#define IRDMA_CM_DEFAULT_MSS 536
+#define IRDMA_CM_DEFAULT_MPA_VER 2
+#define IRDMA_CM_DEFAULT_SEQ 0x159bf75f
+#define IRDMA_CM_DEFAULT_LOCAL_ID 0x3b47
+#define IRDMA_CM_DEFAULT_SEQ2 0x18ed5740
+#define IRDMA_CM_DEFAULT_LOCAL_ID2 0xb807
+#define IRDMA_MAX_CM_BUF (IRDMA_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
+
+enum ietf_mpa_flags {
+ IETF_MPA_FLAGS_REJECT = 0x20,
+ IETF_MPA_FLAGS_CRC = 0x40,
+ IETF_MPA_FLAGS_MARKERS = 0x80,
+};
+
+enum irdma_timer_type {
+ IRDMA_TIMER_TYPE_SEND,
+ IRDMA_TIMER_TYPE_CLOSE,
+};
+
+enum option_nums {
+ OPTION_NUM_EOL,
+ OPTION_NUM_NONE,
+ OPTION_NUM_MSS,
+ OPTION_NUM_WINDOW_SCALE,
+ OPTION_NUM_SACK_PERM,
+ OPTION_NUM_SACK,
+ OPTION_NUM_WRITE0 = 0xbc,
+};
+
+/* cm node transition states */
+enum irdma_cm_node_state {
+ IRDMA_CM_STATE_UNKNOWN,
+ IRDMA_CM_STATE_INITED,
+ IRDMA_CM_STATE_LISTENING,
+ IRDMA_CM_STATE_SYN_RCVD,
+ IRDMA_CM_STATE_SYN_SENT,
+ IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED,
+ IRDMA_CM_STATE_ESTABLISHED,
+ IRDMA_CM_STATE_ACCEPTING,
+ IRDMA_CM_STATE_MPAREQ_SENT,
+ IRDMA_CM_STATE_MPAREQ_RCVD,
+ IRDMA_CM_STATE_MPAREJ_RCVD,
+ IRDMA_CM_STATE_OFFLOADED,
+ IRDMA_CM_STATE_FIN_WAIT1,
+ IRDMA_CM_STATE_FIN_WAIT2,
+ IRDMA_CM_STATE_CLOSE_WAIT,
+ IRDMA_CM_STATE_TIME_WAIT,
+ IRDMA_CM_STATE_LAST_ACK,
+ IRDMA_CM_STATE_CLOSING,
+ IRDMA_CM_STATE_LISTENER_DESTROYED,
+ IRDMA_CM_STATE_CLOSED,
+};
+
+enum mpa_frame_ver {
+ IETF_MPA_V1 = 1,
+ IETF_MPA_V2 = 2,
+};
+
+enum mpa_frame_key {
+ MPA_KEY_REQUEST,
+ MPA_KEY_REPLY,
+};
+
+enum send_rdma0 {
+ SEND_RDMA_READ_ZERO = 1,
+ SEND_RDMA_WRITE_ZERO = 2,
+};
+
+enum irdma_tcpip_pkt_type {
+ IRDMA_PKT_TYPE_UNKNOWN,
+ IRDMA_PKT_TYPE_SYN,
+ IRDMA_PKT_TYPE_SYNACK,
+ IRDMA_PKT_TYPE_ACK,
+ IRDMA_PKT_TYPE_FIN,
+ IRDMA_PKT_TYPE_RST,
+};
+
+enum irdma_cm_listener_state {
+ IRDMA_CM_LISTENER_PASSIVE_STATE = 1,
+ IRDMA_CM_LISTENER_ACTIVE_STATE = 2,
+ IRDMA_CM_LISTENER_EITHER_STATE = 3,
+};
+
+/* CM event codes */
+enum irdma_cm_event_type {
+ IRDMA_CM_EVENT_UNKNOWN,
+ IRDMA_CM_EVENT_ESTABLISHED,
+ IRDMA_CM_EVENT_MPA_REQ,
+ IRDMA_CM_EVENT_MPA_CONNECT,
+ IRDMA_CM_EVENT_MPA_ACCEPT,
+ IRDMA_CM_EVENT_MPA_REJECT,
+ IRDMA_CM_EVENT_MPA_ESTABLISHED,
+ IRDMA_CM_EVENT_CONNECTED,
+ IRDMA_CM_EVENT_RESET,
+ IRDMA_CM_EVENT_ABORTED,
+};
+
+struct ietf_mpa_v1 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ u8 priv_data[];
+};
+
+struct ietf_rtr_msg {
+ __be16 ctrl_ird;
+ __be16 ctrl_ord;
+};
+
+struct ietf_mpa_v2 {
+ u8 key[IETF_MPA_KEY_SIZE];
+ u8 flags;
+ u8 rev;
+ __be16 priv_data_len;
+ struct ietf_rtr_msg rtr_msg;
+ u8 priv_data[];
+};
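+
+/*
+ * Layout note: struct ietf_mpa_v1 is IETF_MPA_FRAME_SIZE (20) bytes --
+ * a 16-byte key, flags, rev and a 2-byte private data length; the v2
+ * frame appends the 4-byte RTR message (IETF_RTR_MSG_SIZE).
+ */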
+
+struct option_base {
+ u8 optionnum;
+ u8 len;
+};
+
+struct option_mss {
+ u8 optionnum;
+ u8 len;
+ __be16 mss;
+};
+
+struct option_windowscale {
+ u8 optionnum;
+ u8 len;
+ u8 shiftcount;
+};
+
+union all_known_options {
+ char eol;
+ struct option_base base;
+ struct option_mss mss;
+ struct option_windowscale windowscale;
+};
+
+struct irdma_timer_entry {
+ struct list_head list;
+ unsigned long timetosend; /* jiffies */
+ struct irdma_puda_buf *sqbuf;
+ u32 type;
+ u32 retrycount;
+ u32 retranscount;
+ u32 context;
+ u32 send_retrans;
+ int close_when_complete;
+};
+
+/* CM context params */
+struct irdma_cm_tcp_context {
+ u8 client;
+ u32 loc_seq_num;
+ u32 loc_ack_num;
+ u32 rem_ack_num;
+ u32 rcv_nxt;
+ u32 loc_id;
+ u32 rem_id;
+ u32 snd_wnd;
+ u32 max_snd_wnd;
+ u32 rcv_wnd;
+ u32 mss;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+};
+
+struct irdma_apbvt_entry {
+ struct hlist_node hlist;
+ u32 use_cnt;
+ u16 port;
+};
+
+struct irdma_cm_listener {
+ struct list_head list;
+ struct iw_cm_id *cm_id;
+ struct irdma_cm_core *cm_core;
+ struct irdma_device *iwdev;
+ struct list_head child_listen_list;
+ struct irdma_apbvt_entry *apbvt_entry;
+ enum irdma_cm_listener_state listener_state;
+ refcount_t refcnt;
+ atomic_t pend_accepts_cnt;
+ u32 loc_addr[4];
+ u32 reused_node;
+ int backlog;
+ u16 loc_port;
+ u16 vlan_id;
+ u8 loc_mac[ETH_ALEN];
+ u8 user_pri;
+ u8 tos;
+ bool qhash_set:1;
+ bool ipv4:1;
+};
+
+struct irdma_kmem_info {
+ void *addr;
+ u32 size;
+};
+
+struct irdma_mpa_priv_info {
+ const void *addr;
+ u32 size;
+};
+
+struct irdma_cm_node {
+ struct irdma_qp *iwqp;
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct irdma_cm_tcp_context tcp_cntxt;
+ struct irdma_cm_core *cm_core;
+ struct irdma_timer_entry *send_entry;
+ struct irdma_timer_entry *close_entry;
+ struct irdma_cm_listener *listener;
+ struct list_head timer_entry;
+ struct list_head reset_entry;
+ struct list_head teardown_entry;
+ struct irdma_apbvt_entry *apbvt_entry;
+ struct rcu_head rcu_head;
+ struct irdma_mpa_priv_info pdata;
+ struct irdma_sc_ah *ah;
+ union {
+ struct ietf_mpa_v1 mpa_frame;
+ struct ietf_mpa_v2 mpa_v2_frame;
+ };
+ struct irdma_kmem_info mpa_hdr;
+ struct iw_cm_id *cm_id;
+ struct hlist_node list;
+ struct completion establish_comp;
+	spinlock_t retrans_list_lock; /* protect CM node rexmit updates */
+ atomic_t passive_state;
+ refcount_t refcnt;
+ enum irdma_cm_node_state state;
+ enum send_rdma0 send_rdma0_op;
+ enum mpa_frame_ver mpa_frame_rev;
+ u32 loc_addr[4], rem_addr[4];
+ u16 loc_port, rem_port;
+ int apbvt_set;
+ int accept_pend;
+ u16 vlan_id;
+ u16 ird_size;
+ u16 ord_size;
+ u16 mpav2_ird_ord;
+ u16 lsmm_size;
+ u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ u8 user_pri;
+ u8 tos;
+ bool ack_rcvd:1;
+ bool qhash_set:1;
+ bool ipv4:1;
+ bool snd_mark_en:1;
+ bool rcv_mark_en:1;
+ bool do_lpb:1;
+ bool accelerated:1;
+};
+
+/* Used by internal CM APIs to pass CM information */
+struct irdma_cm_info {
+ struct iw_cm_id *cm_id;
+ u16 loc_port;
+ u16 rem_port;
+ u32 loc_addr[4];
+ u32 rem_addr[4];
+ u32 qh_qpid;
+ u16 vlan_id;
+ int backlog;
+ u8 user_pri;
+ u8 tos;
+ bool ipv4;
+};
+
+struct irdma_cm_event {
+ enum irdma_cm_event_type type;
+ struct irdma_cm_info cm_info;
+ struct work_struct event_work;
+ struct irdma_cm_node *cm_node;
+};
+
+struct irdma_cm_core {
+ struct irdma_device *iwdev;
+ struct irdma_sc_dev *dev;
+ struct list_head listen_list;
+ DECLARE_HASHTABLE(cm_hash_tbl, 8);
+ DECLARE_HASHTABLE(apbvt_hash_tbl, 8);
+ struct timer_list tcp_timer;
+ struct workqueue_struct *event_wq;
+ spinlock_t ht_lock; /* protect CM node (active side) list */
+ spinlock_t listen_list_lock; /* protect listener list */
+	spinlock_t apbvt_lock; /* serialize apbvt add/del entries */
+ u64 stats_nodes_created;
+ u64 stats_nodes_destroyed;
+ u64 stats_listen_created;
+ u64 stats_listen_destroyed;
+ u64 stats_listen_nodes_created;
+ u64 stats_listen_nodes_destroyed;
+ u64 stats_lpbs;
+ u64 stats_accepts;
+ u64 stats_rejects;
+ u64 stats_connect_errs;
+ u64 stats_passive_errs;
+ u64 stats_pkt_retrans;
+ u64 stats_backlog_drops;
+ struct irdma_puda_buf *(*form_cm_frame)(struct irdma_cm_node *cm_node,
+ struct irdma_kmem_info *options,
+ struct irdma_kmem_info *hdr,
+ struct irdma_mpa_priv_info *pdata,
+ u8 flags);
+ int (*cm_create_ah)(struct irdma_cm_node *cm_node, bool wait);
+ void (*cm_free_ah)(struct irdma_cm_node *cm_node);
+};
+
+int irdma_schedule_cm_timer(struct irdma_cm_node *cm_node,
+ struct irdma_puda_buf *sqbuf,
+ enum irdma_timer_type type, int send_retrans,
+ int close_when_complete);
+
+static inline u8 irdma_tos2dscp(u8 tos)
+{
+#define IRDMA_DSCP_VAL GENMASK(7, 2)
+ return (u8)FIELD_GET(IRDMA_DSCP_VAL, tos);
+}
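+
+/*
+ * Worked example: a ToS byte of 0x28 (DSCP AF11, ECN bits clear) gives
+ * FIELD_GET(GENMASK(7, 2), 0x28) = 0xa, i.e. DSCP 10.
+ */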
+
+int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
+int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
+int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
+int irdma_destroy_listen(struct iw_cm_id *cm_id);
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac);
+void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
+ struct irdma_cm_info *nfo,
+ bool disconnect_all);
+int irdma_cm_start(struct irdma_device *dev);
+int irdma_cm_stop(struct irdma_device *dev);
+bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
+bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
+int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
+ const u8 *mac_addr, u32 action);
+void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
+ u32 *ipaddr, bool ipv4, bool ifup);
+bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
+void irdma_send_ack(struct irdma_cm_node *cm_node);
+void irdma_lpb_nop(struct irdma_sc_qp *qp);
+void irdma_rem_ref_cm_node(struct irdma_cm_node *cm_node);
+void irdma_add_conn_est_qh(struct irdma_cm_node *cm_node);
+#endif /* IRDMA_CM_H */
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
new file mode 100644
index 0000000000..8a6200e55c
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -0,0 +1,5489 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include <linux/etherdevice.h>
+
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "ws.h"
+#include "protos.h"
+
+/**
+ * irdma_get_qp_from_list - get next qp from a list
+ * @head: list head of QPs
+ * @qp: current qp
+ */
+struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
+ struct irdma_sc_qp *qp)
+{
+ struct list_head *lastentry;
+ struct list_head *entry = NULL;
+
+ if (list_empty(head))
+ return NULL;
+
+ if (!qp) {
+ entry = head->next;
+ } else {
+ lastentry = &qp->list;
+ entry = lastentry->next;
+ if (entry == head)
+ return NULL;
+ }
+
+ return container_of(entry, struct irdma_sc_qp, list);
+}
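+
+/*
+ * Usage sketch: callers iterate a QOS qplist by starting with qp == NULL
+ * and feeding the returned QP back in until NULL is returned, as
+ * irdma_sc_suspend_resume_qps() below does.
+ */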
+
+/**
+ * irdma_sc_suspend_resume_qps - suspend/resume all QPs on VSI
+ * @vsi: the VSI struct pointer
+ * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND
+ */
+void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op)
+{
+ struct irdma_sc_qp *qp = NULL;
+ u8 i;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ mutex_lock(&vsi->qos[i].qos_mutex);
+ qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+ while (qp) {
+ if (op == IRDMA_OP_RESUME) {
+ if (!qp->dev->ws_add(vsi, i)) {
+ qp->qs_handle =
+ vsi->qos[qp->user_pri].qs_handle;
+ irdma_cqp_qp_suspend_resume(qp, op);
+ } else {
+ irdma_cqp_qp_suspend_resume(qp, op);
+ irdma_modify_qp_to_err(qp);
+ }
+ } else if (op == IRDMA_OP_SUSPEND) {
+ /* issue cqp suspend command */
+ if (!irdma_cqp_qp_suspend_resume(qp, op))
+ atomic_inc(&vsi->qp_suspend_reqs);
+ }
+ qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp);
+ }
+ mutex_unlock(&vsi->qos[i].qos_mutex);
+ }
+}
+
+static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2p)
+{
+ u8 i;
+
+ vsi->qos_rel_bw = l2p->vsi_rel_bw;
+ vsi->qos_prio_type = l2p->vsi_prio_type;
+ vsi->dscp_mode = l2p->dscp_mode;
+ if (l2p->dscp_mode) {
+ memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map));
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ l2p->up2tc[i] = i;
+ }
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ vsi->qos[i].qs_handle = l2p->qs_handle_list[i];
+ vsi->qos[i].traffic_class = l2p->up2tc[i];
+ vsi->qos[i].rel_bw =
+ l2p->tc_info[vsi->qos[i].traffic_class].rel_bw;
+ vsi->qos[i].prio_type =
+ l2p->tc_info[vsi->qos[i].traffic_class].prio_type;
+ vsi->qos[i].valid = false;
+ }
+}
+
+/**
+ * irdma_change_l2params - apply new l2 parameters to the VSI and its QPs
+ * @vsi: RDMA VSI pointer
+ * @l2params: New parameters from l2
+ */
+void irdma_change_l2params(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2params)
+{
+ if (l2params->mtu_changed) {
+ vsi->mtu = l2params->mtu;
+ if (vsi->ieq)
+ irdma_reinitialize_ieq(vsi);
+ }
+
+ if (!l2params->tc_changed)
+ return;
+
+ vsi->tc_change_pending = false;
+ irdma_set_qos_info(vsi, l2params);
+ irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME);
+}
+
+/**
+ * irdma_qp_rem_qos - remove qp from qos lists during destroy qp
+ * @qp: qp to be removed from qos
+ */
+void irdma_qp_rem_qos(struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_vsi *vsi = qp->vsi;
+
+ ibdev_dbg(to_ibdev(qp->dev),
+		  "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
+ qp->on_qoslist);
+ mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
+ if (qp->on_qoslist) {
+ qp->on_qoslist = false;
+ list_del(&qp->list);
+ }
+ mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
+}
+
+/**
+ * irdma_qp_add_qos - called during setctx for qp to be added to qos
+ * @qp: qp to be added to qos
+ */
+void irdma_qp_add_qos(struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_vsi *vsi = qp->vsi;
+
+ ibdev_dbg(to_ibdev(qp->dev),
+		  "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n",
+ qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle,
+ qp->on_qoslist);
+ mutex_lock(&vsi->qos[qp->user_pri].qos_mutex);
+ if (!qp->on_qoslist) {
+ list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
+ qp->on_qoslist = true;
+ qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
+ }
+ mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex);
+}
+
+/**
+ * irdma_sc_pd_init - initialize sc pd struct
+ * @dev: sc device struct
+ * @pd: sc pd ptr
+ * @pd_id: pd_id for allocated pd
+ * @abi_ver: User/Kernel ABI version
+ */
+void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
+ int abi_ver)
+{
+ pd->pd_id = pd_id;
+ pd->abi_ver = abi_ver;
+ pd->dev = dev;
+}
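+
+/*
+ * The CQP WQE helpers that follow share a common pattern: reserve the next
+ * send WQE, fill its data words, issue dma_wmb(), then write the header
+ * word (offset 24) carrying the valid bit, and optionally ring the CQP
+ * doorbell with irdma_sc_cqp_post_sq().
+ */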
+
+/**
+ * irdma_sc_add_arp_cache_entry - cqp wqe to add arp cache entry
+ * @cqp: struct for cqp hw
+ * @info: arp entry information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_add_arp_cache_entry_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ set_64bit_val(wqe, 8, info->reach_max);
+ set_64bit_val(wqe, 16, ether_addr_to_u64(info->mac_addr));
+
+ hdr = info->arp_index |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
+ FIELD_PREP(IRDMA_CQPSQ_MAT_PERMANENT, (info->permanent ? 1 : 0)) |
+ FIELD_PREP(IRDMA_CQPSQ_MAT_ENTRYVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ARP_CACHE_ENTRY WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_del_arp_cache_entry - delete arp cache entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index to delete arp entry
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 arp_index, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ hdr = arp_index |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_ARP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ARP_CACHE_DEL_ENTRY WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
+ * @cqp: struct for cqp hw
+ * @info: info for apbvt entry to add or delete
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_apbvt_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, info->port);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_APBVT) |
+ FIELD_PREP(IRDMA_CQPSQ_MAPT_ADDPORT, info->add) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MANAGE_APBVT WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_qhash_table_entry - manage quad hash entries
+ * @cqp: struct for cqp hw
+ * @info: info for quad hash to manage
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ *
+ * This is called before connection establishment is started.
+ * For passive connections, when listener is created, it will
+ * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local
+ * ip address and tcp port. When SYN is received (passive
+ * connections) or sent (active connections), this routine is
+ * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED
+ * and quad is passed in info.
+ *
+ * When iwarp connection is done and its state moves to RTS, the
+ * quad hash entry in the hardware will point to iwarp's qp
+ * number and requires no calls from the driver.
+ */
+static int
+irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_qhash_table_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 qw1 = 0;
+ u64 qw2 = 0;
+ u64 temp;
+ struct irdma_sc_vsi *vsi = info->vsi;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr));
+
+ qw1 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QPN, info->qp_num) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_DEST_PORT, info->dest_port);
+ if (info->ipv4_valid) {
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[0]));
+ } else {
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->dest_ip[0]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->dest_ip[1]));
+
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->dest_ip[2]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->dest_ip[3]));
+ }
+ qw2 = FIELD_PREP(IRDMA_CQPSQ_QHASH_QS_HANDLE,
+ vsi->qos[info->user_pri].qs_handle);
+ if (info->vlan_valid)
+ qw2 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANID, info->vlan_id);
+ set_64bit_val(wqe, 16, qw2);
+ if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) {
+ qw1 |= FIELD_PREP(IRDMA_CQPSQ_QHASH_SRC_PORT, info->src_port);
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR0, info->src_ip[0]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR1, info->src_ip[1]));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR2, info->src_ip[2]) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[3]));
+ } else {
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ADDR3, info->src_ip[0]));
+ }
+ }
+
+ set_64bit_val(wqe, 8, qw1);
+ temp = FIELD_PREP(IRDMA_CQPSQ_QHASH_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_OPCODE,
+ IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_MANAGE, info->manage) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_IPV4VALID, info->ipv4_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_VLANVALID, info->vlan_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QHASH_ENTRYTYPE, info->entry_type);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: MANAGE_QHASH WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_init - initialize qp
+ * @qp: sc qp
+ * @info: initialization qp info
+ */
+int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
+{
+ int ret_code;
+ u32 pble_obj_cnt;
+ u16 wqe_size;
+
+ if (info->qp_uk_init_info.max_sq_frag_cnt >
+ info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags ||
+ info->qp_uk_init_info.max_rq_frag_cnt >
+ info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags)
+ return -EINVAL;
+
+ qp->dev = info->pd->dev;
+ qp->vsi = info->vsi;
+ qp->ieq_qp = info->vsi->exception_lan_q;
+ qp->sq_pa = info->sq_pa;
+ qp->rq_pa = info->rq_pa;
+ qp->hw_host_ctx_pa = info->host_ctx_pa;
+ qp->q2_pa = info->q2_pa;
+ qp->shadow_area_pa = info->shadow_area_pa;
+ qp->q2_buf = info->q2;
+ qp->pd = info->pd;
+ qp->hw_host_ctx = info->host_ctx;
+ info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
+ ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ qp->virtual_map = info->virtual_map;
+ pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
+ (info->virtual_map && info->rq_pa >= pble_obj_cnt))
+ return -EINVAL;
+
+ qp->llp_stream_handle = (void *)(-1);
+ qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
+ IRDMA_QUEUE_TYPE_SQ_RQ);
+ ibdev_dbg(to_ibdev(qp->dev),
+ "WQE: hw_sq_size[%04d] sq_ring.size[%04d]\n",
+ qp->hw_sq_size, qp->qp_uk.sq_ring.size);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1 && qp->pd->abi_ver > 4)
+ wqe_size = IRDMA_WQE_SIZE_128;
+ else
+ ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+ &wqe_size);
+ if (ret_code)
+ return ret_code;
+
+ qp->hw_rq_size = irdma_get_encoded_wqe_size(qp->qp_uk.rq_size *
+ (wqe_size / IRDMA_QP_WQE_MIN_SIZE), IRDMA_QUEUE_TYPE_SQ_RQ);
+ ibdev_dbg(to_ibdev(qp->dev),
+ "WQE: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
+ qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
+ qp->sq_tph_val = info->sq_tph_val;
+ qp->rq_tph_val = info->rq_tph_val;
+ qp->sq_tph_en = info->sq_tph_en;
+ qp->rq_tph_en = info->rq_tph_en;
+ qp->rcv_tph_en = info->rcv_tph_en;
+ qp->xmit_tph_en = info->xmit_tph_en;
+ qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq;
+ qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_create - create qp
+ * @qp: sc qp
+ * @info: qp create info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info,
+ u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = qp->dev->cqp;
+ if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id ||
+ qp->qp_uk.qp_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt)
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, (info->ord_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
+ info->arp_cache_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_modify - modify qp cqp wqe
+ * @qp: sc qp
+ * @info: modify qp info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ u8 term_actions = 0;
+ u8 term_len = 0;
+
+ cqp = qp->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) {
+ if (info->dont_send_fin)
+ term_actions += IRDMAQP_TERM_SEND_TERM_ONLY;
+ if (info->dont_send_term)
+ term_actions += IRDMAQP_TERM_SEND_FIN_ONLY;
+ if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN ||
+ term_actions == IRDMAQP_TERM_SEND_TERM_ONLY)
+ term_len = info->termlen;
+ }
+
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEWMSS, info->new_mss) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TERMLEN, term_len));
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ORDVALID, info->ord_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TOECTXVALID, info->tcp_ctx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CACHEDVARVALID,
+ info->cached_var_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_VQ, qp->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_FORCELOOPBACK, info->force_lpb) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, info->cq_num_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MACVALID, info->mac_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_MSSCHANGE, info->mss_change) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY,
+ info->remove_hash_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_TERMACT, term_actions) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_RESETCON, info->reset_tcp_conn) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_ARPTABIDXVALID,
+ info->arp_cache_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, info->next_iwarp_state) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_destroy - cqp destroy qp
+ * @qp: sc qp
+ * @scratch: u64 saved to be used during cqp completion
+ * @remove_hash_idx: flag indicating whether to remove the hash entry
+ * @ignore_mw_bnd: memory window bind flag
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = qp->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, qp->qp_uk.qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_IGNOREMWBOUND, ignore_mw_bnd) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_REMOVEHASHENTRY, remove_hash_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_get_encoded_ird_size - encode IRD size for the QP context
+ * @ird_size: IRD size
+ *
+ * The IRD from the connection is rounded up to a supported HW setting and
+ * then encoded for the ird_size field of qp_ctx. Consumers are expected to
+ * provide a valid IRD size based on hardware attributes. The IRD size
+ * defaults to 4 in case of invalid input.
+ */
+static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
+{
+ switch (ird_size ?
+ roundup_pow_of_two(2 * ird_size) : 4) {
+ case 256:
+ return IRDMA_IRD_HW_SIZE_256;
+ case 128:
+ return IRDMA_IRD_HW_SIZE_128;
+ case 64:
+ case 32:
+ return IRDMA_IRD_HW_SIZE_64;
+ case 16:
+ case 8:
+ return IRDMA_IRD_HW_SIZE_16;
+ case 4:
+ default:
+ break;
+ }
+
+ return IRDMA_IRD_HW_SIZE_4;
+}
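+
+/*
+ * Example of the encoding above: an ird_size of 10 is doubled and rounded
+ * up to the next power of two (32), which maps to IRDMA_IRD_HW_SIZE_64;
+ * an ird_size of 0 takes the default of 4, i.e. IRDMA_IRD_HW_SIZE_4.
+ */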
+
+/**
+ * irdma_sc_qp_setctx_roce - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp;
+ u8 push_mode_en;
+ u32 push_idx;
+
+ roce_info = info->roce_info;
+ udp = info->udp_info;
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+ set_64bit_val(qp_ctx, 0,
+ FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
+ FIELD_PREP(IRDMAQPC_PDIDXHI, roce_info->pd_id >> 16) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID, roce_info->err_rq_idx_valid) |
+ FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
+ FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
+ FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag));
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
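+	/*
+	 * DCQCN and DCTCP rely on ECN marking; if the ToS byte carries no
+	 * ECN code point yet, set one here.
+	 */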
+ if ((roce_info->dcqcn_en || roce_info->dctcp_en) &&
+ !(udp->tos & 0x03))
+ udp->tos |= ECN_CODE_PT_VAL;
+ set_64bit_val(qp_ctx, 24,
+ FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
+ FIELD_PREP(IRDMAQPC_TTL, udp->ttl) | FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port));
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
+ set_64bit_val(qp_ctx, 56,
+ FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
+ FIELD_PREP(IRDMAQPC_PDIDX, roce_info->pd_id) |
+ FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
+ FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label));
+ set_64bit_val(qp_ctx, 64,
+ FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
+ FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp));
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
+ FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
+ FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
+ set_64bit_val(qp_ctx, 128,
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, roce_info->err_rq_idx) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 144,
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
+ set_64bit_val(qp_ctx, 152, ether_addr_to_u64(roce_info->mac_addr) << 16);
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
+ FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
+ FIELD_PREP(IRDMAQPC_FW_CC_ENABLE, roce_info->fw_cc_enable) |
+ FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE, roce_info->udprivcq_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
+ FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
+ set_64bit_val(qp_ctx, 208,
+ FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
+
+ print_hex_dump_debug("WQE: QP_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+/**
+ * irdma_sc_alloc_local_mac_entry - allocate a mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ALLOCATE_LOCAL_MAC WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_add_local_mac_entry - add mac entry
+ * @cqp: struct for cqp hw
+ * @info: mac addr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
+ struct irdma_local_mac_entry_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 header;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 32, ether_addr_to_u64(info->mac_addr));
+
+ header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, info->entry_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ print_hex_dump_debug("WQE: ADD_LOCAL_MAC WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @entry_idx: index of mac entry
+ * @ignore_ref_count: to force mac address delete
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
+ u16 entry_idx, u8 ignore_ref_count,
+ bool post_sq)
+{
+ __le64 *wqe;
+ u64 header;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ header = FIELD_PREP(IRDMA_CQPSQ_MLM_TABLEIDX, entry_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_MLM_FREEENTRY, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_MLM_IGNORE_REF_CNT, ignore_ref_count);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, header);
+
+ print_hex_dump_debug("WQE: DEL_LOCAL_MAC_IPADDR WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_setctx - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_iwarp_offload_info *iw;
+ struct irdma_tcp_offload_info *tcp;
+ struct irdma_sc_dev *dev;
+ u8 push_mode_en;
+ u32 push_idx;
+ u64 qw0, qw3, qw7 = 0, qw16 = 0;
+ u64 mac = 0;
+
+ iw = info->iwarp_info;
+ tcp = info->tcp_info;
+ dev = qp->dev;
+ if (iw->rcv_mark_en) {
+ qp->pfpdu.marker_len = 4;
+ qp->pfpdu.rcv_start_seq = tcp->rcv_nxt;
+ }
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+ qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en);
+
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+
+ qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX,
+ qp->src_mac_addr_idx);
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle) |
+ FIELD_PREP(IRDMAQPC_EXCEPTION_LAN_QUEUE, qp->ieq_qp));
+ if (info->iwarp_info_valid) {
+ qw0 |= FIELD_PREP(IRDMAQPC_DDP_VER, iw->ddp_ver) |
+ FIELD_PREP(IRDMAQPC_RDMAP_VER, iw->rdmap_ver) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, iw->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ECN_EN, iw->ecn_en) |
+ FIELD_PREP(IRDMAQPC_IBRDENABLE, iw->ib_rd_en) |
+ FIELD_PREP(IRDMAQPC_PDIDXHI, iw->pd_id >> 16) |
+ FIELD_PREP(IRDMAQPC_ERR_RQ_IDX_VALID,
+ iw->err_rq_idx_valid);
+ qw7 |= FIELD_PREP(IRDMAQPC_PDIDX, iw->pd_id);
+ qw16 |= FIELD_PREP(IRDMAQPC_ERR_RQ_IDX, iw->err_rq_idx) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, iw->rtomin);
+ set_64bit_val(qp_ctx, 144,
+ FIELD_PREP(IRDMAQPC_Q2ADDR, qp->q2_pa >> 8) |
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ mac = ether_addr_to_u64(iw->mac_addr);
+
+ set_64bit_val(qp_ctx, 152,
+ mac << 16 | FIELD_PREP(IRDMAQPC_LASTBYTESENT, iw->last_byte_sent));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE, iw->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(iw->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
+ FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
+ FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_IWARPMODE, 1) |
+ FIELD_PREP(IRDMAQPC_RCVMARKERS, iw->rcv_mark_en) |
+ FIELD_PREP(IRDMAQPC_ALIGNHDRS, iw->align_hdrs) |
+ FIELD_PREP(IRDMAQPC_RCVNOMPACRC, iw->rcv_no_mpa_crc) |
+ FIELD_PREP(IRDMAQPC_RCVMARKOFFSET, iw->rcv_mark_offset || !tcp ? iw->rcv_mark_offset : tcp->rcv_nxt) |
+ FIELD_PREP(IRDMAQPC_SNDMARKOFFSET, iw->snd_mark_offset || !tcp ? iw->snd_mark_offset : tcp->snd_nxt) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, iw->timely_en));
+ }
+ if (info->tcp_info_valid) {
+ qw0 |= FIELD_PREP(IRDMAQPC_IPV4, tcp->ipv4) |
+ FIELD_PREP(IRDMAQPC_NONAGLE, tcp->no_nagle) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG,
+ tcp->insert_vlan_tag) |
+ FIELD_PREP(IRDMAQPC_TIMESTAMP, tcp->time_stamp) |
+ FIELD_PREP(IRDMAQPC_LIMIT, tcp->cwnd_inc_limit) |
+ FIELD_PREP(IRDMAQPC_DROPOOOSEG, tcp->drop_ooo_seg) |
+ FIELD_PREP(IRDMAQPC_DUPACK_THRESH, tcp->dup_ack_thresh);
+
+ if ((iw->ecn_en || iw->dctcp_en) && !(tcp->tos & 0x03))
+ tcp->tos |= ECN_CODE_PT_VAL;
+
+ qw3 |= FIELD_PREP(IRDMAQPC_TTL, tcp->ttl) |
+ FIELD_PREP(IRDMAQPC_AVOIDSTRETCHACK, tcp->avoid_stretch_ack) |
+ FIELD_PREP(IRDMAQPC_TOS, tcp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, tcp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, tcp->dst_port);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ qw3 |= FIELD_PREP(IRDMAQPC_GEN1_SRCMACADDRIDX, tcp->src_mac_addr_idx);
+
+ qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
+ }
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, tcp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, tcp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, tcp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, tcp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, tcp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_SYN_RST_HANDLING, tcp->syn_rst_handling) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, tcp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, tcp->arp_idx));
+ qw7 |= FIELD_PREP(IRDMAQPC_FLOWLABEL, tcp->flow_label) |
+ FIELD_PREP(IRDMAQPC_WSCALE, tcp->wscale) |
+ FIELD_PREP(IRDMAQPC_IGNORE_TCP_OPT,
+ tcp->ignore_tcp_opt) |
+ FIELD_PREP(IRDMAQPC_IGNORE_TCP_UNS_OPT,
+ tcp->ignore_tcp_uns_opt) |
+ FIELD_PREP(IRDMAQPC_TCPSTATE, tcp->tcp_state) |
+ FIELD_PREP(IRDMAQPC_RCVSCALE, tcp->rcv_wscale) |
+ FIELD_PREP(IRDMAQPC_SNDSCALE, tcp->snd_wscale);
+ set_64bit_val(qp_ctx, 72,
+ FIELD_PREP(IRDMAQPC_TIMESTAMP_RECENT, tcp->time_stamp_recent) |
+ FIELD_PREP(IRDMAQPC_TIMESTAMP_AGE, tcp->time_stamp_age));
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_SNDNXT, tcp->snd_nxt) |
+ FIELD_PREP(IRDMAQPC_SNDWND, tcp->snd_wnd));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_RCVNXT, tcp->rcv_nxt) |
+ FIELD_PREP(IRDMAQPC_RCVWND, tcp->rcv_wnd));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_SNDMAX, tcp->snd_max) |
+ FIELD_PREP(IRDMAQPC_SNDUNA, tcp->snd_una));
+ set_64bit_val(qp_ctx, 104,
+ FIELD_PREP(IRDMAQPC_SRTT, tcp->srtt) |
+ FIELD_PREP(IRDMAQPC_RTTVAR, tcp->rtt_var));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_SSTHRESH, tcp->ss_thresh) |
+ FIELD_PREP(IRDMAQPC_CWND, tcp->cwnd));
+ set_64bit_val(qp_ctx, 120,
+ FIELD_PREP(IRDMAQPC_SNDWL1, tcp->snd_wl1) |
+ FIELD_PREP(IRDMAQPC_SNDWL2, tcp->snd_wl2));
+ qw16 |= FIELD_PREP(IRDMAQPC_MAXSNDWND, tcp->max_snd_window) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, tcp->rexmit_thresh);
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, tcp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, tcp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, tcp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, tcp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, iw->t_high) |
+ FIELD_PREP(IRDMAQPC_TLOW, iw->t_low));
+ set_64bit_val(qp_ctx, 208,
+ FIELD_PREP(IRDMAQPC_REMENDPOINTIDX, info->rem_endpoint_idx));
+ }
+
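+	/* qw 0, 3, 7 and 16 gather bits from the sections above, so write them last */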
+ set_64bit_val(qp_ctx, 0, qw0);
+ set_64bit_val(qp_ctx, 24, qw3);
+ set_64bit_val(qp_ctx, 56, qw7);
+ set_64bit_val(qp_ctx, 128, qw16);
+
+ print_hex_dump_debug("WQE: QP_HOST CTX", DUMP_PREFIX_OFFSET, 16, 8,
+ qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+/**
+ * irdma_sc_alloc_stag - mr stag alloc
+ * @dev: sc device struct
+ * @info: stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_allocate_stag_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ enum irdma_page_size page_size;
+
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
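+	/* translate the page size to the HW encoding: 1G, 2M, else default to 4K */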
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else
+ page_size = IRDMA_PAGE_SIZE_4K;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
+
+ if (info->chunk_size)
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_idx));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: ALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mr_reg_non_shared - non-shared mr registration
+ * @dev: sc device struct
+ * @info: mr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
+ struct irdma_reg_ns_stag_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 fbo;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ u32 pble_obj_cnt;
+ bool remote_access;
+ u8 addr_type;
+ enum irdma_page_size page_size;
+
+ if (!info->total_len && !info->all_memory)
+ return -EINVAL;
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else if (info->page_size == 0x1000)
+ page_size = IRDMA_PAGE_SIZE_4K;
+ else
+ return -EINVAL;
+
+ if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY |
+ IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+ remote_access = true;
+ else
+ remote_access = false;
+
+ pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt)
+ return -EINVAL;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
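+	/* fbo is the first-byte offset of the VA within its page */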
+ fbo = info->va & (info->page_size - 1);
+
+ set_64bit_val(wqe, 0,
+ (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ?
+ info->va : fbo));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len) |
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ if (!info->chunk_size) {
+ set_64bit_val(wqe, 32, info->reg_addr_pa);
+ set_64bit_val(wqe, 48, 0);
+ } else {
+ set_64bit_val(wqe, 32, 0);
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX, info->first_pm_pbl_index));
+ }
+ set_64bit_val(wqe, 40, info->hmc_fcn_index);
+ set_64bit_val(wqe, 56, 0);
+
+ addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0;
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_REG_MR) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_ARIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, remote_access) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MR_REG_NS WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_dealloc_stag - deallocate stag
+ * @dev: sc device struct
+ * @info: dealloc stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
+ struct irdma_dealloc_stag_info *info,
+ u64 scratch, bool post_sq)
+{
+ u64 hdr;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: DEALLOC_STAG WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mw_alloc - mw allocate
+ * @dev: sc device struct
+ * @info: memory window allocation information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
+ struct irdma_mw_alloc_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 hdr;
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8,
+ FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY,
+ info->mw1_bind_dont_vldt_key) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MW_ALLOC WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
+ * @qp: sc qp struct
+ * @info: fast mr info
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq)
+{
+ u64 temp, hdr;
+ __le64 *wqe;
+ u32 wqe_idx;
+ enum irdma_page_size page_size;
+ struct irdma_post_sq_info sq_info = {};
+
+ if (info->page_size == 0x40000000)
+ page_size = IRDMA_PAGE_SIZE_1G;
+ else if (info->page_size == 0x200000)
+ page_size = IRDMA_PAGE_SIZE_2M;
+ else
+ page_size = IRDMA_PAGE_SIZE_4K;
+
+ sq_info.wr_id = info->wr_id;
+ sq_info.signaled = info->signaled;
+
+ wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx,
+ IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(&qp->qp_uk, wqe_idx);
+
+ ibdev_dbg(to_ibdev(qp->dev),
+ "MR: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
+ info->wr_id, wqe_idx,
+ &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
+
+ temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ?
+ (uintptr_t)info->va : info->fbo;
+ set_64bit_val(wqe, 0, temp);
+
+ temp = FIELD_GET(IRDMAQPSQ_FIRSTPMPBLIDXHI,
+ info->first_pm_pbl_index >> 16);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXHI, temp) |
+ FIELD_PREP(IRDMAQPSQ_PBLADDR >> IRDMA_HW_PAGE_SHIFT, info->reg_addr_pa));
+ set_64bit_val(wqe, 16,
+ info->total_len |
+ FIELD_PREP(IRDMAQPSQ_FIRSTPMPBLIDXLO, info->first_pm_pbl_index));
+
+ hdr = FIELD_PREP(IRDMAQPSQ_STAGKEY, info->stag_key) |
+ FIELD_PREP(IRDMAQPSQ_STAGINDEX, info->stag_idx) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_FAST_REGISTER) |
+ FIELD_PREP(IRDMAQPSQ_LPBLSIZE, info->chunk_size) |
+ FIELD_PREP(IRDMAQPSQ_HPAGESIZE, page_size) |
+ FIELD_PREP(IRDMAQPSQ_STAGRIGHTS, info->access_rights) |
+ FIELD_PREP(IRDMAQPSQ_VABASEDTO, info->addr_type) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: FAST_REG WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(&qp->qp_uk);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_gen_rts_ae - request AE generated after RTS
+ * @qp: sc qp struct
+ */
+static void irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+
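+	/* slots 1 and 2 follow the LSMM/RTR WQE at slot 0: a fenced NOP,
+	 * then the GEN_RTS_AE WQE
+	 */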
+ wqe = qp_uk->sq_base[1].elem;
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ print_hex_dump_debug("QP: NOP W/LOCAL FENCE WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+
+ wqe = qp_uk->sq_base[2].elem;
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_GEN_RTS_AE) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ print_hex_dump_debug("QP: CONN EST WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+}
+
+/**
+ * irdma_sc_send_lsmm - send last streaming mode message
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ * @stag: stag of lsmm buffer
+ */
+void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
+ irdma_stag stag)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, stag));
+ } else {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, stag) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ }
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
+ FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
+ FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SEND_LSMM WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_QP_WQE_MIN_SIZE, false);
+
+ if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
+ irdma_sc_gen_rts_ae(qp);
+}
+
+/**
+ * irdma_sc_send_rtt - send last read0 or write0
+ * @qp: sc qp struct
+ * @read: Do read0 or write0
+ */
+void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read)
+{
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_qp_uk *qp_uk;
+
+ qp_uk = &qp->qp_uk;
+ wqe = qp_uk->sq_base->elem;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 16, 0);
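+	/* post a zero-length read0 or write0; the stag values used for read0
+	 * are placeholders
+	 */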
+ if (read) {
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, 0xabcd));
+ } else {
+ set_64bit_val(wqe, 8,
+ (u64)0xabcd | FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ }
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, 0x1234) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_READ) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+
+ } else {
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8, 0);
+ } else {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
+ }
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_WRITE) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ }
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: RTR WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_QP_WQE_MIN_SIZE, false);
+
+ if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
+ irdma_sc_gen_rts_ae(qp);
+}
+
+/**
+ * irdma_iwarp_opcode - determine the rdma opcode of the incoming packet
+ * @info: aeq info for the packet
+ * @pkt: packet for error
+ */
+static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt)
+{
+ __be16 *mpa;
+ u32 opcode = 0xffffffff;
+
+ if (info->q2_data_written) {
+ mpa = (__be16 *)pkt;
+ opcode = ntohs(mpa[1]) & 0xf;
+ }
+
+ return opcode;
+}
+
+/**
+ * irdma_locate_mpa - return pointer to mpa in the pkt
+ * @pkt: packet with data
+ */
+static u8 *irdma_locate_mpa(u8 *pkt)
+{
+ /* skip over ethernet header */
+ pkt += IRDMA_MAC_HLEN;
+
+ /* Skip over IP and TCP headers */
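+	/* pkt[0] & 0x0f is the IPv4 IHL and pkt[12] >> 4 the TCP data offset,
+	 * both counted in 32-bit words
+	 */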
+ pkt += 4 * (pkt[0] & 0x0f);
+ pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+
+ return pkt;
+}
+
+/**
+ * irdma_bld_termhdr_ctrl - setup terminate hdr control fields
+ * @qp: sc qp ptr for pkt
+ * @hdr: term hdr
+ * @opcode: flush opcode for termhdr
+ * @layer_etype: error layer + error type
+ * @err: error code in the header
+ */
+static void irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
+ struct irdma_terminate_hdr *hdr,
+ enum irdma_flush_opcode opcode,
+ u8 layer_etype, u8 err)
+{
+ qp->flush_code = opcode;
+ hdr->layer_etype = layer_etype;
+ hdr->error_code = err;
+}
+
+/**
+ * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
+ * @pkt: ptr to mpa in offending pkt
+ * @hdr: term hdr
+ * @copy_len: offending pkt length to be copied to term hdr
+ * @is_tagged: DDP tagged or untagged
+ */
+static void irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
+ int *copy_len, u8 *is_tagged)
+{
+ u16 ddp_seg_len;
+
+ ddp_seg_len = ntohs(*(__be16 *)pkt);
+ if (ddp_seg_len) {
+ *copy_len = 2;
+ hdr->hdrct = DDP_LEN_FLAG;
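+		/* the top bit of the DDP control byte selects the tagged buffer model */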
+ if (pkt[2] & 0x80) {
+ *is_tagged = 1;
+ if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+ *copy_len += TERM_DDP_LEN_TAGGED;
+ hdr->hdrct |= DDP_HDR_FLAG;
+ }
+ } else {
+ if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+ *copy_len += TERM_DDP_LEN_UNTAGGED;
+ hdr->hdrct |= DDP_HDR_FLAG;
+ }
+ if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
+ ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
+ *copy_len += TERM_RDMA_LEN;
+ hdr->hdrct |= RDMA_HDR_FLAG;
+ }
+ }
+ }
+}
+
+/**
+ * irdma_bld_terminate_hdr - build terminate message header
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+static int irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ int copy_len = 0;
+ u8 is_tagged = 0;
+ u32 opcode;
+ struct irdma_terminate_hdr *termhdr;
+
+ termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
+ memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
+
+ if (info->q2_data_written) {
+ pkt = irdma_locate_mpa(pkt);
+ irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
+ }
+
+ opcode = irdma_iwarp_opcode(info, pkt);
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ qp->sq_flush_code = info->sq;
+ qp->rq_flush_code = info->rq;
+
+ switch (info->ae_id) {
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_INV_STAG);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_STAG);
+ break;
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ if (info->q2_data_written)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_BOUNDS);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_BOUNDS);
+ break;
+ case IRDMA_AE_AMP_BAD_PD:
+ switch (opcode) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_UNASSOC_STAG);
+ break;
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_UNASSOC_STAG);
+ }
+ break;
+ case IRDMA_AE_AMP_INVALID_STAG:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_INV_STAG);
+ break;
+ case IRDMA_AE_AMP_BAD_QP:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_QN);
+ break;
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ switch (opcode) {
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_CANT_INV_STAG);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_INV_STAG);
+ }
+ break;
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_ACCESS);
+ break;
+ case IRDMA_AE_AMP_TO_WRAP:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
+ RDMAP_TO_WRAP);
+ break;
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC,
+ DDP_CATASTROPHIC_LOCAL);
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_DDP << 4) | DDP_CATASTROPHIC,
+ DDP_CATASTROPHIC_LOCAL);
+ break;
+ case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MSN_RANGE);
+ break;
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_TOO_LONG);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ if (is_tagged)
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_TAGGED_BUF,
+ DDP_TAGGED_INV_DDP_VER);
+ else
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_DDP_VER);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MO);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_MSN_NO_BUF);
+ break;
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_DDP << 4) | DDP_UNTAGGED_BUF,
+ DDP_UNTAGGED_INV_QN);
+ break;
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_INV_RDMAP_VER);
+ break;
+ default:
+ irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR,
+ (LAYER_RDMA << 4) | RDMAP_REMOTE_OP,
+ RDMAP_UNSPECIFIED);
+ break;
+ }
+
+ if (copy_len)
+ memcpy(termhdr + 1, pkt, copy_len);
+
+ return sizeof(struct irdma_terminate_hdr) + copy_len;
+}
+
+/**
+ * irdma_terminate_send_fin() - Send fin for terminate message
+ * @qp: qp associated with received terminate AE
+ */
+void irdma_terminate_send_fin(struct irdma_sc_qp *qp)
+{
+ irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
+ IRDMAQP_TERM_SEND_FIN_ONLY, 0);
+}
+
+/**
+ * irdma_terminate_connection() - Bad AE and send terminate to remote QP
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void irdma_terminate_connection(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 termlen = 0;
+
+ if (qp->term_flags & IRDMA_TERM_SENT)
+ return;
+
+ termlen = irdma_bld_terminate_hdr(qp, info);
+ irdma_terminate_start_timer(qp);
+ qp->term_flags |= IRDMA_TERM_SENT;
+ irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE,
+ IRDMAQP_TERM_SEND_TERM_ONLY, termlen);
+}
+
+/**
+ * irdma_terminate_received - handle terminate received AE
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void irdma_terminate_received(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+ __be32 *mpa;
+ u8 ddp_ctl;
+ u8 rdma_ctl;
+ u16 aeq_id = 0;
+ struct irdma_terminate_hdr *termhdr;
+
+ mpa = (__be32 *)irdma_locate_mpa(pkt);
+ if (info->q2_data_written) {
+ /* did not validate the frame - do it now */
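+		/* a valid terminate arrives on DDP untagged queue 2 with MSN 1 and MO 0 */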
+ ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
+ rdma_ctl = ntohl(mpa[0]) & 0xff;
+ if ((ddp_ctl & 0xc0) != 0x40)
+ aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC;
+ else if ((ddp_ctl & 0x03) != 1)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION;
+ else if (ntohl(mpa[2]) != 2)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN;
+ else if (ntohl(mpa[3]) != 1)
+ aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN;
+ else if (ntohl(mpa[4]) != 0)
+ aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO;
+ else if ((rdma_ctl & 0xc0) != 0x40)
+ aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+ info->ae_id = aeq_id;
+ if (info->ae_id) {
+ /* Bad terminate recvd - send back a terminate */
+ irdma_terminate_connection(qp, info);
+ return;
+ }
+ }
+
+ qp->term_flags |= IRDMA_TERM_RCVD;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ termhdr = (struct irdma_terminate_hdr *)&mpa[5];
+ if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
+ termhdr->layer_etype == RDMAP_REMOTE_OP) {
+ irdma_terminate_done(qp, 0);
+ } else {
+ irdma_terminate_start_timer(qp);
+ irdma_terminate_send_fin(qp);
+ }
+}
+
+static int irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ return 0;
+}
+
+static void irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ /* do nothing */
+}
+
+static void irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
+{
+ /* do nothing */
+}
+
+/**
+ * irdma_sc_vsi_init - Init the vsi structure
+ * @vsi: pointer to vsi structure to initialize
+ * @info: the info used to initialize the vsi struct
+ */
+void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_init_info *info)
+{
+ int i;
+
+ vsi->dev = info->dev;
+ vsi->back_vsi = info->back_vsi;
+ vsi->register_qset = info->register_qset;
+ vsi->unregister_qset = info->unregister_qset;
+ vsi->mtu = info->params->mtu;
+ vsi->exception_lan_q = info->exception_lan_q;
+ vsi->vsi_idx = info->pf_data_vsi_num;
+
+ irdma_set_qos_info(vsi, info->params);
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ mutex_init(&vsi->qos[i].qos_mutex);
+ INIT_LIST_HEAD(&vsi->qos[i].qplist);
+ }
+ if (vsi->register_qset) {
+ vsi->dev->ws_add = irdma_ws_add;
+ vsi->dev->ws_remove = irdma_ws_remove;
+ vsi->dev->ws_reset = irdma_ws_reset;
+ } else {
+ vsi->dev->ws_add = irdma_null_ws_add;
+ vsi->dev->ws_remove = irdma_null_ws_remove;
+ vsi->dev->ws_reset = irdma_null_ws_reset;
+ }
+}
+
+/**
+ * irdma_get_stats_idx - Return stats index
+ * @vsi: pointer to the vsi
+ */
+static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_stats_inst_info stats_info = {};
+ struct irdma_sc_dev *dev = vsi->dev;
+ u8 i;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
+ &stats_info))
+ return stats_info.stats_idx;
+ }
+
+ for (i = 0; i < IRDMA_MAX_STATS_COUNT_GEN_1; i++) {
+ if (!dev->stats_idx_array[i]) {
+ dev->stats_idx_array[i] = true;
+ return i;
+ }
+ }
+
+ return IRDMA_INVALID_STATS_IDX;
+}
+
+/**
+ * irdma_hw_stats_init_gen1 - Initialize stat reg table used for gen1
+ * @vsi: vsi structure where hw_regs are set
+ *
+ * Populate the HW stats table
+ */
+static void irdma_hw_stats_init_gen1(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_sc_dev *dev = vsi->dev;
+ const struct irdma_hw_stat_map *map;
+ u64 *stat_reg = vsi->hw_stats_regs;
+ u64 *regs = dev->hw_stats_regs;
+ u16 i, stats_reg_set = vsi->stats_idx;
+
+ map = dev->hw_stats_map;
+
+ /* First 4 stat instances are reserved for port level statistics. */
+ stats_reg_set += vsi->stats_inst_alloc ? IRDMA_FIRST_NON_PF_STAT : 0;
+
+ for (i = 0; i < dev->hw_attrs.max_stat_idx; i++) {
+ if (map[i].bitmask <= IRDMA_MAX_STATS_32)
+ stat_reg[i] = regs[i] + stats_reg_set * sizeof(u32);
+ else
+ stat_reg[i] = regs[i] + stats_reg_set * sizeof(u64);
+ }
+}
+
+/**
+ * irdma_vsi_stats_init - Initialize the vsi statistics
+ * @vsi: pointer to the vsi structure
+ * @info: The info structure used for initialization
+ */
+int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info)
+{
+ struct irdma_dma_mem *stats_buff_mem;
+
+ vsi->pestat = info->pestat;
+ vsi->pestat->hw = vsi->dev->hw;
+ vsi->pestat->vsi = vsi;
+ stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem;
+ stats_buff_mem->size = ALIGN(IRDMA_GATHER_STATS_BUF_SIZE * 2, 1);
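+	/* the buffer holds two gather areas: the current and the last snapshot */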
+ stats_buff_mem->va = dma_alloc_coherent(vsi->pestat->hw->device,
+ stats_buff_mem->size,
+ &stats_buff_mem->pa,
+ GFP_KERNEL);
+ if (!stats_buff_mem->va)
+ return -ENOMEM;
+
+ vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va;
+ vsi->pestat->gather_info.last_gather_stats_va =
+ (void *)((uintptr_t)stats_buff_mem->va +
+ IRDMA_GATHER_STATS_BUF_SIZE);
+
+ irdma_hw_stats_start_timer(vsi);
+
+ /* when stat allocation is not required default to fcn_id. */
+ vsi->stats_idx = info->fcn_id;
+ if (info->alloc_stats_inst) {
+ u8 stats_idx = irdma_get_stats_idx(vsi);
+
+ if (stats_idx != IRDMA_INVALID_STATS_IDX) {
+ vsi->stats_inst_alloc = true;
+ vsi->stats_idx = stats_idx;
+ vsi->pestat->gather_info.use_stats_inst = true;
+ vsi->pestat->gather_info.stats_inst_index = stats_idx;
+ }
+ }
+
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ irdma_hw_stats_init_gen1(vsi);
+
+ return 0;
+}
+
+/**
+ * irdma_vsi_stats_free - Free the vsi stats
+ * @vsi: pointer to the vsi structure
+ */
+void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_stats_inst_info stats_info = {};
+ struct irdma_sc_dev *dev = vsi->dev;
+ u8 stats_idx = vsi->stats_idx;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (vsi->stats_inst_alloc) {
+ stats_info.stats_idx = vsi->stats_idx;
+ irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE,
+ &stats_info);
+ }
+ } else {
+ if (vsi->stats_inst_alloc &&
+ stats_idx < vsi->dev->hw_attrs.max_stat_inst)
+ vsi->dev->stats_idx_array[stats_idx] = false;
+ }
+
+ if (!vsi->pestat)
+ return;
+ irdma_hw_stats_stop_timer(vsi);
+ dma_free_coherent(vsi->pestat->hw->device,
+ vsi->pestat->gather_info.stats_buff_mem.size,
+ vsi->pestat->gather_info.stats_buff_mem.va,
+ vsi->pestat->gather_info.stats_buff_mem.pa);
+ vsi->pestat->gather_info.stats_buff_mem.va = NULL;
+}
+
+/**
+ * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size
+ * @wqsize: size of the wq (sq, rq) to be encoded
+ * @queue_type: queue type selected for the calculation algorithm
+ */
+u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
+{
+ u8 encoded_size = 0;
+
+ /* cqp sq's hw coded value starts from 1 for size of 4
+	 * while it starts from 0 for qp's wqs.
+ */
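+	/* e.g. a QP WQ of 256 quanta encodes to 6 (log2(256) - 2),
+	 * while a CQP SQ of 256 encodes to 7.
+	 */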
+ if (queue_type == IRDMA_QUEUE_TYPE_CQP)
+ encoded_size = 1;
+ wqsize >>= 2;
+ while (wqsize >>= 1)
+ encoded_size++;
+
+ return encoded_size;
+}
+
+/**
+ * irdma_sc_gather_stats - collect the statistics
+ * @cqp: struct for cqp hw
+ * @info: gather stats info structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_gather_stats(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_gather_info *info,
+ u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE)
+ return -ENOMEM;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fcn_index));
+ set_64bit_val(wqe, 32, info->stats_buff_mem.pa);
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_INST, info->use_stats_inst) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX,
+ info->stats_inst_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
+ info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_GATHER_STATS);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("STATS: GATHER_STATS WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ irdma_sc_cqp_post_sq(cqp);
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "STATS: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_stats_inst - allocate or free stats instance
+ * @cqp: struct for cqp hw
+ * @info: stats info structure
+ * @alloc: alloc vs. delete flag
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp,
+ struct irdma_stats_inst_info *info,
+ bool alloc, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_STATS_HMC_FCN_INDEX, info->hmc_fn_id));
+ temp = FIELD_PREP(IRDMA_CQPSQ_STATS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_ALLOC_INST, alloc) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX,
+ info->use_hmc_fcn_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_INST_INDEX, info->stats_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STATS_OP, IRDMA_CQP_OP_MANAGE_STATS);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: MANAGE_STATS WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_set_up_map - set the up map table
+ * @cqp: struct for cqp hw
+ * @info: User priority map info
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_set_up_map(struct irdma_sc_cqp *cqp,
+ struct irdma_up_info *info, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp = 0;
+ int i;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ temp |= (u64)info->map[i] << (i * 8);
+
+ set_64bit_val(wqe, 0, temp);
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_UP_CNPOVERRIDE, info->cnp_up_override) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_HMCFCNIDX, info->hmc_fcn_idx));
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_UP_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_USEVLAN, info->use_vlan) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_USEOVERRIDE,
+ info->use_cnp_up_override) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_UP_MAP);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: UPMAP WQE", DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_ws_node - create/modify/destroy WS node
+ * @cqp: struct for cqp hw
+ * @info: node info structure
+ * @node_op: 0 for add, 1 for modify, 2 for delete
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp,
+ struct irdma_ws_node_info *info,
+ enum irdma_ws_node_op node_op, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_WS_VSI, info->vsi) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_WEIGHT, info->weight));
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_WS_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODEOP, node_op) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_ENABLENODE, info->enable) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODETYPE, info->type_leaf) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_PRIOTYPE, info->prio_type) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_TC, info->tc) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_OP, IRDMA_CQP_OP_WORK_SCHED_NODE) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_PARENTID, info->parent_id) |
+ FIELD_PREP(IRDMA_CQPSQ_WS_NODEID, info->id);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: MANAGE_WS WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_flush_wqes - flush qp's wqe
+ * @qp: sc qp
+ * @info: flush information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 temp = 0;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ bool flush_sq = false, flush_rq = false;
+
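+	/* only request a flush for a queue that has not been flushed yet */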
+ if (info->rq && !qp->flush_rq)
+ flush_rq = true;
+ if (info->sq && !qp->flush_sq)
+ flush_sq = true;
+ qp->flush_sq |= flush_sq;
+ qp->flush_rq |= flush_rq;
+
+ if (!flush_sq && !flush_rq) {
+ ibdev_dbg(to_ibdev(qp->dev),
+ "CQP: Additional flush request ignored for qp %x\n",
+ qp->qp_uk.qp_id);
+ return -EALREADY;
+ }
+
+ cqp = qp->pd->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ if (info->userflushcode) {
+ if (flush_rq)
+ temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMNERR,
+ info->rq_minor_code) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_RQMJERR,
+ info->rq_major_code);
+ if (flush_sq)
+ temp |= FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMNERR,
+ info->sq_minor_code) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_SQMJERR,
+ info->sq_major_code);
+ }
+ set_64bit_val(wqe, 16, temp);
+
+ temp = (info->generate_ae) ?
+ info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
+ info->ae_src) : 0;
+ set_64bit_val(wqe, 8, temp);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, info->generate_ae) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_USERFLCODE, info->userflushcode) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_FLUSH WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP
+ * @qp: sc qp
+ * @info: gen ae information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_gen_ae(struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, u64 scratch,
+ bool post_sq)
+{
+ u64 temp;
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = qp->pd->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ temp = info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
+ info->ae_src);
+ set_64bit_val(wqe, 8, temp);
+
+ hdr = qp->qp_uk.qp_id | FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_GEN_AE) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_GENERATE_AE, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: GEN_AE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_qp_upload_context - upload qp's context
+ * @dev: sc device struct
+ * @info: upload context info ptr for return
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_qp_upload_context(struct irdma_sc_dev *dev,
+ struct irdma_upload_context_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, info->buf_pa);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_UCTX_QPID, info->qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPLOAD_CONTEXT) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_QPTYPE, info->qp_type) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_RAWFORMAT, info->raw_format) |
+ FIELD_PREP(IRDMA_CQPSQ_UCTX_FREEZEQP, info->freeze_qp) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QP_UPLOAD_CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_manage_push_page - Handle push page
+ * @cqp: struct for cqp hw
+ * @info: push page info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_manage_push_page(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_manage_push_page_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ if (info->free_page &&
+ info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages)
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, info->qs_handle);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_MPP_PPIDX, info->push_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_MPP_PPTYPE, info->push_page_type) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MANAGE_PUSH_PAGES) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_MPP_FREE_PAGE, info->free_page);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MANAGE_PUSH_PAGES WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_suspend_qp - suspend qp for param change
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
+{
+ u64 hdr;
+ __le64 *wqe;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_SUSPENDQP_QPID, qp->qp_uk.qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_SUSPEND_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SUSPEND_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_resume_qp - resume qp after suspend
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp,
+ u64 scratch)
+{
+ u64 hdr;
+ __le64 *wqe;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QSHANDLE, qp->qs_handle));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_RESUMEQP_QPID, qp->qp_uk.qp_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_RESUME_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: RESUME_QP WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_ack - acknowledge completion q
+ * @cq: cq struct
+ */
+static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
+{
+ writel(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db);
+}
+
+/**
+ * irdma_sc_cq_init - initialize completion q
+ * @cq: cq struct
+ * @info: cq initialization info
+ */
+int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ cq->cq_pa = info->cq_base_pa;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
+ info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
+ irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
+
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->ceqe_mask = info->ceqe_mask;
+ cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->vsi = info->vsi;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_create - create completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: flag for overflow check
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
+ bool check_overflow, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ struct irdma_sc_ceq *ceq;
+ int ret_code = 0;
+
+ cqp = cq->dev->cqp;
+ if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
+ return -EINVAL;
+
+ if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
+
+ ceq = cq->dev->ceq[cq->ceq_id];
+ if (ceq && ceq->reg_cq)
+ ret_code = irdma_sc_add_cq_ctx(ceq, cq);
+
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe) {
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, cq);
+ return -ENOMEM;
+ }
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
+ set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
+
+ hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
+ FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
+ cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_destroy - destroy completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_sc_ceq *ceq;
+
+ cqp = cq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ ceq = cq->dev->ceq[cq->ceq_id];
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, cq);
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48,
+ (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+
+ hdr = cq->cq_uk.cq_id |
+ FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cq_resize - set resized cq buffer info
+ * @cq: resized cq
+ * @info: resized cq buffer info
+ */
+void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info)
+{
+ cq->virtual_map = info->virtual_map;
+ cq->cq_pa = info->cq_pa;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size);
+}
+
+/**
+ * irdma_sc_cq_modify - modify a Completion Queue
+ * @cq: cq struct
+ * @info: modification info struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag to post to sq
+ */
+static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
+ struct irdma_modify_cq_info *info, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ u32 pble_obj_cnt;
+
+ pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+ if (info->cq_resize && info->virtual_map &&
+ info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ cqp = cq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, info->cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, info->shadow_read_threshold));
+ set_64bit_val(wqe, 32, info->cq_pa);
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 48, info->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
+
+ hdr = cq->cq_uk.cq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CQRESIZE, info->cq_resize) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, info->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, info->check_overflow) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, info->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT,
+ cq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_check_cqp_progress - check cqp processing progress
+ * @timeout: timeout info struct
+ * @dev: sc device struct
+ */
+void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev)
+{
+ u64 completed_ops = atomic64_read(&dev->cqp->completed_ops);
+
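+	/* count a stall only while commands remain outstanding with no new completions */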
+ if (timeout->compl_cqp_cmds != completed_ops) {
+ timeout->compl_cqp_cmds = completed_ops;
+ timeout->count = 0;
+ } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) {
+ timeout->count++;
+ }
+}
+
+/**
+ * irdma_get_cqp_reg_info - get head and tail for cqp using registers
+ * @cqp: struct for cqp hw
+ * @val: cqp tail register value
+ * @tail: wqtail register value
+ * @error: cqp processing err
+ */
+static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
+ u32 *tail, u32 *error)
+{
+ *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]);
+ *tail = FIELD_GET(IRDMA_CQPTAIL_WQTAIL, *val);
+ *error = FIELD_GET(IRDMA_CQPTAIL_CQP_OP_ERR, *val);
+}
+
+/**
+ * irdma_cqp_poll_registers - poll cqp registers
+ * @cqp: struct for cqp hw
+ * @tail: wqtail register value
+ * @count: how many times to try for completion
+ */
+static int irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail,
+ u32 count)
+{
+ u32 i = 0;
+ u32 newtail, error, val;
+
+ while (i++ < count) {
+ irdma_get_cqp_reg_info(cqp, &val, &newtail, &error);
+ if (error) {
+ error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: CQPERRCODES error_code[x%08X]\n",
+ error);
+ return -EIO;
+ }
+ if (newtail != tail) {
+ /* SUCCESS */
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ atomic64_inc(&cqp->completed_ops);
+ return 0;
+ }
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base
+ * @dev: sc device struct
+ * @buf: pointer to commit buffer
+ * @buf_idx: buffer index
+ * @obj_info: object info pointer
+ * @rsrc_idx: index of memory resource
+ */
+static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
+ u32 buf_idx, struct irdma_hmc_obj_info *obj_info,
+ u32 rsrc_idx)
+{
+ u64 temp;
+
+ get_64bit_val(buf, buf_idx, &temp);
+
+ switch (rsrc_idx) {
+ case IRDMA_HMC_IW_QP:
+ obj_info[rsrc_idx].cnt = (u32)FIELD_GET(IRDMA_COMMIT_FPM_QPCNT, temp);
+ break;
+ case IRDMA_HMC_IW_CQ:
+ obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
+ break;
+ case IRDMA_HMC_IW_APBVT_ENTRY:
+ obj_info[rsrc_idx].cnt = 1;
+ break;
+ default:
+ obj_info[rsrc_idx].cnt = (u32)temp;
+ break;
+ }
+
+ obj_info[rsrc_idx].base = (temp >> IRDMA_COMMIT_FPM_BASE_S) * 512;
+
+ return temp;
+}
+
+/**
+ * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer
+ * @dev: pointer to dev struct
+ * @buf: ptr to fpm commit buffer
+ * @info: ptr to irdma_hmc_obj_info struct
+ * @sd: number of SDs for HMC objects
+ *
+ * parses fpm commit info and copies the base value
+ * of hmc objects into hmc_info
+ */
+static void
+irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
+ struct irdma_hmc_obj_info *info, u32 *sd)
+{
+ u64 size;
+ u32 i;
+ u64 max_base = 0;
+ u32 last_hmc_obj = 0;
+
+ irdma_sc_decode_fpm_commit(dev, buf, 0, info,
+ IRDMA_HMC_IW_QP);
+ irdma_sc_decode_fpm_commit(dev, buf, 8, info,
+ IRDMA_HMC_IW_CQ);
+	/* skipping RSRVD */
+ irdma_sc_decode_fpm_commit(dev, buf, 24, info,
+ IRDMA_HMC_IW_HTE);
+ irdma_sc_decode_fpm_commit(dev, buf, 32, info,
+ IRDMA_HMC_IW_ARP);
+ irdma_sc_decode_fpm_commit(dev, buf, 40, info,
+ IRDMA_HMC_IW_APBVT_ENTRY);
+ irdma_sc_decode_fpm_commit(dev, buf, 48, info,
+ IRDMA_HMC_IW_MR);
+ irdma_sc_decode_fpm_commit(dev, buf, 56, info,
+ IRDMA_HMC_IW_XF);
+ irdma_sc_decode_fpm_commit(dev, buf, 64, info,
+ IRDMA_HMC_IW_XFFL);
+ irdma_sc_decode_fpm_commit(dev, buf, 72, info,
+ IRDMA_HMC_IW_Q1);
+ irdma_sc_decode_fpm_commit(dev, buf, 80, info,
+ IRDMA_HMC_IW_Q1FL);
+ irdma_sc_decode_fpm_commit(dev, buf, 88, info,
+ IRDMA_HMC_IW_TIMER);
+ irdma_sc_decode_fpm_commit(dev, buf, 112, info,
+ IRDMA_HMC_IW_PBLE);
+ /* skipping RSVD. */
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) {
+ irdma_sc_decode_fpm_commit(dev, buf, 96, info,
+ IRDMA_HMC_IW_FSIMC);
+ irdma_sc_decode_fpm_commit(dev, buf, 104, info,
+ IRDMA_HMC_IW_FSIAV);
+ irdma_sc_decode_fpm_commit(dev, buf, 128, info,
+ IRDMA_HMC_IW_RRF);
+ irdma_sc_decode_fpm_commit(dev, buf, 136, info,
+ IRDMA_HMC_IW_RRFFL);
+ irdma_sc_decode_fpm_commit(dev, buf, 144, info,
+ IRDMA_HMC_IW_HDR);
+ irdma_sc_decode_fpm_commit(dev, buf, 152, info,
+ IRDMA_HMC_IW_MD);
+ irdma_sc_decode_fpm_commit(dev, buf, 160, info,
+ IRDMA_HMC_IW_OOISC);
+ irdma_sc_decode_fpm_commit(dev, buf, 168, info,
+ IRDMA_HMC_IW_OOISCFFL);
+ }
+
+ /* searching for the last object in HMC to find the size of the HMC area. */
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
+ if (info[i].base > max_base) {
+ max_base = info[i].base;
+ last_hmc_obj = i;
+ }
+ }
+
+ size = info[last_hmc_obj].cnt * info[last_hmc_obj].size +
+ info[last_hmc_obj].base;
+
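+	/* each segment descriptor (SD) maps 2MB (1 << 21), so round the total size up to whole SDs */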
+ if (size & 0x1FFFFF)
+ *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
+ else
+ *sd = (u32)(size >> 21);
+
+}
+
+/**
+ * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
+ * @buf: ptr to fpm query buffer
+ * @buf_idx: index into buf
+ * @obj_info: ptr to irdma_hmc_obj_info struct
+ * @rsrc_idx: resource index into info
+ *
+ * Decode a 64 bit value from fpm query buffer into max count and size
+ */
+static u64 irdma_sc_decode_fpm_query(__le64 *buf, u32 buf_idx,
+ struct irdma_hmc_obj_info *obj_info,
+ u32 rsrc_idx)
+{
+ u64 temp;
+ u32 size;
+
+ get_64bit_val(buf, buf_idx, &temp);
+ obj_info[rsrc_idx].max_cnt = (u32)temp;
+ size = (u32)(temp >> 32);
+ obj_info[rsrc_idx].size = BIT_ULL(size);
+
+ return temp;
+}
+
+/**
+ * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer
+ * @dev: ptr to shared code device
+ * @buf: ptr to fpm query buffer
+ * @hmc_info: ptr to irdma_hmc_obj_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ *
+ * parses fpm query buffer and copies the max_cnt and
+ * size values of hmc objects into hmc_info
+ */
+static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+{
+ struct irdma_hmc_obj_info *obj_info;
+ u64 temp;
+ u32 size;
+ u16 max_pe_sds;
+
+ obj_info = hmc_info->hmc_obj;
+
+ get_64bit_val(buf, 0, &temp);
+ hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
+
+ hmc_fpm_misc->max_sds = max_pe_sds;
+ hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
+ get_64bit_val(buf, 8, &temp);
+ obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_QPS, temp);
+ size = (u32)(temp >> 32);
+ obj_info[IRDMA_HMC_IW_QP].size = BIT_ULL(size);
+
+ get_64bit_val(buf, 16, &temp);
+ obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)FIELD_GET(IRDMA_QUERY_FPM_MAX_CQS, temp);
+ size = (u32)(temp >> 32);
+ obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
+
+ irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
+ irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
+
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+ irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
+ irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
+
+ get_64bit_val(buf, 64, &temp);
+ obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_XFFL].size = 4;
+ hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->xf_block_size)
+ return -EINVAL;
+
+ irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
+ get_64bit_val(buf, 80, &temp);
+ obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_Q1FL].size = 4;
+
+ hmc_fpm_misc->q1_block_size = FIELD_GET(IRDMA_QUERY_FPM_Q1BLOCKSIZE, temp);
+ if (!hmc_fpm_misc->q1_block_size)
+ return -EINVAL;
+
+ irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER);
+
+ get_64bit_val(buf, 112, &temp);
+ obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_PBLE].size = 8;
+
+ get_64bit_val(buf, 120, &temp);
+ hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
+ hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
+ hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return 0;
+ irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
+ irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV);
+ irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF);
+
+ get_64bit_val(buf, 136, &temp);
+ obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_RRFFL].size = 4;
+ hmc_fpm_misc->rrf_block_size = FIELD_GET(IRDMA_QUERY_FPM_RRFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->rrf_block_size &&
+ obj_info[IRDMA_HMC_IW_RRFFL].max_cnt)
+ return -EINVAL;
+
+ irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
+ irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
+ irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
+
+ get_64bit_val(buf, 168, &temp);
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
+ hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->ooiscf_block_size &&
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_find_reg_cq - find cq ctx index
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
+ struct irdma_sc_cq *cq)
+{
+ u32 i;
+
+ for (i = 0; i < ceq->reg_cq_size; i++) {
+ if (cq == ceq->reg_cq[i])
+ return i;
+ }
+
+ return IRDMA_INVALID_CQ_IDX;
+}
+
+/**
+ * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+
+ if (ceq->reg_cq_size == ceq->elem_cnt) {
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+ return -ENOMEM;
+ }
+
+ ceq->reg_cq[ceq->reg_cq_size++] = cq;
+
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
+ * @ceq: ceq sc structure
+ * @cq: cq sc structure
+ */
+void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
+{
+ unsigned long flags;
+ u32 cq_ctx_idx;
+
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+ cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
+ if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
+ goto exit;
+
+ ceq->reg_cq_size--;
+ if (cq_ctx_idx != ceq->reg_cq_size)
+ ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
+ ceq->reg_cq[ceq->reg_cq_size] = NULL;
+
+exit:
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+}
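+
+/*
+ * Illustrative sketch, not part of the upstream driver: a CQ that should be
+ * dispatched from a CEQ is added to the tracking array before use and
+ * removed before the CQ is destroyed.  'ceq' and 'cq' are assumed to be
+ * already initialized.
+ */
+#if 0 /* example only */
+static int example_track_cq_on_ceq(struct irdma_sc_ceq *ceq,
+ struct irdma_sc_cq *cq)
+{
+ int ret = irdma_sc_add_cq_ctx(ceq, cq);
+
+ if (ret)
+ return ret; /* reg_cq array is full */
+ /* ... CQ is live; irdma_sc_process_ceq() can now look it up ... */
+ irdma_sc_remove_cq_ctx(ceq, cq);
+ return 0;
+}
+#endif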
+
+/**
+ * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
+ * @cqp: IWARP control queue pair pointer
+ * @info: IWARP control queue pair init info pointer
+ *
+ * Initializes the object and context buffers for a control Queue Pair.
+ */
+int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info)
+{
+ u8 hw_sq_size;
+
+ if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
+ info->sq_size < IRDMA_CQP_SW_SQSIZE_4 ||
+ ((info->sq_size & (info->sq_size - 1))))
+ return -EINVAL;
+
+ hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size,
+ IRDMA_QUEUE_TYPE_CQP);
+ cqp->size = sizeof(*cqp);
+ cqp->sq_size = info->sq_size;
+ cqp->hw_sq_size = hw_sq_size;
+ cqp->sq_base = info->sq;
+ cqp->host_ctx = info->host_ctx;
+ cqp->sq_pa = info->sq_pa;
+ cqp->host_ctx_pa = info->host_ctx_pa;
+ cqp->dev = info->dev;
+ cqp->struct_ver = info->struct_ver;
+ cqp->hw_maj_ver = info->hw_maj_ver;
+ cqp->hw_min_ver = info->hw_min_ver;
+ cqp->scratch_array = info->scratch_array;
+ cqp->polarity = 0;
+ cqp->en_datacenter_tcp = info->en_datacenter_tcp;
+ cqp->ena_vf_count = info->ena_vf_count;
+ cqp->hmc_profile = info->hmc_profile;
+ cqp->ceqs_per_vf = info->ceqs_per_vf;
+ cqp->disable_packed = info->disable_packed;
+ cqp->rocev2_rto_policy = info->rocev2_rto_policy;
+ cqp->protocol_used = info->protocol_used;
+ memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
+ info->dev->cqp = cqp;
+
+ IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ cqp->requested_ops = 0;
+ atomic64_set(&cqp->completed_ops, 0);
+ /* for the cqp commands backlog. */
+ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
+
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%pK] cqp[%p] polarity[x%04x]\n",
+ cqp->sq_size, cqp->hw_sq_size, cqp->sq_base,
+ (u64 *)(uintptr_t)cqp->sq_pa, cqp, cqp->polarity);
+ return 0;
+}
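+
+/*
+ * Illustrative sketch, not part of the upstream driver: the sq_size check in
+ * irdma_sc_cqp_init() accepts only powers of two between
+ * IRDMA_CQP_SW_SQSIZE_4 and IRDMA_CQP_SW_SQSIZE_2048, i.e. it behaves like
+ * the hypothetical helper below.
+ */
+#if 0 /* example only */
+static bool example_valid_cqp_sq_size(u32 sq_size)
+{
+ return sq_size >= IRDMA_CQP_SW_SQSIZE_4 &&
+ sq_size <= IRDMA_CQP_SW_SQSIZE_2048 &&
+ is_power_of_2(sq_size);
+}
+#endif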
+
+/**
+ * irdma_sc_cqp_create - create cqp during bringup
+ * @cqp: struct for cqp hw
+ * @maj_err: If error, major err number
+ * @min_err: If error, minor err number
+ */
+int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
+{
+ u64 temp;
+ u8 hw_rev;
+ u32 cnt = 0, p1, p2, val = 0, err_code;
+ int ret_code;
+
+ hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev;
+ cqp->sdbuf.size = ALIGN(IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size,
+ IRDMA_SD_BUF_ALIGNMENT);
+ cqp->sdbuf.va = dma_alloc_coherent(cqp->dev->hw->device,
+ cqp->sdbuf.size, &cqp->sdbuf.pa,
+ GFP_KERNEL);
+ if (!cqp->sdbuf.va)
+ return -ENOMEM;
+
+ spin_lock_init(&cqp->dev->cqp_lock);
+
+ temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
+ FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
+ FIELD_PREP(IRDMA_CQPHC_DISABLE_PFPDUS, cqp->disable_packed) |
+ FIELD_PREP(IRDMA_CQPHC_CEQPERVF, cqp->ceqs_per_vf);
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_ROCEV2_RTO_POLICY,
+ cqp->rocev2_rto_policy) |
+ FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
+ cqp->protocol_used);
+ }
+
+ set_64bit_val(cqp->host_ctx, 0, temp);
+ set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
+
+ temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
+ FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
+ set_64bit_val(cqp->host_ctx, 16, temp);
+ set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
+ temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
+ FIELD_PREP(IRDMA_CQPHC_HW_MINVER, cqp->hw_min_ver);
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_MIN_RATE, cqp->dcqcn_params.min_rate) |
+ FIELD_PREP(IRDMA_CQPHC_MIN_DEC_FACTOR, cqp->dcqcn_params.min_dec_factor);
+ }
+ set_64bit_val(cqp->host_ctx, 32, temp);
+ set_64bit_val(cqp->host_ctx, 40, 0);
+ temp = 0;
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_T, cqp->dcqcn_params.dcqcn_t) |
+ FIELD_PREP(IRDMA_CQPHC_RAI_FACTOR, cqp->dcqcn_params.rai_factor) |
+ FIELD_PREP(IRDMA_CQPHC_HAI_FACTOR, cqp->dcqcn_params.hai_factor);
+ }
+ set_64bit_val(cqp->host_ctx, 48, temp);
+ temp = 0;
+ if (hw_rev >= IRDMA_GEN_2) {
+ temp |= FIELD_PREP(IRDMA_CQPHC_DCQCN_B, cqp->dcqcn_params.dcqcn_b) |
+ FIELD_PREP(IRDMA_CQPHC_DCQCN_F, cqp->dcqcn_params.dcqcn_f) |
+ FIELD_PREP(IRDMA_CQPHC_CC_CFG_VALID, cqp->dcqcn_params.cc_cfg_valid) |
+ FIELD_PREP(IRDMA_CQPHC_RREDUCE_MPERIOD, cqp->dcqcn_params.rreduce_mperiod);
+ }
+ set_64bit_val(cqp->host_ctx, 56, temp);
+ print_hex_dump_debug("WQE: CQP_HOST_CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8, false);
+ p1 = cqp->host_ctx_pa >> 32;
+ p2 = (u32)cqp->host_ctx_pa;
+
+ writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
+ writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
+
+ do {
+ if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
+ ret_code = -ETIMEDOUT;
+ goto err;
+ }
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ } while (!val);
+
+ if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) {
+ ret_code = -EOPNOTSUPP;
+ goto err;
+ }
+
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+ return 0;
+
+err:
+ dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
+ cqp->sdbuf.va, cqp->sdbuf.pa);
+ cqp->sdbuf.va = NULL;
+ err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ *min_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MINOR_CODE, err_code);
+ *maj_err = FIELD_GET(IRDMA_CQPERRCODES_CQP_MAJOR_CODE, err_code);
+ return ret_code;
+}
+
+/**
+ * irdma_sc_cqp_post_sq - post to cqp's sq
+ * @cqp: struct for cqp hw
+ */
+void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp)
+{
+ writel(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db);
+
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: CQP SQ head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail, cqp->sq_ring.size);
+}
+
+/**
+ * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq
+ * and pass back index
+ * @cqp: CQP HW structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index of CQP SQ
+ */
+__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
+ u32 *wqe_idx)
+{
+ __le64 *wqe = NULL;
+ int ret_code;
+
+ if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) {
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n",
+ cqp->sq_ring.head, cqp->sq_ring.tail,
+ cqp->sq_ring.size);
+ return NULL;
+ }
+ IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ cqp->requested_ops++;
+ if (!*wqe_idx)
+ cqp->polarity = !cqp->polarity;
+ wqe = cqp->sq_base[*wqe_idx].elem;
+ cqp->scratch_array[*wqe_idx] = scratch;
+ IRDMA_CQP_INIT_WQE(wqe);
+
+ return wqe;
+}
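+
+/*
+ * Illustrative sketch, not part of the upstream driver: every CQP command in
+ * this file follows the same sequence - reserve a WQE, fill its fields,
+ * issue dma_wmb(), write the header with the valid bit last, then ring the
+ * doorbell.  'cqp', 'scratch', 'some_payload' and 'some_opcode' are assumed.
+ */
+#if 0 /* example only */
+ u32 wqe_idx;
+ __le64 *wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+ u64 hdr;
+
+ if (!wqe)
+ return -ENOMEM;
+ set_64bit_val(wqe, 16, some_payload);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, some_opcode) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+ set_64bit_val(wqe, 24, hdr);
+ irdma_sc_cqp_post_sq(cqp);
+#endif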
+
+/**
+ * irdma_sc_cqp_destroy - destroy cqp during close
+ * @cqp: struct for cqp hw
+ */
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
+{
+ u32 cnt = 0, val;
+ int ret_code = 0;
+
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]);
+ do {
+ if (cnt++ > cqp->dev->hw_attrs.max_done_count) {
+ ret_code = -ETIMEDOUT;
+ break;
+ }
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE));
+
+ dma_free_coherent(cqp->dev->hw->device, cqp->sdbuf.size,
+ cqp->sdbuf.va, cqp->sdbuf.pa);
+ cqp->sdbuf.va = NULL;
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ccq_arm - enable intr for control cq
+ * @ccq: ccq sc struct
+ */
+void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_seq_num;
+
+ get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_seq_num++;
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
+ set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
+
+ dma_wmb(); /* make sure shadow area is updated before arming */
+
+ writel(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db);
+}
+
+/**
+ * irdma_sc_ccq_get_cqe_info - get ccq's cq entry
+ * @ccq: ccq sc struct
+ * @info: completion q entry to return
+ */
+int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info)
+{
+ u64 qp_ctx, temp, temp1;
+ __le64 *cqe;
+ struct irdma_sc_cqp *cqp;
+ u32 wqe_idx;
+ u32 error;
+ u8 polarity;
+ int ret_code = 0;
+
+ if (ccq->cq_uk.avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk);
+
+ get_64bit_val(cqe, 24, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, temp);
+ if (polarity != ccq->cq_uk.polarity)
+ return -ENOENT;
+
+ /* Ensure CEQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ get_64bit_val(cqe, 8, &qp_ctx);
+ cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx;
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp);
+ info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR;
+ info->min_err_code = (u16)FIELD_GET(IRDMA_CQ_MINERR, temp);
+ if (info->error) {
+ info->maj_err_code = (u16)FIELD_GET(IRDMA_CQ_MAJERR, temp);
+ error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]);
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: CQPERRCODES error_code[x%08X]\n", error);
+ }
+
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, temp);
+ info->scratch = cqp->scratch_array[wqe_idx];
+
+ get_64bit_val(cqe, 16, &temp1);
+ info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
+ get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
+ info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
+ info->cqp = cqp;
+
+ /* move the head for cq */
+ IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
+ if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring))
+ ccq->cq_uk.polarity ^= 1;
+
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
+ set_64bit_val(ccq->cq_uk.shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring));
+
+ dma_wmb(); /* make sure shadow area is updated before moving tail */
+
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ atomic64_inc(&cqp->completed_ops);
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
+ * @cqp: struct for cqp hw
+ * @op_code: cqp opcode for completion
+ * @compl_info: completion q entry to return
+ */
+int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code,
+ struct irdma_ccq_cqe_info *compl_info)
+{
+ struct irdma_ccq_cqe_info info = {};
+ struct irdma_sc_cq *ccq;
+ int ret_code = 0;
+ u32 cnt = 0;
+
+ ccq = cqp->dev->ccq;
+ while (1) {
+ if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count)
+ return -ETIMEDOUT;
+
+ if (irdma_sc_ccq_get_cqe_info(ccq, &info)) {
+ udelay(cqp->dev->hw_attrs.max_sleep_count);
+ continue;
+ }
+ if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) {
+ ret_code = -EIO;
+ break;
+ }
+ /* make sure op code matches */
+ if (op_code == info.op_code)
+ break;
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "WQE: opcode mismatch for my op code 0x%x, returned opcode %x\n",
+ op_code, info.op_code);
+ }
+
+ if (compl_info)
+ memcpy(compl_info, &info, sizeof(*compl_info));
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_manage_hmc_pm_func_table - manage hmc pm function table
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @info: info for the manage function table operation
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp,
+ struct irdma_hmc_fcn_info *info,
+ u64 scratch, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+ set_64bit_val(wqe, 32, 0);
+ set_64bit_val(wqe, 40, 0);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56, 0);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_MHMC_VFIDX, info->vf_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE) |
+ FIELD_PREP(IRDMA_CQPSQ_MHMC_FREEPMFN, info->free_fcn) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: MANAGE_HMC_PM_FUNC_TABLE WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_commit_fpm_val_done - wait for cqp cqe completion
+ * for fpm commit
+ * @cqp: struct for cqp hw
+ */
+static int irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL,
+ NULL);
+}
+
+/**
+ * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @commit_fpm_mem: Memory for fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static int irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *commit_fpm_mem,
+ bool post_sq, u8 wait_type)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 tail, val, error;
+ int ret_code = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, commit_fpm_mem->pa);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_BUFSIZE, IRDMA_COMMIT_FPM_BUF_SIZE) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_COMMIT_FPM_VAL) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: COMMIT_FPM_VAL WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
+ ret_code = irdma_sc_commit_fpm_val_done(cqp);
+ }
+
+ return ret_code;
+}
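+
+/*
+ * Illustrative sketch, not part of the upstream driver: wait_type selects
+ * how completion is detected - by polling the CQP tail registers (used
+ * before the CCQ exists) or by polling the control CQ.  'dev', 'hmc_fn_id',
+ * 'mem' and 'ret' are assumed to exist.
+ */
+#if 0 /* example only */
+ /* early bring-up, before the CCQ is created: poll CQP registers */
+ ret = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_fn_id, &mem, true,
+ IRDMA_CQP_WAIT_POLL_REGS);
+ /* once the CCQ is up: poll it for the commit completion instead */
+ ret = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_fn_id, &mem, true,
+ IRDMA_CQP_WAIT_POLL_CQ);
+#endif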
+
+/**
+ * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for
+ * query fpm
+ * @cqp: struct for cqp hw
+ */
+static int irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL,
+ NULL);
+}
+
+/**
+ * irdma_sc_query_fpm_val - cqp wqe query fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @query_fpm_mem: memory for return fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static int irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id,
+ struct irdma_dma_mem *query_fpm_mem,
+ bool post_sq, u8 wait_type)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 tail, val, error;
+ int ret_code = 0;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, hmc_fn_id);
+ set_64bit_val(wqe, 32, query_fpm_mem->pa);
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_QUERY_FPM_VAL) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: QUERY_FPM WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (wait_type == IRDMA_CQP_WAIT_POLL_REGS)
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ)
+ ret_code = irdma_sc_query_fpm_val_done(cqp);
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ceq_init - initialize ceq
+ * @ceq: ceq sc structure
+ * @info: ceq initialization info
+ */
+int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size ||
+ info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size)
+ return -EINVAL;
+
+ if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ ceq->size = sizeof(*ceq);
+ ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base;
+ ceq->ceq_id = info->ceq_id;
+ ceq->dev = info->dev;
+ ceq->elem_cnt = info->elem_cnt;
+ ceq->ceq_elem_pa = info->ceqe_pa;
+ ceq->virtual_map = info->virtual_map;
+ ceq->itr_no_expire = info->itr_no_expire;
+ ceq->reg_cq = info->reg_cq;
+ ceq->reg_cq_size = 0;
+ spin_lock_init(&ceq->req_cq_lock);
+ ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
+ ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
+ ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
+ ceq->tph_en = info->tph_en;
+ ceq->tph_val = info->tph_val;
+ ceq->vsi = info->vsi;
+ ceq->polarity = 1;
+ IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
+ ceq->dev->ceq[info->ceq_id] = ceq;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ceq_create - create ceq wqe
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = ceq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 32,
+ (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
+ set_64bit_val(wqe, 48,
+ (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
+ hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_cceq_create_done - poll for control ceq wqe to complete
+ * @ceq: ceq sc structure
+ */
+static int irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ cqp = ceq->dev->cqp;
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ,
+ NULL);
+}
+
+/**
+ * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete
+ * @ceq: ceq sc structure
+ */
+int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ if (ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
+
+ cqp = ceq->dev->cqp;
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ,
+ NULL);
+}
+
+/**
+ * irdma_sc_cceq_create - create cceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
+{
+ int ret_code;
+ struct irdma_sc_dev *dev = ceq->dev;
+
+ dev->ccq->vsi = ceq->vsi;
+ if (ceq->reg_cq) {
+ ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
+ if (ret_code)
+ return ret_code;
+ }
+
+ ret_code = irdma_sc_ceq_create(ceq, scratch, true);
+ if (!ret_code)
+ return irdma_sc_cceq_create_done(ceq);
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_ceq_destroy - destroy ceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = ceq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, ceq->elem_cnt);
+ set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
+ hdr = ceq->ceq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_process_ceq - process ceq
+ * @dev: sc device struct
+ * @ceq: ceq sc structure
+ *
+ * It is expected that the caller serializes this function with
+ * irdma_sc_cleanup_ceqes() because both functions manipulate the same ceq
+ */
+void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
+{
+ u64 temp;
+ __le64 *ceqe;
+ struct irdma_sc_cq *cq = NULL;
+ struct irdma_sc_cq *temp_cq;
+ u8 polarity;
+ u32 cq_idx;
+ unsigned long flags;
+
+ do {
+ cq_idx = 0;
+ ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq);
+ get_64bit_val(ceqe, 0, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
+ if (polarity != ceq->polarity)
+ return NULL;
+
+ temp_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
+ if (!temp_cq) {
+ cq_idx = IRDMA_INVALID_CQ_IDX;
+ IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
+
+ if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
+ ceq->polarity ^= 1;
+ continue;
+ }
+
+ cq = temp_cq;
+ if (ceq->reg_cq) {
+ spin_lock_irqsave(&ceq->req_cq_lock, flags);
+ cq_idx = irdma_sc_find_reg_cq(ceq, cq);
+ spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
+ }
+
+ IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
+ if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
+ ceq->polarity ^= 1;
+ } while (cq_idx == IRDMA_INVALID_CQ_IDX);
+
+ if (cq)
+ irdma_sc_cq_ack(cq);
+ return cq;
+}
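+
+/*
+ * Illustrative sketch, not part of the upstream driver: a CEQ interrupt
+ * handler drains the ring by calling irdma_sc_process_ceq() until it
+ * returns NULL; each returned CQ has already been acked and only needs to
+ * be handed to its consumer.  'dev' and 'ceq' are assumed to be live.
+ */
+#if 0 /* example only */
+ struct irdma_sc_cq *cq;
+
+ while ((cq = irdma_sc_process_ceq(dev, ceq))) {
+ /* notify the consumer of 'cq', e.g. schedule its completion work */
+ }
+#endif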
+
+/**
+ * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq
+ * @cq: cq for which the ceqes need to be cleaned up
+ * @ceq: ceq ptr
+ *
+ * This function is called after the cq is destroyed to clean up
+ * its pending ceqe entries. It is expected that the caller serializes
+ * it with irdma_sc_process_ceq() running in interrupt context.
+ */
+void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq)
+{
+ struct irdma_sc_cq *next_cq;
+ u8 ceq_polarity = ceq->polarity;
+ __le64 *ceqe;
+ u8 polarity;
+ u64 temp;
+ int next;
+ u32 i;
+
+ next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0);
+
+ for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) {
+ ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next);
+
+ get_64bit_val(ceqe, 0, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_CEQE_VALID, temp);
+ if (polarity != ceq_polarity)
+ return;
+
+ next_cq = (struct irdma_sc_cq *)(unsigned long)(temp << 1);
+ if (cq == next_cq)
+ set_64bit_val(ceqe, 0, temp & IRDMA_CEQE_VALID);
+
+ next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i);
+ if (!next)
+ ceq_polarity ^= 1;
+ }
+}
+
+/**
+ * irdma_sc_aeq_init - initialize aeq
+ * @aeq: aeq structure ptr
+ * @info: aeq initialization info
+ */
+int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size ||
+ info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size)
+ return -EINVAL;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ aeq->size = sizeof(*aeq);
+ aeq->polarity = 1;
+ aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base;
+ aeq->dev = info->dev;
+ aeq->elem_cnt = info->elem_cnt;
+ aeq->aeq_elem_pa = info->aeq_elem_pa;
+ IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
+ aeq->virtual_map = info->virtual_map;
+ aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
+ aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
+ aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
+ aeq->msix_idx = info->msix_idx;
+ info->dev->aeq = aeq;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_aeq_create - create aeq
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+
+ cqp = aeq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 32,
+ (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
+ set_64bit_val(wqe, 48,
+ (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: AEQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_aeq_destroy - destroy aeq during close
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_dev *dev;
+ u64 hdr;
+
+ dev = aeq->dev;
+ writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+ set_64bit_val(wqe, 16, aeq->elem_cnt);
+ set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
+ FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: AEQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * irdma_sc_get_next_aeqe - get next aeq entry
+ * @aeq: aeq structure ptr
+ * @info: aeqe info to be returned
+ */
+int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info)
+{
+ u64 temp, compl_ctx;
+ __le64 *aeqe;
+ u8 ae_src;
+ u8 polarity;
+
+ aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq);
+ get_64bit_val(aeqe, 8, &temp);
+ polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp);
+
+ if (aeq->polarity != polarity)
+ return -ENOENT;
+
+ /* Ensure AEQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ get_64bit_val(aeqe, 0, &compl_ctx);
+
+ print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ aeqe, 16, false);
+
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
+ ((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
+
+ info->ae_src = ae_src;
+ switch (info->ae_id) {
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
+ case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
+ case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG:
+ case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_BAD_CLOSE:
+ case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO:
+ case IRDMA_AE_STAG_ZERO_INVALID:
+ case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_INVALID_ARP_ENTRY:
+ case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
+ case IRDMA_AE_STALE_ARP_ENTRY:
+ case IRDMA_AE_INVALID_AH_ENTRY:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LLP_DOUBT_REACHABILITY:
+ case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
+ case IRDMA_AE_RESET_SENT:
+ case IRDMA_AE_TERMINATE_SENT:
+ case IRDMA_AE_RESET_NOT_SENT:
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ info->cq = true;
+ info->compl_ctx = compl_ctx << 1;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
+ case IRDMA_AE_ROCE_EMPTY_MCG:
+ case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
+ case IRDMA_AE_ROCE_BAD_MC_QPID:
+ case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH:
+ fallthrough;
+ case IRDMA_AE_LLP_CONNECTION_RESET:
+ case IRDMA_AE_LLP_SYN_RECEIVED:
+ case IRDMA_AE_LLP_FIN_RECEIVED:
+ case IRDMA_AE_LLP_CLOSE_COMPLETE:
+ case IRDMA_AE_LLP_TERMINATE_RECEIVED:
+ case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ default:
+ break;
+ }
+
+ switch (ae_src) {
+ case IRDMA_AE_SOURCE_RQ:
+ case IRDMA_AE_SOURCE_RQ_0011:
+ info->qp = true;
+ info->rq = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_SOURCE_CQ:
+ case IRDMA_AE_SOURCE_CQ_0110:
+ case IRDMA_AE_SOURCE_CQ_1010:
+ case IRDMA_AE_SOURCE_CQ_1110:
+ info->cq = true;
+ info->compl_ctx = compl_ctx << 1;
+ break;
+ case IRDMA_AE_SOURCE_SQ:
+ case IRDMA_AE_SOURCE_SQ_0111:
+ info->qp = true;
+ info->sq = true;
+ info->compl_ctx = compl_ctx;
+ break;
+ case IRDMA_AE_SOURCE_IN_RR_WR:
+ case IRDMA_AE_SOURCE_IN_RR_WR_1011:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
+ case IRDMA_AE_SOURCE_OUT_RR:
+ case IRDMA_AE_SOURCE_OUT_RR_1111:
+ info->qp = true;
+ info->compl_ctx = compl_ctx;
+ info->out_rdrsp = true;
+ break;
+ case IRDMA_AE_SOURCE_RSVD:
+ default:
+ break;
+ }
+
+ IRDMA_RING_MOVE_TAIL(aeq->aeq_ring);
+ if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring))
+ aeq->polarity ^= 1;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_repost_aeq_entries - repost completed aeq entries
+ * @dev: sc device struct
+ * @count: allocate count
+ */
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count)
+{
+ writel(count, dev->hw_regs[IRDMA_AEQALLOC]);
+}
+
+/**
+ * irdma_sc_ccq_init - initialize control cq
+ * @cq: sc's cq struct
+ * @info: info for control cq initialization
+ */
+int irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info)
+{
+ u32 pble_obj_cnt;
+
+ if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size ||
+ info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size)
+ return -EINVAL;
+
+ if (info->ceq_id >= info->dev->hmc_fpm_misc.max_ceqs)
+ return -EINVAL;
+
+ pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
+
+ if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt)
+ return -EINVAL;
+
+ cq->cq_pa = info->cq_pa;
+ cq->cq_uk.cq_base = info->cq_base;
+ cq->shadow_area_pa = info->shadow_area_pa;
+ cq->cq_uk.shadow_area = info->shadow_area;
+ cq->shadow_read_threshold = info->shadow_read_threshold;
+ cq->dev = info->dev;
+ cq->ceq_id = info->ceq_id;
+ cq->cq_uk.cq_size = info->num_elem;
+ cq->cq_type = IRDMA_CQ_TYPE_CQP;
+ cq->ceqe_mask = info->ceqe_mask;
+ IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
+ cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
+ cq->ceq_id_valid = info->ceq_id_valid;
+ cq->tph_en = info->tph_en;
+ cq->tph_val = info->tph_val;
+ cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
+ cq->pbl_list = info->pbl_list;
+ cq->virtual_map = info->virtual_map;
+ cq->pbl_chunk_size = info->pbl_chunk_size;
+ cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ cq->cq_uk.polarity = true;
+ cq->vsi = info->vsi;
+ cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db;
+
+ /* Only applicable to CQs other than CCQ so initialize to zero */
+ cq->cq_uk.cqe_alloc_db = NULL;
+
+ info->dev->ccq = cq;
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_create_done - poll cqp for ccq create
+ * @ccq: ccq sc struct
+ */
+static inline int irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq)
+{
+ struct irdma_sc_cqp *cqp;
+
+ cqp = ccq->dev->cqp;
+
+ return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL);
+}
+
+/**
+ * irdma_sc_ccq_create - create control cq
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: overflow flag for ccq
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq)
+{
+ int ret_code;
+
+ ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq);
+ if (ret_code)
+ return ret_code;
+
+ if (post_sq) {
+ ret_code = irdma_sc_ccq_create_done(ccq);
+ if (ret_code)
+ return ret_code;
+ }
+ ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd;
+
+ return 0;
+}
+
+/**
+ * irdma_sc_ccq_destroy - destroy ccq during close
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ int ret_code = 0;
+ u32 tail, val, error;
+
+ cqp = ccq->dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)ccq >> 1);
+ set_64bit_val(wqe, 40, ccq->shadow_area_pa);
+
+ hdr = ccq->cq_uk.cq_id |
+ FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0),
+ IRDMA_CQPSQ_CQ_CEQID) |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, ccq->ceqe_mask) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, ccq->ceq_id_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_TPHEN, ccq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT, ccq->cq_uk.avoid_mem_cflct) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: CCQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ ret_code = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ }
+
+ cqp->process_cqp_sds = irdma_update_sds_noccq;
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
+ * @dev: ptr to irdma_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
+{
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ struct irdma_dma_mem query_fpm_mem;
+ int ret_code = 0;
+ u8 wait_type;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ query_fpm_mem.pa = dev->fpm_query_buf_pa;
+ query_fpm_mem.va = dev->fpm_query_buf;
+ hmc_info->hmc_fn_id = hmc_fn_id;
+ wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
+
+ ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
+ &query_fpm_mem, true, wait_type);
+ if (ret_code)
+ return ret_code;
+
+ /* parse the fpm_query_buf and fill hmc obj info */
+ ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info,
+ hmc_fpm_misc);
+
+ print_hex_dump_debug("HMC: QUERY FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE,
+ false);
+ return ret_code;
+}
+
+/**
+ * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
+ * command and populates fpm base address in hmc_info
+ * @dev: ptr to irdma_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
+{
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_obj_info *obj_info;
+ __le64 *buf;
+ struct irdma_dma_mem commit_fpm_mem;
+ int ret_code = 0;
+ u8 wait_type;
+
+ hmc_info = dev->hmc_info;
+ obj_info = hmc_info->hmc_obj;
+ buf = dev->fpm_commit_buf;
+
+ set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
+ set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
+ set_64bit_val(buf, 16, (u64)0); /* RSRVD */
+ set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
+ set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
+ set_64bit_val(buf, 40, (u64)0); /* RSVD */
+ set_64bit_val(buf, 48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt);
+ set_64bit_val(buf, 56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt);
+ set_64bit_val(buf, 64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt);
+ set_64bit_val(buf, 72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt);
+ set_64bit_val(buf, 80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt);
+ set_64bit_val(buf, 88,
+ (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt);
+ set_64bit_val(buf, 96,
+ (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt);
+ set_64bit_val(buf, 104,
+ (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt);
+ set_64bit_val(buf, 112,
+ (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt);
+ set_64bit_val(buf, 120, (u64)0); /* RSVD */
+ set_64bit_val(buf, 128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt);
+ set_64bit_val(buf, 136,
+ (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt);
+ set_64bit_val(buf, 144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt);
+ set_64bit_val(buf, 152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt);
+ set_64bit_val(buf, 160,
+ (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
+ set_64bit_val(buf, 168,
+ (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
+
+ commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
+ commit_fpm_mem.va = dev->fpm_commit_buf;
+
+ wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS;
+ print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
+ false);
+ ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id,
+ &commit_fpm_mem, true, wait_type);
+ if (!ret_code)
+ irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf,
+ hmc_info->hmc_obj,
+ &hmc_info->sd_table.sd_cnt);
+ print_hex_dump_debug("HMC: COMMIT FPM BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE,
+ false);
+
+ return ret_code;
+}
+
+/**
+ * cqp_sds_wqe_fill - fill cqp wqe for sd
+ * @cqp: struct for cqp hw
+ * @info: sd info for wqe
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp,
+ struct irdma_update_sds_info *info, u64 scratch)
+{
+ u64 data;
+ u64 hdr;
+ __le64 *wqe;
+ int mem_entries, wqe_entries;
+ struct irdma_dma_mem *sdbuf = &cqp->sdbuf;
+ u64 offset = 0;
+ u32 wqe_idx;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
+ mem_entries = info->cnt - wqe_entries;
+
+ if (mem_entries) {
+ offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE;
+ memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4);
+
+ data = (u64)sdbuf->pa + offset;
+ } else {
+ data = 0;
+ }
+ data |= FIELD_PREP(IRDMA_CQPSQ_UPESD_HMCFNID, info->hmc_fn_id);
+ set_64bit_val(wqe, 16, data);
+
+ switch (wqe_entries) {
+ case 3:
+ set_64bit_val(wqe, 48,
+ (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[2].cmd) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
+
+ set_64bit_val(wqe, 56, info->entry[2].data);
+ fallthrough;
+ case 2:
+ set_64bit_val(wqe, 32,
+ (FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[1].cmd) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_VALID, 1)));
+
+ set_64bit_val(wqe, 40, info->entry[1].data);
+ fallthrough;
+ case 1:
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_SDCMD, info->entry[0].cmd));
+
+ set_64bit_val(wqe, 8, info->entry[0].data);
+ break;
+ default:
+ break;
+ }
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_UPDATE_PE_SDS) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_UPESD_ENTRY_COUNT, mem_entries);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (mem_entries)
+ print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE Buffer",
+ DUMP_PREFIX_OFFSET, 16, 8,
+ (char *)sdbuf->va + offset,
+ mem_entries << 4, false);
+
+ print_hex_dump_debug("WQE: UPDATE_PE_SDS WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+
+ return 0;
+}
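+
+/*
+ * Illustrative sketch, not part of the upstream driver, with a hypothetical
+ * info->cnt of 5: the first three SD entries are written inline into the
+ * WQE (offsets 0, 32 and 48), and the remaining mem_entries = 5 - 3 = 2
+ * entries (16 bytes each, 32 bytes total) are copied into the sdbuf DMA
+ * area whose address is placed at WQE offset 16.
+ */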
+
+/**
+ * irdma_update_pe_sds - cqp wqe for sd
+ * @dev: ptr to irdma_dev struct
+ * @info: sd info for SDs
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_update_pe_sds(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info, u64 scratch)
+{
+ struct irdma_sc_cqp *cqp = dev->cqp;
+ int ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
+ if (!ret_code)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return ret_code;
+}
+
+/**
+ * irdma_update_sds_noccq - update sd before ccq created
+ * @dev: sc device struct
+ * @info: sd info for SDs
+ */
+int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info)
+{
+ u32 error, val, tail;
+ struct irdma_sc_cqp *cqp = dev->cqp;
+ int ret_code;
+
+ ret_code = cqp_sds_wqe_fill(cqp, info, 0);
+ if (ret_code)
+ return ret_code;
+
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ irdma_sc_cqp_post_sq(cqp);
+ return irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+}
+
+/**
+ * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @post_sq: flag for cqp db to ring
+ * @poll_registers: flag to poll register for cqp completion
+ */
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers)
+{
+ u64 hdr;
+ __le64 *wqe;
+ u32 tail, val, error;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID, hmc_fn_id));
+
+ hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE,
+ IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SHMC_PAGES_ALLOCATED WQE",
+ DUMP_PREFIX_OFFSET, 16, 8, wqe,
+ IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+ if (post_sq) {
+ irdma_sc_cqp_post_sq(cqp);
+ if (poll_registers)
+ /* check for cqp sq tail update */
+ return irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ else
+ return irdma_sc_poll_for_cqp_op_done(cqp,
+ IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED,
+ NULL);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_cqp_ring_full - check if cqp ring is full
+ * @cqp: struct for cqp hw
+ */
+static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
+{
+ return IRDMA_RING_FULL_ERR(cqp->sq_ring);
+}
+
+/**
+ * irdma_est_sd - returns approximate number of SDs for HMC
+ * @dev: sc device struct
+ * @hmc_info: hmc structure, size and count for HMC objects
+ */
+static u32 irdma_est_sd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info)
+{
+ int i;
+ u64 size = 0;
+ u64 sd;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ if (i != IRDMA_HMC_IW_PBLE)
+ size += round_up(hmc_info->hmc_obj[i].cnt *
+ hmc_info->hmc_obj[i].size, 512);
+ size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
+ if (size & 0x1FFFFF)
+ sd = (size >> 21) + 1; /* add 1 for remainder */
+ else
+ sd = size >> 21;
+ if (sd > 0xFFFFFFFF) {
+ ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
+ sd = 0xFFFFFFFF - 1;
+ }
+
+ return (u32)sd;
+}
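+
+/*
+ * Illustrative sketch, not part of the upstream driver, with hypothetical
+ * numbers: if the 512-byte-rounded non-PBLE objects total 6MB and the PBLE
+ * region adds 3MB, size = 9MB.  9MB is not a multiple of the 2MB (1 << 21)
+ * SD granularity, so irdma_est_sd() returns (9MB >> 21) + 1 = 4 + 1 = 5.
+ */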
+
+/**
+ * irdma_sc_query_rdma_features_done - poll cqp for query features done
+ * @cqp: struct for cqp hw
+ */
+static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
+{
+ return irdma_sc_poll_for_cqp_op_done(cqp,
+ IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
+ NULL);
+}
+
+/**
+ * irdma_sc_query_rdma_features - query RDMA features and FW ver
+ * @cqp: struct for cqp hw
+ * @buf: buffer to hold query info
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
+ struct irdma_dma_mem *buf, u64 scratch)
+{
+ __le64 *wqe;
+ u64 temp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ temp = buf->pa;
+ set_64bit_val(wqe, 32, temp);
+
+ temp = FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID,
+ cqp->polarity) |
+ FIELD_PREP(IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN, buf->size) |
+ FIELD_PREP(IRDMA_CQPSQ_UP_OP, IRDMA_CQP_OP_QUERY_RDMA_FEATURES);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, temp);
+
+ print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
+ 16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_get_rdma_features - get RDMA features
+ * @dev: sc device struct
+ */
+int irdma_get_rdma_features(struct irdma_sc_dev *dev)
+{
+ int ret_code;
+ struct irdma_dma_mem feat_buf;
+ u64 temp;
+ u16 byte_idx, feat_type, feat_cnt, feat_idx;
+
+ feat_buf.size = ALIGN(IRDMA_FEATURE_BUF_SIZE,
+ IRDMA_FEATURE_BUF_ALIGNMENT);
+ feat_buf.va = dma_alloc_coherent(dev->hw->device, feat_buf.size,
+ &feat_buf.pa, GFP_KERNEL);
+ if (!feat_buf.va)
+ return -ENOMEM;
+
+ ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
+ if (!ret_code)
+ ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
+ if (ret_code)
+ goto exit;
+
+ get_64bit_val(feat_buf.va, 0, &temp);
+ feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
+ if (feat_cnt < 2) {
+ ret_code = -EINVAL;
+ goto exit;
+ } else if (feat_cnt > IRDMA_MAX_FEATURES) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: feature buf size insufficient, retrying with larger buffer\n");
+ dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
+ feat_buf.pa);
+ feat_buf.va = NULL;
+ feat_buf.size = ALIGN(8 * feat_cnt,
+ IRDMA_FEATURE_BUF_ALIGNMENT);
+ feat_buf.va = dma_alloc_coherent(dev->hw->device,
+ feat_buf.size, &feat_buf.pa,
+ GFP_KERNEL);
+ if (!feat_buf.va)
+ return -ENOMEM;
+
+ ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
+ if (!ret_code)
+ ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
+ if (ret_code)
+ goto exit;
+
+ get_64bit_val(feat_buf.va, 0, &temp);
+ feat_cnt = (u16)FIELD_GET(IRDMA_FEATURE_CNT, temp);
+ if (feat_cnt < 2) {
+ ret_code = -EINVAL;
+ goto exit;
+ }
+ }
+
+ print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
+ 16, 8, feat_buf.va, feat_cnt * 8, false);
+
+ for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
+ feat_idx++, byte_idx += 8) {
+ get_64bit_val(feat_buf.va, byte_idx, &temp);
+ feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
+ if (feat_type >= IRDMA_MAX_FEATURES) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: found unrecognized feature type %d\n",
+ feat_type);
+ continue;
+ }
+ dev->feature_info[feat_type] = temp;
+ }
+exit:
+ dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
+ feat_buf.pa);
+ feat_buf.va = NULL;
+ return ret_code;
+}
+
+static u32 irdma_q1_cnt(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 qpwanted)
+{
+ u32 q1_cnt;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted);
+ } else {
+ if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512);
+ else
+ q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted;
+ }
+
+ return q1_cnt;
+}
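+
+/*
+ * Illustrative sketch, not part of the upstream driver, with hypothetical
+ * numbers: a GEN_2 device running RoCE with max_hw_ird = 64 and
+ * qpwanted = 4 gets q1_cnt = roundup_pow_of_two(64 * 2 * 4 + 512) = 1024,
+ * while an iWARP-only GEN_2 device gets 64 * 2 * 4 = 512 with no rounding.
+ */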
+
+static void cfg_fpm_value_gen_1(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 qpwanted)
+{
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes);
+}
+
+static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
+ 4 * hmc_fpm_misc->xf_block_size * qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt /
+ hmc_fpm_misc->ooiscf_block_size;
+}
+
+/**
+ * irdma_cfg_fpm_val - configure HMC objects
+ * @dev: sc device struct
+ * @qp_count: desired qp count
+ */
+int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
+{
+ struct irdma_virt_mem virt_mem;
+ u32 i, mem_size;
+ u32 qpwanted, mrwanted, pblewanted;
+ u32 powerof2, hte;
+ u32 sd_needed;
+ u32 sd_diff;
+ u32 loop_count = 0;
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ int ret_code = 0;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: irdma_sc_init_iw_hmc returned error_code = %d\n",
+ ret_code);
+ return ret_code;
+ }
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+ sd_needed = irdma_est_sd(dev, hmc_info);
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
+ sd_needed, hmc_info->first_sd_index);
+ ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
+ hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
+
+ qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
+
+ powerof2 = 1;
+ while (powerof2 <= qpwanted)
+ powerof2 *= 2;
+ powerof2 /= 2;
+ qpwanted = powerof2;
+
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
+ pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
+ qp_count, hmc_fpm_misc->max_sds,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
+
+ while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
+ qpwanted /= 2;
+
+ do {
+ ++loop_count;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
+ min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
+
+ hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
+ powerof2 = 1;
+ while (powerof2 < hte)
+ powerof2 *= 2;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
+ powerof2 * hmc_fpm_misc->ht_multiplier;
+ if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
+ else
+ cfg_fpm_value_gen_2(dev, hmc_info, qpwanted);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
+ (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+ sd_needed = irdma_est_sd(dev, hmc_info);
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: sd_needed = %d, hmc_fpm_misc->max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n",
+ sd_needed, hmc_fpm_misc->max_sds, mrwanted,
+ pblewanted, qpwanted);
+
+ /* Do not reduce resources further. All objects fit with max SDs */
+ if (sd_needed <= hmc_fpm_misc->max_sds)
+ break;
+
+ sd_diff = sd_needed - hmc_fpm_misc->max_sds;
+ if (sd_diff > 128) {
+ if (!(loop_count % 2) && qpwanted > 128) {
+ qpwanted /= 2;
+ } else {
+ mrwanted /= 2;
+ pblewanted /= 2;
+ }
+ continue;
+ }
+ if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
+ pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
+ pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
+ continue;
+ } else if (pblewanted > (100 * FPM_MULTIPLIER)) {
+ pblewanted -= 10 * FPM_MULTIPLIER;
+ } else if (pblewanted > FPM_MULTIPLIER) {
+ pblewanted -= FPM_MULTIPLIER;
+ } else if (qpwanted <= 128) {
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
+ }
+ if (mrwanted > FPM_MULTIPLIER)
+ mrwanted -= FPM_MULTIPLIER;
+ if (!(loop_count % 10) && qpwanted > 128) {
+ qpwanted /= 2;
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2;
+ }
+ } while (loop_count < 2000);
+
+ if (sd_needed > hmc_fpm_misc->max_sds) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
+ loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
+ return -EINVAL;
+ }
+
+ if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
+ pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
+ FPM_MULTIPLIER;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+ sd_needed = irdma_est_sd(dev, hmc_info);
+ }
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n",
+ loop_count, sd_needed,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt,
+ hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index);
+
+ ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
+ readl(dev->hw_regs[IRDMA_CQPERRCODES]));
+ return ret_code;
+ }
+
+ mem_size = sizeof(struct irdma_hmc_sd_entry) *
+ (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
+ virt_mem.size = mem_size;
+ virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
+ if (!virt_mem.va) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: failed to allocate memory for sd_entry buffer\n");
+ return -ENOMEM;
+ }
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+
+ return ret_code;
+}
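Note: irdma_cfg_fpm_val() above is a fit-or-shrink loop. It starts from the firmware's maximum object counts and repeatedly trims PBLEs, MRs and, as a last resort, QPs until the estimated segment-descriptor (SD) need fits under hmc_fpm_misc->max_sds. A minimal user-space sketch of that pattern, with an invented cost model standing in for irdma_est_sd() (names and numbers here are illustrative only, not the driver's API):

#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for the driver's SD estimator, irdma_est_sd(). */
static uint32_t est_sd(uint32_t qps, uint32_t mrs, uint32_t pbles)
{
	return qps / 64 + mrs / 512 + pbles / 4096;
}

int main(void)
{
	uint32_t qpwanted = 4096, mrwanted = 65536, pblewanted = 1u << 20;
	const uint32_t max_sds = 200;
	uint32_t sd_needed, loop_count = 0;

	do {
		sd_needed = est_sd(qpwanted, mrwanted, pblewanted);
		if (sd_needed <= max_sds)
			break;			/* everything fits: stop shrinking */
		/* Trim the cheaper objects first, QPs only as a last resort. */
		if (pblewanted > 4096)
			pblewanted /= 2;
		else if (mrwanted > 512)
			mrwanted /= 2;
		else
			qpwanted /= 2;
	} while (++loop_count < 2000);		/* bounded, like the driver's loop */

	printf("qp=%u mr=%u pble=%u -> sd=%u\n",
	       qpwanted, mrwanted, pblewanted, sd_needed);
	return sd_needed <= max_sds ? 0 : 1;
}

The bounded loop counter mirrors the driver's loop_count < 2000 guard, so the search terminates even when the SD budget can never be met.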
+
+/**
+ * irdma_exec_cqp_cmd - execute cqp cmd when wqes are available
+ * @dev: rdma device
+ * @pcmdinfo: cqp command info
+ */
+static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
+{
+ int status;
+ struct irdma_dma_mem val_mem;
+ bool alloc = false;
+
+ dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
+ switch (pcmdinfo->cqp_cmd) {
+ case IRDMA_OP_CEQ_DESTROY:
+ status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
+ pcmdinfo->in.u.ceq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_AEQ_DESTROY:
+ status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
+ pcmdinfo->in.u.aeq_destroy.scratch,
+ pcmdinfo->post_sq);
+
+ break;
+ case IRDMA_OP_CEQ_CREATE:
+ status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
+ pcmdinfo->in.u.ceq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_AEQ_CREATE:
+ status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
+ pcmdinfo->in.u.aeq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_UPLOAD_CONTEXT:
+ status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev,
+ &pcmdinfo->in.u.qp_upload_context.info,
+ pcmdinfo->in.u.qp_upload_context.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_CQ_CREATE:
+ status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq,
+ pcmdinfo->in.u.cq_create.scratch,
+ pcmdinfo->in.u.cq_create.check_overflow,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_CQ_MODIFY:
+ status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq,
+ &pcmdinfo->in.u.cq_modify.info,
+ pcmdinfo->in.u.cq_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_CQ_DESTROY:
+ status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq,
+ pcmdinfo->in.u.cq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_FLUSH_WQES:
+ status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp,
+ &pcmdinfo->in.u.qp_flush_wqes.info,
+ pcmdinfo->in.u.qp_flush_wqes.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_GEN_AE:
+ status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp,
+ &pcmdinfo->in.u.gen_ae.info,
+ pcmdinfo->in.u.gen_ae.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MANAGE_PUSH_PAGE:
+ status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp,
+ &pcmdinfo->in.u.manage_push_page.info,
+ pcmdinfo->in.u.manage_push_page.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_UPDATE_PE_SDS:
+ status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev,
+ &pcmdinfo->in.u.update_pe_sds.info,
+ pcmdinfo->in.u.update_pe_sds.scratch);
+ break;
+ case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE:
+ /* switch to calling through the call table */
+ status =
+ irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
+ &pcmdinfo->in.u.manage_hmc_pm.info,
+ pcmdinfo->in.u.manage_hmc_pm.scratch,
+ true);
+ break;
+ case IRDMA_OP_SUSPEND:
+ status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case IRDMA_OP_RESUME:
+ status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp,
+ pcmdinfo->in.u.suspend_resume.qp,
+ pcmdinfo->in.u.suspend_resume.scratch);
+ break;
+ case IRDMA_OP_QUERY_FPM_VAL:
+ val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa;
+ val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va;
+ status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp,
+ pcmdinfo->in.u.query_fpm_val.scratch,
+ pcmdinfo->in.u.query_fpm_val.hmc_fn_id,
+ &val_mem, true, IRDMA_CQP_WAIT_EVENT);
+ break;
+ case IRDMA_OP_COMMIT_FPM_VAL:
+ val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa;
+ val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va;
+ status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp,
+ pcmdinfo->in.u.commit_fpm_val.scratch,
+ pcmdinfo->in.u.commit_fpm_val.hmc_fn_id,
+ &val_mem,
+ true,
+ IRDMA_CQP_WAIT_EVENT);
+ break;
+ case IRDMA_OP_STATS_ALLOCATE:
+ alloc = true;
+ fallthrough;
+ case IRDMA_OP_STATS_FREE:
+ status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp,
+ &pcmdinfo->in.u.stats_manage.info,
+ alloc,
+ pcmdinfo->in.u.stats_manage.scratch);
+ break;
+ case IRDMA_OP_STATS_GATHER:
+ status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp,
+ &pcmdinfo->in.u.stats_gather.info,
+ pcmdinfo->in.u.stats_gather.scratch);
+ break;
+ case IRDMA_OP_WS_MODIFY_NODE:
+ status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
+ &pcmdinfo->in.u.ws_node.info,
+ IRDMA_MODIFY_NODE,
+ pcmdinfo->in.u.ws_node.scratch);
+ break;
+ case IRDMA_OP_WS_DELETE_NODE:
+ status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
+ &pcmdinfo->in.u.ws_node.info,
+ IRDMA_DEL_NODE,
+ pcmdinfo->in.u.ws_node.scratch);
+ break;
+ case IRDMA_OP_WS_ADD_NODE:
+ status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp,
+ &pcmdinfo->in.u.ws_node.info,
+ IRDMA_ADD_NODE,
+ pcmdinfo->in.u.ws_node.scratch);
+ break;
+ case IRDMA_OP_SET_UP_MAP:
+ status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp,
+ &pcmdinfo->in.u.up_map.info,
+ pcmdinfo->in.u.up_map.scratch);
+ break;
+ case IRDMA_OP_QUERY_RDMA_FEATURES:
+ status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp,
+ &pcmdinfo->in.u.query_rdma.query_buff_mem,
+ pcmdinfo->in.u.query_rdma.scratch);
+ break;
+ case IRDMA_OP_DELETE_ARP_CACHE_ENTRY:
+ status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp,
+ pcmdinfo->in.u.del_arp_cache_entry.scratch,
+ pcmdinfo->in.u.del_arp_cache_entry.arp_index,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MANAGE_APBVT_ENTRY:
+ status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp,
+ &pcmdinfo->in.u.manage_apbvt_entry.info,
+ pcmdinfo->in.u.manage_apbvt_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY:
+ status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp,
+ &pcmdinfo->in.u.manage_qhash_table_entry.info,
+ pcmdinfo->in.u.manage_qhash_table_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_MODIFY:
+ status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp,
+ &pcmdinfo->in.u.qp_modify.info,
+ pcmdinfo->in.u.qp_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_CREATE:
+ status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp,
+ &pcmdinfo->in.u.qp_create.info,
+ pcmdinfo->in.u.qp_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_QP_DESTROY:
+ status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
+ pcmdinfo->in.u.qp_destroy.scratch,
+ pcmdinfo->in.u.qp_destroy.remove_hash_idx,
+ pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ALLOC_STAG:
+ status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
+ &pcmdinfo->in.u.alloc_stag.info,
+ pcmdinfo->in.u.alloc_stag.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MR_REG_NON_SHARED:
+ status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
+ &pcmdinfo->in.u.mr_reg_non_shared.info,
+ pcmdinfo->in.u.mr_reg_non_shared.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_DEALLOC_STAG:
+ status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
+ &pcmdinfo->in.u.dealloc_stag.info,
+ pcmdinfo->in.u.dealloc_stag.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_MW_ALLOC:
+ status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
+ &pcmdinfo->in.u.mw_alloc.info,
+ pcmdinfo->in.u.mw_alloc.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
+ status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
+ &pcmdinfo->in.u.add_arp_cache_entry.info,
+ pcmdinfo->in.u.add_arp_cache_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
+ status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
+ pcmdinfo->in.u.alloc_local_mac_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
+ status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
+ &pcmdinfo->in.u.add_local_mac_entry.info,
+ pcmdinfo->in.u.add_local_mac_entry.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
+ status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
+ pcmdinfo->in.u.del_local_mac_entry.scratch,
+ pcmdinfo->in.u.del_local_mac_entry.entry_idx,
+ pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_AH_CREATE:
+ status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
+ &pcmdinfo->in.u.ah_create.info,
+ pcmdinfo->in.u.ah_create.scratch);
+ break;
+ case IRDMA_OP_AH_DESTROY:
+ status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
+ &pcmdinfo->in.u.ah_destroy.info,
+ pcmdinfo->in.u.ah_destroy.scratch);
+ break;
+ case IRDMA_OP_MC_CREATE:
+ status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
+ &pcmdinfo->in.u.mc_create.info,
+ pcmdinfo->in.u.mc_create.scratch);
+ break;
+ case IRDMA_OP_MC_DESTROY:
+ status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
+ &pcmdinfo->in.u.mc_destroy.info,
+ pcmdinfo->in.u.mc_destroy.scratch);
+ break;
+ case IRDMA_OP_MC_MODIFY:
+ status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
+ &pcmdinfo->in.u.mc_modify.info,
+ pcmdinfo->in.u.mc_modify.scratch);
+ break;
+ default:
+ status = -EOPNOTSUPP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_process_cqp_cmd - process all cqp commands
+ * @dev: sc device struct
+ * @pcmdinfo: cqp command info
+ */
+int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo)
+{
+ int status = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cqp_lock, flags);
+ if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
+ status = irdma_exec_cqp_cmd(dev, pcmdinfo);
+ else
+ list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
+
+/**
+ * irdma_process_bh - called from tasklet for cqp list
+ * @dev: sc device struct
+ */
+int irdma_process_bh(struct irdma_sc_dev *dev)
+{
+ int status = 0;
+ struct cqp_cmds_info *pcmdinfo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cqp_lock, flags);
+ while (!list_empty(&dev->cqp_cmd_head) &&
+ !irdma_cqp_ring_full(dev->cqp)) {
+ pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
+ status = irdma_exec_cqp_cmd(dev, pcmdinfo);
+ if (status)
+ break;
+ }
+ spin_unlock_irqrestore(&dev->cqp_lock, flags);
+ return status;
+}
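Note: irdma_process_cqp_cmd() and irdma_process_bh() above form an execute-or-defer pair: a command runs immediately only when the backlog is empty and the CQP ring has room; otherwise it is appended to cqp_cmd_head and drained later. A simplified user-space sketch of that ordering rule (the driver additionally holds dev->cqp_lock around both paths; every name below is invented for the demo):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BACKLOG_MAX 8

struct cmd { int op; };

static struct cmd *backlog[BACKLOG_MAX];
static size_t backlog_len;
static bool ring_full;			/* stand-in for irdma_cqp_ring_full() */

static void exec_cmd(struct cmd *c)
{
	printf("executing op %d\n", c->op);
}

/* Mirrors irdma_process_cqp_cmd(): run now only if nothing is queued. */
static void submit_cmd(struct cmd *c)
{
	if (!backlog_len && !ring_full)
		exec_cmd(c);
	else if (backlog_len < BACKLOG_MAX)
		backlog[backlog_len++] = c;
}

/* Mirrors irdma_process_bh(): drain deferred commands in FIFO order. */
static void process_backlog(void)
{
	size_t i;

	for (i = 0; i < backlog_len && !ring_full; i++)
		exec_cmd(backlog[i]);
	backlog_len = 0;		/* demo simplification: assume all drained */
}

int main(void)
{
	struct cmd a = { 1 }, b = { 2 };

	ring_full = true;
	submit_cmd(&a);		/* ring busy: deferred */
	ring_full = false;
	submit_cmd(&b);		/* backlog non-empty: deferred too, keeps order */
	process_backlog();	/* runs op 1, then op 2 */
	return 0;
}

Deferring new submissions whenever the backlog is non-empty is what preserves FIFO ordering of CQP commands across a temporarily full ring.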
+
+/**
+ * irdma_cfg_aeq - Configure AEQ interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ * @enable: True to enable, False to disable
+ */
+void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, 3);
+ writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+}
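Note: irdma_cfg_aeq() composes the AEQ control register purely with FIELD_PREP() over the IRDMA_PFINT_AEQCTL_* masks; the bit positions of those fields are hardware specific. The stand-alone demo below only mirrors the FIELD_PREP()/GENMASK() arithmetic, with made-up field positions and names:

#include <assert.h>
#include <stdint.h>

/* User-space mirrors of GENMASK()/FIELD_PREP(); __builtin_ctz() needs
 * GCC or clang. Field positions below are invented for the demo. */
#define DEMO_GENMASK(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))
#define DEMO_FIELD_PREP(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define DEMO_CAUSE_ENA	DEMO_GENMASK(30, 30)	/* single enable bit */
#define DEMO_MSIX_INDX	DEMO_GENMASK(10, 0)	/* vector index field */
#define DEMO_ITR_INDX	DEMO_GENMASK(12, 11)	/* throttle selection */

int main(void)
{
	uint32_t reg = DEMO_FIELD_PREP(DEMO_CAUSE_ENA, 1) |
		       DEMO_FIELD_PREP(DEMO_MSIX_INDX, 5) |
		       DEMO_FIELD_PREP(DEMO_ITR_INDX, 3);

	/* enable bit set, vector 5 in bits 10:0, ITR index 3 in bits 12:11 */
	assert(reg == ((1u << 30) | 5u | (3u << 11)));
	return 0;
}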
+
+/**
+ * sc_vsi_update_stats - Update statistics
+ * @vsi: sc_vsi instance to update
+ */
+void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_gather_stats *gather_stats;
+ struct irdma_gather_stats *last_gather_stats;
+
+ gather_stats = vsi->pestat->gather_info.gather_stats_va;
+ last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
+ irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
+ last_gather_stats, vsi->dev->hw_stats_map,
+ vsi->dev->hw_attrs.max_stat_idx);
+}
+
+/**
+ * irdma_wait_pe_ready - Check if firmware is ready
+ * @dev: provides access to registers
+ */
+static int irdma_wait_pe_ready(struct irdma_sc_dev *dev)
+{
+ u32 statuscpu0;
+ u32 statuscpu1;
+ u32 statuscpu2;
+ u32 retrycount = 0;
+
+ do {
+ statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
+ statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
+ statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
+ if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
+ statuscpu2 == 0x80)
+ return 0;
+ mdelay(1000);
+ } while (retrycount++ < dev->hw_attrs.max_pe_ready_count);
+ return -1;
+}
+
+static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
+{
+ switch (dev->hw_attrs.uk_attrs.hw_rev) {
+ case IRDMA_GEN_1:
+ i40iw_init_hw(dev);
+ break;
+ case IRDMA_GEN_2:
+ icrdma_init_hw(dev);
+ break;
+ }
+}
+
+/**
+ * irdma_sc_dev_init - Initialize control part of device
+ * @ver: version
+ * @dev: Device pointer
+ * @info: Device init info
+ */
+int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
+ struct irdma_device_init_info *info)
+{
+ u32 val;
+ int ret_code = 0;
+ u8 db_size;
+
+ INIT_LIST_HEAD(&dev->cqp_cmd_head); /* for CQP command backlog */
+ mutex_init(&dev->ws_mutex);
+ dev->hmc_fn_id = info->hmc_fn_id;
+ dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
+ dev->fpm_query_buf = info->fpm_query_buf;
+ dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
+ dev->fpm_commit_buf = info->fpm_commit_buf;
+ dev->hw = info->hw;
+ dev->hw->hw_addr = info->bar0;
+ /* Set up the hardware limits; HMC may limit them further */
+ dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
+ dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
+ dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
+ dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
+ dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = IRDMA_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = IRDMA_MAX_SGE_RD;
+ dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
+ dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
+ dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
+ dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
+ dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
+
+ dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
+ dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
+ dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;
+
+ dev->hw_attrs.max_pe_ready_count = 14;
+ dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
+ dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
+ dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
+
+ dev->hw_attrs.uk_attrs.hw_rev = ver;
+ irdma_sc_init_hw(dev);
+
+ if (irdma_wait_pe_ready(dev))
+ return -ETIMEDOUT;
+
+ val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
+ db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
+ if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
+ val, db_size);
+ return -ENODEV;
+ }
+ dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
+
+ return ret_code;
+}
+
+/**
+ * irdma_stat_val - Extract HW counter value from statistics buffer
+ * @stats_val: pointer to statistics buffer
+ * @byteoff: byte offset of counter value in the buffer (8B-aligned)
+ * @bitoff: bit offset of counter value within 8B entry
+ * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
+ */
+static inline u64 irdma_stat_val(const u64 *stats_val, u16 byteoff, u8 bitoff,
+ u64 bitmask)
+{
+ u16 idx = byteoff / sizeof(*stats_val);
+
+ return (stats_val[idx] >> bitoff) & bitmask;
+}
+
+/**
+ * irdma_stat_delta - Calculate counter delta
+ * @new_val: updated counter value
+ * @old_val: last counter value
+ * @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
+ */
+static inline u64 irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val)
+{
+ if (new_val >= old_val)
+ return new_val - old_val;
+
+ /* roll-over case */
+ return max_val - old_val + new_val + 1;
+}
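Note: irdma_stat_delta() treats @max_val as the counter's all-ones value, so a wrapped counter contributes (max_val - old_val) increments up to the wrap plus (new_val + 1) increments after it. A stand-alone check of that arithmetic for a 24-bit counter (the IRDMA_MAX_STATS_24 case):

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as irdma_stat_delta(), repeated outside the driver. */
static uint64_t stat_delta(uint64_t new_val, uint64_t old_val, uint64_t max_val)
{
	if (new_val >= old_val)
		return new_val - old_val;
	return max_val - old_val + new_val + 1;	/* counter wrapped */
}

int main(void)
{
	const uint64_t max24 = 0xffffff;	/* IRDMA_MAX_STATS_24 */

	/* No wrap: counter moved from 100 to 140. */
	assert(stat_delta(140, 100, max24) == 40);

	/* Wrap: 0xfffff0 -> 0xffffff -> 0x000000 -> 0x000010 is 32 ticks. */
	assert(stat_delta(0x10, 0xfffff0, max24) == 0x20);
	return 0;
}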
+
+/**
+ * irdma_update_stats - Update statistics
+ * @hw_stats: hw_stats instance to update
+ * @gather_stats: updated stat counters
+ * @last_gather_stats: last stat counters
+ * @map: HW stat map (hw_stats => gather_stats)
+ * @max_stat_idx: number of HW stats
+ */
+void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
+ struct irdma_gather_stats *gather_stats,
+ struct irdma_gather_stats *last_gather_stats,
+ const struct irdma_hw_stat_map *map, u16 max_stat_idx)
+{
+ u64 *stats_val = hw_stats->stats_val;
+ u16 i;
+
+ for (i = 0; i < max_stat_idx; i++) {
+ u64 new_val = irdma_stat_val(gather_stats->val, map[i].byteoff,
+ map[i].bitoff, map[i].bitmask);
+ u64 last_val = irdma_stat_val(last_gather_stats->val,
+ map[i].byteoff, map[i].bitoff,
+ map[i].bitmask);
+
+ stats_val[i] +=
+ irdma_stat_delta(new_val, last_val, map[i].bitmask);
+ }
+
+ memcpy(last_gather_stats, gather_stats, sizeof(*last_gather_stats));
+}
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
new file mode 100644
index 0000000000..d06e45d2c2
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -0,0 +1,1157 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_DEFS_H
+#define IRDMA_DEFS_H
+
+#define IRDMA_FIRST_USER_QP_ID 3
+
+#define ECN_CODE_PT_VAL 2
+
+#define IRDMA_PUSH_OFFSET (8 * 1024 * 1024)
+#define IRDMA_PF_FIRST_PUSH_PAGE_INDEX 16
+#define IRDMA_PF_BAR_RSVD (60 * 1024)
+
+#define IRDMA_PE_DB_SIZE_4M 1
+#define IRDMA_PE_DB_SIZE_8M 2
+
+#define IRDMA_IRD_HW_SIZE_4 0
+#define IRDMA_IRD_HW_SIZE_16 1
+#define IRDMA_IRD_HW_SIZE_64 2
+#define IRDMA_IRD_HW_SIZE_128 3
+#define IRDMA_IRD_HW_SIZE_256 4
+
+enum irdma_protocol_used {
+ IRDMA_ANY_PROTOCOL = 0,
+ IRDMA_IWARP_PROTOCOL_ONLY = 1,
+ IRDMA_ROCE_PROTOCOL_ONLY = 2,
+};
+
+#define IRDMA_QP_STATE_INVALID 0
+#define IRDMA_QP_STATE_IDLE 1
+#define IRDMA_QP_STATE_RTS 2
+#define IRDMA_QP_STATE_CLOSING 3
+#define IRDMA_QP_STATE_SQD 3
+#define IRDMA_QP_STATE_RTR 4
+#define IRDMA_QP_STATE_TERMINATE 5
+#define IRDMA_QP_STATE_ERROR 6
+
+#define IRDMA_MAX_TRAFFIC_CLASS 8
+#define IRDMA_MAX_STATS_COUNT_GEN_1 12
+#define IRDMA_MAX_USER_PRIORITY 8
+#define IRDMA_MAX_APPS 8
+#define IRDMA_MAX_STATS_COUNT 128
+#define IRDMA_FIRST_NON_PF_STAT 4
+
+#define IRDMA_MIN_MTU_IPV4 576
+#define IRDMA_MIN_MTU_IPV6 1280
+#define IRDMA_MTU_TO_MSS_IPV4 40
+#define IRDMA_MTU_TO_MSS_IPV6 60
+#define IRDMA_DEFAULT_MTU 1500
+
+#define Q2_FPSN_OFFSET 64
+#define TERM_DDP_LEN_TAGGED 14
+#define TERM_DDP_LEN_UNTAGGED 18
+#define TERM_RDMA_LEN 28
+#define RDMA_OPCODE_M 0x0f
+#define RDMA_READ_REQ_OPCODE 1
+#define Q2_BAD_FRAME_OFFSET 72
+#define CQE_MAJOR_DRV 0x8000
+
+#define IRDMA_TERM_SENT 1
+#define IRDMA_TERM_RCVD 2
+#define IRDMA_TERM_DONE 4
+#define IRDMA_MAC_HLEN 14
+
+#define IRDMA_CQP_WAIT_POLL_REGS 1
+#define IRDMA_CQP_WAIT_POLL_CQ 2
+#define IRDMA_CQP_WAIT_EVENT 3
+
+#define IRDMA_AE_SOURCE_RSVD 0x0
+#define IRDMA_AE_SOURCE_RQ 0x1
+#define IRDMA_AE_SOURCE_RQ_0011 0x3
+
+#define IRDMA_AE_SOURCE_CQ 0x2
+#define IRDMA_AE_SOURCE_CQ_0110 0x6
+#define IRDMA_AE_SOURCE_CQ_1010 0xa
+#define IRDMA_AE_SOURCE_CQ_1110 0xe
+
+#define IRDMA_AE_SOURCE_SQ 0x5
+#define IRDMA_AE_SOURCE_SQ_0111 0x7
+
+#define IRDMA_AE_SOURCE_IN_RR_WR 0x9
+#define IRDMA_AE_SOURCE_IN_RR_WR_1011 0xb
+#define IRDMA_AE_SOURCE_OUT_RR 0xd
+#define IRDMA_AE_SOURCE_OUT_RR_1111 0xf
+
+#define IRDMA_TCP_STATE_NON_EXISTENT 0
+#define IRDMA_TCP_STATE_CLOSED 1
+#define IRDMA_TCP_STATE_LISTEN 2
+#define IRDMA_STATE_SYN_SEND 3
+#define IRDMA_TCP_STATE_SYN_RECEIVED 4
+#define IRDMA_TCP_STATE_ESTABLISHED 5
+#define IRDMA_TCP_STATE_CLOSE_WAIT 6
+#define IRDMA_TCP_STATE_FIN_WAIT_1 7
+#define IRDMA_TCP_STATE_CLOSING 8
+#define IRDMA_TCP_STATE_LAST_ACK 9
+#define IRDMA_TCP_STATE_FIN_WAIT_2 10
+#define IRDMA_TCP_STATE_TIME_WAIT 11
+#define IRDMA_TCP_STATE_RESERVED_1 12
+#define IRDMA_TCP_STATE_RESERVED_2 13
+#define IRDMA_TCP_STATE_RESERVED_3 14
+#define IRDMA_TCP_STATE_RESERVED_4 15
+
+#define IRDMA_CQP_SW_SQSIZE_4 4
+#define IRDMA_CQP_SW_SQSIZE_2048 2048
+
+#define IRDMA_CQ_TYPE_IWARP 1
+#define IRDMA_CQ_TYPE_ILQ 2
+#define IRDMA_CQ_TYPE_IEQ 3
+#define IRDMA_CQ_TYPE_CQP 4
+
+#define IRDMA_DONE_COUNT 1000
+#define IRDMA_SLEEP_COUNT 10
+
+#define IRDMA_UPDATE_SD_BUFF_SIZE 128
+#define IRDMA_FEATURE_BUF_SIZE (8 * IRDMA_MAX_FEATURES)
+
+#define IRDMA_MAX_QUANTA_PER_WR 8
+
+#define IRDMA_QP_SW_MAX_WQ_QUANTA 32768
+#define IRDMA_QP_SW_MAX_SQ_QUANTA 32768
+#define IRDMA_QP_SW_MAX_RQ_QUANTA 32768
+#define IRDMA_MAX_QP_WRS(max_quanta_per_wr) \
+ ((IRDMA_QP_SW_MAX_WQ_QUANTA - IRDMA_SQ_RSVD) / (max_quanta_per_wr))
+
+#define IRDMAQP_TERM_SEND_TERM_AND_FIN 0
+#define IRDMAQP_TERM_SEND_TERM_ONLY 1
+#define IRDMAQP_TERM_SEND_FIN_ONLY 2
+#define IRDMAQP_TERM_DONOT_SEND_TERM_OR_FIN 3
+
+#define IRDMA_QP_TYPE_IWARP 1
+#define IRDMA_QP_TYPE_UDA 2
+#define IRDMA_QP_TYPE_ROCE_RC 3
+#define IRDMA_QP_TYPE_ROCE_UD 4
+
+#define IRDMA_HW_PAGE_SIZE 4096
+#define IRDMA_HW_PAGE_SHIFT 12
+#define IRDMA_CQE_QTYPE_RQ 0
+#define IRDMA_CQE_QTYPE_SQ 1
+
+#define IRDMA_QP_SW_MIN_WQSIZE 8u /* in WRs */
+#define IRDMA_QP_WQE_MIN_SIZE 32
+#define IRDMA_QP_WQE_MAX_SIZE 256
+#define IRDMA_QP_WQE_MIN_QUANTA 1
+#define IRDMA_MAX_RQ_WQE_SHIFT_GEN1 2
+#define IRDMA_MAX_RQ_WQE_SHIFT_GEN2 3
+
+#define IRDMA_SQ_RSVD 258
+#define IRDMA_RQ_RSVD 1
+
+#define IRDMA_FEATURE_RTS_AE 1ULL
+#define IRDMA_FEATURE_CQ_RESIZE 2ULL
+#define IRDMAQP_OP_RDMA_WRITE 0x00
+#define IRDMAQP_OP_RDMA_READ 0x01
+#define IRDMAQP_OP_RDMA_SEND 0x03
+#define IRDMAQP_OP_RDMA_SEND_INV 0x04
+#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT 0x05
+#define IRDMAQP_OP_RDMA_SEND_SOL_EVENT_INV 0x06
+#define IRDMAQP_OP_BIND_MW 0x08
+#define IRDMAQP_OP_FAST_REGISTER 0x09
+#define IRDMAQP_OP_LOCAL_INVALIDATE 0x0a
+#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
+#define IRDMAQP_OP_NOP 0x0c
+#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d
+#define IRDMAQP_OP_GEN_RTS_AE 0x30
+
+enum irdma_cqp_op_type {
+ IRDMA_OP_CEQ_DESTROY = 1,
+ IRDMA_OP_AEQ_DESTROY = 2,
+ IRDMA_OP_DELETE_ARP_CACHE_ENTRY = 3,
+ IRDMA_OP_MANAGE_APBVT_ENTRY = 4,
+ IRDMA_OP_CEQ_CREATE = 5,
+ IRDMA_OP_AEQ_CREATE = 6,
+ IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY = 7,
+ IRDMA_OP_QP_MODIFY = 8,
+ IRDMA_OP_QP_UPLOAD_CONTEXT = 9,
+ IRDMA_OP_CQ_CREATE = 10,
+ IRDMA_OP_CQ_DESTROY = 11,
+ IRDMA_OP_QP_CREATE = 12,
+ IRDMA_OP_QP_DESTROY = 13,
+ IRDMA_OP_ALLOC_STAG = 14,
+ IRDMA_OP_MR_REG_NON_SHARED = 15,
+ IRDMA_OP_DEALLOC_STAG = 16,
+ IRDMA_OP_MW_ALLOC = 17,
+ IRDMA_OP_QP_FLUSH_WQES = 18,
+ IRDMA_OP_ADD_ARP_CACHE_ENTRY = 19,
+ IRDMA_OP_MANAGE_PUSH_PAGE = 20,
+ IRDMA_OP_UPDATE_PE_SDS = 21,
+ IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE = 22,
+ IRDMA_OP_SUSPEND = 23,
+ IRDMA_OP_RESUME = 24,
+ IRDMA_OP_MANAGE_VF_PBLE_BP = 25,
+ IRDMA_OP_QUERY_FPM_VAL = 26,
+ IRDMA_OP_COMMIT_FPM_VAL = 27,
+ IRDMA_OP_AH_CREATE = 28,
+ IRDMA_OP_AH_MODIFY = 29,
+ IRDMA_OP_AH_DESTROY = 30,
+ IRDMA_OP_MC_CREATE = 31,
+ IRDMA_OP_MC_DESTROY = 32,
+ IRDMA_OP_MC_MODIFY = 33,
+ IRDMA_OP_STATS_ALLOCATE = 34,
+ IRDMA_OP_STATS_FREE = 35,
+ IRDMA_OP_STATS_GATHER = 36,
+ IRDMA_OP_WS_ADD_NODE = 37,
+ IRDMA_OP_WS_MODIFY_NODE = 38,
+ IRDMA_OP_WS_DELETE_NODE = 39,
+ IRDMA_OP_WS_FAILOVER_START = 40,
+ IRDMA_OP_WS_FAILOVER_COMPLETE = 41,
+ IRDMA_OP_SET_UP_MAP = 42,
+ IRDMA_OP_GEN_AE = 43,
+ IRDMA_OP_QUERY_RDMA_FEATURES = 44,
+ IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 45,
+ IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
+ IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
+ IRDMA_OP_CQ_MODIFY = 48,
+
+ /* Must be last entry */
+ IRDMA_MAX_CQP_OPS = 49,
+};
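Note: IRDMA_MAX_CQP_OPS must remain the last enumerator (as the comment above says) because it bounds per-opcode bookkeeping such as dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++ in irdma_exec_cqp_cmd(). A trimmed, stand-alone illustration of that pattern (enumerators and names below are abbreviated for the demo, not the driver's definitions):

#include <stdio.h>

/* Abbreviated enum; only the "last entry sizes the array" idea matters. */
enum demo_cqp_op {
	DEMO_OP_CEQ_DESTROY = 1,
	DEMO_OP_AEQ_DESTROY = 2,
	/* ... */
	DEMO_MAX_CQP_OPS = 49,		/* must stay last */
};

static unsigned long cqp_cmd_stats[DEMO_MAX_CQP_OPS];

static void count_op(enum demo_cqp_op op)
{
	if (op < DEMO_MAX_CQP_OPS)
		cqp_cmd_stats[op]++;
}

int main(void)
{
	count_op(DEMO_OP_CEQ_DESTROY);
	count_op(DEMO_OP_CEQ_DESTROY);
	printf("CEQ destroys issued: %lu\n",
	       cqp_cmd_stats[DEMO_OP_CEQ_DESTROY]);
	return 0;
}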
+
+/* CQP SQ WQES */
+#define IRDMA_CQP_OP_CREATE_QP 0
+#define IRDMA_CQP_OP_MODIFY_QP 0x1
+#define IRDMA_CQP_OP_DESTROY_QP 0x02
+#define IRDMA_CQP_OP_CREATE_CQ 0x03
+#define IRDMA_CQP_OP_MODIFY_CQ 0x04
+#define IRDMA_CQP_OP_DESTROY_CQ 0x05
+#define IRDMA_CQP_OP_ALLOC_STAG 0x09
+#define IRDMA_CQP_OP_REG_MR 0x0a
+#define IRDMA_CQP_OP_QUERY_STAG 0x0b
+#define IRDMA_CQP_OP_REG_SMR 0x0c
+#define IRDMA_CQP_OP_DEALLOC_STAG 0x0d
+#define IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE 0x0e
+#define IRDMA_CQP_OP_MANAGE_ARP 0x0f
+#define IRDMA_CQP_OP_MANAGE_VF_PBLE_BP 0x10
+#define IRDMA_CQP_OP_MANAGE_PUSH_PAGES 0x11
+#define IRDMA_CQP_OP_QUERY_RDMA_FEATURES 0x12
+#define IRDMA_CQP_OP_UPLOAD_CONTEXT 0x13
+#define IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY 0x14
+#define IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15
+#define IRDMA_CQP_OP_CREATE_CEQ 0x16
+#define IRDMA_CQP_OP_DESTROY_CEQ 0x18
+#define IRDMA_CQP_OP_CREATE_AEQ 0x19
+#define IRDMA_CQP_OP_DESTROY_AEQ 0x1b
+#define IRDMA_CQP_OP_CREATE_ADDR_HANDLE 0x1c
+#define IRDMA_CQP_OP_MODIFY_ADDR_HANDLE 0x1d
+#define IRDMA_CQP_OP_DESTROY_ADDR_HANDLE 0x1e
+#define IRDMA_CQP_OP_UPDATE_PE_SDS 0x1f
+#define IRDMA_CQP_OP_QUERY_FPM_VAL 0x20
+#define IRDMA_CQP_OP_COMMIT_FPM_VAL 0x21
+#define IRDMA_CQP_OP_FLUSH_WQES 0x22
+/* IRDMA_CQP_OP_GEN_AE is the same value as IRDMA_CQP_OP_FLUSH_WQES */
+#define IRDMA_CQP_OP_GEN_AE 0x22
+#define IRDMA_CQP_OP_MANAGE_APBVT 0x23
+#define IRDMA_CQP_OP_NOP 0x24
+#define IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25
+#define IRDMA_CQP_OP_CREATE_MCAST_GRP 0x26
+#define IRDMA_CQP_OP_MODIFY_MCAST_GRP 0x27
+#define IRDMA_CQP_OP_DESTROY_MCAST_GRP 0x28
+#define IRDMA_CQP_OP_SUSPEND_QP 0x29
+#define IRDMA_CQP_OP_RESUME_QP 0x2a
+#define IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b
+#define IRDMA_CQP_OP_WORK_SCHED_NODE 0x2c
+#define IRDMA_CQP_OP_MANAGE_STATS 0x2d
+#define IRDMA_CQP_OP_GATHER_STATS 0x2e
+#define IRDMA_CQP_OP_UP_MAP 0x2f
+
+/* Async Events codes */
+#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
+#define IRDMA_AE_AMP_INVALID_STAG 0x0103
+#define IRDMA_AE_AMP_BAD_QP 0x0104
+#define IRDMA_AE_AMP_BAD_PD 0x0105
+#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
+#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
+#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define IRDMA_AE_AMP_TO_WRAP 0x010a
+#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
+#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
+#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
+#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
+#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
+#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
+#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
+#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
+#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
+#define IRDMA_AE_BAD_CLOSE 0x0201
+#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
+#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
+#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
+#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
+#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
+#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
+#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
+#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_INVALID_REQUEST 0x0223
+#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
+#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
+#define IRDMA_AE_DDP_NO_L_BIT 0x0308
+#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
+#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
+#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
+#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
+#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
+#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
+#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
+#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
+#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
+#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
+#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
+#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
+#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+#define IRDMA_AE_RESET_SENT 0x0601
+#define IRDMA_AE_TERMINATE_SENT 0x0602
+#define IRDMA_AE_RESET_NOT_SENT 0x0603
+#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
+#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
+
+#define FLD_LS_64(dev, val, field) \
+ (((u64)(val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
+#define FLD_RS_64(dev, val, field) \
+ ((u64)((val) & (dev)->hw_masks[field ## _M]) >> (dev)->hw_shifts[field ## _S])
+#define FLD_LS_32(dev, val, field) \
+ (((val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
+#define FLD_RS_32(dev, val, field) \
+ ((u64)((val) & (dev)->hw_masks[field ## _M]) >> (dev)->hw_shifts[field ## _S])
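Note: unlike the fixed GENMASK_ULL() fields below, FLD_LS_64()/FLD_RS_64() above look the shift and mask up in per-device tables (dev->hw_shifts[], dev->hw_masks[]), so the same field id can land in different bit positions on different hardware generations. A stand-alone sketch of that encode/decode round trip, with an invented field table:

#include <assert.h>
#include <stdint.h>

/* Per-generation shift/mask tables, analogous to dev->hw_shifts[] and
 * dev->hw_masks[]; the field id and bit layout are invented for the demo. */
enum demo_field { DEMO_CQ_WQEIDX, DEMO_FIELD_MAX };

struct demo_dev {
	uint8_t  hw_shifts[DEMO_FIELD_MAX];
	uint64_t hw_masks[DEMO_FIELD_MAX];
};

/* Same shape as FLD_LS_64()/FLD_RS_64(): encode, then decode, a field. */
#define DEMO_LS_64(dev, val, f) \
	(((uint64_t)(val) << (dev)->hw_shifts[f]) & (dev)->hw_masks[f])
#define DEMO_RS_64(dev, val, f) \
	(((val) & (dev)->hw_masks[f]) >> (dev)->hw_shifts[f])

int main(void)
{
	/* Pretend this generation keeps WQEIDX in bits 46:32. */
	struct demo_dev gen = {
		.hw_shifts = { [DEMO_CQ_WQEIDX] = 32 },
		.hw_masks  = { [DEMO_CQ_WQEIDX] = 0x7fffULL << 32 },
	};
	uint64_t cqe = DEMO_LS_64(&gen, 0x123, DEMO_CQ_WQEIDX);

	assert(DEMO_RS_64(&gen, cqe, DEMO_CQ_WQEIDX) == 0x123);
	return 0;
}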
+
+#define IRDMA_MAX_STATS_24 0xffffffULL
+#define IRDMA_MAX_STATS_32 0xffffffffULL
+#define IRDMA_MAX_STATS_48 0xffffffffffffULL
+#define IRDMA_MAX_STATS_56 0xffffffffffffffULL
+#define IRDMA_MAX_STATS_64 0xffffffffffffffffULL
+
+#define IRDMA_MAX_CQ_READ_THRESH 0x3FFFF
+#define IRDMA_CQPSQ_QHASH_VLANID GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
+#define IRDMA_CQPSQ_QHASH_QS_HANDLE GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
+#define IRDMA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
+#define IRDMA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
+#define IRDMA_CQPSQ_QHASH_IPV4VALID BIT_ULL(60)
+#define IRDMA_CQPSQ_QHASH_VLANVALID BIT_ULL(59)
+#define IRDMA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
+#define IRDMA_CQPSQ_STATS_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_STATS_ALLOC_INST BIT_ULL(62)
+#define IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX BIT_ULL(60)
+#define IRDMA_CQPSQ_STATS_USE_INST BIT_ULL(61)
+#define IRDMA_CQPSQ_STATS_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0)
+#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_WS_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(53, 52)
+
+#define IRDMA_CQPSQ_WS_ENABLENODE BIT_ULL(62)
+#define IRDMA_CQPSQ_WS_NODETYPE BIT_ULL(61)
+#define IRDMA_CQPSQ_WS_PRIOTYPE GENMASK_ULL(60, 59)
+#define IRDMA_CQPSQ_WS_TC GENMASK_ULL(58, 56)
+#define IRDMA_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54)
+#define IRDMA_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42)
+#define IRDMA_CQPSQ_WS_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16)
+#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(57, 48)
+#define IRDMA_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32)
+
+#define IRDMA_CQPSQ_UP_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_UP_USEVLAN BIT_ULL(62)
+#define IRDMA_CQPSQ_UP_USEOVERRIDE BIT_ULL(61)
+#define IRDMA_CQPSQ_UP_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MODEL_USED GENMASK_ULL(47, 32)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MAJOR_VERSION GENMASK_ULL(23, 16)
+#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_HW_MINOR_VERSION GENMASK_ULL(7, 0)
+#define IRDMA_CQPHC_SQSIZE GENMASK_ULL(11, 8)
+#define IRDMA_CQPHC_DISABLE_PFPDUS BIT_ULL(1)
+#define IRDMA_CQPHC_ROCEV2_RTO_POLICY BIT_ULL(2)
+#define IRDMA_CQPHC_PROTOCOL_USED GENMASK_ULL(4, 3)
+#define IRDMA_CQPHC_MIN_RATE GENMASK_ULL(51, 48)
+#define IRDMA_CQPHC_MIN_DEC_FACTOR GENMASK_ULL(59, 56)
+#define IRDMA_CQPHC_DCQCN_T GENMASK_ULL(15, 0)
+#define IRDMA_CQPHC_HAI_FACTOR GENMASK_ULL(47, 32)
+#define IRDMA_CQPHC_RAI_FACTOR GENMASK_ULL(63, 48)
+#define IRDMA_CQPHC_DCQCN_B GENMASK_ULL(24, 0)
+#define IRDMA_CQPHC_DCQCN_F GENMASK_ULL(27, 25)
+#define IRDMA_CQPHC_CC_CFG_VALID BIT_ULL(31)
+#define IRDMA_CQPHC_RREDUCE_MPERIOD GENMASK_ULL(63, 32)
+#define IRDMA_CQPHC_HW_MINVER GENMASK_ULL(15, 0)
+
+#define IRDMA_CQPHC_HW_MAJVER_GEN_1 0
+#define IRDMA_CQPHC_HW_MAJVER_GEN_2 1
+#define IRDMA_CQPHC_HW_MAJVER_GEN_3 2
+#define IRDMA_CQPHC_HW_MAJVER GENMASK_ULL(31, 16)
+#define IRDMA_CQPHC_CEQPERVF GENMASK_ULL(39, 32)
+
+#define IRDMA_CQPHC_ENABLED_VFS GENMASK_ULL(37, 32)
+
+#define IRDMA_CQPHC_HMC_PROFILE GENMASK_ULL(2, 0)
+#define IRDMA_CQPHC_SVER GENMASK_ULL(31, 24)
+#define IRDMA_CQPHC_SQBASE GENMASK_ULL(63, 9)
+
+#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
+#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
+#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
+#define IRDMA_CQ_DBSA_SW_CQ_SELECT GENMASK_ULL(13, 0)
+#define IRDMA_CQ_DBSA_ARM_NEXT BIT_ULL(14)
+#define IRDMA_CQ_DBSA_ARM_NEXT_SE BIT_ULL(15)
+#define IRDMA_CQ_DBSA_ARM_SEQ_NUM GENMASK_ULL(17, 16)
+
+/* CQP and iWARP Completion Queue */
+#define IRDMA_CQ_QPCTX IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CCQ_OPRETVAL GENMASK_ULL(31, 0)
+
+#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
+#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
+#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
+#define IRDMA_CQ_EXTCQE BIT_ULL(50)
+#define IRDMA_OOO_CMPL BIT_ULL(54)
+#define IRDMA_CQ_ERROR BIT_ULL(55)
+#define IRDMA_CQ_SQ BIT_ULL(62)
+
+#define IRDMA_CQ_VALID BIT_ULL(63)
+#define IRDMA_CQ_IMMVALID BIT_ULL(62)
+#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
+#define IRDMA_CQ_UDVLANVALID BIT_ULL(60)
+#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
+#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
+
+#define IRDMA_CQ_IMMDATA_S 0
+#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMDATA_S)
+#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
+#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
+#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
+#define IRDMACQ_TCPSEQNUMRTT GENMASK_ULL(63, 32)
+#define IRDMACQ_INVSTAG GENMASK_ULL(31, 0)
+#define IRDMACQ_QPID GENMASK_ULL(55, 32)
+
+#define IRDMACQ_UDSRCQPN GENMASK_ULL(31, 0)
+#define IRDMACQ_PSHDROP BIT_ULL(51)
+#define IRDMACQ_STAG BIT_ULL(53)
+#define IRDMACQ_IPV4 BIT_ULL(53)
+#define IRDMACQ_SOEVENT BIT_ULL(54)
+#define IRDMACQ_OP GENMASK_ULL(61, 56)
+
+#define IRDMA_CEQE_CQCTX GENMASK_ULL(62, 0)
+#define IRDMA_CEQE_VALID BIT_ULL(63)
+
+/* AEQE format */
+#define IRDMA_AEQE_COMPCTX IRDMA_CQPHC_QPCTX
+#define IRDMA_AEQE_QPCQID_LOW GENMASK_ULL(17, 0)
+#define IRDMA_AEQE_QPCQID_HI BIT_ULL(46)
+#define IRDMA_AEQE_WQDESCIDX GENMASK_ULL(32, 18)
+#define IRDMA_AEQE_OVERFLOW BIT_ULL(33)
+#define IRDMA_AEQE_AECODE GENMASK_ULL(45, 34)
+#define IRDMA_AEQE_AESRC GENMASK_ULL(53, 50)
+#define IRDMA_AEQE_IWSTATE GENMASK_ULL(56, 54)
+#define IRDMA_AEQE_TCPSTATE GENMASK_ULL(60, 57)
+#define IRDMA_AEQE_Q2DATA GENMASK_ULL(62, 61)
+#define IRDMA_AEQE_VALID BIT_ULL(63)
+
+#define IRDMA_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16)
+#define IRDMA_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42)
+#define IRDMA_GEN1_UDA_QPSQ_L4LEN GENMASK_ULL(27, 24)
+#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_QPSQ_VALID BIT_ULL(63)
+#define IRDMA_UDA_QPSQ_SIGCOMPL BIT_ULL(62)
+#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
+#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
+#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
+#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
+#define IRDMA_UDA_PAYLOADLEN GENMASK_ULL(13, 0)
+#define IRDMA_UDA_HDRLEN GENMASK_ULL(24, 16)
+#define IRDMA_VLAN_TAG_VALID BIT_ULL(50)
+#define IRDMA_UDA_L3PROTO GENMASK_ULL(1, 0)
+#define IRDMA_UDA_L4PROTO GENMASK_ULL(17, 16)
+#define IRDMA_UDA_QPSQ_DOLOOPBACK BIT_ULL(44)
+#define IRDMA_CQPSQ_BUFSIZE GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_WQEVALID BIT_ULL(63)
+#define IRDMA_CQPSQ_TPHVAL GENMASK_ULL(7, 0)
+
+#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(17, 8)
+#define IRDMA_CQPSQ_TPHEN BIT_ULL(60)
+
+#define IRDMA_CQPSQ_PBUFADDR IRDMA_CQPHC_QPCTX
+
+/* Create/Modify/Destroy QP */
+
+#define IRDMA_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32)
+#define IRDMA_CQPSQ_QP_TERMLEN GENMASK_ULL(51, 48)
+
+#define IRDMA_CQPSQ_QP_QPCTX IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CQPSQ_QP_QPID_S 0
+#define IRDMA_CQPSQ_QP_QPID_M (0xFFFFFFUL)
+
+#define IRDMA_CQPSQ_QP_OP_S 32
+#define IRDMA_CQPSQ_QP_OP_M IRDMACQ_OP_M
+#define IRDMA_CQPSQ_QP_ORDVALID BIT_ULL(42)
+#define IRDMA_CQPSQ_QP_TOECTXVALID BIT_ULL(43)
+#define IRDMA_CQPSQ_QP_CACHEDVARVALID BIT_ULL(44)
+#define IRDMA_CQPSQ_QP_VQ BIT_ULL(45)
+#define IRDMA_CQPSQ_QP_FORCELOOPBACK BIT_ULL(46)
+#define IRDMA_CQPSQ_QP_CQNUMVALID BIT_ULL(47)
+#define IRDMA_CQPSQ_QP_QPTYPE GENMASK_ULL(50, 48)
+#define IRDMA_CQPSQ_QP_MACVALID BIT_ULL(51)
+#define IRDMA_CQPSQ_QP_MSSCHANGE BIT_ULL(52)
+
+#define IRDMA_CQPSQ_QP_IGNOREMWBOUND BIT_ULL(54)
+#define IRDMA_CQPSQ_QP_REMOVEHASHENTRY BIT_ULL(55)
+#define IRDMA_CQPSQ_QP_TERMACT GENMASK_ULL(57, 56)
+#define IRDMA_CQPSQ_QP_RESETCON BIT_ULL(58)
+#define IRDMA_CQPSQ_QP_ARPTABIDXVALID BIT_ULL(59)
+#define IRDMA_CQPSQ_QP_NEXTIWSTATE GENMASK_ULL(62, 60)
+
+#define IRDMA_CQPSQ_QP_DBSHADOWADDR IRDMA_CQPHC_QPCTX
+
+#define IRDMA_CQPSQ_CQ_CQSIZE GENMASK_ULL(20, 0)
+#define IRDMA_CQPSQ_CQ_CQCTX GENMASK_ULL(62, 0)
+#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(17, 0)
+
+#define IRDMA_CQPSQ_CQ_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_CQ_CQRESIZE BIT_ULL(43)
+#define IRDMA_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(45, 44)
+#define IRDMA_CQPSQ_CQ_CHKOVERFLOW BIT_ULL(46)
+#define IRDMA_CQPSQ_CQ_VIRTMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_CQ_ENCEQEMASK BIT_ULL(48)
+#define IRDMA_CQPSQ_CQ_CEQIDVALID BIT_ULL(49)
+#define IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT BIT_ULL(61)
+#define IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+
+/* Allocate/Register/Register Shared/Deallocate Stag */
+#define IRDMA_CQPSQ_STAG_VA_FBO IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_STAG_STAGLEN GENMASK_ULL(45, 0)
+#define IRDMA_CQPSQ_STAG_KEY GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_STAG_IDX GENMASK_ULL(31, 8)
+#define IRDMA_CQPSQ_STAG_IDX_S 8
+#define IRDMA_CQPSQ_STAG_PARENTSTAGIDX GENMASK_ULL(55, 32)
+#define IRDMA_CQPSQ_STAG_MR BIT_ULL(43)
+#define IRDMA_CQPSQ_STAG_MWTYPE BIT_ULL(42)
+#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(58)
+
+#define IRDMA_CQPSQ_STAG_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
+#define IRDMA_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(47, 46)
+#define IRDMA_CQPSQ_STAG_ARIGHTS GENMASK_ULL(52, 48)
+#define IRDMA_CQPSQ_STAG_REMACCENABLED BIT_ULL(53)
+#define IRDMA_CQPSQ_STAG_VABASEDTO BIT_ULL(59)
+#define IRDMA_CQPSQ_STAG_USEHMCFNIDX BIT_ULL(60)
+#define IRDMA_CQPSQ_STAG_USEPFRID BIT_ULL(61)
+
+#define IRDMA_CQPSQ_STAG_PBA IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(5, 0)
+
+#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_QUERYSTAG_IDX IRDMA_CQPSQ_STAG_IDX
+#define IRDMA_CQPSQ_MLM_TABLEIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_MLM_FREEENTRY BIT_ULL(62)
+#define IRDMA_CQPSQ_MLM_IGNORE_REF_CNT BIT_ULL(61)
+#define IRDMA_CQPSQ_MLM_MAC0 GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_MLM_MAC1 GENMASK_ULL(15, 8)
+#define IRDMA_CQPSQ_MLM_MAC2 GENMASK_ULL(23, 16)
+#define IRDMA_CQPSQ_MLM_MAC3 GENMASK_ULL(31, 24)
+#define IRDMA_CQPSQ_MLM_MAC4 GENMASK_ULL(39, 32)
+#define IRDMA_CQPSQ_MLM_MAC5 GENMASK_ULL(47, 40)
+#define IRDMA_CQPSQ_MAT_REACHMAX GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_MAT_MACADDR GENMASK_ULL(47, 0)
+#define IRDMA_CQPSQ_MAT_ARPENTRYIDX GENMASK_ULL(11, 0)
+#define IRDMA_CQPSQ_MAT_ENTRYVALID BIT_ULL(42)
+#define IRDMA_CQPSQ_MAT_PERMANENT BIT_ULL(43)
+#define IRDMA_CQPSQ_MAT_QUERY BIT_ULL(44)
+#define IRDMA_CQPSQ_MVPBP_PD_ENTRY_CNT GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MVPBP_FIRST_PD_INX GENMASK_ULL(24, 16)
+#define IRDMA_CQPSQ_MVPBP_SD_INX GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_MVPBP_INV_PD_ENT BIT_ULL(62)
+#define IRDMA_CQPSQ_MVPBP_PD_PLPBA GENMASK_ULL(63, 3)
+
+/* Manage Push Page - MPP */
+#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
+#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
+
+#define IRDMA_CQPSQ_MPP_QS_HANDLE GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60)
+
+#define IRDMA_CQPSQ_MPP_FREE_PAGE BIT_ULL(62)
+
+/* Upload Context - UCTX */
+#define IRDMA_CQPSQ_UCTX_QPCTXADDR IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_UCTX_QPID GENMASK_ULL(23, 0)
+#define IRDMA_CQPSQ_UCTX_QPTYPE GENMASK_ULL(51, 48)
+
+#define IRDMA_CQPSQ_UCTX_RAWFORMAT BIT_ULL(61)
+#define IRDMA_CQPSQ_UCTX_FREEZEQP BIT_ULL(62)
+
+#define IRDMA_CQPSQ_MHMC_VFIDX GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_MHMC_FREEPMFN BIT_ULL(62)
+
+#define IRDMA_CQPSQ_SHMCRP_HMC_PROFILE GENMASK_ULL(2, 0)
+#define IRDMA_CQPSQ_SHMCRP_VFNUM GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_CEQ_CEQSIZE GENMASK_ULL(21, 0)
+#define IRDMA_CQPSQ_CEQ_CEQID GENMASK_ULL(9, 0)
+
+#define IRDMA_CQPSQ_CEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
+#define IRDMA_CQPSQ_CEQ_VMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46)
+#define IRDMA_CQPSQ_CEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_AEQ_AEQECNT GENMASK_ULL(18, 0)
+#define IRDMA_CQPSQ_AEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
+#define IRDMA_CQPSQ_AEQ_VMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
+
+#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0)
+
+#define IRDMA_COMMIT_FPM_BASE_S 32
+#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_FWQE_AESOURCE GENMASK_ULL(19, 16)
+#define IRDMA_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_FWQE_RQMJERR GENMASK_ULL(31, 16)
+#define IRDMA_CQPSQ_FWQE_SQMNERR GENMASK_ULL(47, 32)
+#define IRDMA_CQPSQ_FWQE_SQMJERR GENMASK_ULL(63, 48)
+#define IRDMA_CQPSQ_FWQE_QPID GENMASK_ULL(23, 0)
+#define IRDMA_CQPSQ_FWQE_GENERATE_AE BIT_ULL(59)
+#define IRDMA_CQPSQ_FWQE_USERFLCODE BIT_ULL(60)
+#define IRDMA_CQPSQ_FWQE_FLUSHSQ BIT_ULL(61)
+#define IRDMA_CQPSQ_FWQE_FLUSHRQ BIT_ULL(62)
+#define IRDMA_CQPSQ_MAPT_PORT GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_MAPT_ADDPORT BIT_ULL(62)
+#define IRDMA_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_UPESD_SDDATALOW GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_UPESD_SDDATAHI GENMASK_ULL(63, 32)
+#define IRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_UPESD_ENTRY_VALID BIT_ULL(63)
+
+#define IRDMA_CQPSQ_UPESD_BM_PF 0
+#define IRDMA_CQPSQ_UPESD_BM_CP_LM 1
+#define IRDMA_CQPSQ_UPESD_BM_AXF 2
+#define IRDMA_CQPSQ_UPESD_BM_LM 4
+#define IRDMA_CQPSQ_UPESD_BM GENMASK_ULL(34, 32)
+#define IRDMA_CQPSQ_UPESD_ENTRY_COUNT GENMASK_ULL(3, 0)
+#define IRDMA_CQPSQ_UPESD_SKIP_ENTRY BIT_ULL(7)
+#define IRDMA_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0)
+#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0)
+#define IRDMA_CQPSQ_RESUMEQP_QPID GENMASK(23, 0)
+
+#define IRDMA_CQPSQ_MIN_STAG_INVALID 0x0001
+#define IRDMA_CQPSQ_MIN_SUSPEND_PND 0x0005
+
+#define IRDMA_CQPSQ_MAJ_NO_ERROR 0x0000
+#define IRDMA_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000
+#define IRDMA_CQPSQ_MAJ_CNTXTCACHE_ERROR 0xF001
+#define IRDMA_CQPSQ_MAJ_ERROR 0xFFFF
+#define IRDMAQPC_DDP_VER GENMASK_ULL(1, 0)
+#define IRDMAQPC_IBRDENABLE BIT_ULL(2)
+#define IRDMAQPC_IPV4 BIT_ULL(3)
+#define IRDMAQPC_NONAGLE BIT_ULL(4)
+#define IRDMAQPC_INSERTVLANTAG BIT_ULL(5)
+#define IRDMAQPC_ISQP1 BIT_ULL(6)
+#define IRDMAQPC_TIMESTAMP BIT_ULL(7)
+#define IRDMAQPC_RQWQESIZE GENMASK_ULL(9, 8)
+#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11)
+#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12)
+
+#define IRDMAQPC_ECN_EN BIT_ULL(14)
+#define IRDMAQPC_DROPOOOSEG BIT_ULL(15)
+#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16)
+#define IRDMAQPC_ERR_RQ_IDX_VALID BIT_ULL(19)
+#define IRDMAQPC_DIS_VLAN_CHECKS GENMASK_ULL(21, 19)
+#define IRDMAQPC_DC_TCP_EN BIT_ULL(25)
+#define IRDMAQPC_RCVTPHEN BIT_ULL(28)
+#define IRDMAQPC_XMITTPHEN BIT_ULL(29)
+#define IRDMAQPC_RQTPHEN BIT_ULL(30)
+#define IRDMAQPC_SQTPHEN BIT_ULL(31)
+#define IRDMAQPC_PPIDX GENMASK_ULL(41, 32)
+#define IRDMAQPC_PMENA BIT_ULL(47)
+#define IRDMAQPC_RDMAP_VER GENMASK_ULL(63, 62)
+#define IRDMAQPC_ROCE_TVER GENMASK_ULL(63, 60)
+
+#define IRDMAQPC_SQADDR IRDMA_CQPHC_QPCTX
+#define IRDMAQPC_RQADDR IRDMA_CQPHC_QPCTX
+#define IRDMAQPC_TTL GENMASK_ULL(7, 0)
+#define IRDMAQPC_RQSIZE GENMASK_ULL(11, 8)
+#define IRDMAQPC_SQSIZE GENMASK_ULL(15, 12)
+#define IRDMAQPC_GEN1_SRCMACADDRIDX GENMASK(21, 16)
+#define IRDMAQPC_AVOIDSTRETCHACK BIT_ULL(23)
+#define IRDMAQPC_TOS GENMASK_ULL(31, 24)
+#define IRDMAQPC_SRCPORTNUM GENMASK_ULL(47, 32)
+#define IRDMAQPC_DESTPORTNUM GENMASK_ULL(63, 48)
+#define IRDMAQPC_DESTIPADDR0 GENMASK_ULL(63, 32)
+#define IRDMAQPC_DESTIPADDR1 GENMASK_ULL(31, 0)
+#define IRDMAQPC_DESTIPADDR2 GENMASK_ULL(63, 32)
+#define IRDMAQPC_DESTIPADDR3 GENMASK_ULL(31, 0)
+#define IRDMAQPC_SNDMSS GENMASK_ULL(29, 16)
+#define IRDMAQPC_SYN_RST_HANDLING GENMASK_ULL(31, 30)
+#define IRDMAQPC_VLANTAG GENMASK_ULL(47, 32)
+#define IRDMAQPC_ARPIDX GENMASK_ULL(63, 48)
+#define IRDMAQPC_FLOWLABEL GENMASK_ULL(19, 0)
+#define IRDMAQPC_WSCALE BIT_ULL(20)
+#define IRDMAQPC_KEEPALIVE BIT_ULL(21)
+#define IRDMAQPC_IGNORE_TCP_OPT BIT_ULL(22)
+#define IRDMAQPC_IGNORE_TCP_UNS_OPT BIT_ULL(23)
+#define IRDMAQPC_TCPSTATE GENMASK_ULL(31, 28)
+#define IRDMAQPC_RCVSCALE GENMASK_ULL(35, 32)
+#define IRDMAQPC_SNDSCALE GENMASK_ULL(43, 40)
+#define IRDMAQPC_PDIDX GENMASK_ULL(63, 48)
+#define IRDMAQPC_PDIDXHI GENMASK_ULL(21, 20)
+#define IRDMAQPC_PKEY GENMASK_ULL(47, 32)
+#define IRDMAQPC_ACKCREDITS GENMASK_ULL(24, 20)
+#define IRDMAQPC_QKEY GENMASK_ULL(63, 32)
+#define IRDMAQPC_DESTQP GENMASK_ULL(23, 0)
+#define IRDMAQPC_KALIVE_TIMER_MAX_PROBES GENMASK_ULL(23, 16)
+#define IRDMAQPC_KEEPALIVE_INTERVAL GENMASK_ULL(31, 24)
+#define IRDMAQPC_TIMESTAMP_RECENT GENMASK_ULL(31, 0)
+#define IRDMAQPC_TIMESTAMP_AGE GENMASK_ULL(63, 32)
+#define IRDMAQPC_SNDNXT GENMASK_ULL(31, 0)
+#define IRDMAQPC_ISN GENMASK_ULL(55, 32)
+#define IRDMAQPC_PSNNXT GENMASK_ULL(23, 0)
+#define IRDMAQPC_LSN GENMASK_ULL(55, 32)
+#define IRDMAQPC_SNDWND GENMASK_ULL(63, 32)
+#define IRDMAQPC_RCVNXT GENMASK_ULL(31, 0)
+#define IRDMAQPC_EPSN GENMASK_ULL(23, 0)
+#define IRDMAQPC_RCVWND GENMASK_ULL(63, 32)
+#define IRDMAQPC_SNDMAX GENMASK_ULL(31, 0)
+#define IRDMAQPC_SNDUNA GENMASK_ULL(63, 32)
+#define IRDMAQPC_PSNMAX GENMASK_ULL(23, 0)
+#define IRDMAQPC_PSNUNA GENMASK_ULL(55, 32)
+#define IRDMAQPC_SRTT GENMASK_ULL(31, 0)
+#define IRDMAQPC_RTTVAR GENMASK_ULL(63, 32)
+#define IRDMAQPC_SSTHRESH GENMASK_ULL(31, 0)
+#define IRDMAQPC_CWND GENMASK_ULL(63, 32)
+#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32)
+#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0)
+#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32)
+#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(45, 32)
+#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57)
+#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0)
+#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48)
+#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54)
+#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0)
+#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32)
+#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0)
+#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8)
+#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0)
+#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16)
+#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0)
+
+#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16)
+
+#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19)
+#define IRDMAQPC_WRRDRSPOK BIT_ULL(20)
+#define IRDMAQPC_RDOK BIT_ULL(21)
+#define IRDMAQPC_SNDMARKERS BIT_ULL(22)
+#define IRDMAQPC_DCQCNENABLE BIT_ULL(22)
+#define IRDMAQPC_FW_CC_ENABLE BIT_ULL(28)
+#define IRDMAQPC_RCVNOICRC BIT_ULL(31)
+#define IRDMAQPC_BINDEN BIT_ULL(23)
+#define IRDMAQPC_FASTREGEN BIT_ULL(24)
+#define IRDMAQPC_PRIVEN BIT_ULL(25)
+#define IRDMAQPC_TIMELYENABLE BIT_ULL(27)
+#define IRDMAQPC_THIGH GENMASK_ULL(63, 52)
+#define IRDMAQPC_TLOW GENMASK_ULL(39, 32)
+#define IRDMAQPC_REMENDPOINTIDX GENMASK_ULL(16, 0)
+#define IRDMAQPC_USESTATSINSTANCE BIT_ULL(26)
+#define IRDMAQPC_IWARPMODE BIT_ULL(28)
+#define IRDMAQPC_RCVMARKERS BIT_ULL(29)
+#define IRDMAQPC_ALIGNHDRS BIT_ULL(30)
+#define IRDMAQPC_RCVNOMPACRC BIT_ULL(31)
+#define IRDMAQPC_RCVMARKOFFSET GENMASK_ULL(40, 32)
+#define IRDMAQPC_SNDMARKOFFSET GENMASK_ULL(56, 48)
+
+#define IRDMAQPC_QPCOMPCTX IRDMA_CQPHC_QPCTX
+#define IRDMAQPC_SQTPHVAL GENMASK_ULL(7, 0)
+#define IRDMAQPC_RQTPHVAL GENMASK_ULL(15, 8)
+#define IRDMAQPC_QSHANDLE GENMASK_ULL(25, 16)
+#define IRDMAQPC_EXCEPTION_LAN_QUEUE GENMASK_ULL(43, 32)
+#define IRDMAQPC_LOCAL_IPADDR3 GENMASK_ULL(31, 0)
+#define IRDMAQPC_LOCAL_IPADDR2 GENMASK_ULL(63, 32)
+#define IRDMAQPC_LOCAL_IPADDR1 GENMASK_ULL(31, 0)
+#define IRDMAQPC_LOCAL_IPADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_FW_VER_MINOR GENMASK_ULL(15, 0)
+#define IRDMA_FW_VER_MAJOR GENMASK_ULL(31, 16)
+#define IRDMA_FEATURE_INFO GENMASK_ULL(47, 0)
+#define IRDMA_FEATURE_CNT GENMASK_ULL(47, 32)
+#define IRDMA_FEATURE_TYPE GENMASK_ULL(63, 48)
+
+#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
+#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
+#define IRDMAQPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
+#define IRDMAQPSQ_PUSHWQE BIT_ULL(56)
+#define IRDMAQPSQ_STREAMMODE BIT_ULL(58)
+#define IRDMAQPSQ_WAITFORRCVPDU BIT_ULL(59)
+#define IRDMAQPSQ_READFENCE BIT_ULL(60)
+#define IRDMAQPSQ_LOCALFENCE BIT_ULL(61)
+#define IRDMAQPSQ_UDPHEADER BIT_ULL(61)
+#define IRDMAQPSQ_L4LEN GENMASK_ULL(45, 42)
+#define IRDMAQPSQ_SIGCOMPL BIT_ULL(62)
+#define IRDMAQPSQ_VALID BIT_ULL(63)
+
+#define IRDMAQPSQ_FRAG_TO IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_FRAG_VALID BIT_ULL(63)
+#define IRDMAQPSQ_FRAG_LEN GENMASK_ULL(62, 32)
+#define IRDMAQPSQ_FRAG_STAG GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_GEN1_FRAG_LEN GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_GEN1_FRAG_STAG GENMASK_ULL(63, 32)
+#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
+#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
+#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
+
+#define IRDMA_INLINE_VALID_S 7
+#define IRDMAQPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
+#define IRDMAQPSQ_IMMDATAFLAG BIT_ULL(47)
+#define IRDMAQPSQ_REPORTRTT BIT_ULL(46)
+
+#define IRDMAQPSQ_IMMDATA GENMASK_ULL(63, 0)
+#define IRDMAQPSQ_REMSTAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
+
+#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
+#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
+#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
+
+#define IRDMAQPSQ_MWLEN IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_PARENTMRSTAG GENMASK_ULL(63, 32)
+#define IRDMAQPSQ_MWSTAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
+
+#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
+
+#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0)
+#define IRDMAQPSQ_STAGINDEX GENMASK_ULL(31, 8)
+#define IRDMAQPSQ_COPYHOSTPBLS BIT_ULL(43)
+#define IRDMAQPSQ_LPBLSIZE GENMASK_ULL(45, 44)
+#define IRDMAQPSQ_HPAGESIZE GENMASK_ULL(47, 46)
+#define IRDMAQPSQ_STAGLEN GENMASK_ULL(40, 0)
+#define IRDMAQPSQ_FIRSTPMPBLIDXLO GENMASK_ULL(63, 48)
+#define IRDMAQPSQ_FIRSTPMPBLIDXHI GENMASK_ULL(11, 0)
+#define IRDMAQPSQ_PBLADDR GENMASK_ULL(63, 12)
+
+/* iwarp QP RQ WQE common fields */
+#define IRDMAQPRQ_ADDFRAGCNT IRDMAQPSQ_ADDFRAGCNT
+#define IRDMAQPRQ_VALID IRDMAQPSQ_VALID
+#define IRDMAQPRQ_COMPLCTX IRDMA_CQPHC_QPCTX
+#define IRDMAQPRQ_FRAG_LEN IRDMAQPSQ_FRAG_LEN
+#define IRDMAQPRQ_STAG IRDMAQPSQ_FRAG_STAG
+#define IRDMAQPRQ_TO IRDMAQPSQ_FRAG_TO
+
+#define IRDMAPFINT_OICR_HMC_ERR_M BIT(26)
+#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
+#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
+
+#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(18, 0)
+#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(19, 0)
+#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32)
+#define IRDMA_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0)
+#define IRDMA_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16)
+#define IRDMA_QUERY_FPM_TIMERBUCKET GENMASK_ULL(47, 32)
+#define IRDMA_QUERY_FPM_RRFBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_RRFFLBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_OOISCFBLOCKSIZE GENMASK_ULL(63, 32)
+#define IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID GENMASK_ULL(5, 0)
+
+#define IRDMA_GET_CURRENT_AEQ_ELEM(_aeq) \
+ ( \
+ (_aeq)->aeqe_base[IRDMA_RING_CURRENT_TAIL((_aeq)->aeq_ring)].buf \
+ )
+
+#define IRDMA_GET_CURRENT_CEQ_ELEM(_ceq) \
+ ( \
+ (_ceq)->ceqe_base[IRDMA_RING_CURRENT_TAIL((_ceq)->ceq_ring)].buf \
+ )
+
+#define IRDMA_GET_CEQ_ELEM_AT_POS(_ceq, _pos) \
+ ( \
+ (_ceq)->ceqe_base[_pos].buf \
+ )
+
+#define IRDMA_RING_GET_NEXT_TAIL(_ring, _idx) \
+ ( \
+ ((_ring).tail + (_idx)) % (_ring).size \
+ )
+
+#define IRDMA_CQP_INIT_WQE(wqe) memset(wqe, 0, 64)
+
+#define IRDMA_GET_CURRENT_CQ_ELEM(_cq) \
+ ( \
+ (_cq)->cq_base[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
+ )
+#define IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(_cq) \
+ ( \
+ ((struct irdma_extended_cqe *) \
+ ((_cq)->cq_base))[IRDMA_RING_CURRENT_HEAD((_cq)->cq_ring)].buf \
+ )
+
+#define IRDMA_RING_INIT(_ring, _size) \
+ { \
+ (_ring).head = 0; \
+ (_ring).tail = 0; \
+ (_ring).size = (_size); \
+ }
+#define IRDMA_RING_SIZE(_ring) ((_ring).size)
+#define IRDMA_RING_CURRENT_HEAD(_ring) ((_ring).head)
+#define IRDMA_RING_CURRENT_TAIL(_ring) ((_ring).tail)
+
+#define IRDMA_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!IRDMA_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = -ENOMEM; \
+ } \
+ }
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < size) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = -ENOMEM; \
+ } \
+ }
+#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if (!IRDMA_SQ_RING_FULL_ERR(_ring)) { \
+ (_ring).head = ((_ring).head + 1) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = -ENOMEM; \
+ } \
+ }
+#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+ { \
+ register u32 size; \
+ size = (_ring).size; \
+ if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < (size - 256)) { \
+ (_ring).head = ((_ring).head + (_count)) % size; \
+ (_retcode) = 0; \
+ } else { \
+ (_retcode) = -ENOMEM; \
+ } \
+ }
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
+ (_ring).head = ((_ring).head + (_count)) % (_ring).size
+
+#define IRDMA_RING_MOVE_TAIL(_ring) \
+ (_ring).tail = ((_ring).tail + 1) % (_ring).size
+
+#define IRDMA_RING_MOVE_HEAD_NOCHECK(_ring) \
+ (_ring).head = ((_ring).head + 1) % (_ring).size
+
+#define IRDMA_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
+ (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
+
+#define IRDMA_RING_SET_TAIL(_ring, _pos) \
+ (_ring).tail = (_pos) % (_ring).size
+
+#define IRDMA_RING_FULL_ERR(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 1)) \
+ )
+
+#define IRDMA_ERR_RING_FULL2(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 2)) \
+ )
+
+#define IRDMA_ERR_RING_FULL3(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 3)) \
+ )
+
+#define IRDMA_SQ_RING_FULL_ERR(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 257)) \
+ )
+
+#define IRDMA_ERR_SQ_RING_FULL2(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 258)) \
+ )
+#define IRDMA_ERR_SQ_RING_FULL3(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 259)) \
+ )
+#define IRDMA_RING_MORE_WORK(_ring) \
+ ( \
+ (IRDMA_RING_USED_QUANTA(_ring) != 0) \
+ )
+
+#define IRDMA_RING_USED_QUANTA(_ring) \
+ ( \
+ (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
+ )
+
+#define IRDMA_RING_FREE_QUANTA(_ring) \
+ ( \
+ ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 1) \
+ )
+
+#define IRDMA_SQ_RING_FREE_QUANTA(_ring) \
+ ( \
+ ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 257) \
+ )
+
+#define IRDMA_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
+ { \
+ index = IRDMA_RING_CURRENT_HEAD(_ring); \
+ IRDMA_RING_MOVE_HEAD(_ring, _retcode); \
+ }
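+/*
+ * Illustrative usage sketch (not part of the driver): these macros treat a
+ * struct with head, tail and size members as a circular buffer. A producer
+ * claims quanta by advancing head (with a full-ring check) and a consumer
+ * releases them by advancing tail; IRDMA_RING_USED_QUANTA() is the modular
+ * distance between the two. The SQ variants effectively hold back an extra
+ * 256 quanta, which is why they compare against (size - 257) and subtract
+ * 257. IRDMA_ATOMIC_RING_MOVE_HEAD() bundles the head capture and advance
+ * into one step.
+ *
+ *   u32 idx;
+ *   int ret;
+ *
+ *   IRDMA_RING_INIT(ring, 2048);
+ *   idx = IRDMA_RING_CURRENT_HEAD(ring);
+ *   IRDMA_RING_MOVE_HEAD(ring, ret);   // claim the quantum at idx
+ *   if (!ret)
+ *           fill_wqe(idx);             // hypothetical helper
+ *   IRDMA_RING_MOVE_TAIL(ring);        // corresponding completion consumed
+ */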
+
+enum irdma_qp_wqe_size {
+ IRDMA_WQE_SIZE_32 = 32,
+ IRDMA_WQE_SIZE_64 = 64,
+ IRDMA_WQE_SIZE_96 = 96,
+ IRDMA_WQE_SIZE_128 = 128,
+ IRDMA_WQE_SIZE_256 = 256,
+};
+
+enum irdma_ws_node_op {
+ IRDMA_ADD_NODE = 0,
+ IRDMA_MODIFY_NODE,
+ IRDMA_DEL_NODE,
+};
+
+enum {
+ IRDMA_Q_ALIGNMENT_M = (128 - 1),
+ IRDMA_AEQ_ALIGNMENT_M = (256 - 1),
+ IRDMA_Q2_ALIGNMENT_M = (256 - 1),
+ IRDMA_CEQ_ALIGNMENT_M = (256 - 1),
+ IRDMA_CQ0_ALIGNMENT_M = (256 - 1),
+ IRDMA_HOST_CTX_ALIGNMENT_M = (4 - 1),
+ IRDMA_SHADOWAREA_M = (128 - 1),
+ IRDMA_FPM_QUERY_BUF_ALIGNMENT_M = (4 - 1),
+ IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M = (4 - 1),
+};
+
+enum irdma_alignment {
+ IRDMA_CQP_ALIGNMENT = 0x200,
+ IRDMA_AEQ_ALIGNMENT = 0x100,
+ IRDMA_CEQ_ALIGNMENT = 0x100,
+ IRDMA_CQ0_ALIGNMENT = 0x100,
+ IRDMA_SD_BUF_ALIGNMENT = 0x80,
+ IRDMA_FEATURE_BUF_ALIGNMENT = 0x8,
+};
+
+enum icrdma_protocol_used {
+ ICRDMA_ANY_PROTOCOL = 0,
+ ICRDMA_IWARP_PROTOCOL_ONLY = 1,
+ ICRDMA_ROCE_PROTOCOL_ONLY = 2,
+};
+
+/**
+ * set_64bit_val - set 64 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @val: value to write
+ **/
+static inline void set_64bit_val(__le64 *wqe_words, u32 byte_index, u64 val)
+{
+ wqe_words[byte_index >> 3] = cpu_to_le64(val);
+}
+
+/**
+ * set_32bit_val - set 32 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @val: value to write
+ **/
+static inline void set_32bit_val(__le32 *wqe_words, u32 byte_index, u32 val)
+{
+ wqe_words[byte_index >> 2] = cpu_to_le32(val);
+}
+
+/**
+ * get_64bit_val - read 64 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @val: read value
+ **/
+static inline void get_64bit_val(__le64 *wqe_words, u32 byte_index, u64 *val)
+{
+ *val = le64_to_cpu(wqe_words[byte_index >> 3]);
+}
+
+/**
+ * get_32bit_val - read 32 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @val: return 32 bit value
+ **/
+static inline void get_32bit_val(__le32 *wqe_words, u32 byte_index, u32 *val)
+{
+ *val = le32_to_cpu(wqe_words[byte_index >> 2]);
+}
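+/*
+ * Illustrative sketch (not part of the driver): the GENMASK_ULL()/BIT_ULL()
+ * field definitions above are meant to be combined with FIELD_PREP() and
+ * stored into a WQE with set_64bit_val(). byte_index is a byte offset, so
+ * an offset of 24 selects the fourth 64-bit word of the WQE. The variable
+ * names below (opcode, signaled, polarity) are placeholders.
+ *
+ *   u64 hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, opcode) |
+ *             FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
+ *             FIELD_PREP(IRDMAQPSQ_VALID, polarity);
+ *
+ *   set_64bit_val(wqe, 24, hdr);
+ */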
+#endif /* IRDMA_DEFS_H */
diff --git a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c
new file mode 100644
index 0000000000..49307ce8c4
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/hmc.c
@@ -0,0 +1,697 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+
+/**
+ * irdma_find_sd_index_limit - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by irdma_hmc_rsrc_type.
+ */
+
+static void irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
+ u32 idx, u32 cnt, u32 *sd_idx,
+ u32 *sd_limit)
+{
+ u64 fpm_addr, fpm_limit;
+
+ fpm_addr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+ *sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
+ *sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
+ *sd_limit += 1;
+}
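+/*
+ * Worked example (illustrative only): with IRDMA_HMC_DIRECT_BP_SIZE of
+ * 0x200000 (2MB), an object base of 0x300000, an object size of 0x1000,
+ * idx = 0 and cnt = 1024:
+ *   fpm_addr  = 0x300000
+ *   fpm_limit = 0x300000 + 0x1000 * 1024 = 0x700000
+ *   *sd_idx   = 0x300000 / 0x200000 = 1
+ *   *sd_limit = (0x6fffff / 0x200000) + 1 = 4
+ * so the object range spans segment descriptors 1 through 3 (sd_limit is
+ * exclusive).
+ */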
+
+/**
+ * irdma_find_pd_index_limit - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_idx: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by irdma_hmc_rsrc_type.
+ */
+
+static void irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
+ u32 idx, u32 cnt, u32 *pd_idx,
+ u32 *pd_limit)
+{
+ u64 fpm_adr, fpm_limit;
+
+ fpm_adr = hmc_info->hmc_obj[type].base +
+ hmc_info->hmc_obj[type].size * idx;
+ fpm_limit = fpm_adr + hmc_info->hmc_obj[type].size * cnt;
+ *pd_idx = (u32)(fpm_adr / IRDMA_HMC_PAGED_BP_SIZE);
+ *pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
+ *pd_limit += 1;
+}
+
+/**
+ * irdma_set_sd_entry - setup entry for sd programming
+ * @pa: physical addr
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static void irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
+ struct irdma_update_sd_entry *entry)
+{
+ entry->data = pa |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
+ type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);
+
+ entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
+}
+
+/**
+ * irdma_clr_sd_entry - setup entry for sd clear
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static void irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
+ struct irdma_update_sd_entry *entry)
+{
+ entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
+ FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
+ type == IRDMA_SD_TYPE_PAGED ? 0 : 1);
+
+ entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) | BIT(15);
+}
+
+/**
+ * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
+ * @dev: pointer to our device struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ */
+static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
+ u32 pd_idx)
+{
+ u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
+ FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
+ FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);
+
+ writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
+}
+
+/**
+ * irdma_hmc_sd_one - setup 1 sd entry for cqp
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ * @pa: physical addr
+ * @sd_idx: sd index
+ * @type: paged or direct sd
+ * @setsd: flag to set or clear sd
+ */
+int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
+ enum irdma_sd_entry_type type, bool setsd)
+{
+ struct irdma_update_sds_info sdinfo;
+
+ sdinfo.cnt = 1;
+ sdinfo.hmc_fn_id = hmc_fn_id;
+ if (setsd)
+ irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
+ else
+ irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
+ return dev->cqp->process_cqp_sds(dev, &sdinfo);
+}
+
+/**
+ * irdma_hmc_sd_grp - setup group of sd entries for cqp
+ * @dev: pointer to the device structure
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: sd index
+ * @sd_cnt: number of sd entries
+ * @setsd: flag to set or clear sd
+ */
+static int irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 sd_index,
+ u32 sd_cnt, bool setsd)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_update_sds_info sdinfo = {};
+ u64 pa;
+ u32 i;
+ int ret_code = 0;
+
+ sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
+ for (i = sd_index; i < sd_index + sd_cnt; i++) {
+ sd_entry = &hmc_info->sd_table.sd_entry[i];
+ if (!sd_entry || (!sd_entry->valid && setsd) ||
+ (sd_entry->valid && !setsd))
+ continue;
+ if (setsd) {
+ pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa :
+ sd_entry->u.bp.addr.pa;
+ irdma_set_sd_entry(pa, i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ } else {
+ irdma_clr_sd_entry(i, sd_entry->entry_type,
+ &sdinfo.entry[sdinfo.cnt]);
+ }
+ sdinfo.cnt++;
+ if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: sd_programming failed err=%d\n",
+ ret_code);
+ return ret_code;
+ }
+
+ sdinfo.cnt = 0;
+ }
+ }
+ if (sdinfo.cnt)
+ ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+
+ return ret_code;
+}
+
+/**
+ * irdma_hmc_finish_add_sd_reg - program sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: create obj info
+ */
+static int irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
+{
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return -EINVAL;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return -EINVAL;
+
+ if (!info->add_sd_cnt)
+ return 0;
+ return irdma_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0], info->add_sd_cnt,
+ true);
+}
+
+/**
+ * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to irdma_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ */
+int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx = 0, pd_lmt = 0;
+ u32 pd_idx1 = 0, pd_lmt1 = 0;
+ u32 i, j;
+ bool pd_error = false;
+ int ret_code = 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+ return -EINVAL;
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
+ info->rsrc_type, info->start_idx, info->count,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return -EINVAL;
+ }
+
+ irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &sd_idx,
+ &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ return -EINVAL;
+ }
+
+ irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = sd_idx; j < sd_lmt; j++) {
+ ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
+ info->entry_type,
+ IRDMA_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ goto exit_sd_error;
+
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+ if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
+ (dev->hmc_info == info->hmc_info &&
+ info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
+ pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
+ pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
+ for (i = pd_idx1; i < pd_lmt1; i++) {
+ /* update the pd table entry */
+ ret_code = irdma_add_pd_table_entry(dev,
+ info->hmc_info,
+ i, NULL);
+ if (ret_code) {
+ pd_error = true;
+ break;
+ }
+ }
+ if (pd_error) {
+ while (i && (i > pd_idx1)) {
+ irdma_remove_pd_bp(dev, info->hmc_info,
+ i - 1);
+ i--;
+ }
+ }
+ }
+ if (sd_entry->valid)
+ continue;
+
+ info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
+ info->add_sd_cnt++;
+ sd_entry->valid = true;
+ }
+ return irdma_hmc_finish_add_sd_reg(dev, info);
+
+exit_sd_error:
+ while (j && (j > sd_idx)) {
+ sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+ switch (sd_entry->entry_type) {
+ case IRDMA_SD_TYPE_PAGED:
+ pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
+ pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
+ for (i = pd_idx1; i < pd_lmt1; i++)
+ irdma_prep_remove_pd_page(info->hmc_info, i);
+ break;
+ case IRDMA_SD_TYPE_DIRECT:
+ irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
+ break;
+ default:
+ ret_code = -EINVAL;
+ break;
+ }
+ j--;
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_finish_del_sd_reg - delete sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: delete obj info
+ * @reset: true if called before reset
+ */
+static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info,
+ bool reset)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
+ u32 i, sd_idx;
+ struct irdma_dma_mem *mem;
+
+ if (!reset)
+ ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
+ info->hmc_info->sd_indexes[0],
+ info->del_sd_cnt, false);
+
+ if (ret_code)
+ ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd sd_grp\n");
+ for (i = 0; i < info->del_sd_cnt; i++) {
+ sd_idx = info->hmc_info->sd_indexes[i];
+ sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
+ mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
+ &sd_entry->u.pd_table.pd_page_addr :
+ &sd_entry->u.bp.addr;
+
+ if (!mem || !mem->va) {
+ ibdev_dbg(to_ibdev(dev), "HMC: error cqp sd mem\n");
+ } else {
+ dma_free_coherent(dev->hw->device, mem->size, mem->va,
+ mem->pa);
+ mem->va = NULL;
+ }
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_sc_del_hmc_obj - remove pe hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to irdma_hmc_del_obj_info struct
+ * @reset: true if called before reset
+ *
+ * This will de-populate the SDs and PDs. It frees
+ * the memory for PDs and backing storage. After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * book-keeping information about PDs and backing storage.
+ */
+int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info, bool reset)
+{
+ struct irdma_hmc_pd_table *pd_table;
+ u32 sd_idx, sd_lmt;
+ u32 pd_idx, pd_lmt, rel_pd_idx;
+ u32 i, j;
+ int ret_code = 0;
+
+ if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
+ info->start_idx, info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return -EINVAL;
+ }
+
+ if ((info->start_idx + info->count) >
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
+ info->start_idx, info->count, info->rsrc_type,
+ info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+ return -EINVAL;
+ }
+
+ irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &pd_idx,
+ &pd_lmt);
+
+ for (j = pd_idx; j < pd_lmt; j++) {
+ sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;
+
+ if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
+ continue;
+
+ if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
+ IRDMA_SD_TYPE_PAGED)
+ continue;
+
+ rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
+ pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ if (pd_table->pd_entry &&
+ pd_table->pd_entry[rel_pd_idx].valid) {
+ ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: remove_pd_bp error\n");
+ return ret_code;
+ }
+ }
+ }
+
+ irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+ info->start_idx, info->count, &sd_idx,
+ &sd_lmt);
+ if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+ sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+ ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
+ return -EINVAL;
+ }
+
+ for (i = sd_idx; i < sd_lmt; i++) {
+ pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
+ if (!info->hmc_info->sd_table.sd_entry[i].valid)
+ continue;
+ switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+ case IRDMA_SD_TYPE_DIRECT:
+ ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
+ if (!ret_code) {
+ info->hmc_info->sd_indexes[info->del_sd_cnt] =
+ (u16)i;
+ info->del_sd_cnt++;
+ }
+ break;
+ case IRDMA_SD_TYPE_PAGED:
+ ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
+ if (ret_code)
+ break;
+ if (dev->hmc_info != info->hmc_info &&
+ info->rsrc_type == IRDMA_HMC_IW_PBLE &&
+ pd_table->pd_entry) {
+ kfree(pd_table->pd_entry_virt_mem.va);
+ pd_table->pd_entry = NULL;
+ }
+ info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
+ info->del_sd_cnt++;
+ break;
+ default:
+ break;
+ }
+ }
+ return irdma_finish_del_sd_reg(dev, info, reset);
+}
+
+/**
+ * irdma_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ */
+int irdma_add_sd_table_entry(struct irdma_hw *hw,
+ struct irdma_hmc_info *hmc_info, u32 sd_index,
+ enum irdma_sd_entry_type type, u64 direct_mode_sz)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_dma_mem dma_mem;
+ u64 alloc_len;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+ if (!sd_entry->valid) {
+ if (type == IRDMA_SD_TYPE_PAGED)
+ alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
+ else
+ alloc_len = direct_mode_sz;
+
+ /* allocate a 4K pd page or 2M backing page */
+ dma_mem.size = ALIGN(alloc_len, IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
+ dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
+ &dma_mem.pa, GFP_KERNEL);
+ if (!dma_mem.va)
+ return -ENOMEM;
+ if (type == IRDMA_SD_TYPE_PAGED) {
+ struct irdma_virt_mem *vmem =
+ &sd_entry->u.pd_table.pd_entry_virt_mem;
+
+ vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
+ vmem->va = kzalloc(vmem->size, GFP_KERNEL);
+ if (!vmem->va) {
+ dma_free_coherent(hw->device, dma_mem.size,
+ dma_mem.va, dma_mem.pa);
+ dma_mem.va = NULL;
+ return -ENOMEM;
+ }
+ sd_entry->u.pd_table.pd_entry = vmem->va;
+
+ memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
+ sizeof(sd_entry->u.pd_table.pd_page_addr));
+ } else {
+ memcpy(&sd_entry->u.bp.addr, &dma_mem,
+ sizeof(sd_entry->u.bp.addr));
+
+ sd_entry->u.bp.sd_pd_index = sd_index;
+ }
+
+ hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+ hmc_info->sd_table.use_cnt++;
+ }
+ if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
+ sd_entry->u.bp.use_cnt++;
+
+ return 0;
+}
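+/*
+ * Illustrative note: for a direct SD the DMA allocation above is the backing
+ * page itself (direct_mode_sz, IRDMA_HMC_DIRECT_BP_SIZE when called from
+ * irdma_sc_create_hmc_obj()), while for a paged SD it is a 4K page of page
+ * descriptors plus a host-side array of 512 struct irdma_hmc_pd_entry used
+ * for book-keeping. The backing pages of a paged SD are then added one at a
+ * time by irdma_add_pd_table_entry().
+ */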
+
+/**
+ * irdma_add_pd_table_entry - Adds page descriptor to the specified table
+ * @dev: pointer to our device structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use preallocated page instead of allocating a new one.
+ *
+ * This function:
+ * 1. Initializes the pd entry
+ * 2. Adds pd_entry in the pd_table
+ * 3. Marks the entry valid in the irdma_hmc_pd_entry structure
+ * 4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ * 1. The memory for the pd should be pinned down, physically contiguous,
+ * aligned on a 4K boundary and zeroed.
+ * 2. It should be 4K in size.
+ */
+int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 pd_index,
+ struct irdma_dma_mem *rsrc_pg)
+{
+ struct irdma_hmc_pd_table *pd_table;
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_dma_mem mem;
+ struct irdma_dma_mem *page = &mem;
+ u32 sd_idx, rel_pd_idx;
+ u64 *pd_addr;
+ u64 page_desc;
+
+ if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
+ return -EINVAL;
+
+ sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
+ if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
+ IRDMA_SD_TYPE_PAGED)
+ return 0;
+
+ rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (!pd_entry->valid) {
+ if (rsrc_pg) {
+ pd_entry->rsrc_pg = true;
+ page = rsrc_pg;
+ } else {
+ page->size = ALIGN(IRDMA_HMC_PAGED_BP_SIZE,
+ IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
+ page->va = dma_alloc_coherent(dev->hw->device,
+ page->size, &page->pa,
+ GFP_KERNEL);
+ if (!page->va)
+ return -ENOMEM;
+
+ pd_entry->rsrc_pg = false;
+ }
+
+ memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
+ pd_entry->bp.sd_pd_index = pd_index;
+ pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
+ page_desc = page->pa | 0x1;
+ pd_addr = pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
+ pd_entry->sd_index = sd_idx;
+ pd_entry->valid = true;
+ pd_table->use_cnt++;
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
+ }
+ pd_entry->bp.use_cnt++;
+
+ return 0;
+}
+
+/**
+ * irdma_remove_pd_bp - remove a backing page from a page descriptor
+ * @dev: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ *
+ * This function:
+ * 1. Marks the entry in pd table (for paged address mode) or in sd table
+ * (for direct address mode) invalid.
+ * 2. Writes to register PMPDINV to invalidate the backing page in the FV cache
+ * 3. Decrements the ref count for the pd_entry
+ * assumptions:
+ * 1. Caller can deallocate the memory used by backing storage after this
+ * function returns.
+ */
+int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 idx)
+{
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_hmc_pd_table *pd_table;
+ struct irdma_hmc_sd_entry *sd_entry;
+ u32 sd_idx, rel_pd_idx;
+ struct irdma_dma_mem *mem;
+ u64 *pd_addr;
+
+ sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
+ rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
+ if (sd_idx >= hmc_info->sd_table.sd_cnt)
+ return -EINVAL;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+ if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
+ return -EINVAL;
+
+ pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+ pd_entry = &pd_table->pd_entry[rel_pd_idx];
+ if (--pd_entry->bp.use_cnt)
+ return 0;
+
+ pd_entry->valid = false;
+ pd_table->use_cnt--;
+ pd_addr = pd_table->pd_page_addr.va;
+ pd_addr += rel_pd_idx;
+ memset(pd_addr, 0, sizeof(u64));
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
+
+ if (!pd_entry->rsrc_pg) {
+ mem = &pd_entry->bp.addr;
+ if (!mem || !mem->va)
+ return -EINVAL;
+
+ dma_free_coherent(dev->hw->device, mem->size, mem->va,
+ mem->pa);
+ mem->va = NULL;
+ }
+ if (!pd_table->use_cnt)
+ kfree(pd_table->pd_entry_virt_mem.va);
+
+ return 0;
+}
+
+/**
+ * irdma_prep_remove_sd_bp - Prepares to remove a backing page from an sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ */
+int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+ if (--sd_entry->u.bp.use_cnt)
+ return -EBUSY;
+
+ hmc_info->sd_table.use_cnt--;
+ sd_entry->valid = false;
+
+ return 0;
+}
+
+/**
+ * irdma_prep_remove_pd_page - Prepares to remove a PD page from an sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ */
+int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
+{
+ struct irdma_hmc_sd_entry *sd_entry;
+
+ sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+ if (sd_entry->u.pd_table.use_cnt)
+ return -EBUSY;
+
+ sd_entry->valid = false;
+ hmc_info->sd_table.use_cnt--;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h
new file mode 100644
index 0000000000..f5c5dacc70
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/hmc.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_HMC_H
+#define IRDMA_HMC_H
+
+#include "defs.h"
+
+#define IRDMA_HMC_MAX_BP_COUNT 512
+#define IRDMA_MAX_SD_ENTRIES 11
+#define IRDMA_HW_DBG_HMC_INVALID_BP_MARK 0xca
+#define IRDMA_HMC_INFO_SIGNATURE 0x484d5347
+#define IRDMA_HMC_PD_CNT_IN_SD 512
+#define IRDMA_HMC_DIRECT_BP_SIZE 0x200000
+#define IRDMA_HMC_MAX_SD_COUNT 8192
+#define IRDMA_HMC_PAGED_BP_SIZE 4096
+#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT 4096
+#define IRDMA_FIRST_VF_FPM_ID 8
+#define FPM_MULTIPLIER 1024
+
+enum irdma_hmc_rsrc_type {
+ IRDMA_HMC_IW_QP = 0,
+ IRDMA_HMC_IW_CQ = 1,
+ IRDMA_HMC_IW_RESERVED = 2,
+ IRDMA_HMC_IW_HTE = 3,
+ IRDMA_HMC_IW_ARP = 4,
+ IRDMA_HMC_IW_APBVT_ENTRY = 5,
+ IRDMA_HMC_IW_MR = 6,
+ IRDMA_HMC_IW_XF = 7,
+ IRDMA_HMC_IW_XFFL = 8,
+ IRDMA_HMC_IW_Q1 = 9,
+ IRDMA_HMC_IW_Q1FL = 10,
+ IRDMA_HMC_IW_TIMER = 11,
+ IRDMA_HMC_IW_FSIMC = 12,
+ IRDMA_HMC_IW_FSIAV = 13,
+ IRDMA_HMC_IW_PBLE = 14,
+ IRDMA_HMC_IW_RRF = 15,
+ IRDMA_HMC_IW_RRFFL = 16,
+ IRDMA_HMC_IW_HDR = 17,
+ IRDMA_HMC_IW_MD = 18,
+ IRDMA_HMC_IW_OOISC = 19,
+ IRDMA_HMC_IW_OOISCFFL = 20,
+ IRDMA_HMC_IW_MAX, /* Must be last entry */
+};
+
+enum irdma_sd_entry_type {
+ IRDMA_SD_TYPE_INVALID = 0,
+ IRDMA_SD_TYPE_PAGED = 1,
+ IRDMA_SD_TYPE_DIRECT = 2,
+};
+
+struct irdma_hmc_obj_info {
+ u64 base;
+ u32 max_cnt;
+ u32 cnt;
+ u64 size;
+};
+
+struct irdma_hmc_bp {
+ enum irdma_sd_entry_type entry_type;
+ struct irdma_dma_mem addr;
+ u32 sd_pd_index;
+ u32 use_cnt;
+};
+
+struct irdma_hmc_pd_entry {
+ struct irdma_hmc_bp bp;
+ u32 sd_index;
+ bool rsrc_pg:1;
+ bool valid:1;
+};
+
+struct irdma_hmc_pd_table {
+ struct irdma_dma_mem pd_page_addr;
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_virt_mem pd_entry_virt_mem;
+ u32 use_cnt;
+ u32 sd_index;
+};
+
+struct irdma_hmc_sd_entry {
+ enum irdma_sd_entry_type entry_type;
+ bool valid;
+ union {
+ struct irdma_hmc_pd_table pd_table;
+ struct irdma_hmc_bp bp;
+ } u;
+};
+
+struct irdma_hmc_sd_table {
+ struct irdma_virt_mem addr;
+ u32 sd_cnt;
+ u32 use_cnt;
+ struct irdma_hmc_sd_entry *sd_entry;
+};
+
+struct irdma_hmc_info {
+ u32 signature;
+ u8 hmc_fn_id;
+ u16 first_sd_index;
+ struct irdma_hmc_obj_info *hmc_obj;
+ struct irdma_virt_mem hmc_obj_virt_mem;
+ struct irdma_hmc_sd_table sd_table;
+ u16 sd_indexes[IRDMA_HMC_MAX_SD_COUNT];
+};
+
+struct irdma_update_sd_entry {
+ u64 cmd;
+ u64 data;
+};
+
+struct irdma_update_sds_info {
+ u32 cnt;
+ u8 hmc_fn_id;
+ struct irdma_update_sd_entry entry[IRDMA_MAX_SD_ENTRIES];
+};
+
+struct irdma_ccq_cqe_info;
+struct irdma_hmc_fcn_info {
+ u32 vf_id;
+ u8 free_fcn;
+};
+
+struct irdma_hmc_create_obj_info {
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_virt_mem add_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 add_sd_cnt;
+ enum irdma_sd_entry_type entry_type;
+ bool privileged;
+};
+
+struct irdma_hmc_del_obj_info {
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_virt_mem del_sd_virt_mem;
+ u32 rsrc_type;
+ u32 start_idx;
+ u32 count;
+ u32 del_sd_cnt;
+ bool privileged;
+};
+
+int irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
+ struct irdma_dma_mem *src_mem, u64 src_offset, u64 size);
+int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info);
+int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
+ struct irdma_hmc_del_obj_info *info, bool reset);
+int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
+ enum irdma_sd_entry_type type,
+ bool setsd);
+int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
+ u8 hmc_fn_id);
+struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
+ u8 hmc_fn_id);
+int irdma_add_sd_table_entry(struct irdma_hw *hw,
+ struct irdma_hmc_info *hmc_info, u32 sd_index,
+ enum irdma_sd_entry_type type, u64 direct_mode_sz);
+int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 pd_index,
+ struct irdma_dma_mem *rsrc_pg);
+int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, u32 idx);
+int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx);
+int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
+#endif /* IRDMA_HMC_H */
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
new file mode 100644
index 0000000000..564c9188e1
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -0,0 +1,2751 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+
+static struct irdma_rsrc_limits rsrc_limits_table[] = {
+ [0] = {
+ .qplimit = SZ_128,
+ },
+ [1] = {
+ .qplimit = SZ_1K,
+ },
+ [2] = {
+ .qplimit = SZ_2K,
+ },
+ [3] = {
+ .qplimit = SZ_4K,
+ },
+ [4] = {
+ .qplimit = SZ_16K,
+ },
+ [5] = {
+ .qplimit = SZ_64K,
+ },
+ [6] = {
+ .qplimit = SZ_128K,
+ },
+ [7] = {
+ .qplimit = SZ_256K,
+ },
+};
+
+/* types of hmc objects */
+static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
+ IRDMA_HMC_IW_QP,
+ IRDMA_HMC_IW_CQ,
+ IRDMA_HMC_IW_HTE,
+ IRDMA_HMC_IW_ARP,
+ IRDMA_HMC_IW_APBVT_ENTRY,
+ IRDMA_HMC_IW_MR,
+ IRDMA_HMC_IW_XF,
+ IRDMA_HMC_IW_XFFL,
+ IRDMA_HMC_IW_Q1,
+ IRDMA_HMC_IW_Q1FL,
+ IRDMA_HMC_IW_PBLE,
+ IRDMA_HMC_IW_TIMER,
+ IRDMA_HMC_IW_FSIMC,
+ IRDMA_HMC_IW_FSIAV,
+ IRDMA_HMC_IW_RRF,
+ IRDMA_HMC_IW_RRFFL,
+ IRDMA_HMC_IW_HDR,
+ IRDMA_HMC_IW_MD,
+ IRDMA_HMC_IW_OOISC,
+ IRDMA_HMC_IW_OOISCFFL,
+};
+
+/**
+ * irdma_iwarp_ce_handler - handle iwarp completions
+ * @iwcq: iwarp cq receiving event
+ */
+static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
+{
+ struct irdma_cq *cq = iwcq->back_cq;
+
+ if (!cq->user_mode)
+ atomic_set(&cq->armed, 0);
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+/**
+ * irdma_puda_ce_handler - handle puda completion events
+ * @rf: RDMA PCI function
+ * @cq: puda completion q for event
+ */
+static void irdma_puda_ce_handler(struct irdma_pci_f *rf,
+ struct irdma_sc_cq *cq)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ u32 compl_error;
+ int status;
+
+ do {
+ status = irdma_puda_poll_cmpl(dev, cq, &compl_error);
+ if (status == -ENOENT)
+ break;
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: puda status = %d\n", status);
+ break;
+ }
+ if (compl_error) {
+ ibdev_dbg(to_ibdev(dev), "ERR: puda compl_err =0x%x\n",
+ compl_error);
+ break;
+ }
+ } while (1);
+
+ irdma_sc_ccq_arm(cq);
+}
+
+/**
+ * irdma_process_ceq - handle ceq for completions
+ * @rf: RDMA PCI function
+ * @ceq: ceq having cq for completion
+ */
+static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_sc_ceq *sc_ceq;
+ struct irdma_sc_cq *cq;
+ unsigned long flags;
+
+ sc_ceq = &ceq->sc_ceq;
+ do {
+ spin_lock_irqsave(&ceq->ce_lock, flags);
+ cq = irdma_sc_process_ceq(dev, sc_ceq);
+ if (!cq) {
+ spin_unlock_irqrestore(&ceq->ce_lock, flags);
+ break;
+ }
+
+ if (cq->cq_type == IRDMA_CQ_TYPE_IWARP)
+ irdma_iwarp_ce_handler(cq);
+
+ spin_unlock_irqrestore(&ceq->ce_lock, flags);
+
+ if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
+ queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
+ else if (cq->cq_type == IRDMA_CQ_TYPE_ILQ ||
+ cq->cq_type == IRDMA_CQ_TYPE_IEQ)
+ irdma_puda_ce_handler(rf, cq);
+ } while (1);
+}
+
+static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info)
+{
+ qp->sq_flush_code = info->sq;
+ qp->rq_flush_code = info->rq;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+
+ switch (info->ae_id) {
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ case IRDMA_AE_AMP_INVALID_STAG:
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ case IRDMA_AE_AMP_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp->flush_code = FLUSH_PROT_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ qp->flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp->flush_code = FLUSH_LOC_LEN_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ qp->flush_code = FLUSH_REM_ACCESS_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp->flush_code = FLUSH_REM_OP_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ qp->flush_code = FLUSH_FATAL_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
+ qp->flush_code = FLUSH_GENERAL_ERR;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp->flush_code = FLUSH_RETRY_EXC_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ case IRDMA_AE_AMP_MWBIND_VALID_STAG:
+ qp->flush_code = FLUSH_MW_BIND_ERR;
+ qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp->flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
+ break;
+ default:
+ qp->flush_code = FLUSH_GENERAL_ERR;
+ qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ }
+}
+
+/**
+ * irdma_process_aeq - handle aeq events
+ * @rf: RDMA PCI function
+ */
+static void irdma_process_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_aeq *aeq = &rf->aeq;
+ struct irdma_sc_aeq *sc_aeq = &aeq->sc_aeq;
+ struct irdma_aeqe_info aeinfo;
+ struct irdma_aeqe_info *info = &aeinfo;
+ int ret;
+ struct irdma_qp *iwqp = NULL;
+ struct irdma_cq *iwcq = NULL;
+ struct irdma_sc_qp *qp = NULL;
+ struct irdma_qp_host_ctx_info *ctx_info = NULL;
+ struct irdma_device *iwdev = rf->iwdev;
+ unsigned long flags;
+
+ u32 aeqcnt = 0;
+
+ if (!sc_aeq->size)
+ return;
+
+ do {
+ memset(info, 0, sizeof(*info));
+ ret = irdma_sc_get_next_aeqe(sc_aeq, info);
+ if (ret)
+ break;
+
+ aeqcnt++;
+ ibdev_dbg(&iwdev->ibdev,
+ "AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
+ info->ae_id, info->qp, info->qp_cq_id, info->tcp_state,
+ info->iwarp_state, info->ae_src);
+
+ if (info->qp) {
+ spin_lock_irqsave(&rf->qptable_lock, flags);
+ iwqp = rf->qp_table[info->qp_cq_id];
+ if (!iwqp) {
+ spin_unlock_irqrestore(&rf->qptable_lock,
+ flags);
+ if (info->ae_id == IRDMA_AE_QP_SUSPEND_COMPLETE) {
+ atomic_dec(&iwdev->vsi.qp_suspend_reqs);
+ wake_up(&iwdev->suspend_wq);
+ continue;
+ }
+ ibdev_dbg(&iwdev->ibdev, "AEQ: qp_id %d is already freed\n",
+ info->qp_cq_id);
+ continue;
+ }
+ irdma_qp_add_ref(&iwqp->ibqp);
+ spin_unlock_irqrestore(&rf->qptable_lock, flags);
+ qp = &iwqp->sc_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = info->tcp_state;
+ iwqp->hw_iwarp_state = info->iwarp_state;
+ if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
+ iwqp->last_aeq = info->ae_id;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ ctx_info = &iwqp->ctx_info;
+ } else {
+ if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
+ continue;
+ }
+
+ switch (info->ae_id) {
+ struct irdma_cm_node *cm_node;
+ case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
+ cm_node = iwqp->cm_node;
+ if (cm_node->accept_pend) {
+ atomic_dec(&cm_node->listener->pend_accepts_cnt);
+ cm_node->accept_pend = 0;
+ }
+ iwqp->rts_ae_rcvd = 1;
+ wake_up_interruptible(&iwqp->waitq);
+ break;
+ case IRDMA_AE_LLP_FIN_RECEIVED:
+ case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE:
+ if (qp->term_flags)
+ break;
+ if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
+ if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
+ iwqp->ibqp_state == IB_QPS_RTS) {
+ irdma_next_iw_state(iwqp,
+ IRDMA_QP_STATE_CLOSING,
+ 0, 0, 0);
+ irdma_cm_disconn(iwqp);
+ }
+ irdma_schedule_cm_timer(iwqp->cm_node,
+ (struct irdma_puda_buf *)iwqp,
+ IRDMA_TIMER_TYPE_CLOSE,
+ 1, 0);
+ }
+ break;
+ case IRDMA_AE_LLP_CLOSE_COMPLETE:
+ if (qp->term_flags)
+ irdma_terminate_done(qp, 0);
+ else
+ irdma_cm_disconn(iwqp);
+ break;
+ case IRDMA_AE_BAD_CLOSE:
+ case IRDMA_AE_RESET_SENT:
+ irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
+ 0);
+ irdma_cm_disconn(iwqp);
+ break;
+ case IRDMA_AE_LLP_CONNECTION_RESET:
+ if (atomic_read(&iwqp->close_timer_started))
+ break;
+ irdma_cm_disconn(iwqp);
+ break;
+ case IRDMA_AE_QP_SUSPEND_COMPLETE:
+ if (iwqp->iwdev->vsi.tc_change_pending) {
+ if (!atomic_dec_return(&qp->vsi->qp_suspend_reqs))
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ if (iwqp->suspend_pending) {
+ iwqp->suspend_pending = false;
+ wake_up(&iwqp->iwdev->suspend_wq);
+ }
+ break;
+ case IRDMA_AE_TERMINATE_SENT:
+ irdma_terminate_send_fin(qp);
+ break;
+ case IRDMA_AE_LLP_TERMINATE_RECEIVED:
+ irdma_terminate_received(qp, info);
+ break;
+ case IRDMA_AE_CQ_OPERATION_ERROR:
+ ibdev_err(&iwdev->ibdev,
+ "Processing an iWARP related AE for CQ misc = 0x%04X\n",
+ info->ae_id);
+
+ spin_lock_irqsave(&rf->cqtable_lock, flags);
+ iwcq = rf->cq_table[info->qp_cq_id];
+ if (!iwcq) {
+ spin_unlock_irqrestore(&rf->cqtable_lock,
+ flags);
+ ibdev_dbg(to_ibdev(dev),
+ "cq_id %d is already freed\n", info->qp_cq_id);
+ continue;
+ }
+ irdma_cq_add_ref(&iwcq->ibcq);
+ spin_unlock_irqrestore(&rf->cqtable_lock, flags);
+
+ if (iwcq->ibcq.event_handler) {
+ struct ib_event ibevent;
+
+ ibevent.device = iwcq->ibcq.device;
+ ibevent.event = IB_EVENT_CQ_ERR;
+ ibevent.element.cq = &iwcq->ibcq;
+ iwcq->ibcq.event_handler(&ibevent,
+ iwcq->ibcq.cq_context);
+ }
+ irdma_cq_rem_ref(&iwcq->ibcq);
+ break;
+ case IRDMA_AE_RESET_NOT_SENT:
+ case IRDMA_AE_LLP_DOUBT_REACHABILITY:
+ case IRDMA_AE_RESOURCE_EXHAUSTION:
+ break;
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ case IRDMA_AE_STAG_ZERO_INVALID:
+ case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
+ case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_INVALID_QN:
+ case IRDMA_AE_DDP_NO_L_BIT:
+ case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+ case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+ case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
+ case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+ case IRDMA_AE_INVALID_ARP_ENTRY:
+ case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
+ case IRDMA_AE_STALE_ARP_ENTRY:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_SYN_RECEIVED:
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+ case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ default:
+ ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
+ info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ ctx_info->roce_info->err_rq_idx_valid = info->rq;
+ if (info->rq) {
+ ctx_info->roce_info->err_rq_idx = info->wqe_idx;
+ irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
+ ctx_info);
+ }
+ irdma_set_flush_fields(qp, info);
+ irdma_cm_disconn(iwqp);
+ break;
+ }
+ ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
+ if (info->rq) {
+ ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
+ ctx_info->tcp_info_valid = false;
+ ctx_info->iwarp_info_valid = true;
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
+ ctx_info);
+ }
+ if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
+ iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
+ irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
+ irdma_cm_disconn(iwqp);
+ } else {
+ irdma_terminate_connection(qp, info);
+ }
+ break;
+ }
+ if (info->qp)
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ } while (1);
+
+ if (aeqcnt)
+ irdma_sc_repost_aeq_entries(dev, aeqcnt);
+}
+
+/**
+ * irdma_ena_intr - set up device interrupts
+ * @dev: hardware control device structure
+ * @msix_id: id of the interrupt to be enabled
+ */
+static void irdma_ena_intr(struct irdma_sc_dev *dev, u32 msix_id)
+{
+ dev->irq_ops->irdma_en_irq(dev, msix_id);
+}
+
+/**
+ * irdma_dpc - tasklet for aeq and ceq 0
+ * @t: tasklet_struct ptr
+ */
+static void irdma_dpc(struct tasklet_struct *t)
+{
+ struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);
+
+ if (rf->msix_shared)
+ irdma_process_ceq(rf, rf->ceqlist);
+ irdma_process_aeq(rf);
+ irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);
+}
+
+/**
+ * irdma_ceq_dpc - dpc handler for CEQ
+ * @t: tasklet_struct ptr
+ */
+static void irdma_ceq_dpc(struct tasklet_struct *t)
+{
+ struct irdma_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
+ struct irdma_pci_f *rf = iwceq->rf;
+
+ irdma_process_ceq(rf, iwceq);
+ irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx);
+}
+
+/**
+ * irdma_save_msix_info - copy msix vector information to iwarp device
+ * @rf: RDMA PCI function
+ *
+ * Allocate the iwdev msix table and copy the msix info to the table.
+ * Return 0 if successful, otherwise return an error.
+ */
+static int irdma_save_msix_info(struct irdma_pci_f *rf)
+{
+ struct irdma_qvlist_info *iw_qvlist;
+ struct irdma_qv_info *iw_qvinfo;
+ struct msix_entry *pmsix;
+ u32 ceq_idx;
+ u32 i;
+ size_t size;
+
+ if (!rf->msix_count)
+ return -EINVAL;
+
+ size = sizeof(struct irdma_msix_vector) * rf->msix_count;
+ size += struct_size(iw_qvlist, qv_info, rf->msix_count);
+ rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
+ if (!rf->iw_msixtbl)
+ return -ENOMEM;
+
+ rf->iw_qvlist = (struct irdma_qvlist_info *)
+ (&rf->iw_msixtbl[rf->msix_count]);
+ iw_qvlist = rf->iw_qvlist;
+ iw_qvinfo = iw_qvlist->qv_info;
+ iw_qvlist->num_vectors = rf->msix_count;
+ if (rf->msix_count <= num_online_cpus())
+ rf->msix_shared = true;
+ else if (rf->msix_count > num_online_cpus() + 1)
+ rf->msix_count = num_online_cpus() + 1;
+
+ pmsix = rf->msix_entries;
+ for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
+ rf->iw_msixtbl[i].idx = pmsix->entry;
+ rf->iw_msixtbl[i].irq = pmsix->vector;
+ rf->iw_msixtbl[i].cpu_affinity = ceq_idx;
+ if (!i) {
+ iw_qvinfo->aeq_idx = 0;
+ if (rf->msix_shared)
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ else
+ iw_qvinfo->ceq_idx = IRDMA_Q_INVALID_IDX;
+ } else {
+ iw_qvinfo->aeq_idx = IRDMA_Q_INVALID_IDX;
+ iw_qvinfo->ceq_idx = ceq_idx++;
+ }
+ iw_qvinfo->itr_idx = 3;
+ iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx;
+ pmsix++;
+ }
+
+ return 0;
+}
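+/*
+ * Illustrative example (not normative): with msix_count = 4 and at least
+ * four online CPUs, msix_shared is true and the resulting mapping is
+ * vector 0 -> AEQ + CEQ 0, vectors 1..3 -> CEQs 1..3. If msix_count exceeds
+ * the number of online CPUs, vector 0 services the AEQ only and vectors
+ * 1..3 service CEQs 0..2.
+ */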
+
+/**
+ * irdma_irq_handler - interrupt handler for aeq and ceq0
+ * @irq: Interrupt request number
+ * @data: RDMA PCI function
+ */
+static irqreturn_t irdma_irq_handler(int irq, void *data)
+{
+ struct irdma_pci_f *rf = data;
+
+ tasklet_schedule(&rf->dpc_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * irdma_ceq_handler - interrupt handler for ceq
+ * @irq: interrupt request number
+ * @data: ceq pointer
+ */
+static irqreturn_t irdma_ceq_handler(int irq, void *data)
+{
+ struct irdma_ceq *iwceq = data;
+
+ if (iwceq->irq != irq)
+ ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n",
+ iwceq->irq, irq);
+ tasklet_schedule(&iwceq->dpc_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * irdma_destroy_irq - destroy device interrupts
+ * @rf: RDMA PCI function
+ * @msix_vec: msix vector to disable irq
+ * @dev_id: parameter to pass to free_irq (used during irq setup)
+ *
+ * The function is called when destroying aeq/ceq
+ */
+static void irdma_destroy_irq(struct irdma_pci_f *rf,
+ struct irdma_msix_vector *msix_vec, void *dev_id)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+
+ dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
+ irq_update_affinity_hint(msix_vec->irq, NULL);
+ free_irq(msix_vec->irq, dev_id);
+}
+
+/**
+ * irdma_destroy_cqp - destroy control qp
+ * @rf: RDMA PCI function
+ *
+ * Issue destroy cqp request and
+ * free the resources associated with the cqp
+ */
+static void irdma_destroy_cqp(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp *cqp = &rf->cqp;
+ int status = 0;
+
+ status = irdma_sc_cqp_destroy(dev->cqp);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: Destroy CQP failed %d\n", status);
+
+ irdma_cleanup_pending_cqp_op(rf);
+ dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
+ cqp->sq.pa);
+ cqp->sq.va = NULL;
+ kfree(cqp->scratch_array);
+ cqp->scratch_array = NULL;
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
+}
+
+static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_aeq *aeq = &rf->aeq;
+ u32 pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
+ dma_addr_t *pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
+
+ irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt);
+ irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
+ vfree(aeq->mem.va);
+}
+
+/**
+ * irdma_destroy_aeq - destroy aeq
+ * @rf: RDMA PCI function
+ *
+ * Issue a destroy aeq request and
+ * free the resources associated with the aeq.
+ * The function is called during driver unload.
+ */
+static void irdma_destroy_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_aeq *aeq = &rf->aeq;
+ int status = -EBUSY;
+
+ if (!rf->msix_shared) {
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
+ irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
+ }
+ if (rf->reset)
+ goto exit;
+
+ aeq->sc_aeq.size = 0;
+ status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_DESTROY);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: Destroy AEQ failed %d\n", status);
+
+exit:
+ if (aeq->virtual_map) {
+ irdma_destroy_virt_aeq(rf);
+ } else {
+ dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
+ aeq->mem.pa);
+ aeq->mem.va = NULL;
+ }
+}
+
+/**
+ * irdma_destroy_ceq - destroy ceq
+ * @rf: RDMA PCI function
+ * @iwceq: ceq to be destroyed
+ *
+ * Issue a destroy ceq request and
+ * free the resources associated with the ceq
+ */
+static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ int status;
+
+ if (rf->reset)
+ goto exit;
+
+ status = irdma_sc_ceq_destroy(&iwceq->sc_ceq, 0, 1);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy command failed %d\n", status);
+ goto exit;
+ }
+
+ status = irdma_sc_cceq_destroy_done(&iwceq->sc_ceq);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: CEQ destroy completion failed %d\n",
+ status);
+exit:
+ dma_free_coherent(dev->hw->device, iwceq->mem.size, iwceq->mem.va,
+ iwceq->mem.pa);
+ iwceq->mem.va = NULL;
+}
+
+/**
+ * irdma_del_ceq_0 - destroy ceq 0
+ * @rf: RDMA PCI function
+ *
+ * Disable the ceq 0 interrupt and destroy the ceq 0
+ */
+static void irdma_del_ceq_0(struct irdma_pci_f *rf)
+{
+ struct irdma_ceq *iwceq = rf->ceqlist;
+ struct irdma_msix_vector *msix_vec;
+
+ if (rf->msix_shared) {
+ msix_vec = &rf->iw_msixtbl[0];
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
+ irdma_destroy_irq(rf, msix_vec, rf);
+ } else {
+ msix_vec = &rf->iw_msixtbl[1];
+ irdma_destroy_irq(rf, msix_vec, iwceq);
+ }
+
+ irdma_destroy_ceq(rf, iwceq);
+ rf->sc_dev.ceq_valid = false;
+ rf->ceqs_count = 0;
+}
+
+/**
+ * irdma_del_ceqs - destroy all ceq's except CEQ 0
+ * @rf: RDMA PCI function
+ *
+ * Go through all of the device ceq's, except 0, and for each
+ * ceq disable the ceq interrupt and destroy the ceq
+ */
+static void irdma_del_ceqs(struct irdma_pci_f *rf)
+{
+ struct irdma_ceq *iwceq = &rf->ceqlist[1];
+ struct irdma_msix_vector *msix_vec;
+ u32 i = 0;
+
+ if (rf->msix_shared)
+ msix_vec = &rf->iw_msixtbl[1];
+ else
+ msix_vec = &rf->iw_msixtbl[2];
+
+ for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
+ msix_vec->idx, false);
+ irdma_destroy_irq(rf, msix_vec, iwceq);
+ irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ IRDMA_OP_CEQ_DESTROY);
+ dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
+ iwceq->mem.va, iwceq->mem.pa);
+ iwceq->mem.va = NULL;
+ }
+ rf->ceqs_count = 1;
+}
+
+/**
+ * irdma_destroy_ccq - destroy control cq
+ * @rf: RDMA PCI function
+ *
+ * Issue destroy ccq request and
+ * free the resources associated with the ccq
+ */
+static void irdma_destroy_ccq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_ccq *ccq = &rf->ccq;
+ int status = 0;
+
+ if (rf->cqp_cmpl_wq)
+ destroy_workqueue(rf->cqp_cmpl_wq);
+
+ if (!rf->reset)
+ status = irdma_sc_ccq_destroy(dev->ccq, 0, true);
+ if (status)
+ ibdev_dbg(to_ibdev(dev), "ERR: CCQ destroy failed %d\n", status);
+ dma_free_coherent(dev->hw->device, ccq->mem_cq.size, ccq->mem_cq.va,
+ ccq->mem_cq.pa);
+ ccq->mem_cq.va = NULL;
+}
+
+/**
+ * irdma_close_hmc_objects_type - delete hmc objects of a given type
+ * @dev: iwarp device
+ * @obj_type: the hmc object type to be deleted
+ * @hmc_info: host memory info struct
+ * @privileged: permission to close HMC objects
+ * @reset: true if called before reset
+ */
+static void irdma_close_hmc_objects_type(struct irdma_sc_dev *dev,
+ enum irdma_hmc_rsrc_type obj_type,
+ struct irdma_hmc_info *hmc_info,
+ bool privileged, bool reset)
+{
+ struct irdma_hmc_del_obj_info info = {};
+
+ info.hmc_info = hmc_info;
+ info.rsrc_type = obj_type;
+ info.count = hmc_info->hmc_obj[obj_type].cnt;
+ info.privileged = privileged;
+ if (irdma_sc_del_hmc_obj(dev, &info, reset))
+ ibdev_dbg(to_ibdev(dev), "ERR: del HMC obj of type %d failed\n",
+ obj_type);
+}
+
+/**
+ * irdma_del_hmc_objects - remove all device hmc objects
+ * @dev: iwarp device
+ * @hmc_info: hmc_info to free
+ * @privileged: permission to delete HMC objects
+ * @reset: true if called before reset
+ * @vers: hardware version
+ */
+static void irdma_del_hmc_objects(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info, bool privileged,
+ bool reset, enum irdma_vers vers)
+{
+ unsigned int i;
+
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
+ irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
+ hmc_info, privileged, reset);
+ if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
+ break;
+ }
+}
+
+/**
+ * irdma_create_hmc_obj_type - create hmc object of a given type
+ * @dev: hardware control device structure
+ * @info: information for the hmc object to create
+ */
+static int irdma_create_hmc_obj_type(struct irdma_sc_dev *dev,
+ struct irdma_hmc_create_obj_info *info)
+{
+ return irdma_sc_create_hmc_obj(dev, info);
+}
+
+/**
+ * irdma_create_hmc_objs - create all hmc objects for the device
+ * @rf: RDMA PCI function
+ * @privileged: permission to create HMC objects
+ * @vers: HW version
+ *
+ * Create the device hmc objects and allocate hmc pages.
+ * Return 0 if successful, otherwise clean up and return error
+ */
+static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged,
+ enum irdma_vers vers)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_hmc_create_obj_info info = {};
+ int i, status = 0;
+
+ info.hmc_info = dev->hmc_info;
+ info.privileged = privileged;
+ info.entry_type = rf->sd_type;
+
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+ if (iw_hmc_obj_types[i] == IRDMA_HMC_IW_PBLE)
+ continue;
+ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt) {
+ info.rsrc_type = iw_hmc_obj_types[i];
+ info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+ info.add_sd_cnt = 0;
+ status = irdma_create_hmc_obj_type(dev, &info);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: create obj type %d status = %d\n",
+ iw_hmc_obj_types[i], status);
+ break;
+ }
+ }
+ if (vers == IRDMA_GEN_1 && i == IRDMA_HMC_IW_TIMER)
+ break;
+ }
+
+ if (!status)
+ return irdma_sc_static_hmc_pages_allocated(dev->cqp, 0, dev->hmc_fn_id,
+ true, true);
+
+ while (i) {
+ i--;
+ /* destroy the hmc objects of a given type */
+ if (dev->hmc_info->hmc_obj[iw_hmc_obj_types[i]].cnt)
+ irdma_close_hmc_objects_type(dev, iw_hmc_obj_types[i],
+ dev->hmc_info, privileged,
+ false);
+ }
+
+ return status;
+}
+
+/**
+ * irdma_obj_aligned_mem - get aligned memory from device allocated memory
+ * @rf: RDMA PCI function
+ * @memptr: points to the memory addresses
+ * @size: size of memory needed
+ * @mask: mask for the aligned memory
+ *
+ * Get aligned memory of the requested size and
+ * update the memptr to point to the new aligned memory.
+ * Return 0 if successful, otherwise return no memory error
+ */
+static int irdma_obj_aligned_mem(struct irdma_pci_f *rf,
+ struct irdma_dma_mem *memptr, u32 size,
+ u32 mask)
+{
+ unsigned long va, newva;
+ unsigned long extra;
+
+ va = (unsigned long)rf->obj_next.va;
+ newva = va;
+ if (mask)
+ newva = ALIGN(va, (unsigned long)mask + 1ULL);
+ extra = newva - va;
+ memptr->va = (u8 *)va + extra;
+ memptr->pa = rf->obj_next.pa + extra;
+ memptr->size = size;
+ if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size))
+ return -ENOMEM;
+
+ rf->obj_next.va = (u8 *)memptr->va + size;
+ rf->obj_next.pa = memptr->pa + size;
+
+ return 0;
+}
+
+/**
+ * irdma_create_cqp - create control qp
+ * @rf: RDMA PCI function
+ *
+ * Return 0, if the cqp and all the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static int irdma_create_cqp(struct irdma_pci_f *rf)
+{
+ u32 sqsize = IRDMA_CQP_SW_SQSIZE_2048;
+ struct irdma_dma_mem mem;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp_init_info cqp_init_info = {};
+ struct irdma_cqp *cqp = &rf->cqp;
+ u16 maj_err, min_err;
+ int i, status;
+
+ cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
+ if (!cqp->cqp_requests)
+ return -ENOMEM;
+
+ cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
+ if (!cqp->scratch_array) {
+ status = -ENOMEM;
+ goto err_scratch;
+ }
+
+ dev->cqp = &cqp->sc_cqp;
+ dev->cqp->dev = dev;
+ cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
+ IRDMA_CQP_ALIGNMENT);
+ cqp->sq.va = dma_alloc_coherent(dev->hw->device, cqp->sq.size,
+ &cqp->sq.pa, GFP_KERNEL);
+ if (!cqp->sq.va) {
+ status = -ENOMEM;
+ goto err_sq;
+ }
+
+ status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx),
+ IRDMA_HOST_CTX_ALIGNMENT_M);
+ if (status)
+ goto err_ctx;
+
+ dev->cqp->host_ctx_pa = mem.pa;
+ dev->cqp->host_ctx = mem.va;
+ /* populate the cqp init info */
+ cqp_init_info.dev = dev;
+ cqp_init_info.sq_size = sqsize;
+ cqp_init_info.sq = cqp->sq.va;
+ cqp_init_info.sq_pa = cqp->sq.pa;
+ cqp_init_info.host_ctx_pa = mem.pa;
+ cqp_init_info.host_ctx = mem.va;
+ cqp_init_info.hmc_profile = rf->rsrc_profile;
+ cqp_init_info.scratch_array = cqp->scratch_array;
+ cqp_init_info.protocol_used = rf->protocol_used;
+
+ switch (rf->rdma_ver) {
+ case IRDMA_GEN_1:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_1;
+ break;
+ case IRDMA_GEN_2:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
+ break;
+ }
+ status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev), "ERR: cqp init status %d\n", status);
+ goto err_ctx;
+ }
+
+ spin_lock_init(&cqp->req_lock);
+ spin_lock_init(&cqp->compl_lock);
+
+ status = irdma_sc_cqp_create(dev->cqp, &maj_err, &min_err);
+ if (status) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: cqp create failed - status %d maj_err %d min_err %d\n",
+ status, maj_err, min_err);
+ goto err_ctx;
+ }
+
+ INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
+ INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
+
+ /* init the waitqueue of the cqp_requests and add them to the list */
+ for (i = 0; i < sqsize; i++) {
+ init_waitqueue_head(&cqp->cqp_requests[i].waitq);
+ list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
+ }
+ init_waitqueue_head(&cqp->remove_wq);
+ return 0;
+
+err_ctx:
+ dma_free_coherent(dev->hw->device, cqp->sq.size,
+ cqp->sq.va, cqp->sq.pa);
+ cqp->sq.va = NULL;
+err_sq:
+ kfree(cqp->scratch_array);
+ cqp->scratch_array = NULL;
+err_scratch:
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
+
+ return status;
+}
+
+/**
+ * irdma_create_ccq - create control cq
+ * @rf: RDMA PCI function
+ *
+ * Return 0, if the ccq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static int irdma_create_ccq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_ccq_init_info info = {};
+ struct irdma_ccq *ccq = &rf->ccq;
+ int status;
+
+ dev->ccq = &ccq->sc_cq;
+ dev->ccq->dev = dev;
+ info.dev = dev;
+ ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
+ ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
+ IRDMA_CQ0_ALIGNMENT);
+ ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
+ &ccq->mem_cq.pa, GFP_KERNEL);
+ if (!ccq->mem_cq.va)
+ return -ENOMEM;
+
+ status = irdma_obj_aligned_mem(rf, &ccq->shadow_area,
+ ccq->shadow_area.size,
+ IRDMA_SHADOWAREA_M);
+ if (status)
+ goto exit;
+
+ ccq->sc_cq.back_cq = ccq;
+ /* populate the ccq init info */
+ info.cq_base = ccq->mem_cq.va;
+ info.cq_pa = ccq->mem_cq.pa;
+ info.num_elem = IW_CCQ_SIZE;
+ info.shadow_area = ccq->shadow_area.va;
+ info.shadow_area_pa = ccq->shadow_area.pa;
+ info.ceqe_mask = false;
+ info.ceq_id_valid = true;
+ info.shadow_read_threshold = 16;
+ info.vsi = &rf->default_vsi;
+ status = irdma_sc_ccq_init(dev->ccq, &info);
+ if (!status)
+ status = irdma_sc_ccq_create(dev->ccq, 0, true, true);
+exit:
+ if (status) {
+ dma_free_coherent(dev->hw->device, ccq->mem_cq.size,
+ ccq->mem_cq.va, ccq->mem_cq.pa);
+ ccq->mem_cq.va = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_alloc_set_mac - set up a mac address table entry
+ * @iwdev: irdma device
+ *
+ * Allocate a mac ip entry and add it to the hw table. Return 0
+ * if successful, otherwise return error
+ */
+static int irdma_alloc_set_mac(struct irdma_device *iwdev)
+{
+ int status;
+
+ status = irdma_alloc_local_mac_entry(iwdev->rf,
+ &iwdev->mac_ip_table_idx);
+ if (!status) {
+ status = irdma_add_local_mac_entry(iwdev->rf,
+ (const u8 *)iwdev->netdev->dev_addr,
+ (u8)iwdev->mac_ip_table_idx);
+ if (status)
+ irdma_del_local_mac_entry(iwdev->rf,
+ (u8)iwdev->mac_ip_table_idx);
+ }
+ return status;
+}
+
+/**
+ * irdma_cfg_ceq_vector - set up the msix interrupt vector for
+ * ceq
+ * @rf: RDMA PCI function
+ * @iwceq: ceq associated with the vector
+ * @ceq_id: the id number of the iwceq
+ * @msix_vec: interrupt vector information
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ u32 ceq_id, struct irdma_msix_vector *msix_vec)
+{
+ int status;
+
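+	/* With msix_shared, the AEQ and CEQ 0 share MSI-X vector 0 and a single handler services both */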
+ if (rf->msix_shared && !ceq_id) {
+ snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
+ "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev));
+ tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
+ status = request_irq(msix_vec->irq, irdma_irq_handler, 0,
+ msix_vec->name, rf);
+ } else {
+ snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
+ "irdma-%s-CEQ-%d",
+ dev_name(&rf->pcidev->dev), ceq_id);
+ tasklet_setup(&iwceq->dpc_tasklet, irdma_ceq_dpc);
+
+ status = request_irq(msix_vec->irq, irdma_ceq_handler, 0,
+ msix_vec->name, iwceq);
+ }
+ cpumask_clear(&msix_vec->mask);
+ cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
+ irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
+ if (status) {
+ ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
+ return status;
+ }
+
+ msix_vec->ceq_id = ceq_id;
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
+
+ return 0;
+}
+
+/**
+ * irdma_cfg_aeq_vector - set up the msix vector for aeq
+ * @rf: RDMA PCI function
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
+{
+ struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
+ u32 ret = 0;
+
+ if (!rf->msix_shared) {
+ snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
+ "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
+ tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
+ ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
+ msix_vec->name, rf);
+ }
+ if (ret) {
+ ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
+ return -EINVAL;
+ }
+
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
+
+ return 0;
+}
+
+/**
+ * irdma_create_ceq - create completion event queue
+ * @rf: RDMA PCI function
+ * @iwceq: pointer to the ceq resources to be created
+ * @ceq_id: the id number of the iwceq
+ * @vsi: SC vsi struct
+ *
+ * Return 0, if the ceq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
+ u32 ceq_id, struct irdma_sc_vsi *vsi)
+{
+ int status;
+ struct irdma_ceq_init_info info = {};
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ u32 ceq_size;
+
+ info.ceq_id = ceq_id;
+ iwceq->rf = rf;
+ ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt,
+ dev->hw_attrs.max_hw_ceq_size);
+ iwceq->mem.size = ALIGN(sizeof(struct irdma_ceqe) * ceq_size,
+ IRDMA_CEQ_ALIGNMENT);
+ iwceq->mem.va = dma_alloc_coherent(dev->hw->device, iwceq->mem.size,
+ &iwceq->mem.pa, GFP_KERNEL);
+ if (!iwceq->mem.va)
+ return -ENOMEM;
+
+ info.ceq_id = ceq_id;
+ info.ceqe_base = iwceq->mem.va;
+ info.ceqe_pa = iwceq->mem.pa;
+ info.elem_cnt = ceq_size;
+ iwceq->sc_ceq.ceq_id = ceq_id;
+ info.dev = dev;
+ info.vsi = vsi;
+ status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
+ if (!status) {
+ if (dev->ceq_valid)
+ status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
+ IRDMA_OP_CEQ_CREATE);
+ else
+ status = irdma_sc_cceq_create(&iwceq->sc_ceq, 0);
+ }
+
+ if (status) {
+ dma_free_coherent(dev->hw->device, iwceq->mem.size,
+ iwceq->mem.va, iwceq->mem.pa);
+ iwceq->mem.va = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_setup_ceq_0 - create CEQ 0 and its interrupt resource
+ * @rf: RDMA PCI function
+ *
+ * Allocate a list for all device completion event queues
+ * Create the ceq 0 and configure its msix interrupt vector
+ * Return 0, if successfully set up, otherwise return error
+ */
+static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
+{
+ struct irdma_ceq *iwceq;
+ struct irdma_msix_vector *msix_vec;
+ u32 i;
+ int status = 0;
+ u32 num_ceqs;
+
+ num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
+ rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL);
+ if (!rf->ceqlist) {
+ status = -ENOMEM;
+ goto exit;
+ }
+
+ iwceq = &rf->ceqlist[0];
+ status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
+ if (status) {
+ ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
+ status);
+ goto exit;
+ }
+
+ spin_lock_init(&iwceq->ce_lock);
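+	/* CEQ 0 uses MSI-X entry 0 when it is shared with the AEQ, otherwise entry 1 */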
+ i = rf->msix_shared ? 0 : 1;
+ msix_vec = &rf->iw_msixtbl[i];
+ iwceq->irq = msix_vec->irq;
+ iwceq->msix_idx = msix_vec->idx;
+ status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec);
+ if (status) {
+ irdma_destroy_ceq(rf, iwceq);
+ goto exit;
+ }
+
+ irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
+ rf->ceqs_count++;
+
+exit:
+ if (status && !rf->ceqs_count) {
+ kfree(rf->ceqlist);
+ rf->ceqlist = NULL;
+ return status;
+ }
+ rf->sc_dev.ceq_valid = true;
+
+ return 0;
+}
+
+/**
+ * irdma_setup_ceqs - manage the device ceqs and their interrupt resources
+ * @rf: RDMA PCI function
+ * @vsi: VSI structure for this CEQ
+ *
+ * Allocate a list for all device completion event queues
+ * Create the ceqs and configure their msix interrupt vectors
+ * Return 0, if ceqs are successfully set up, otherwise return error
+ */
+static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
+{
+ u32 i;
+ u32 ceq_id;
+ struct irdma_ceq *iwceq;
+ struct irdma_msix_vector *msix_vec;
+ int status;
+ u32 num_ceqs;
+
+ num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs);
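+	/* CEQ 0 is set up by irdma_setup_ceq_0(); remaining CEQs start at the next unused MSI-X entry */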
+ i = (rf->msix_shared) ? 1 : 2;
+ for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
+ iwceq = &rf->ceqlist[ceq_id];
+ status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
+ if (status) {
+ ibdev_dbg(&rf->iwdev->ibdev,
+ "ERR: create ceq status = %d\n", status);
+ goto del_ceqs;
+ }
+ spin_lock_init(&iwceq->ce_lock);
+ msix_vec = &rf->iw_msixtbl[i];
+ iwceq->irq = msix_vec->irq;
+ iwceq->msix_idx = msix_vec->idx;
+ status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec);
+ if (status) {
+ irdma_destroy_ceq(rf, iwceq);
+ goto del_ceqs;
+ }
+ irdma_ena_intr(&rf->sc_dev, msix_vec->idx);
+ rf->ceqs_count++;
+ }
+
+ return 0;
+
+del_ceqs:
+ irdma_del_ceqs(rf);
+
+ return status;
+}
+
+static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size)
+{
+ struct irdma_aeq *aeq = &rf->aeq;
+ dma_addr_t *pg_arr;
+ u32 pg_cnt;
+ int status;
+
+ if (rf->rdma_ver < IRDMA_GEN_2)
+ return -EOPNOTSUPP;
+
+ aeq->mem.size = sizeof(struct irdma_sc_aeqe) * size;
+ aeq->mem.va = vzalloc(aeq->mem.size);
+
+ if (!aeq->mem.va)
+ return -ENOMEM;
+
+ pg_cnt = DIV_ROUND_UP(aeq->mem.size, PAGE_SIZE);
+ status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true);
+ if (status) {
+ vfree(aeq->mem.va);
+ return status;
+ }
+
+ pg_arr = (dma_addr_t *)aeq->palloc.level1.addr;
+ status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt);
+ if (status) {
+ irdma_free_pble(rf->pble_rsrc, &aeq->palloc);
+ vfree(aeq->mem.va);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_create_aeq - create async event queue
+ * @rf: RDMA PCI function
+ *
+ * Return 0, if the aeq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static int irdma_create_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_aeq_init_info info = {};
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_aeq *aeq = &rf->aeq;
+ struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info;
+ u32 aeq_size;
+ u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1;
+ int status;
+
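+	/* Size the AEQ for every QP and CQ; iWARP-only mode doubles the per-QP contribution */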
+ aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
+ aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
+
+ aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
+ IRDMA_AEQ_ALIGNMENT);
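+	/* Try a physically contiguous AEQ first; __GFP_NOWARN since a virtually mapped AEQ is the fallback */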
+ aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
+ &aeq->mem.pa,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (aeq->mem.va)
+ goto skip_virt_aeq;
+
+ /* physically mapped aeq failed. setup virtual aeq */
+ status = irdma_create_virt_aeq(rf, aeq_size);
+ if (status)
+ return status;
+
+ info.virtual_map = true;
+ aeq->virtual_map = info.virtual_map;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = aeq->palloc.level1.idx;
+
+skip_virt_aeq:
+ info.aeqe_base = aeq->mem.va;
+ info.aeq_elem_pa = aeq->mem.pa;
+ info.elem_cnt = aeq_size;
+ info.dev = dev;
+ info.msix_idx = rf->iw_msixtbl->idx;
+ status = irdma_sc_aeq_init(&aeq->sc_aeq, &info);
+ if (status)
+ goto err;
+
+ status = irdma_cqp_aeq_cmd(dev, &aeq->sc_aeq, IRDMA_OP_AEQ_CREATE);
+ if (status)
+ goto err;
+
+ return 0;
+
+err:
+ if (aeq->virtual_map) {
+ irdma_destroy_virt_aeq(rf);
+ } else {
+ dma_free_coherent(dev->hw->device, aeq->mem.size, aeq->mem.va,
+ aeq->mem.pa);
+ aeq->mem.va = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * irdma_setup_aeq - set up the device aeq
+ * @rf: RDMA PCI function
+ *
+ * Create the aeq and configure its msix interrupt vector
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_setup_aeq(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ int status;
+
+ status = irdma_create_aeq(rf);
+ if (status)
+ return status;
+
+ status = irdma_cfg_aeq_vector(rf);
+ if (status) {
+ irdma_destroy_aeq(rf);
+ return status;
+ }
+
+ if (!rf->msix_shared)
+ irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);
+
+ return 0;
+}
+
+/**
+ * irdma_initialize_ilq - create iwarp local queue for cm
+ * @iwdev: irdma device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_initialize_ilq(struct irdma_device *iwdev)
+{
+ struct irdma_puda_rsrc_info info = {};
+ int status;
+
+ info.type = IRDMA_PUDA_RSRC_TYPE_ILQ;
+ info.cq_id = 1;
+ info.qp_id = 1;
+ info.count = 1;
+ info.pd_id = 1;
+ info.abi_ver = IRDMA_ABI_VER;
+ info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
+ info.rq_size = info.sq_size;
+ info.buf_size = 1024;
+ info.tx_buf_cnt = 2 * info.sq_size;
+ info.receive = irdma_receive_ilq;
+ info.xmit_complete = irdma_free_sqbuf;
+ status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
+ if (status)
+ ibdev_dbg(&iwdev->ibdev, "ERR: ilq create fail\n");
+
+ return status;
+}
+
+/**
+ * irdma_initialize_ieq - create iwarp exception queue
+ * @iwdev: irdma device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_initialize_ieq(struct irdma_device *iwdev)
+{
+ struct irdma_puda_rsrc_info info = {};
+ int status;
+
+ info.type = IRDMA_PUDA_RSRC_TYPE_IEQ;
+ info.cq_id = 2;
+ info.qp_id = iwdev->vsi.exception_lan_q;
+ info.count = 1;
+ info.pd_id = 2;
+ info.abi_ver = IRDMA_ABI_VER;
+ info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768);
+ info.rq_size = info.sq_size;
+ info.buf_size = iwdev->vsi.mtu + IRDMA_IPV4_PAD;
+ info.tx_buf_cnt = 4096;
+ status = irdma_puda_create_rsrc(&iwdev->vsi, &info);
+ if (status)
+ ibdev_dbg(&iwdev->ibdev, "ERR: ieq create fail\n");
+
+ return status;
+}
+
+/**
+ * irdma_reinitialize_ieq - destroy and re-create ieq
+ * @vsi: VSI structure
+ */
+void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
+ if (irdma_initialize_ieq(iwdev)) {
+ iwdev->rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+}
+
+/**
+ * irdma_hmc_setup - create hmc objects for the device
+ * @rf: RDMA PCI function
+ *
+ * Set up the device private memory space for the number and size of
+ * the hmc objects and create the objects
+ * Return 0 if successful, otherwise return error
+ */
+static int irdma_hmc_setup(struct irdma_pci_f *rf)
+{
+ int status;
+ u32 qpcnt;
+
+ qpcnt = rsrc_limits_table[rf->limits_sel].qplimit;
+
+ rf->sd_type = IRDMA_SD_TYPE_DIRECT;
+ status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt);
+ if (status)
+ return status;
+
+ status = irdma_create_hmc_objs(rf, true, rf->rdma_ver);
+
+ return status;
+}
+
+/**
+ * irdma_del_init_mem - deallocate memory resources
+ * @rf: RDMA PCI function
+ */
+static void irdma_del_init_mem(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+
+ kfree(dev->hmc_info->sd_table.sd_entry);
+ dev->hmc_info->sd_table.sd_entry = NULL;
+ vfree(rf->mem_rsrc);
+ rf->mem_rsrc = NULL;
+ dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
+ rf->obj_mem.pa);
+ rf->obj_mem.va = NULL;
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ bitmap_free(rf->allocated_ws_nodes);
+ rf->allocated_ws_nodes = NULL;
+ }
+ kfree(rf->ceqlist);
+ rf->ceqlist = NULL;
+ kfree(rf->iw_msixtbl);
+ rf->iw_msixtbl = NULL;
+ kfree(rf->hmc_info_mem);
+ rf->hmc_info_mem = NULL;
+}
+
+/**
+ * irdma_initialize_dev - initialize device
+ * @rf: RDMA PCI function
+ *
+ * Allocate memory for the hmc objects and initialize iwdev
+ * Return 0 if successful, otherwise clean up the resources
+ * and return error
+ */
+static int irdma_initialize_dev(struct irdma_pci_f *rf)
+{
+ int status;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_device_init_info info = {};
+ struct irdma_dma_mem mem;
+ u32 size;
+
+ size = sizeof(struct irdma_hmc_pble_rsrc) +
+ sizeof(struct irdma_hmc_info) +
+ (sizeof(struct irdma_hmc_obj_info) * IRDMA_HMC_IW_MAX);
+
+ rf->hmc_info_mem = kzalloc(size, GFP_KERNEL);
+ if (!rf->hmc_info_mem)
+ return -ENOMEM;
+
+ rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem;
+ dev->hmc_info = &rf->hw.hmc;
+ dev->hmc_info->hmc_obj = (struct irdma_hmc_obj_info *)
+ (rf->pble_rsrc + 1);
+
+ status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE,
+ IRDMA_FPM_QUERY_BUF_ALIGNMENT_M);
+ if (status)
+ goto error;
+
+ info.fpm_query_buf_pa = mem.pa;
+ info.fpm_query_buf = mem.va;
+
+ status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE,
+ IRDMA_FPM_COMMIT_BUF_ALIGNMENT_M);
+ if (status)
+ goto error;
+
+ info.fpm_commit_buf_pa = mem.pa;
+ info.fpm_commit_buf = mem.va;
+
+ info.bar0 = rf->hw.hw_addr;
+ info.hmc_fn_id = rf->pf_id;
+ info.hw = &rf->hw;
+ status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
+ if (status)
+ goto error;
+
+ return status;
+error:
+ kfree(rf->hmc_info_mem);
+ rf->hmc_info_mem = NULL;
+
+ return status;
+}
+
+/**
+ * irdma_rt_deinit_hw - clean up the irdma device resources
+ * @iwdev: irdma device
+ *
+ * remove the mac ip entry and ipv4/ipv6 addresses, destroy the
+ * device queues and free the pble and the hmc objects
+ */
+void irdma_rt_deinit_hw(struct irdma_device *iwdev)
+{
+ ibdev_dbg(&iwdev->ibdev, "INIT: state = %d\n", iwdev->init_state);
+
+ switch (iwdev->init_state) {
+ case IP_ADDR_REGISTERED:
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ irdma_del_local_mac_entry(iwdev->rf,
+ (u8)iwdev->mac_ip_table_idx);
+ fallthrough;
+ case AEQ_CREATED:
+ case PBLE_CHUNK_MEM:
+ case CEQS_CREATED:
+ case IEQ_CREATED:
+ if (!iwdev->roce_mode)
+ irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
+ iwdev->rf->reset);
+ fallthrough;
+ case ILQ_CREATED:
+ if (!iwdev->roce_mode)
+ irdma_puda_dele_rsrc(&iwdev->vsi,
+ IRDMA_PUDA_RSRC_TYPE_ILQ,
+ iwdev->rf->reset);
+ break;
+ default:
+ ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
+ break;
+ }
+
+ irdma_cleanup_cm_core(&iwdev->cm_core);
+ if (iwdev->vsi.pestat) {
+ irdma_vsi_stats_free(&iwdev->vsi);
+ kfree(iwdev->vsi.pestat);
+ }
+ if (iwdev->cleanup_wq)
+ destroy_workqueue(iwdev->cleanup_wq);
+}
+
+static int irdma_setup_init_state(struct irdma_pci_f *rf)
+{
+ int status;
+
+ status = irdma_save_msix_info(rf);
+ if (status)
+ return status;
+
+ rf->hw.device = &rf->pcidev->dev;
+ rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE);
+ rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size,
+ &rf->obj_mem.pa, GFP_KERNEL);
+ if (!rf->obj_mem.va) {
+ status = -ENOMEM;
+ goto clean_msixtbl;
+ }
+
+ rf->obj_next = rf->obj_mem;
+ status = irdma_initialize_dev(rf);
+ if (status)
+ goto clean_obj_mem;
+
+ return 0;
+
+clean_obj_mem:
+ dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va,
+ rf->obj_mem.pa);
+ rf->obj_mem.va = NULL;
+clean_msixtbl:
+ kfree(rf->iw_msixtbl);
+ rf->iw_msixtbl = NULL;
+ return status;
+}
+
+/**
+ * irdma_get_used_rsrc - determine resources used internally
+ * @iwdev: irdma device
+ *
+ * Called at the end of open to get all internal allocations
+ */
+static void irdma_get_used_rsrc(struct irdma_device *iwdev)
+{
+ iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
+ iwdev->rf->max_pd);
+ iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
+ iwdev->rf->max_qp);
+ iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
+ iwdev->rf->max_cq);
+ iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
+ iwdev->rf->max_mr);
+}
+
+void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
+{
+ enum init_completion_state state = rf->init_state;
+
+ rf->init_state = INVALID_STATE;
+ if (rf->rsrc_created) {
+ irdma_destroy_aeq(rf);
+ irdma_destroy_pble_prm(rf->pble_rsrc);
+ irdma_del_ceqs(rf);
+ rf->rsrc_created = false;
+ }
+ switch (state) {
+ case CEQ0_CREATED:
+ irdma_del_ceq_0(rf);
+ fallthrough;
+ case CCQ_CREATED:
+ irdma_destroy_ccq(rf);
+ fallthrough;
+ case HW_RSRC_INITIALIZED:
+ case HMC_OBJS_CREATED:
+ irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
+ rf->reset, rf->rdma_ver);
+ fallthrough;
+ case CQP_CREATED:
+ irdma_destroy_cqp(rf);
+ fallthrough;
+ case INITIAL_STATE:
+ irdma_del_init_mem(rf);
+ break;
+ case INVALID_STATE:
+ default:
+ ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state);
+ break;
+ }
+}
+
+/**
+ * irdma_rt_init_hw - Initializes runtime portion of HW
+ * @iwdev: irdma device
+ * @l2params: qos, tc, mtu info from netdev driver
+ *
+ * Create device queues ILQ, IEQ, CEQs and PBLEs. Setup irdma
+ * device resource objects.
+ */
+int irdma_rt_init_hw(struct irdma_device *iwdev,
+ struct irdma_l2params *l2params)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_vsi_init_info vsi_info = {};
+ struct irdma_vsi_stats_info stats_info = {};
+ int status;
+
+ vsi_info.dev = dev;
+ vsi_info.back_vsi = iwdev;
+ vsi_info.params = l2params;
+ vsi_info.pf_data_vsi_num = iwdev->vsi_num;
+ vsi_info.register_qset = rf->gen_ops.register_qset;
+ vsi_info.unregister_qset = rf->gen_ops.unregister_qset;
+ vsi_info.exception_lan_q = 2;
+ irdma_sc_vsi_init(&iwdev->vsi, &vsi_info);
+
+ status = irdma_setup_cm_core(iwdev, rf->rdma_ver);
+ if (status)
+ return status;
+
+ stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+ if (!stats_info.pestat) {
+ irdma_cleanup_cm_core(&iwdev->cm_core);
+ return -ENOMEM;
+ }
+ stats_info.fcn_id = dev->hmc_fn_id;
+ status = irdma_vsi_stats_init(&iwdev->vsi, &stats_info);
+ if (status) {
+ irdma_cleanup_cm_core(&iwdev->cm_core);
+ kfree(stats_info.pestat);
+ return status;
+ }
+
+ do {
+ if (!iwdev->roce_mode) {
+ status = irdma_initialize_ilq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = ILQ_CREATED;
+ status = irdma_initialize_ieq(iwdev);
+ if (status)
+ break;
+ iwdev->init_state = IEQ_CREATED;
+ }
+ if (!rf->rsrc_created) {
+ status = irdma_setup_ceqs(rf, &iwdev->vsi);
+ if (status)
+ break;
+
+ iwdev->init_state = CEQS_CREATED;
+
+ status = irdma_hmc_init_pble(&rf->sc_dev,
+ rf->pble_rsrc);
+ if (status) {
+ irdma_del_ceqs(rf);
+ break;
+ }
+
+ iwdev->init_state = PBLE_CHUNK_MEM;
+
+ status = irdma_setup_aeq(rf);
+ if (status) {
+ irdma_destroy_pble_prm(rf->pble_rsrc);
+ irdma_del_ceqs(rf);
+ break;
+ }
+ iwdev->init_state = AEQ_CREATED;
+ rf->rsrc_created = true;
+ }
+
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ irdma_alloc_set_mac(iwdev);
+ irdma_add_ip(iwdev);
+ iwdev->init_state = IP_ADDR_REGISTERED;
+
+		/* handles async cleanup tasks - disconnect CM, free qp,
+ * free cq bufs
+ */
+ iwdev->cleanup_wq = alloc_workqueue("irdma-cleanup-wq",
+ WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
+ if (!iwdev->cleanup_wq)
+ return -ENOMEM;
+ irdma_get_used_rsrc(iwdev);
+ init_waitqueue_head(&iwdev->suspend_wq);
+
+ return 0;
+ } while (0);
+
+ dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n",
+ status, iwdev->init_state);
+ irdma_rt_deinit_hw(iwdev);
+
+ return status;
+}
+
+/**
+ * irdma_ctrl_init_hw - Initializes control portion of HW
+ * @rf: RDMA PCI function
+ *
+ * Create admin queues, HMC objects and RF resource objects
+ */
+int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+	int status;
+
+	do {
+ status = irdma_setup_init_state(rf);
+ if (status)
+ break;
+ rf->init_state = INITIAL_STATE;
+
+ status = irdma_create_cqp(rf);
+ if (status)
+ break;
+ rf->init_state = CQP_CREATED;
+
+ status = irdma_hmc_setup(rf);
+ if (status)
+ break;
+ rf->init_state = HMC_OBJS_CREATED;
+
+ status = irdma_initialize_hw_rsrc(rf);
+ if (status)
+ break;
+ rf->init_state = HW_RSRC_INITIALIZED;
+
+ status = irdma_create_ccq(rf);
+ if (status)
+ break;
+ rf->init_state = CCQ_CREATED;
+
+ dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
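+		/* Querying RDMA features is only supported on non-GEN_1 devices */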
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ status = irdma_get_rdma_features(dev);
+ if (status)
+ break;
+ }
+
+ status = irdma_setup_ceq_0(rf);
+ if (status)
+ break;
+ rf->init_state = CEQ0_CREATED;
+ /* Handles processing of CQP completions */
+ rf->cqp_cmpl_wq =
+ alloc_ordered_workqueue("cqp_cmpl_wq", WQ_HIGHPRI);
+ if (!rf->cqp_cmpl_wq) {
+ status = -ENOMEM;
+ break;
+ }
+ INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
+ irdma_sc_ccq_arm(dev->ccq);
+ return 0;
+ } while (0);
+
+ dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n",
+ rf->init_state, status);
+ irdma_ctrl_deinit_hw(rf);
+ return status;
+}
+
+/**
+ * irdma_set_hw_rsrc - set hw memory resources.
+ * @rf: RDMA PCI function
+ */
+static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
+{
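+	/* mem_rsrc is a single allocation: the ARP table first, then the resource bitmaps and the qp/cq pointer tables */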
+ rf->allocated_qps = (void *)(rf->mem_rsrc +
+ (sizeof(struct irdma_arp_entry) * rf->arp_table_size));
+ rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
+ rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
+ rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
+ rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
+ rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
+ rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)];
+ rf->qp_table = (struct irdma_qp **)
+ (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]);
+ rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);
+
+ spin_lock_init(&rf->rsrc_lock);
+ spin_lock_init(&rf->arp_lock);
+ spin_lock_init(&rf->qptable_lock);
+ spin_lock_init(&rf->cqtable_lock);
+ spin_lock_init(&rf->qh_list_lock);
+}
+
+/**
+ * irdma_calc_mem_rsrc_size - calculate memory resources size.
+ * @rf: RDMA PCI function
+ */
+static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
+{
+ u32 rsrc_size;
+
+ rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
+ rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
+ rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
+
+ return rsrc_size;
+}
+
+/**
+ * irdma_initialize_hw_rsrc - initialize hw resource tracking array
+ * @rf: RDMA PCI function
+ */
+u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
+{
+ u32 rsrc_size;
+ u32 mrdrvbits;
+ u32 ret;
+
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES,
+ GFP_KERNEL);
+ if (!rf->allocated_ws_nodes)
+ return -ENOMEM;
+
+ set_bit(0, rf->allocated_ws_nodes);
+ rf->max_ws_node_id = IRDMA_MAX_WS_NODES;
+ }
+ rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size;
+ rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
+ rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
+ rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
+ rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
+ rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
+ rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
+ rf->max_mcg = rf->max_qp;
+
+ rsrc_size = irdma_calc_mem_rsrc_size(rf);
+ rf->mem_rsrc = vzalloc(rsrc_size);
+ if (!rf->mem_rsrc) {
+ ret = -ENOMEM;
+ goto mem_rsrc_vzalloc_fail;
+ }
+
+ rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
+
+ irdma_set_hw_rsrc(rf);
+
+ set_bit(0, rf->allocated_mrs);
+ set_bit(0, rf->allocated_qps);
+ set_bit(0, rf->allocated_cqs);
+ set_bit(0, rf->allocated_pds);
+ set_bit(0, rf->allocated_arps);
+ set_bit(0, rf->allocated_ahs);
+ set_bit(0, rf->allocated_mcgs);
+ set_bit(2, rf->allocated_qps); /* qp 2 IEQ */
+ set_bit(1, rf->allocated_qps); /* qp 1 ILQ */
+ set_bit(1, rf->allocated_cqs);
+ set_bit(1, rf->allocated_pds);
+ set_bit(2, rf->allocated_cqs);
+ set_bit(2, rf->allocated_pds);
+
+ INIT_LIST_HEAD(&rf->mc_qht_list.list);
+ /* stag index mask has a minimum of 14 bits */
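+	/* mr_stagmask zeroes the top mrdrvbits bits of the 32-bit stag */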
+ mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14);
+ rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
+
+ return 0;
+
+mem_rsrc_vzalloc_fail:
+ bitmap_free(rf->allocated_ws_nodes);
+ rf->allocated_ws_nodes = NULL;
+
+ return ret;
+}
+
+/**
+ * irdma_cqp_ce_handler - handle cqp completions
+ * @rf: RDMA PCI function
+ * @cq: cq for cqp completions
+ */
+void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ u32 cqe_count = 0;
+ struct irdma_ccq_cqe_info info;
+ unsigned long flags;
+ int ret;
+
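+	/* Drain all pending CCQ completions, then re-arm the CCQ if any were processed */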
+ do {
+ memset(&info, 0, sizeof(info));
+ spin_lock_irqsave(&rf->cqp.compl_lock, flags);
+ ret = irdma_sc_ccq_get_cqe_info(cq, &info);
+ spin_unlock_irqrestore(&rf->cqp.compl_lock, flags);
+ if (ret)
+ break;
+
+ cqp_request = (struct irdma_cqp_request *)
+ (unsigned long)info.scratch;
+ if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
+ info.maj_err_code,
+ info.min_err_code))
+ ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
+ info.op_code, info.maj_err_code, info.min_err_code);
+ if (cqp_request) {
+ cqp_request->compl_info.maj_err_code = info.maj_err_code;
+ cqp_request->compl_info.min_err_code = info.min_err_code;
+ cqp_request->compl_info.op_ret_val = info.op_ret_val;
+ cqp_request->compl_info.error = info.error;
+
+ if (cqp_request->waiting) {
+ WRITE_ONCE(cqp_request->request_done, true);
+ wake_up(&cqp_request->waitq);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ } else {
+ if (cqp_request->callback_fcn)
+ cqp_request->callback_fcn(cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ }
+ }
+
+ cqe_count++;
+ } while (1);
+
+ if (cqe_count) {
+ irdma_process_bh(dev);
+ irdma_sc_ccq_arm(cq);
+ }
+}
+
+/**
+ * cqp_compl_worker - Handle cqp completions
+ * @work: Pointer to work structure
+ */
+void cqp_compl_worker(struct work_struct *work)
+{
+ struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
+ cqp_cmpl_work);
+ struct irdma_sc_cq *cq = &rf->ccq.sc_cq;
+
+ irdma_cqp_ce_handler(rf, cq);
+}
+
+/**
+ * irdma_lookup_apbvt_entry - lookup hash table for an existing apbvt entry corresponding to port
+ * @cm_core: cm's core
+ * @port: port to identify apbvt entry
+ */
+static struct irdma_apbvt_entry *irdma_lookup_apbvt_entry(struct irdma_cm_core *cm_core,
+ u16 port)
+{
+ struct irdma_apbvt_entry *entry;
+
+ hash_for_each_possible(cm_core->apbvt_hash_tbl, entry, hlist, port) {
+ if (entry->port == port) {
+ entry->use_cnt++;
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_next_iw_state - modify qp state
+ * @iwqp: iwarp qp to modify
+ * @state: next state for qp
+ * @del_hash: del hash
+ * @term: term message
+ * @termlen: length of term message
+ */
+void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
+ u8 termlen)
+{
+ struct irdma_modify_qp_info info = {};
+
+ info.next_iwarp_state = state;
+ info.remove_hash_idx = del_hash;
+ info.cq_num_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.dont_send_term = true;
+ info.dont_send_fin = true;
+ info.termlen = termlen;
+
+ if (term & IRDMAQP_TERM_SEND_TERM_ONLY)
+ info.dont_send_term = false;
+ if (term & IRDMAQP_TERM_SEND_FIN_ONLY)
+ info.dont_send_fin = false;
+ if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
+ info.reset_tcp_conn = true;
+ iwqp->hw_iwarp_state = state;
+ irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
+ iwqp->iwarp_state = info.next_iwarp_state;
+}
+
+/**
+ * irdma_del_local_mac_entry - remove a mac entry from the hw
+ * table
+ * @rf: RDMA PCI function
+ * @idx: the index of the mac ip address to delete
+ */
+void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
+{
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_DELETE_LOCAL_MAC_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.del_local_mac_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.del_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.del_local_mac_entry.entry_idx = idx;
+ cqp_info->in.u.del_local_mac_entry.ignore_ref_count = 0;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+}
+
+/**
+ * irdma_add_local_mac_entry - add a mac ip address entry to the
+ * hw table
+ * @rf: RDMA PCI function
+ * @mac_addr: pointer to mac address
+ * @idx: the index of the mac ip address to add
+ */
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
+{
+ struct irdma_local_mac_entry_info *info;
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->post_sq = 1;
+ info = &cqp_info->in.u.add_local_mac_entry.info;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ info->entry_idx = idx;
+ cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->cqp_cmd = IRDMA_OP_ADD_LOCAL_MAC_ENTRY;
+ cqp_info->in.u.add_local_mac_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.add_local_mac_entry.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_alloc_local_mac_entry - allocate a mac entry
+ * @rf: RDMA PCI function
+ * @mac_tbl_idx: the index of the new mac address
+ *
+ * Allocate a mac address entry and update the mac_tbl_idx
+ * to hold the index of the newly created mac address
+ * Return 0 if successful, otherwise return error
+ */
+int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx)
+{
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status = 0;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.alloc_local_mac_entry.cqp = &iwcqp->sc_cqp;
+ cqp_info->in.u.alloc_local_mac_entry.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (!status)
+ *mac_tbl_idx = (u16)cqp_request->compl_info.op_ret_val;
+
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_manage_apbvt_cmd - send cqp command manage apbvt
+ * @iwdev: irdma device
+ * @accel_local_port: port for apbvt
+ * @add_port: add or delete port
+ */
+static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
+ u16 accel_local_port, bool add_port)
+{
+ struct irdma_apbvt_info *info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_apbvt_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->add = add_port;
+ info->port = accel_local_port;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
+ ibdev_dbg(&iwdev->ibdev, "DEV: %s: port=0x%04x\n",
+ (!add_port) ? "DELETE" : "ADD", accel_local_port);
+
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_add_apbvt - add tcp port to HW apbvt table
+ * @iwdev: irdma device
+ * @port: port for apbvt
+ */
+struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ struct irdma_apbvt_entry *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ entry = irdma_lookup_apbvt_entry(cm_core, port);
+ if (entry) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return entry;
+ }
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return NULL;
+ }
+
+ entry->port = port;
+ entry->use_cnt = 1;
+ hash_add(cm_core->apbvt_hash_tbl, &entry->hlist, entry->port);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+
+ if (irdma_cqp_manage_apbvt_cmd(iwdev, port, true)) {
+ kfree(entry);
+ return NULL;
+ }
+
+ return entry;
+}
+
+/**
+ * irdma_del_apbvt - delete tcp port from HW apbvt table
+ * @iwdev: irdma device
+ * @entry: apbvt entry object
+ */
+void irdma_del_apbvt(struct irdma_device *iwdev,
+ struct irdma_apbvt_entry *entry)
+{
+ struct irdma_cm_core *cm_core = &iwdev->cm_core;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ if (--entry->use_cnt) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return;
+ }
+
+ hash_del(&entry->hlist);
+ /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to
+ * protect against race where add APBVT CQP can race ahead of the delete
+ * APBVT for same port.
+ */
+ irdma_cqp_manage_apbvt_cmd(iwdev, entry->port, false);
+ kfree(entry);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+}
+
+/**
+ * irdma_manage_arp_cache - manage hw arp cache
+ * @rf: RDMA PCI function
+ * @mac_addr: mac address ptr
+ * @ip_addr: ip addr for arp cache
+ * @ipv4: flag indicating IPv4
+ * @action: add, delete or modify
+ */
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
+ u32 *ip_addr, bool ipv4, u32 action)
+{
+ struct irdma_add_arp_cache_entry_info *info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int arp_index;
+
+ arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action);
+ if (arp_index == -1)
+ return;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ if (action == IRDMA_ARP_ADD) {
+ cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
+ info = &cqp_info->in.u.add_arp_cache_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->arp_index = (u16)arp_index;
+ info->permanent = true;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ cqp_info->in.u.add_arp_cache_entry.scratch =
+ (uintptr_t)cqp_request;
+ cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
+ } else {
+ cqp_info->cqp_cmd = IRDMA_OP_DELETE_ARP_CACHE_ENTRY;
+ cqp_info->in.u.del_arp_cache_entry.scratch =
+ (uintptr_t)cqp_request;
+ cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
+ cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
+ }
+
+ cqp_info->post_sq = 1;
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_send_syn_cqp_callback - do syn/ack after qhash
+ * @cqp_request: qhash cqp completion
+ */
+static void irdma_send_syn_cqp_callback(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_cm_node *cm_node = cqp_request->param;
+
+ irdma_send_syn(cm_node, 1);
+ irdma_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * irdma_manage_qhash - add or modify qhash
+ * @iwdev: irdma device
+ * @cminfo: cm info for qhash
+ * @etype: type (syn or quad)
+ * @mtype: type of qhash
+ * @cmnode: cmnode associated with connection
+ * @wait: wait for completion
+ */
+int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
+ enum irdma_quad_entry_type etype,
+ enum irdma_quad_hash_manage_type mtype, void *cmnode,
+ bool wait)
+{
+ struct irdma_qhash_table_info *info;
+ struct irdma_cqp *iwcqp = &iwdev->rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_cm_node *cm_node = cmnode;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.manage_qhash_table_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->vsi = &iwdev->vsi;
+ info->manage = mtype;
+ info->entry_type = etype;
+ if (cminfo->vlan_id < VLAN_N_VID) {
+ info->vlan_valid = true;
+ info->vlan_id = cminfo->vlan_id;
+ } else {
+ info->vlan_valid = false;
+ }
+ info->ipv4_valid = cminfo->ipv4;
+ info->user_pri = cminfo->user_pri;
+ ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
+ info->qp_num = cminfo->qh_qpid;
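+	/* The qhash entry is keyed from the receive side, so the local address and port fill the dest fields */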
+ info->dest_port = cminfo->loc_port;
+ info->dest_ip[0] = cminfo->loc_addr[0];
+ info->dest_ip[1] = cminfo->loc_addr[1];
+ info->dest_ip[2] = cminfo->loc_addr[2];
+ info->dest_ip[3] = cminfo->loc_addr[3];
+ if (etype == IRDMA_QHASH_TYPE_TCP_ESTABLISHED ||
+ etype == IRDMA_QHASH_TYPE_UDP_UNICAST ||
+ etype == IRDMA_QHASH_TYPE_UDP_MCAST ||
+ etype == IRDMA_QHASH_TYPE_ROCE_MCAST ||
+ etype == IRDMA_QHASH_TYPE_ROCEV2_HW) {
+ info->src_port = cminfo->rem_port;
+ info->src_ip[0] = cminfo->rem_addr[0];
+ info->src_ip[1] = cminfo->rem_addr[1];
+ info->src_ip[2] = cminfo->rem_addr[2];
+ info->src_ip[3] = cminfo->rem_addr[3];
+ }
+ if (cmnode) {
+ cqp_request->callback_fcn = irdma_send_syn_cqp_callback;
+ cqp_request->param = cmnode;
+ if (!wait)
+ refcount_inc(&cm_node->refcnt);
+ }
+ if (info->ipv4_valid)
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI4 rem_addr=%pI4 mac=%pM, vlan_id=%d cm_node=%p\n",
+ (!mtype) ? "DELETE" : "ADD",
+ __builtin_return_address(0), info->dest_port,
+ info->src_port, info->dest_ip, info->src_ip,
+ info->mac_addr, cminfo->vlan_id,
+ cmnode ? cmnode : NULL);
+ else
+ ibdev_dbg(&iwdev->ibdev,
+ "CM: %s caller: %pS loc_port=0x%04x rem_port=0x%04x loc_addr=%pI6 rem_addr=%pI6 mac=%pM, vlan_id=%d cm_node=%p\n",
+ (!mtype) ? "DELETE" : "ADD",
+ __builtin_return_address(0), info->dest_port,
+ info->src_port, info->dest_ip, info->src_ip,
+ info->mac_addr, cminfo->vlan_id,
+ cmnode ? cmnode : NULL);
+
+ cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY;
+ cqp_info->post_sq = 1;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ if (status && cm_node && !wait)
+ irdma_rem_ref_cm_node(cm_node);
+
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_hw_flush_wqes_callback - Check return code after flush
+ * @cqp_request: flush cqp completion
+ */
+static void irdma_hw_flush_wqes_callback(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_qp_flush_info *hw_info;
+ struct irdma_sc_qp *qp;
+ struct irdma_qp *iwqp;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_info = &cqp_request->info;
+ hw_info = &cqp_info->in.u.qp_flush_wqes.info;
+ qp = cqp_info->in.u.qp_flush_wqes.qp;
+ iwqp = qp->qp_uk.back_qp;
+
+ if (cqp_request->compl_info.maj_err_code)
+ return;
+
+ if (hw_info->rq &&
+ (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0)) {
+ /* RQ WQE flush was requested but did not happen */
+ qp->qp_uk.rq_flush_complete = true;
+ }
+ if (hw_info->sq &&
+ (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0)) {
+ if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
+ ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
+ qp->qp_uk.qp_id);
+ irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
+ }
+ qp->qp_uk.sq_flush_complete = true;
+ }
+}
+
+/**
+ * irdma_hw_flush_wqes - flush qp's wqe
+ * @rf: RDMA PCI function
+ * @qp: hardware control qp
+ * @info: info for flush
+ * @wait: flag wait for completion
+ */
+int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, bool wait)
+{
+ int status;
+ struct irdma_qp_flush_info *hw_info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_qp *iwqp = qp->qp_uk.back_qp;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ if (!wait)
+ cqp_request->callback_fcn = irdma_hw_flush_wqes_callback;
+ hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_flush_wqes.qp = qp;
+ cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (status) {
+ qp->qp_uk.sq_flush_complete = true;
+ qp->qp_uk.rq_flush_complete = true;
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ return status;
+ }
+
+ if (!wait || cqp_request->compl_info.maj_err_code)
+ goto put_cqp;
+
+ if (info->rq) {
+ if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0) {
+ /* RQ WQE flush was requested but did not happen */
+ qp->qp_uk.rq_flush_complete = true;
+ }
+ }
+ if (info->sq) {
+ if (cqp_request->compl_info.min_err_code == IRDMA_CQP_COMPL_RQ_WQE_FLUSHED ||
+ cqp_request->compl_info.min_err_code == 0) {
+ /*
+ * Handling case where WQE is posted to empty SQ when
+ * flush has not completed
+ */
+ if (IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring)) {
+ struct irdma_cqp_request *new_req;
+
+ if (!qp->qp_uk.sq_flush_complete)
+ goto put_cqp;
+ qp->qp_uk.sq_flush_complete = false;
+ qp->flush_sq = false;
+
+ info->rq = false;
+ info->sq = true;
+ new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!new_req) {
+ status = -ENOMEM;
+ goto put_cqp;
+ }
+ cqp_info = &new_req->info;
+ hw_info = &new_req->info.in.u.qp_flush_wqes.info;
+ memcpy(hw_info, info, sizeof(*hw_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_FLUSH_WQES;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_flush_wqes.qp = qp;
+ cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)new_req;
+
+ status = irdma_handle_cqp_op(rf, new_req);
+ if (new_req->compl_info.maj_err_code ||
+ new_req->compl_info.min_err_code != IRDMA_CQP_COMPL_SQ_WQE_FLUSHED ||
+ status) {
+ ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
+ iwqp->ibqp.qp_num);
+ qp->qp_uk.sq_flush_complete = false;
+ irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
+ }
+ irdma_put_cqp_request(&rf->cqp, new_req);
+ } else {
+ /* SQ WQE flush was requested but did not happen */
+ qp->qp_uk.sq_flush_complete = true;
+ }
+ } else {
+ if (!IRDMA_RING_MORE_WORK(qp->qp_uk.sq_ring))
+ qp->qp_uk.sq_flush_complete = true;
+ }
+ }
+
+ ibdev_dbg(&rf->iwdev->ibdev,
+ "VERBS: qp_id=%d qp_type=%d qpstate=%d ibqpstate=%d last_aeq=%d hw_iw_state=%d maj_err_code=%d min_err_code=%d\n",
+ iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
+ iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
+ cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
+put_cqp:
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_gen_ae - generate AE
+ * @rf: RDMA PCI function
+ * @qp: qp associated with AE
+ * @info: info for ae
+ * @wait: wait for completion
+ */
+void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, bool wait)
+{
+ struct irdma_gen_ae_info *ae_info;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ ae_info = &cqp_request->info.in.u.gen_ae.info;
+ memcpy(ae_info, info, sizeof(*ae_info));
+ cqp_info->cqp_cmd = IRDMA_OP_GEN_AE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.gen_ae.qp = qp;
+ cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
+{
+ struct irdma_qp_flush_info info = {};
+ struct irdma_pci_f *rf = iwqp->iwdev->rf;
+ u8 flush_code = iwqp->sc_qp.flush_code;
+
+ if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
+ return;
+
+	/* Set flush info fields */
+ info.sq = flush_mask & IRDMA_FLUSH_SQ;
+ info.rq = flush_mask & IRDMA_FLUSH_RQ;
+
+ /* Generate userflush errors in CQE */
+ info.sq_major_code = IRDMA_FLUSH_MAJOR_ERR;
+ info.sq_minor_code = FLUSH_GENERAL_ERR;
+ info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
+ info.rq_minor_code = FLUSH_GENERAL_ERR;
+ info.userflushcode = true;
+
+ if (flush_mask & IRDMA_REFLUSH) {
+ if (info.sq)
+ iwqp->sc_qp.flush_sq = false;
+ if (info.rq)
+ iwqp->sc_qp.flush_rq = false;
+ } else {
+ if (flush_code) {
+ if (info.sq && iwqp->sc_qp.sq_flush_code)
+ info.sq_minor_code = flush_code;
+ if (info.rq && iwqp->sc_qp.rq_flush_code)
+ info.rq_minor_code = flush_code;
+ }
+ if (!iwqp->user_mode)
+ queue_delayed_work(iwqp->iwdev->cleanup_wq,
+ &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+ }
+
+ /* Issue flush */
+ (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
+ flush_mask & IRDMA_FLUSH_WAIT);
+ iwqp->flush_issued = true;
+}
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
new file mode 100644
index 0000000000..638d127fb3
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "i40iw_hw.h"
+#include "protos.h"
+
+static u32 i40iw_regs[IRDMA_MAX_REGS] = {
+ I40E_PFPE_CQPTAIL,
+ I40E_PFPE_CQPDB,
+ I40E_PFPE_CCQPSTATUS,
+ I40E_PFPE_CCQPHIGH,
+ I40E_PFPE_CCQPLOW,
+ I40E_PFPE_CQARM,
+ I40E_PFPE_CQACK,
+ I40E_PFPE_AEQALLOC,
+ I40E_PFPE_CQPERRCODES,
+ I40E_PFPE_WQEALLOC,
+ I40E_PFINT_DYN_CTLN(0),
+ I40IW_DB_ADDR_OFFSET,
+
+ I40E_GLPCI_LBARCTRL,
+ I40E_GLPE_CPUSTATUS0,
+ I40E_GLPE_CPUSTATUS1,
+ I40E_GLPE_CPUSTATUS2,
+ I40E_PFINT_AEQCTL,
+ I40E_PFINT_CEQCTL(0),
+ I40E_VSIQF_CTL(0),
+ I40E_PFHMC_PDINV,
+ I40E_GLHMC_VFPDINV(0),
+ I40E_GLPE_CRITERR,
+ 0xffffffff /* PFINT_RATEN not used in FPK */
+};
+
+static u32 i40iw_stat_offsets[] = {
+ I40E_GLPES_PFIP4RXDISCARD(0),
+ I40E_GLPES_PFIP4RXTRUNC(0),
+ I40E_GLPES_PFIP4TXNOROUTE(0),
+ I40E_GLPES_PFIP6RXDISCARD(0),
+ I40E_GLPES_PFIP6RXTRUNC(0),
+ I40E_GLPES_PFIP6TXNOROUTE(0),
+ I40E_GLPES_PFTCPRTXSEG(0),
+ I40E_GLPES_PFTCPRXOPTERR(0),
+ I40E_GLPES_PFTCPRXPROTOERR(0),
+ I40E_GLPES_PFRXVLANERR(0),
+
+ I40E_GLPES_PFIP4RXOCTSLO(0),
+ I40E_GLPES_PFIP4RXPKTSLO(0),
+ I40E_GLPES_PFIP4RXFRAGSLO(0),
+ I40E_GLPES_PFIP4RXMCPKTSLO(0),
+ I40E_GLPES_PFIP4TXOCTSLO(0),
+ I40E_GLPES_PFIP4TXPKTSLO(0),
+ I40E_GLPES_PFIP4TXFRAGSLO(0),
+ I40E_GLPES_PFIP4TXMCPKTSLO(0),
+ I40E_GLPES_PFIP6RXOCTSLO(0),
+ I40E_GLPES_PFIP6RXPKTSLO(0),
+ I40E_GLPES_PFIP6RXFRAGSLO(0),
+ I40E_GLPES_PFIP6RXMCPKTSLO(0),
+ I40E_GLPES_PFIP6TXOCTSLO(0),
+ I40E_GLPES_PFIP6TXPKTSLO(0),
+ I40E_GLPES_PFIP6TXFRAGSLO(0),
+ I40E_GLPES_PFIP6TXMCPKTSLO(0),
+ I40E_GLPES_PFTCPRXSEGSLO(0),
+ I40E_GLPES_PFTCPTXSEGLO(0),
+ I40E_GLPES_PFRDMARXRDSLO(0),
+ I40E_GLPES_PFRDMARXSNDSLO(0),
+ I40E_GLPES_PFRDMARXWRSLO(0),
+ I40E_GLPES_PFRDMATXRDSLO(0),
+ I40E_GLPES_PFRDMATXSNDSLO(0),
+ I40E_GLPES_PFRDMATXWRSLO(0),
+ I40E_GLPES_PFRDMAVBNDLO(0),
+ I40E_GLPES_PFRDMAVINVLO(0),
+ I40E_GLPES_PFIP4RXMCOCTSLO(0),
+ I40E_GLPES_PFIP4TXMCOCTSLO(0),
+ I40E_GLPES_PFIP6RXMCOCTSLO(0),
+ I40E_GLPES_PFIP6TXMCOCTSLO(0),
+ I40E_GLPES_PFUDPRXPKTSLO(0),
+ I40E_GLPES_PFUDPTXPKTSLO(0)
+};
+
+static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
+ I40E_PFPE_CCQPSTATUS_CCQP_DONE,
+ I40E_PFPE_CCQPSTATUS_CCQP_ERR,
+ I40E_CQPSQ_STAG_PDID,
+ I40E_CQPSQ_CQ_CEQID,
+ I40E_CQPSQ_CQ_CQID,
+ I40E_COMMIT_FPM_CQCNT,
+};
+
+static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
+ I40E_PFPE_CCQPSTATUS_CCQP_DONE_S,
+ I40E_PFPE_CCQPSTATUS_CCQP_ERR_S,
+ I40E_CQPSQ_STAG_PDID_S,
+ I40E_CQPSQ_CQ_CEQID_S,
+ I40E_CQPSQ_CQ_CQID_S,
+ I40E_COMMIT_FPM_CQCNT_S,
+};
+
+/**
+ * i40iw_config_ceq - Configure CEQ interrupt
+ * @dev: pointer to the device structure
+ * @ceq_id: Completion Event Queue ID
+ * @idx: vector index
+ * @enable: Enable CEQ interrupt when true
+ */
+static void i40iw_config_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_INDX, ceq_id) |
+ FIELD_PREP(I40E_PFINT_LNKLSTN_FIRSTQ_TYPE, QUEUE_TYPE_CEQ);
+ wr32(dev->hw, I40E_PFINT_LNKLSTN(idx - 1), reg_val);
+
+ reg_val = FIELD_PREP(I40E_PFINT_DYN_CTLN_ITR_INDX, 0x3) |
+ FIELD_PREP(I40E_PFINT_DYN_CTLN_INTENA, 0x1);
+ wr32(dev->hw, I40E_PFINT_DYN_CTLN(idx - 1), reg_val);
+
+ reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(I40E_PFINT_CEQCTL_NEXTQ_INDX, NULL_QUEUE_INDEX) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 0x3);
+
+ wr32(dev->hw, i40iw_regs[IRDMA_GLINT_CEQCTL] + 4 * ceq_id, reg_val);
+}
+
+/**
+ * i40iw_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void i40iw_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 0x1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 0x1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0x3);
+ wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), val);
+}
+
+/**
+ * i40iw_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void i40iw_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ wr32(dev->hw, i40iw_regs[IRDMA_GLINT_DYN_CTL] + 4 * (idx - 1), 0);
+}
+
+static const struct irdma_irq_ops i40iw_irq_ops = {
+ .irdma_cfg_aeq = irdma_cfg_aeq,
+ .irdma_cfg_ceq = i40iw_config_ceq,
+ .irdma_dis_irq = i40iw_disable_irq,
+ .irdma_en_irq = i40iw_ena_irq,
+};
+
+static const struct irdma_hw_stat_map i40iw_hw_stat_map[] = {
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 80, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 88, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 128, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 136, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 144, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 152, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 160, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 168, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 176, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 184, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 192, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 224, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 232, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 240, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 248, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 256, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 264, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 272, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 280, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 288, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 296, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 304, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 312, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 320, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 328, 0, IRDMA_MAX_STATS_48 },
+};
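+
+/*
+ * Each entry above appears to be a { byte offset, bit offset, width mask }
+ * triple locating the counter within the gathered statistics area. GEN_1
+ * keeps every counter in its own 64-bit slot, so the bit offset is always
+ * 0 here; contrast with the GEN_2 map in icrdma_hw.c, which packs some
+ * 24/32-bit counters two per slot (non-zero bit offsets).
+ */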
+
+void i40iw_init_hw(struct irdma_sc_dev *dev)
+{
+ int i;
+ u8 __iomem *hw_addr;
+
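+	/*
+	 * Map every offset in i40iw_regs[] to an MMIO address. The
+	 * IRDMA_DB_ADDR_OFFSET slot is the one exception: it carries the
+	 * raw doorbell page offset, so the base address is left out of it
+	 * by zeroing hw_addr for that entry.
+	 */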
+ for (i = 0; i < IRDMA_MAX_REGS; ++i) {
+ hw_addr = dev->hw->hw_addr;
+
+ if (i == IRDMA_DB_ADDR_OFFSET)
+ hw_addr = NULL;
+
+ dev->hw_regs[i] = (u32 __iomem *)(i40iw_regs[i] + hw_addr);
+ }
+
+ for (i = 0; i < IRDMA_HW_STAT_INDEX_MAX_GEN_1; ++i)
+ dev->hw_stats_regs[i] = i40iw_stat_offsets[i];
+
+ dev->hw_attrs.first_hw_vf_fpm_id = I40IW_FIRST_VF_FPM_ID;
+ dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
+
+ for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
+ dev->hw_shifts[i] = i40iw_shifts[i];
+
+ for (i = 0; i < IRDMA_MAX_MASKS; ++i)
+ dev->hw_masks[i] = i40iw_masks[i];
+
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+ dev->ceq_itr_mask_db = NULL;
+ dev->aeq_itr_mask_db = NULL;
+ dev->irq_ops = &i40iw_irq_ops;
+ dev->hw_stats_map = i40iw_hw_stat_map;
+
+ /* Setup the hardware limits, hmc may limit further */
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD;
+ dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M;
+ dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE;
+ dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES;
+ dev->hw_attrs.uk_attrs.max_hw_rq_quanta = I40IW_QP_SW_MAX_RQ_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_wq_quanta = I40IW_QP_SW_MAX_WQ_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = I40IW_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.max_hw_pds = I40IW_MAX_PDS;
+ dev->hw_attrs.max_stat_inst = I40IW_MAX_STATS_COUNT;
+ dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_1;
+ dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
+ dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = I40IW_MIN_WQ_SIZE;
+ dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
+}
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h
new file mode 100644
index 0000000000..10afc165f5
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef I40IW_HW_H
+#define I40IW_HW_H
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
+
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CRITERR 0x000B4000 /* Reset: PE_CORER */
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
+
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+
+#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024)
+
+#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
+
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
+#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX GENMASK(10, 0)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE GENMASK(12, 11)
+
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
+#define I40E_PFINT_CEQCTL_MAX_INDEX 511
+
+/* shifts/masks for FLD_[LS/RS]_64 macros used in device table */
+#define I40E_PFINT_CEQCTL_MSIX_INDX_S 0
+#define I40E_PFINT_CEQCTL_MSIX_INDX GENMASK(7, 0)
+#define I40E_PFINT_CEQCTL_ITR_INDX_S 11
+#define I40E_PFINT_CEQCTL_ITR_INDX GENMASK(12, 11)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_S 13
+#define I40E_PFINT_CEQCTL_MSIX0_INDX GENMASK(15, 13)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_S 16
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX GENMASK(26, 16)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_S 27
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE GENMASK(28, 27)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_S 30
+#define I40E_PFINT_CEQCTL_CAUSE_ENA BIT(30)
+#define I40E_PFINT_CEQCTL_INTEVENT_S 31
+#define I40E_PFINT_CEQCTL_INTEVENT BIT(31)
+#define I40E_CQPSQ_STAG_PDID_S 48
+#define I40E_CQPSQ_STAG_PDID GENMASK_ULL(62, 48)
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_S 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_S 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_S 3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX GENMASK(4, 3)
+#define I40E_PFINT_DYN_CTLN_INTENA_S 0
+#define I40E_PFINT_DYN_CTLN_INTENA BIT(0)
+#define I40E_CQPSQ_CQ_CEQID_S 24
+#define I40E_CQPSQ_CQ_CEQID GENMASK(30, 24)
+#define I40E_CQPSQ_CQ_CQID_S 0
+#define I40E_CQPSQ_CQ_CQID GENMASK_ULL(15, 0)
+#define I40E_COMMIT_FPM_CQCNT_S 0
+#define I40E_COMMIT_FPM_CQCNT GENMASK_ULL(17, 0)
+
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4))
+
+enum i40iw_device_caps_const {
+ I40IW_MAX_WQ_FRAGMENT_COUNT = 3,
+ I40IW_MAX_SGE_RD = 1,
+ I40IW_MAX_PUSH_PAGE_COUNT = 0,
+ I40IW_MAX_INLINE_DATA_SIZE = 48,
+ I40IW_MAX_IRD_SIZE = 63,
+ I40IW_MAX_ORD_SIZE = 127,
+ I40IW_MAX_WQ_ENTRIES = 2048,
+ I40IW_MAX_WQE_SIZE_RQ = 128,
+ I40IW_MAX_PDS = 32768,
+ I40IW_MAX_STATS_COUNT = 16,
+ I40IW_MAX_CQ_SIZE = 1048575,
+ I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
+ I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
+ I40IW_MIN_WQ_SIZE = 4 /* WQEs */,
+};
+
+#define I40IW_QP_WQE_MIN_SIZE 32
+#define I40IW_QP_WQE_MAX_SIZE 128
+#define I40IW_MAX_RQ_WQE_SHIFT 2
+#define I40IW_MAX_QUANTA_PER_WR 2
+
+#define I40IW_QP_SW_MAX_SQ_QUANTA 2048
+#define I40IW_QP_SW_MAX_RQ_QUANTA 16384
+#define I40IW_QP_SW_MAX_WQ_QUANTA 2048
+#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTA - IRDMA_SQ_RSVD) / I40IW_MAX_QUANTA_PER_WR)
+#define I40IW_FIRST_VF_FPM_ID 16
+#define QUEUE_TYPE_CEQ 2
+#define NULL_QUEUE_INDEX 0x7FF
+
+void i40iw_init_hw(struct irdma_sc_dev *dev);
+#endif /* I40IW_HW_H */
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
new file mode 100644
index 0000000000..4053ead324
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+#include "i40iw_hw.h"
+#include <linux/net/intel/i40e_client.h>
+
+static struct i40e_client i40iw_client;
+
+/**
+ * i40iw_l2param_change - handle MTU change
+ * @cdev_info: parent lan device information structure with data/ops
+ * @client: client for parameter change
+ * @params: new parameters from L2
+ */
+static void i40iw_l2param_change(struct i40e_info *cdev_info,
+ struct i40e_client *client,
+ struct i40e_params *params)
+{
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+
+ ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return;
+
+ iwdev = to_iwdev(ibdev);
+
+ if (iwdev->vsi.mtu != params->mtu) {
+ l2params.mtu_changed = true;
+ l2params.mtu = params->mtu;
+ }
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ ib_device_put(ibdev);
+}
+
+/**
+ * i40iw_close - client interface operation close for iwarp/uda device
+ * @cdev_info: parent lan device information structure with data/ops
+ * @client: client to close
+ * @reset: flag to indicate close on reset
+ *
+ * Called by the lan driver during the processing of client unregister
+ * Destroy and clean up the driver resources
+ */
+static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
+ bool reset)
+{
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+
+ ibdev = ib_device_get_by_netdev(cdev_info->netdev, RDMA_DRIVER_IRDMA);
+ if (WARN_ON(!ibdev))
+ return;
+
+ iwdev = to_iwdev(ibdev);
+ if (reset)
+ iwdev->rf->reset = true;
+
+ iwdev->iw_status = 0;
+ irdma_port_ibevent(iwdev);
+ ib_unregister_device_and_put(ibdev);
+ pr_debug("INIT: Gen1 PF[%d] close complete\n", PCI_FUNC(cdev_info->pcidev->devfn));
+}
+
+static void i40iw_request_reset(struct irdma_pci_f *rf)
+{
+ struct i40e_info *cdev_info = rf->cdev;
+
+ cdev_info->ops->request_reset(cdev_info, &i40iw_client, 1);
+}
+
+static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info *cdev_info)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ rf->rdma_ver = IRDMA_GEN_1;
+ rf->gen_ops.request_reset = i40iw_request_reset;
+ rf->pcidev = cdev_info->pcidev;
+ rf->pf_id = cdev_info->fid;
+ rf->hw.hw_addr = cdev_info->hw_addr;
+ rf->cdev = cdev_info;
+ rf->msix_count = cdev_info->msix_count;
+ rf->msix_entries = cdev_info->msix_entries;
+ rf->limits_sel = 5;
+ rf->protocol_used = IRDMA_IWARP_PROTOCOL_ONLY;
+ rf->iwdev = iwdev;
+
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ iwdev->netdev = cdev_info->netdev;
+ iwdev->vsi_num = 0;
+}
+
+/**
+ * i40iw_open - client interface operation open for iwarp/uda device
+ * @cdev_info: parent lan device information structure with data/ops
+ * @client: iwarp client information, provided during registration
+ *
+ * Called by the lan driver during the processing of client register
+ * Create device resources, set up queues, pble and hmc objects and
+ * register the device with the ib verbs interface
+ * Return 0 if successful, otherwise return error
+ */
+static int i40iw_open(struct i40e_info *cdev_info, struct i40e_client *client)
+{
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ int err = -EIO;
+ int i;
+ u16 qset;
+ u16 last_qset = IRDMA_NO_QSET;
+
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ return -ENOMEM;
+ }
+
+ i40iw_fill_device_info(iwdev, cdev_info);
+ rf = iwdev->rf;
+
+ if (irdma_ctrl_init_hw(rf)) {
+ err = -EIO;
+ goto err_ctrl_init;
+ }
+
+ l2params.mtu = (cdev_info->params.mtu) ? cdev_info->params.mtu : IRDMA_DEFAULT_MTU;
+ for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
+ qset = cdev_info->params.qos.prio_qos[i].qs_handle;
+ l2params.up2tc[i] = cdev_info->params.qos.prio_qos[i].tc;
+ l2params.qs_handle_list[i] = qset;
+ if (last_qset == IRDMA_NO_QSET)
+ last_qset = qset;
+ else if ((qset != last_qset) && (qset != IRDMA_NO_QSET))
+ iwdev->dcb_vlan_mode = true;
+ }
+
+ if (irdma_rt_init_hw(iwdev, &l2params)) {
+ err = -EIO;
+ goto err_rt_init;
+ }
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ ibdev_dbg(&iwdev->ibdev, "INIT: Gen1 PF[%d] open success\n",
+ PCI_FUNC(rf->pcidev->devfn));
+
+ return 0;
+
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+/* client interface functions */
+static const struct i40e_client_ops i40e_ops = {
+ .open = i40iw_open,
+ .close = i40iw_close,
+ .l2_param_change = i40iw_l2param_change
+};
+
+static struct i40e_client i40iw_client = {
+ .ops = &i40e_ops,
+ .type = I40E_CLIENT_IWARP,
+};
+
+static int i40iw_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
+{
+ struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev,
+ struct i40e_auxiliary_device,
+ aux_dev);
+ struct i40e_info *cdev_info = i40e_adev->ldev;
+
+ strncpy(i40iw_client.name, "irdma", I40E_CLIENT_STR_LENGTH);
+ i40e_client_device_register(cdev_info, &i40iw_client);
+
+ return 0;
+}
+
+static void i40iw_remove(struct auxiliary_device *aux_dev)
+{
+ struct i40e_auxiliary_device *i40e_adev = container_of(aux_dev,
+ struct i40e_auxiliary_device,
+ aux_dev);
+ struct i40e_info *cdev_info = i40e_adev->ldev;
+
+ i40e_client_device_unregister(cdev_info);
+}
+
+static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
+ {.name = "i40e.iwarp", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, i40iw_auxiliary_id_table);
+
+struct auxiliary_driver i40iw_auxiliary_drv = {
+ .name = "gen_1",
+ .id_table = i40iw_auxiliary_id_table,
+ .probe = i40iw_probe,
+ .remove = i40iw_remove,
+};
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
new file mode 100644
index 0000000000..10ccf4bc3f
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "icrdma_hw.h"
+
+static u32 icrdma_regs[IRDMA_MAX_REGS] = {
+ PFPE_CQPTAIL,
+ PFPE_CQPDB,
+ PFPE_CCQPSTATUS,
+ PFPE_CCQPHIGH,
+ PFPE_CCQPLOW,
+ PFPE_CQARM,
+ PFPE_CQACK,
+ PFPE_AEQALLOC,
+ PFPE_CQPERRCODES,
+ PFPE_WQEALLOC,
+ GLINT_DYN_CTL(0),
+ ICRDMA_DB_ADDR_OFFSET,
+
+ GLPCI_LBARCTRL,
+ GLPE_CPUSTATUS0,
+ GLPE_CPUSTATUS1,
+ GLPE_CPUSTATUS2,
+ PFINT_AEQCTL,
+ GLINT_CEQCTL(0),
+ VSIQF_PE_CTL1(0),
+ PFHMC_PDINV,
+ GLHMC_VFPDINV(0),
+ GLPE_CRITERR,
+ GLINT_RATE(0),
+};
+
+static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
+ ICRDMA_CCQPSTATUS_CCQP_DONE,
+ ICRDMA_CCQPSTATUS_CCQP_ERR,
+ ICRDMA_CQPSQ_STAG_PDID,
+ ICRDMA_CQPSQ_CQ_CEQID,
+ ICRDMA_CQPSQ_CQ_CQID,
+ ICRDMA_COMMIT_FPM_CQCNT,
+};
+
+static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
+ ICRDMA_CCQPSTATUS_CCQP_DONE_S,
+ ICRDMA_CCQPSTATUS_CCQP_ERR_S,
+ ICRDMA_CQPSQ_STAG_PDID_S,
+ ICRDMA_CQPSQ_CQ_CEQID_S,
+ ICRDMA_CQPSQ_CQ_CQID_S,
+ ICRDMA_COMMIT_FPM_CQCNT_S,
+};
+
+/**
+ * icrdma_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void icrdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+ u32 interval = 0;
+
+ if (dev->ceq_itr && dev->aeq->msix_idx != idx)
+ interval = dev->ceq_itr >> 1; /* 2 usec units */
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_ITR_INDX, 0) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTERVAL, interval) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
+ else
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
+}
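+
+/*
+ * Note on the interval computed above: per the inline comment, the
+ * GLINT_DYN_CTL interval field counts in 2 usec units, so a ceq_itr
+ * presumably given in microseconds is halved before being written
+ * (e.g. a 100 us ITR becomes an interval value of 50). The AEQ vector
+ * is left at interval 0, so its interrupts are not rate limited.
+ */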
+
+/**
+ * icrdma_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void icrdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1)
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + idx);
+ else
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx - 1));
+}
+
+/**
+ * icrdma_cfg_ceq - Configure CEQ interrupt
+ * @dev: pointer to the device structure
+ * @ceq_id: Completion Event Queue ID
+ * @idx: vector index
+ * @enable: true to enable the CEQ interrupt, false to disable it
+ */
+static void icrdma_cfg_ceq(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = FIELD_PREP(IRDMA_GLINT_CEQCTL_CAUSE_ENA, enable) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_MSIX_INDX, idx) |
+ FIELD_PREP(IRDMA_GLINT_CEQCTL_ITR_INDX, 3);
+
+ writel(reg_val, dev->hw_regs[IRDMA_GLINT_CEQCTL] + ceq_id);
+}
+
+static const struct irdma_irq_ops icrdma_irq_ops = {
+ .irdma_cfg_aeq = irdma_cfg_aeq,
+ .irdma_cfg_ceq = icrdma_cfg_ceq,
+ .irdma_dis_irq = icrdma_disable_irq,
+ .irdma_en_irq = icrdma_ena_irq,
+};
+
+static const struct irdma_hw_stat_map icrdma_hw_stat_map[] = {
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 32, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 32, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 24, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 32, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 40, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 48, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 56, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 64, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 72, 32, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 72, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 80, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 88, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 96, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 104, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 112, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 120, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 128, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 136, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 144, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 152, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 160, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 168, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 176, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 184, 32, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 184, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 192, 32, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 200, 32, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 200, 0, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 208, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 32, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 224, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 232, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 240, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 248, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 256, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 264, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 272, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 280, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 288, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 296, 0, IRDMA_MAX_STATS_48 },
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 304, 0, IRDMA_MAX_STATS_56 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 312, 32, IRDMA_MAX_STATS_24 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 312, 0, IRDMA_MAX_STATS_32 },
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 320, 0, IRDMA_MAX_STATS_32 },
+};
+
+void icrdma_init_hw(struct irdma_sc_dev *dev)
+{
+ int i;
+ u8 __iomem *hw_addr;
+
+ for (i = 0; i < IRDMA_MAX_REGS; ++i) {
+ hw_addr = dev->hw->hw_addr;
+
+ if (i == IRDMA_DB_ADDR_OFFSET)
+ hw_addr = NULL;
+
+ dev->hw_regs[i] = (u32 __iomem *)(hw_addr + icrdma_regs[i]);
+ }
+ dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
+ dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
+
+ for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
+ dev->hw_shifts[i] = icrdma_shifts[i];
+
+ for (i = 0; i < IRDMA_MAX_MASKS; ++i)
+ dev->hw_masks[i] = icrdma_masks[i];
+
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+ dev->irq_ops = &icrdma_irq_ops;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ dev->hw_stats_map = icrdma_hw_stat_map;
+ dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
+ dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
+ dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
+ IRDMA_FEATURE_CQ_RESIZE;
+}
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h
new file mode 100644
index 0000000000..54035a08cc
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#ifndef ICRDMA_HW_H
+#define ICRDMA_HW_H
+
+#include "irdma.h"
+
+#define VFPE_CQPTAIL1 0x0000a000
+#define VFPE_CQPDB1 0x0000bc00
+#define VFPE_CCQPSTATUS1 0x0000b800
+#define VFPE_CCQPHIGH1 0x00009800
+#define VFPE_CCQPLOW1 0x0000ac00
+#define VFPE_CQARM1 0x0000b400
+#define VFPE_CQACK1 0x0000b000
+#define VFPE_AEQALLOC1 0x0000a400
+#define VFPE_CQPERRCODES1 0x00009c00
+#define VFPE_WQEALLOC1 0x0000c000
+#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) /* _i=0...63 */
+
+#define PFPE_CQPTAIL 0x00500880
+#define PFPE_CQPDB 0x00500800
+#define PFPE_CCQPSTATUS 0x0050a000
+#define PFPE_CCQPHIGH 0x0050a100
+#define PFPE_CCQPLOW 0x0050a080
+#define PFPE_CQARM 0x00502c00
+#define PFPE_CQACK 0x00502c80
+#define PFPE_AEQALLOC 0x00502d00
+#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) /* _i=0...2047 */
+#define GLPCI_LBARCTRL 0x0009de74
+#define GLPE_CPUSTATUS0 0x0050ba5c
+#define GLPE_CPUSTATUS1 0x0050ba60
+#define GLPE_CPUSTATUS2 0x0050ba64
+#define PFINT_AEQCTL 0x0016cb00
+#define PFPE_CQPERRCODES 0x0050a200
+#define PFPE_WQEALLOC 0x00504400
+#define GLINT_CEQCTL(_INT) (0x0015c000 + ((_INT) * 4)) /* _i=0...2047 */
+#define VSIQF_PE_CTL1(_VSI) (0x00414000 + ((_VSI) * 4)) /* _i=0...767 */
+#define PFHMC_PDINV 0x00520300
+#define GLHMC_VFPDINV(_i) (0x00528300 + ((_i) * 4)) /* _i=0...31 */
+#define GLPE_CRITERR 0x00534000
+#define GLINT_RATE(_INT) (0x0015A000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+
+#define ICRDMA_DB_ADDR_OFFSET (8 * 1024 * 1024 - 64 * 1024)
+
+#define ICRDMA_VF_DB_ADDR_OFFSET (64 * 1024)
+
+/* shifts/masks for FLD_[LS/RS]_64 macros used in device table */
+#define ICRDMA_CCQPSTATUS_CCQP_DONE_S 0
+#define ICRDMA_CCQPSTATUS_CCQP_DONE BIT_ULL(0)
+#define ICRDMA_CCQPSTATUS_CCQP_ERR_S 31
+#define ICRDMA_CCQPSTATUS_CCQP_ERR BIT_ULL(31)
+#define ICRDMA_CQPSQ_STAG_PDID_S 46
+#define ICRDMA_CQPSQ_STAG_PDID GENMASK_ULL(63, 46)
+#define ICRDMA_CQPSQ_CQ_CEQID_S 22
+#define ICRDMA_CQPSQ_CQ_CEQID GENMASK_ULL(31, 22)
+#define ICRDMA_CQPSQ_CQ_CQID_S 0
+#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
+#define ICRDMA_COMMIT_FPM_CQCNT_S 0
+#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)
+
+enum icrdma_device_caps_const {
+ ICRDMA_MAX_STATS_COUNT = 128,
+
+ ICRDMA_MAX_IRD_SIZE = 127,
+ ICRDMA_MAX_ORD_SIZE = 255,
+ ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
+
+};
+
+void icrdma_init_hw(struct irdma_sc_dev *dev);
+#endif /* ICRDMA_HW_H */
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
new file mode 100644
index 0000000000..3237fa64bc
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#ifndef IRDMA_H
+#define IRDMA_H
+
+#define IRDMA_WQEALLOC_WQE_DESC_INDEX GENMASK(31, 20)
+
+#define IRDMA_CQPTAIL_WQTAIL GENMASK(10, 0)
+#define IRDMA_CQPTAIL_CQP_OP_ERR BIT(31)
+
+#define IRDMA_CQPERRCODES_CQP_MINOR_CODE GENMASK(15, 0)
+#define IRDMA_CQPERRCODES_CQP_MAJOR_CODE GENMASK(31, 16)
+#define IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE GENMASK(5, 4)
+#define IRDMA_GLINT_RATE_INTERVAL GENMASK(5, 0)
+#define IRDMA_GLINT_RATE_INTRL_ENA BIT(6)
+#define IRDMA_GLINT_DYN_CTL_INTENA BIT(0)
+#define IRDMA_GLINT_DYN_CTL_CLEARPBA BIT(1)
+#define IRDMA_GLINT_DYN_CTL_ITR_INDX GENMASK(4, 3)
+#define IRDMA_GLINT_DYN_CTL_INTERVAL GENMASK(16, 5)
+#define IRDMA_GLINT_CEQCTL_ITR_INDX GENMASK(12, 11)
+#define IRDMA_GLINT_CEQCTL_CAUSE_ENA BIT(30)
+#define IRDMA_GLINT_CEQCTL_MSIX_INDX GENMASK(10, 0)
+#define IRDMA_PFINT_AEQCTL_MSIX_INDX GENMASK(10, 0)
+#define IRDMA_PFINT_AEQCTL_ITR_INDX GENMASK(12, 11)
+#define IRDMA_PFINT_AEQCTL_CAUSE_ENA BIT(30)
+#define IRDMA_PFHMC_PDINV_PMSDIDX GENMASK(11, 0)
+#define IRDMA_PFHMC_PDINV_PMSDPARTSEL BIT(15)
+#define IRDMA_PFHMC_PDINV_PMPDIDX GENMASK(24, 16)
+#define IRDMA_PFHMC_SDDATALOW_PMSDVALID BIT(0)
+#define IRDMA_PFHMC_SDDATALOW_PMSDTYPE BIT(1)
+#define IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT GENMASK(11, 2)
+#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
+#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)
+
+#define IRDMA_INVALID_CQ_IDX 0xffffffff
+enum irdma_registers {
+ IRDMA_CQPTAIL,
+ IRDMA_CQPDB,
+ IRDMA_CCQPSTATUS,
+ IRDMA_CCQPHIGH,
+ IRDMA_CCQPLOW,
+ IRDMA_CQARM,
+ IRDMA_CQACK,
+ IRDMA_AEQALLOC,
+ IRDMA_CQPERRCODES,
+ IRDMA_WQEALLOC,
+ IRDMA_GLINT_DYN_CTL,
+ IRDMA_DB_ADDR_OFFSET,
+ IRDMA_GLPCI_LBARCTRL,
+ IRDMA_GLPE_CPUSTATUS0,
+ IRDMA_GLPE_CPUSTATUS1,
+ IRDMA_GLPE_CPUSTATUS2,
+ IRDMA_PFINT_AEQCTL,
+ IRDMA_GLINT_CEQCTL,
+ IRDMA_VSIQF_PE_CTL1,
+ IRDMA_PFHMC_PDINV,
+ IRDMA_GLHMC_VFPDINV,
+ IRDMA_GLPE_CRITERR,
+ IRDMA_GLINT_RATE,
+ IRDMA_MAX_REGS, /* Must be last entry */
+};
+
+enum irdma_shifts {
+ IRDMA_CCQPSTATUS_CCQP_DONE_S,
+ IRDMA_CCQPSTATUS_CCQP_ERR_S,
+ IRDMA_CQPSQ_STAG_PDID_S,
+ IRDMA_CQPSQ_CQ_CEQID_S,
+ IRDMA_CQPSQ_CQ_CQID_S,
+ IRDMA_COMMIT_FPM_CQCNT_S,
+ IRDMA_MAX_SHIFTS,
+};
+
+enum irdma_masks {
+ IRDMA_CCQPSTATUS_CCQP_DONE_M,
+ IRDMA_CCQPSTATUS_CCQP_ERR_M,
+ IRDMA_CQPSQ_STAG_PDID_M,
+ IRDMA_CQPSQ_CQ_CEQID_M,
+ IRDMA_CQPSQ_CQ_CQID_M,
+ IRDMA_COMMIT_FPM_CQCNT_M,
+ IRDMA_MAX_MASKS, /* Must be last entry */
+};
+
+#define IRDMA_MAX_MGS_PER_CTX 8
+
+struct irdma_mcast_grp_ctx_entry_info {
+ u32 qp_id;
+ bool valid_entry;
+ u16 dest_port;
+ u32 use_cnt;
+};
+
+struct irdma_mcast_grp_info {
+ u8 dest_mac_addr[ETH_ALEN];
+ u16 vlan_id;
+ u8 hmc_fcn_id;
+ bool ipv4_valid:1;
+ bool vlan_valid:1;
+ u16 mg_id;
+ u32 no_of_mgs;
+ u32 dest_ip_addr[4];
+ u16 qs_handle;
+ struct irdma_dma_mem dma_mem_mc;
+ struct irdma_mcast_grp_ctx_entry_info mg_ctx_info[IRDMA_MAX_MGS_PER_CTX];
+};
+
+enum irdma_vers {
+ IRDMA_GEN_RSVD,
+ IRDMA_GEN_1,
+ IRDMA_GEN_2,
+};
+
+struct irdma_uk_attrs {
+ u64 feature_flags;
+ u32 max_hw_wq_frags;
+ u32 max_hw_read_sges;
+ u32 max_hw_inline;
+ u32 max_hw_rq_quanta;
+ u32 max_hw_wq_quanta;
+ u32 min_hw_cq_size;
+ u32 max_hw_cq_size;
+ u16 max_hw_sq_chunk;
+ u16 min_hw_wq_size;
+ u8 hw_rev;
+};
+
+struct irdma_hw_attrs {
+ struct irdma_uk_attrs uk_attrs;
+ u64 max_hw_outbound_msg_size;
+ u64 max_hw_inbound_msg_size;
+ u64 max_mr_size;
+ u64 page_size_cap;
+ u32 min_hw_qp_id;
+ u32 min_hw_aeq_size;
+ u32 max_hw_aeq_size;
+ u32 min_hw_ceq_size;
+ u32 max_hw_ceq_size;
+ u32 max_hw_device_pages;
+ u32 max_hw_vf_fpm_id;
+ u32 first_hw_vf_fpm_id;
+ u32 max_hw_ird;
+ u32 max_hw_ord;
+ u32 max_hw_wqes;
+ u32 max_hw_pds;
+ u32 max_hw_ena_vf_count;
+ u32 max_qp_wr;
+ u32 max_pe_ready_count;
+ u32 max_done_count;
+ u32 max_sleep_count;
+ u32 max_cqp_compl_wait_time_ms;
+ u16 max_stat_inst;
+ u16 max_stat_idx;
+};
+
+void i40iw_init_hw(struct irdma_sc_dev *dev);
+void icrdma_init_hw(struct irdma_sc_dev *dev);
+#endif /* IRDMA_H */
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
new file mode 100644
index 0000000000..be1030d1ad
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+#include "../../../net/ethernet/intel/ice/ice.h"
+
+MODULE_ALIAS("i40iw");
+MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct notifier_block irdma_inetaddr_notifier = {
+ .notifier_call = irdma_inetaddr_event
+};
+
+static struct notifier_block irdma_inetaddr6_notifier = {
+ .notifier_call = irdma_inet6addr_event
+};
+
+static struct notifier_block irdma_net_notifier = {
+ .notifier_call = irdma_net_event
+};
+
+static struct notifier_block irdma_netdevice_notifier = {
+ .notifier_call = irdma_netdevice_event
+};
+
+static void irdma_register_notifiers(void)
+{
+ register_inetaddr_notifier(&irdma_inetaddr_notifier);
+ register_inet6addr_notifier(&irdma_inetaddr6_notifier);
+ register_netevent_notifier(&irdma_net_notifier);
+ register_netdevice_notifier(&irdma_netdevice_notifier);
+}
+
+static void irdma_unregister_notifiers(void)
+{
+ unregister_netevent_notifier(&irdma_net_notifier);
+ unregister_inetaddr_notifier(&irdma_inetaddr_notifier);
+ unregister_inet6addr_notifier(&irdma_inetaddr6_notifier);
+ unregister_netdevice_notifier(&irdma_netdevice_notifier);
+}
+
+static void irdma_prep_tc_change(struct irdma_device *iwdev)
+{
+ iwdev->vsi.tc_change_pending = true;
+ irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
+
+ /* Wait for all qp's to suspend */
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
+ irdma_ws_reset(&iwdev->vsi);
+}
+
+static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
+{
+ if (mtu < IRDMA_MIN_MTU_IPV4)
+ ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
+ else if (mtu < IRDMA_MIN_MTU_IPV6)
+		ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu);
+}
+
+static void irdma_fill_qos_info(struct irdma_l2params *l2params,
+ struct iidc_qos_params *qos_info)
+{
+ int i;
+
+ l2params->num_tc = qos_info->num_tc;
+ l2params->vsi_prio_type = qos_info->vport_priority_type;
+ l2params->vsi_rel_bw = qos_info->vport_relative_bw;
+ for (i = 0; i < l2params->num_tc; i++) {
+ l2params->tc_info[i].egress_virt_up =
+ qos_info->tc_info[i].egress_virt_up;
+ l2params->tc_info[i].ingress_virt_up =
+ qos_info->tc_info[i].ingress_virt_up;
+ l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
+ l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
+ l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
+ }
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ l2params->up2tc[i] = qos_info->up2tc[i];
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
+ l2params->dscp_mode = true;
+ memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
+ }
+}
+
+static void irdma_iidc_event_handler(struct ice_pf *pf, struct iidc_event *event)
+{
+ struct irdma_device *iwdev = dev_get_drvdata(&pf->adev->dev);
+ struct irdma_l2params l2params = {};
+
+ if (*event->type & BIT(IIDC_EVENT_AFTER_MTU_CHANGE)) {
+ ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
+ if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
+ l2params.mtu = iwdev->netdev->mtu;
+ l2params.mtu_changed = true;
+ irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ }
+ } else if (*event->type & BIT(IIDC_EVENT_BEFORE_TC_CHANGE)) {
+ if (iwdev->vsi.tc_change_pending)
+ return;
+
+ irdma_prep_tc_change(iwdev);
+ } else if (*event->type & BIT(IIDC_EVENT_AFTER_TC_CHANGE)) {
+ struct iidc_qos_params qos_info = {};
+
+ if (!iwdev->vsi.tc_change_pending)
+ return;
+
+ l2params.tc_changed = true;
+ ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
+ ice_get_qos_params(pf, &qos_info);
+ irdma_fill_qos_info(&l2params, &qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = qos_info.num_tc > 1 && !l2params.dscp_mode;
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ } else if (*event->type & BIT(IIDC_EVENT_CRIT_ERR)) {
+ ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
+ event->reg);
+ if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
+ u32 pe_criterr;
+
+ pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
+#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
+ if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
+ ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
+ pe_criterr);
+ iwdev->rf->reset = true;
+ } else {
+ ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
+ }
+ }
+ if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
+ ibdev_err(&iwdev->ibdev, "HMC Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
+ ibdev_err(&iwdev->ibdev, "PE Push Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (iwdev->rf->reset)
+ iwdev->rf->gen_ops.request_reset(iwdev->rf);
+ }
+}
+
+/**
+ * irdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void irdma_request_reset(struct irdma_pci_f *rf)
+{
+ struct ice_pf *pf = rf->cdev;
+
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ ice_rdma_request_reset(pf, IIDC_PFR);
+}
+
+/**
+ * irdma_lan_register_qset - Register qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+ int ret;
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ ret = ice_add_rdma_qset(pf, &qset);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
+ return ret;
+ }
+
+ tc_node->l2_sched_node_id = qset.teid;
+ vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
+
+ return 0;
+}
+
+/**
+ * irdma_lan_unregister_qset - Unregister qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct ice_pf *pf = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ qset.teid = tc_node->l2_sched_node_id;
+
+ if (ice_del_rdma_qset(pf, &qset))
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
+}
+
+static void irdma_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
+ struct iidc_auxiliary_dev,
+ adev);
+ struct ice_pf *pf = iidc_adev->pf;
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+
+ irdma_ib_unregister_device(iwdev);
+ ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
+
+ pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(pf->pdev->devfn));
+}
+
+static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf,
+ struct ice_vsi *vsi)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ rf->cdev = pf;
+ rf->gen_ops.register_qset = irdma_lan_register_qset;
+ rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
+ rf->hw.hw_addr = pf->hw.hw_addr;
+ rf->pcidev = pf->pdev;
+ rf->msix_count = pf->num_rdma_msix;
+ rf->pf_id = pf->hw.pf_id;
+ rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
+ rf->default_vsi.vsi_idx = vsi->vsi_num;
+ rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
+ rf->rdma_ver = IRDMA_GEN_2;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = irdma_request_reset;
+ rf->limits_sel = 7;
+ rf->iwdev = iwdev;
+ mutex_init(&iwdev->ah_tbl_lock);
+ iwdev->netdev = vsi->netdev;
+ iwdev->vsi_num = vsi->vsi_num;
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
+ iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY)
+ iwdev->roce_mode = true;
+}
+
+static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
+{
+ struct iidc_auxiliary_dev *iidc_adev = container_of(aux_dev,
+ struct iidc_auxiliary_dev,
+ adev);
+ struct ice_pf *pf = iidc_adev->pf;
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct iidc_qos_params qos_info = {};
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ struct irdma_l2params l2params = {};
+ int err;
+
+ if (!vsi)
+ return -EIO;
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ return -ENOMEM;
+ }
+
+ irdma_fill_device_info(iwdev, pf, vsi);
+ rf = iwdev->rf;
+
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
+ goto err_ctrl_init;
+
+ l2params.mtu = iwdev->netdev->mtu;
+ ice_get_qos_params(pf, &qos_info);
+ irdma_fill_qos_info(&l2params, &qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
+
+ err = irdma_rt_init_hw(iwdev, &l2params);
+ if (err)
+ goto err_rt_init;
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);
+
+ ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
+ auxiliary_set_drvdata(aux_dev, iwdev);
+
+ return 0;
+
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ kfree(iwdev->rf);
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
+ {.name = "ice.iwarp", },
+ {.name = "ice.roce", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
+
+static struct iidc_auxiliary_drv irdma_auxiliary_drv = {
+ .adrv = {
+ .id_table = irdma_auxiliary_id_table,
+ .probe = irdma_probe,
+ .remove = irdma_remove,
+ },
+ .event_handler = irdma_iidc_event_handler,
+};
+
+static int __init irdma_init_module(void)
+{
+ int ret;
+
+ ret = auxiliary_driver_register(&i40iw_auxiliary_drv);
+ if (ret) {
+ pr_err("Failed i40iw(gen_1) auxiliary_driver_register() ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = auxiliary_driver_register(&irdma_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed irdma auxiliary_driver_register() ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ irdma_register_notifiers();
+
+ return 0;
+}
+
+static void __exit irdma_exit_module(void)
+{
+ irdma_unregister_notifiers();
+ auxiliary_driver_unregister(&irdma_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+}
+
+module_init(irdma_init_module);
+module_exit(irdma_exit_module);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
new file mode 100644
index 0000000000..cbf0db72e1
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -0,0 +1,557 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_MAIN_H
+#define IRDMA_MAIN_H
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+#include <net/netevent.h>
+#include <net/tcp.h>
+#include <net/ip6_route.h>
+#include <net/flow.h>
+#include <net/secure_seq.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/crc32c.h>
+#include <linux/kthread.h>
+#ifndef CONFIG_64BIT
+#include <linux/io-64-nonatomic-lo-hi.h>
+#endif
+#include <linux/auxiliary_bus.h>
+#include <linux/net/intel/iidc.h>
+#include <crypto/hash.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
+#include <rdma/uverbs_ioctl.h>
+#include "osdep.h"
+#include "defs.h"
+#include "hmc.h"
+#include "type.h"
+#include "ws.h"
+#include "protos.h"
+#include "pble.h"
+#include "cm.h"
+#include <rdma/irdma-abi.h>
+#include "verbs.h"
+#include "user.h"
+#include "puda.h"
+
+extern struct auxiliary_driver i40iw_auxiliary_drv;
+
+#define IRDMA_FW_VER_DEFAULT 2
+#define IRDMA_HW_VER 2
+
+#define IRDMA_ARP_ADD 1
+#define IRDMA_ARP_DELETE 2
+#define IRDMA_ARP_RESOLVE 3
+
+#define IRDMA_MACIP_ADD 1
+#define IRDMA_MACIP_DELETE 2
+
+#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
+#define IW_CEQ_SIZE 2048
+#define IW_AEQ_SIZE 2048
+
+#define RX_BUF_SIZE (1536 + 8)
+#define IW_REG0_SIZE (4 * 1024)
+#define IW_TX_TIMEOUT (6 * HZ)
+#define IW_FIRST_QPN 1
+
+#define IW_SW_CONTEXT_ALIGN 1024
+
+#define MAX_DPC_ITERATIONS 128
+
+#define IRDMA_EVENT_TIMEOUT_MS 5000
+#define IRDMA_VCHNL_EVENT_TIMEOUT 100000
+#define IRDMA_RST_TIMEOUT_HZ 4
+
+#define IRDMA_NO_QSET 0xffff
+
+#define IW_CFG_FPM_QP_COUNT 32768
+#define IRDMA_MAX_PAGES_PER_FMR 262144
+#define IRDMA_MIN_PAGES_PER_FMR 1
+#define IRDMA_CQP_COMPL_RQ_WQE_FLUSHED 2
+#define IRDMA_CQP_COMPL_SQ_WQE_FLUSHED 3
+
+#define IRDMA_Q_TYPE_PE_AEQ 0x80
+#define IRDMA_Q_INVALID_IDX 0xffff
+#define IRDMA_REM_ENDPOINT_TRK_QPID 3
+
+#define IRDMA_DRV_OPT_ENA_MPA_VER_0 0x00000001
+#define IRDMA_DRV_OPT_DISABLE_MPA_CRC 0x00000002
+#define IRDMA_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
+#define IRDMA_DRV_OPT_DISABLE_INTF 0x00000008
+#define IRDMA_DRV_OPT_ENA_MSI 0x00000010
+#define IRDMA_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
+#define IRDMA_DRV_OPT_NO_INLINE_DATA 0x00000080
+#define IRDMA_DRV_OPT_DISABLE_INT_MOD 0x00000100
+#define IRDMA_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
+#define IRDMA_DRV_OPT_ENA_PAU 0x00000400
+#define IRDMA_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
+
+#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
+#define IRDMA_ROCE_CWND_DEFAULT 0x400
+#define IRDMA_ROCE_ACKCREDS_DEFAULT 0x1E
+
+#define IRDMA_FLUSH_SQ BIT(0)
+#define IRDMA_FLUSH_RQ BIT(1)
+#define IRDMA_REFLUSH BIT(2)
+#define IRDMA_FLUSH_WAIT BIT(3)
+
+#define IRDMA_IRQ_NAME_STR_LEN (64)
+
+enum init_completion_state {
+ INVALID_STATE = 0,
+ INITIAL_STATE,
+ CQP_CREATED,
+ HMC_OBJS_CREATED,
+ HW_RSRC_INITIALIZED,
+ CCQ_CREATED,
+ CEQ0_CREATED, /* Last state of probe */
+ ILQ_CREATED,
+ IEQ_CREATED,
+ CEQS_CREATED,
+ PBLE_CHUNK_MEM,
+ AEQ_CREATED,
+ IP_ADDR_REGISTERED, /* Last state of open */
+};
+
+struct irdma_rsrc_limits {
+ u32 qplimit;
+ u32 mrlimit;
+ u32 cqlimit;
+};
+
+struct irdma_cqp_err_info {
+ u16 maj;
+ u16 min;
+ const char *desc;
+};
+
+struct irdma_cqp_compl_info {
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ bool error;
+ u8 op_code;
+};
+
+struct irdma_cqp_request {
+ struct cqp_cmds_info info;
+ wait_queue_head_t waitq;
+ struct list_head list;
+ refcount_t refcnt;
+ void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
+ void *param;
+ struct irdma_cqp_compl_info compl_info;
+ bool request_done; /* READ/WRITE_ONCE macros operate on it */
+ bool waiting:1;
+ bool dynamic:1;
+};
+
+struct irdma_cqp {
+ struct irdma_sc_cqp sc_cqp;
+ spinlock_t req_lock; /* protect CQP request list */
+ spinlock_t compl_lock; /* protect CQP completion processing */
+ wait_queue_head_t waitq;
+ wait_queue_head_t remove_wq;
+ struct irdma_dma_mem sq;
+ struct irdma_dma_mem host_ctx;
+ u64 *scratch_array;
+ struct irdma_cqp_request *cqp_requests;
+ struct list_head cqp_avail_reqs;
+ struct list_head cqp_pending_reqs;
+};
+
+struct irdma_ccq {
+ struct irdma_sc_cq sc_cq;
+ struct irdma_dma_mem mem_cq;
+ struct irdma_dma_mem shadow_area;
+};
+
+struct irdma_ceq {
+ struct irdma_sc_ceq sc_ceq;
+ struct irdma_dma_mem mem;
+ u32 irq;
+ u32 msix_idx;
+ struct irdma_pci_f *rf;
+ struct tasklet_struct dpc_tasklet;
+ spinlock_t ce_lock; /* sync cq destroy with cq completion event notification */
+};
+
+struct irdma_aeq {
+ struct irdma_sc_aeq sc_aeq;
+ struct irdma_dma_mem mem;
+ struct irdma_pble_alloc palloc;
+ bool virtual_map;
+};
+
+struct irdma_arp_entry {
+ u32 ip_addr[4];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct irdma_msix_vector {
+ u32 idx;
+ u32 irq;
+ u32 cpu_affinity;
+ u32 ceq_id;
+ cpumask_t mask;
+ char name[IRDMA_IRQ_NAME_STR_LEN];
+};
+
+struct irdma_mc_table_info {
+ u32 mgn;
+ u32 dest_ip[4];
+ bool lan_fwd:1;
+ bool ipv4_valid:1;
+};
+
+struct mc_table_list {
+ struct list_head list;
+ struct irdma_mc_table_info mc_info;
+ struct irdma_mcast_grp_info mc_grp_ctx;
+};
+
+struct irdma_qv_info {
+ u32 v_idx; /* msix_vector */
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct irdma_qvlist_info {
+ u32 num_vectors;
+ struct irdma_qv_info qv_info[];
+};
+
+struct irdma_gen_ops {
+ void (*request_reset)(struct irdma_pci_f *rf);
+ int (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ void (*unregister_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+};
+
+struct irdma_pci_f {
+ bool reset:1;
+ bool rsrc_created:1;
+ bool msix_shared:1;
+ u8 rsrc_profile;
+ u8 *hmc_info_mem;
+ u8 *mem_rsrc;
+ u8 rdma_ver;
+ u8 rst_to;
+ u8 pf_id;
+ enum irdma_protocol_used protocol_used;
+ u32 sd_type;
+ u32 msix_count;
+ u32 max_mr;
+ u32 max_qp;
+ u32 max_cq;
+ u32 max_ah;
+ u32 next_ah;
+ u32 max_mcg;
+ u32 next_mcg;
+ u32 max_pd;
+ u32 next_qp;
+ u32 next_cq;
+ u32 next_pd;
+ u32 max_mr_size;
+ u32 max_cqe;
+ u32 mr_stagmask;
+ u32 used_pds;
+ u32 used_cqs;
+ u32 used_mrs;
+ u32 used_qps;
+ u32 arp_table_size;
+ u32 next_arp_index;
+ u32 ceqs_count;
+ u32 next_ws_node_id;
+ u32 max_ws_node_id;
+ u32 limits_sel;
+ unsigned long *allocated_ws_nodes;
+ unsigned long *allocated_qps;
+ unsigned long *allocated_cqs;
+ unsigned long *allocated_mrs;
+ unsigned long *allocated_pds;
+ unsigned long *allocated_mcgs;
+ unsigned long *allocated_ahs;
+ unsigned long *allocated_arps;
+ enum init_completion_state init_state;
+ struct irdma_sc_dev sc_dev;
+ struct pci_dev *pcidev;
+ void *cdev;
+ struct irdma_hw hw;
+ struct irdma_cqp cqp;
+ struct irdma_ccq ccq;
+ struct irdma_aeq aeq;
+ struct irdma_ceq *ceqlist;
+ struct irdma_hmc_pble_rsrc *pble_rsrc;
+ struct irdma_arp_entry *arp_table;
+	spinlock_t arp_lock; /* protect ARP table access */
+	spinlock_t rsrc_lock; /* protect HW resource array access */
+	spinlock_t qptable_lock; /* protect QP table access */
+	spinlock_t cqtable_lock; /* protect CQ table access */
+ struct irdma_qp **qp_table;
+ struct irdma_cq **cq_table;
+ spinlock_t qh_list_lock; /* protect mc_qht_list */
+ struct mc_table_list mc_qht_list;
+ struct irdma_msix_vector *iw_msixtbl;
+ struct irdma_qvlist_info *iw_qvlist;
+ struct tasklet_struct dpc_tasklet;
+ struct msix_entry *msix_entries;
+ struct irdma_dma_mem obj_mem;
+ struct irdma_dma_mem obj_next;
+ atomic_t vchnl_msgs;
+ wait_queue_head_t vchnl_waitq;
+ struct workqueue_struct *cqp_cmpl_wq;
+ struct work_struct cqp_cmpl_work;
+ struct irdma_sc_vsi default_vsi;
+ void *back_fcn;
+ struct irdma_gen_ops gen_ops;
+ struct irdma_device *iwdev;
+};
+
+struct irdma_device {
+ struct ib_device ibdev;
+ struct irdma_pci_f *rf;
+ struct net_device *netdev;
+ struct workqueue_struct *cleanup_wq;
+ struct irdma_sc_vsi vsi;
+ struct irdma_cm_core cm_core;
+ DECLARE_HASHTABLE(ah_hash_tbl, 8);
+ struct mutex ah_tbl_lock; /* protect AH hash table access */
+ u32 roce_cwnd;
+ u32 roce_ackcreds;
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 push_mode;
+ u32 rcv_wnd;
+ u16 mac_ip_table_idx;
+ u16 vsi_num;
+ u8 rcv_wscale;
+ u8 iw_status;
+ bool roce_mode:1;
+ bool roce_dcqcn_en:1;
+ bool dcb_vlan_mode:1;
+ bool iw_ooo:1;
+ enum init_completion_state init_state;
+
+ wait_queue_head_t suspend_wq;
+};
+
+static inline struct irdma_device *to_iwdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct irdma_device, ibdev);
+}
+
+static inline struct irdma_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct irdma_ucontext, ibucontext);
+}
+
+static inline struct irdma_user_mmap_entry *
+to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct irdma_user_mmap_entry,
+ rdma_entry);
+}
+
+static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct irdma_pd, ibpd);
+}
+
+static inline struct irdma_ah *to_iwah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct irdma_ah, ibah);
+}
+
+static inline struct irdma_mr *to_iwmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct irdma_mr, ibmr);
+}
+
+static inline struct irdma_mr *to_iwmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct irdma_mr, ibmw);
+}
+
+static inline struct irdma_cq *to_iwcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct irdma_cq, ibcq);
+}
+
+static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct irdma_qp, ibqp);
+}
+
+static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
+{
+ return container_of(dev, struct irdma_pci_f, sc_dev);
+}
+
+/**
+ * irdma_alloc_rsrc - allocate a resource
+ * @rf: RDMA PCI function
+ * @rsrc_array: resource bit array
+ * @max_rsrc: maximum resource number
+ * @req_rsrc_num: allocated resource number
+ * @next: next free id
+ **/
+static inline int irdma_alloc_rsrc(struct irdma_pci_f *rf,
+ unsigned long *rsrc_array, u32 max_rsrc,
+ u32 *req_rsrc_num, u32 *next)
+{
+ u32 rsrc_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rsrc_num = find_next_zero_bit(rsrc_array, max_rsrc, *next);
+ if (rsrc_num >= max_rsrc) {
+ rsrc_num = find_first_zero_bit(rsrc_array, max_rsrc);
+ if (rsrc_num >= max_rsrc) {
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ ibdev_dbg(&rf->iwdev->ibdev,
+ "ERR: resource [%d] allocation failed\n",
+ rsrc_num);
+ return -EOVERFLOW;
+ }
+ }
+ __set_bit(rsrc_num, rsrc_array);
+ *next = rsrc_num + 1;
+ if (*next == max_rsrc)
+ *next = 0;
+ *req_rsrc_num = rsrc_num;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_free_rsrc - free a resource
+ * @rf: RDMA PCI function
+ * @rsrc_array: resource bit array for the resource number
+ * @rsrc_num: resource number to free
+ **/
+static inline void irdma_free_rsrc(struct irdma_pci_f *rf,
+ unsigned long *rsrc_array, u32 rsrc_num)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ __clear_bit(rsrc_num, rsrc_array);
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+}
+
+int irdma_ctrl_init_hw(struct irdma_pci_f *rf);
+void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf);
+int irdma_rt_init_hw(struct irdma_device *iwdev,
+ struct irdma_l2params *l2params);
+void irdma_rt_deinit_hw(struct irdma_device *iwdev);
+void irdma_qp_add_ref(struct ib_qp *ibqp);
+void irdma_qp_rem_ref(struct ib_qp *ibqp);
+void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
+struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
+void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
+ u32 *ip_addr, bool ipv4, u32 action);
+struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
+void irdma_del_apbvt(struct irdma_device *iwdev,
+ struct irdma_apbvt_entry *entry);
+struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
+ bool wait);
+void irdma_free_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request);
+void irdma_put_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request);
+int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx);
+void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
+
+u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
+void irdma_port_ibevent(struct irdma_device *iwdev);
+void irdma_cm_disconn(struct irdma_qp *qp);
+
+bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
+ u16 maj_err_code, u16 min_err_code);
+int irdma_handle_cqp_op(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request);
+
+int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata);
+int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+void irdma_cq_add_ref(struct ib_cq *ibcq);
+void irdma_cq_rem_ref(struct ib_cq *ibcq);
+void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
+
+void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
+int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
+ struct irdma_modify_qp_info *info, bool wait);
+int irdma_qp_suspend_resume(struct irdma_sc_qp *qp, bool suspend);
+int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
+ enum irdma_quad_entry_type etype,
+ enum irdma_quad_hash_manage_type mtype, void *cmnode,
+ bool wait);
+void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
+void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
+void irdma_free_qp_rsrc(struct irdma_qp *iwqp);
+int irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
+void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
+void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
+ u8 term_len);
+int irdma_send_syn(struct irdma_cm_node *cm_node, u32 sendack);
+int irdma_send_reset(struct irdma_cm_node *cm_node);
+struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
+ u16 rem_port, u32 *rem_addr, u16 loc_port,
+ u32 *loc_addr, u16 vlan_id);
+int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, bool wait);
+void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
+ struct irdma_gen_ae_info *info, bool wait);
+void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
+void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
+u16 irdma_get_vlan_ipv4(u32 *addr);
+void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
+struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
+ int acc, u64 *iova_start);
+int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
+void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
+int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
+ bool wait,
+ void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
+ void *cb_param);
+void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+bool irdma_cq_empty(struct irdma_cq *iwcq);
+int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+int irdma_net_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr);
+void irdma_add_ip(struct irdma_device *iwdev);
+void cqp_compl_worker(struct work_struct *work);
+#endif /* IRDMA_MAIN_H */
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
new file mode 100644
index 0000000000..fc1ba2a3e6
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_OSDEP_H
+#define IRDMA_OSDEP_H
+
+#include <linux/pci.h>
+#include <linux/bitfield.h>
+#include <linux/net/intel/iidc.h>
+#include <crypto/hash.h>
+#include <rdma/ib_verbs.h>
+
+#define STATS_TIMER_DELAY 60000
+
+struct irdma_dma_info {
+ dma_addr_t *dmaaddrs;
+};
+
+struct irdma_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+} __packed;
+
+struct irdma_virt_mem {
+ void *va;
+ u32 size;
+} __packed;
+
+struct irdma_sc_vsi;
+struct irdma_sc_dev;
+struct irdma_sc_qp;
+struct irdma_puda_buf;
+struct irdma_puda_cmpl_info;
+struct irdma_update_sds_info;
+struct irdma_hmc_fcn_info;
+struct irdma_manage_vf_pble_info;
+struct irdma_hw;
+struct irdma_pci_f;
+
+struct ib_device *to_ibdev(struct irdma_sc_dev *dev);
+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
+bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
+void irdma_add_dev_ref(struct irdma_sc_dev *dev);
+void irdma_put_dev_ref(struct irdma_sc_dev *dev);
+int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
+ u32 val);
+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf);
+void irdma_send_ieq_ack(struct irdma_sc_qp *qp);
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
+ u32 seqnum);
+void irdma_free_hash_desc(struct shash_desc *hash_desc);
+int irdma_init_hash_desc(struct shash_desc **hash_desc);
+int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf);
+int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_fcn_info *hmcfcninfo,
+ u16 *pmf_idx);
+int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *mem);
+void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
+void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
+ u8 term_len);
+void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred);
+void irdma_terminate_start_timer(struct irdma_sc_qp *qp);
+void irdma_terminate_del_timer(struct irdma_sc_qp *qp);
+void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi);
+void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi);
+void wr32(struct irdma_hw *hw, u32 reg, u32 val);
+u32 rd32(struct irdma_hw *hw, u32 reg);
+u64 rd64(struct irdma_hw *hw, u32 reg);
+int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
+ u32 pg_cnt);
+void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt);
+#endif /* IRDMA_OSDEP_H */
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
new file mode 100644
index 0000000000..c0bef11436
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "pble.h"
+
+static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
+
+/**
+ * irdma_destroy_pble_prm - destroy prm during module unload
+ * @pble_rsrc: pble resources
+ */
+void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+{
+ struct irdma_chunk *chunk;
+ struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;
+
+ while (!list_empty(&pinfo->clist)) {
+ chunk = (struct irdma_chunk *) pinfo->clist.next;
+ list_del(&chunk->list);
+ if (chunk->type == PBLE_SD_PAGED)
+ irdma_pble_free_paged_mem(chunk);
+ bitmap_free(chunk->bitmapbuf);
+ kfree(chunk->chunkmem.va);
+ }
+}
+
+/**
+ * irdma_hmc_init_pble - Initialize pble resources during module load
+ * @dev: irdma_sc_dev struct
+ * @pble_rsrc: pble resources
+ */
+int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ struct irdma_hmc_pble_rsrc *pble_rsrc)
+{
+ struct irdma_hmc_info *hmc_info;
+ u32 fpm_idx = 0;
+ int status = 0;
+
+ hmc_info = dev->hmc_info;
+ pble_rsrc->dev = dev;
+ pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
+ /* Start pbles on a 4K boundary */
+ if (pble_rsrc->fpm_base_addr & 0xfff)
+ fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
+ pble_rsrc->unallocated_pble =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
+ pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
+ pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;
+
+ mutex_init(&pble_rsrc->pble_mutex_lock);
+
+ spin_lock_init(&pble_rsrc->pinfo.prm_lock);
+ INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
+ if (add_pble_prm(pble_rsrc)) {
+ irdma_destroy_pble_prm(pble_rsrc);
+ status = -ENOMEM;
+ }
+
+ return status;
+}
+
+/**
+ * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
+ * @pble_rsrc: structure containing fpm address
+ * @idx: where to return indexes
+ */
+static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct sd_pd_idx *idx)
+{
+ idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
+ idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
+}
+
+/**
+ * add_sd_direct - add sd direct for pble
+ * @pble_rsrc: pble resource ptr
+ * @info: page info for sd
+ */
+static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_add_page_info *info)
+{
+ struct irdma_sc_dev *dev = pble_rsrc->dev;
+ int ret_code = 0;
+ struct sd_pd_idx *idx = &info->idx;
+ struct irdma_chunk *chunk = info->chunk;
+ struct irdma_hmc_info *hmc_info = info->hmc_info;
+ struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
+ u32 offset = 0;
+
+ if (!sd_entry->valid) {
+ ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
+ info->idx.sd_idx,
+ IRDMA_SD_TYPE_DIRECT,
+ IRDMA_HMC_DIRECT_BP_SIZE);
+ if (ret_code)
+ return ret_code;
+
+ chunk->type = PBLE_SD_CONTIGOUS;
+ }
+
+ offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
+ chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
+ chunk->vaddr = sd_entry->u.bp.addr.va + offset;
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ ibdev_dbg(to_ibdev(dev),
+ "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
+ chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
+
+ return 0;
+}
+
+/**
+ * fpm_to_idx - given fpm address, get pble index
+ * @pble_rsrc: pble resource management
+ * @addr: fpm address for index
+ */
+static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
+{
+ u64 idx;
+
+ idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;
+
+ return (u32)idx;
+}
+
+/**
+ * add_bp_pages - add backing pages for sd
+ * @pble_rsrc: pble resource management
+ * @info: page info for sd
+ */
+static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_add_page_info *info)
+{
+ struct irdma_sc_dev *dev = pble_rsrc->dev;
+ u8 *addr;
+ struct irdma_dma_mem mem;
+ struct irdma_hmc_pd_entry *pd_entry;
+ struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
+ struct irdma_hmc_info *hmc_info = info->hmc_info;
+ struct irdma_chunk *chunk = info->chunk;
+ int status = 0;
+ u32 rel_pd_idx = info->idx.rel_pd_idx;
+ u32 pd_idx = info->idx.pd_idx;
+ u32 i;
+
+ if (irdma_pble_get_paged_mem(chunk, info->pages))
+ return -ENOMEM;
+
+ status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
+ IRDMA_SD_TYPE_PAGED,
+ IRDMA_HMC_DIRECT_BP_SIZE);
+ if (status)
+ goto error;
+
+ addr = chunk->vaddr;
+ for (i = 0; i < info->pages; i++) {
+ mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
+ mem.size = 4096;
+ mem.va = addr;
+ pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
+ if (!pd_entry->valid) {
+ status = irdma_add_pd_table_entry(dev, hmc_info,
+ pd_idx++, &mem);
+ if (status)
+ goto error;
+
+ addr += 4096;
+ }
+ }
+
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ return 0;
+
+error:
+ irdma_pble_free_paged_mem(chunk);
+
+ return status;
+}
+
+/**
+ * irdma_get_type - get the sd entry type for an sd
+ * @dev: irdma_sc_dev struct
+ * @idx: index of sd
+ * @pages: pages in the sd
+ */
+static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
+ struct sd_pd_idx *idx, u32 pages)
+{
+ enum irdma_sd_entry_type sd_entry_type;
+
+ sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ return sd_entry_type;
+}
+
+/**
+ * add_pble_prm - add an sd entry for pble resource
+ * @pble_rsrc: pble resource management
+ */
+static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+{
+ struct irdma_sc_dev *dev = pble_rsrc->dev;
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_chunk *chunk;
+ struct irdma_add_page_info info;
+ struct sd_pd_idx *idx = &info.idx;
+ int ret_code = 0;
+ enum irdma_sd_entry_type sd_entry_type;
+ u64 sd_reg_val = 0;
+ struct irdma_virt_mem chunkmem;
+ u32 pages;
+
+ if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
+ return -ENOMEM;
+
+ if (pble_rsrc->next_fpm_addr & 0xfff)
+ return -EINVAL;
+
+ chunkmem.size = sizeof(*chunk);
+ chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
+ if (!chunkmem.va)
+ return -ENOMEM;
+
+ chunk = chunkmem.va;
+ chunk->chunkmem = chunkmem;
+ hmc_info = dev->hmc_info;
+ chunk->dev = dev;
+ chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+ get_sd_pd_idx(pble_rsrc, idx);
+ sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
+ pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
+ IRDMA_HMC_PD_CNT_IN_SD;
+ pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
+ info.chunk = chunk;
+ info.hmc_info = hmc_info;
+ info.pages = pages;
+ info.sd_entry = sd_entry;
+ if (!sd_entry->valid)
+ sd_entry_type = irdma_get_type(dev, idx, pages);
+ else
+ sd_entry_type = sd_entry->entry_type;
+
+ ibdev_dbg(to_ibdev(dev),
+ "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
+ pages, pble_rsrc->unallocated_pble,
+ pble_rsrc->next_fpm_addr);
+ ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
+ if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
+ ret_code = add_sd_direct(pble_rsrc, &info);
+
+ if (ret_code)
+ sd_entry_type = IRDMA_SD_TYPE_PAGED;
+ else
+ pble_rsrc->stats_direct_sds++;
+
+ if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
+ ret_code = add_bp_pages(pble_rsrc, &info);
+ if (ret_code)
+ goto error;
+ else
+ pble_rsrc->stats_paged_sds++;
+ }
+
+ ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
+ if (ret_code)
+ goto error;
+
+ pble_rsrc->next_fpm_addr += chunk->size;
+ ibdev_dbg(to_ibdev(dev),
+ "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
+ pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
+ pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
+ sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
+ sd_entry->u.pd_table.pd_page_addr.pa :
+ sd_entry->u.bp.addr.pa;
+
+ if (!sd_entry->valid) {
+ ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
+ idx->sd_idx, sd_entry->entry_type, true);
+ if (ret_code)
+ goto error;
+ }
+
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ sd_entry->valid = true;
+ return 0;
+
+error:
+ bitmap_free(chunk->bitmapbuf);
+ kfree(chunk->chunkmem.va);
+
+ return ret_code;
+}
+
+/**
+ * free_lvl2 - free level 2 pble
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ */
+static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ u32 i;
+ struct irdma_pble_level2 *lvl2 = &palloc->level2;
+ struct irdma_pble_info *root = &lvl2->root;
+ struct irdma_pble_info *leaf = lvl2->leaf;
+
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ if (leaf->addr)
+ irdma_prm_return_pbles(&pble_rsrc->pinfo,
+ &leaf->chunkinfo);
+ else
+ break;
+ }
+
+ if (root->addr)
+ irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);
+
+ kfree(lvl2->leafmem.va);
+ lvl2->leaf = NULL;
+}
+
+/**
+ * get_lvl2_pble - get level 2 pble resource
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ */
+static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ u32 lf4k, lflast, total, i;
+ u32 pblcnt = PBLE_PER_PAGE;
+ u64 *addr;
+ struct irdma_pble_level2 *lvl2 = &palloc->level2;
+ struct irdma_pble_info *root = &lvl2->root;
+ struct irdma_pble_info *leaf;
+ int ret_code;
+ u64 fpm_addr;
+
+ /* number of full 512-entry (4K) leaves */
+ lf4k = palloc->total_cnt >> 9;
+ lflast = palloc->total_cnt % PBLE_PER_PAGE;
+ total = (lflast == 0) ? lf4k : lf4k + 1;
+ lvl2->leaf_cnt = total;
+
+ lvl2->leafmem.size = (sizeof(*leaf) * total);
+ lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
+ if (!lvl2->leafmem.va)
+ return -ENOMEM;
+
+ lvl2->leaf = lvl2->leafmem.va;
+ leaf = lvl2->leaf;
+ ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
+ total << 3, &root->addr, &fpm_addr);
+ if (ret_code) {
+ kfree(lvl2->leafmem.va);
+ lvl2->leaf = NULL;
+ return -ENOMEM;
+ }
+
+ root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
+ root->cnt = total;
+ addr = root->addr;
+ for (i = 0; i < total; i++, leaf++) {
+ pblcnt = (lflast && ((i + 1) == total)) ?
+ lflast : PBLE_PER_PAGE;
+ ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
+ &leaf->chunkinfo, pblcnt << 3,
+ &leaf->addr, &fpm_addr);
+ if (ret_code)
+ goto error;
+
+ leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);
+
+ leaf->cnt = pblcnt;
+ *addr = (u64)leaf->idx;
+ addr++;
+ }
+
+ palloc->level = PBLE_LEVEL_2;
+ pble_rsrc->stats_lvl2++;
+ return 0;
+
+error:
+ free_lvl2(pble_rsrc, palloc);
+
+ return -ENOMEM;
+}
+
+/**
+ * get_lvl1_pble - get level 1 pble resource
+ * @pble_rsrc: pble resource management
+ * @palloc: level 1 pble allocation
+ */
+static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ int ret_code;
+ u64 fpm_addr;
+ struct irdma_pble_info *lvl1 = &palloc->level1;
+
+ ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
+ palloc->total_cnt << 3, &lvl1->addr,
+ &fpm_addr);
+ if (ret_code)
+ return -ENOMEM;
+
+ palloc->level = PBLE_LEVEL_1;
+ lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
+ lvl1->cnt = palloc->total_cnt;
+ pble_rsrc->stats_lvl1++;
+
+ return 0;
+}
+
+/**
+ * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routines
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @lvl: Bitmask for requested pble level
+ */
+static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, u8 lvl)
+{
+ int status = 0;
+
+ status = get_lvl1_pble(pble_rsrc, palloc);
+ if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE)
+ return status;
+
+ status = get_lvl2_pble(pble_rsrc, palloc);
+
+ return status;
+}
+
+/**
+ * irdma_get_pble - allocate pbles from the prm
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @pble_cnt: number of pbles requested
+ * @lvl: requested pble level mask
+ */
+int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, u32 pble_cnt,
+ u8 lvl)
+{
+ int status = 0;
+ int max_sds = 0;
+ int i;
+
+ palloc->total_cnt = pble_cnt;
+ palloc->level = PBLE_LEVEL_0;
+
+ mutex_lock(&pble_rsrc->pble_mutex_lock);
+
+ /* check first to see if we can get pbles without acquiring
+ * additional sds
+ */
+ status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
+ if (!status)
+ goto exit;
+
+ max_sds = (palloc->total_cnt >> 18) + 1;
+ for (i = 0; i < max_sds; i++) {
+ status = add_pble_prm(pble_rsrc);
+ if (status)
+ break;
+
+ status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
+ /* if level1_only, only go through it once */
+ if (!status || lvl)
+ break;
+ }
+
+exit:
+ if (!status) {
+ pble_rsrc->allocdpbles += pble_cnt;
+ pble_rsrc->stats_alloc_ok++;
+ } else {
+ pble_rsrc->stats_alloc_fail++;
+ }
+ mutex_unlock(&pble_rsrc->pble_mutex_lock);
+
+ return status;
+}
+
+/**
+ * irdma_free_pble - put pbles back into prm
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble resource being freed
+ */
+void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc)
+{
+ pble_rsrc->freedpbles += palloc->total_cnt;
+
+ if (palloc->level == PBLE_LEVEL_2)
+ free_lvl2(pble_rsrc, palloc);
+ else
+ irdma_prm_return_pbles(&pble_rsrc->pinfo,
+ &palloc->level1.chunkinfo);
+ pble_rsrc->stats_alloc_freed++;
+}
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
new file mode 100644
index 0000000000..b31b7c5d66
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2019 Intel Corporation */
+#ifndef IRDMA_PBLE_H
+#define IRDMA_PBLE_H
+
+#define PBLE_SHIFT 6
+#define PBLE_PER_PAGE 512
+#define HMC_PAGED_BP_SHIFT 12
+#define PBLE_512_SHIFT 9
+#define PBLE_INVALID_IDX 0xffffffff
+
+enum irdma_pble_level {
+ PBLE_LEVEL_0 = 0,
+ PBLE_LEVEL_1 = 1,
+ PBLE_LEVEL_2 = 2,
+};
+
+enum irdma_alloc_type {
+ PBLE_NO_ALLOC = 0,
+ PBLE_SD_CONTIGOUS = 1,
+ PBLE_SD_PAGED = 2,
+};
+
+struct irdma_chunk;
+
+struct irdma_pble_chunkinfo {
+ struct irdma_chunk *pchunk;
+ u64 bit_idx;
+ u64 bits_used;
+};
+
+struct irdma_pble_info {
+ u64 *addr;
+ u32 idx;
+ u32 cnt;
+ struct irdma_pble_chunkinfo chunkinfo;
+};
+
+struct irdma_pble_level2 {
+ struct irdma_pble_info root;
+ struct irdma_pble_info *leaf;
+ struct irdma_virt_mem leafmem;
+ u32 leaf_cnt;
+};
+
+struct irdma_pble_alloc {
+ u32 total_cnt;
+ enum irdma_pble_level level;
+ union {
+ struct irdma_pble_info level1;
+ struct irdma_pble_level2 level2;
+ };
+};
+
+struct sd_pd_idx {
+ u32 sd_idx;
+ u32 pd_idx;
+ u32 rel_pd_idx;
+};
+
+struct irdma_add_page_info {
+ struct irdma_chunk *chunk;
+ struct irdma_hmc_sd_entry *sd_entry;
+ struct irdma_hmc_info *hmc_info;
+ struct sd_pd_idx idx;
+ u32 pages;
+};
+
+struct irdma_chunk {
+ struct list_head list;
+ struct irdma_dma_info dmainfo;
+ unsigned long *bitmapbuf;
+
+ u32 sizeofbitmap;
+ u64 size;
+ void *vaddr;
+ u64 fpm_addr;
+ u32 pg_cnt;
+ enum irdma_alloc_type type;
+ struct irdma_sc_dev *dev;
+ struct irdma_virt_mem chunkmem;
+};
+
+struct irdma_pble_prm {
+ struct list_head clist;
+ spinlock_t prm_lock; /* protect prm bitmap */
+ u64 total_pble_alloc;
+ u64 free_pble_cnt;
+ u8 pble_shift;
+};
+
+struct irdma_hmc_pble_rsrc {
+ u32 unallocated_pble;
+ struct mutex pble_mutex_lock; /* protect PBLE resource */
+ struct irdma_sc_dev *dev;
+ u64 fpm_base_addr;
+ u64 next_fpm_addr;
+ struct irdma_pble_prm pinfo;
+ u64 allocdpbles;
+ u64 freedpbles;
+ u32 stats_direct_sds;
+ u32 stats_paged_sds;
+ u64 stats_alloc_ok;
+ u64 stats_alloc_fail;
+ u64 stats_alloc_freed;
+ u64 stats_lvl1;
+ u64 stats_lvl2;
+};
+
+void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
+int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
+ struct irdma_hmc_pble_rsrc *pble_rsrc);
+void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc);
+int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ struct irdma_pble_alloc *palloc, u32 pble_cnt,
+ u8 lvl);
+int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
+ struct irdma_chunk *pchunk);
+int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
+ u64 **vaddr, u64 *fpm_addr);
+void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo);
+void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ unsigned long *flags);
+void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
+ unsigned long *flags);
+void irdma_pble_free_paged_mem(struct irdma_chunk *chunk);
+int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt);
+void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk);
+#endif /* IRDMA_PBLE_H */
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
new file mode 100644
index 0000000000..113096b603
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#ifndef IRDMA_PROTOS_H
+#define IRDMA_PROTOS_H
+
+#define PAUSE_TIMER_VAL 0xffff
+#define REFRESH_THRESHOLD 0x7fff
+#define HIGH_THRESHOLD 0x800
+#define LOW_THRESHOLD 0x200
+#define ALL_TC2PFC 0xff
+#define CQP_COMPL_WAIT_TIME_MS 10
+#define CQP_TIMEOUT_THRESHOLD 500
+
+/* init operations */
+int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
+ struct irdma_device_init_info *info);
+void irdma_sc_rt_init(struct irdma_sc_dev *dev);
+void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
+__le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
+int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info,
+ bool post_sq);
+/* HMC/FPM functions */
+int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id);
+/* stats misc */
+int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat, bool wait);
+void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat);
+void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
+ const u64 *hw_stats_regs);
+int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_ws_node_info *node_info);
+int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
+ u8 op);
+int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
+ u8 op);
+int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
+ struct irdma_stats_inst_info *stats_info);
+u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
+void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
+void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
+ struct irdma_gather_stats *gather_stats,
+ struct irdma_gather_stats *last_gather_stats,
+ const struct irdma_hw_stat_map *map, u16 max_stat_idx);
+
+/* vsi functions */
+int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_stats_info *info);
+void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi);
+void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
+ struct irdma_vsi_init_info *info);
+int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
+void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
+/* misc L2 param change functions */
+void irdma_change_l2params(struct irdma_sc_vsi *vsi,
+ struct irdma_l2params *l2params);
+void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 suspend);
+int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 cmd);
+void irdma_qp_add_qos(struct irdma_sc_qp *qp);
+void irdma_qp_rem_qos(struct irdma_sc_qp *qp);
+struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
+ struct irdma_sc_qp *qp);
+void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi);
+/* terminate functions */
+void irdma_terminate_send_fin(struct irdma_sc_qp *qp);
+
+void irdma_terminate_connection(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info);
+
+void irdma_terminate_received(struct irdma_sc_qp *qp,
+ struct irdma_aeqe_info *info);
+/* dynamic memory allocation */
+/* misc */
+u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
+void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp);
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers);
+int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count);
+int irdma_get_rdma_features(struct irdma_sc_dev *dev);
+void free_sd_mem(struct irdma_sc_dev *dev);
+int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
+ struct cqp_cmds_info *pcmdinfo);
+int irdma_process_bh(struct irdma_sc_dev *dev);
+int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
+int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *mem);
+int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
+ struct irdma_hmc_fcn_info *hmcfcninfo,
+ u16 *pmf_idx);
+void irdma_add_dev_ref(struct irdma_sc_dev *dev);
+void irdma_put_dev_ref(struct irdma_sc_dev *dev);
+void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
+#endif /* IRDMA_PROTOS_H */
diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
new file mode 100644
index 0000000000..562531712e
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/puda.c
@@ -0,0 +1,1739 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "puda.h"
+#include "ws.h"
+
+static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_buf *buf);
+static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid);
+static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf, u32 wqe_idx);
+/**
+ * irdma_puda_get_listbuf - get buffer from puda list
+ * @list: list to use for buffers (ILQ or IEQ)
+ */
+static struct irdma_puda_buf *irdma_puda_get_listbuf(struct list_head *list)
+{
+ struct irdma_puda_buf *buf = NULL;
+
+ if (!list_empty(list)) {
+ buf = (struct irdma_puda_buf *)list->next;
+ list_del((struct list_head *)&buf->list);
+ }
+
+ return buf;
+}
+
+/**
+ * irdma_puda_get_bufpool - return buffer from resource
+ * @rsrc: resource to use for buffer
+ */
+struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_puda_buf *buf = NULL;
+ struct list_head *list = &rsrc->bufpool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ buf = irdma_puda_get_listbuf(list);
+ if (buf) {
+ rsrc->avail_buf_count--;
+ buf->vsi = rsrc->vsi;
+ } else {
+ rsrc->stats_buf_alloc_fail++;
+ }
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+
+ return buf;
+}
+
+/**
+ * irdma_puda_ret_bufpool - return buffer to rsrc list
+ * @rsrc: resource to use for buffer
+ * @buf: buffer to return to resource
+ */
+void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf)
+{
+ unsigned long flags;
+
+ buf->do_lpb = false;
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ list_add(&buf->list, &rsrc->bufpool);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->avail_buf_count++;
+}
+
+/**
+ * irdma_puda_post_recvbuf - set wqe for rcv buffer
+ * @rsrc: resource ptr
+ * @wqe_idx: wqe index to use
+ * @buf: puda buffer for rcv q
+ * @initial: flag if during init time
+ */
+static void irdma_puda_post_recvbuf(struct irdma_puda_rsrc *rsrc, u32 wqe_idx,
+ struct irdma_puda_buf *buf, bool initial)
+{
+ __le64 *wqe;
+ struct irdma_sc_qp *qp = &rsrc->qp;
+ u64 offset24 = 0;
+
+ /* Synch buffer for use by device */
+ dma_sync_single_for_device(rsrc->dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+ if (!initial)
+ get_64bit_val(wqe, 24, &offset24);
+
+ offset24 = (offset24) ? 0 : FIELD_PREP(IRDMAQPSQ_VALID, 1);
+
+ set_64bit_val(wqe, 16, 0);
+ set_64bit_val(wqe, 0, buf->mem.pa);
+ if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, buf->mem.size));
+ } else {
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, buf->mem.size) |
+ offset24);
+ }
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * irdma_puda_replenish_rq - post rcv buffers
+ * @rsrc: resource to use for buffer
+ * @initial: flag if during init time
+ */
+static int irdma_puda_replenish_rq(struct irdma_puda_rsrc *rsrc, bool initial)
+{
+ u32 i;
+ u32 invalid_cnt = rsrc->rxq_invalid_cnt;
+ struct irdma_puda_buf *buf = NULL;
+
+ for (i = 0; i < invalid_cnt; i++) {
+ buf = irdma_puda_get_bufpool(rsrc);
+ if (!buf)
+ return -ENOBUFS;
+ irdma_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, initial);
+ rsrc->rx_wqe_idx = ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
+ rsrc->rxq_invalid_cnt--;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_puda_alloc_buf - allocate mem for buffer
+ * @dev: iwarp device
+ * @len: length of buffer
+ */
+static struct irdma_puda_buf *irdma_puda_alloc_buf(struct irdma_sc_dev *dev,
+ u32 len)
+{
+ struct irdma_puda_buf *buf;
+ struct irdma_virt_mem buf_mem;
+
+ buf_mem.size = sizeof(struct irdma_puda_buf);
+ buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
+ if (!buf_mem.va)
+ return NULL;
+
+ buf = buf_mem.va;
+ buf->mem.size = len;
+ buf->mem.va = kzalloc(buf->mem.size, GFP_KERNEL);
+ if (!buf->mem.va)
+ goto free_virt;
+ buf->mem.pa = dma_map_single(dev->hw->device, buf->mem.va,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->device, buf->mem.pa)) {
+ kfree(buf->mem.va);
+ goto free_virt;
+ }
+
+ buf->buf_mem.va = buf_mem.va;
+ buf->buf_mem.size = buf_mem.size;
+
+ return buf;
+
+free_virt:
+ kfree(buf_mem.va);
+ return NULL;
+}
+
+/**
+ * irdma_puda_dele_buf - free buffer back to the system
+ * @dev: iwarp device
+ * @buf: buffer to free
+ */
+static void irdma_puda_dele_buf(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf)
+{
+ dma_unmap_single(dev->hw->device, buf->mem.pa, buf->mem.size,
+ DMA_BIDIRECTIONAL);
+ kfree(buf->mem.va);
+ kfree(buf->buf_mem.va);
+}
+
+/**
+ * irdma_puda_get_next_send_wqe - return next wqe for processing
+ * @qp: puda qp for wqe
+ * @wqe_idx: wqe index for caller
+ */
+static __le64 *irdma_puda_get_next_send_wqe(struct irdma_qp_uk *qp,
+ u32 *wqe_idx)
+{
+ int ret_code = 0;
+
+ *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+ IRDMA_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+ if (ret_code)
+ return NULL;
+
+ return qp->sq_base[*wqe_idx].elem;
+}
+
+/**
+ * irdma_puda_poll_info - poll cq for completion
+ * @cq: cq for poll
+ * @info: info return for successful completion
+ */
+static int irdma_puda_poll_info(struct irdma_sc_cq *cq,
+ struct irdma_puda_cmpl_info *info)
+{
+ struct irdma_cq_uk *cq_uk = &cq->cq_uk;
+ u64 qword0, qword2, qword3, qword6;
+ __le64 *cqe;
+ __le64 *ext_cqe = NULL;
+ u64 qword7 = 0;
+ u64 comp_ctx;
+ bool valid_bit;
+ bool ext_valid = 0;
+ u32 major_err, minor_err;
+ u32 peek_head;
+ bool error;
+ u8 polarity;
+
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(&cq->cq_uk);
+ get_64bit_val(cqe, 24, &qword3);
+ valid_bit = (bool)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ if (valid_bit != cq_uk->polarity)
+ return -ENOENT;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
+
+ if (ext_valid) {
+ peek_head = (cq_uk->cq_ring.head + 1) % cq_uk->cq_ring.size;
+ ext_cqe = cq_uk->cq_base[peek_head].buf;
+ get_64bit_val(ext_cqe, 24, &qword7);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ if (!peek_head)
+ polarity ^= 1;
+ if (polarity != cq_uk->polarity)
+ return -ENOENT;
+
+ /* Ensure ext CQE contents are read after ext valid bit is checked */
+ dma_rmb();
+
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
+ cq_uk->polarity = !cq_uk->polarity;
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
+ }
+
+ print_hex_dump_debug("PUDA: PUDA CQE", DUMP_PREFIX_OFFSET, 16, 8, cqe,
+ 32, false);
+ if (ext_valid)
+ print_hex_dump_debug("PUDA: PUDA EXT-CQE", DUMP_PREFIX_OFFSET,
+ 16, 8, ext_cqe, 32, false);
+
+ error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ if (error) {
+ ibdev_dbg(to_ibdev(cq->dev), "PUDA: receive error\n");
+ major_err = (u32)(FIELD_GET(IRDMA_CQ_MAJERR, qword3));
+ minor_err = (u32)(FIELD_GET(IRDMA_CQ_MINERR, qword3));
+ info->compl_error = major_err << 16 | minor_err;
+ return -EIO;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
+ if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ info->qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
+ info->wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
+
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ if (ext_valid) {
+ info->vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
+ if (info->vlan_valid) {
+ get_64bit_val(ext_cqe, 16, &qword6);
+ info->vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
+ }
+ info->smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
+ if (info->smac_valid) {
+ get_64bit_val(ext_cqe, 16, &qword6);
+ info->smac[0] = (u8)((qword6 >> 40) & 0xFF);
+ info->smac[1] = (u8)((qword6 >> 32) & 0xFF);
+ info->smac[2] = (u8)((qword6 >> 24) & 0xFF);
+ info->smac[3] = (u8)((qword6 >> 16) & 0xFF);
+ info->smac[4] = (u8)((qword6 >> 8) & 0xFF);
+ info->smac[5] = (u8)(qword6 & 0xFF);
+ }
+ }
+
+ if (cq->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) {
+ info->vlan_valid = (bool)FIELD_GET(IRDMA_VLAN_TAG_VALID, qword3);
+ info->l4proto = (u8)FIELD_GET(IRDMA_UDA_L4PROTO, qword2);
+ info->l3proto = (u8)FIELD_GET(IRDMA_UDA_L3PROTO, qword2);
+ }
+
+ info->payload_len = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_puda_poll_cmpl - processes completion for cq
+ * @dev: iwarp device
+ * @cq: cq getting interrupt
+ * @compl_err: return any completion err
+ */
+int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
+ u32 *compl_err)
+{
+ struct irdma_qp_uk *qp;
+ struct irdma_cq_uk *cq_uk = &cq->cq_uk;
+ struct irdma_puda_cmpl_info info = {};
+ int ret = 0;
+ struct irdma_puda_buf *buf;
+ struct irdma_puda_rsrc *rsrc;
+ u8 cq_type = cq->cq_type;
+ unsigned long flags;
+
+ if (cq_type == IRDMA_CQ_TYPE_ILQ || cq_type == IRDMA_CQ_TYPE_IEQ) {
+ rsrc = (cq_type == IRDMA_CQ_TYPE_ILQ) ? cq->vsi->ilq :
+ cq->vsi->ieq;
+ } else {
+ ibdev_dbg(to_ibdev(dev), "PUDA: qp_type error\n");
+ return -EINVAL;
+ }
+
+ ret = irdma_puda_poll_info(cq, &info);
+ *compl_err = info.compl_error;
+ if (ret == -ENOENT)
+ return ret;
+ if (ret)
+ goto done;
+
+ qp = info.qp;
+ if (!qp || !rsrc) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (qp->qp_id != rsrc->qp_id) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ if (info.q_type == IRDMA_CQE_QTYPE_RQ) {
+ buf = (struct irdma_puda_buf *)(uintptr_t)
+ qp->rq_wrid_array[info.wqe_idx];
+
+ /* reusing so synch the buffer for CPU use */
+ dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ /* Get all the tcpip information in the buf header */
+ ret = irdma_puda_get_tcpip_info(&info, buf);
+ if (ret) {
+ rsrc->stats_rcvd_pkt_err++;
+ if (cq_type == IRDMA_CQ_TYPE_ILQ) {
+ irdma_ilq_putback_rcvbuf(&rsrc->qp, buf,
+ info.wqe_idx);
+ } else {
+ irdma_puda_ret_bufpool(rsrc, buf);
+ irdma_puda_replenish_rq(rsrc, false);
+ }
+ goto done;
+ }
+
+ rsrc->stats_pkt_rcvd++;
+ rsrc->compl_rxwqe_idx = info.wqe_idx;
+ ibdev_dbg(to_ibdev(dev), "PUDA: RQ completion\n");
+ rsrc->receive(rsrc->vsi, buf);
+ if (cq_type == IRDMA_CQ_TYPE_ILQ)
+ irdma_ilq_putback_rcvbuf(&rsrc->qp, buf, info.wqe_idx);
+ else
+ irdma_puda_replenish_rq(rsrc, false);
+
+ } else {
+ ibdev_dbg(to_ibdev(dev), "PUDA: SQ completion\n");
+ buf = (struct irdma_puda_buf *)(uintptr_t)
+ qp->sq_wrtrk_array[info.wqe_idx].wrid;
+
+ /* reusing so synch the buffer for CPU use */
+ dma_sync_single_for_cpu(dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ IRDMA_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
+ rsrc->xmit_complete(rsrc->vsi, buf);
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ rsrc->tx_wqe_avail_cnt++;
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ if (!list_empty(&rsrc->txpend))
+ irdma_puda_send_buf(rsrc, NULL);
+ }
+
+done:
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring))
+ cq_uk->polarity = !cq_uk->polarity;
+ /* update cq tail in cq shadow memory also */
+ IRDMA_RING_MOVE_TAIL(cq_uk->cq_ring);
+ set_64bit_val(cq_uk->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring));
+
+ return ret;
+}
+
+/**
+ * irdma_puda_send - complete send wqe for transmit
+ * @qp: puda qp for send
+ * @info: buffer information for transmit
+ */
+int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info)
+{
+ __le64 *wqe;
+ u32 iplen, l4len;
+ u64 hdr[2];
+ u32 wqe_idx;
+ u8 iipt;
+
+ /* number of 32-bit DWORDs in the header */
+ l4len = info->tcplen >> 2;
+ if (info->ipv4) {
+ iipt = 3;
+ iplen = 5;
+ } else {
+ iipt = 1;
+ iplen = 10;
+ }
+
+ wqe = irdma_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
+ /* Third line of WQE descriptor */
+ /* maclen is in words */
+
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ hdr[0] = 0; /* Dest_QPN and Dest_QKey only for UD */
+ hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_L4LEN, l4len) |
+ FIELD_PREP(IRDMAQPSQ_AHID, info->ah_id) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID,
+ qp->qp_uk.swqe_polarity);
+
+ /* Fourth line of WQE descriptor */
+
+ set_64bit_val(wqe, 0, info->paddr);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, info->len) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity));
+ } else {
+ hdr[0] = FIELD_PREP(IRDMA_UDA_QPSQ_MACLEN, info->maclen >> 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_IPLEN, iplen) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_L4T, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_IIPT, iipt) |
+ FIELD_PREP(IRDMA_GEN1_UDA_QPSQ_L4LEN, l4len);
+
+ hdr[1] = FIELD_PREP(IRDMA_UDA_QPSQ_OPCODE, IRDMA_OP_TYPE_SEND) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_SIGCOMPL, 1) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_DOLOOPBACK, info->do_lpb) |
+ FIELD_PREP(IRDMA_UDA_QPSQ_VALID, qp->qp_uk.swqe_polarity);
+
+ /* Fourth line of WQE descriptor */
+
+ set_64bit_val(wqe, 0, info->paddr);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, info->len));
+ }
+
+ set_64bit_val(wqe, 16, hdr[0]);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr[1]);
+
+ print_hex_dump_debug("PUDA: PUDA SEND WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, 32, false);
+ irdma_uk_qp_post_wr(&qp->qp_uk);
+ return 0;
+}
+
+/**
+ * irdma_puda_send_buf - transmit puda buffer
+ * @rsrc: resource to use for buffer
+ * @buf: puda buffer to transmit
+ */
+void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_puda_send_info info;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+ /* if no wqe is available, or this is a new buffer and there
+ * are already pending buffers, queue the new buffer
+ */
+ if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
+ list_add_tail(&buf->list, &rsrc->txpend);
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+ rsrc->stats_sent_pkt_q++;
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ ibdev_dbg(to_ibdev(rsrc->dev),
+ "PUDA: adding to txpend\n");
+ return;
+ }
+ rsrc->tx_wqe_avail_cnt--;
+ /* if we are coming from a completion and have pending buffers,
+ * get one from the pending list
+ */
+ if (!buf) {
+ buf = irdma_puda_get_listbuf(&rsrc->txpend);
+ if (!buf)
+ goto done;
+ }
+
+ info.scratch = buf;
+ info.paddr = buf->mem.pa;
+ info.len = buf->totallen;
+ info.tcplen = buf->tcphlen;
+ info.ipv4 = buf->ipv4;
+
+ if (rsrc->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ info.ah_id = buf->ah_id;
+ } else {
+ info.maclen = buf->maclen;
+ info.do_lpb = buf->do_lpb;
+ }
+
+ /* Sync buffer for CPU use */
+ dma_sync_single_for_cpu(rsrc->dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ ret = irdma_puda_send(&rsrc->qp, &info);
+ if (ret) {
+ rsrc->tx_wqe_avail_cnt++;
+ rsrc->stats_sent_pkt_q++;
+ list_add(&buf->list, &rsrc->txpend);
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ ibdev_dbg(to_ibdev(rsrc->dev),
+ "PUDA: adding to puda_send\n");
+ } else {
+ rsrc->stats_pkt_sent++;
+ }
+done:
+ spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+}
+
+/**
+ * irdma_puda_qp_setctx - during init, set qp's context
+ * @rsrc: qp's resource
+ */
+static void irdma_puda_qp_setctx(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_sc_qp *qp = &rsrc->qp;
+ __le64 *qp_ctx = qp->hw_host_ctx;
+
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+ set_64bit_val(qp_ctx, 24,
+ FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, rsrc->buf_size));
+ set_64bit_val(qp_ctx, 56, 0);
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ set_64bit_val(qp_ctx, 64, 1);
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, rsrc->cq_id) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, rsrc->cq_id));
+ set_64bit_val(qp_ctx, 144,
+ FIELD_PREP(IRDMAQPC_STAT_INDEX, rsrc->stats_idx));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_PRIVEN, 1) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, rsrc->stats_idx_valid));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, (uintptr_t)qp));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+
+ print_hex_dump_debug("PUDA: PUDA QP CONTEXT", DUMP_PREFIX_OFFSET, 16,
+ 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+/**
+ * irdma_puda_qp_wqe - setup wqe for qp create
+ * @dev: Device
+ * @qp: Resource qp
+ */
+static int irdma_puda_qp_wqe(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+ struct irdma_ccq_cqe_info compl_info;
+ int status = 0;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+ set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+ hdr = qp->qp_uk.qp_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_QP) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_QPTYPE, IRDMA_QP_TYPE_UDA) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_CQNUMVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_QP_NEXTIWSTATE, 2) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("PUDA: PUDA QP CREATE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, 40, false);
+ irdma_sc_cqp_post_sq(cqp);
+ status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_QP,
+ &compl_info);
+
+ return status;
+}
+
+/**
+ * irdma_puda_qp_create - create qp for resource
+ * @rsrc: resource to use for buffer
+ */
+static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_sc_qp *qp = &rsrc->qp;
+ struct irdma_qp_uk *ukqp = &qp->qp_uk;
+ int ret = 0;
+ u32 sq_size, rq_size;
+ struct irdma_dma_mem *mem;
+
+ sq_size = rsrc->sq_size * IRDMA_QP_WQE_MIN_SIZE;
+ rq_size = rsrc->rq_size * IRDMA_QP_WQE_MIN_SIZE;
+ rsrc->qpmem.size = ALIGN((sq_size + rq_size + (IRDMA_SHADOW_AREA_SIZE << 3) + IRDMA_QP_CTX_SIZE),
+ IRDMA_HW_PAGE_SIZE);
+ rsrc->qpmem.va = dma_alloc_coherent(rsrc->dev->hw->device,
+ rsrc->qpmem.size, &rsrc->qpmem.pa,
+ GFP_KERNEL);
+ if (!rsrc->qpmem.va)
+ return -ENOMEM;
+
+ mem = &rsrc->qpmem;
+ memset(mem->va, 0, rsrc->qpmem.size);
+ qp->hw_sq_size = irdma_get_encoded_wqe_size(rsrc->sq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
+ qp->hw_rq_size = irdma_get_encoded_wqe_size(rsrc->rq_size, IRDMA_QUEUE_TYPE_SQ_RQ);
+ qp->pd = &rsrc->sc_pd;
+ qp->qp_uk.qp_type = IRDMA_QP_TYPE_UDA;
+ qp->dev = rsrc->dev;
+ qp->qp_uk.back_qp = rsrc;
+ qp->sq_pa = mem->pa;
+ qp->rq_pa = qp->sq_pa + sq_size;
+ qp->vsi = rsrc->vsi;
+ ukqp->sq_base = mem->va;
+ ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
+ ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
+ ukqp->uk_attrs = &qp->dev->hw_attrs.uk_attrs;
+ qp->shadow_area_pa = qp->rq_pa + rq_size;
+ qp->hw_host_ctx = ukqp->shadow_area + IRDMA_SHADOW_AREA_SIZE;
+ qp->hw_host_ctx_pa = qp->shadow_area_pa + (IRDMA_SHADOW_AREA_SIZE << 3);
+ qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
+ ukqp->qp_id = rsrc->qp_id;
+ ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
+ ukqp->rq_wrid_array = rsrc->rq_wrid_array;
+ ukqp->sq_size = rsrc->sq_size;
+ ukqp->rq_size = rsrc->rq_size;
+
+ IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
+ IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
+ IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
+ ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
+
+ ret = rsrc->dev->ws_add(qp->vsi, qp->user_pri);
+ if (ret) {
+ dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
+ rsrc->qpmem.va, rsrc->qpmem.pa);
+ rsrc->qpmem.va = NULL;
+ return ret;
+ }
+
+ irdma_qp_add_qos(qp);
+ irdma_puda_qp_setctx(rsrc);
+
+ if (rsrc->dev->ceq_valid)
+ ret = irdma_cqp_qp_create_cmd(rsrc->dev, qp);
+ else
+ ret = irdma_puda_qp_wqe(rsrc->dev, qp);
+ if (ret) {
+ irdma_qp_rem_qos(qp);
+ rsrc->dev->ws_remove(qp->vsi, qp->user_pri);
+ dma_free_coherent(rsrc->dev->hw->device, rsrc->qpmem.size,
+ rsrc->qpmem.va, rsrc->qpmem.pa);
+ rsrc->qpmem.va = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * irdma_puda_cq_wqe - setup wqe for CQ create
+ * @dev: Device
+ * @cq: resource for cq
+ */
+static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
+{
+ __le64 *wqe;
+ struct irdma_sc_cqp *cqp;
+ u64 hdr;
+ struct irdma_ccq_cqe_info compl_info;
+ int status = 0;
+
+ cqp = dev->cqp;
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+ set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
+ set_64bit_val(wqe, 32, cq->cq_pa);
+ set_64bit_val(wqe, 40, cq->shadow_area_pa);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
+
+ hdr = cq->cq_uk.cq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, 1) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(dev->cqp);
+ status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
+ &compl_info);
+ if (!status) {
+ struct irdma_sc_ceq *ceq = dev->ceq[0];
+
+ if (ceq && ceq->reg_cq)
+ status = irdma_sc_add_cq_ctx(ceq, cq);
+ }
+
+ return status;
+}
+
+/**
+ * irdma_puda_cq_create - create cq for resource
+ * @rsrc: resource for which cq to create
+ */
+static int irdma_puda_cq_create(struct irdma_puda_rsrc *rsrc)
+{
+ struct irdma_sc_dev *dev = rsrc->dev;
+ struct irdma_sc_cq *cq = &rsrc->cq;
+ int ret = 0;
+ u32 cqsize;
+ struct irdma_dma_mem *mem;
+ struct irdma_cq_init_info info = {};
+ struct irdma_cq_uk_init_info *init_info = &info.cq_uk_init_info;
+
+ cq->vsi = rsrc->vsi;
+ cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));
+ rsrc->cqmem.size = ALIGN(cqsize + sizeof(struct irdma_cq_shadow_area),
+ IRDMA_CQ0_ALIGNMENT);
+ rsrc->cqmem.va = dma_alloc_coherent(dev->hw->device, rsrc->cqmem.size,
+ &rsrc->cqmem.pa, GFP_KERNEL);
+ if (!rsrc->cqmem.va)
+ return -ENOMEM;
+
+ mem = &rsrc->cqmem;
+ info.dev = dev;
+ info.type = (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ) ?
+ IRDMA_CQ_TYPE_ILQ : IRDMA_CQ_TYPE_IEQ;
+ info.shadow_read_threshold = rsrc->cq_size >> 2;
+ info.cq_base_pa = mem->pa;
+ info.shadow_area_pa = mem->pa + cqsize;
+ init_info->cq_base = mem->va;
+ init_info->shadow_area = (__le64 *)((u8 *)mem->va + cqsize);
+ init_info->cq_size = rsrc->cq_size;
+ init_info->cq_id = rsrc->cq_id;
+ info.ceqe_mask = true;
+ info.ceq_id_valid = true;
+ info.vsi = rsrc->vsi;
+
+ ret = irdma_sc_cq_init(cq, &info);
+ if (ret)
+ goto error;
+
+ if (rsrc->dev->ceq_valid)
+ ret = irdma_cqp_cq_create_cmd(dev, cq);
+ else
+ ret = irdma_puda_cq_wqe(dev, cq);
+error:
+ if (ret) {
+ dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
+ rsrc->cqmem.va, rsrc->cqmem.pa);
+ rsrc->cqmem.va = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * irdma_puda_free_qp - free qp for resource
+ * @rsrc: resource for which qp to free
+ */
+static void irdma_puda_free_qp(struct irdma_puda_rsrc *rsrc)
+{
+ int ret;
+ struct irdma_ccq_cqe_info compl_info;
+ struct irdma_sc_dev *dev = rsrc->dev;
+
+ if (rsrc->dev->ceq_valid) {
+ irdma_cqp_qp_destroy_cmd(dev, &rsrc->qp);
+ rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
+ return;
+ }
+
+ ret = irdma_sc_qp_destroy(&rsrc->qp, 0, false, true, true);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error puda qp destroy wqe, status = %d\n",
+ ret);
+ if (!ret) {
+ ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_QP,
+ &compl_info);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error puda qp destroy failed, status = %d\n",
+ ret);
+ }
+ rsrc->dev->ws_remove(rsrc->qp.vsi, rsrc->qp.user_pri);
+}
+
+/**
+ * irdma_puda_free_cq - free cq for resource
+ * @rsrc: resource for which cq to free
+ */
+static void irdma_puda_free_cq(struct irdma_puda_rsrc *rsrc)
+{
+ int ret;
+ struct irdma_ccq_cqe_info compl_info;
+ struct irdma_sc_dev *dev = rsrc->dev;
+
+ if (rsrc->dev->ceq_valid) {
+ irdma_cqp_cq_destroy_cmd(dev, &rsrc->cq);
+ return;
+ }
+
+ ret = irdma_sc_cq_destroy(&rsrc->cq, 0, true);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev), "PUDA: error ieq cq destroy\n");
+ if (!ret) {
+ ret = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_DESTROY_CQ,
+ &compl_info);
+ if (ret)
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error ieq qp destroy done\n");
+ }
+}
+
+/**
+ * irdma_puda_dele_rsrc - delete all resources during close
+ * @vsi: VSI structure of device
+ * @type: type of resource to delete
+ * @reset: true if reset chip
+ */
+void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
+ bool reset)
+{
+ struct irdma_sc_dev *dev = vsi->dev;
+ struct irdma_puda_rsrc *rsrc;
+ struct irdma_puda_buf *buf = NULL;
+ struct irdma_puda_buf *nextbuf = NULL;
+ struct irdma_virt_mem *vmem;
+ struct irdma_sc_ceq *ceq;
+
+ ceq = vsi->dev->ceq[0];
+ switch (type) {
+ case IRDMA_PUDA_RSRC_TYPE_ILQ:
+ rsrc = vsi->ilq;
+ vmem = &vsi->ilq_mem;
+ vsi->ilq = NULL;
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
+ break;
+ case IRDMA_PUDA_RSRC_TYPE_IEQ:
+ rsrc = vsi->ieq;
+ vmem = &vsi->ieq_mem;
+ vsi->ieq = NULL;
+ if (ceq && ceq->reg_cq)
+ irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
+ break;
+ default:
+ ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",
+ type);
+ return;
+ }
+
+ switch (rsrc->cmpl) {
+ case PUDA_HASH_CRC_COMPLETE:
+ irdma_free_hash_desc(rsrc->hash_desc);
+ fallthrough;
+ case PUDA_QP_CREATED:
+ irdma_qp_rem_qos(&rsrc->qp);
+
+ if (!reset)
+ irdma_puda_free_qp(rsrc);
+
+ dma_free_coherent(dev->hw->device, rsrc->qpmem.size,
+ rsrc->qpmem.va, rsrc->qpmem.pa);
+ rsrc->qpmem.va = NULL;
+ fallthrough;
+ case PUDA_CQ_CREATED:
+ if (!reset)
+ irdma_puda_free_cq(rsrc);
+
+ dma_free_coherent(dev->hw->device, rsrc->cqmem.size,
+ rsrc->cqmem.va, rsrc->cqmem.pa);
+ rsrc->cqmem.va = NULL;
+ break;
+ default:
+ ibdev_dbg(to_ibdev(rsrc->dev), "PUDA: error no resources\n");
+ break;
+ }
+ /* Free all allocated puda buffers for both tx and rx */
+ buf = rsrc->alloclist;
+ while (buf) {
+ nextbuf = buf->next;
+ irdma_puda_dele_buf(dev, buf);
+ buf = nextbuf;
+ rsrc->alloc_buf_count--;
+ }
+
+ kfree(vmem->va);
+}
+
+/**
+ * irdma_puda_allocbufs - allocate buffers for resource
+ * @rsrc: resource for buffer allocation
+ * @count: number of buffers to create
+ */
+static int irdma_puda_allocbufs(struct irdma_puda_rsrc *rsrc, u32 count)
+{
+ u32 i;
+ struct irdma_puda_buf *buf;
+ struct irdma_puda_buf *nextbuf;
+
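+ /* chain each buffer on alloclist so irdma_puda_dele_rsrc() can free it */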
+ for (i = 0; i < count; i++) {
+ buf = irdma_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
+ if (!buf) {
+ rsrc->stats_buf_alloc_fail++;
+ return -ENOMEM;
+ }
+ irdma_puda_ret_bufpool(rsrc, buf);
+ rsrc->alloc_buf_count++;
+ if (!rsrc->alloclist) {
+ rsrc->alloclist = buf;
+ } else {
+ nextbuf = rsrc->alloclist;
+ rsrc->alloclist = buf;
+ buf->next = nextbuf;
+ }
+ }
+
+ rsrc->avail_buf_count = rsrc->alloc_buf_count;
+
+ return 0;
+}
+
+/**
+ * irdma_puda_create_rsrc - create resource (ilq or ieq)
+ * @vsi: sc VSI struct
+ * @info: resource information
+ */
+int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_rsrc_info *info)
+{
+ struct irdma_sc_dev *dev = vsi->dev;
+ int ret = 0;
+ struct irdma_puda_rsrc *rsrc;
+ u32 pudasize;
+ u32 sqwridsize, rqwridsize;
+ struct irdma_virt_mem *vmem;
+
+ info->count = 1;
+ pudasize = sizeof(struct irdma_puda_rsrc);
+ sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
+ rqwridsize = info->rq_size * 8;
+ switch (info->type) {
+ case IRDMA_PUDA_RSRC_TYPE_ILQ:
+ vmem = &vsi->ilq_mem;
+ break;
+ case IRDMA_PUDA_RSRC_TYPE_IEQ:
+ vmem = &vsi->ieq_mem;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ vmem->size = pudasize + sqwridsize + rqwridsize;
+ vmem->va = kzalloc(vmem->size, GFP_KERNEL);
+ if (!vmem->va)
+ return -ENOMEM;
+
+ rsrc = vmem->va;
+ spin_lock_init(&rsrc->bufpool_lock);
+ switch (info->type) {
+ case IRDMA_PUDA_RSRC_TYPE_ILQ:
+ vsi->ilq = vmem->va;
+ vsi->ilq_count = info->count;
+ rsrc->receive = info->receive;
+ rsrc->xmit_complete = info->xmit_complete;
+ break;
+ case IRDMA_PUDA_RSRC_TYPE_IEQ:
+ vsi->ieq_count = info->count;
+ vsi->ieq = vmem->va;
+ rsrc->receive = irdma_ieq_receive;
+ rsrc->xmit_complete = irdma_ieq_tx_compl;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rsrc->type = info->type;
+ rsrc->sq_wrtrk_array = (struct irdma_sq_uk_wr_trk_info *)
+ ((u8 *)vmem->va + pudasize);
+ rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
+ /* Initialize all ieq lists */
+ INIT_LIST_HEAD(&rsrc->bufpool);
+ INIT_LIST_HEAD(&rsrc->txpend);
+
+ rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
+ irdma_sc_pd_init(dev, &rsrc->sc_pd, info->pd_id, info->abi_ver);
+ rsrc->qp_id = info->qp_id;
+ rsrc->cq_id = info->cq_id;
+ rsrc->sq_size = info->sq_size;
+ rsrc->rq_size = info->rq_size;
+ rsrc->cq_size = info->rq_size + info->sq_size;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ if (rsrc->type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ rsrc->cq_size += info->rq_size;
+ }
+ rsrc->buf_size = info->buf_size;
+ rsrc->dev = dev;
+ rsrc->vsi = vsi;
+ rsrc->stats_idx = info->stats_idx;
+ rsrc->stats_idx_valid = info->stats_idx_valid;
+
+ ret = irdma_puda_cq_create(rsrc);
+ if (!ret) {
+ rsrc->cmpl = PUDA_CQ_CREATED;
+ ret = irdma_puda_qp_create(rsrc);
+ }
+ if (ret) {
+ ibdev_dbg(to_ibdev(dev),
+ "PUDA: error qp_create type=%d, status=%d\n",
+ rsrc->type, ret);
+ goto error;
+ }
+ rsrc->cmpl = PUDA_QP_CREATED;
+
+ ret = irdma_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
+ if (ret) {
+ ibdev_dbg(to_ibdev(dev), "PUDA: error alloc_buf\n");
+ goto error;
+ }
+
+ rsrc->rxq_invalid_cnt = info->rq_size;
+ ret = irdma_puda_replenish_rq(rsrc, true);
+ if (ret)
+ goto error;
+
+ if (info->type == IRDMA_PUDA_RSRC_TYPE_IEQ) {
+ if (!irdma_init_hash_desc(&rsrc->hash_desc)) {
+ rsrc->check_crc = true;
+ rsrc->cmpl = PUDA_HASH_CRC_COMPLETE;
+ ret = 0;
+ }
+ }
+
+ irdma_sc_ccq_arm(&rsrc->cq);
+ return ret;
+
+error:
+ irdma_puda_dele_rsrc(vsi, info->type, false);
+
+ return ret;
+}
+
+/**
+ * irdma_ilq_putback_rcvbuf - put an ilq receive buffer back on the rq
+ * @qp: ilq's qp resource
+ * @buf: puda buffer for rcv q
+ * @wqe_idx: wqe index of completed rcvbuf
+ */
+static void irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf, u32 wqe_idx)
+{
+ __le64 *wqe;
+ u64 offset8, offset24;
+
+ /* Synch buffer for use by device */
+ dma_sync_single_for_device(qp->dev->hw->device, buf->mem.pa,
+ buf->mem.size, DMA_BIDIRECTIONAL);
+ wqe = qp->qp_uk.rq_base[wqe_idx].elem;
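+ /*
+ * Toggle the WQE valid bit (and its GEN_2 copy at offset 8) so the
+ * recycled RQ WQE is returned to hardware with the correct polarity
+ * for the next pass through the ring.
+ */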
+ get_64bit_val(wqe, 24, &offset24);
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ get_64bit_val(wqe, 8, &offset8);
+ if (offset24)
+ offset8 &= ~FIELD_PREP(IRDMAQPSQ_VALID, 1);
+ else
+ offset8 |= FIELD_PREP(IRDMAQPSQ_VALID, 1);
+ set_64bit_val(wqe, 8, offset8);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+ }
+ if (offset24)
+ offset24 = 0;
+ else
+ offset24 = FIELD_PREP(IRDMAQPSQ_VALID, 1);
+
+ set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * irdma_ieq_get_fpdu_len - get length of fpdu with or without marker
+ * @pfpdu: pointer to fpdu
+ * @datap: pointer to data in the buffer
+ * @rcv_seq: seqnum of the data buffer
+ */
+static u16 irdma_ieq_get_fpdu_len(struct irdma_pfpdu *pfpdu, u8 *datap,
+ u32 rcv_seq)
+{
+ u32 marker_seq, end_seq, blk_start;
+ u8 marker_len = pfpdu->marker_len;
+ u16 total_len = 0;
+ u16 fpdu_len;
+
+ blk_start = (pfpdu->rcv_start_seq - rcv_seq) & (IRDMA_MRK_BLK_SZ - 1);
+ if (!blk_start) {
+ total_len = marker_len;
+ marker_seq = rcv_seq + IRDMA_MRK_BLK_SZ;
+ if (marker_len && *(u32 *)datap)
+ return 0;
+ } else {
+ marker_seq = rcv_seq + blk_start;
+ }
+
+ datap += total_len;
+ fpdu_len = ntohs(*(__be16 *)datap);
+ fpdu_len += IRDMA_IEQ_MPA_FRAMING;
+ fpdu_len = (fpdu_len + 3) & 0xfffc;
+
+ if (fpdu_len > pfpdu->max_fpdu_data)
+ return 0;
+
+ total_len += fpdu_len;
+ end_seq = rcv_seq + total_len;
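+ /* account for any markers that fall within this fpdu's span */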
+ while ((int)(marker_seq - end_seq) < 0) {
+ total_len += marker_len;
+ end_seq += marker_len;
+ marker_seq += IRDMA_MRK_BLK_SZ;
+ }
+
+ return total_len;
+}
+
+/**
+ * irdma_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
+ * @buf: rcv buffer with partial
+ * @txbuf: tx buffer for sending back
+ * @buf_offset: rcv buffer offset to copy from
+ * @txbuf_offset: at offset in tx buf to copy
+ * @len: length of data to copy
+ */
+static void irdma_ieq_copy_to_txbuf(struct irdma_puda_buf *buf,
+ struct irdma_puda_buf *txbuf,
+ u16 buf_offset, u32 txbuf_offset, u32 len)
+{
+ void *mem1 = (u8 *)buf->mem.va + buf_offset;
+ void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
+
+ memcpy(mem2, mem1, len);
+}
+
+/**
+ * irdma_ieq_setup_tx_buf - setup tx buffer for partial handling
+ * @buf: receive buffer with partial
+ * @txbuf: buffer to prepare
+ */
+static void irdma_ieq_setup_tx_buf(struct irdma_puda_buf *buf,
+ struct irdma_puda_buf *txbuf)
+{
+ txbuf->tcphlen = buf->tcphlen;
+ txbuf->ipv4 = buf->ipv4;
+
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ txbuf->hdrlen = txbuf->tcphlen;
+ irdma_ieq_copy_to_txbuf(buf, txbuf, IRDMA_TCP_OFFSET, 0,
+ txbuf->hdrlen);
+ } else {
+ txbuf->maclen = buf->maclen;
+ txbuf->hdrlen = buf->hdrlen;
+ irdma_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
+ }
+}
+
+/**
+ * irdma_ieq_check_first_buf - check if rcv buffer's seq is in range
+ * @buf: receive exception buffer
+ * @fps: first partial sequence number
+ */
+static void irdma_ieq_check_first_buf(struct irdma_puda_buf *buf, u32 fps)
+{
+ u32 offset;
+
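+ /* trim any bytes received ahead of the first partial sequence number */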
+ if (buf->seqnum < fps) {
+ offset = fps - buf->seqnum;
+ if (offset > buf->datalen)
+ return;
+ buf->data += offset;
+ buf->datalen -= (u16)offset;
+ buf->seqnum = fps;
+ }
+}
+
+/**
+ * irdma_ieq_compl_pfpdu - write txbuf with full fpdu
+ * @ieq: ieq resource
+ * @rxlist: ieq's received buffer list
+ * @pbufl: temporary list for buffers for fpdu
+ * @txbuf: tx buffer for fpdu
+ * @fpdu_len: total length of fpdu
+ */
+static void irdma_ieq_compl_pfpdu(struct irdma_puda_rsrc *ieq,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct irdma_puda_buf *txbuf, u16 fpdu_len)
+{
+ struct irdma_puda_buf *buf;
+ u32 nextseqnum;
+ u16 txoffset, bufoffset;
+
+ buf = irdma_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
+
+ nextseqnum = buf->seqnum + fpdu_len;
+ irdma_ieq_setup_tx_buf(buf, txbuf);
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ txoffset = txbuf->hdrlen;
+ txbuf->totallen = txbuf->hdrlen + fpdu_len;
+ txbuf->data = (u8 *)txbuf->mem.va + txoffset;
+ } else {
+ txoffset = buf->hdrlen;
+ txbuf->totallen = buf->hdrlen + fpdu_len;
+ txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
+ }
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+
+ do {
+ if (buf->datalen >= fpdu_len) {
+ /* copied full fpdu */
+ irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
+ fpdu_len);
+ buf->datalen -= fpdu_len;
+ buf->data += fpdu_len;
+ buf->seqnum = nextseqnum;
+ break;
+ }
+ /* copy partial fpdu */
+ irdma_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset,
+ buf->datalen);
+ txoffset += buf->datalen;
+ fpdu_len -= buf->datalen;
+ irdma_puda_ret_bufpool(ieq, buf);
+ buf = irdma_puda_get_listbuf(pbufl);
+ if (!buf)
+ return;
+
+ bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+ } while (1);
+
+ /* last buffer on the list */
+ if (buf->datalen)
+ list_add(&buf->list, rxlist);
+ else
+ irdma_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * irdma_ieq_create_pbufl - create buffer list for single fpdu
+ * @pfpdu: pointer to fpdu
+ * @rxlist: resource list for receive ieq buffers
+ * @pbufl: temp. list for buffers for fpdu
+ * @buf: first receive buffer
+ * @fpdu_len: total length of fpdu
+ */
+static int irdma_ieq_create_pbufl(struct irdma_pfpdu *pfpdu,
+ struct list_head *rxlist,
+ struct list_head *pbufl,
+ struct irdma_puda_buf *buf, u16 fpdu_len)
+{
+ int status = 0;
+ struct irdma_puda_buf *nextbuf;
+ u32 nextseqnum;
+ u16 plen = fpdu_len - buf->datalen;
+ bool done = false;
+
+ nextseqnum = buf->seqnum + buf->datalen;
+ do {
+ nextbuf = irdma_puda_get_listbuf(rxlist);
+ if (!nextbuf) {
+ status = -ENOBUFS;
+ break;
+ }
+ list_add_tail(&nextbuf->list, pbufl);
+ if (nextbuf->seqnum != nextseqnum) {
+ pfpdu->bad_seq_num++;
+ status = -ERANGE;
+ break;
+ }
+ if (nextbuf->datalen >= plen) {
+ done = true;
+ } else {
+ plen -= nextbuf->datalen;
+ nextseqnum = nextbuf->seqnum + nextbuf->datalen;
+ }
+
+ } while (!done);
+
+ return status;
+}
+
+/**
+ * irdma_ieq_handle_partial - process partial fpdu buffer
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ * @fpdu_len: fpdu len in the buffer
+ */
+static int irdma_ieq_handle_partial(struct irdma_puda_rsrc *ieq,
+ struct irdma_pfpdu *pfpdu,
+ struct irdma_puda_buf *buf, u16 fpdu_len)
+{
+ int status = 0;
+ u8 *crcptr;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ struct list_head pbufl; /* partial buffer list */
+ struct irdma_puda_buf *txbuf = NULL;
+ struct list_head *rxlist = &pfpdu->rxlist;
+
+ ieq->partials_handled++;
+
+ INIT_LIST_HEAD(&pbufl);
+ list_add(&buf->list, &pbufl);
+
+ status = irdma_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
+ if (status)
+ goto error;
+
+ txbuf = irdma_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ status = -ENOBUFS;
+ goto error;
+ }
+
+ irdma_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
+ irdma_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
+
+ crcptr = txbuf->data + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc) {
+ status = irdma_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
+ (fpdu_len - 4), mpacrc);
+ if (status) {
+ ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error bad crc\n");
+ goto error;
+ }
+ }
+
+ print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
+ txbuf->mem.va, txbuf->totallen, false);
+ if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
+ txbuf->do_lpb = true;
+ irdma_puda_send_buf(ieq, txbuf);
+ pfpdu->rcv_nxt = seqnum + fpdu_len;
+ return status;
+
+error:
+ while (!list_empty(&pbufl)) {
+ buf = list_last_entry(&pbufl, struct irdma_puda_buf, list);
+ list_move(&buf->list, rxlist);
+ }
+ if (txbuf)
+ irdma_puda_ret_bufpool(ieq, txbuf);
+
+ return status;
+}
+
+/**
+ * irdma_ieq_process_buf - process buffer rcvd for ieq
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ */
+static int irdma_ieq_process_buf(struct irdma_puda_rsrc *ieq,
+ struct irdma_pfpdu *pfpdu,
+ struct irdma_puda_buf *buf)
+{
+ u16 fpdu_len = 0;
+ u16 datalen = buf->datalen;
+ u8 *datap = buf->data;
+ u8 *crcptr;
+ u16 ioffset = 0;
+ u32 mpacrc;
+ u32 seqnum = buf->seqnum;
+ u16 len = 0;
+ u16 full = 0;
+ bool partial = false;
+ struct irdma_puda_buf *txbuf;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ int ret = 0;
+
+ ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
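+ /* walk the buffer one complete fpdu at a time; stop at a trailing partial */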
+ while (datalen) {
+ fpdu_len = irdma_ieq_get_fpdu_len(pfpdu, datap, buf->seqnum);
+ if (!fpdu_len) {
+ ibdev_dbg(to_ibdev(ieq->dev),
+ "IEQ: error bad fpdu len\n");
+ list_add(&buf->list, rxlist);
+ return -EINVAL;
+ }
+
+ if (datalen < fpdu_len) {
+ partial = true;
+ break;
+ }
+ crcptr = datap + fpdu_len - 4;
+ mpacrc = *(u32 *)crcptr;
+ if (ieq->check_crc)
+ ret = irdma_ieq_check_mpacrc(ieq->hash_desc, datap,
+ fpdu_len - 4, mpacrc);
+ if (ret) {
+ list_add(&buf->list, rxlist);
+ ibdev_dbg(to_ibdev(ieq->dev),
+ "ERR: IRDMA_ERR_MPA_CRC\n");
+ return -EINVAL;
+ }
+ full++;
+ pfpdu->fpdu_processed++;
+ ieq->fpdu_processed++;
+ datap += fpdu_len;
+ len += fpdu_len;
+ datalen -= fpdu_len;
+ }
+ if (full) {
+ /* copy full pdu's in the txbuf and send them out */
+ txbuf = irdma_puda_get_bufpool(ieq);
+ if (!txbuf) {
+ pfpdu->no_tx_bufs++;
+ list_add(&buf->list, rxlist);
+ return -ENOBUFS;
+ }
+ /* modify txbuf's buffer header */
+ irdma_ieq_setup_tx_buf(buf, txbuf);
+ /* copy full fpdu's to new buffer */
+ if (ieq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
+ txbuf->hdrlen, len);
+ txbuf->totallen = txbuf->hdrlen + len;
+ txbuf->ah_id = pfpdu->ah->ah_info.ah_idx;
+ } else {
+ irdma_ieq_copy_to_txbuf(buf, txbuf, ioffset,
+ buf->hdrlen, len);
+ txbuf->totallen = buf->hdrlen + len;
+ }
+ irdma_ieq_update_tcpip_info(txbuf, len, buf->seqnum);
+ print_hex_dump_debug("IEQ: IEQ TX BUFFER", DUMP_PREFIX_OFFSET,
+ 16, 8, txbuf->mem.va, txbuf->totallen,
+ false);
+ txbuf->do_lpb = true;
+ irdma_puda_send_buf(ieq, txbuf);
+
+ if (!datalen) {
+ pfpdu->rcv_nxt = buf->seqnum + len;
+ irdma_puda_ret_bufpool(ieq, buf);
+ return 0;
+ }
+ buf->data = datap;
+ buf->seqnum = seqnum + len;
+ buf->datalen = datalen;
+ pfpdu->rcv_nxt = buf->seqnum;
+ }
+ if (partial)
+ return irdma_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
+
+ return 0;
+}
+
+/**
+ * irdma_ieq_process_fpdus - process the fpdu buffers on a qp's rxlist
+ * @qp: qp whose partial fpdus are processed
+ * @ieq: ieq resource
+ */
+void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
+ struct irdma_puda_rsrc *ieq)
+{
+ struct irdma_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+ struct irdma_puda_buf *buf;
+ int status;
+
+ do {
+ if (list_empty(rxlist))
+ break;
+ buf = irdma_puda_get_listbuf(rxlist);
+ if (!buf) {
+ ibdev_dbg(to_ibdev(ieq->dev), "IEQ: error no buf\n");
+ break;
+ }
+ if (buf->seqnum != pfpdu->rcv_nxt) {
+ /* This could be out of order or missing packet */
+ pfpdu->out_of_order++;
+ list_add(&buf->list, rxlist);
+ break;
+ }
+ /* keep processing buffers from the head of the list */
+ status = irdma_ieq_process_buf(ieq, pfpdu, buf);
+ if (status == -EINVAL) {
+ pfpdu->mpa_crc_err = true;
+ while (!list_empty(rxlist)) {
+ buf = irdma_puda_get_listbuf(rxlist);
+ irdma_puda_ret_bufpool(ieq, buf);
+ pfpdu->crc_err++;
+ ieq->crc_err++;
+ }
+ /* create CQP for AE */
+ irdma_ieq_mpa_crc_ae(ieq->dev, qp);
+ }
+ } while (!status);
+}
+
+/**
+ * irdma_ieq_create_ah - create an address handle for IEQ
+ * @qp: qp pointer
+ * @buf: buf received on IEQ used to create AH
+ */
+static int irdma_ieq_create_ah(struct irdma_sc_qp *qp, struct irdma_puda_buf *buf)
+{
+ struct irdma_ah_info ah_info = {};
+
+ qp->pfpdu.ah_buf = buf;
+ irdma_puda_ieq_get_ah_info(qp, &ah_info);
+ return irdma_puda_create_ah(qp->vsi->dev, &ah_info, false,
+ IRDMA_PUDA_RSRC_TYPE_IEQ, qp,
+ &qp->pfpdu.ah);
+}
+
+/**
+ * irdma_ieq_handle_exception - handle qp's exception
+ * @ieq: ieq resource
+ * @qp: qp receiving exception
+ * @buf: receive buffer
+ */
+static void irdma_ieq_handle_exception(struct irdma_puda_rsrc *ieq,
+ struct irdma_sc_qp *qp,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_pfpdu *pfpdu = &qp->pfpdu;
+ u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
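+ /* TCP receive window from 32-bit word 23 of the QP's HW host context */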
+ u32 rcv_wnd = hw_host_ctx[23];
+ /* first partial seq # in q2 */
+ u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
+ struct list_head *rxlist = &pfpdu->rxlist;
+ unsigned long flags = 0;
+ u8 hw_rev = qp->dev->hw_attrs.uk_attrs.hw_rev;
+
+ print_hex_dump_debug("IEQ: IEQ RX BUFFER", DUMP_PREFIX_OFFSET, 16, 8,
+ buf->mem.va, buf->totallen, false);
+
+ spin_lock_irqsave(&pfpdu->lock, flags);
+ pfpdu->total_ieq_bufs++;
+ if (pfpdu->mpa_crc_err) {
+ pfpdu->crc_err++;
+ goto error;
+ }
+ if (pfpdu->mode && fps != pfpdu->fps) {
+ /* clean up qp as it is new partial sequence */
+ irdma_ieq_cleanup_qp(ieq, qp);
+ ibdev_dbg(to_ibdev(ieq->dev), "IEQ: restarting new partial\n");
+ pfpdu->mode = false;
+ }
+
+ if (!pfpdu->mode) {
+ print_hex_dump_debug("IEQ: Q2 BUFFER", DUMP_PREFIX_OFFSET, 16,
+ 8, (u64 *)qp->q2_buf, 128, false);
+ /* First_Partial_Sequence_Number check */
+ pfpdu->rcv_nxt = fps;
+ pfpdu->fps = fps;
+ pfpdu->mode = true;
+ pfpdu->max_fpdu_data = (buf->ipv4) ?
+ (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV4) :
+ (ieq->vsi->mtu - IRDMA_MTU_TO_MSS_IPV6);
+ pfpdu->pmode_count++;
+ ieq->pmode_count++;
+ INIT_LIST_HEAD(rxlist);
+ irdma_ieq_check_first_buf(buf, fps);
+ }
+
+ if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) {
+ pfpdu->bad_seq_num++;
+ ieq->bad_seq_num++;
+ goto error;
+ }
+
+ if (!list_empty(rxlist)) {
+ if (buf->seqnum != pfpdu->nextseqnum) {
+ irdma_send_ieq_ack(qp);
+ /* throw away out-of-order, duplicates */
+ goto error;
+ }
+ }
+ /* Insert buf before head */
+ list_add_tail(&buf->list, rxlist);
+ pfpdu->nextseqnum = buf->seqnum + buf->datalen;
+ pfpdu->lastrcv_buf = buf;
+ if (hw_rev >= IRDMA_GEN_2 && !pfpdu->ah) {
+ irdma_ieq_create_ah(qp, buf);
+ if (!pfpdu->ah)
+ goto error;
+ goto exit;
+ }
+ if (hw_rev == IRDMA_GEN_1)
+ irdma_ieq_process_fpdus(qp, ieq);
+ else if (pfpdu->ah && pfpdu->ah->ah_info.ah_valid)
+ irdma_ieq_process_fpdus(qp, ieq);
+exit:
+ spin_unlock_irqrestore(&pfpdu->lock, flags);
+
+ return;
+
+error:
+ irdma_puda_ret_bufpool(ieq, buf);
+ spin_unlock_irqrestore(&pfpdu->lock, flags);
+}
+
+/**
+ * irdma_ieq_receive - received exception buffer
+ * @vsi: VSI of device
+ * @buf: exception buffer received
+ */
+static void irdma_ieq_receive(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_puda_rsrc *ieq = vsi->ieq;
+ struct irdma_sc_qp *qp = NULL;
+ u32 wqe_idx = ieq->compl_rxwqe_idx;
+
+ qp = irdma_ieq_get_qp(vsi->dev, buf);
+ if (!qp) {
+ ieq->stats_bad_qp_id++;
+ irdma_puda_ret_bufpool(ieq, buf);
+ } else {
+ irdma_ieq_handle_exception(ieq, qp, buf);
+ }
+ /*
+ * irdma_puda_replenish_rq() uses ieq->rx_wqe_idx as the
+ * wqe_idx at which to start replenishing the rq
+ */
+ if (!ieq->rxq_invalid_cnt)
+ ieq->rx_wqe_idx = wqe_idx;
+ ieq->rxq_invalid_cnt++;
+}
+
+/**
+ * irdma_ieq_tx_compl - put back after sending completed exception buffer
+ * @vsi: sc VSI struct
+ * @sqwrid: pointer to puda buffer
+ */
+static void irdma_ieq_tx_compl(struct irdma_sc_vsi *vsi, void *sqwrid)
+{
+ struct irdma_puda_rsrc *ieq = vsi->ieq;
+ struct irdma_puda_buf *buf = sqwrid;
+
+ irdma_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * irdma_ieq_cleanup_qp - qp is being destroyed
+ * @ieq: ieq resource
+ * @qp: qp whose pending fpdu buffers are freed
+ */
+void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp)
+{
+ struct irdma_puda_buf *buf;
+ struct irdma_pfpdu *pfpdu = &qp->pfpdu;
+ struct list_head *rxlist = &pfpdu->rxlist;
+
+ if (qp->pfpdu.ah) {
+ irdma_puda_free_ah(ieq->dev, qp->pfpdu.ah);
+ qp->pfpdu.ah = NULL;
+ qp->pfpdu.ah_buf = NULL;
+ }
+
+ if (!pfpdu->mode)
+ return;
+
+ while (!list_empty(rxlist)) {
+ buf = irdma_puda_get_listbuf(rxlist);
+ irdma_puda_ret_bufpool(ieq, buf);
+ }
+}
diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h
new file mode 100644
index 0000000000..5f5124db6d
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/puda.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_PUDA_H
+#define IRDMA_PUDA_H
+
+#define IRDMA_IEQ_MPA_FRAMING 6
+#define IRDMA_TCP_OFFSET 40
+#define IRDMA_IPV4_PAD 20
+#define IRDMA_MRK_BLK_SZ 512
+
+enum puda_rsrc_type {
+ IRDMA_PUDA_RSRC_TYPE_ILQ = 1,
+ IRDMA_PUDA_RSRC_TYPE_IEQ,
+ IRDMA_PUDA_RSRC_TYPE_MAX, /* Must be last entry */
+};
+
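+/*
+ * Bring-up progress of an ILQ/IEQ resource; irdma_puda_dele_rsrc()
+ * tears down by falling through from the highest state reached.
+ */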
+enum puda_rsrc_complete {
+ PUDA_CQ_CREATED = 1,
+ PUDA_QP_CREATED,
+ PUDA_TX_COMPLETE,
+ PUDA_RX_COMPLETE,
+ PUDA_HASH_CRC_COMPLETE,
+};
+
+struct irdma_sc_dev;
+struct irdma_sc_qp;
+struct irdma_sc_cq;
+
+struct irdma_puda_cmpl_info {
+ struct irdma_qp_uk *qp;
+ u8 q_type;
+ u8 l3proto;
+ u8 l4proto;
+ u16 vlan;
+ u32 payload_len;
+ u32 compl_error; /* No_err=0, else major and minor err code */
+ u32 qp_id;
+ u32 wqe_idx;
+ bool ipv4:1;
+ bool smac_valid:1;
+ bool vlan_valid:1;
+ u8 smac[ETH_ALEN];
+};
+
+struct irdma_puda_send_info {
+ u64 paddr; /* Physical address */
+ u32 len;
+ u32 ah_id;
+ u8 tcplen;
+ u8 maclen;
+ bool ipv4:1;
+ bool do_lpb:1;
+ void *scratch;
+};
+
+struct irdma_puda_buf {
+ struct list_head list; /* MUST be first entry */
+ struct irdma_dma_mem mem; /* DMA memory for the buffer */
+ struct irdma_puda_buf *next; /* for alloclist in rsrc struct */
+ struct irdma_virt_mem buf_mem; /* Buffer memory for this buffer */
+ void *scratch;
+ u8 *iph;
+ u8 *tcph;
+ u8 *data;
+ u16 datalen;
+ u16 vlan_id;
+ u8 tcphlen; /* tcp length in bytes */
+ u8 maclen; /* mac length in bytes */
+ u32 totallen; /* maclen+iphlen+tcphlen+datalen */
+ refcount_t refcount;
+ u8 hdrlen;
+ bool ipv4:1;
+ bool vlan_valid:1;
+ bool do_lpb:1; /* Loopback buffer */
+ bool smac_valid:1;
+ u32 seqnum;
+ u32 ah_id;
+ u8 smac[ETH_ALEN];
+ struct irdma_sc_vsi *vsi;
+};
+
+struct irdma_puda_rsrc_info {
+ void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
+ void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
+ enum puda_rsrc_type type; /* ILQ or IEQ */
+ u32 count;
+ u32 pd_id;
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
+ u16 buf_size;
+ u8 stats_idx;
+ bool stats_idx_valid:1;
+ int abi_ver;
+};
+
+struct irdma_puda_rsrc {
+ struct irdma_sc_cq cq;
+ struct irdma_sc_qp qp;
+ struct irdma_sc_pd sc_pd;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_dma_mem cqmem;
+ struct irdma_dma_mem qpmem;
+ struct irdma_virt_mem ilq_mem;
+ enum puda_rsrc_complete cmpl;
+ enum puda_rsrc_type type;
+ u16 buf_size; /* buf must be max datalen + tcpip hdr + mac */
+ u32 cq_id;
+ u32 qp_id;
+ u32 sq_size;
+ u32 rq_size;
+ u32 cq_size;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 compl_rxwqe_idx;
+ u32 rx_wqe_idx;
+ u32 rxq_invalid_cnt;
+ u32 tx_wqe_avail_cnt;
+ struct shash_desc *hash_desc;
+ struct list_head txpend;
+ struct list_head bufpool; /* free buffers pool list for recv and xmit */
+ u32 alloc_buf_count;
+ u32 avail_buf_count; /* snapshot of currently available buffers */
+ spinlock_t bufpool_lock;
+ struct irdma_puda_buf *alloclist;
+ void (*receive)(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *buf);
+ void (*xmit_complete)(struct irdma_sc_vsi *vsi, void *sqwrid);
+ /* puda stats */
+ u64 stats_buf_alloc_fail;
+ u64 stats_pkt_rcvd;
+ u64 stats_pkt_sent;
+ u64 stats_rcvd_pkt_err;
+ u64 stats_sent_pkt_q;
+ u64 stats_bad_qp_id;
+ /* IEQ stats */
+ u64 fpdu_processed;
+ u64 bad_seq_num;
+ u64 crc_err;
+ u64 pmode_count;
+ u64 partials_handled;
+ u8 stats_idx;
+ bool check_crc:1;
+ bool stats_idx_valid:1;
+};
+
+struct irdma_puda_buf *irdma_puda_get_bufpool(struct irdma_puda_rsrc *rsrc);
+void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf);
+void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
+ struct irdma_puda_buf *buf);
+int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info);
+int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
+ struct irdma_puda_rsrc_info *info);
+void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
+ bool reset);
+int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
+ u32 *compl_err);
+
+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf);
+int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf);
+int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, u32 val);
+int irdma_init_hash_desc(struct shash_desc **desc);
+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+void irdma_free_hash_desc(struct shash_desc *desc);
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum);
+int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
+int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
+void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
+void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
+ struct irdma_ah_info *ah_info);
+int irdma_puda_create_ah(struct irdma_sc_dev *dev,
+ struct irdma_ah_info *ah_info, bool wait,
+ enum puda_rsrc_type type, void *cb_param,
+ struct irdma_sc_ah **ah);
+void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah);
+void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
+ struct irdma_puda_rsrc *ieq);
+void irdma_ieq_cleanup_qp(struct irdma_puda_rsrc *ieq, struct irdma_sc_qp *qp);
+#endif /* IRDMA_PUDA_H */
diff --git a/drivers/infiniband/hw/irdma/trace.c b/drivers/infiniband/hw/irdma/trace.c
new file mode 100644
index 0000000000..b5133f4137
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/trace.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2019 Intel Corporation */
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ if (ipv4) {
+ __be32 myaddr = htonl(*addr);
+
+ trace_seq_printf(p, "%pI4:%d", &myaddr, htons(port));
+ } else {
+ trace_seq_printf(p, "%pI6:%d", addr, htons(port));
+ }
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *parse_iw_event_type(enum iw_cm_event_type iw_type)
+{
+ switch (iw_type) {
+ case IW_CM_EVENT_CONNECT_REQUEST:
+ return "IwRequest";
+ case IW_CM_EVENT_CONNECT_REPLY:
+ return "IwReply";
+ case IW_CM_EVENT_ESTABLISHED:
+ return "IwEstablished";
+ case IW_CM_EVENT_DISCONNECT:
+ return "IwDisconnect";
+ case IW_CM_EVENT_CLOSE:
+ return "IwClose";
+ }
+
+ return "Unknown";
+}
+
+const char *parse_cm_event_type(enum irdma_cm_event_type cm_type)
+{
+ switch (cm_type) {
+ case IRDMA_CM_EVENT_ESTABLISHED:
+ return "CmEstablished";
+ case IRDMA_CM_EVENT_MPA_REQ:
+ return "CmMPA_REQ";
+ case IRDMA_CM_EVENT_MPA_CONNECT:
+ return "CmMPA_CONNECT";
+ case IRDMA_CM_EVENT_MPA_ACCEPT:
+ return "CmMPA_ACCEPT";
+ case IRDMA_CM_EVENT_MPA_REJECT:
+ return "CmMPA_REJECT";
+ case IRDMA_CM_EVENT_MPA_ESTABLISHED:
+ return "CmMPA_ESTABLISHED";
+ case IRDMA_CM_EVENT_CONNECTED:
+ return "CmConnected";
+ case IRDMA_CM_EVENT_RESET:
+ return "CmReset";
+ case IRDMA_CM_EVENT_ABORTED:
+ return "CmAborted";
+ case IRDMA_CM_EVENT_UNKNOWN:
+ return "none";
+ }
+ return "Unknown";
+}
+
+const char *parse_cm_state(enum irdma_cm_node_state state)
+{
+ switch (state) {
+ case IRDMA_CM_STATE_UNKNOWN:
+ return "UNKNOWN";
+ case IRDMA_CM_STATE_INITED:
+ return "INITED";
+ case IRDMA_CM_STATE_LISTENING:
+ return "LISTENING";
+ case IRDMA_CM_STATE_SYN_RCVD:
+ return "SYN_RCVD";
+ case IRDMA_CM_STATE_SYN_SENT:
+ return "SYN_SENT";
+ case IRDMA_CM_STATE_ONE_SIDE_ESTABLISHED:
+ return "ONE_SIDE_ESTABLISHED";
+ case IRDMA_CM_STATE_ESTABLISHED:
+ return "ESTABLISHED";
+ case IRDMA_CM_STATE_ACCEPTING:
+ return "ACCEPTING";
+ case IRDMA_CM_STATE_MPAREQ_SENT:
+ return "MPAREQ_SENT";
+ case IRDMA_CM_STATE_MPAREQ_RCVD:
+ return "MPAREQ_RCVD";
+ case IRDMA_CM_STATE_MPAREJ_RCVD:
+ return "MPAREJ_RECVD";
+ case IRDMA_CM_STATE_OFFLOADED:
+ return "OFFLOADED";
+ case IRDMA_CM_STATE_FIN_WAIT1:
+ return "FIN_WAIT1";
+ case IRDMA_CM_STATE_FIN_WAIT2:
+ return "FIN_WAIT2";
+ case IRDMA_CM_STATE_CLOSE_WAIT:
+ return "CLOSE_WAIT";
+ case IRDMA_CM_STATE_TIME_WAIT:
+ return "TIME_WAIT";
+ case IRDMA_CM_STATE_LAST_ACK:
+ return "LAST_ACK";
+ case IRDMA_CM_STATE_CLOSING:
+ return "CLOSING";
+ case IRDMA_CM_STATE_LISTENER_DESTROYED:
+ return "LISTENER_DESTROYED";
+ case IRDMA_CM_STATE_CLOSED:
+ return "CLOSED";
+ }
+ return ("Bad state");
+}
diff --git a/drivers/infiniband/hw/irdma/trace.h b/drivers/infiniband/hw/irdma/trace.h
new file mode 100644
index 0000000000..702e4efb01
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/trace.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2019 Intel Corporation */
+#include "trace_cm.h"
diff --git a/drivers/infiniband/hw/irdma/trace_cm.h b/drivers/infiniband/hw/irdma/trace_cm.h
new file mode 100644
index 0000000000..f633fb3433
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/trace_cm.h
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2019 - 2021 Intel Corporation */
+#if !defined(__TRACE_CM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_CM_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+
+#include "main.h"
+
+const char *print_ip_addr(struct trace_seq *p, u32 *addr, u16 port, bool ipv4);
+const char *parse_iw_event_type(enum iw_cm_event_type iw_type);
+const char *parse_cm_event_type(enum irdma_cm_event_type cm_type);
+const char *parse_cm_state(enum irdma_cm_node_state);
+#define __print_ip_addr(addr, port, ipv4) print_ip_addr(p, addr, port, ipv4)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irdma_cm
+
+TRACE_EVENT(irdma_create_listen,
+ TP_PROTO(struct irdma_device *iwdev, struct irdma_cm_info *cm_info),
+ TP_ARGS(iwdev, cm_info),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __dynamic_array(u32, laddr, 4)
+ __field(u16, lport)
+ __field(bool, ipv4)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ __entry->lport = cm_info->loc_port;
+ __entry->ipv4 = cm_info->ipv4;
+ memcpy(__get_dynamic_array(laddr),
+ cm_info->loc_addr, 4);
+ ),
+ TP_printk("iwdev=%p loc: %s",
+ __entry->iwdev,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+TRACE_EVENT(irdma_dec_refcnt_listen,
+ TP_PROTO(struct irdma_cm_listener *listener, void *caller),
+ TP_ARGS(listener, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u32, refcnt)
+ __dynamic_array(u32, laddr, 4)
+ __field(u16, lport)
+ __field(bool, ipv4)
+ __field(void *, caller)
+ ),
+ TP_fast_assign(__entry->iwdev = listener->iwdev;
+ __entry->lport = listener->loc_port;
+ __entry->ipv4 = listener->ipv4;
+ memcpy(__get_dynamic_array(laddr),
+ listener->loc_addr, 4);
+ ),
+ TP_printk("iwdev=%p caller=%pS loc: %s",
+ __entry->iwdev,
+ __entry->caller,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+DECLARE_EVENT_CLASS(listener_template,
+ TP_PROTO(struct irdma_cm_listener *listener),
+ TP_ARGS(listener),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u16, lport)
+ __field(u16, vlan_id)
+ __field(bool, ipv4)
+ __field(enum irdma_cm_listener_state,
+ state)
+ __dynamic_array(u32, laddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = listener->iwdev;
+ __entry->lport = listener->loc_port;
+ __entry->vlan_id = listener->vlan_id;
+ __entry->ipv4 = listener->ipv4;
+ __entry->state = listener->listener_state;
+ memcpy(__get_dynamic_array(laddr),
+ listener->loc_addr, 4);
+ ),
+ TP_printk("iwdev=%p vlan=%d loc: %s",
+ __entry->iwdev,
+ __entry->vlan_id,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(listener_template, irdma_find_listener,
+ TP_PROTO(struct irdma_cm_listener *listener),
+ TP_ARGS(listener));
+
+DEFINE_EVENT(listener_template, irdma_del_multiple_qhash,
+ TP_PROTO(struct irdma_cm_listener *listener),
+ TP_ARGS(listener));
+
+TRACE_EVENT(irdma_negotiate_mpa_v2,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node),
+ TP_STRUCT__entry(__field(struct irdma_cm_node *, cm_node)
+ __field(u16, ord_size)
+ __field(u16, ird_size)
+ ),
+ TP_fast_assign(__entry->cm_node = cm_node;
+ __entry->ord_size = cm_node->ord_size;
+ __entry->ird_size = cm_node->ird_size;
+ ),
+ TP_printk("MPVA2 Negotiated cm_node=%p ORD:[%d], IRD:[%d]",
+ __entry->cm_node,
+ __entry->ord_size,
+ __entry->ird_size
+ )
+);
+
+DECLARE_EVENT_CLASS(tos_template,
+ TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
+ TP_ARGS(iwdev, tos, user_pri),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u8, tos)
+ __field(u8, user_pri)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ __entry->tos = tos;
+ __entry->user_pri = user_pri;
+ ),
+ TP_printk("iwdev=%p TOS:[%d] UP:[%d]",
+ __entry->iwdev,
+ __entry->tos,
+ __entry->user_pri
+ )
+);
+
+DEFINE_EVENT(tos_template, irdma_listener_tos,
+ TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
+ TP_ARGS(iwdev, tos, user_pri));
+
+DEFINE_EVENT(tos_template, irdma_dcb_tos,
+ TP_PROTO(struct irdma_device *iwdev, u8 tos, u8 user_pri),
+ TP_ARGS(iwdev, tos, user_pri));
+
+DECLARE_EVENT_CLASS(qhash_template,
+ TP_PROTO(struct irdma_device *iwdev,
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
+ TP_ARGS(iwdev, listener, dev_addr),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(u16, lport)
+ __field(u16, vlan_id)
+ __field(bool, ipv4)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, mac, ETH_ALEN)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ __entry->lport = listener->loc_port;
+ __entry->vlan_id = listener->vlan_id;
+ __entry->ipv4 = listener->ipv4;
+ memcpy(__get_dynamic_array(laddr),
+ listener->loc_addr, 4);
+ ether_addr_copy(__get_dynamic_array(mac),
+ dev_addr);
+ ),
+ TP_printk("iwdev=%p vlan=%d MAC=%6phC loc: %s",
+ __entry->iwdev,
+ __entry->vlan_id,
+ __get_dynamic_array(mac),
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(qhash_template, irdma_add_mqh_6,
+ TP_PROTO(struct irdma_device *iwdev,
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
+ TP_ARGS(iwdev, listener, dev_addr));
+
+DEFINE_EVENT(qhash_template, irdma_add_mqh_4,
+ TP_PROTO(struct irdma_device *iwdev,
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
+ TP_ARGS(iwdev, listener, dev_addr));
+
+TRACE_EVENT(irdma_addr_resolve,
+ TP_PROTO(struct irdma_device *iwdev, char *dev_addr),
+ TP_ARGS(iwdev, dev_addr),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __dynamic_array(u8, mac, ETH_ALEN)
+ ),
+ TP_fast_assign(__entry->iwdev = iwdev;
+ ether_addr_copy(__get_dynamic_array(mac), dev_addr);
+ ),
+ TP_printk("iwdev=%p MAC=%6phC", __entry->iwdev,
+ __get_dynamic_array(mac)
+ )
+);
+
+TRACE_EVENT(irdma_send_cm_event,
+ TP_PROTO(struct irdma_cm_node *cm_node, struct iw_cm_id *cm_id,
+ enum iw_cm_event_type type, int status, void *caller),
+ TP_ARGS(cm_node, cm_id, type, status, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(struct iw_cm_id *, cm_id)
+ __field(u32, refcount)
+ __field(u16, lport)
+ __field(u16, rport)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, ipv4)
+ __field(u16, vlan_id)
+ __field(int, accel)
+ __field(enum iw_cm_event_type, type)
+ __field(int, status)
+ __field(void *, caller)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, raddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->cm_id = cm_id;
+ __entry->refcount = refcount_read(&cm_node->refcnt);
+ __entry->state = cm_node->state;
+ __entry->lport = cm_node->loc_port;
+ __entry->rport = cm_node->rem_port;
+ __entry->ipv4 = cm_node->ipv4;
+ __entry->vlan_id = cm_node->vlan_id;
+ __entry->accel = cm_node->accelerated;
+ __entry->type = type;
+ __entry->status = status;
+ __entry->caller = caller;
+ memcpy(__get_dynamic_array(laddr),
+ cm_node->loc_addr, 4);
+ memcpy(__get_dynamic_array(raddr),
+ cm_node->rem_addr, 4);
+ ),
+ TP_printk("iwdev=%p caller=%pS cm_id=%p node=%p refcnt=%d vlan_id=%d accel=%d state=%s event_type=%s status=%d loc: %s rem: %s",
+ __entry->iwdev,
+ __entry->caller,
+ __entry->cm_id,
+ __entry->cm_node,
+ __entry->refcount,
+ __entry->vlan_id,
+ __entry->accel,
+ parse_cm_state(__entry->state),
+ parse_iw_event_type(__entry->type),
+ __entry->status,
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4),
+ __print_ip_addr(__get_dynamic_array(raddr),
+ __entry->rport, __entry->ipv4)
+ )
+);
+
+TRACE_EVENT(irdma_send_cm_event_no_node,
+ TP_PROTO(struct iw_cm_id *cm_id, enum iw_cm_event_type type,
+ int status, void *caller),
+ TP_ARGS(cm_id, type, status, caller),
+ TP_STRUCT__entry(__field(struct iw_cm_id *, cm_id)
+ __field(enum iw_cm_event_type, type)
+ __field(int, status)
+ __field(void *, caller)
+ ),
+ TP_fast_assign(__entry->cm_id = cm_id;
+ __entry->type = type;
+ __entry->status = status;
+ __entry->caller = caller;
+ ),
+ TP_printk("cm_id=%p caller=%pS event_type=%s status=%d",
+ __entry->cm_id,
+ __entry->caller,
+ parse_iw_event_type(__entry->type),
+ __entry->status
+ )
+);
+
+DECLARE_EVENT_CLASS(cm_node_template,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(u32, refcount)
+ __field(u16, lport)
+ __field(u16, rport)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, ipv4)
+ __field(u16, vlan_id)
+ __field(int, accel)
+ __field(enum irdma_cm_event_type, type)
+ __field(void *, caller)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, raddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->refcount = refcount_read(&cm_node->refcnt);
+ __entry->state = cm_node->state;
+ __entry->lport = cm_node->loc_port;
+ __entry->rport = cm_node->rem_port;
+ __entry->ipv4 = cm_node->ipv4;
+ __entry->vlan_id = cm_node->vlan_id;
+ __entry->accel = cm_node->accelerated;
+ __entry->type = type;
+ __entry->caller = caller;
+ memcpy(__get_dynamic_array(laddr),
+ cm_node->loc_addr, 4);
+ memcpy(__get_dynamic_array(raddr),
+ cm_node->rem_addr, 4);
+ ),
+ TP_printk("iwdev=%p caller=%pS node=%p refcnt=%d vlan_id=%d accel=%d state=%s event_type=%s loc: %s rem: %s",
+ __entry->iwdev,
+ __entry->caller,
+ __entry->cm_node,
+ __entry->refcount,
+ __entry->vlan_id,
+ __entry->accel,
+ parse_cm_state(__entry->state),
+ parse_cm_event_type(__entry->type),
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4),
+ __print_ip_addr(__get_dynamic_array(raddr),
+ __entry->rport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(cm_node_template, irdma_create_event,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_accept,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_connect,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_reject,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_find_node,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_send_reset,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_rem_ref_cm_node,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+DEFINE_EVENT(cm_node_template, irdma_cm_event_handler,
+ TP_PROTO(struct irdma_cm_node *cm_node,
+ enum irdma_cm_event_type type, void *caller),
+ TP_ARGS(cm_node, type, caller));
+
+TRACE_EVENT(open_err_template,
+ TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
+ TP_ARGS(cm_node, reset, caller),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, reset)
+ __field(void *, caller)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->state = cm_node->state;
+ __entry->reset = reset;
+ __entry->caller = caller;
+ ),
+ TP_printk("iwdev=%p caller=%pS node%p reset=%d state=%s",
+ __entry->iwdev,
+ __entry->caller,
+ __entry->cm_node,
+ __entry->reset,
+ parse_cm_state(__entry->state)
+ )
+);
+
+DEFINE_EVENT(open_err_template, irdma_active_open_err,
+ TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
+ TP_ARGS(cm_node, reset, caller));
+
+DEFINE_EVENT(open_err_template, irdma_passive_open_err,
+ TP_PROTO(struct irdma_cm_node *cm_node, bool reset, void *caller),
+ TP_ARGS(cm_node, reset, caller));
+
+DECLARE_EVENT_CLASS(cm_node_ah_template,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node),
+ TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
+ __field(struct irdma_cm_node *, cm_node)
+ __field(struct irdma_sc_ah *, ah)
+ __field(u32, refcount)
+ __field(u16, lport)
+ __field(u16, rport)
+ __field(enum irdma_cm_node_state, state)
+ __field(bool, ipv4)
+ __field(u16, vlan_id)
+ __field(int, accel)
+ __dynamic_array(u32, laddr, 4)
+ __dynamic_array(u32, raddr, 4)
+ ),
+ TP_fast_assign(__entry->iwdev = cm_node->iwdev;
+ __entry->cm_node = cm_node;
+ __entry->ah = cm_node->ah;
+ __entry->refcount = refcount_read(&cm_node->refcnt);
+ __entry->lport = cm_node->loc_port;
+ __entry->rport = cm_node->rem_port;
+ __entry->state = cm_node->state;
+ __entry->ipv4 = cm_node->ipv4;
+ __entry->vlan_id = cm_node->vlan_id;
+ __entry->accel = cm_node->accelerated;
+ memcpy(__get_dynamic_array(laddr),
+ cm_node->loc_addr, 4);
+ memcpy(__get_dynamic_array(raddr),
+ cm_node->rem_addr, 4);
+ ),
+ TP_printk("iwdev=%p node=%p ah=%p refcnt=%d vlan_id=%d accel=%d state=%s loc: %s rem: %s",
+ __entry->iwdev,
+ __entry->cm_node,
+ __entry->ah,
+ __entry->refcount,
+ __entry->vlan_id,
+ __entry->accel,
+ parse_cm_state(__entry->state),
+ __print_ip_addr(__get_dynamic_array(laddr),
+ __entry->lport, __entry->ipv4),
+ __print_ip_addr(__get_dynamic_array(raddr),
+ __entry->rport, __entry->ipv4)
+ )
+);
+
+DEFINE_EVENT(cm_node_ah_template, irdma_cm_free_ah,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node));
+
+DEFINE_EVENT(cm_node_ah_template, irdma_create_ah,
+ TP_PROTO(struct irdma_cm_node *cm_node),
+ TP_ARGS(cm_node));
+
+#endif /* __TRACE_CM_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_cm
+#include <trace/define_trace.h>
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
new file mode 100644
index 0000000000..c84ec4dd85
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -0,0 +1,1494 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_TYPE_H
+#define IRDMA_TYPE_H
+#include "osdep.h"
+#include "irdma.h"
+#include "user.h"
+#include "hmc.h"
+#include "uda.h"
+#include "ws.h"
+#define IRDMA_DEBUG_ERR "ERR"
+#define IRDMA_DEBUG_INIT "INIT"
+#define IRDMA_DEBUG_DEV "DEV"
+#define IRDMA_DEBUG_CM "CM"
+#define IRDMA_DEBUG_VERBS "VERBS"
+#define IRDMA_DEBUG_PUDA "PUDA"
+#define IRDMA_DEBUG_ILQ "ILQ"
+#define IRDMA_DEBUG_IEQ "IEQ"
+#define IRDMA_DEBUG_QP "QP"
+#define IRDMA_DEBUG_CQ "CQ"
+#define IRDMA_DEBUG_MR "MR"
+#define IRDMA_DEBUG_PBLE "PBLE"
+#define IRDMA_DEBUG_WQE "WQE"
+#define IRDMA_DEBUG_AEQ "AEQ"
+#define IRDMA_DEBUG_CQP "CQP"
+#define IRDMA_DEBUG_HMC "HMC"
+#define IRDMA_DEBUG_USER "USER"
+#define IRDMA_DEBUG_VIRT "VIRT"
+#define IRDMA_DEBUG_DCB "DCB"
+#define IRDMA_DEBUG_CQE "CQE"
+#define IRDMA_DEBUG_CLNT "CLNT"
+#define IRDMA_DEBUG_WS "WS"
+#define IRDMA_DEBUG_STATS "STATS"
+
+enum irdma_page_size {
+ IRDMA_PAGE_SIZE_4K = 0,
+ IRDMA_PAGE_SIZE_2M,
+ IRDMA_PAGE_SIZE_1G,
+};
+
+enum irdma_hdrct_flags {
+ DDP_LEN_FLAG = 0x80,
+ DDP_HDR_FLAG = 0x40,
+ RDMA_HDR_FLAG = 0x20,
+};
+
+enum irdma_term_layers {
+ LAYER_RDMA = 0,
+ LAYER_DDP = 1,
+ LAYER_MPA = 2,
+};
+
+enum irdma_term_error_types {
+ RDMAP_REMOTE_PROT = 1,
+ RDMAP_REMOTE_OP = 2,
+ DDP_CATASTROPHIC = 0,
+ DDP_TAGGED_BUF = 1,
+ DDP_UNTAGGED_BUF = 2,
+ DDP_LLP = 3,
+};
+
+enum irdma_term_rdma_errors {
+ RDMAP_INV_STAG = 0x00,
+ RDMAP_INV_BOUNDS = 0x01,
+ RDMAP_ACCESS = 0x02,
+ RDMAP_UNASSOC_STAG = 0x03,
+ RDMAP_TO_WRAP = 0x04,
+ RDMAP_INV_RDMAP_VER = 0x05,
+ RDMAP_UNEXPECTED_OP = 0x06,
+ RDMAP_CATASTROPHIC_LOCAL = 0x07,
+ RDMAP_CATASTROPHIC_GLOBAL = 0x08,
+ RDMAP_CANT_INV_STAG = 0x09,
+ RDMAP_UNSPECIFIED = 0xff,
+};
+
+enum irdma_term_ddp_errors {
+ DDP_CATASTROPHIC_LOCAL = 0x00,
+ DDP_TAGGED_INV_STAG = 0x00,
+ DDP_TAGGED_BOUNDS = 0x01,
+ DDP_TAGGED_UNASSOC_STAG = 0x02,
+ DDP_TAGGED_TO_WRAP = 0x03,
+ DDP_TAGGED_INV_DDP_VER = 0x04,
+ DDP_UNTAGGED_INV_QN = 0x01,
+ DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
+ DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
+ DDP_UNTAGGED_INV_MO = 0x04,
+ DDP_UNTAGGED_INV_TOO_LONG = 0x05,
+ DDP_UNTAGGED_INV_DDP_VER = 0x06,
+};
+
+enum irdma_term_mpa_errors {
+ MPA_CLOSED = 0x01,
+ MPA_CRC = 0x02,
+ MPA_MARKER = 0x03,
+ MPA_REQ_RSP = 0x04,
+};
+
+enum irdma_qp_event_type {
+ IRDMA_QP_EVENT_CATASTROPHIC,
+ IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
+};
+
+enum irdma_hw_stats_index {
+ /* gen1 - 32-bit */
+ IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
+ IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
+ IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
+ IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
+ IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
+ IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
+ IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
+ IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
+ IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
+ IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
+ /* gen1 - 64-bit */
+ IRDMA_HW_STAT_INDEX_IP4RXOCTS = 10,
+ IRDMA_HW_STAT_INDEX_IP4RXPKTS = 11,
+ IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 12,
+ IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 13,
+ IRDMA_HW_STAT_INDEX_IP4TXOCTS = 14,
+ IRDMA_HW_STAT_INDEX_IP4TXPKTS = 15,
+ IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 16,
+ IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 17,
+ IRDMA_HW_STAT_INDEX_IP6RXOCTS = 18,
+ IRDMA_HW_STAT_INDEX_IP6RXPKTS = 19,
+ IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 20,
+ IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 21,
+ IRDMA_HW_STAT_INDEX_IP6TXOCTS = 22,
+ IRDMA_HW_STAT_INDEX_IP6TXPKTS = 23,
+ IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 24,
+ IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 25,
+ IRDMA_HW_STAT_INDEX_TCPRXSEGS = 26,
+ IRDMA_HW_STAT_INDEX_TCPTXSEG = 27,
+ IRDMA_HW_STAT_INDEX_RDMARXRDS = 28,
+ IRDMA_HW_STAT_INDEX_RDMARXSNDS = 29,
+ IRDMA_HW_STAT_INDEX_RDMARXWRS = 30,
+ IRDMA_HW_STAT_INDEX_RDMATXRDS = 31,
+ IRDMA_HW_STAT_INDEX_RDMATXSNDS = 32,
+ IRDMA_HW_STAT_INDEX_RDMATXWRS = 33,
+ IRDMA_HW_STAT_INDEX_RDMAVBND = 34,
+ IRDMA_HW_STAT_INDEX_RDMAVINV = 35,
+ IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 36,
+ IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 37,
+ IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 38,
+ IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 39,
+ IRDMA_HW_STAT_INDEX_UDPRXPKTS = 40,
+ IRDMA_HW_STAT_INDEX_UDPTXPKTS = 41,
+ IRDMA_HW_STAT_INDEX_MAX_GEN_1 = 42, /* Must be same value as next entry */
+ /* gen2 - 64-bit */
+ IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 42,
+ /* gen2 - 32-bit */
+ IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 43,
+ IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
+ IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
+ IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
+};
+
+enum irdma_feature_type {
+ IRDMA_FEATURE_FW_INFO = 0,
+ IRDMA_HW_VERSION_INFO = 1,
+ IRDMA_QSETS_MAX = 26,
+ IRDMA_MAX_FEATURES, /* Must be last entry */
+};
+
+enum irdma_sched_prio_type {
+ IRDMA_PRIO_WEIGHTED_RR = 1,
+ IRDMA_PRIO_STRICT = 2,
+ IRDMA_PRIO_WEIGHTED_STRICT = 3,
+};
+
+enum irdma_vm_vf_type {
+ IRDMA_VF_TYPE = 0,
+ IRDMA_VM_TYPE,
+ IRDMA_PF_TYPE,
+};
+
+enum irdma_cqp_hmc_profile {
+ IRDMA_HMC_PROFILE_DEFAULT = 1,
+ IRDMA_HMC_PROFILE_FAVOR_VF = 2,
+ IRDMA_HMC_PROFILE_EQUAL = 3,
+};
+
+enum irdma_quad_entry_type {
+ IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
+ IRDMA_QHASH_TYPE_TCP_SYN,
+ IRDMA_QHASH_TYPE_UDP_UNICAST,
+ IRDMA_QHASH_TYPE_UDP_MCAST,
+ IRDMA_QHASH_TYPE_ROCE_MCAST,
+ IRDMA_QHASH_TYPE_ROCEV2_HW,
+};
+
+enum irdma_quad_hash_manage_type {
+ IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
+ IRDMA_QHASH_MANAGE_TYPE_ADD,
+ IRDMA_QHASH_MANAGE_TYPE_MODIFY,
+};
+
+enum irdma_syn_rst_handling {
+ IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
+ IRDMA_SYN_RST_HANDLING_HW_TCP,
+ IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
+ IRDMA_SYN_RST_HANDLING_FW_TCP,
+};
+
+enum irdma_queue_type {
+ IRDMA_QUEUE_TYPE_SQ_RQ = 0,
+ IRDMA_QUEUE_TYPE_CQP,
+};
+
+struct irdma_sc_dev;
+struct irdma_vsi_pestat;
+
+struct irdma_dcqcn_cc_params {
+ u8 cc_cfg_valid;
+ u8 min_dec_factor;
+ u8 min_rate;
+ u8 dcqcn_f;
+ u16 rai_factor;
+ u16 hai_factor;
+ u16 dcqcn_t;
+ u32 dcqcn_b;
+ u32 rreduce_mperiod;
+};
+
+struct irdma_cqp_init_info {
+ u64 cqp_compl_ctx;
+ u64 host_ctx_pa;
+ u64 sq_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_cqp_quanta *sq;
+ struct irdma_dcqcn_cc_params dcqcn_params;
+ __le64 *host_ctx;
+ u64 *scratch_array;
+ u32 sq_size;
+ u16 hw_maj_ver;
+ u16 hw_min_ver;
+ u8 struct_ver;
+ u8 hmc_profile;
+ u8 ena_vf_count;
+ u8 ceqs_per_vf;
+ bool en_datacenter_tcp:1;
+ bool disable_packed:1;
+ bool rocev2_rto_policy:1;
+ enum irdma_protocol_used protocol_used;
+};
+
+struct irdma_terminate_hdr {
+ u8 layer_etype;
+ u8 error_code;
+ u8 hdrct;
+ u8 rsvd;
+};
+
+struct irdma_cqp_sq_wqe {
+ __le64 buf[IRDMA_CQP_WQE_SIZE];
+};
+
+struct irdma_sc_aeqe {
+ __le64 buf[IRDMA_AEQE_SIZE];
+};
+
+struct irdma_ceqe {
+ __le64 buf[IRDMA_CEQE_SIZE];
+};
+
+struct irdma_cqp_ctx {
+ __le64 buf[IRDMA_CQP_CTX_SIZE];
+};
+
+struct irdma_cq_shadow_area {
+ __le64 buf[IRDMA_SHADOW_AREA_SIZE];
+};
+
+struct irdma_dev_hw_stats_offsets {
+ u32 stats_offset[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
+};
+
+struct irdma_dev_hw_stats {
+ u64 stats_val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
+};
+
+struct irdma_gather_stats {
+ u64 val[IRDMA_GATHER_STATS_BUF_SIZE / sizeof(u64)];
+};
+
+struct irdma_hw_stat_map {
+ u16 byteoff;
+ u8 bitoff;
+ u64 bitmask;
+};
+
+struct irdma_stats_gather_info {
+ bool use_hmc_fcn_index:1;
+ bool use_stats_inst:1;
+ u8 hmc_fcn_index;
+ u8 stats_inst_index;
+ struct irdma_dma_mem stats_buff_mem;
+ void *gather_stats_va;
+ void *last_gather_stats_va;
+};
+
+struct irdma_vsi_pestat {
+ struct irdma_hw *hw;
+ struct irdma_dev_hw_stats hw_stats;
+ struct irdma_stats_gather_info gather_info;
+ struct timer_list stats_timer;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_dev_hw_stats last_hw_stats;
+ spinlock_t lock; /* rdma stats lock */
+};
+
+struct irdma_hw {
+ u8 __iomem *hw_addr;
+ u8 __iomem *priv_hw_addr;
+ struct device *device;
+ struct irdma_hmc_info hmc;
+};
+
+struct irdma_pfpdu {
+ struct list_head rxlist;
+ u32 rcv_nxt;
+ u32 fps;
+ u32 max_fpdu_data;
+ u32 nextseqnum;
+ u32 rcv_start_seq;
+ bool mode:1;
+ bool mpa_crc_err:1;
+ u8 marker_len;
+ u64 total_ieq_bufs;
+ u64 fpdu_processed;
+ u64 bad_seq_num;
+ u64 crc_err;
+ u64 no_tx_bufs;
+ u64 tx_err;
+ u64 out_of_order;
+ u64 pmode_count;
+ struct irdma_sc_ah *ah;
+ struct irdma_puda_buf *ah_buf;
+ spinlock_t lock; /* fpdu processing lock */
+ struct irdma_puda_buf *lastrcv_buf;
+};
+
+struct irdma_sc_pd {
+ struct irdma_sc_dev *dev;
+ u32 pd_id;
+ int abi_ver;
+};
+
+struct irdma_cqp_quanta {
+ __le64 elem[IRDMA_CQP_WQE_SIZE];
+};
+
+struct irdma_sc_cqp {
+ u32 size;
+ u64 sq_pa;
+ u64 host_ctx_pa;
+ void *back_cqp;
+ struct irdma_sc_dev *dev;
+ int (*process_cqp_sds)(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *info);
+ struct irdma_dma_mem sdbuf;
+ struct irdma_ring sq_ring;
+ struct irdma_cqp_quanta *sq_base;
+ struct irdma_dcqcn_cc_params dcqcn_params;
+ __le64 *host_ctx;
+ u64 *scratch_array;
+ u64 requested_ops;
+ atomic64_t completed_ops;
+ u32 cqp_id;
+ u32 sq_size;
+ u32 hw_sq_size;
+ u16 hw_maj_ver;
+ u16 hw_min_ver;
+ u8 struct_ver;
+ u8 polarity;
+ u8 hmc_profile;
+ u8 ena_vf_count;
+ u8 timeout_count;
+ u8 ceqs_per_vf;
+ bool en_datacenter_tcp:1;
+ bool disable_packed:1;
+ bool rocev2_rto_policy:1;
+ enum irdma_protocol_used protocol_used;
+};
+
+struct irdma_sc_aeq {
+ u32 size;
+ u64 aeq_elem_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_aeqe *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ struct irdma_ring aeq_ring;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ u32 msix_idx;
+ u8 polarity;
+ bool virtual_map:1;
+};
+
+struct irdma_sc_ceq {
+ u32 size;
+ u64 ceq_elem_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_ceqe *ceqe_base;
+ void *pbl_list;
+ u32 ceq_id;
+ u32 elem_cnt;
+ struct irdma_ring ceq_ring;
+ u8 pbl_chunk_size;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ u8 polarity;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_cq **reg_cq;
+ u32 reg_cq_size;
+ spinlock_t req_cq_lock; /* protect access to reg_cq array */
+ bool virtual_map:1;
+ bool tph_en:1;
+ bool itr_no_expire:1;
+};
+
+struct irdma_sc_cq {
+ struct irdma_cq_uk cq_uk;
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ void *pbl_list;
+ void *back_cq;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ u8 pbl_chunk_size;
+ u8 cq_type;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ bool ceqe_mask:1;
+ bool virtual_map:1;
+ bool check_overflow:1;
+ bool ceq_id_valid:1;
+ bool tph_en;
+};
+
+struct irdma_sc_qp {
+ struct irdma_qp_uk qp_uk;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 hw_host_ctx_pa;
+ u64 shadow_area_pa;
+ u64 q2_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_pd *pd;
+ __le64 *hw_host_ctx;
+ void *llp_stream_handle;
+ struct irdma_pfpdu pfpdu;
+ u32 ieq_qp;
+ u8 *q2_buf;
+ u64 qp_compl_ctx;
+ u32 push_idx;
+ u16 qs_handle;
+ u16 push_offset;
+ u8 flush_wqes_count;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ u8 qp_state;
+ u8 hw_sq_size;
+ u8 hw_rq_size;
+ u8 src_mac_addr_idx;
+ bool on_qoslist:1;
+ bool ieq_pass_thru:1;
+ bool sq_tph_en:1;
+ bool rq_tph_en:1;
+ bool rcv_tph_en:1;
+ bool xmit_tph_en:1;
+ bool virtual_map:1;
+ bool flush_sq:1;
+ bool flush_rq:1;
+ bool sq_flush_code:1;
+ bool rq_flush_code:1;
+ enum irdma_flush_opcode flush_code;
+ enum irdma_qp_event_type event_type;
+ u8 term_flags;
+ u8 user_pri;
+ struct list_head list;
+};
+
+struct irdma_stats_inst_info {
+ bool use_hmc_fcn_index;
+ u8 hmc_fn_id;
+ u8 stats_idx;
+};
+
+struct irdma_up_info {
+ u8 map[8];
+ u8 cnp_up_override;
+ u8 hmc_fcn_idx;
+ bool use_vlan:1;
+ bool use_cnp_up_override:1;
+};
+
+#define IRDMA_MAX_WS_NODES 0x3FF
+#define IRDMA_WS_NODE_INVALID 0xFFFF
+
+struct irdma_ws_node_info {
+ u16 id;
+ u16 vsi;
+ u16 parent_id;
+ u16 qs_handle;
+ bool type_leaf:1;
+ bool enable:1;
+ u8 prio_type;
+ u8 tc;
+ u8 weight;
+};
+
+struct irdma_hmc_fpm_misc {
+ u32 max_ceqs;
+ u32 max_sds;
+ u32 xf_block_size;
+ u32 q1_block_size;
+ u32 ht_multiplier;
+ u32 timer_bucket;
+ u32 rrf_block_size;
+ u32 ooiscf_block_size;
+};
+
+#define IRDMA_LEAF_DEFAULT_REL_BW 64
+#define IRDMA_PARENT_DEFAULT_REL_BW 1
+
+struct irdma_qos {
+ struct list_head qplist;
+ struct mutex qos_mutex; /* protect QoS attributes per QoS level */
+ u64 lan_qos_handle;
+ u32 l2_sched_node_id;
+ u16 qs_handle;
+ u8 traffic_class;
+ u8 rel_bw;
+ u8 prio_type;
+ bool valid;
+};
+
+#define IRDMA_INVALID_STATS_IDX 0xff
+struct irdma_sc_vsi {
+ u16 vsi_idx;
+ struct irdma_sc_dev *dev;
+ void *back_vsi;
+ u32 ilq_count;
+ struct irdma_virt_mem ilq_mem;
+ struct irdma_puda_rsrc *ilq;
+ u32 ieq_count;
+ struct irdma_virt_mem ieq_mem;
+ struct irdma_puda_rsrc *ieq;
+ u32 exception_lan_q;
+ u16 mtu;
+ u16 vm_id;
+ enum irdma_vm_vf_type vm_vf_type;
+ bool stats_inst_alloc:1;
+ bool tc_change_pending:1;
+ struct irdma_vsi_pestat *pestat;
+ atomic_t qp_suspend_reqs;
+ int (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ void (*unregister_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ u8 qos_rel_bw;
+ u8 qos_prio_type;
+ u8 stats_idx;
+ u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+ struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
+ u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
+ bool dscp_mode:1;
+};
+
+struct irdma_sc_dev {
+ struct list_head cqp_cmd_head; /* head of the CQP command list */
+ spinlock_t cqp_lock; /* protect CQP list access */
+ bool stats_idx_array[IRDMA_MAX_STATS_COUNT_GEN_1];
+ struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ __le64 *fpm_query_buf;
+ __le64 *fpm_commit_buf;
+ struct irdma_hw *hw;
+ u8 __iomem *db_addr;
+ u32 __iomem *wqe_alloc_db;
+ u32 __iomem *cq_arm_db;
+ u32 __iomem *aeq_alloc_db;
+ u32 __iomem *cqp_db;
+ u32 __iomem *cq_ack_db;
+ u32 __iomem *ceq_itr_mask_db;
+ u32 __iomem *aeq_itr_mask_db;
+ u32 __iomem *hw_regs[IRDMA_MAX_REGS];
+ u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
+ u64 hw_masks[IRDMA_MAX_MASKS];
+ u64 hw_shifts[IRDMA_MAX_SHIFTS];
+ const struct irdma_hw_stat_map *hw_stats_map;
+ u64 hw_stats_regs[IRDMA_HW_STAT_INDEX_MAX_GEN_1];
+ u64 feature_info[IRDMA_MAX_FEATURES];
+ u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
+ struct irdma_hw_attrs hw_attrs;
+ struct irdma_hmc_info *hmc_info;
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_aeq *aeq;
+ struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
+ struct irdma_sc_cq *ccq;
+ const struct irdma_irq_ops *irq_ops;
+ struct irdma_hmc_fpm_misc hmc_fpm_misc;
+ struct irdma_ws_node *ws_tree_root;
+ struct mutex ws_mutex; /* ws tree mutex */
+ u16 num_vfs;
+ u8 hmc_fn_id;
+ u8 vf_id;
+ bool vchnl_up:1;
+ bool ceq_valid:1;
+ u8 pci_rev;
+ int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
+ void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
+ void (*ws_reset)(struct irdma_sc_vsi *vsi);
+};
+
+struct irdma_modify_cq_info {
+ u64 cq_pa;
+ struct irdma_cqe *cq_base;
+ u32 cq_size;
+ u32 shadow_read_threshold;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ bool virtual_map:1;
+ bool check_overflow;
+ bool cq_resize:1;
+};
+
+struct irdma_create_qp_info {
+ bool ord_valid:1;
+ bool tcp_ctx_valid:1;
+ bool cq_num_valid:1;
+ bool arp_cache_idx_valid:1;
+ bool mac_valid:1;
+ bool force_lpb;
+ u8 next_iwarp_state;
+};
+
+struct irdma_modify_qp_info {
+ u64 rx_win0;
+ u64 rx_win1;
+ u16 new_mss;
+ u8 next_iwarp_state;
+ u8 curr_iwarp_state;
+ u8 termlen;
+ bool ord_valid:1;
+ bool tcp_ctx_valid:1;
+ bool udp_ctx_valid:1;
+ bool cq_num_valid:1;
+ bool arp_cache_idx_valid:1;
+ bool reset_tcp_conn:1;
+ bool remove_hash_idx:1;
+ bool dont_send_term:1;
+ bool dont_send_fin:1;
+ bool cached_var_valid:1;
+ bool mss_change:1;
+ bool force_lpb:1;
+ bool mac_valid:1;
+};
+
+struct irdma_ccq_cqe_info {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ u32 op_ret_val;
+ u16 maj_err_code;
+ u16 min_err_code;
+ u8 op_code;
+ bool error;
+};
+
+struct irdma_dcb_app_info {
+ u8 priority;
+ u8 selector;
+ u16 prot_id;
+};
+
+struct irdma_qos_tc_info {
+ u64 tc_ctx;
+ u8 rel_bw;
+ u8 prio_type;
+ u8 egress_virt_up;
+ u8 ingress_virt_up;
+};
+
+struct irdma_l2params {
+ struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
+ struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
+ u32 num_apps;
+ u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
+ u16 mtu;
+ u8 up2tc[IRDMA_MAX_USER_PRIORITY];
+ u8 dscp_map[IIDC_MAX_DSCP_MAPPING];
+ u8 num_tc;
+ u8 vsi_rel_bw;
+ u8 vsi_prio_type;
+ bool mtu_changed:1;
+ bool tc_changed:1;
+ bool dscp_mode:1;
+};
+
+struct irdma_vsi_init_info {
+ struct irdma_sc_dev *dev;
+ void *back_vsi;
+ struct irdma_l2params *params;
+ u16 exception_lan_q;
+ u16 pf_data_vsi_num;
+ enum irdma_vm_vf_type vm_vf_type;
+ u16 vm_id;
+ int (*register_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+ void (*unregister_qset)(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node);
+};
+
+struct irdma_vsi_stats_info {
+ struct irdma_vsi_pestat *pestat;
+ u8 fcn_id;
+ bool alloc_stats_inst;
+};
+
+struct irdma_device_init_info {
+ u64 fpm_query_buf_pa;
+ u64 fpm_commit_buf_pa;
+ __le64 *fpm_query_buf;
+ __le64 *fpm_commit_buf;
+ struct irdma_hw *hw;
+ void __iomem *bar0;
+ u8 hmc_fn_id;
+};
+
+struct irdma_ceq_init_info {
+ u64 ceqe_pa;
+ struct irdma_sc_dev *dev;
+ u64 *ceqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ u32 ceq_id;
+ bool virtual_map:1;
+ bool tph_en:1;
+ bool itr_no_expire:1;
+ u8 pbl_chunk_size;
+ u8 tph_val;
+ u32 first_pm_pbl_idx;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_cq **reg_cq;
+ u32 reg_cq_idx;
+};
+
+struct irdma_aeq_init_info {
+ u64 aeq_elem_pa;
+ struct irdma_sc_dev *dev;
+ u32 *aeqe_base;
+ void *pbl_list;
+ u32 elem_cnt;
+ bool virtual_map;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ u32 msix_idx;
+};
+
+struct irdma_ccq_init_info {
+ u64 cq_pa;
+ u64 shadow_area_pa;
+ struct irdma_sc_dev *dev;
+ struct irdma_cqe *cq_base;
+ __le64 *shadow_area;
+ void *pbl_list;
+ u32 num_elem;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ bool ceqe_mask:1;
+ bool ceq_id_valid:1;
+ bool avoid_mem_cflct:1;
+ bool virtual_map:1;
+ bool tph_en:1;
+ u8 tph_val;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ struct irdma_sc_vsi *vsi;
+};
+
+struct irdma_udp_offload_info {
+ bool ipv4:1;
+ bool insert_vlan_tag:1;
+ u8 ttl;
+ u8 tos;
+ u16 src_port;
+ u16 dst_port;
+ u32 dest_ip_addr[4];
+ u32 snd_mss;
+ u16 vlan_tag;
+ u16 arp_idx;
+ u32 flow_label;
+ u8 udp_state;
+ u32 psn_nxt;
+ u32 lsn;
+ u32 epsn;
+ u32 psn_max;
+ u32 psn_una;
+ u32 local_ipaddr[4];
+ u32 cwnd;
+ u8 rexmit_thresh;
+ u8 rnr_nak_thresh;
+};
+
+struct irdma_roce_offload_info {
+ u16 p_key;
+ u16 err_rq_idx;
+ u32 qkey;
+ u32 dest_qp;
+ u8 roce_tver;
+ u8 ack_credits;
+ u8 err_rq_idx_valid;
+ u32 pd_id;
+ u16 ord_size;
+ u16 ird_size;
+ bool is_qp1:1;
+ bool udprivcq_en:1;
+ bool dcqcn_en:1;
+ bool rcv_no_icrc:1;
+ bool wr_rdresp_en:1;
+ bool bind_en:1;
+ bool fast_reg_en:1;
+ bool priv_mode_en:1;
+ bool rd_en:1;
+ bool timely_en:1;
+ bool dctcp_en:1;
+ bool fw_cc_enable:1;
+ bool use_stats_inst:1;
+ u16 t_high;
+ u16 t_low;
+ u8 last_byte_sent;
+ u8 mac_addr[ETH_ALEN];
+ u8 rtomin;
+};
+
+struct irdma_iwarp_offload_info {
+ u16 rcv_mark_offset;
+ u16 snd_mark_offset;
+ u8 ddp_ver;
+ u8 rdmap_ver;
+ u8 iwarp_mode;
+ u16 err_rq_idx;
+ u32 pd_id;
+ u16 ord_size;
+ u16 ird_size;
+ bool ib_rd_en:1;
+ bool align_hdrs:1;
+ bool rcv_no_mpa_crc:1;
+ bool err_rq_idx_valid:1;
+ bool snd_mark_en:1;
+ bool rcv_mark_en:1;
+ bool wr_rdresp_en:1;
+ bool bind_en:1;
+ bool fast_reg_en:1;
+ bool priv_mode_en:1;
+ bool rd_en:1;
+ bool timely_en:1;
+ bool use_stats_inst:1;
+ bool ecn_en:1;
+ bool dctcp_en:1;
+ u16 t_high;
+ u16 t_low;
+ u8 last_byte_sent;
+ u8 mac_addr[ETH_ALEN];
+ u8 rtomin;
+};
+
+struct irdma_tcp_offload_info {
+ bool ipv4:1;
+ bool no_nagle:1;
+ bool insert_vlan_tag:1;
+ bool time_stamp:1;
+ bool drop_ooo_seg:1;
+ bool avoid_stretch_ack:1;
+ bool wscale:1;
+ bool ignore_tcp_opt:1;
+ bool ignore_tcp_uns_opt:1;
+ u8 cwnd_inc_limit;
+ u8 dup_ack_thresh;
+ u8 ttl;
+ u8 src_mac_addr_idx;
+ u8 tos;
+ u16 src_port;
+ u16 dst_port;
+ u32 dest_ip_addr[4];
+ //u32 dest_ip_addr0;
+ //u32 dest_ip_addr1;
+ //u32 dest_ip_addr2;
+ //u32 dest_ip_addr3;
+ u32 snd_mss;
+ u16 syn_rst_handling;
+ u16 vlan_tag;
+ u16 arp_idx;
+ u32 flow_label;
+ u8 tcp_state;
+ u8 snd_wscale;
+ u8 rcv_wscale;
+ u32 time_stamp_recent;
+ u32 time_stamp_age;
+ u32 snd_nxt;
+ u32 snd_wnd;
+ u32 rcv_nxt;
+ u32 rcv_wnd;
+ u32 snd_max;
+ u32 snd_una;
+ u32 srtt;
+ u32 rtt_var;
+ u32 ss_thresh;
+ u32 cwnd;
+ u32 snd_wl1;
+ u32 snd_wl2;
+ u32 max_snd_window;
+ u8 rexmit_thresh;
+ u32 local_ipaddr[4];
+};
+
+struct irdma_qp_host_ctx_info {
+ u64 qp_compl_ctx;
+ union {
+ struct irdma_tcp_offload_info *tcp_info;
+ struct irdma_udp_offload_info *udp_info;
+ };
+ union {
+ struct irdma_iwarp_offload_info *iwarp_info;
+ struct irdma_roce_offload_info *roce_info;
+ };
+ u32 send_cq_num;
+ u32 rcv_cq_num;
+ u32 rem_endpoint_idx;
+ u8 stats_idx;
+ bool srq_valid:1;
+ bool tcp_info_valid:1;
+ bool iwarp_info_valid:1;
+ bool stats_idx_valid:1;
+ u8 user_pri;
+};
+
+struct irdma_aeqe_info {
+ u64 compl_ctx;
+ u32 qp_cq_id;
+ u16 ae_id;
+ u16 wqe_idx;
+ u8 tcp_state;
+ u8 iwarp_state;
+ bool qp:1;
+ bool cq:1;
+ bool sq:1;
+ bool rq:1;
+ bool in_rdrsp_wr:1;
+ bool out_rdrsp:1;
+ bool aeqe_overflow:1;
+ u8 q2_data_written;
+ u8 ae_src;
+};
+
+struct irdma_allocate_stag_info {
+ u64 total_len;
+ u64 first_pm_pbl_idx;
+ u32 chunk_size;
+ u32 stag_idx;
+ u32 page_size;
+ u32 pd_id;
+ u16 access_rights;
+ bool remote_access:1;
+ bool use_hmc_fcn_index:1;
+ bool use_pf_rid:1;
+ bool all_memory:1;
+ u8 hmc_fcn_index;
+};
+
+struct irdma_mw_alloc_info {
+ u32 mw_stag_index;
+ u32 page_size;
+ u32 pd_id;
+ bool remote_access:1;
+ bool mw_wide:1;
+ bool mw1_bind_dont_vldt_key:1;
+};
+
+struct irdma_reg_ns_stag_info {
+ u64 reg_addr_pa;
+ u64 va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum irdma_addressing_type addr_type;
+ irdma_stag_index stag_idx;
+ u16 access_rights;
+ u32 pd_id;
+ irdma_stag_key stag_key;
+ bool use_hmc_fcn_index:1;
+ u8 hmc_fcn_index;
+ bool use_pf_rid:1;
+ bool all_memory:1;
+};
+
+struct irdma_fast_reg_stag_info {
+ u64 wr_id;
+ u64 reg_addr_pa;
+ u64 fbo;
+ void *va;
+ u64 total_len;
+ u32 page_size;
+ u32 chunk_size;
+ u32 first_pm_pbl_index;
+ enum irdma_addressing_type addr_type;
+ irdma_stag_index stag_idx;
+ u16 access_rights;
+ u32 pd_id;
+ irdma_stag_key stag_key;
+ bool local_fence:1;
+ bool read_fence:1;
+ bool signaled:1;
+ bool use_hmc_fcn_index:1;
+ u8 hmc_fcn_index;
+ bool use_pf_rid:1;
+ bool defer_flag:1;
+};
+
+struct irdma_dealloc_stag_info {
+ u32 stag_idx;
+ u32 pd_id;
+ bool mr:1;
+ bool dealloc_pbl:1;
+};
+
+struct irdma_register_shared_stag {
+ u64 va;
+ enum irdma_addressing_type addr_type;
+ irdma_stag_index new_stag_idx;
+ irdma_stag_index parent_stag_idx;
+ u32 access_rights;
+ u32 pd_id;
+ u32 page_size;
+ irdma_stag_key new_stag_key;
+};
+
+struct irdma_qp_init_info {
+ struct irdma_qp_uk_init_info qp_uk_init_info;
+ struct irdma_sc_pd *pd;
+ struct irdma_sc_vsi *vsi;
+ __le64 *host_ctx;
+ u8 *q2;
+ u64 sq_pa;
+ u64 rq_pa;
+ u64 host_ctx_pa;
+ u64 q2_pa;
+ u64 shadow_area_pa;
+ u8 sq_tph_val;
+ u8 rq_tph_val;
+ bool sq_tph_en:1;
+ bool rq_tph_en:1;
+ bool rcv_tph_en:1;
+ bool xmit_tph_en:1;
+ bool virtual_map:1;
+};
+
+struct irdma_cq_init_info {
+ struct irdma_sc_dev *dev;
+ u64 cq_base_pa;
+ u64 shadow_area_pa;
+ u32 ceq_id;
+ u32 shadow_read_threshold;
+ u8 pbl_chunk_size;
+ u32 first_pm_pbl_idx;
+ bool virtual_map:1;
+ bool ceqe_mask:1;
+ bool ceq_id_valid:1;
+ bool tph_en:1;
+ u8 tph_val;
+ u8 type;
+ struct irdma_cq_uk_init_info cq_uk_init_info;
+ struct irdma_sc_vsi *vsi;
+};
+
+struct irdma_upload_context_info {
+ u64 buf_pa;
+ u32 qp_id;
+ u8 qp_type;
+ bool freeze_qp:1;
+ bool raw_format:1;
+};
+
+struct irdma_local_mac_entry_info {
+ u8 mac_addr[6];
+ u16 entry_idx;
+};
+
+struct irdma_add_arp_cache_entry_info {
+ u8 mac_addr[ETH_ALEN];
+ u32 reach_max;
+ u16 arp_index;
+ bool permanent;
+};
+
+struct irdma_apbvt_info {
+ u16 port;
+ bool add;
+};
+
+struct irdma_qhash_table_info {
+ struct irdma_sc_vsi *vsi;
+ enum irdma_quad_hash_manage_type manage;
+ enum irdma_quad_entry_type entry_type;
+ bool vlan_valid:1;
+ bool ipv4_valid:1;
+ u8 mac_addr[ETH_ALEN];
+ u16 vlan_id;
+ u8 user_pri;
+ u32 qp_num;
+ u32 dest_ip[4];
+ u32 src_ip[4];
+ u16 dest_port;
+ u16 src_port;
+};
+
+struct irdma_cqp_manage_push_page_info {
+ u32 push_idx;
+ u16 qs_handle;
+ u8 free_page;
+ u8 push_page_type;
+};
+
+struct irdma_qp_flush_info {
+ u16 sq_minor_code;
+ u16 sq_major_code;
+ u16 rq_minor_code;
+ u16 rq_major_code;
+ u16 ae_code;
+ u8 ae_src;
+ bool sq:1;
+ bool rq:1;
+ bool userflushcode:1;
+ bool generate_ae:1;
+};
+
+struct irdma_gen_ae_info {
+ u16 ae_code;
+ u8 ae_src;
+};
+
+struct irdma_cqp_timeout {
+ u64 compl_cqp_cmds;
+ u32 count;
+};
+
+struct irdma_irq_ops {
+ void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
+ void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
+ bool enable);
+ void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
+ void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
+};
+
+void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
+int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
+ bool check_overflow, bool post_sq);
+int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
+int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_cqe_info *info);
+int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
+ struct irdma_ccq_init_info *info);
+
+int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
+int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
+
+int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
+int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
+ struct irdma_ceq_init_info *info);
+void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
+void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);
+
+int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
+ struct irdma_aeq_init_info *info);
+int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
+ struct irdma_aeqe_info *info);
+void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
+
+void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
+ int abi_ver);
+void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
+void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
+ struct irdma_sc_dev *dev);
+int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
+int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
+int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
+ struct irdma_cqp_init_info *info);
+void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
+int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
+ struct irdma_ccq_cqe_info *cmpl_info);
+int irdma_sc_fast_register(struct irdma_sc_qp *qp,
+ struct irdma_fast_reg_stag_info *info, bool post_sq);
+int irdma_sc_qp_create(struct irdma_sc_qp *qp,
+ struct irdma_create_qp_info *info, u64 scratch,
+ bool post_sq);
+int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
+ bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
+int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
+ struct irdma_qp_flush_info *info, u64 scratch,
+ bool post_sq);
+int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
+int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
+ struct irdma_modify_qp_info *info, u64 scratch,
+ bool post_sq);
+void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
+ irdma_stag stag);
+
+void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
+void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info);
+void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info);
+int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
+int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
+void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
+int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
+ u8 hmc_fn_id, bool post_sq,
+ bool poll_registers);
+
+void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
+struct cqp_info {
+ union {
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_create_qp_info info;
+ u64 scratch;
+ } qp_create;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_modify_qp_info info;
+ u64 scratch;
+ } qp_modify;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ u64 scratch;
+ bool remove_hash_idx;
+ bool ignore_mw_bnd;
+ } qp_destroy;
+
+ struct {
+ struct irdma_sc_cq *cq;
+ u64 scratch;
+ bool check_overflow;
+ } cq_create;
+
+ struct {
+ struct irdma_sc_cq *cq;
+ struct irdma_modify_cq_info info;
+ u64 scratch;
+ } cq_modify;
+
+ struct {
+ struct irdma_sc_cq *cq;
+ u64 scratch;
+ } cq_destroy;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_allocate_stag_info info;
+ u64 scratch;
+ } alloc_stag;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_mw_alloc_info info;
+ u64 scratch;
+ } mw_alloc;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_reg_ns_stag_info info;
+ u64 scratch;
+ } mr_reg_non_shared;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_dealloc_stag_info info;
+ u64 scratch;
+ } dealloc_stag;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_add_arp_cache_entry_info info;
+ u64 scratch;
+ } add_arp_cache_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ u16 arp_index;
+ } del_arp_cache_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_local_mac_entry_info info;
+ u64 scratch;
+ } add_local_mac_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ u8 entry_idx;
+ u8 ignore_ref_count;
+ } del_local_mac_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ u64 scratch;
+ } alloc_local_mac_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_cqp_manage_push_page_info info;
+ u64 scratch;
+ } manage_push_page;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_upload_context_info info;
+ u64 scratch;
+ } qp_upload_context;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_hmc_fcn_info info;
+ u64 scratch;
+ } manage_hmc_pm;
+
+ struct {
+ struct irdma_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_create;
+
+ struct {
+ struct irdma_sc_ceq *ceq;
+ u64 scratch;
+ } ceq_destroy;
+
+ struct {
+ struct irdma_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_create;
+
+ struct {
+ struct irdma_sc_aeq *aeq;
+ u64 scratch;
+ } aeq_destroy;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_qp_flush_info info;
+ u64 scratch;
+ } qp_flush_wqes;
+
+ struct {
+ struct irdma_sc_qp *qp;
+ struct irdma_gen_ae_info info;
+ u64 scratch;
+ } gen_ae;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ void *fpm_val_va;
+ u64 fpm_val_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } query_fpm_val;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ void *fpm_val_va;
+ u64 fpm_val_pa;
+ u8 hmc_fn_id;
+ u64 scratch;
+ } commit_fpm_val;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_apbvt_info info;
+ u64 scratch;
+ } manage_apbvt_entry;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_qhash_table_info info;
+ u64 scratch;
+ } manage_qhash_table_entry;
+
+ struct {
+ struct irdma_sc_dev *dev;
+ struct irdma_update_sds_info info;
+ u64 scratch;
+ } update_pe_sds;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_sc_qp *qp;
+ u64 scratch;
+ } suspend_resume;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_ah_info info;
+ u64 scratch;
+ } ah_create;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_ah_info info;
+ u64 scratch;
+ } ah_destroy;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_mcast_grp_info info;
+ u64 scratch;
+ } mc_create;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_mcast_grp_info info;
+ u64 scratch;
+ } mc_destroy;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_mcast_grp_info info;
+ u64 scratch;
+ } mc_modify;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_stats_inst_info info;
+ u64 scratch;
+ } stats_manage;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_stats_gather_info info;
+ u64 scratch;
+ } stats_gather;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_ws_node_info info;
+ u64 scratch;
+ } ws_node;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_up_info info;
+ u64 scratch;
+ } up_map;
+
+ struct {
+ struct irdma_sc_cqp *cqp;
+ struct irdma_dma_mem query_buff_mem;
+ u64 scratch;
+ } query_rdma;
+ } u;
+};
+
+struct cqp_cmds_info {
+ struct list_head cqp_cmd_entry;
+ u8 cqp_cmd;
+ u8 post_sq;
+ struct cqp_info in;
+};
+
+__le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
+ u32 *wqe_idx);
+
+/**
+ * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
+{
+ u32 wqe_idx;
+
+ return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
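+
+/*
+ * Illustrative usage sketch (for reference only, based on callers in this
+ * driver such as irdma_sc_access_ah() in uda.c): obtain a WQE with
+ * irdma_sc_cqp_get_next_send_wqe(), fill its quadwords with set_64bit_val(),
+ * issue a dma_wmb() and write the header quadword carrying the valid bit
+ * last, then ring the CQP doorbell:
+ *
+ *	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ *	if (!wqe)
+ *		return -ENOMEM;
+ *	set_64bit_val(wqe, 0, ...);
+ *	dma_wmb();
+ *	set_64bit_val(wqe, 24, hdr);
+ *	irdma_sc_cqp_post_sq(cqp);
+ */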
+#endif /* IRDMA_TYPE_H */
diff --git a/drivers/infiniband/hw/irdma/uda.c b/drivers/infiniband/hw/irdma/uda.c
new file mode 100644
index 0000000000..284cec2a74
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uda.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#include <linux/etherdevice.h>
+
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "uda.h"
+#include "uda_d.h"
+
+/**
+ * irdma_sc_access_ah() - Create, modify or delete AH
+ * @cqp: struct for cqp hw
+ * @info: ah information
+ * @op: Operation
+ * @scratch: u64 saved to be used during cqp completion
+ */
+int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
+ u32 op, u64 scratch)
+{
+ __le64 *wqe;
+ u64 qw1, qw2;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
+ qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
+ FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);
+
+ qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);
+
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
+
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
+ } else {
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
+
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
+ }
+
+ set_64bit_val(wqe, 8, qw1);
+ set_64bit_val(wqe, 16, qw2);
+
+ dma_wmb(); /* need write memory block before writing the WQE header. */
+
+ set_64bit_val(
+ wqe, 24,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));
+
+ print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
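+
+/*
+ * Illustrative usage sketch (hypothetical caller, for reference only):
+ * callers do not normally pass the opcode directly; the irdma_sc_create_ah()
+ * and irdma_sc_destroy_ah() inline wrappers in uda.h select
+ * IRDMA_CQP_OP_CREATE_ADDR_HANDLE or IRDMA_CQP_OP_DESTROY_ADDR_HANDLE and
+ * forward to this routine, e.g.:
+ *
+ *	err = irdma_sc_create_ah(cqp, &ah->ah_info, scratch);
+ */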
+
+/**
+ * irdma_create_mg_ctx() - create a multicast group (MCG) context
+ * @info: multicast group context info
+ */
+static void irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
+{
+ struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
+ u8 idx = 0; /* index in the array */
+ u8 ctx_idx = 0; /* index in the MG context */
+
+ memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));
+
+ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
+ entry_info = &info->mg_ctx_info[idx];
+ if (entry_info->valid_entry) {
+ set_64bit_val((__le64 *)info->dma_mem_mc.va,
+ ctx_idx * sizeof(u64),
+ FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
+ FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
+ FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
+ ctx_idx++;
+ }
+ }
+}
+
+/**
+ * irdma_access_mcast_grp() - Access mcast group based on op
+ * @cqp: Control QP
+ * @info: multicast group context info
+ * @op: operation to perform
+ * @scratch: u64 saved to be used during cqp completion
+ */
+int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info, u32 op,
+ u64 scratch)
+{
+ __le64 *wqe;
+
+ if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
+ ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
+ return -EINVAL;
+ }
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe) {
+ ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
+ return -ENOMEM;
+ }
+
+ irdma_create_mg_ctx(info);
+
+ set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
+ set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));
+
+ if (!info->ipv4_valid) {
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
+ } else {
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
+ }
+
+ dma_wmb(); /* need write memory block before writing the WQE header. */
+
+ set_64bit_val(wqe, 24,
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
+ FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));
+
+ print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, info->dma_mem_mc.va,
+ IRDMA_MAX_MGS_PER_CTX * 8, false);
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_compare_mgs - Compares two multicast group structures
+ * @entry1: Multicast group info
+ * @entry2: Multicast group info in context
+ */
+static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
+ struct irdma_mcast_grp_ctx_entry_info *entry2)
+{
+ if (entry1->dest_port == entry2->dest_port &&
+ entry1->qp_id == entry2->qp_id)
+ return true;
+
+ return false;
+}
+
+/**
+ * irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
+ * @ctx: Multicast group context
+ * @mg: Multicast group info
+ */
+int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg)
+{
+ u32 idx;
+ bool free_entry_found = false;
+ u32 free_entry_idx = 0;
+
+ /* find either an identical or a free entry for a multicast group */
+ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
+ if (ctx->mg_ctx_info[idx].valid_entry) {
+ if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
+ ctx->mg_ctx_info[idx].use_cnt++;
+ return 0;
+ }
+ continue;
+ }
+ if (!free_entry_found) {
+ free_entry_found = true;
+ free_entry_idx = idx;
+ }
+ }
+
+ if (free_entry_found) {
+ ctx->mg_ctx_info[free_entry_idx] = *mg;
+ ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
+ ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
+ ctx->no_of_mgs++;
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * irdma_sc_del_mcast_grp - Delete mcast group
+ * @ctx: Multicast group context
+ * @mg: Multicast group info
+ *
+ * Finds and removes a specific multicast group from the context; all
+ * parameters must match for the multicast group to be removed.
+ */
+int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg)
+{
+ u32 idx;
+
+ /* find an entry in multicast group context */
+ for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
+ if (!ctx->mg_ctx_info[idx].valid_entry)
+ continue;
+
+ if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
+ ctx->mg_ctx_info[idx].use_cnt--;
+
+ if (!ctx->mg_ctx_info[idx].use_cnt) {
+ ctx->mg_ctx_info[idx].valid_entry = false;
+ ctx->no_of_mgs--;
+ /* Remove gap if element was not the last */
+ if (idx != ctx->no_of_mgs &&
+ ctx->no_of_mgs > 0) {
+ memcpy(&ctx->mg_ctx_info[idx],
+ &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
+ sizeof(ctx->mg_ctx_info[idx]));
+ ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
+ }
+ }
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
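+
+/*
+ * Illustrative pairing of the soft-context helpers above with the CQP
+ * operations declared in uda.h (hypothetical caller, for reference only):
+ * a QP is first added to the multicast group context and the updated
+ * context is then pushed to hardware, e.g.:
+ *
+ *	if (!irdma_sc_add_mcast_grp(&mc_grp_ctx, &mg_entry))
+ *		err = irdma_sc_create_mcast_grp(cqp, &mc_grp_ctx, scratch);
+ *
+ * and, symmetrically, irdma_sc_del_mcast_grp() followed by
+ * irdma_sc_modify_mcast_grp() or irdma_sc_destroy_mcast_grp() once the
+ * last entry has been removed.
+ */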
diff --git a/drivers/infiniband/hw/irdma/uda.h b/drivers/infiniband/hw/irdma/uda.h
new file mode 100644
index 0000000000..fe4820ff0c
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uda.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#ifndef IRDMA_UDA_H
+#define IRDMA_UDA_H
+
+#define IRDMA_UDA_MAX_FSI_MGS 4096
+#define IRDMA_UDA_MAX_PFS 16
+#define IRDMA_UDA_MAX_VFS 128
+
+struct irdma_sc_cqp;
+
+struct irdma_ah_info {
+ struct irdma_sc_vsi *vsi;
+ u32 pd_idx;
+ u32 dst_arpindex;
+ u32 dest_ip_addr[4];
+ u32 src_ip_addr[4];
+ u32 flow_label;
+ u32 ah_idx;
+ u16 vlan_tag;
+ u8 insert_vlan_tag;
+ u8 tc_tos;
+ u8 hop_ttl;
+ u8 mac_addr[ETH_ALEN];
+ bool ah_valid:1;
+ bool ipv4_valid:1;
+ bool do_lpbk:1;
+};
+
+struct irdma_sc_ah {
+ struct irdma_sc_dev *dev;
+ struct irdma_ah_info ah_info;
+};
+
+int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg);
+int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
+ struct irdma_mcast_grp_ctx_entry_info *mg);
+int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
+ u32 op, u64 scratch);
+int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info, u32 op,
+ u64 scratch);
+
+static inline void irdma_sc_init_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
+{
+ ah->dev = dev;
+}
+
+static inline int irdma_sc_create_ah(struct irdma_sc_cqp *cqp,
+ struct irdma_ah_info *info, u64 scratch)
+{
+ return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
+ scratch);
+}
+
+static inline int irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp,
+ struct irdma_ah_info *info, u64 scratch)
+{
+ return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE,
+ scratch);
+}
+
+static inline int irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
+{
+ return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,
+ scratch);
+}
+
+static inline int irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
+{
+ return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP,
+ scratch);
+}
+
+static inline int irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp,
+ struct irdma_mcast_grp_info *info,
+ u64 scratch)
+{
+ return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_DESTROY_MCAST_GRP,
+ scratch);
+}
+#endif /* IRDMA_UDA_H */
diff --git a/drivers/infiniband/hw/irdma/uda_d.h b/drivers/infiniband/hw/irdma/uda_d.h
new file mode 100644
index 0000000000..bfc81cac2c
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uda_d.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2016 - 2021 Intel Corporation */
+#ifndef IRDMA_UDA_D_H
+#define IRDMA_UDA_D_H
+
+/* L4 packet type */
+#define IRDMA_E_UDA_SQ_L4T_UNKNOWN 0
+#define IRDMA_E_UDA_SQ_L4T_TCP 1
+#define IRDMA_E_UDA_SQ_L4T_SCTP 2
+#define IRDMA_E_UDA_SQ_L4T_UDP 3
+
+/* Inner IP header type */
+#define IRDMA_E_UDA_SQ_IIPT_UNKNOWN 0
+#define IRDMA_E_UDA_SQ_IIPT_IPV6 1
+#define IRDMA_E_UDA_SQ_IIPT_IPV4_NO_CSUM 2
+#define IRDMA_E_UDA_SQ_IIPT_IPV4_CSUM 3
+#define IRDMA_UDA_QPSQ_PUSHWQE BIT_ULL(56)
+#define IRDMA_UDA_QPSQ_INLINEDATAFLAG BIT_ULL(57)
+#define IRDMA_UDA_QPSQ_INLINEDATALEN GENMASK_ULL(55, 48)
+#define IRDMA_UDA_QPSQ_ADDFRAGCNT GENMASK_ULL(41, 38)
+#define IRDMA_UDA_QPSQ_IPFRAGFLAGS GENMASK_ULL(43, 42)
+#define IRDMA_UDA_QPSQ_NOCHECKSUM BIT_ULL(45)
+#define IRDMA_UDA_QPSQ_AHIDXVALID BIT_ULL(46)
+#define IRDMA_UDA_QPSQ_LOCAL_FENCE BIT_ULL(61)
+#define IRDMA_UDA_QPSQ_AHIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_QPSQ_PROTOCOL GENMASK_ULL(23, 16)
+#define IRDMA_UDA_QPSQ_EXTHDRLEN GENMASK_ULL(40, 32)
+#define IRDMA_UDA_QPSQ_MULTICAST BIT_ULL(63)
+#define IRDMA_UDA_QPSQ_MACLEN GENMASK_ULL(62, 56)
+#define IRDMA_UDA_QPSQ_MACLEN_LINE 2
+#define IRDMA_UDA_QPSQ_IPLEN GENMASK_ULL(54, 48)
+#define IRDMA_UDA_QPSQ_IPLEN_LINE 2
+#define IRDMA_UDA_QPSQ_L4T GENMASK_ULL(31, 30)
+#define IRDMA_UDA_QPSQ_L4T_LINE 2
+#define IRDMA_UDA_QPSQ_IIPT GENMASK_ULL(29, 28)
+#define IRDMA_UDA_QPSQ_IIPT_LINE 2
+
+#define IRDMA_UDA_QPSQ_DO_LPB_LINE 3
+#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM BIT_ULL(45)
+#define IRDMA_UDA_QPSQ_FWD_PROG_CONFIRM_LINE 3
+#define IRDMA_UDA_QPSQ_IMMDATA GENMASK_ULL(63, 0)
+
+/* Byte Offset 0 */
+#define IRDMA_UDAQPC_IPV4_M BIT_ULL(3)
+#define IRDMA_UDAQPC_INSERTVLANTAG BIT_ULL(5)
+#define IRDMA_UDAQPC_ISQP1 BIT_ULL(6)
+
+#define IRDMA_UDAQPC_ECNENABLE BIT_ULL(14)
+#define IRDMA_UDAQPC_PDINDEXHI GENMASK_ULL(21, 20)
+#define IRDMA_UDAQPC_DCTCPENABLE BIT_ULL(25)
+
+#define IRDMA_UDAQPC_RCVTPHEN IRDMAQPC_RCVTPHEN
+#define IRDMA_UDAQPC_XMITTPHEN IRDMAQPC_XMITTPHEN
+#define IRDMA_UDAQPC_RQTPHEN IRDMAQPC_RQTPHEN
+#define IRDMA_UDAQPC_SQTPHEN IRDMAQPC_SQTPHEN
+#define IRDMA_UDAQPC_PPIDX IRDMAQPC_PPIDX
+#define IRDMA_UDAQPC_PMENA IRDMAQPC_PMENA
+#define IRDMA_UDAQPC_INSERTTAG2 BIT_ULL(11)
+#define IRDMA_UDAQPC_INSERTTAG3 BIT_ULL(14)
+
+#define IRDMA_UDAQPC_RQSIZE IRDMAQPC_RQSIZE
+#define IRDMA_UDAQPC_SQSIZE IRDMAQPC_SQSIZE
+#define IRDMA_UDAQPC_TXCQNUM IRDMAQPC_TXCQNUM
+#define IRDMA_UDAQPC_RXCQNUM IRDMAQPC_RXCQNUM
+#define IRDMA_UDAQPC_QPCOMPCTX IRDMAQPC_QPCOMPCTX
+#define IRDMA_UDAQPC_SQTPHVAL IRDMAQPC_SQTPHVAL
+#define IRDMA_UDAQPC_RQTPHVAL IRDMAQPC_RQTPHVAL
+#define IRDMA_UDAQPC_QSHANDLE IRDMAQPC_QSHANDLE
+#define IRDMA_UDAQPC_RQHDRRINGBUFSIZE GENMASK_ULL(49, 48)
+#define IRDMA_UDAQPC_SQHDRRINGBUFSIZE GENMASK_ULL(33, 32)
+#define IRDMA_UDAQPC_PRIVILEGEENABLE BIT_ULL(25)
+#define IRDMA_UDAQPC_USE_STATISTICS_INSTANCE BIT_ULL(26)
+#define IRDMA_UDAQPC_STATISTICS_INSTANCE_INDEX GENMASK_ULL(6, 0)
+#define IRDMA_UDAQPC_PRIVHDRGENENABLE BIT_ULL(0)
+#define IRDMA_UDAQPC_RQHDRSPLITENABLE BIT_ULL(3)
+#define IRDMA_UDAQPC_RQHDRRINGBUFENABLE BIT_ULL(2)
+#define IRDMA_UDAQPC_SQHDRRINGBUFENABLE BIT_ULL(1)
+#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
+#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
+#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)
+
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20)
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
+#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
+#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
+#define IRDMA_UDA_CQPSQ_MAV_TC GENMASK_ULL(39, 32)
+#define IRDMA_UDA_CQPSQ_MAV_HOPLIMIT GENMASK_ULL(39, 32)
+#define IRDMA_UDA_CQPSQ_MAV_FLOWLABEL GENMASK_ULL(19, 0)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR1 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR2 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_MAV_ADDR3 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_MAV_WQEVALID BIT_ULL(63)
+#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
+#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
+#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
+#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
+#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)
+#define IRDMA_UDA_MGCTX_VFID GENMASK_ULL(28, 22)
+#define IRDMA_UDA_MGCTX_VALIDENT BIT_ULL(31)
+#define IRDMA_UDA_MGCTX_PFID GENMASK_ULL(21, 18)
+#define IRDMA_UDA_MGCTX_FLAGIGNOREDPORT BIT_ULL(30)
+#define IRDMA_UDA_MGCTX_QPID GENMASK_ULL(17, 0)
+#define IRDMA_UDA_CQPSQ_MG_WQEVALID BIT_ULL(63)
+#define IRDMA_UDA_CQPSQ_MG_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_MG_MGIDX GENMASK_ULL(12, 0)
+#define IRDMA_UDA_CQPSQ_MG_IPV4VALID BIT_ULL(60)
+#define IRDMA_UDA_CQPSQ_MG_VLANVALID BIT_ULL(59)
+#define IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID GENMASK_ULL(5, 0)
+#define IRDMA_UDA_CQPSQ_MG_VLANID GENMASK_ULL(43, 32)
+#define IRDMA_UDA_CQPSQ_QS_HANDLE GENMASK_ULL(9, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_QPN GENMASK_ULL(49, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_ BIT_ULL(0)
+#define IRDMA_UDA_CQPSQ_QHASH_SRC_PORT GENMASK_ULL(31, 16)
+#define IRDMA_UDA_CQPSQ_QHASH_DEST_PORT GENMASK_ULL(15, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR0 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR1 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR2 GENMASK_ULL(63, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_ADDR3 GENMASK_ULL(31, 0)
+#define IRDMA_UDA_CQPSQ_QHASH_WQEVALID BIT_ULL(63)
+#define IRDMA_UDA_CQPSQ_QHASH_OPCODE GENMASK_ULL(37, 32)
+#define IRDMA_UDA_CQPSQ_QHASH_MANAGE GENMASK_ULL(62, 61)
+#define IRDMA_UDA_CQPSQ_QHASH_IPV4VALID GENMASK_ULL(60, 60)
+#define IRDMA_UDA_CQPSQ_QHASH_LANFWD GENMASK_ULL(59, 59)
+#define IRDMA_UDA_CQPSQ_QHASH_ENTRYTYPE GENMASK_ULL(44, 42)
+#endif /* IRDMA_UDA_D_H */
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
new file mode 100644
index 0000000000..d8285ca162
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -0,0 +1,1646 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "defs.h"
+#include "user.h"
+#include "irdma.h"
+
+/**
+ * irdma_set_fragment - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ * @valid: wqe valid flag
+ */
+static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
+ u8 valid)
+{
+ if (sge) {
+ set_64bit_val(wqe, offset,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
+ set_64bit_val(wqe, offset + 8,
+ FIELD_PREP(IRDMAQPSQ_VALID, valid) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
+ } else {
+ set_64bit_val(wqe, offset, 0);
+ set_64bit_val(wqe, offset + 8,
+ FIELD_PREP(IRDMAQPSQ_VALID, valid));
+ }
+}
+
+/**
+ * irdma_set_fragment_gen_1 - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ * @valid: wqe valid flag
+ */
+static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
+ struct ib_sge *sge, u8 valid)
+{
+ if (sge) {
+ set_64bit_val(wqe, offset,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
+ set_64bit_val(wqe, offset + 8,
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
+ } else {
+ set_64bit_val(wqe, offset, 0);
+ set_64bit_val(wqe, offset + 8, 0);
+ }
+}
+
+/**
+ * irdma_nop_1 - insert a NOP wqe
+ * @qp: hw qp ptr
+ */
+static int irdma_nop_1(struct irdma_qp_uk *qp)
+{
+ u64 hdr;
+ __le64 *wqe;
+ u32 wqe_idx;
+ bool signaled = false;
+
+ if (!qp->sq_ring.head)
+ return -EINVAL;
+
+ wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ wqe = qp->sq_base[wqe_idx].elem;
+
+ qp->sq_wrtrk_array[wqe_idx].quanta = IRDMA_QP_WQE_MIN_QUANTA;
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ /* make sure WQE is written before valid bit is set */
+ dma_wmb();
+
+ set_64bit_val(wqe, 24, hdr);
+
+ return 0;
+}
+
+/**
+ * irdma_clr_wqes - clear next 128 sq entries
+ * @qp: hw qp ptr
+ * @qp_wqe_idx: wqe_idx
+ *
+ * When a 128-WQE boundary is crossed, fill the block 128 entries ahead with
+ * the inverse of the valid-bit polarity that will apply when that block is
+ * reached, so stale entries are never mistaken for valid WQEs.
+ */
+void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
+{
+ struct irdma_qp_quanta *sq;
+ u32 wqe_idx;
+
+ if (!(qp_wqe_idx & 0x7F)) {
+ wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
+ sq = qp->sq_base + wqe_idx;
+ if (wqe_idx)
+ memset(sq, qp->swqe_polarity ? 0 : 0xFF,
+ 128 * sizeof(*sq));
+ else
+ memset(sq, qp->swqe_polarity ? 0xFF : 0,
+ 128 * sizeof(*sq));
+ }
+}
+
+/**
+ * irdma_uk_qp_post_wr - ring doorbell
+ * @qp: hw qp ptr
+ */
+void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
+{
+ u64 temp;
+ u32 hw_sq_tail;
+ u32 sw_sq_head;
+
+ /* valid bit is written and loads completed before reading shadow */
+ mb();
+
+ /* read the doorbell shadow area */
+ get_64bit_val(qp->shadow_area, 0, &temp);
+
+ hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
+ sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (sw_sq_head != qp->initial_ring.head) {
+ if (sw_sq_head != hw_sq_tail) {
+ if (sw_sq_head > qp->initial_ring.head) {
+ if (hw_sq_tail >= qp->initial_ring.head &&
+ hw_sq_tail < sw_sq_head)
+ writel(qp->qp_id, qp->wqe_alloc_db);
+ } else {
+ if (hw_sq_tail >= qp->initial_ring.head ||
+ hw_sq_tail < sw_sq_head)
+ writel(qp->qp_id, qp->wqe_alloc_db);
+ }
+ }
+ }
+
+ qp->initial_ring.head = qp->sq_ring.head;
+}
+
+/**
+ * irdma_qp_get_next_send_wqe - pad with NOP if needed, return where next WR should go
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ * @quanta: size of WR in quanta
+ * @total_size: size of WR in bytes
+ * @info: info on WR
+ */
+__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
+ u16 quanta, u32 total_size,
+ struct irdma_post_sq_info *info)
+{
+ __le64 *wqe;
+ __le64 *wqe_0 = NULL;
+ u16 avail_quanta;
+ u16 i;
+
+ avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
+ (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) %
+ qp->uk_attrs->max_hw_sq_chunk);
+ if (quanta <= avail_quanta) {
+ /* WR fits in current chunk */
+ if (quanta > IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ return NULL;
+ } else {
+ /* Need to pad with NOP */
+ if (quanta + avail_quanta >
+ IRDMA_SQ_RING_FREE_QUANTA(qp->sq_ring))
+ return NULL;
+
+ for (i = 0; i < avail_quanta; i++) {
+ irdma_nop_1(qp);
+ IRDMA_RING_MOVE_HEAD_NOCHECK(qp->sq_ring);
+ }
+ }
+
+ *wqe_idx = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
+ if (!*wqe_idx)
+ qp->swqe_polarity = !qp->swqe_polarity;
+
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, quanta);
+
+ wqe = qp->sq_base[*wqe_idx].elem;
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1 && quanta == 1 &&
+ (IRDMA_RING_CURRENT_HEAD(qp->sq_ring) & 1)) {
+ wqe_0 = qp->sq_base[IRDMA_RING_CURRENT_HEAD(qp->sq_ring)].elem;
+ wqe_0[3] = cpu_to_le64(FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity ? 0 : 1));
+ }
+ qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
+ qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
+ qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
+
+ return wqe;
+}
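+
+/*
+ * Worked example of the NOP padding above (illustrative numbers): with
+ * max_hw_sq_chunk = 4 and the SQ head at index 6, avail_quanta is 2, so a
+ * 3-quanta WR is preceded by two NOP WQEs at indices 6 and 7 and the WR
+ * itself starts on the chunk boundary at index 8.
+ */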
+
+/**
+ * irdma_qp_get_next_recv_wqe - get the qp's next rcv wqe
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ */
+__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx)
+{
+ __le64 *wqe;
+ int ret_code;
+
+ if (IRDMA_RING_FULL_ERR(qp->rq_ring))
+ return NULL;
+
+ IRDMA_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ if (!*wqe_idx)
+ qp->rwqe_polarity = !qp->rwqe_polarity;
+ /* rq_wqe_size_multiplier is the number of 32-byte quanta in one rq wqe */
+ wqe = qp->rq_base[*wqe_idx * qp->rq_wqe_size_multiplier].elem;
+
+ return wqe;
+}
+
+/**
+ * irdma_uk_rdma_write - rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ u64 hdr;
+ __le64 *wqe;
+ struct irdma_rdma_write *op_info;
+ u32 i, wqe_idx;
+ u32 total_size = 0, byte_off;
+ int ret_code;
+ u32 frag_cnt, addl_frag_cnt;
+ bool read_fence = false;
+ u16 quanta;
+
+ op_info = &info->op.rdma_write;
+ if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].length;
+
+ read_fence |= info->read_fence;
+
+ if (info->imm_data_valid)
+ frag_cnt = op_info->num_lo_sges + 1;
+ else
+ frag_cnt = op_info->num_lo_sges;
+ addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
+ ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+
+ if (info->imm_data_valid) {
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ i = 0;
+ } else {
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ op_info->lo_sg_list,
+ qp->swqe_polarity);
+ i = 1;
+ }
+
+ for (byte_off = 32; i < op_info->num_lo_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off,
+ &op_info->lo_sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if the frag count is even, set the valid bit in the next, unused fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
+ frag_cnt) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
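+
+/*
+ * Illustrative caller sketch for the post-SQ helpers above (hypothetical
+ * code, for reference only; the opcode constant name is assumed from
+ * user.h): a single-SGE RDMA write is described entirely through
+ * struct irdma_post_sq_info before handing it to irdma_uk_rdma_write():
+ *
+ *	struct irdma_post_sq_info info = {};
+ *
+ *	info.wr_id = wr_id;
+ *	info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
+ *	info.signaled = true;
+ *	info.op.rdma_write.lo_sg_list = &sge;
+ *	info.op.rdma_write.num_lo_sges = 1;
+ *	info.op.rdma_write.rem_addr.addr = remote_addr;
+ *	info.op.rdma_write.rem_addr.lkey = rkey;
+ *	err = irdma_uk_rdma_write(qp, &info, true);
+ */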
+
+/**
+ * irdma_uk_rdma_read - rdma read command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @inv_stag: flag for inv_stag
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq)
+{
+ struct irdma_rdma_read *op_info;
+ int ret_code;
+ u32 i, byte_off, total_size = 0;
+ bool local_fence = false;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u32 wqe_idx;
+ u16 quanta;
+ u64 hdr;
+
+ op_info = &info->op.rdma_read;
+ if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].length;
+
+ ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ addl_frag_cnt = op_info->num_lo_sges > 1 ?
+ (op_info->num_lo_sges - 1) : 0;
+ local_fence |= info->local_fence;
+
+ qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->lo_sg_list,
+ qp->swqe_polarity);
+ for (i = 1, byte_off = 32; i < op_info->num_lo_sges; ++i) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off,
+ &op_info->lo_sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if the frag count is even, set the valid bit in the next, unused fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 &&
+ !(op_info->num_lo_sges & 0x01) && op_info->num_lo_sges) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE,
+ (inv_stag ? IRDMAQP_OP_RDMA_READ_LOC_INV : IRDMAQP_OP_RDMA_READ)) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_send - rdma send command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_post_send *op_info;
+ u64 hdr;
+ u32 i, wqe_idx, total_size = 0, byte_off;
+ int ret_code;
+ u32 frag_cnt, addl_frag_cnt;
+ bool read_fence = false;
+ u16 quanta;
+
+ op_info = &info->op.send;
+ if (qp->max_sq_frag_cnt < op_info->num_sges)
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].length;
+
+ if (info->imm_data_valid)
+ frag_cnt = op_info->num_sges + 1;
+ else
+ frag_cnt = op_info->num_sges;
+ ret_code = irdma_fragcnt_to_quanta_sq(frag_cnt, &quanta);
+ if (ret_code)
+ return ret_code;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ read_fence |= info->read_fence;
+ addl_frag_cnt = frag_cnt > 1 ? (frag_cnt - 1) : 0;
+ if (info->imm_data_valid) {
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ i = 0;
+ } else {
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ frag_cnt ? op_info->sg_list : NULL,
+ qp->swqe_polarity);
+ i = 1;
+ }
+
+ for (byte_off = 32; i < op_info->num_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, &op_info->sg_list[i],
+ qp->swqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if the frag count is even, set the valid bit in the next, unused fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(frag_cnt & 0x01) &&
+ frag_cnt) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->swqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_set_mw_bind_wqe_gen_1 - set mw bind wqe
+ * @wqe: wqe for setting fragment
+ * @op_info: info for setting bind wqe values
+ */
+static void irdma_set_mw_bind_wqe_gen_1(__le64 *wqe,
+ struct irdma_bind_window *op_info)
+{
+ set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mw_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mr_stag));
+ set_64bit_val(wqe, 16, op_info->bind_len);
+}
+
+/**
+ * irdma_copy_inline_data_gen_1 - Copy inline data to wqe
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of SGEs
+ * @polarity: compatibility parameter
+ */
+static void irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity)
+{
+ u32 quanta_bytes_remaining = 16;
+ int i;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ /* Remaining inline bytes reside after hdr */
+ wqe += 16;
+ quanta_bytes_remaining = 32;
+ }
+ }
+ }
+}
+
+/**
+ * irdma_inline_data_size_to_quanta_gen_1 - calculate quanta based on inline data size
+ * @data_size: data size for inline
+ *
+ * Gets the quanta based on inline and immediate data.
+ */
+static inline u16 irdma_inline_data_size_to_quanta_gen_1(u32 data_size)
+{
+ return data_size <= 16 ? IRDMA_QP_WQE_MIN_QUANTA : 2;
+}
+
+/**
+ * irdma_set_mw_bind_wqe - set mw bind in wqe
+ * @wqe: wqe for setting mw bind
+ * @op_info: info for setting wqe values
+ */
+static void irdma_set_mw_bind_wqe(__le64 *wqe,
+ struct irdma_bind_window *op_info)
+{
+ set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_PARENTMRSTAG, op_info->mr_stag) |
+ FIELD_PREP(IRDMAQPSQ_MWSTAG, op_info->mw_stag));
+ set_64bit_val(wqe, 16, op_info->bind_len);
+}
+
+/**
+ * irdma_copy_inline_data - Copy inline data to wqe
+ * @wqe: pointer to wqe
+ * @sge_list: table of pointers to inline data
+ * @num_sges: number of SGEs
+ * @polarity: polarity of wqe valid bit
+ */
+static void irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity)
+{
+ u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
+ u32 quanta_bytes_remaining = 8;
+ bool first_quanta = true;
+ int i;
+
+ wqe += 8;
+
+ for (i = 0; i < num_sges; i++) {
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
+
+ while (sge_len) {
+ u32 bytes_copied;
+
+ bytes_copied = min(sge_len, quanta_bytes_remaining);
+ memcpy(wqe, cur_sge, bytes_copied);
+ wqe += bytes_copied;
+ cur_sge += bytes_copied;
+ quanta_bytes_remaining -= bytes_copied;
+ sge_len -= bytes_copied;
+
+ if (!quanta_bytes_remaining) {
+ quanta_bytes_remaining = 31;
+
+ /* Remaining inline bytes reside after hdr */
+ if (first_quanta) {
+ first_quanta = false;
+ wqe += 16;
+ } else {
+ *wqe = inline_valid;
+ wqe++;
+ }
+ }
+ }
+ }
+ if (!first_quanta && quanta_bytes_remaining < 31)
+ *(wqe + quanta_bytes_remaining) = inline_valid;
+}
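+
+/*
+ * Inline data layout produced above (derived from the copy loop, for
+ * reference): the first 32-byte quantum carries only 8 data bytes (bytes
+ * 8-15; the rest of that quantum holds immediate data, addressing fields
+ * and the WQE header), while every additional quantum carries 31 data
+ * bytes with its last byte reserved for the valid/polarity bit.
+ */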
+
+/**
+ * irdma_inline_data_size_to_quanta - calculate quanta based on inline data size
+ * @data_size: data size for inline
+ *
+ * Gets the quanta based on inline and immediate data.
+ */
+static u16 irdma_inline_data_size_to_quanta(u32 data_size)
+{
+ if (data_size <= 8)
+ return IRDMA_QP_WQE_MIN_QUANTA;
+ else if (data_size <= 39)
+ return 2;
+ else if (data_size <= 70)
+ return 3;
+ else if (data_size <= 101)
+ return 4;
+ else if (data_size <= 132)
+ return 5;
+ else if (data_size <= 163)
+ return 6;
+ else if (data_size <= 194)
+ return 7;
+ else
+ return 8;
+}
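+
+/*
+ * Worked example for the thresholds above (derived from the inline data
+ * layout): the first quantum holds 8 inline bytes and each further quantum
+ * holds 31, so a 64-byte inline payload needs 8 + 31 + 25 bytes = 3 quanta,
+ * matching the data_size <= 70 case.
+ */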
+
+/**
+ * irdma_uk_inline_rdma_write - inline rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_rdma_write *op_info;
+ u64 hdr = 0;
+ u32 wqe_idx;
+ bool read_fence = false;
+ u32 i, total_size = 0;
+ u16 quanta;
+
+ op_info = &info->op.rdma_write;
+
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_lo_sges; i++)
+ total_size += op_info->lo_sg_list[i].length;
+
+ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ read_fence |= info->read_fence;
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid ? 1 : 0) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ if (info->imm_data_valid)
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->lo_sg_list,
+ op_info->num_lo_sges,
+ qp->swqe_polarity);
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_inline_send - inline send operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_post_send *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool read_fence = false;
+ u32 i, total_size = 0;
+ u16 quanta;
+
+ op_info = &info->op.send;
+
+ if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
+ return -EINVAL;
+
+ for (i = 0; i < op_info->num_sges; i++)
+ total_size += op_info->sg_list[i].length;
+
+ if (unlikely(total_size > qp->max_inline_data))
+ return -EINVAL;
+
+ quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMAQPSQ_DESTQKEY, op_info->qkey) |
+ FIELD_PREP(IRDMAQPSQ_DESTQPN, op_info->dest_qp));
+
+ read_fence |= info->read_fence;
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, info->stag_to_inv) |
+ FIELD_PREP(IRDMAQPSQ_AHID, op_info->ah_id) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
+ FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG,
+ (info->imm_data_valid ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
+ FIELD_PREP(IRDMAQPSQ_INLINEDATAFLAG, 1) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_UDPHEADER, info->udp_hdr) |
+ FIELD_PREP(IRDMAQPSQ_L4LEN, info->l4len) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ if (info->imm_data_valid)
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
+ qp->wqe_ops.iw_copy_inline_data((u8 *)wqe, op_info->sg_list,
+ op_info->num_sges, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_stag_local_invalidate - stag invalidate operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq)
+{
+ __le64 *wqe;
+ struct irdma_inv_local_stag *op_info;
+ u64 hdr;
+ u32 wqe_idx;
+ bool local_fence = false;
+ struct ib_sge sge = {};
+
+ op_info = &info->op.inv_local_stag;
+ local_fence = info->local_fence;
+
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ sge.lkey = op_info->target_stag;
+ qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
+
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_INV_STAG) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_post_receive - post receive wqe
+ * @qp: hw qp ptr
+ * @info: post rq information
+ */
+int irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info)
+{
+ u32 wqe_idx, i, byte_off;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u64 hdr;
+
+ if (qp->max_rq_frag_cnt < info->num_sges)
+ return -EINVAL;
+
+ wqe = irdma_qp_get_next_recv_wqe(qp, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ qp->rq_wrid_array[wqe_idx] = info->wr_id;
+ addl_frag_cnt = info->num_sges > 1 ? (info->num_sges - 1) : 0;
+ qp->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
+ qp->rwqe_polarity);
+
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
+ qp->rwqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if not an odd number set valid bit in next fragment */
+ if (qp->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
+ info->num_sges) {
+ qp->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ qp->rwqe_polarity);
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16, 0);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->rwqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_cq_resize - reset the cq buffer info
+ * @cq: cq to resize
+ * @cq_base: new cq buffer addr
+ * @cq_size: number of cqes
+ */
+void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)
+{
+ cq->cq_base = cq_base;
+ cq->cq_size = cq_size;
+ IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+}
+
+/**
+ * irdma_uk_cq_set_resized_cnt - record the count of the resized buffers
+ * @cq: cq to resize
+ * @cq_cnt: the count of the resized cq buffers
+ */
+void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *cq, u16 cq_cnt)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se;
+ u8 arm_next;
+ u8 arm_seq_num;
+
+ get_64bit_val(cq->shadow_area, 32, &temp_val);
+
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ sw_cq_sel += cq_cnt;
+
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_next = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT, temp_val);
+
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
+
+ set_64bit_val(cq->shadow_area, 32, temp_val);
+}
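+
+/*
+ * Note that the shadow-area doorbell word is read-modify-written above so
+ * the current arm sequence number and arm bits are preserved while
+ * SW_CQ_SELECT advances by the number of resized CQ buffers.
+ */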
+
+/**
+ * irdma_uk_cq_request_notification - cq notification request (doorbell)
+ * @cq: hw cq
+ * @cq_notify: notification type
+ */
+void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
+ enum irdma_cmpl_notify cq_notify)
+{
+ u64 temp_val;
+ u16 sw_cq_sel;
+ u8 arm_next_se = 0;
+ u8 arm_next = 0;
+ u8 arm_seq_num;
+
+ get_64bit_val(cq->shadow_area, 32, &temp_val);
+ arm_seq_num = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_SEQ_NUM, temp_val);
+ arm_seq_num++;
+ sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
+ arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
+ arm_next_se |= 1;
+ if (cq_notify == IRDMA_CQ_COMPL_EVENT)
+ arm_next = 1;
+ temp_val = FIELD_PREP(IRDMA_CQ_DBSA_ARM_SEQ_NUM, arm_seq_num) |
+ FIELD_PREP(IRDMA_CQ_DBSA_SW_CQ_SELECT, sw_cq_sel) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
+ FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, arm_next);
+
+ set_64bit_val(cq->shadow_area, 32, temp_val);
+
+ dma_wmb(); /* make sure shadow area is updated before ringing the doorbell */
+
+ writel(cq->cq_id, cq->cqe_alloc_db);
+}
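+
+/*
+ * irdma_uk_cq_request_notification() bumps the arm sequence number, always
+ * sets the solicited-event arm bit, additionally sets the next-completion
+ * arm bit for IRDMA_CQ_COMPL_EVENT, and then writes the CQ id to the CQE
+ * allocate doorbell to arm the CQ.
+ */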
+
+/**
+ * irdma_uk_cq_poll_cmpl - get cq completion info
+ * @cq: hw cq
+ * @info: cq poll information returned
+ */
+int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info)
+{
+ u64 comp_ctx, qword0, qword2, qword3;
+ __le64 *cqe;
+ struct irdma_qp_uk *qp;
+ struct irdma_ring *pring = NULL;
+ u32 wqe_idx;
+ int ret_code;
+ bool move_cq_head = true;
+ u8 polarity;
+ bool ext_valid;
+ __le64 *ext_cqe;
+
+ if (cq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
+
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ if (polarity != cq->polarity)
+ return -ENOENT;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3);
+ if (ext_valid) {
+ u64 qword6, qword7;
+ u32 peek_head;
+
+ if (cq->avoid_mem_cflct) {
+ ext_cqe = (__le64 *)((u8 *)cqe + 32);
+ get_64bit_val(ext_cqe, 24, &qword7);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ } else {
+ peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
+ ext_cqe = cq->cq_base[peek_head].buf;
+ get_64bit_val(ext_cqe, 24, &qword7);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
+ if (!peek_head)
+ polarity ^= 1;
+ }
+ if (polarity != cq->polarity)
+ return -ENOENT;
+
+ /* Ensure ext CQE contents are read after ext valid bit is checked */
+ dma_rmb();
+
+ info->imm_valid = (bool)FIELD_GET(IRDMA_CQ_IMMVALID, qword7);
+ if (info->imm_valid) {
+ u64 qword4;
+
+ get_64bit_val(ext_cqe, 0, &qword4);
+ info->imm_data = (u32)FIELD_GET(IRDMA_CQ_IMMDATALOW32, qword4);
+ }
+ info->ud_smac_valid = (bool)FIELD_GET(IRDMA_CQ_UDSMACVALID, qword7);
+ info->ud_vlan_valid = (bool)FIELD_GET(IRDMA_CQ_UDVLANVALID, qword7);
+ if (info->ud_smac_valid || info->ud_vlan_valid) {
+ get_64bit_val(ext_cqe, 16, &qword6);
+ if (info->ud_vlan_valid)
+ info->ud_vlan = (u16)FIELD_GET(IRDMA_CQ_UDVLAN, qword6);
+ if (info->ud_smac_valid) {
+ info->ud_smac[5] = qword6 & 0xFF;
+ info->ud_smac[4] = (qword6 >> 8) & 0xFF;
+ info->ud_smac[3] = (qword6 >> 16) & 0xFF;
+ info->ud_smac[2] = (qword6 >> 24) & 0xFF;
+ info->ud_smac[1] = (qword6 >> 32) & 0xFF;
+ info->ud_smac[0] = (qword6 >> 40) & 0xFF;
+ }
+ }
+ } else {
+ info->imm_valid = false;
+ info->ud_smac_valid = false;
+ info->ud_vlan_valid = false;
+ }
+
+ info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
+ info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+ if (info->error) {
+ info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
+ info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
+ if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ /* Set the min error to standard flush error code for remaining cqes */
+ if (info->minor_err != FLUSH_GENERAL_ERR) {
+ qword3 &= ~IRDMA_CQ_MINERR;
+ qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
+ set_64bit_val(cqe, 24, qword3);
+ }
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ }
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
+ }
+
+ get_64bit_val(cqe, 0, &qword0);
+ get_64bit_val(cqe, 16, &qword2);
+
+ info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
+ info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
+ info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+
+ info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
+ qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
+ if (!qp || qp->destroy_pending) {
+ ret_code = -EFAULT;
+ goto exit;
+ }
+ wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
+ info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
+
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ u32 array_idx;
+
+ array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
+
+ if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
+ info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
+ ret_code = -ENOENT;
+ goto exit;
+ }
+
+ info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
+ array_idx = qp->rq_ring.tail;
+ } else {
+ info->wr_id = qp->rq_wrid_array[array_idx];
+ }
+
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
+ if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
+ qp->rq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
+ qp->rq_flush_complete = true;
+ else
+ move_cq_head = false;
+ }
+ pring = &qp->rq_ring;
+ } else { /* q_type is IRDMA_CQE_QTYPE_SQ */
+ if (qp->first_sq_wq) {
+ if (wqe_idx + 1 >= qp->conn_wqes)
+ qp->first_sq_wq = false;
+
+ if (wqe_idx < qp->conn_wqes && qp->sq_ring.head == qp->sq_ring.tail) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ memset(info, 0,
+ sizeof(struct irdma_cq_poll_info));
+ return irdma_uk_cq_poll_cmpl(cq, info);
+ }
+ }
+ if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
+ info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ if (!info->comp_status)
+ info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+ info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
+ IRDMA_RING_SET_TAIL(qp->sq_ring,
+ wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
+ } else {
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
+ ret_code = -ENOENT;
+ goto exit;
+ }
+
+ do {
+ __le64 *sw_wqe;
+ u64 wqe_qword;
+ u32 tail;
+
+ tail = qp->sq_ring.tail;
+ sw_wqe = qp->sq_base[tail].elem;
+ get_64bit_val(sw_wqe, 24,
+ &wqe_qword);
+ info->op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE,
+ wqe_qword);
+ IRDMA_RING_SET_TAIL(qp->sq_ring,
+ tail + qp->sq_wrtrk_array[tail].quanta);
+ if (info->op_type != IRDMAQP_OP_NOP) {
+ info->wr_id = qp->sq_wrtrk_array[tail].wrid;
+ info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
+ break;
+ }
+ } while (1);
+ if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
+ info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
+ qp->sq_flush_seen = true;
+ if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
+ qp->sq_flush_complete = true;
+ }
+ pring = &qp->sq_ring;
+ }
+
+ ret_code = 0;
+
+exit:
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
+ if (pring && IRDMA_RING_MORE_WORK(*pring))
+ move_cq_head = false;
+
+ if (move_cq_head) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
+ cq->polarity ^= 1;
+
+ if (ext_valid && !cq->avoid_mem_cflct) {
+ IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
+ if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
+ cq->polarity ^= 1;
+ }
+
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ if (!cq->avoid_mem_cflct && ext_valid)
+ IRDMA_RING_MOVE_TAIL(cq->cq_ring);
+ set_64bit_val(cq->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ } else {
+ qword3 &= ~IRDMA_CQ_WQEIDX;
+ qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
+ set_64bit_val(cqe, 24, qword3);
+ }
+
+ return ret_code;
+}
+
+/**
+ * irdma_qp_round_up - round up qp wq depth to the next power of two
+ * @wqdepth: wq depth in quanta to round up
+ */
+static int irdma_qp_round_up(u32 wqdepth)
+{
+ int scount = 1;
+
+ for (wqdepth--; scount <= 16; scount *= 2)
+ wqdepth |= wqdepth >> scount;
+
+ return ++wqdepth;
+}
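+
+/*
+ * irdma_qp_round_up() is the usual round-up-to-next-power-of-two bit smear:
+ * a requested depth of 20 quanta, for example, becomes 32.
+ */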
+
+/**
+ * irdma_get_wqe_shift - get shift count for maximum wqe size
+ * @uk_attrs: qp HW attributes
+ * @sge: Maximum Scatter Gather Elements per wqe
+ * @inline_data: Maximum inline data size
+ * @shift: Returns the shift needed based on sge
+ *
+ * Shift is used to left-shift the base wqe size based on the number of SGEs
+ * and the inline data size.
+ * For 1 SGE or inline data <= 8, shift = 0 (wqe size of 32 bytes).
+ * For 2 or 3 SGEs or inline data <= 39, shift = 1 (wqe size of 64 bytes).
+ * For 4-7 SGEs or inline data <= 101, shift = 2 (wqe size of 128 bytes);
+ * otherwise shift = 3 (wqe size of 256 bytes).
+ */
+void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
+ u32 inline_data, u8 *shift)
+{
+ *shift = 0;
+ if (uk_attrs->hw_rev >= IRDMA_GEN_2) {
+ if (sge > 1 || inline_data > 8) {
+ if (sge < 4 && inline_data <= 39)
+ *shift = 1;
+ else if (sge < 8 && inline_data <= 101)
+ *shift = 2;
+ else
+ *shift = 3;
+ }
+ } else if (sge > 1 || inline_data > 16) {
+ *shift = (sge < 4 && inline_data <= 48) ? 1 : 2;
+ }
+}
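+
+/*
+ * Example: on a GEN_2 device, a QP requesting 4 SGEs with up to 64 bytes of
+ * inline data gets shift = 2, i.e. 128-byte (4-quanta) SQ WQEs.
+ */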
+
+/*
+ * irdma_get_sqdepth - get SQ depth (quanta)
+ * @uk_attrs: qp HW attributes
+ * @sq_size: SQ size
+ * @shift: shift which determines size of WQE
+ * @sqdepth: depth of SQ
+ */
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
+ u32 *sqdepth)
+{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
+ *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
+
+ if (*sqdepth < min_size)
+ *sqdepth = min_size;
+ else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * irdma_get_rqdepth - get RQ depth (quanta)
+ * @uk_attrs: qp HW attributes
+ * @rq_size: RQ size
+ * @shift: shift which determines size of WQE
+ * @rqdepth: depth of RQ
+ */
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
+ u32 *rqdepth)
+{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
+ *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
+
+ if (*rqdepth < min_size)
+ *rqdepth = min_size;
+ else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
+ .iw_copy_inline_data = irdma_copy_inline_data,
+ .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
+ .iw_set_fragment = irdma_set_fragment,
+ .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe,
+};
+
+static const struct irdma_wqe_uk_ops iw_wqe_uk_ops_gen_1 = {
+ .iw_copy_inline_data = irdma_copy_inline_data_gen_1,
+ .iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta_gen_1,
+ .iw_set_fragment = irdma_set_fragment_gen_1,
+ .iw_set_mw_bind_wqe = irdma_set_mw_bind_wqe_gen_1,
+};
+
+/**
+ * irdma_setup_connection_wqes - setup WQEs necessary to complete
+ * connection.
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ */
+static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info)
+{
+ u16 move_cnt = 1;
+
+ if (!info->legacy_mode &&
+ (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE))
+ move_cnt = 3;
+
+ qp->conn_wqes = move_cnt;
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
+ IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
+}
+
+/**
+ * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
+ * @ukinfo: qp initialization info
+ * @sq_shift: Returns shift of SQ
+ * @rq_shift: Returns shift of RQ
+ */
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+}
+
+/**
+ * irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
+ * @ukinfo: qp initialization info
+ * @sq_depth: Returns depth of SQ
+ * @sq_shift: Returns shift of SQ
+ */
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift)
+{
+ bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2;
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs,
+ imm_support ? ukinfo->max_sq_frag_cnt + 1 :
+ ukinfo->max_sq_frag_cnt,
+ ukinfo->max_inline_data, sq_shift);
+ status = irdma_get_sqdepth(ukinfo->uk_attrs, ukinfo->sq_size,
+ *sq_shift, sq_depth);
+
+ return status;
+}
+
+/**
+ * irdma_uk_calc_depth_shift_rq - calculate depth and shift for RQ size.
+ * @ukinfo: qp initialization info
+ * @rq_depth: Returns depth of RQ
+ * @rq_shift: Returns shift of RQ
+ */
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift)
+{
+ int status;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
+ rq_shift);
+
+ if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
+ if (ukinfo->abi_ver > 4)
+ *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
+ }
+
+ status = irdma_get_rqdepth(ukinfo->uk_attrs, ukinfo->rq_size,
+ *rq_shift, rq_depth);
+
+ return status;
+}
+
+/**
+ * irdma_uk_qp_init - initialize shared qp
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ *
+ * initializes the vars used in both user and kernel mode.
+ * The size of the wqe depends on the maximum number of fragments
+ * allowed. The size of the wqe times the number of wqes should then
+ * match the amount of memory allocated for the sq and rq.
+ */
+int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
+{
+ int ret_code = 0;
+ u32 sq_ring_size;
+
+ qp->uk_attrs = info->uk_attrs;
+ if (info->max_sq_frag_cnt > qp->uk_attrs->max_hw_wq_frags ||
+ info->max_rq_frag_cnt > qp->uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ qp->qp_caps = info->qp_caps;
+ qp->sq_base = info->sq;
+ qp->rq_base = info->rq;
+ qp->qp_type = info->type ? info->type : IRDMA_QP_TYPE_IWARP;
+ qp->shadow_area = info->shadow_area;
+ qp->sq_wrtrk_array = info->sq_wrtrk_array;
+
+ qp->rq_wrid_array = info->rq_wrid_array;
+ qp->wqe_alloc_db = info->wqe_alloc_db;
+ qp->qp_id = info->qp_id;
+ qp->sq_size = info->sq_size;
+ qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
+ sq_ring_size = qp->sq_size << info->sq_shift;
+ IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
+ IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
+ if (info->first_sq_wq) {
+ irdma_setup_connection_wqes(qp, info);
+ qp->swqe_polarity = 1;
+ qp->first_sq_wq = true;
+ } else {
+ qp->swqe_polarity = 0;
+ }
+ qp->swqe_polarity_deferred = 1;
+ qp->rwqe_polarity = 0;
+ qp->rq_size = info->rq_size;
+ qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
+ qp->max_inline_data = info->max_inline_data;
+ qp->rq_wqe_size = info->rq_shift;
+ IRDMA_RING_INIT(qp->rq_ring, qp->rq_size);
+ qp->rq_wqe_size_multiplier = 1 << info->rq_shift;
+ if (qp->uk_attrs->hw_rev == IRDMA_GEN_1)
+ qp->wqe_ops = iw_wqe_uk_ops_gen_1;
+ else
+ qp->wqe_ops = iw_wqe_uk_ops;
+ return ret_code;
+}
+
+/**
+ * irdma_uk_cq_init - initialize shared cq (user and kernel)
+ * @cq: hw cq
+ * @info: hw cq initialization info
+ */
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info)
+{
+ cq->cq_base = info->cq_base;
+ cq->cq_id = info->cq_id;
+ cq->cq_size = info->cq_size;
+ cq->cqe_alloc_db = info->cqe_alloc_db;
+ cq->cq_ack_db = info->cq_ack_db;
+ cq->shadow_area = info->shadow_area;
+ cq->avoid_mem_cflct = info->avoid_mem_cflct;
+ IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
+ cq->polarity = 1;
+}
+
+/**
+ * irdma_uk_clean_cq - clean cq entries
+ * @q: completion context
+ * @cq: cq to clean
+ */
+void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq)
+{
+ __le64 *cqe;
+ u64 qword3, comp_ctx;
+ u32 cq_head;
+ u8 polarity, temp;
+
+ cq_head = cq->cq_ring.head;
+ temp = cq->polarity;
+ do {
+ if (cq->avoid_mem_cflct)
+ cqe = ((struct irdma_extended_cqe *)(cq->cq_base))[cq_head].buf;
+ else
+ cqe = cq->cq_base[cq_head].buf;
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ if (polarity != temp)
+ break;
+
+ /* Ensure CQE contents are read after valid bit is checked */
+ dma_rmb();
+
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if ((void *)(unsigned long)comp_ctx == q)
+ set_64bit_val(cqe, 8, 0);
+
+ cq_head = (cq_head + 1) % cq->cq_ring.size;
+ if (!cq_head)
+ temp ^= 1;
+ } while (true);
+}
+
+/**
+ * irdma_nop - post a nop
+ * @qp: hw qp ptr
+ * @wr_id: work request id
+ * @signaled: signaled for completion
+ * @post_sq: ring doorbell
+ */
+int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq)
+{
+ __le64 *wqe;
+ u64 hdr;
+ u32 wqe_idx;
+ struct irdma_post_sq_info info = {};
+
+ info.wr_id = wr_id;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
+ 0, &info);
+ if (!wqe)
+ return -ENOMEM;
+
+ irdma_clr_wqes(qp, wqe_idx);
+
+ set_64bit_val(wqe, 0, 0);
+ set_64bit_val(wqe, 8, 0);
+ set_64bit_val(wqe, 16, 0);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_fragcnt_to_quanta_sq - calculate quanta based on fragment count for SQ
+ * @frag_cnt: number of fragments
+ * @quanta: quanta for frag_cnt
+ */
+int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *quanta = IRDMA_QP_WQE_MIN_QUANTA;
+ break;
+ case 2:
+ case 3:
+ *quanta = 2;
+ break;
+ case 4:
+ case 5:
+ *quanta = 3;
+ break;
+ case 6:
+ case 7:
+ *quanta = 4;
+ break;
+ case 8:
+ case 9:
+ *quanta = 5;
+ break;
+ case 10:
+ case 11:
+ *quanta = 6;
+ break;
+ case 12:
+ case 13:
+ *quanta = 7;
+ break;
+ case 14:
+ case 15: /* when immediate data is present */
+ *quanta = 8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
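+
+/*
+ * The SQ quanta count grows by one for every two extra fragments, matching a
+ * layout where the first 32-byte quantum carries the WQE header plus one
+ * 16-byte fragment and each following quantum carries two more fragments.
+ */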
+
+/**
+ * irdma_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size in bytes given frag_cnt
+ */
+int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size)
+{
+ switch (frag_cnt) {
+ case 0:
+ case 1:
+ *wqe_size = 32;
+ break;
+ case 2:
+ case 3:
+ *wqe_size = 64;
+ break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ *wqe_size = 128;
+ break;
+ case 8:
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ *wqe_size = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
new file mode 100644
index 0000000000..36feca57b2
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -0,0 +1,413 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_USER_H
+#define IRDMA_USER_H
+
+#define irdma_handle void *
+#define irdma_adapter_handle irdma_handle
+#define irdma_qp_handle irdma_handle
+#define irdma_cq_handle irdma_handle
+#define irdma_pd_id irdma_handle
+#define irdma_stag_handle irdma_handle
+#define irdma_stag_index u32
+#define irdma_stag u32
+#define irdma_stag_key u8
+#define irdma_tagged_offset u64
+#define irdma_access_privileges u32
+#define irdma_physical_fragment u64
+#define irdma_address_list u64 *
+
+#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
+
+#define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01
+#define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02
+#define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04
+#define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05
+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08
+#define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a
+#define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10
+#define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20
+#define IRDMA_ACCESS_FLAGS_ALL 0x3f
+
+#define IRDMA_OP_TYPE_RDMA_WRITE 0x00
+#define IRDMA_OP_TYPE_RDMA_READ 0x01
+#define IRDMA_OP_TYPE_SEND 0x03
+#define IRDMA_OP_TYPE_SEND_INV 0x04
+#define IRDMA_OP_TYPE_SEND_SOL 0x05
+#define IRDMA_OP_TYPE_SEND_SOL_INV 0x06
+#define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d
+#define IRDMA_OP_TYPE_BIND_MW 0x08
+#define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09
+#define IRDMA_OP_TYPE_INV_STAG 0x0a
+#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
+#define IRDMA_OP_TYPE_NOP 0x0c
+#define IRDMA_OP_TYPE_REC 0x3e
+#define IRDMA_OP_TYPE_REC_IMM 0x3f
+
+#define IRDMA_FLUSH_MAJOR_ERR 1
+
+enum irdma_device_caps_const {
+ IRDMA_WQE_SIZE = 4,
+ IRDMA_CQP_WQE_SIZE = 8,
+ IRDMA_CQE_SIZE = 4,
+ IRDMA_EXTENDED_CQE_SIZE = 8,
+ IRDMA_AEQE_SIZE = 2,
+ IRDMA_CEQE_SIZE = 1,
+ IRDMA_CQP_CTX_SIZE = 8,
+ IRDMA_SHADOW_AREA_SIZE = 8,
+ IRDMA_QUERY_FPM_BUF_SIZE = 176,
+ IRDMA_COMMIT_FPM_BUF_SIZE = 176,
+ IRDMA_GATHER_STATS_BUF_SIZE = 1024,
+ IRDMA_MIN_IW_QP_ID = 0,
+ IRDMA_MAX_IW_QP_ID = 262143,
+ IRDMA_MIN_CEQID = 0,
+ IRDMA_MAX_CEQID = 1023,
+ IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
+ IRDMA_MIN_CQID = 0,
+ IRDMA_MAX_CQID = 524287,
+ IRDMA_MIN_AEQ_ENTRIES = 1,
+ IRDMA_MAX_AEQ_ENTRIES = 524287,
+ IRDMA_MIN_CEQ_ENTRIES = 1,
+ IRDMA_MAX_CEQ_ENTRIES = 262143,
+ IRDMA_MIN_CQ_SIZE = 1,
+ IRDMA_MAX_CQ_SIZE = 1048575,
+ IRDMA_DB_ID_ZERO = 0,
+ IRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
+ IRDMA_MAX_SGE_RD = 13,
+ IRDMA_MAX_OUTBOUND_MSG_SIZE = 2147483647,
+ IRDMA_MAX_INBOUND_MSG_SIZE = 2147483647,
+ IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
+ IRDMA_MAX_PE_ENA_VF_COUNT = 32,
+ IRDMA_MAX_VF_FPM_ID = 47,
+ IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
+ IRDMA_MAX_INLINE_DATA_SIZE = 101,
+ IRDMA_MAX_WQ_ENTRIES = 32768,
+ IRDMA_Q2_BUF_SIZE = 256,
+ IRDMA_QP_CTX_SIZE = 256,
+ IRDMA_MAX_PDS = 262144,
+ IRDMA_MIN_WQ_SIZE_GEN2 = 8,
+};
+
+enum irdma_addressing_type {
+ IRDMA_ADDR_TYPE_ZERO_BASED = 0,
+ IRDMA_ADDR_TYPE_VA_BASED = 1,
+};
+
+enum irdma_flush_opcode {
+ FLUSH_INVALID = 0,
+ FLUSH_GENERAL_ERR,
+ FLUSH_PROT_ERR,
+ FLUSH_REM_ACCESS_ERR,
+ FLUSH_LOC_QP_OP_ERR,
+ FLUSH_REM_OP_ERR,
+ FLUSH_LOC_LEN_ERR,
+ FLUSH_FATAL_ERR,
+ FLUSH_RETRY_EXC_ERR,
+ FLUSH_MW_BIND_ERR,
+ FLUSH_REM_INV_REQ_ERR,
+};
+
+enum irdma_cmpl_status {
+ IRDMA_COMPL_STATUS_SUCCESS = 0,
+ IRDMA_COMPL_STATUS_FLUSHED,
+ IRDMA_COMPL_STATUS_INVALID_WQE,
+ IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
+ IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
+ IRDMA_COMPL_STATUS_INVALID_STAG,
+ IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
+ IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
+ IRDMA_COMPL_STATUS_INVALID_PD_ID,
+ IRDMA_COMPL_STATUS_WRAP_ERROR,
+ IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
+ IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
+ IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
+ IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
+ IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
+ IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
+ IRDMA_COMPL_STATUS_INVALID_FBO,
+ IRDMA_COMPL_STATUS_INVALID_LEN,
+ IRDMA_COMPL_STATUS_INVALID_ACCESS,
+ IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
+ IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
+ IRDMA_COMPL_STATUS_INVALID_REGION,
+ IRDMA_COMPL_STATUS_INVALID_WINDOW,
+ IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
+ IRDMA_COMPL_STATUS_UNKNOWN,
+};
+
+enum irdma_cmpl_notify {
+ IRDMA_CQ_COMPL_EVENT = 0,
+ IRDMA_CQ_COMPL_SOLICITED = 1,
+};
+
+enum irdma_qp_caps {
+ IRDMA_WRITE_WITH_IMM = 1,
+ IRDMA_SEND_WITH_IMM = 2,
+ IRDMA_ROCE = 4,
+ IRDMA_PUSH_MODE = 8,
+};
+
+struct irdma_qp_uk;
+struct irdma_cq_uk;
+struct irdma_qp_uk_init_info;
+struct irdma_cq_uk_init_info;
+
+struct irdma_ring {
+ u32 head;
+ u32 tail;
+ u32 size;
+};
+
+struct irdma_cqe {
+ __le64 buf[IRDMA_CQE_SIZE];
+};
+
+struct irdma_extended_cqe {
+ __le64 buf[IRDMA_EXTENDED_CQE_SIZE];
+};
+
+struct irdma_post_send {
+ struct ib_sge *sg_list;
+ u32 num_sges;
+ u32 qkey;
+ u32 dest_qp;
+ u32 ah_id;
+};
+
+struct irdma_post_rq_info {
+ u64 wr_id;
+ struct ib_sge *sg_list;
+ u32 num_sges;
+};
+
+struct irdma_rdma_write {
+ struct ib_sge *lo_sg_list;
+ u32 num_lo_sges;
+ struct ib_sge rem_addr;
+};
+
+struct irdma_rdma_read {
+ struct ib_sge *lo_sg_list;
+ u32 num_lo_sges;
+ struct ib_sge rem_addr;
+};
+
+struct irdma_bind_window {
+ irdma_stag mr_stag;
+ u64 bind_len;
+ void *va;
+ enum irdma_addressing_type addressing_type;
+ bool ena_reads:1;
+ bool ena_writes:1;
+ irdma_stag mw_stag;
+ bool mem_window_type_1:1;
+};
+
+struct irdma_inv_local_stag {
+ irdma_stag target_stag;
+};
+
+struct irdma_post_sq_info {
+ u64 wr_id;
+ u8 op_type;
+ u8 l4len;
+ bool signaled:1;
+ bool read_fence:1;
+ bool local_fence:1;
+ bool inline_data:1;
+ bool imm_data_valid:1;
+ bool report_rtt:1;
+ bool udp_hdr:1;
+ bool defer_flag:1;
+ u32 imm_data;
+ u32 stag_to_inv;
+ union {
+ struct irdma_post_send send;
+ struct irdma_rdma_write rdma_write;
+ struct irdma_rdma_read rdma_read;
+ struct irdma_bind_window bind_window;
+ struct irdma_inv_local_stag inv_local_stag;
+ } op;
+};
+
+struct irdma_cq_poll_info {
+ u64 wr_id;
+ irdma_qp_handle qp_handle;
+ u32 bytes_xfered;
+ u32 tcp_seq_num_rtt;
+ u32 qp_id;
+ u32 ud_src_qpn;
+ u32 imm_data;
+ irdma_stag inv_stag; /* or L_R_Key */
+ enum irdma_cmpl_status comp_status;
+ u16 major_err;
+ u16 minor_err;
+ u16 ud_vlan;
+ u8 ud_smac[6];
+ u8 op_type;
+ u8 q_type;
+ bool stag_invalid_set:1; /* or L_R_Key set */
+ bool error:1;
+ bool solicited_event:1;
+ bool ipv4:1;
+ bool ud_vlan_valid:1;
+ bool ud_smac_valid:1;
+ bool imm_valid:1;
+};
+
+int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_inline_send(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
+ bool post_sq);
+int irdma_uk_post_receive(struct irdma_qp_uk *qp,
+ struct irdma_post_rq_info *info);
+void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
+int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool inv_stag, bool post_sq);
+int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq);
+int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
+ bool post_sq);
+int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info,
+ bool post_sq);
+
+struct irdma_wqe_uk_ops {
+ void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list,
+ u32 num_sges, u8 polarity);
+ u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
+ u8 valid);
+ void (*iw_set_mw_bind_wqe)(__le64 *wqe,
+ struct irdma_bind_window *op_info);
+};
+
+int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
+ struct irdma_cq_poll_info *info);
+void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
+ enum irdma_cmpl_notify cq_notify);
+void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
+void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info);
+int irdma_uk_qp_init(struct irdma_qp_uk *qp,
+ struct irdma_qp_uk_init_info *info);
+void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
+ u8 *rq_shift);
+int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *sq_depth, u8 *sq_shift);
+int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
+ u32 *rq_depth, u8 *rq_shift);
+struct irdma_sq_uk_wr_trk_info {
+ u64 wrid;
+ u32 wr_len;
+ u16 quanta;
+ u8 reserved[2];
+};
+
+struct irdma_qp_quanta {
+ __le64 elem[IRDMA_WQE_SIZE];
+};
+
+struct irdma_qp_uk {
+ struct irdma_qp_quanta *sq_base;
+ struct irdma_qp_quanta *rq_base;
+ struct irdma_uk_attrs *uk_attrs;
+ u32 __iomem *wqe_alloc_db;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ __le64 *shadow_area;
+ struct irdma_ring sq_ring;
+ struct irdma_ring rq_ring;
+ struct irdma_ring initial_ring;
+ u32 qp_id;
+ u32 qp_caps;
+ u32 sq_size;
+ u32 rq_size;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
+ u32 max_inline_data;
+ struct irdma_wqe_uk_ops wqe_ops;
+ u16 conn_wqes;
+ u8 qp_type;
+ u8 swqe_polarity;
+ u8 swqe_polarity_deferred;
+ u8 rwqe_polarity;
+ u8 rq_wqe_size;
+ u8 rq_wqe_size_multiplier;
+ bool deferred_flag:1;
+ bool first_sq_wq:1;
+ bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
+ bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
+ bool destroy_pending:1; /* Indicates the QP is being destroyed */
+ void *back_qp;
+ u8 dbg_rq_flushed;
+ u8 sq_flush_seen;
+ u8 rq_flush_seen;
+};
+
+struct irdma_cq_uk {
+ struct irdma_cqe *cq_base;
+ u32 __iomem *cqe_alloc_db;
+ u32 __iomem *cq_ack_db;
+ __le64 *shadow_area;
+ u32 cq_id;
+ u32 cq_size;
+ struct irdma_ring cq_ring;
+ u8 polarity;
+ bool avoid_mem_cflct:1;
+};
+
+struct irdma_qp_uk_init_info {
+ struct irdma_qp_quanta *sq;
+ struct irdma_qp_quanta *rq;
+ struct irdma_uk_attrs *uk_attrs;
+ u32 __iomem *wqe_alloc_db;
+ __le64 *shadow_area;
+ struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
+ u64 *rq_wrid_array;
+ u32 qp_id;
+ u32 qp_caps;
+ u32 sq_size;
+ u32 rq_size;
+ u32 max_sq_frag_cnt;
+ u32 max_rq_frag_cnt;
+ u32 max_inline_data;
+ u32 sq_depth;
+ u32 rq_depth;
+ u8 first_sq_wq;
+ u8 type;
+ u8 sq_shift;
+ u8 rq_shift;
+ int abi_ver;
+ bool legacy_mode;
+};
+
+struct irdma_cq_uk_init_info {
+ u32 __iomem *cqe_alloc_db;
+ u32 __iomem *cq_ack_db;
+ struct irdma_cqe *cq_base;
+ __le64 *shadow_area;
+ u32 cq_size;
+ u32 cq_id;
+ bool avoid_mem_cflct;
+};
+
+__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
+ u16 quanta, u32 total_size,
+ struct irdma_post_sq_info *info);
+__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
+void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
+int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
+int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
+int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
+void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
+ u32 inline_data, u8 *shift);
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
+ u32 *wqdepth);
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
+ u32 *wqdepth);
+void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
+#endif /* IRDMA_USER_H */
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
new file mode 100644
index 0000000000..6cd5cb85da
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -0,0 +1,2545 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+
+/**
+ * irdma_arp_table - manage arp table
+ * @rf: RDMA PCI function
+ * @ip_addr: ip address for device
+ * @ipv4: IPv4 flag
+ * @mac_addr: mac address ptr
+ * @action: modify, delete or add
+ */
+int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
+ const u8 *mac_addr, u32 action)
+{
+ unsigned long flags;
+ int arp_index;
+ u32 ip[4] = {};
+
+ if (ipv4)
+ ip[0] = *ip_addr;
+ else
+ memcpy(ip, ip_addr, sizeof(ip));
+
+ spin_lock_irqsave(&rf->arp_lock, flags);
+ for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
+ if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
+ break;
+ }
+
+ switch (action) {
+ case IRDMA_ARP_ADD:
+ if (arp_index != rf->arp_table_size) {
+ arp_index = -1;
+ break;
+ }
+
+ arp_index = 0;
+ if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
+ (u32 *)&arp_index, &rf->next_arp_index)) {
+ arp_index = -1;
+ break;
+ }
+
+ memcpy(rf->arp_table[arp_index].ip_addr, ip,
+ sizeof(rf->arp_table[arp_index].ip_addr));
+ ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
+ break;
+ case IRDMA_ARP_RESOLVE:
+ if (arp_index == rf->arp_table_size)
+ arp_index = -1;
+ break;
+ case IRDMA_ARP_DELETE:
+ if (arp_index == rf->arp_table_size) {
+ arp_index = -1;
+ break;
+ }
+
+ memset(rf->arp_table[arp_index].ip_addr, 0,
+ sizeof(rf->arp_table[arp_index].ip_addr));
+ eth_zero_addr(rf->arp_table[arp_index].mac_addr);
+ irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
+ break;
+ default:
+ arp_index = -1;
+ break;
+ }
+
+ spin_unlock_irqrestore(&rf->arp_lock, flags);
+ return arp_index;
+}
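+
+/*
+ * irdma_arp_table() returns the ARP table index for a successful add,
+ * resolve or delete, or -1 when the entry is missing, already present, or
+ * no free slot could be allocated.
+ */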
+
+/**
+ * irdma_add_arp - add a new arp entry if needed
+ * @rf: RDMA function
+ * @ip: IP address
+ * @ipv4: IPv4 flag
+ * @mac: MAC address
+ */
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
+{
+ int arpidx;
+
+ arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE);
+ if (arpidx >= 0) {
+ if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
+ return arpidx;
+
+ irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
+ ipv4, IRDMA_ARP_DELETE);
+ }
+
+ irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD);
+
+ return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE);
+}
+
+/**
+ * wr32 - write 32 bits to hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ * @val: value to write to register
+ */
+inline void wr32(struct irdma_hw *hw, u32 reg, u32 val)
+{
+ writel(val, hw->hw_addr + reg);
+}
+
+/**
+ * rd32 - read a 32 bit hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ *
+ * Return value of register content
+ */
+inline u32 rd32(struct irdma_hw *hw, u32 reg)
+{
+ return readl(hw->hw_addr + reg);
+}
+
+/**
+ * rd64 - read a 64 bit hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ *
+ * Return value of register content
+ */
+inline u64 rd64(struct irdma_hw *hw, u32 reg)
+{
+ return readq(hw->hw_addr + reg);
+}
+
+static void irdma_gid_change_event(struct ib_device *ibdev)
+{
+ struct ib_event ib_event;
+
+ ib_event.event = IB_EVENT_GID_CHANGE;
+ ib_event.device = ibdev;
+ ib_event.element.port_num = 1;
+ ib_dispatch_event(&ib_event);
+}
+
+/**
+ * irdma_inetaddr_event - system notifier for ipv4 addr events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct in_ifaddr *ifa = ptr;
+ struct net_device *real_dev, *netdev = ifa->ifa_dev->dev;
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ u32 local_ipaddr;
+
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ local_ipaddr = ntohl(ifa->ifa_address);
+ ibdev_dbg(&iwdev->ibdev,
+ "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", real_dev,
+ event, &local_ipaddr, real_dev->dev_addr);
+ switch (event) {
+ case NETDEV_DOWN:
+ irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
+ &local_ipaddr, true, IRDMA_ARP_DELETE);
+ irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, false);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGEADDR:
+ irdma_add_arp(iwdev->rf, &local_ipaddr, true, real_dev->dev_addr);
+ irdma_if_notify(iwdev, real_dev, &local_ipaddr, true, true);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ default:
+ break;
+ }
+
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_inet6addr_event - system notifier for ipv6 addr events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct inet6_ifaddr *ifa = ptr;
+ struct net_device *real_dev, *netdev = ifa->idev->dev;
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ u32 local_ipaddr6[4];
+
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+ ibdev_dbg(&iwdev->ibdev,
+ "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", real_dev,
+ event, local_ipaddr6, real_dev->dev_addr);
+ switch (event) {
+ case NETDEV_DOWN:
+ irdma_manage_arp_cache(iwdev->rf, real_dev->dev_addr,
+ local_ipaddr6, false, IRDMA_ARP_DELETE);
+ irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, false);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGEADDR:
+ irdma_add_arp(iwdev->rf, local_ipaddr6, false,
+ real_dev->dev_addr);
+ irdma_if_notify(iwdev, real_dev, local_ipaddr6, false, true);
+ irdma_gid_change_event(&iwdev->ibdev);
+ break;
+ default:
+ break;
+ }
+
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_net_event - system notifier for net events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: neighbor
+ */
+int irdma_net_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct neighbour *neigh = ptr;
+ struct net_device *real_dev, *netdev = (struct net_device *)neigh->dev;
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ __be32 *p;
+ u32 local_ipaddr[4] = {};
+ bool ipv4 = true;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+ ibdev = ib_device_get_by_netdev(real_dev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ p = (__be32 *)neigh->primary_key;
+ if (neigh->tbl->family == AF_INET6) {
+ ipv4 = false;
+ irdma_copy_ip_ntohl(local_ipaddr, p);
+ } else {
+ local_ipaddr[0] = ntohl(*p);
+ }
+
+ ibdev_dbg(&iwdev->ibdev,
+ "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n",
+ iwdev->netdev, neigh->nud_state, local_ipaddr,
+ neigh->ha);
+
+ if (neigh->nud_state & NUD_VALID)
+ irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha);
+
+ else
+ irdma_manage_arp_cache(iwdev->rf, neigh->ha,
+ local_ipaddr, ipv4,
+ IRDMA_ARP_DELETE);
+ ib_device_put(ibdev);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_netdevice_event - system notifier for netdev events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: netdev
+ */
+int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
+ void *ptr)
+{
+ struct irdma_device *iwdev;
+ struct ib_device *ibdev;
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
+ if (!ibdev)
+ return NOTIFY_DONE;
+
+ iwdev = to_iwdev(ibdev);
+ iwdev->iw_status = 1;
+ switch (event) {
+ case NETDEV_DOWN:
+ iwdev->iw_status = 0;
+ fallthrough;
+ case NETDEV_UP:
+ irdma_port_ibevent(iwdev);
+ break;
+ default:
+ break;
+ }
+ ib_device_put(ibdev);
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
+ * @iwdev: irdma device
+ */
+static void irdma_add_ipv6_addr(struct irdma_device *iwdev)
+{
+ struct net_device *ip_dev;
+ struct inet6_dev *idev;
+ struct inet6_ifaddr *ifp, *tmp;
+ u32 local_ipaddr6[4];
+
+ rcu_read_lock();
+ for_each_netdev_rcu (&init_net, ip_dev) {
+ if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF &&
+ rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) ||
+ ip_dev == iwdev->netdev) &&
+ (READ_ONCE(ip_dev->flags) & IFF_UP)) {
+ idev = __in6_dev_get(ip_dev);
+ if (!idev) {
+ ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n");
+ break;
+ }
+ list_for_each_entry_safe (ifp, tmp, &idev->addr_list,
+ if_list) {
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n",
+ &ifp->addr,
+ rdma_vlan_dev_vlan_id(ip_dev),
+ ip_dev->dev_addr);
+
+ irdma_copy_ip_ntohl(local_ipaddr6,
+ ifp->addr.in6_u.u6_addr32);
+ irdma_manage_arp_cache(iwdev->rf,
+ ip_dev->dev_addr,
+ local_ipaddr6, false,
+ IRDMA_ARP_ADD);
+ }
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
+ * @iwdev: irdma device
+ */
+static void irdma_add_ipv4_addr(struct irdma_device *iwdev)
+{
+ struct net_device *dev;
+ struct in_device *idev;
+ u32 ip_addr;
+
+ rcu_read_lock();
+ for_each_netdev_rcu (&init_net, dev) {
+ if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF &&
+ rdma_vlan_dev_real_dev(dev) == iwdev->netdev) ||
+ dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) {
+ const struct in_ifaddr *ifa;
+
+ idev = __in_dev_get_rcu(dev);
+ if (!idev)
+ continue;
+
+ in_dev_for_each_ifa_rcu(ifa, idev) {
+ ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev),
+ dev->dev_addr);
+
+ ip_addr = ntohl(ifa->ifa_address);
+ irdma_manage_arp_cache(iwdev->rf, dev->dev_addr,
+ &ip_addr, true,
+ IRDMA_ARP_ADD);
+ }
+ }
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * irdma_add_ip - add ip addresses
+ * @iwdev: irdma device
+ *
+ * Add ipv4/ipv6 addresses to the arp cache
+ */
+void irdma_add_ip(struct irdma_device *iwdev)
+{
+ irdma_add_ipv4_addr(iwdev);
+ irdma_add_ipv6_addr(iwdev);
+}
+
+/**
+ * irdma_alloc_and_get_cqp_request - get cqp struct
+ * @cqp: device cqp ptr
+ * @wait: cqp to be used in wait mode
+ */
+struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
+ bool wait)
+{
+ struct irdma_cqp_request *cqp_request = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ if (!list_empty(&cqp->cqp_avail_reqs)) {
+ cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
+ struct irdma_cqp_request, list);
+ list_del_init(&cqp_request->list);
+ }
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
+ if (!cqp_request) {
+ cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
+ if (cqp_request) {
+ cqp_request->dynamic = true;
+ if (wait)
+ init_waitqueue_head(&cqp_request->waitq);
+ }
+ }
+ if (!cqp_request) {
+ ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory");
+ return NULL;
+ }
+
+ cqp_request->waiting = wait;
+ refcount_set(&cqp_request->refcnt, 1);
+ memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
+
+ return cqp_request;
+}
+
+/**
+ * irdma_get_cqp_request - increase refcount for cqp_request
+ * @cqp_request: pointer to cqp_request instance
+ */
+static inline void irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
+{
+ refcount_inc(&cqp_request->refcnt);
+}
+
+/**
+ * irdma_free_cqp_request - free cqp request
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void irdma_free_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ unsigned long flags;
+
+ if (cqp_request->dynamic) {
+ kfree(cqp_request);
+ } else {
+ WRITE_ONCE(cqp_request->request_done, false);
+ cqp_request->callback_fcn = NULL;
+ cqp_request->waiting = false;
+
+ spin_lock_irqsave(&cqp->req_lock, flags);
+ list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
+ spin_unlock_irqrestore(&cqp->req_lock, flags);
+ }
+ wake_up(&cqp->remove_wq);
+}
+
+/**
+ * irdma_put_cqp_request - dec ref count and free if 0
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void irdma_put_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (refcount_dec_and_test(&cqp_request->refcnt))
+ irdma_free_cqp_request(cqp, cqp_request);
+}
+
+/**
+ * irdma_free_pending_cqp_request - free pending cqp request objs
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+static void
+irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (cqp_request->waiting) {
+ cqp_request->compl_info.error = true;
+ WRITE_ONCE(cqp_request->request_done, true);
+ wake_up(&cqp_request->waitq);
+ }
+ wait_event_timeout(cqp->remove_wq,
+ refcount_read(&cqp_request->refcnt) == 1, 1000);
+ irdma_put_cqp_request(cqp, cqp_request);
+}
+
+/**
+ * irdma_cleanup_pending_cqp_op - clean-up cqp with no
+ * completions
+ * @rf: RDMA PCI function
+ */
+void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cqp *cqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request = NULL;
+ struct cqp_cmds_info *pcmdinfo = NULL;
+ u32 i, pending_work, wqe_idx;
+
+ pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
+ wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
+ for (i = 0; i < pending_work; i++) {
+ cqp_request = (struct irdma_cqp_request *)(unsigned long)
+ cqp->scratch_array[wqe_idx];
+ if (cqp_request)
+ irdma_free_pending_cqp_request(cqp, cqp_request);
+ wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
+ }
+
+ while (!list_empty(&dev->cqp_cmd_head)) {
+ pcmdinfo = irdma_remove_cqp_head(dev);
+ cqp_request =
+ container_of(pcmdinfo, struct irdma_cqp_request, info);
+ if (cqp_request)
+ irdma_free_pending_cqp_request(cqp, cqp_request);
+ }
+}
+
+/**
+ * irdma_wait_event - wait for completion
+ * @rf: RDMA PCI function
+ * @cqp_request: cqp request to wait
+ */
+static int irdma_wait_event(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_cqp_timeout cqp_timeout = {};
+ bool cqp_error = false;
+ int err_code = 0;
+
+ cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops);
+ do {
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+ if (wait_event_timeout(cqp_request->waitq,
+ READ_ONCE(cqp_request->request_done),
+ msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
+ break;
+
+ irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
+
+ if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
+ continue;
+
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+ return -ETIMEDOUT;
+ } while (1);
+
+ cqp_error = cqp_request->compl_info.error;
+ if (cqp_error) {
+ err_code = -EIO;
+ if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
+ if (cqp_request->compl_info.min_err_code == 0x8002)
+ err_code = -EBUSY;
+ else if (cqp_request->compl_info.min_err_code == 0x8029) {
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+ }
+ }
+ }
+
+ return err_code;
+}
+
+static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
+ [IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
+ [IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
+ [IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
+ [IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
+ [IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
+ [IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
+ [IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
+ [IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
+ [IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
+ [IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
+ [IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
+ [IRDMA_OP_QP_CREATE] = "Create QP Cmd",
+ [IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
+ [IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
+ [IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
+ [IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
+ [IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
+ [IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
+ [IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
+ [IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
+ [IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
+ [IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
+ [IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
+ [IRDMA_OP_RESUME] = "Resume QP Cmd",
+ [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
+ [IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
+ [IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
+ [IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
+ [IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
+ [IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
+ [IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
+ [IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
+ [IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
+ [IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
+ [IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
+ [IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
+ [IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
+ [IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
+ [IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
+ [IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
+ [IRDMA_OP_GEN_AE] = "Generate AE Cmd",
+ [IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
+ [IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
+ [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
+ [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
+ [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
+};
+
+static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
+ {0xffff, 0x8002, "Invalid State"},
+ {0xffff, 0x8006, "Flush No Wqe Pending"},
+ {0xffff, 0x8007, "Modify QP Bad Close"},
+ {0xffff, 0x8009, "LLP Closed"},
+ {0xffff, 0x800a, "Reset Not Sent"}
+};
+
+/**
+ * irdma_cqp_crit_err - check if CQP error is critical
+ * @dev: pointer to dev structure
+ * @cqp_cmd: code for last CQP operation
+ * @maj_err_code: major error code
+ * @min_err_code: minor error code
+ */
+bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
+ u16 maj_err_code, u16 min_err_code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
+ if (maj_err_code == irdma_noncrit_err_list[i].maj &&
+ min_err_code == irdma_noncrit_err_list[i].min) {
+ ibdev_dbg(to_ibdev(dev),
+ "CQP: [%s Error][%s] maj=0x%x min=0x%x\n",
+ irdma_noncrit_err_list[i].desc,
+ irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
+ min_err_code);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * irdma_handle_cqp_op - process cqp command
+ * @rf: RDMA PCI function
+ * @cqp_request: cqp request to process
+ */
+int irdma_handle_cqp_op(struct irdma_pci_f *rf,
+ struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct cqp_cmds_info *info = &cqp_request->info;
+ int status;
+ bool put_cqp_request = true;
+
+ if (rf->reset)
+ return -EBUSY;
+
+ irdma_get_cqp_request(cqp_request);
+ status = irdma_process_cqp_cmd(dev, info);
+ if (status)
+ goto err;
+
+ if (cqp_request->waiting) {
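+ /* on a wait failure the command may still be outstanding,
+ * so do not drop the reference taken above in that case
+ */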
+ put_cqp_request = false;
+ status = irdma_wait_event(rf, cqp_request);
+ if (status)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (irdma_cqp_crit_err(dev, info->cqp_cmd,
+ cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code))
+ ibdev_err(&rf->iwdev->ibdev,
+ "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
+ irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
+ cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
+ cqp_request->compl_info.min_err_code);
+
+ if (put_cqp_request)
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+void irdma_qp_add_ref(struct ib_qp *ibqp)
+{
+ struct irdma_qp *iwqp = (struct irdma_qp *)ibqp;
+
+ refcount_inc(&iwqp->refcnt);
+}
+
+void irdma_qp_rem_ref(struct ib_qp *ibqp)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ u32 qp_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
+ if (!refcount_dec_and_test(&iwqp->refcnt)) {
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ return;
+ }
+
+ qp_num = iwqp->ibqp.qp_num;
+ iwdev->rf->qp_table[qp_num] = NULL;
+ spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
+ complete(&iwqp->free_qp);
+}
+
+void irdma_cq_add_ref(struct ib_cq *ibcq)
+{
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+
+ refcount_inc(&iwcq->refcnt);
+}
+
+void irdma_cq_rem_ref(struct ib_cq *ibcq)
+{
+ struct ib_device *ibdev = ibcq->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwdev->rf->cqtable_lock, flags);
+ if (!refcount_dec_and_test(&iwcq->refcnt)) {
+ spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+ return;
+ }
+
+ iwdev->rf->cq_table[iwcq->cq_num] = NULL;
+ spin_unlock_irqrestore(&iwdev->rf->cqtable_lock, flags);
+ complete(&iwcq->free_cq);
+}
+
+struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
+{
+ return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
+}
+
+/**
+ * irdma_get_qp - get qp address
+ * @device: iwarp device
+ * @qpn: qp number
+ */
+struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
+{
+ struct irdma_device *iwdev = to_iwdev(device);
+
+ if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
+ return NULL;
+
+ return &iwdev->rf->qp_table[qpn]->ibqp;
+}
+
+/**
+ * irdma_remove_cqp_head - return head entry and remove
+ * @dev: device
+ */
+void *irdma_remove_cqp_head(struct irdma_sc_dev *dev)
+{
+ struct list_head *entry;
+ struct list_head *list = &dev->cqp_cmd_head;
+
+ if (list_empty(list))
+ return NULL;
+
+ entry = list->next;
+ list_del(entry);
+
+ return entry;
+}
+
+/**
+ * irdma_cqp_sds_cmd - create cqp command for sd
+ * @dev: hardware control device structure
+ * @sdinfo: information for sd cqp
+ */
+int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
+ struct irdma_update_sds_info *sdinfo)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
+ sizeof(cqp_info->in.u.update_pe_sds.info));
+ cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.update_pe_sds.dev = dev;
+ cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
+ * @qp: hardware control qp
+ * @op: suspend or resume
+ */
+int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 op)
+{
+ struct irdma_sc_dev *dev = qp->dev;
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_sc_cqp *cqp = dev->cqp;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = op;
+ cqp_info->in.u.suspend_resume.cqp = cqp;
+ cqp_info->in.u.suspend_resume.qp = qp;
+ cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_term_modify_qp - modify qp for term message
+ * @qp: hardware control qp
+ * @next_state: qp's next state
+ * @term: terminate code
+ * @term_len: length
+ */
+void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
+ u8 term_len)
+{
+ struct irdma_qp *iwqp;
+
+ iwqp = qp->qp_uk.back_qp;
+ irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
+}
+
+/**
+ * irdma_terminate_done - after terminate is completed
+ * @qp: hardware control qp
+ * @timeout_occurred: indicates if terminate timer expired
+ */
+void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
+{
+ struct irdma_qp *iwqp;
+ u8 hte = 0;
+ bool first_time;
+ unsigned long flags;
+
+ iwqp = qp->qp_uk.back_qp;
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->hte_added) {
+ iwqp->hte_added = 0;
+ hte = 1;
+ }
+ first_time = !(qp->term_flags & IRDMA_TERM_DONE);
+ qp->term_flags |= IRDMA_TERM_DONE;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (first_time) {
+ if (!timeout_occurred)
+ irdma_terminate_del_timer(qp);
+
+ irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
+ irdma_cm_disconn(iwqp);
+ }
+}
+
+static void irdma_terminate_timeout(struct timer_list *t)
+{
+ struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+
+ irdma_terminate_done(qp, 1);
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * irdma_terminate_start_timer - start terminate timeout
+ * @qp: hardware control qp
+ */
+void irdma_terminate_start_timer(struct irdma_sc_qp *qp)
+{
+ struct irdma_qp *iwqp;
+
+ iwqp = qp->qp_uk.back_qp;
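+ /* hold a QP reference while the timer is pending; dropped in
+ * irdma_terminate_timeout() or irdma_terminate_del_timer()
+ */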
+ irdma_qp_add_ref(&iwqp->ibqp);
+ timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
+ iwqp->terminate_timer.expires = jiffies + HZ;
+
+ add_timer(&iwqp->terminate_timer);
+}
+
+/**
+ * irdma_terminate_del_timer - delete terminate timeout
+ * @qp: hardware control qp
+ */
+void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
+{
+ struct irdma_qp *iwqp;
+ int ret;
+
+ iwqp = qp->qp_uk.back_qp;
+ ret = del_timer(&iwqp->terminate_timer);
+ if (ret)
+ irdma_qp_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
+ * @dev: function device struct
+ * @val_mem: buffer for fpm
+ * @hmc_fn_id: function id for fpm
+ */
+int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_request->param = NULL;
+ cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
+ cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
+ cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
+ cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
+ cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
+ * @dev: hardware control device structure
+ * @val_mem: buffer with fpm values
+ * @hmc_fn_id: function id for fpm
+ */
+int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
+ struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_request->param = NULL;
+ cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
+ cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
+ cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
+ cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
+ cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_cq_create_cmd - create a cq for the cqp
+ * @dev: device pointer
+ * @cq: pointer to created cq
+ */
+int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_qp_create_cmd - create a qp for the cqp
+ * @dev: device pointer
+ * @qp: pointer to created qp
+ */
+int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_create_qp_info *qp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+ memset(qp_info, 0, sizeof(*qp_info));
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
+ cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_dealloc_push_page - free a push page for qp
+ * @rf: RDMA PCI function
+ * @qp: hardware control qp
+ */
+static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
+ struct irdma_sc_qp *qp)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
+ return;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
+ cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 1;
+ cqp_info->in.u.manage_push_page.info.push_page_type = 0;
+ cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (!status)
+ qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_free_qp_rsrc - free up memory resources for qp
+ * @iwqp: qp ptr (user or kernel)
+ */
+void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ u32 qp_num = iwqp->ibqp.qp_num;
+
+ irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
+ irdma_dealloc_push_page(rf, &iwqp->sc_qp);
+ if (iwqp->sc_qp.vsi) {
+ irdma_qp_rem_qos(&iwqp->sc_qp);
+ iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
+ iwqp->sc_qp.user_pri);
+ }
+
+ if (qp_num > 2)
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
+ iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
+ iwqp->q2_ctx_mem.va = NULL;
+ dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
+ iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
+ iwqp->kqp.dma_mem.va = NULL;
+ kfree(iwqp->kqp.sq_wrid_mem);
+ kfree(iwqp->kqp.rq_wrid_mem);
+}
+
+/**
+ * irdma_cq_wq_destroy - send cq destroy cqp
+ * @rf: RDMA PCI function
+ * @cq: hardware control cq
+ */
+void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_destroy.cq = cq;
+ cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_hw_modify_qp_callback - handle state for modify QPs that don't wait
+ * @cqp_request: modify QP completion
+ */
+static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
+{
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_qp *iwqp;
+
+ cqp_info = &cqp_request->info;
+ iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
+ atomic_dec(&iwqp->hw_mod_qp_pend);
+ wake_up(&iwqp->mod_qp_waitq);
+}
+
+/**
+ * irdma_hw_modify_qp - setup cqp for modify qp
+ * @iwdev: RDMA device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: info for modify qp
+ * @wait: flag to wait or not for modify qp completion
+ */
+int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
+ struct irdma_modify_qp_info *info, bool wait)
+{
+ int status;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_modify_qp_info *m_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ if (!wait) {
+ cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
+ atomic_inc(&iwqp->hw_mod_qp_pend);
+ }
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.qp_modify.info;
+ memcpy(m_info, info, sizeof(*m_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (status) {
+ if (rdma_protocol_roce(&iwdev->ibdev, 1))
+ return status;
+
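+ /* iWARP only: tear the connection down by sending a TCP reset or
+ * marking the terminate done, then either generate a bad-close AE
+ * or retry the modify with the next state forced to ERROR
+ */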
+ switch (m_info->next_iwarp_state) {
+ struct irdma_gen_ae_info ae_info;
+
+ case IRDMA_QP_STATE_RTS:
+ case IRDMA_QP_STATE_IDLE:
+ case IRDMA_QP_STATE_TERMINATE:
+ case IRDMA_QP_STATE_CLOSING:
+ if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
+ irdma_send_reset(iwqp->cm_node);
+ else
+ iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
+ if (!wait) {
+ ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
+ ae_info.ae_src = 0;
+ irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
+ } else {
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
+ wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.qp_modify.info;
+ memcpy(m_info, info, sizeof(*m_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+ m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ m_info->reset_tcp_conn = true;
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ }
+ break;
+ case IRDMA_QP_STATE_ERROR:
+ default:
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * irdma_cqp_cq_destroy_cmd - destroy the cqp cq
+ * @dev: device pointer
+ * @cq: pointer to cq
+ */
+void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ irdma_cq_wq_destroy(rf, cq);
+}
+
+/**
+ * irdma_cqp_qp_destroy_cmd - destroy the cqp
+ * @dev: device pointer
+ * @qp: pointer to qp
+ */
+int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_destroy.qp = qp;
+ cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_ieq_mpa_crc_ae - generate AE for crc error
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
+{
+ struct irdma_gen_ae_info info = {};
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n");
+ info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
+ info.ae_src = IRDMA_AE_SOURCE_RQ;
+ irdma_gen_ae(rf, qp, &info, false);
+}
+
+/**
+ * irdma_init_hash_desc - initialize hash for crc calculation
+ * @desc: cryption type
+ */
+int irdma_init_hash_desc(struct shash_desc **desc)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *tdesc;
+
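+ /* crc32c via the kernel crypto API; the descriptor is used for MPA CRC checks */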
+ tfm = crypto_alloc_shash("crc32c", 0, 0);
+ if (IS_ERR(tfm))
+ return -EINVAL;
+
+ tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!tdesc) {
+ crypto_free_shash(tfm);
+ return -EINVAL;
+ }
+
+ tdesc->tfm = tfm;
+ *desc = tdesc;
+
+ return 0;
+}
+
+/**
+ * irdma_free_hash_desc - free hash desc
+ * @desc: to be freed
+ */
+void irdma_free_hash_desc(struct shash_desc *desc)
+{
+ if (desc) {
+ crypto_free_shash(desc->tfm);
+ kfree(desc);
+ }
+}
+
+/**
+ * irdma_ieq_check_mpacrc - check if mpa crc is OK
+ * @desc: desc for hash
+ * @addr: address of buffer for crc
+ * @len: length of buffer
+ * @val: value to be compared
+ */
+int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
+ u32 val)
+{
+ u32 crc = 0;
+ int ret;
+ int ret_code = 0;
+
+ crypto_shash_init(desc);
+ ret = crypto_shash_update(desc, addr, len);
+ if (!ret)
+ crypto_shash_final(desc, (u8 *)&crc);
+ if (crc != val)
+ ret_code = -EINVAL;
+
+ return ret_code;
+}
+
+/**
+ * irdma_ieq_get_qp - get qp based on quad in puda buffer
+ * @dev: hardware control device structure
+ * @buf: receive puda buffer on exception q
+ */
+struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
+ struct irdma_puda_buf *buf)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_cm_node *cm_node;
+ struct irdma_device *iwdev = buf->vsi->back_vsi;
+ u32 loc_addr[4] = {};
+ u32 rem_addr[4] = {};
+ u16 loc_port, rem_port;
+ struct ipv6hdr *ip6h;
+ struct iphdr *iph = (struct iphdr *)buf->iph;
+ struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
+
+ if (iph->version == 4) {
+ loc_addr[0] = ntohl(iph->daddr);
+ rem_addr[0] = ntohl(iph->saddr);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
+ }
+ loc_port = ntohs(tcph->dest);
+ rem_port = ntohs(tcph->source);
+ cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
+ loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
+ if (!cm_node)
+ return NULL;
+
+ iwqp = cm_node->iwqp;
+ irdma_rem_ref_cm_node(cm_node);
+
+ return &iwqp->sc_qp;
+}
+
+/**
+ * irdma_send_ieq_ack - ACKs for duplicate or OOO partial FPDUs
+ * @qp: qp ptr
+ */
+void irdma_send_ieq_ack(struct irdma_sc_qp *qp)
+{
+ struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
+ struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
+ struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
+
+ cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
+ cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+
+ irdma_send_ack(cm_node);
+}
+
+/**
+ * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
+ * @qp: qp pointer
+ * @ah_info: AH info pointer
+ */
+void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
+ struct irdma_ah_info *ah_info)
+{
+ struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+
+ memset(ah_info, 0, sizeof(*ah_info));
+ ah_info->do_lpbk = true;
+ ah_info->vlan_tag = buf->vlan_id;
+ ah_info->insert_vlan_tag = buf->vlan_valid;
+ ah_info->ipv4_valid = buf->ipv4;
+ ah_info->vsi = qp->vsi;
+
+ if (buf->smac_valid)
+ ether_addr_copy(ah_info->mac_addr, buf->smac);
+
+ if (buf->ipv4) {
+ ah_info->ipv4_valid = true;
+ iph = (struct iphdr *)buf->iph;
+ ah_info->hop_ttl = iph->ttl;
+ ah_info->tc_tos = iph->tos;
+ ah_info->dest_ip_addr[0] = ntohl(iph->daddr);
+ ah_info->src_ip_addr[0] = ntohl(iph->saddr);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ ah_info->hop_ttl = ip6h->hop_limit;
+ ah_info->tc_tos = ip6h->priority;
+ irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
+ ip6h->daddr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(ah_info->src_ip_addr,
+ ip6h->saddr.in6_u.u6_addr32);
+ }
+
+ ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
+ ah_info->dest_ip_addr,
+ ah_info->ipv4_valid,
+ NULL, IRDMA_ARP_RESOLVE);
+}
+
+/**
+ * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
+ * @buf: puda to update
+ * @len: length of buffer
+ * @seqnum: seq number for tcp
+ */
+static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
+ u16 len, u32 seqnum)
+{
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ u16 iphlen;
+ u16 pktsize;
+ u8 *addr = buf->mem.va;
+
+ iphlen = (buf->ipv4) ? 20 : 40;
+ iph = (struct iphdr *)(addr + buf->maclen);
+ tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
+ pktsize = len + buf->tcphlen + iphlen;
+ iph->tot_len = htons(pktsize);
+ tcph->seq = htonl(seqnum);
+}
+
+/**
+ * irdma_ieq_update_tcpip_info - update tcpip in the buffer
+ * @buf: puda to update
+ * @len: length of buffer
+ * @seqnum: seq number for tcp
+ */
+void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
+ u32 seqnum)
+{
+ struct tcphdr *tcph;
+ u8 *addr;
+
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);
+
+ addr = buf->mem.va;
+ tcph = (struct tcphdr *)addr;
+ tcph->seq = htonl(seqnum);
+}
+
+/**
+ * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda buffer
+ * @info: puda completion info
+ * @buf: puda buffer
+ */
+static int irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf)
+{
+ struct iphdr *iph;
+ struct ipv6hdr *ip6h;
+ struct tcphdr *tcph;
+ u16 iphlen;
+ u16 pkt_len;
+ u8 *mem = buf->mem.va;
+ struct ethhdr *ethh = buf->mem.va;
+
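+ /* 0x8100 is the 802.1Q VLAN Ethertype (ETH_P_8021Q) */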
+ if (ethh->h_proto == htons(0x8100)) {
+ info->vlan_valid = true;
+ buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) &
+ VLAN_VID_MASK;
+ }
+
+ buf->maclen = (info->vlan_valid) ? 18 : 14;
+ iphlen = (info->l3proto) ? 40 : 20;
+ buf->ipv4 = (info->l3proto) ? false : true;
+ buf->iph = mem + buf->maclen;
+ iph = (struct iphdr *)buf->iph;
+ buf->tcph = buf->iph + iphlen;
+ tcph = (struct tcphdr *)buf->tcph;
+
+ if (buf->ipv4) {
+ pkt_len = ntohs(iph->tot_len);
+ } else {
+ ip6h = (struct ipv6hdr *)buf->iph;
+ pkt_len = ntohs(ip6h->payload_len) + iphlen;
+ }
+
+ buf->totallen = pkt_len + buf->maclen;
+
+ if (info->payload_len < buf->totallen) {
+ ibdev_dbg(to_ibdev(buf->vsi->dev),
+ "ERR: payload_len = 0x%x totallen expected0x%x\n",
+ info->payload_len, buf->totallen);
+ return -EINVAL;
+ }
+
+ buf->tcphlen = tcph->doff << 2;
+ buf->datalen = pkt_len - iphlen - buf->tcphlen;
+ buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
+ buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
+ buf->seqnum = ntohl(tcph->seq);
+
+ return 0;
+}
+
+/**
+ * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
+ * @info: puda completion info
+ * @buf: puda buffer
+ */
+int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
+ struct irdma_puda_buf *buf)
+{
+ struct tcphdr *tcph;
+ u32 pkt_len;
+ u8 *mem;
+
+ if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
+ return irdma_gen1_puda_get_tcpip_info(info, buf);
+
+ mem = buf->mem.va;
+ buf->vlan_valid = info->vlan_valid;
+ if (info->vlan_valid)
+ buf->vlan_id = info->vlan;
+
+ buf->ipv4 = info->ipv4;
+ if (buf->ipv4)
+ buf->iph = mem + IRDMA_IPV4_PAD;
+ else
+ buf->iph = mem;
+
+ buf->tcph = mem + IRDMA_TCP_OFFSET;
+ tcph = (struct tcphdr *)buf->tcph;
+ pkt_len = info->payload_len;
+ buf->totallen = pkt_len;
+ buf->tcphlen = tcph->doff << 2;
+ buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
+ buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
+ buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
+ buf->seqnum = ntohl(tcph->seq);
+
+ if (info->smac_valid) {
+ ether_addr_copy(buf->smac, info->smac);
+ buf->smac_valid = true;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
+ * @t: timer_list pointer
+ */
+static void irdma_hw_stats_timeout(struct timer_list *t)
+{
+ struct irdma_vsi_pestat *pf_devstat =
+ from_timer(pf_devstat, t, stats_timer);
+ struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
+
+ if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
+ else
+ irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
+
+ mod_timer(&pf_devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * irdma_hw_stats_start_timer - Start periodic stats timer
+ * @vsi: vsi structure pointer
+ */
+void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_vsi_pestat *devstat = vsi->pestat;
+
+ timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
+ mod_timer(&devstat->stats_timer,
+ jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * irdma_hw_stats_stop_timer - Delete periodic stats timer
+ * @vsi: pointer to vsi structure
+ */
+void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
+{
+ struct irdma_vsi_pestat *devstat = vsi->pestat;
+
+ del_timer_sync(&devstat->stats_timer);
+}
+
+/**
+ * irdma_process_stats - check for counter wrap and update stats
+ * @pestat: stats structure pointer
+ */
+static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat)
+{
+ sc_vsi_update_stats(pestat->vsi);
+}
+
+/**
+ * irdma_cqp_gather_stats_gen1 - Gather stats
+ * @dev: pointer to device structure
+ * @pestat: statistics structure
+ */
+void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat)
+{
+ struct irdma_gather_stats *gather_stats =
+ pestat->gather_info.gather_stats_va;
+ const struct irdma_hw_stat_map *map = dev->hw_stats_map;
+ u16 max_stats_idx = dev->hw_attrs.max_stat_idx;
+ u32 stats_inst_offset_32;
+ u32 stats_inst_offset_64;
+ u64 new_val;
+ u16 i;
+
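+ /* per-instance register stride: 4 bytes for 32-bit stats
+ * registers, 8 bytes for 64-bit stats registers
+ */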
+ stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
+ pestat->gather_info.stats_inst_index :
+ pestat->hw->hmc.hmc_fn_id;
+ stats_inst_offset_32 *= 4;
+ stats_inst_offset_64 = stats_inst_offset_32 * 2;
+
+ for (i = 0; i < max_stats_idx; i++) {
+ if (map[i].bitmask <= IRDMA_MAX_STATS_32)
+ new_val = rd32(dev->hw,
+ dev->hw_stats_regs[i] + stats_inst_offset_32);
+ else
+ new_val = rd64(dev->hw,
+ dev->hw_stats_regs[i] + stats_inst_offset_64);
+ gather_stats->val[map[i].byteoff / sizeof(u64)] = new_val;
+ }
+
+ irdma_process_stats(pestat);
+}
+
+/**
+ * irdma_process_cqp_stats - check for counter wrap and update stats
+ * @cqp_request: cqp_request structure pointer
+ */
+static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_vsi_pestat *pestat = cqp_request->param;
+
+ irdma_process_stats(pestat);
+}
+
+/**
+ * irdma_cqp_gather_stats_cmd - Gather stats
+ * @dev: pointer to device structure
+ * @pestat: pointer to stats info
+ * @wait: flag to wait or not wait for stats
+ */
+int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
+ struct irdma_vsi_pestat *pestat, bool wait)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.stats_gather.info = pestat->gather_info;
+ cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
+ cqp_request->param = pestat;
+ if (!wait)
+ cqp_request->callback_fcn = irdma_process_cqp_stats;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (wait)
+ irdma_process_stats(pestat);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
+ * @vsi: pointer to vsi structure
+ * @cmd: command to allocate or free
+ * @stats_info: pointer to allocate stats info
+ */
+int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
+ struct irdma_stats_inst_info *stats_info)
+{
+ struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+ bool wait = false;
+
+ if (cmd == IRDMA_OP_STATS_ALLOCATE)
+ wait = true;
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.stats_manage.info = *stats_info;
+ cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (wait)
+ stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
+ irdma_put_cqp_request(iwcqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_ceq_cmd - Create/Destroy CEQ's after CEQ 0
+ * @dev: pointer to device info
+ * @sc_ceq: pointer to ceq structure
+ * @op: Create or Destroy
+ */
+int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
+ u8 op)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->post_sq = 1;
+ cqp_info->cqp_cmd = op;
+ cqp_info->in.u.ceq_create.ceq = sc_ceq;
+ cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_aeq_cmd - Create/Destroy AEQ
+ * @dev: pointer to device info
+ * @sc_aeq: pointer to aeq structure
+ * @op: Create or Destroy
+ */
+int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
+ u8 op)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->post_sq = 1;
+ cqp_info->cqp_cmd = op;
+ cqp_info->in.u.aeq_create.aeq = sc_aeq;
+ cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
+ * @dev: pointer to device structure
+ * @cmd: Add, modify or delete
+ * @node_info: pointer to ws node info
+ */
+int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
+ struct irdma_ws_node_info *node_info)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ struct irdma_cqp *iwcqp = &rf->cqp;
+ struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+ bool poll;
+
+ if (!rf->sc_dev.ceq_valid)
+ poll = true;
+ else
+ poll = false;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.ws_node.info = *node_info;
+ cqp_info->in.u.ws_node.cqp = cqp;
+ cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ if (status)
+ goto exit;
+
+ if (poll) {
+ struct irdma_ccq_cqe_info compl_info;
+
+ status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
+ &compl_info);
+ node_info->qs_handle = compl_info.op_ret_val;
+ ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n",
+ compl_info.op_code, compl_info.op_ret_val);
+ } else {
+ node_info->qs_handle = cqp_request->compl_info.op_ret_val;
+ }
+
+exit:
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_ah_cqp_op - perform an AH cqp operation
+ * @rf: RDMA PCI function
+ * @sc_ah: address handle
+ * @cmd: AH operation
+ * @wait: wait if true
+ * @callback_fcn: Callback function on CQP op completion
+ * @cb_param: parameter for callback function
+ *
+ * returns errno
+ */
+int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
+ bool wait,
+ void (*callback_fcn)(struct irdma_cqp_request *),
+ void *cb_param)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
+ return -EINVAL;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = cmd;
+ cqp_info->post_sq = 1;
+ if (cmd == IRDMA_OP_AH_CREATE) {
+ cqp_info->in.u.ah_create.info = sc_ah->ah_info;
+ cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
+ } else if (cmd == IRDMA_OP_AH_DESTROY) {
+ cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
+ cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
+ }
+
+ if (!wait) {
+ cqp_request->callback_fcn = callback_fcn;
+ cqp_request->param = cb_param;
+ }
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ if (status)
+ return -ENOMEM;
+
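+ /* for non-waiting ops the completion callback sets ah_valid instead */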
+ if (wait)
+ sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
+
+ return 0;
+}
+
+/**
+ * irdma_ieq_ah_cb - callback after creation of AH for IEQ
+ * @cqp_request: pointer to cqp_request of create AH
+ */
+static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_sc_qp *qp = cqp_request->param;
+ struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->pfpdu.lock, flags);
+ if (!cqp_request->compl_info.op_ret_val) {
+ sc_ah->ah_info.ah_valid = true;
+ irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
+ } else {
+ sc_ah->ah_info.ah_valid = false;
+ irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
+ }
+ spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
+}
+
+/**
+ * irdma_ilq_ah_cb - callback after creation of AH for ILQ
+ * @cqp_request: pointer to cqp_request of create AH
+ */
+static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_cm_node *cm_node = cqp_request->param;
+ struct irdma_sc_ah *sc_ah = cm_node->ah;
+
+ sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
+ irdma_add_conn_est_qh(cm_node);
+}
+
+/**
+ * irdma_puda_create_ah - create AH for ILQ/IEQ qp's
+ * @dev: device pointer
+ * @ah_info: Address handle info
+ * @wait: When true will wait for operation to complete
+ * @type: ILQ/IEQ
+ * @cb_param: Callback param when not waiting
+ * @ah_ret: Returned pointer to address handle if created
+ */
+int irdma_puda_create_ah(struct irdma_sc_dev *dev,
+ struct irdma_ah_info *ah_info, bool wait,
+ enum puda_rsrc_type type, void *cb_param,
+ struct irdma_sc_ah **ah_ret)
+{
+ struct irdma_sc_ah *ah;
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ int err;
+
+ ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
+ *ah_ret = ah;
+ if (!ah)
+ return -ENOMEM;
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
+ &ah_info->ah_idx, &rf->next_ah);
+ if (err)
+ goto err_free;
+
+ ah->dev = dev;
+ ah->ah_info = *ah_info;
+
+ if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
+ err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
+ irdma_ilq_ah_cb, cb_param);
+ else
+ err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
+ irdma_ieq_ah_cb, cb_param);
+
+ if (err)
+ goto error;
+ return 0;
+
+error:
+ irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
+err_free:
+ kfree(ah);
+ *ah_ret = NULL;
+ return -ENOMEM;
+}
+
+/**
+ * irdma_puda_free_ah - free a puda address handle
+ * @dev: device pointer
+ * @ah: The address handle to free
+ */
+void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ if (!ah)
+ return;
+
+ if (ah->ah_info.ah_valid) {
+ irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
+ irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
+ }
+
+ kfree(ah);
+}
+
+/**
+ * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP
+ * @cqp_request: pointer to cqp_request of create AH
+ */
+void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
+{
+ struct irdma_sc_ah *sc_ah = cqp_request->param;
+
+ if (!cqp_request->compl_info.op_ret_val)
+ sc_ah->ah_info.ah_valid = true;
+ else
+ sc_ah->ah_info.ah_valid = false;
+}
+
+/**
+ * irdma_prm_add_pble_mem - add memory to pble resources
+ * @pprm: pble resource manager
+ * @pchunk: chunk of memory to add
+ */
+int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
+ struct irdma_chunk *pchunk)
+{
+ u64 sizeofbitmap;
+
+ if (pchunk->size & 0xfff)
+ return -EINVAL;
+
+ sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
+
+ pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
+ if (!pchunk->bitmapbuf)
+ return -ENOMEM;
+
+ pchunk->sizeofbitmap = sizeofbitmap;
+ /* each pble is 8 bytes hence shift by 3 */
+ pprm->total_pble_alloc += pchunk->size >> 3;
+ pprm->free_pble_cnt += pchunk->size >> 3;
+
+ return 0;
+}
+
+/**
+ * irdma_prm_get_pbles - get pble's from prm
+ * @pprm: pble resource manager
+ * @chunkinfo: information about chunk where pble's were acquired
+ * @mem_size: size of pble memory needed
+ * @vaddr: returns virtual address of pble memory
+ * @fpm_addr: returns fpm address of pble memory
+ */
+int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
+ u64 **vaddr, u64 *fpm_addr)
+{
+ u64 bits_needed;
+ u64 bit_idx = PBLE_INVALID_IDX;
+ struct irdma_chunk *pchunk = NULL;
+ struct list_head *chunk_entry = pprm->clist.next;
+ u32 offset;
+ unsigned long flags;
+ *vaddr = NULL;
+ *fpm_addr = 0;
+
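+ /* each bitmap bit covers (1 << pble_shift) bytes of PBLE memory */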
+ bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
+
+ spin_lock_irqsave(&pprm->prm_lock, flags);
+ while (chunk_entry != &pprm->clist) {
+ pchunk = (struct irdma_chunk *)chunk_entry;
+ bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
+ pchunk->sizeofbitmap, 0,
+ bits_needed, 0);
+ if (bit_idx < pchunk->sizeofbitmap)
+ break;
+
+ /* advance to the next chunk in the list */
+ chunk_entry = pchunk->list.next;
+ }
+
+ if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
+ spin_unlock_irqrestore(&pprm->prm_lock, flags);
+ return -ENOMEM;
+ }
+
+ bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
+ offset = bit_idx << pprm->pble_shift;
+ *vaddr = pchunk->vaddr + offset;
+ *fpm_addr = pchunk->fpm_addr + offset;
+
+ chunkinfo->pchunk = pchunk;
+ chunkinfo->bit_idx = bit_idx;
+ chunkinfo->bits_used = bits_needed;
+ /* each PBLE is 8 bytes, so (pble_shift - 3) converts bits to PBLEs */
+ pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
+ spin_unlock_irqrestore(&pprm->prm_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_prm_return_pbles - return pbles back to prm
+ * @pprm: pble resource manager
+ * @chunkinfo: chunk where pble's were acquired and to be freed
+ */
+void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
+ struct irdma_pble_chunkinfo *chunkinfo)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pprm->prm_lock, flags);
+ pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
+ bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
+ chunkinfo->bits_used);
+ spin_unlock_irqrestore(&pprm->prm_lock, flags);
+}
+
+int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
+ u32 pg_cnt)
+{
+ struct page *vm_page;
+ int i;
+ u8 *addr;
+
+ addr = (u8 *)(uintptr_t)va;
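+ /* DMA-map each page backing the vmalloc'ed buffer */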
+ for (i = 0; i < pg_cnt; i++) {
+ vm_page = vmalloc_to_page(addr);
+ if (!vm_page)
+ goto err;
+
+ pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(hw->device, pg_dma[i]))
+ goto err;
+
+ addr += PAGE_SIZE;
+ }
+
+ return 0;
+
+err:
+ irdma_unmap_vm_page_list(hw, pg_dma, i);
+ return -ENOMEM;
+}
+
+void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
+{
+ int i;
+
+ for (i = 0; i < pg_cnt; i++)
+ dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+}
+
+/**
+ * irdma_pble_free_paged_mem - free virtual paged memory
+ * @chunk: chunk to free with paged memory
+ */
+void irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
+{
+ if (!chunk->pg_cnt)
+ goto done;
+
+ irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
+ chunk->pg_cnt);
+
+done:
+ kfree(chunk->dmainfo.dmaaddrs);
+ chunk->dmainfo.dmaaddrs = NULL;
+ vfree(chunk->vaddr);
+ chunk->vaddr = NULL;
+ chunk->type = 0;
+}
+
+/**
+ * irdma_pble_get_paged_mem - allocate paged memory for pbles
+ * @chunk: chunk to add for paged memory
+ * @pg_cnt: number of pages needed
+ */
+int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt)
+{
+ u32 size;
+ void *va;
+
+ chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
+ if (!chunk->dmainfo.dmaaddrs)
+ return -ENOMEM;
+
+ size = PAGE_SIZE * pg_cnt;
+ va = vmalloc(size);
+ if (!va)
+ goto err;
+
+ if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
+ pg_cnt)) {
+ vfree(va);
+ goto err;
+ }
+ chunk->vaddr = va;
+ chunk->size = size;
+ chunk->pg_cnt = pg_cnt;
+ chunk->type = PBLE_SD_PAGED;
+
+ return 0;
+err:
+ kfree(chunk->dmainfo.dmaaddrs);
+ chunk->dmainfo.dmaaddrs = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
+ * @dev: device pointer
+ */
+u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+ u32 next = 1;
+ u32 node_id;
+
+ if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
+ &node_id, &next))
+ return IRDMA_WS_NODE_INVALID;
+
+ return (u16)node_id;
+}
+
+/**
+ * irdma_free_ws_node_id - Free a tx scheduler node ID
+ * @dev: device pointer
+ * @node_id: Work scheduler node ID
+ */
+void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
+{
+ struct irdma_pci_f *rf = dev_to_rf(dev);
+
+ irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
+}
+
+/**
+ * irdma_modify_qp_to_err - Modify a QP to error
+ * @sc_qp: qp structure
+ */
+void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
+{
+ struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
+ struct ib_qp_attr attr;
+
+ if (qp->iwdev->rf->reset)
+ return;
+ attr.qp_state = IB_QPS_ERR;
+
+ if (rdma_protocol_roce(qp->ibqp.device, 1))
+ irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
+ else
+ irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
+}
+
+void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
+{
+ struct ib_event ibevent;
+
+ if (!iwqp->ibqp.event_handler)
+ return;
+
+ switch (event) {
+ case IRDMA_QP_EVENT_CATASTROPHIC:
+ ibevent.event = IB_EVENT_QP_FATAL;
+ break;
+ case IRDMA_QP_EVENT_ACCESS_ERR:
+ ibevent.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ case IRDMA_QP_EVENT_REQ_ERR:
+ ibevent.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ }
+ ibevent.device = iwqp->ibqp.device;
+ ibevent.element.qp = &iwqp->ibqp;
+ iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
+}
+
+bool irdma_cq_empty(struct irdma_cq *iwcq)
+{
+ struct irdma_cq_uk *ukcq;
+ u64 qword3;
+ __le64 *cqe;
+ u8 polarity;
+
+ ukcq = &iwcq->sc_cq.cq_uk;
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
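+ /* the CQ is empty when the head CQE's valid bit does not match the current polarity */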
+ return polarity != ukcq->polarity;
+}
+
+void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
+{
+ struct irdma_cmpl_gen *cmpl_node;
+ struct list_head *tmp_node, *list_node;
+
+ list_for_each_safe(list_node, tmp_node, &iwcq->cmpl_generated) {
+ cmpl_node = list_entry(list_node, struct irdma_cmpl_gen, list);
+ list_del(&cmpl_node->list);
+ kfree(cmpl_node);
+ }
+}
+
+int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info)
+{
+ struct irdma_cmpl_gen *cmpl;
+
+ if (list_empty(&iwcq->cmpl_generated))
+ return -ENOENT;
+ cmpl = list_first_entry_or_null(&iwcq->cmpl_generated, struct irdma_cmpl_gen, list);
+ list_del(&cmpl->list);
+ memcpy(cq_poll_info, &cmpl->cpi, sizeof(*cq_poll_info));
+ kfree(cmpl);
+
+ ibdev_dbg(iwcq->ibcq.device,
+ "VERBS: %s: Poll artificially generated completion for QP 0x%X, op %u, wr_id=0x%llx\n",
+ __func__, cq_poll_info->qp_id, cq_poll_info->op_type,
+ cq_poll_info->wr_id);
+
+ return 0;
+}
+
+/**
+ * irdma_set_cpi_common_values - fill in values for polling info struct
+ * @cpi: resulting structure of cq_poll_info type
+ * @qp: QPair
+ * @qp_num: id of the QP
+ */
+static void irdma_set_cpi_common_values(struct irdma_cq_poll_info *cpi,
+ struct irdma_qp_uk *qp, u32 qp_num)
+{
+ cpi->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ cpi->error = true;
+ cpi->major_err = IRDMA_FLUSH_MAJOR_ERR;
+ cpi->minor_err = FLUSH_GENERAL_ERR;
+ cpi->qp_handle = (irdma_qp_handle)(uintptr_t)qp;
+ cpi->qp_id = qp_num;
+}
+
+static inline void irdma_comp_handler(struct irdma_cq *cq)
+{
+ if (!cq->ibcq.comp_handler)
+ return;
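+ /* fire at most one notification per arm: only call the handler if 'armed' was set */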
+ if (atomic_cmpxchg(&cq->armed, 1, 0))
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+void irdma_generate_flush_completions(struct irdma_qp *iwqp)
+{
+ struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
+ struct irdma_ring *sq_ring = &qp->sq_ring;
+ struct irdma_ring *rq_ring = &qp->rq_ring;
+ struct irdma_cmpl_gen *cmpl;
+ __le64 *sw_wqe;
+ u64 wqe_qword;
+ u32 wqe_idx;
+ bool compl_generated = false;
+ unsigned long flags1;
+
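+ /* only fabricate flush completions while the CQ is empty;
+ * otherwise defer to the flush delayed work so real CQEs are
+ * polled first
+ */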
+ spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
+ if (irdma_cq_empty(iwqp->iwscq)) {
+ unsigned long flags2;
+
+ spin_lock_irqsave(&iwqp->lock, flags2);
+ while (IRDMA_RING_MORE_WORK(*sq_ring)) {
+ cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
+ if (!cmpl) {
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ return;
+ }
+
+ wqe_idx = sq_ring->tail;
+ irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
+
+ cmpl->cpi.wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+ sw_wqe = qp->sq_base[wqe_idx].elem;
+ get_64bit_val(sw_wqe, 24, &wqe_qword);
+ cmpl->cpi.op_type = (u8)FIELD_GET(IRDMAQPSQ_OPCODE, wqe_qword);
+ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_SQ;
+ /* remove the SQ WR by moving SQ tail */
+ IRDMA_RING_SET_TAIL(*sq_ring,
+ sq_ring->tail + qp->sq_wrtrk_array[sq_ring->tail].quanta);
+ if (cmpl->cpi.op_type == IRDMAQP_OP_NOP) {
+ kfree(cmpl);
+ continue;
+ }
+ ibdev_dbg(iwqp->iwscq->ibcq.device,
+ "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
+ __func__, cmpl->cpi.wr_id, qp->qp_id);
+ list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
+ compl_generated = true;
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ if (compl_generated)
+ irdma_comp_handler(iwqp->iwscq);
+ } else {
+ spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+ }
+
+ spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
+ if (irdma_cq_empty(iwqp->iwrcq)) {
+ unsigned long flags2;
+
+ spin_lock_irqsave(&iwqp->lock, flags2);
+ while (IRDMA_RING_MORE_WORK(*rq_ring)) {
+ cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
+ if (!cmpl) {
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ return;
+ }
+
+ wqe_idx = rq_ring->tail;
+ irdma_set_cpi_common_values(&cmpl->cpi, qp, qp->qp_id);
+
+ cmpl->cpi.wr_id = qp->rq_wrid_array[wqe_idx];
+ cmpl->cpi.op_type = IRDMA_OP_TYPE_REC;
+ cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
+ /* remove the RQ WR by moving RQ tail */
+ IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
+ ibdev_dbg(iwqp->iwrcq->ibcq.device,
+ "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
+ __func__, cmpl->cpi.wr_id, qp->qp_id,
+ wqe_idx);
+ list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
+
+ compl_generated = true;
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags2);
+ spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ if (compl_generated)
+ irdma_comp_handler(iwqp->iwrcq);
+ } else {
+ spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+ }
+}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
new file mode 100644
index 0000000000..2f1bedd3a5
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -0,0 +1,4728 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#include "main.h"
+
+/**
+ * irdma_query_device - get device attributes
+ * @ibdev: device pointer from stack
+ * @props: returning device attributes
+ * @udata: user data
+ */
+static int irdma_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct pci_dev *pcidev = iwdev->rf->pcidev;
+ struct irdma_hw_attrs *hw_attrs = &rf->sc_dev.hw_attrs;
+
+ if (udata->inlen || udata->outlen)
+ return -EINVAL;
+
+ memset(props, 0, sizeof(*props));
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ iwdev->netdev->dev_addr);
+ props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
+ irdma_fw_minor_ver(&rf->sc_dev);
+ props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
+ props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ props->vendor_id = pcidev->vendor;
+ props->vendor_part_id = pcidev->device;
+
+ props->hw_ver = rf->pcidev->revision;
+ props->page_size_cap = hw_attrs->page_size_cap;
+ props->max_mr_size = hw_attrs->max_mr_size;
+ props->max_qp = rf->max_qp - rf->used_qps;
+ props->max_qp_wr = hw_attrs->max_qp_wr;
+ props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ props->max_cq = rf->max_cq - rf->used_cqs;
+ props->max_cqe = rf->max_cqe - 1;
+ props->max_mr = rf->max_mr - rf->used_mrs;
+ props->max_mw = props->max_mr;
+ props->max_pd = rf->max_pd - rf->used_pds;
+ props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
+ props->max_qp_rd_atom = hw_attrs->max_hw_ird;
+ props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
+ props->max_pkeys = IRDMA_PKEY_TBL_SZ;
+ }
+
+ props->max_ah = rf->max_ah;
+ props->max_mcast_grp = rf->max_mcg;
+ props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
+ props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
+ props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
+#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
+ props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
+
+ return 0;
+}
+
+/**
+ * irdma_query_port - get port attributes
+ * @ibdev: device pointer from stack
+ * @port: port number for query
+ * @props: returning device attributes
+ */
+static int irdma_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct net_device *netdev = iwdev->netdev;
+
+ /* no need to zero out props here. done by caller */
+
+ props->max_mtu = IB_MTU_4096;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
+ props->lid = 1;
+ props->lmc = 0;
+ props->sm_lid = 0;
+ props->sm_sl = 0;
+ if (netif_carrier_ok(netdev) && netif_running(netdev)) {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else {
+ props->state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+
+ ib_get_eth_speed(ibdev, port, &props->active_speed,
+ &props->active_width);
+
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->gid_tbl_len = 32;
+ props->ip_gids = true;
+ props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
+ } else {
+ props->gid_tbl_len = 1;
+ }
+ props->qkey_viol_cntr = 0;
+ props->port_cap_flags |= IB_PORT_CM_SUP | IB_PORT_REINIT_SUP;
+ props->max_msg_sz = iwdev->rf->sc_dev.hw_attrs.max_hw_outbound_msg_size;
+
+ return 0;
+}
+
+/**
+ * irdma_disassociate_ucontext - Disassociate user context
+ * @context: ib user context
+ */
+static void irdma_disassociate_ucontext(struct ib_ucontext *context)
+{
+}
+
+static int irdma_mmap_legacy(struct irdma_ucontext *ucontext,
+ struct vm_area_struct *vma)
+{
+ u64 pfn;
+
+ if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ vma->vm_private_data = ucontext;
+ pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
+ pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
+
+ return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot), NULL);
+}
+
+static void irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct irdma_user_mmap_entry *entry = to_irdma_mmap_entry(rdma_entry);
+
+ kfree(entry);
+}
+
+static struct rdma_user_mmap_entry*
+irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
+ enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
+{
+ struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ int ret;
+
+ if (!entry)
+ return NULL;
+
+ entry->bar_offset = bar_offset;
+ entry->mmap_flag = mmap_flag;
+
+ ret = rdma_user_mmap_entry_insert(&ucontext->ibucontext,
+ &entry->rdma_entry, PAGE_SIZE);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+ *mmap_offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+/**
+ * irdma_mmap - user memory map
+ * @context: context created during alloc
+ * @vma: kernel info for user memory map
+ */
+static int irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct irdma_user_mmap_entry *entry;
+ struct irdma_ucontext *ucontext;
+ u64 pfn;
+ int ret;
+
+ ucontext = to_ucontext(context);
+
+ /* Legacy support for libi40iw with hard-coded mmap key */
+ if (ucontext->legacy_mode)
+ return irdma_mmap_legacy(ucontext, vma);
+
+ rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&ucontext->iwdev->ibdev,
+ "VERBS: pgoff[0x%lx] does not have valid entry\n",
+ vma->vm_pgoff);
+ return -EINVAL;
+ }
+
+ entry = to_irdma_mmap_entry(rdma_entry);
+ ibdev_dbg(&ucontext->iwdev->ibdev,
+ "VERBS: bar_offset [0x%llx] mmap_flag [%d]\n",
+ entry->bar_offset, entry->mmap_flag);
+
+ pfn = (entry->bar_offset +
+ pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
+
+ switch (entry->mmap_flag) {
+ case IRDMA_MMAP_IO_NC:
+ ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ case IRDMA_MMAP_IO_WC:
+ ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot),
+ rdma_entry);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ ibdev_dbg(&ucontext->iwdev->ibdev,
+ "VERBS: bar_offset [0x%llx] mmap_flag[%d] err[%d]\n",
+ entry->bar_offset, entry->mmap_flag, ret);
+ rdma_user_mmap_entry_put(rdma_entry);
+
+ return ret;
+}
+
+/**
+ * irdma_alloc_push_page - allocate a push page for qp
+ * @iwqp: qp pointer
+ */
+static void irdma_alloc_push_page(struct irdma_qp *iwqp)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.manage_push_page.info.push_idx = 0;
+ cqp_info->in.u.manage_push_page.info.qs_handle =
+ qp->vsi->qos[qp->user_pri].qs_handle;
+ cqp_info->in.u.manage_push_page.info.free_page = 0;
+ cqp_info->in.u.manage_push_page.info.push_page_type = 0;
+ cqp_info->in.u.manage_push_page.cqp = &iwdev->rf->cqp.sc_cqp;
+ cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
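+	/* On success the CQP completion returns the allocated push page index; accept it only if it is below the device's push page limit */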
+ if (!status && cqp_request->compl_info.op_ret_val <
+ iwdev->rf->sc_dev.hw_attrs.max_hw_device_pages) {
+ qp->push_idx = cqp_request->compl_info.op_ret_val;
+ qp->push_offset = 0;
+ }
+
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+}
+
+/**
+ * irdma_alloc_ucontext - Allocate the user context data structure
+ * @uctx: uverbs context pointer
+ * @udata: user data
+ *
+ * This keeps track of all objects associated with a particular
+ * user-mode client.
+ */
+static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
+{
+#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
+#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
+ struct ib_device *ibdev = uctx->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_alloc_ucontext_req req = {};
+ struct irdma_alloc_ucontext_resp uresp = {};
+ struct irdma_ucontext *ucontext = to_ucontext(uctx);
+ struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
+
+ if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
+ return -EINVAL;
+
+ if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
+ return -EINVAL;
+
+ if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
+ goto ver_error;
+
+ ucontext->iwdev = iwdev;
+ ucontext->abi_ver = req.userspace_ver;
+
+ if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
+ ucontext->use_raw_attrs = true;
+
+ /* GEN_1 legacy support with libi40iw */
+ if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
+ if (uk_attrs->hw_rev != IRDMA_GEN_1)
+ return -EOPNOTSUPP;
+
+ ucontext->legacy_mode = true;
+ uresp.max_qps = iwdev->rf->max_qp;
+ uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
+ uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
+ uresp.kernel_ver = req.userspace_ver;
+ if (ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen)))
+ return -EFAULT;
+ } else {
+ u64 bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
+
+ ucontext->db_mmap_entry =
+ irdma_user_mmap_entry_insert(ucontext, bar_off,
+ IRDMA_MMAP_IO_NC,
+ &uresp.db_mmap_key);
+ if (!ucontext->db_mmap_entry)
+ return -ENOMEM;
+
+ uresp.kernel_ver = IRDMA_ABI_VER;
+ uresp.feature_flags = uk_attrs->feature_flags;
+ uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
+ uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
+ uresp.max_hw_inline = uk_attrs->max_hw_inline;
+ uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
+ uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
+ uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
+ uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
+ uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
+ uresp.hw_rev = uk_attrs->hw_rev;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
+ uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
+ if (ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen))) {
+ rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
+ return -EFAULT;
+ }
+ }
+
+ INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
+ spin_lock_init(&ucontext->cq_reg_mem_list_lock);
+ INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
+ spin_lock_init(&ucontext->qp_reg_mem_list_lock);
+
+ return 0;
+
+ver_error:
+ ibdev_err(&iwdev->ibdev,
+ "Invalid userspace driver version detected. Detected version %d, should be %d\n",
+ req.userspace_ver, IRDMA_ABI_VER);
+ return -EINVAL;
+}
+
+/**
+ * irdma_dealloc_ucontext - deallocate the user context data structure
+ * @context: user context created during alloc
+ */
+static void irdma_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct irdma_ucontext *ucontext = to_ucontext(context);
+
+ rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
+}
+
+/**
+ * irdma_alloc_pd - allocate protection domain
+ * @pd: PD pointer
+ * @udata: user data
+ */
+static int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+{
+#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
+ struct irdma_pd *iwpd = to_iwpd(pd);
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_alloc_pd_resp uresp = {};
+ struct irdma_sc_pd *sc_pd;
+ u32 pd_id = 0;
+ int err;
+
+ if (udata && udata->outlen < IRDMA_ALLOC_PD_MIN_RESP_LEN)
+ return -EINVAL;
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
+ &rf->next_pd);
+ if (err)
+ return err;
+
+ sc_pd = &iwpd->sc_pd;
+ if (udata) {
+ struct irdma_ucontext *ucontext =
+ rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
+ uresp.pd_id = pd_id;
+ if (ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen))) {
+ err = -EFAULT;
+ goto error;
+ }
+ } else {
+ irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
+ }
+
+ return 0;
+error:
+ irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
+
+ return err;
+}
+
+/**
+ * irdma_dealloc_pd - deallocate pd
+ * @ibpd: ptr of pd to be deallocated
+ * @udata: user data
+ */
+static int irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_device *iwdev = to_iwdev(ibpd->device);
+
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
+
+ return 0;
+}
+
+/**
+ * irdma_get_pbl - Retrieve pbl from a list given a virtual
+ * address
+ * @va: user virtual address
+ * @pbl_list: pbl list to search in (QP's or CQ's)
+ */
+static struct irdma_pbl *irdma_get_pbl(unsigned long va,
+ struct list_head *pbl_list)
+{
+ struct irdma_pbl *iwpbl;
+
+ list_for_each_entry (iwpbl, pbl_list, list) {
+ if (iwpbl->user_base == va) {
+ list_del(&iwpbl->list);
+ iwpbl->on_list = false;
+ return iwpbl;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_clean_cqes - clean cq entries for qp
+ * @iwqp: qp ptr (user or kernel)
+ * @iwcq: cq ptr
+ */
+static void irdma_clean_cqes(struct irdma_qp *iwqp, struct irdma_cq *iwcq)
+{
+ struct irdma_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ irdma_uk_clean_cq(&iwqp->sc_qp.qp_uk, ukcq);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+}
+
+static void irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
+{
+ if (iwqp->push_db_mmap_entry) {
+ rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
+ iwqp->push_db_mmap_entry = NULL;
+ }
+ if (iwqp->push_wqe_mmap_entry) {
+ rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
+ iwqp->push_wqe_mmap_entry = NULL;
+ }
+}
+
+static int irdma_setup_push_mmap_entries(struct irdma_ucontext *ucontext,
+ struct irdma_qp *iwqp,
+ u64 *push_wqe_mmap_key,
+ u64 *push_db_mmap_key)
+{
+ struct irdma_device *iwdev = ucontext->iwdev;
+ u64 rsvd, bar_off;
+
+ rsvd = IRDMA_PF_BAR_RSVD;
+ bar_off = (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
+ /* skip over db page */
+ bar_off += IRDMA_HW_PAGE_SIZE;
+ /* push wqe page */
+ bar_off += rsvd + iwqp->sc_qp.push_idx * IRDMA_HW_PAGE_SIZE;
+ iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
+ bar_off, IRDMA_MMAP_IO_WC,
+ push_wqe_mmap_key);
+ if (!iwqp->push_wqe_mmap_entry)
+ return -ENOMEM;
+
+ /* push doorbell page */
+ bar_off += IRDMA_HW_PAGE_SIZE;
+ iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
+ bar_off, IRDMA_MMAP_IO_NC,
+ push_db_mmap_key);
+ if (!iwqp->push_db_mmap_entry) {
+ rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_destroy_qp - destroy qp
+ * @ibqp: qp's ib pointer also to get to device's qp address
+ * @udata: user data
+ */
+static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+
+ iwqp->sc_qp.qp_uk.destroy_pending = true;
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
+ irdma_modify_qp_to_err(&iwqp->sc_qp);
+
+ if (!iwqp->user_mode)
+ cancel_delayed_work_sync(&iwqp->dwork_flush);
+
+ if (!iwqp->user_mode) {
+ if (iwqp->iwscq) {
+ irdma_clean_cqes(iwqp, iwqp->iwscq);
+ if (iwqp->iwrcq != iwqp->iwscq)
+ irdma_clean_cqes(iwqp, iwqp->iwrcq);
+ }
+ }
+
+ irdma_qp_rem_ref(&iwqp->ibqp);
+ wait_for_completion(&iwqp->free_qp);
+ irdma_free_lsmm_rsrc(iwqp);
+ irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+
+ irdma_remove_push_mmap_entries(iwqp);
+ irdma_free_qp_rsrc(iwqp);
+
+ return 0;
+}
+
+/**
+ * irdma_setup_virt_qp - setup for allocation of virtual qp
+ * @iwdev: irdma device
+ * @iwqp: qp ptr
+ * @init_info: initialize info to return
+ */
+static void irdma_setup_virt_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *init_info)
+{
+ struct irdma_pbl *iwpbl = iwqp->iwpbl;
+ struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
+
+ iwqp->page = qpmr->sq_page;
+ init_info->shadow_area_pa = qpmr->shadow;
+ if (iwpbl->pbl_allocated) {
+ init_info->virtual_map = true;
+ init_info->sq_pa = qpmr->sq_pbl.idx;
+ init_info->rq_pa = qpmr->rq_pbl.idx;
+ } else {
+ init_info->sq_pa = qpmr->sq_pbl.addr;
+ init_info->rq_pa = qpmr->rq_pbl.addr;
+ }
+}
+
+/**
+ * irdma_setup_umode_qp - setup sq and rq size in user mode qp
+ * @udata: udata
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ * @init_attr: Initial QP create attributes
+ */
+static int irdma_setup_umode_qp(struct ib_udata *udata,
+ struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+ struct irdma_create_qp_req req;
+ unsigned long flags;
+ int ret;
+
+ ret = ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen));
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: ib_copy_from_data fail\n");
+ return ret;
+ }
+
+ iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+ iwqp->user_mode = 1;
+ if (req.user_wqe_bufs) {
+ info->qp_uk_init_info.legacy_mode = ucontext->legacy_mode;
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ iwqp->iwpbl = irdma_get_pbl((unsigned long)req.user_wqe_bufs,
+ &ucontext->qp_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ if (!iwqp->iwpbl) {
+ ret = -ENODATA;
+ ibdev_dbg(&iwdev->ibdev, "VERBS: no pbl info\n");
+ return ret;
+ }
+ }
+
+ if (!ucontext->use_raw_attrs) {
+ /**
+ * Maintain backward compat with older ABI which passes sq and
+ * rq depth in quanta in cap.max_send_wr and cap.max_recv_wr.
+ * There is no way to compute the correct value of
+ * iwqp->max_send_wr/max_recv_wr in the kernel.
+ */
+ iwqp->max_send_wr = init_attr->cap.max_send_wr;
+ iwqp->max_recv_wr = init_attr->cap.max_recv_wr;
+ ukinfo->sq_size = init_attr->cap.max_send_wr;
+ ukinfo->rq_size = init_attr->cap.max_recv_wr;
+ irdma_uk_calc_shift_wq(ukinfo, &ukinfo->sq_shift,
+ &ukinfo->rq_shift);
+ } else {
+ ret = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
+ if (ret)
+ return ret;
+
+ ret = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
+ if (ret)
+ return ret;
+
+ iwqp->max_send_wr =
+ (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr =
+ (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ }
+
+ irdma_setup_virt_qp(iwdev, iwqp, info);
+
+ return 0;
+}
+
+/**
+ * irdma_setup_kmode_qp - setup initialization for kernel mode qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ * @init_attr: Initial QP create attributes
+ */
+static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *info,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_dma_mem *mem = &iwqp->kqp.dma_mem;
+ u32 size;
+ int status;
+ struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+
+ status = irdma_uk_calc_depth_shift_sq(ukinfo, &ukinfo->sq_depth,
+ &ukinfo->sq_shift);
+ if (status)
+ return status;
+
+ status = irdma_uk_calc_depth_shift_rq(ukinfo, &ukinfo->rq_depth,
+ &ukinfo->rq_shift);
+ if (status)
+ return status;
+
+ iwqp->kqp.sq_wrid_mem =
+ kcalloc(ukinfo->sq_depth, sizeof(*iwqp->kqp.sq_wrid_mem), GFP_KERNEL);
+ if (!iwqp->kqp.sq_wrid_mem)
+ return -ENOMEM;
+
+ iwqp->kqp.rq_wrid_mem =
+ kcalloc(ukinfo->rq_depth, sizeof(*iwqp->kqp.rq_wrid_mem), GFP_KERNEL);
+
+ if (!iwqp->kqp.rq_wrid_mem) {
+ kfree(iwqp->kqp.sq_wrid_mem);
+ iwqp->kqp.sq_wrid_mem = NULL;
+ return -ENOMEM;
+ }
+
+ ukinfo->sq_wrtrk_array = iwqp->kqp.sq_wrid_mem;
+ ukinfo->rq_wrid_array = iwqp->kqp.rq_wrid_mem;
+
+ size = (ukinfo->sq_depth + ukinfo->rq_depth) * IRDMA_QP_WQE_MIN_SIZE;
+ size += (IRDMA_SHADOW_AREA_SIZE << 3);
+
+ mem->size = ALIGN(size, 256);
+ mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va) {
+ kfree(iwqp->kqp.sq_wrid_mem);
+ iwqp->kqp.sq_wrid_mem = NULL;
+ kfree(iwqp->kqp.rq_wrid_mem);
+ iwqp->kqp.rq_wrid_mem = NULL;
+ return -ENOMEM;
+ }
+
+ ukinfo->sq = mem->va;
+ info->sq_pa = mem->pa;
+ ukinfo->rq = &ukinfo->sq[ukinfo->sq_depth];
+ info->rq_pa = info->sq_pa + (ukinfo->sq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->shadow_area = ukinfo->rq[ukinfo->rq_depth].elem;
+ info->shadow_area_pa =
+ info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
+ ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
+ ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ ukinfo->qp_id = iwqp->ibqp.qp_num;
+
+ iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
+ iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
+ init_attr->cap.max_send_wr = iwqp->max_send_wr;
+ init_attr->cap.max_recv_wr = iwqp->max_recv_wr;
+
+ return 0;
+}
+
+static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
+{
+ struct irdma_pci_f *rf = iwqp->iwdev->rf;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_create_qp_info *qp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+ memset(qp_info, 0, sizeof(*qp_info));
+ qp_info->mac_valid = true;
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
+
+ cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = &iwqp->sc_qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+
+ return status;
+}
+
+static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
+ struct irdma_qp_host_ctx_info *ctx_info)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp_info;
+
+ udp_info = &iwqp->udp_info;
+ udp_info->snd_mss = ib_mtu_enum_to_int(ib_mtu_int_to_enum(iwdev->vsi.mtu));
+ udp_info->cwnd = iwdev->roce_cwnd;
+ udp_info->rexmit_thresh = 2;
+ udp_info->rnr_nak_thresh = 2;
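+	/* Default RoCEv2 UDP ports: source from the start of the dynamic range (0xc000), destination the well-known RoCEv2 port */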
+ udp_info->src_port = 0xc000;
+ udp_info->dst_port = ROCE_V2_UDP_DPORT;
+ roce_info = &iwqp->roce_info;
+ ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
+
+ roce_info->rd_en = true;
+ roce_info->wr_rdresp_en = true;
+ roce_info->bind_en = true;
+ roce_info->dcqcn_en = false;
+ roce_info->rtomin = 5;
+
+ roce_info->ack_credits = iwdev->roce_ackcreds;
+ roce_info->ird_size = dev->hw_attrs.max_hw_ird;
+ roce_info->ord_size = dev->hw_attrs.max_hw_ord;
+
+ if (!iwqp->user_mode) {
+ roce_info->priv_mode_en = true;
+ roce_info->fast_reg_en = true;
+ roce_info->udprivcq_en = true;
+ }
+ roce_info->roce_tver = 0;
+
+ ctx_info->roce_info = &iwqp->roce_info;
+ ctx_info->udp_info = &iwqp->udp_info;
+ irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+}
+
+static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
+ struct irdma_qp_host_ctx_info *ctx_info)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_iwarp_offload_info *iwarp_info;
+
+ iwarp_info = &iwqp->iwarp_info;
+ ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
+ iwarp_info->rd_en = true;
+ iwarp_info->wr_rdresp_en = true;
+ iwarp_info->bind_en = true;
+ iwarp_info->ecn_en = true;
+ iwarp_info->rtomin = 5;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ iwarp_info->ib_rd_en = true;
+ if (!iwqp->user_mode) {
+ iwarp_info->priv_mode_en = true;
+ iwarp_info->fast_reg_en = true;
+ }
+ iwarp_info->ddp_ver = 1;
+ iwarp_info->rdmap_ver = 1;
+
+ ctx_info->iwarp_info = &iwqp->iwarp_info;
+ ctx_info->iwarp_info_valid = true;
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
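+	/* Clear the valid flag so later context updates do not rewrite the iwarp fields unless explicitly requested */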
+ ctx_info->iwarp_info_valid = false;
+}
+
+static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
+ struct irdma_device *iwdev)
+{
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
+
+ if (init_attr->create_flags)
+ return -EOPNOTSUPP;
+
+ if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
+ init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (init_attr->qp_type != IB_QPT_RC &&
+ init_attr->qp_type != IB_QPT_UD &&
+ init_attr->qp_type != IB_QPT_GSI)
+ return -EOPNOTSUPP;
+ } else {
+ if (init_attr->qp_type != IB_QPT_RC)
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void irdma_flush_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct irdma_qp *iwqp = container_of(dwork, struct irdma_qp, dwork_flush);
+
+ irdma_generate_flush_completions(iwqp);
+}
+
+/**
+ * irdma_create_qp - create qp
+ * @ibqp: ptr of qp
+ * @init_attr: attributes for qp
+ * @udata: user data for create qp
+ */
+static int irdma_create_qp(struct ib_qp *ibqp,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+#define IRDMA_CREATE_QP_MIN_REQ_LEN offsetofend(struct irdma_create_qp_req, user_compl_ctx)
+#define IRDMA_CREATE_QP_MIN_RESP_LEN offsetofend(struct irdma_create_qp_resp, rsvd)
+ struct ib_pd *ibpd = ibqp->pd;
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_device *iwdev = to_iwdev(ibpd->device);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_create_qp_resp uresp = {};
+ u32 qp_num = 0;
+ int err_code;
+ struct irdma_sc_qp *qp;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
+ struct irdma_qp_init_info init_info = {};
+ struct irdma_qp_host_ctx_info *ctx_info;
+
+ err_code = irdma_validate_qp_attrs(init_attr, iwdev);
+ if (err_code)
+ return err_code;
+
+ if (udata && (udata->inlen < IRDMA_CREATE_QP_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_QP_MIN_RESP_LEN))
+ return -EINVAL;
+
+ init_info.vsi = &iwdev->vsi;
+ init_info.qp_uk_init_info.uk_attrs = uk_attrs;
+ init_info.qp_uk_init_info.sq_size = init_attr->cap.max_send_wr;
+ init_info.qp_uk_init_info.rq_size = init_attr->cap.max_recv_wr;
+ init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
+ init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
+ init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;
+
+ qp = &iwqp->sc_qp;
+ qp->qp_uk.back_qp = iwqp;
+ qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
+
+ iwqp->iwdev = iwdev;
+ iwqp->q2_ctx_mem.size = ALIGN(IRDMA_Q2_BUF_SIZE + IRDMA_QP_CTX_SIZE,
+ 256);
+ iwqp->q2_ctx_mem.va = dma_alloc_coherent(dev->hw->device,
+ iwqp->q2_ctx_mem.size,
+ &iwqp->q2_ctx_mem.pa,
+ GFP_KERNEL);
+ if (!iwqp->q2_ctx_mem.va)
+ return -ENOMEM;
+
+ init_info.q2 = iwqp->q2_ctx_mem.va;
+ init_info.q2_pa = iwqp->q2_ctx_mem.pa;
+ init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
+ init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
+
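+	/* The GSI QP uses the reserved QP number 1; all other QPs take an id from the QP resource bitmap */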
+ if (init_attr->qp_type == IB_QPT_GSI)
+ qp_num = 1;
+ else
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
+ &qp_num, &rf->next_qp);
+ if (err_code)
+ goto error;
+
+ iwqp->iwpd = iwpd;
+ iwqp->ibqp.qp_num = qp_num;
+ qp = &iwqp->sc_qp;
+ iwqp->iwscq = to_iwcq(init_attr->send_cq);
+ iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
+ iwqp->host_ctx.va = init_info.host_ctx;
+ iwqp->host_ctx.pa = init_info.host_ctx_pa;
+ iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
+
+ init_info.pd = &iwpd->sc_pd;
+ init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
+ if (!rdma_protocol_roce(&iwdev->ibdev, 1))
+ init_info.qp_uk_init_info.first_sq_wq = 1;
+ iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
+ init_waitqueue_head(&iwqp->waitq);
+ init_waitqueue_head(&iwqp->mod_qp_waitq);
+
+ if (udata) {
+ init_info.qp_uk_init_info.abi_ver = iwpd->sc_pd.abi_ver;
+ err_code = irdma_setup_umode_qp(udata, iwdev, iwqp, &init_info,
+ init_attr);
+ } else {
+ INIT_DELAYED_WORK(&iwqp->dwork_flush, irdma_flush_worker);
+ init_info.qp_uk_init_info.abi_ver = IRDMA_ABI_VER;
+ err_code = irdma_setup_kmode_qp(iwdev, iwqp, &init_info, init_attr);
+ }
+
+ if (err_code) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: setup qp failed\n");
+ goto error;
+ }
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (init_attr->qp_type == IB_QPT_RC) {
+ init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_RC;
+ init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
+ IRDMA_WRITE_WITH_IMM |
+ IRDMA_ROCE;
+ } else {
+ init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_ROCE_UD;
+ init_info.qp_uk_init_info.qp_caps = IRDMA_SEND_WITH_IMM |
+ IRDMA_ROCE;
+ }
+ } else {
+ init_info.qp_uk_init_info.type = IRDMA_QP_TYPE_IWARP;
+ init_info.qp_uk_init_info.qp_caps = IRDMA_WRITE_WITH_IMM;
+ }
+
+ if (dev->hw_attrs.uk_attrs.hw_rev > IRDMA_GEN_1)
+ init_info.qp_uk_init_info.qp_caps |= IRDMA_PUSH_MODE;
+
+ err_code = irdma_sc_qp_init(qp, &init_info);
+ if (err_code) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: qp_init fail\n");
+ goto error;
+ }
+
+ ctx_info = &iwqp->ctx_info;
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1))
+ irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
+ else
+ irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
+
+ err_code = irdma_cqp_create_qp_cmd(iwqp);
+ if (err_code)
+ goto error;
+
+ refcount_set(&iwqp->refcnt, 1);
+ spin_lock_init(&iwqp->lock);
+ spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
+ iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
+ rf->qp_table[qp_num] = iwqp;
+
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (dev->ws_add(&iwdev->vsi, 0)) {
+ irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
+ err_code = -EINVAL;
+ goto error;
+ }
+
+ irdma_qp_add_qos(&iwqp->sc_qp);
+ }
+
+ if (udata) {
+ /* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
+ if (udata->outlen < sizeof(uresp)) {
+ uresp.lsmm = 1;
+ uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
+ } else {
+ if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
+ uresp.lsmm = 1;
+ }
+ uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
+ uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
+ uresp.qp_id = qp_num;
+ uresp.qp_caps = qp->qp_uk.qp_caps;
+
+ err_code = ib_copy_to_udata(udata, &uresp,
+ min(sizeof(uresp), udata->outlen));
+ if (err_code) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: copy_to_udata failed\n");
+ irdma_destroy_qp(&iwqp->ibqp, udata);
+ return err_code;
+ }
+ }
+
+ init_completion(&iwqp->free_qp);
+ return 0;
+
+error:
+ irdma_free_qp_rsrc(iwqp);
+ return err_code;
+}
+
+static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
+{
+ int acc_flags = 0;
+
+ if (rdma_protocol_roce(iwqp->ibqp.device, 1)) {
+ if (iwqp->roce_info.wr_rdresp_en) {
+ acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ }
+ if (iwqp->roce_info.rd_en)
+ acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (iwqp->roce_info.bind_en)
+ acc_flags |= IB_ACCESS_MW_BIND;
+ } else {
+ if (iwqp->iwarp_info.wr_rdresp_en) {
+ acc_flags |= IB_ACCESS_LOCAL_WRITE;
+ acc_flags |= IB_ACCESS_REMOTE_WRITE;
+ }
+ if (iwqp->iwarp_info.rd_en)
+ acc_flags |= IB_ACCESS_REMOTE_READ;
+ if (iwqp->iwarp_info.bind_en)
+ acc_flags |= IB_ACCESS_MW_BIND;
+ }
+ return acc_flags;
+}
+
+/**
+ * irdma_query_qp - query qp attributes
+ * @ibqp: qp pointer
+ * @attr: attributes pointer
+ * @attr_mask: Not used
+ * @init_attr: qp attributes to return
+ */
+static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_qp_init_attr *init_attr)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
+
+ memset(attr, 0, sizeof(*attr));
+ memset(init_attr, 0, sizeof(*init_attr));
+
+ attr->qp_state = iwqp->ibqp_state;
+ attr->cur_qp_state = iwqp->ibqp_state;
+ attr->cap.max_send_wr = iwqp->max_send_wr;
+ attr->cap.max_recv_wr = iwqp->max_recv_wr;
+ attr->cap.max_inline_data = qp->qp_uk.max_inline_data;
+ attr->cap.max_send_sge = qp->qp_uk.max_sq_frag_cnt;
+ attr->cap.max_recv_sge = qp->qp_uk.max_rq_frag_cnt;
+ attr->qp_access_flags = irdma_get_ib_acc_flags(iwqp);
+ attr->port_num = 1;
+ if (rdma_protocol_roce(ibqp->device, 1)) {
+ attr->path_mtu = ib_mtu_int_to_enum(iwqp->udp_info.snd_mss);
+ attr->qkey = iwqp->roce_info.qkey;
+ attr->rq_psn = iwqp->udp_info.epsn;
+ attr->sq_psn = iwqp->udp_info.psn_nxt;
+ attr->dest_qp_num = iwqp->roce_info.dest_qp;
+ attr->pkey_index = iwqp->roce_info.p_key;
+ attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
+ attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
+ attr->max_rd_atomic = iwqp->roce_info.ord_size;
+ attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
+ }
+
+ init_attr->event_handler = iwqp->ibqp.event_handler;
+ init_attr->qp_context = iwqp->ibqp.qp_context;
+ init_attr->send_cq = iwqp->ibqp.send_cq;
+ init_attr->recv_cq = iwqp->ibqp.recv_cq;
+ init_attr->cap = attr->cap;
+
+ return 0;
+}
+
+/**
+ * irdma_query_pkey - Query partition key
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: index of pkey
+ * @pkey: pointer to store the pkey
+ */
+static int irdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ if (index >= IRDMA_PKEY_TBL_SZ)
+ return -EINVAL;
+
+ *pkey = IRDMA_DEFAULT_PKEY;
+ return 0;
+}
+
+static u8 irdma_roce_get_vlan_prio(const struct ib_gid_attr *attr, u8 prio)
+{
+ struct net_device *ndev;
+
+ rcu_read_lock();
+ ndev = rcu_dereference(attr->ndev);
+ if (!ndev)
+ goto exit;
+ if (is_vlan_dev(ndev)) {
+ u16 vlan_qos = vlan_dev_get_egress_qos_mask(ndev, prio);
+
+ prio = (vlan_qos & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ }
+exit:
+ rcu_read_unlock();
+ return prio;
+}
+
+static int irdma_wait_for_suspend(struct irdma_qp *iwqp)
+{
+ if (!wait_event_timeout(iwqp->iwdev->suspend_wq,
+ !iwqp->suspend_pending,
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS))) {
+ iwqp->suspend_pending = false;
+ ibdev_warn(&iwqp->iwdev->ibdev,
+ "modify_qp timed out waiting for suspend. qp_id = %d, last_ae = 0x%x\n",
+ iwqp->ibqp.qp_num, iwqp->last_aeq);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_modify_qp_roce - modify qp request
+ * @ibqp: qp's pointer for modify
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
+ */
+int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
+ struct irdma_pd *iwpd = to_iwpd(ibqp->pd);
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_roce_offload_info *roce_info;
+ struct irdma_udp_offload_info *udp_info;
+ struct irdma_modify_qp_info info = {};
+ struct irdma_modify_qp_resp uresp = {};
+ struct irdma_modify_qp_req ureq = {};
+ unsigned long flags;
+ u8 issue_modify_qp = 0;
+ int ret = 0;
+
+ ctx_info = &iwqp->ctx_info;
+ roce_info = &iwqp->roce_info;
+ udp_info = &iwqp->udp_info;
+
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ roce_info->dest_qp = attr->dest_qp_num;
+
+ if (attr_mask & IB_QP_PKEY_INDEX) {
+ ret = irdma_query_pkey(ibqp->device, 0, attr->pkey_index,
+ &roce_info->p_key);
+ if (ret)
+ return ret;
+ }
+
+ if (attr_mask & IB_QP_QKEY)
+ roce_info->qkey = attr->qkey;
+
+ if (attr_mask & IB_QP_PATH_MTU)
+ udp_info->snd_mss = ib_mtu_enum_to_int(attr->path_mtu);
+
+ if (attr_mask & IB_QP_SQ_PSN) {
+ udp_info->psn_nxt = attr->sq_psn;
+ udp_info->lsn = 0xffff;
+ udp_info->psn_una = attr->sq_psn;
+ udp_info->psn_max = attr->sq_psn;
+ }
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ udp_info->epsn = attr->rq_psn;
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ udp_info->rnr_nak_thresh = attr->rnr_retry;
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ udp_info->rexmit_thresh = attr->retry_cnt;
+
+ ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;
+
+ if (attr_mask & IB_QP_AV) {
+ struct irdma_av *av = &iwqp->roce_ah.av;
+ const struct ib_gid_attr *sgid_attr =
+ attr->ah_attr.grh.sgid_attr;
+ u16 vlan_id = VLAN_N_VID;
+ u32 local_ip[4];
+
+ memset(&iwqp->roce_ah, 0, sizeof(iwqp->roce_ah));
+ if (attr->ah_attr.ah_flags & IB_AH_GRH) {
+ udp_info->ttl = attr->ah_attr.grh.hop_limit;
+ udp_info->flow_label = attr->ah_attr.grh.flow_label;
+ udp_info->tos = attr->ah_attr.grh.traffic_class;
+ udp_info->src_port =
+ rdma_get_udp_sport(udp_info->flow_label,
+ ibqp->qp_num,
+ roce_info->dest_qp);
+ irdma_qp_rem_qos(&iwqp->sc_qp);
+ dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
+ if (iwqp->sc_qp.vsi->dscp_mode)
+ ctx_info->user_pri =
+ iwqp->sc_qp.vsi->dscp_map[irdma_tos2dscp(udp_info->tos)];
+ else
+ ctx_info->user_pri = rt_tos2priority(udp_info->tos);
+ }
+ ret = rdma_read_gid_l2_fields(sgid_attr, &vlan_id,
+ ctx_info->roce_info->mac_addr);
+ if (ret)
+ return ret;
+ ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
+ ctx_info->user_pri);
+ if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
+ return -ENOMEM;
+ iwqp->sc_qp.user_pri = ctx_info->user_pri;
+ irdma_qp_add_qos(&iwqp->sc_qp);
+
+ if (vlan_id >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ vlan_id = 0;
+ if (vlan_id < VLAN_N_VID) {
+ udp_info->insert_vlan_tag = true;
+ udp_info->vlan_tag = vlan_id |
+ ctx_info->user_pri << VLAN_PRIO_SHIFT;
+ } else {
+ udp_info->insert_vlan_tag = false;
+ }
+
+ av->attrs = attr->ah_attr;
+ rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
+ av->net_type = rdma_gid_attr_network_type(sgid_attr);
+ if (av->net_type == RDMA_NETWORK_IPV6) {
+ __be32 *daddr =
+ av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
+ __be32 *saddr =
+ av->sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
+
+ irdma_copy_ip_ntohl(&udp_info->dest_ip_addr[0], daddr);
+ irdma_copy_ip_ntohl(&udp_info->local_ipaddr[0], saddr);
+
+ udp_info->ipv4 = false;
+ irdma_copy_ip_ntohl(local_ip, daddr);
+
+ } else if (av->net_type == RDMA_NETWORK_IPV4) {
+ __be32 saddr = av->sgid_addr.saddr_in.sin_addr.s_addr;
+ __be32 daddr = av->dgid_addr.saddr_in.sin_addr.s_addr;
+
+ local_ip[0] = ntohl(daddr);
+
+ udp_info->ipv4 = true;
+ udp_info->dest_ip_addr[0] = 0;
+ udp_info->dest_ip_addr[1] = 0;
+ udp_info->dest_ip_addr[2] = 0;
+ udp_info->dest_ip_addr[3] = local_ip[0];
+
+ udp_info->local_ipaddr[0] = 0;
+ udp_info->local_ipaddr[1] = 0;
+ udp_info->local_ipaddr[2] = 0;
+ udp_info->local_ipaddr[3] = ntohl(saddr);
+ }
+ udp_info->arp_idx =
+ irdma_add_arp(iwdev->rf, local_ip, udp_info->ipv4,
+ attr->ah_attr.roce.dmac);
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ if (attr->max_rd_atomic > dev->hw_attrs.max_hw_ord) {
+ ibdev_err(&iwdev->ibdev,
+ "rd_atomic = %d, above max_hw_ord=%d\n",
+ attr->max_rd_atomic,
+ dev->hw_attrs.max_hw_ord);
+ return -EINVAL;
+ }
+ if (attr->max_rd_atomic)
+ roce_info->ord_size = attr->max_rd_atomic;
+ info.ord_valid = true;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (attr->max_dest_rd_atomic > dev->hw_attrs.max_hw_ird) {
+ ibdev_err(&iwdev->ibdev,
+ "rd_atomic = %d, above max_hw_ird=%d\n",
+ attr->max_rd_atomic,
+ dev->hw_attrs.max_hw_ird);
+ return -EINVAL;
+ }
+ if (attr->max_dest_rd_atomic)
+ roce_info->ird_size = attr->max_dest_rd_atomic;
+ }
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
+ roce_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ roce_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ roce_info->rd_en = true;
+ }
+
+ wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
+
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d attr_mask=0x%x\n",
+ __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
+ iwqp->ibqp_state, iwqp->iwarp_state, attr_mask);
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (attr_mask & IB_QP_STATE) {
+ if (!ib_modify_qp_is_ok(iwqp->ibqp_state, attr->qp_state,
+ iwqp->ibqp.qp_type, attr_mask)) {
+ ibdev_warn(&iwdev->ibdev, "modify_qp invalid for qp_id=%d, old_state=0x%x, new_state=0x%x\n",
+ iwqp->ibqp.qp_num, iwqp->ibqp_state,
+ attr->qp_state);
+ ret = -EINVAL;
+ goto exit;
+ }
+ info.curr_iwarp_state = iwqp->iwarp_state;
+
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
+ info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
+ issue_modify_qp = 1;
+ }
+ break;
+ case IB_QPS_RTR:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ info.next_iwarp_state = IRDMA_QP_STATE_RTR;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_RTS:
+ if (iwqp->ibqp_state < IB_QPS_RTR ||
+ iwqp->ibqp_state == IB_QPS_ERR) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ info.ord_valid = true;
+ info.next_iwarp_state = IRDMA_QP_STATE_RTS;
+ issue_modify_qp = 1;
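+			/* Drop the QP lock around push page allocation since it issues a blocking CQP command */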
+ if (iwdev->push_mode && udata &&
+ iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_alloc_push_page(iwqp);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+ break;
+ case IB_QPS_SQD:
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_SQD)
+ goto exit;
+
+ if (iwqp->iwarp_state != IRDMA_QP_STATE_RTS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_SQD;
+ issue_modify_qp = 1;
+ iwqp->suspend_pending = true;
+ break;
+ case IB_QPS_SQE:
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (udata && udata->inlen) {
+ if (ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen)))
+ return -EINVAL;
+
+ irdma_flush_wqes(iwqp,
+ (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
+ (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
+ IRDMA_REFLUSH);
+ }
+ return 0;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ issue_modify_qp = 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ iwqp->ibqp_state = attr->qp_state;
+ }
+
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ if (attr_mask & IB_QP_STATE) {
+ if (issue_modify_qp) {
+ ctx_info->rem_endpoint_idx = udp_info->arp_idx;
+ if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ return -EINVAL;
+ if (info.next_iwarp_state == IRDMA_QP_STATE_SQD) {
+ ret = irdma_wait_for_suspend(iwqp);
+ if (ret)
+ return ret;
+ }
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ iwqp->iwarp_state = info.next_iwarp_state;
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ if (iwqp->ibqp_state > IB_QPS_RTS &&
+ !iwqp->flush_issued) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_flush_wqes(iwqp, IRDMA_FLUSH_SQ |
+ IRDMA_FLUSH_RQ |
+ IRDMA_FLUSH_WAIT);
+ iwqp->flush_issued = 1;
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ } else {
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ struct irdma_ucontext *ucontext;
+
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ !iwqp->push_wqe_mmap_entry &&
+ !irdma_setup_push_mmap_entries(ucontext, iwqp,
+ &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
+ uresp.push_valid = 1;
+ uresp.push_offset = iwqp->sc_qp.push_offset;
+ }
+ ret = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
+ udata->outlen));
+ if (ret) {
+ irdma_remove_push_mmap_entries(iwqp);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: copy_to_udata failed\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+exit:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ return ret;
+}
+
+/**
+ * irdma_modify_qp - modify qp request
+ * @ibqp: qp's pointer for modify
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
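+	/* Drop the reference taken at create time and wait until all remaining references are gone before freeing HW resources */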
+ */
+int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
+ struct ib_udata *udata)
+{
+#define IRDMA_MODIFY_QP_MIN_REQ_LEN offsetofend(struct irdma_modify_qp_req, rq_flush)
+#define IRDMA_MODIFY_QP_MIN_RESP_LEN offsetofend(struct irdma_modify_qp_resp, push_valid)
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+ struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_tcp_offload_info *tcp_info;
+ struct irdma_iwarp_offload_info *offload_info;
+ struct irdma_modify_qp_info info = {};
+ struct irdma_modify_qp_resp uresp = {};
+ struct irdma_modify_qp_req ureq = {};
+ u8 issue_modify_qp = 0;
+ u8 dont_wait = 0;
+ int err;
+ unsigned long flags;
+
+ if (udata) {
+ /* udata inlen/outlen can be 0 when supporting legacy libi40iw */
+ if ((udata->inlen && udata->inlen < IRDMA_MODIFY_QP_MIN_REQ_LEN) ||
+ (udata->outlen && udata->outlen < IRDMA_MODIFY_QP_MIN_RESP_LEN))
+ return -EINVAL;
+ }
+
+ if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
+ return -EOPNOTSUPP;
+
+ ctx_info = &iwqp->ctx_info;
+ offload_info = &iwqp->iwarp_info;
+ tcp_info = &iwqp->tcp_info;
+ wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: caller: %pS qp_id=%d to_ibqpstate=%d ibqpstate=%d irdma_qpstate=%d last_aeq=%d hw_tcp_state=%d hw_iwarp_state=%d attr_mask=0x%x\n",
+ __builtin_return_address(0), ibqp->qp_num, attr->qp_state,
+ iwqp->ibqp_state, iwqp->iwarp_state, iwqp->last_aeq,
+ iwqp->hw_tcp_state, iwqp->hw_iwarp_state, attr_mask);
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (attr_mask & IB_QP_STATE) {
+ info.curr_iwarp_state = iwqp->iwarp_state;
+ switch (attr->qp_state) {
+ case IB_QPS_INIT:
+ case IB_QPS_RTR:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_IDLE) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_INVALID) {
+ info.next_iwarp_state = IRDMA_QP_STATE_IDLE;
+ issue_modify_qp = 1;
+ }
+ if (iwdev->push_mode && udata &&
+ iwqp->sc_qp.push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_alloc_push_page(iwqp);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+ break;
+ case IB_QPS_RTS:
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_RTS ||
+ !iwqp->cm_id) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ issue_modify_qp = 1;
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_ESTABLISHED;
+ iwqp->hte_added = 1;
+ info.next_iwarp_state = IRDMA_QP_STATE_RTS;
+ info.tcp_ctx_valid = true;
+ info.ord_valid = true;
+ info.arp_cache_idx_valid = true;
+ info.cq_num_valid = true;
+ break;
+ case IB_QPS_SQD:
+ if (iwqp->hw_iwarp_state > IRDMA_QP_STATE_RTS) {
+ err = 0;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_CLOSING ||
+ iwqp->iwarp_state < IRDMA_QP_STATE_RTS) {
+ err = 0;
+ goto exit;
+ }
+
+ if (iwqp->iwarp_state > IRDMA_QP_STATE_CLOSING) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_CLOSING;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_SQE:
+ if (iwqp->iwarp_state >= IRDMA_QP_STATE_TERMINATE) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ info.next_iwarp_state = IRDMA_QP_STATE_TERMINATE;
+ issue_modify_qp = 1;
+ break;
+ case IB_QPS_ERR:
+ case IB_QPS_RESET:
+ if (iwqp->iwarp_state == IRDMA_QP_STATE_ERROR) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (udata && udata->inlen) {
+ if (ib_copy_from_udata(&ureq, udata,
+ min(sizeof(ureq), udata->inlen)))
+ return -EINVAL;
+
+ irdma_flush_wqes(iwqp,
+ (ureq.sq_flush ? IRDMA_FLUSH_SQ : 0) |
+ (ureq.rq_flush ? IRDMA_FLUSH_RQ : 0) |
+ IRDMA_REFLUSH);
+ }
+ return 0;
+ }
+
+ if (iwqp->sc_qp.term_flags) {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ irdma_terminate_del_timer(&iwqp->sc_qp);
+ spin_lock_irqsave(&iwqp->lock, flags);
+ }
+ info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ if (iwqp->hw_tcp_state > IRDMA_TCP_STATE_CLOSED &&
+ iwdev->iw_status &&
+ iwqp->hw_tcp_state != IRDMA_TCP_STATE_TIME_WAIT)
+ info.reset_tcp_conn = true;
+ else
+ dont_wait = 1;
+
+ issue_modify_qp = 1;
+ info.next_iwarp_state = IRDMA_QP_STATE_ERROR;
+ break;
+ default:
+ err = -EINVAL;
+ goto exit;
+ }
+
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ if (attr_mask & IB_QP_ACCESS_FLAGS) {
+ ctx_info->iwarp_info_valid = true;
+ if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
+ offload_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ offload_info->wr_rdresp_en = true;
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ offload_info->rd_en = true;
+ }
+
+ if (ctx_info->iwarp_info_valid) {
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ if (attr_mask & IB_QP_STATE) {
+ if (issue_modify_qp) {
+ ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
+ if (irdma_hw_modify_qp(iwdev, iwqp, &info, true))
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->iwarp_state == info.curr_iwarp_state) {
+ iwqp->iwarp_state = info.next_iwarp_state;
+ iwqp->ibqp_state = attr->qp_state;
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+
+ if (issue_modify_qp && iwqp->ibqp_state > IB_QPS_RTS) {
+ if (dont_wait) {
+ if (iwqp->hw_tcp_state) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSED;
+ iwqp->last_aeq = IRDMA_AE_RESET_SENT;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ irdma_cm_disconn(iwqp);
+ } else {
+ int close_timer_started;
+
+ spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+
+ if (iwqp->cm_node) {
+ refcount_inc(&iwqp->cm_node->refcnt);
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ close_timer_started = atomic_inc_return(&iwqp->close_timer_started);
+ if (iwqp->cm_id && close_timer_started == 1)
+ irdma_schedule_cm_timer(iwqp->cm_node,
+ (struct irdma_puda_buf *)iwqp,
+ IRDMA_TIMER_TYPE_CLOSE, 1, 0);
+
+ irdma_rem_ref_cm_node(iwqp->cm_node);
+ } else {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ }
+ }
+ }
+ if (attr_mask & IB_QP_STATE && udata && udata->outlen &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
+ struct irdma_ucontext *ucontext;
+
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext, ibucontext);
+ if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
+ !iwqp->push_wqe_mmap_entry &&
+ !irdma_setup_push_mmap_entries(ucontext, iwqp,
+ &uresp.push_wqe_mmap_key, &uresp.push_db_mmap_key)) {
+ uresp.push_valid = 1;
+ uresp.push_offset = iwqp->sc_qp.push_offset;
+ }
+
+ err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp),
+ udata->outlen));
+ if (err) {
+ irdma_remove_push_mmap_entries(iwqp);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: copy_to_udata failed\n");
+ return err;
+ }
+ }
+
+ return 0;
+exit:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ return err;
+}
+
+/**
+ * irdma_cq_free_rsrc - free up resources for cq
+ * @rf: RDMA PCI function
+ * @iwcq: cq ptr
+ */
+static void irdma_cq_free_rsrc(struct irdma_pci_f *rf, struct irdma_cq *iwcq)
+{
+ struct irdma_sc_cq *cq = &iwcq->sc_cq;
+
+ if (!iwcq->user_mode) {
+ dma_free_coherent(rf->sc_dev.hw->device, iwcq->kmem.size,
+ iwcq->kmem.va, iwcq->kmem.pa);
+ iwcq->kmem.va = NULL;
+ dma_free_coherent(rf->sc_dev.hw->device,
+ iwcq->kmem_shadow.size,
+ iwcq->kmem_shadow.va, iwcq->kmem_shadow.pa);
+ iwcq->kmem_shadow.va = NULL;
+ }
+
+ irdma_free_rsrc(rf, rf->allocated_cqs, cq->cq_uk.cq_id);
+}
+
+/**
+ * irdma_free_cqbuf - worker to free a cq buffer
+ * @work: provides access to the cq buffer to free
+ */
+static void irdma_free_cqbuf(struct work_struct *work)
+{
+ struct irdma_cq_buf *cq_buf = container_of(work, struct irdma_cq_buf, work);
+
+ dma_free_coherent(cq_buf->hw->device, cq_buf->kmem_buf.size,
+ cq_buf->kmem_buf.va, cq_buf->kmem_buf.pa);
+ cq_buf->kmem_buf.va = NULL;
+ kfree(cq_buf);
+}
+
+/**
+ * irdma_process_resize_list - remove resized cq buffers from the resize_list
+ * @iwcq: cq which owns the resize_list
+ * @iwdev: irdma device
+ * @lcqe_buf: the buffer where the last cqe is received
+ */
+static int irdma_process_resize_list(struct irdma_cq *iwcq,
+ struct irdma_device *iwdev,
+ struct irdma_cq_buf *lcqe_buf)
+{
+ struct list_head *tmp_node, *list_node;
+ struct irdma_cq_buf *cq_buf;
+ int cnt = 0;
+
+ list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
+ cq_buf = list_entry(list_node, struct irdma_cq_buf, list);
+ if (cq_buf == lcqe_buf)
+ return cnt;
+
+ list_del(&cq_buf->list);
+ queue_work(iwdev->cleanup_wq, &cq_buf->work);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+/**
+ * irdma_destroy_cq - destroy cq
+ * @ib_cq: cq pointer
+ * @udata: user data
+ */
+static int irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ib_cq->device);
+ struct irdma_cq *iwcq = to_iwcq(ib_cq);
+ struct irdma_sc_cq *cq = &iwcq->sc_cq;
+ struct irdma_sc_dev *dev = cq->dev;
+ struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
+ struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ if (!list_empty(&iwcq->cmpl_generated))
+ irdma_remove_cmpls_list(iwcq);
+ if (!list_empty(&iwcq->resize_list))
+ irdma_process_resize_list(iwcq, iwdev, NULL);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ irdma_cq_rem_ref(ib_cq);
+ wait_for_completion(&iwcq->free_cq);
+
+ irdma_cq_wq_destroy(iwdev->rf, cq);
+
+ spin_lock_irqsave(&iwceq->ce_lock, flags);
+ irdma_sc_cleanup_ceqes(cq, ceq);
+ spin_unlock_irqrestore(&iwceq->ce_lock, flags);
+ irdma_cq_free_rsrc(iwdev->rf, iwcq);
+
+ return 0;
+}
+
+/**
+ * irdma_resize_cq - resize cq
+ * @ibcq: cq to be resized
+ * @entries: desired cq size
+ * @udata: user data
+ */
+static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
+ struct ib_udata *udata)
+{
+#define IRDMA_RESIZE_CQ_MIN_REQ_LEN offsetofend(struct irdma_resize_cq_req, user_cq_buffer)
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ struct irdma_sc_dev *dev = iwcq->sc_cq.dev;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_modify_cq_info *m_info;
+ struct irdma_modify_cq_info info = {};
+ struct irdma_dma_mem kmem_buf;
+ struct irdma_cq_mr *cqmr_buf;
+ struct irdma_pbl *iwpbl_buf;
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ struct irdma_cq_buf *cq_buf = NULL;
+ unsigned long flags;
+ int ret;
+
+ iwdev = to_iwdev(ibcq->device);
+ rf = iwdev->rf;
+
+ if (!(rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_CQ_RESIZE))
+ return -EOPNOTSUPP;
+
+ if (udata && udata->inlen < IRDMA_RESIZE_CQ_MIN_REQ_LEN)
+ return -EINVAL;
+
+ if (entries > rf->max_cqe)
+ return -EINVAL;
+
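+	/* Kernel-mode CQs reserve one extra entry, and GEN_2 and later rings are sized at twice the requested depth */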
+ if (!iwcq->user_mode) {
+ entries++;
+ if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ entries *= 2;
+ }
+
+ info.cq_size = max(entries, 4);
+
+ if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)
+ return 0;
+
+ if (udata) {
+ struct irdma_resize_cq_req req = {};
+ struct irdma_ucontext *ucontext =
+ rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+
+ /* CQ resize not supported with legacy GEN_1 libi40iw */
+ if (ucontext->legacy_mode)
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen)))
+ return -EINVAL;
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl_buf = irdma_get_pbl((unsigned long)req.user_cq_buffer,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ if (!iwpbl_buf)
+ return -ENOMEM;
+
+ cqmr_buf = &iwpbl_buf->cq_mr;
+ if (iwpbl_buf->pbl_allocated) {
+ info.virtual_map = true;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = cqmr_buf->cq_pbl.idx;
+ } else {
+ info.cq_pa = cqmr_buf->cq_pbl.addr;
+ }
+ } else {
+ /* Kmode CQ resize */
+ int rsize;
+
+ rsize = info.cq_size * sizeof(struct irdma_cqe);
+ kmem_buf.size = ALIGN(round_up(rsize, 256), 256);
+ kmem_buf.va = dma_alloc_coherent(dev->hw->device,
+ kmem_buf.size, &kmem_buf.pa,
+ GFP_KERNEL);
+ if (!kmem_buf.va)
+ return -ENOMEM;
+
+ info.cq_base = kmem_buf.va;
+ info.cq_pa = kmem_buf.pa;
+ cq_buf = kzalloc(sizeof(*cq_buf), GFP_KERNEL);
+ if (!cq_buf) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ info.shadow_read_threshold = iwcq->sc_cq.shadow_read_threshold;
+ info.cq_resize = true;
+
+ cqp_info = &cqp_request->info;
+ m_info = &cqp_info->in.u.cq_modify.info;
+ memcpy(m_info, &info, sizeof(*m_info));
+
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_MODIFY;
+ cqp_info->in.u.cq_modify.cq = &iwcq->sc_cq;
+ cqp_info->in.u.cq_modify.scratch = (uintptr_t)cqp_request;
+ cqp_info->post_sq = 1;
+ ret = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ if (cq_buf) {
+ cq_buf->kmem_buf = iwcq->kmem;
+ cq_buf->hw = dev->hw;
+ memcpy(&cq_buf->cq_uk, &iwcq->sc_cq.cq_uk, sizeof(cq_buf->cq_uk));
+ INIT_WORK(&cq_buf->work, irdma_free_cqbuf);
+ list_add_tail(&cq_buf->list, &iwcq->resize_list);
+ iwcq->kmem = kmem_buf;
+ }
+
+ irdma_sc_cq_resize(&iwcq->sc_cq, &info);
+ ibcq->cqe = info.cq_size - 1;
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ return 0;
+error:
+ if (!udata) {
+ dma_free_coherent(dev->hw->device, kmem_buf.size, kmem_buf.va,
+ kmem_buf.pa);
+ kmem_buf.va = NULL;
+ }
+ kfree(cq_buf);
+
+ return ret;
+}
+
+static inline int cq_validate_flags(u32 flags, u8 hw_rev)
+{
+ /* GEN1 does not support CQ create flags */
+ if (hw_rev == IRDMA_GEN_1)
+ return flags ? -EOPNOTSUPP : 0;
+
+ return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
+}
+
+/**
+ * irdma_create_cq - create cq
+ * @ibcq: CQ allocated
+ * @attr: attributes for cq
+ * @udata: user data
+ */
+static int irdma_create_cq(struct ib_cq *ibcq,
+ const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+{
+#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
+#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
+ struct ib_device *ibdev = ibcq->device;
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_cq *iwcq = to_iwcq(ibcq);
+ u32 cq_num = 0;
+ struct irdma_sc_cq *cq;
+ struct irdma_sc_dev *dev = &rf->sc_dev;
+ struct irdma_cq_init_info info = {};
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
+ unsigned long flags;
+ int err_code;
+ int entries = attr->cqe;
+
+ err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
+ if (err_code)
+ return err_code;
+
+ if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
+ udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
+ return -EINVAL;
+
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
+ &rf->next_cq);
+ if (err_code)
+ return err_code;
+
+ cq = &iwcq->sc_cq;
+ cq->back_cq = iwcq;
+ refcount_set(&iwcq->refcnt, 1);
+ spin_lock_init(&iwcq->lock);
+ INIT_LIST_HEAD(&iwcq->resize_list);
+ INIT_LIST_HEAD(&iwcq->cmpl_generated);
+ info.dev = dev;
+ ukinfo->cq_size = max(entries, 4);
+ ukinfo->cq_id = cq_num;
+ iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
+ if (attr->comp_vector < rf->ceqs_count)
+ info.ceq_id = attr->comp_vector;
+ info.ceq_id_valid = true;
+ info.ceqe_mask = 1;
+ info.type = IRDMA_CQ_TYPE_IWARP;
+ info.vsi = &iwdev->vsi;
+
+ if (udata) {
+ struct irdma_ucontext *ucontext;
+ struct irdma_create_cq_req req = {};
+ struct irdma_cq_mr *cqmr;
+ struct irdma_pbl *iwpbl;
+ struct irdma_pbl *iwpbl_shadow;
+ struct irdma_cq_mr *cqmr_shadow;
+
+ iwcq->user_mode = true;
+ ucontext =
+ rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen))) {
+ err_code = -EFAULT;
+ goto cq_free_rsrc;
+ }
+
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl = irdma_get_pbl((unsigned long)req.user_cq_buf,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ if (!iwpbl) {
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+
+ iwcq->iwpbl = iwpbl;
+ iwcq->cq_mem_size = 0;
+ cqmr = &iwpbl->cq_mr;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_CQ_RESIZE && !ucontext->legacy_mode) {
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ iwpbl_shadow = irdma_get_pbl(
+ (unsigned long)req.user_shadow_area,
+ &ucontext->cq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ if (!iwpbl_shadow) {
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+ iwcq->iwpbl_shadow = iwpbl_shadow;
+ cqmr_shadow = &iwpbl_shadow->cq_mr;
+ info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
+ cqmr->split = true;
+ } else {
+ info.shadow_area_pa = cqmr->shadow;
+ }
+ if (iwpbl->pbl_allocated) {
+ info.virtual_map = true;
+ info.pbl_chunk_size = 1;
+ info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
+ } else {
+ info.cq_base_pa = cqmr->cq_pbl.addr;
+ }
+ } else {
+ /* Kmode allocations */
+ int rsize;
+
+ if (entries < 1 || entries > rf->max_cqe) {
+ err_code = -EINVAL;
+ goto cq_free_rsrc;
+ }
+
+ entries++;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ entries *= 2;
+ ukinfo->cq_size = entries;
+
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+ iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
+ iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
+ iwcq->kmem.size,
+ &iwcq->kmem.pa, GFP_KERNEL);
+ if (!iwcq->kmem.va) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+
+ iwcq->kmem_shadow.size = ALIGN(IRDMA_SHADOW_AREA_SIZE << 3,
+ 64);
+ iwcq->kmem_shadow.va = dma_alloc_coherent(dev->hw->device,
+ iwcq->kmem_shadow.size,
+ &iwcq->kmem_shadow.pa,
+ GFP_KERNEL);
+ if (!iwcq->kmem_shadow.va) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+ info.shadow_area_pa = iwcq->kmem_shadow.pa;
+ ukinfo->shadow_area = iwcq->kmem_shadow.va;
+ ukinfo->cq_base = iwcq->kmem.va;
+ info.cq_base_pa = iwcq->kmem.pa;
+ }
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,
+ (u32)IRDMA_MAX_CQ_READ_THRESH);
+
+ if (irdma_sc_cq_init(cq, &info)) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n");
+ err_code = -EPROTO;
+ goto cq_free_rsrc;
+ }
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto cq_free_rsrc;
+ }
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_create.cq = cq;
+ cqp_info->in.u.cq_create.check_overflow = true;
+ cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+ err_code = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (err_code)
+ goto cq_free_rsrc;
+
+ if (udata) {
+ struct irdma_create_cq_resp resp = {};
+
+ resp.cq_id = info.cq_uk_init_info.cq_id;
+ resp.cq_size = info.cq_uk_init_info.cq_size;
+ if (ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen))) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: copy to user data\n");
+ err_code = -EPROTO;
+ goto cq_destroy;
+ }
+ }
+ rf->cq_table[cq_num] = iwcq;
+ init_completion(&iwcq->free_cq);
+
+ return 0;
+cq_destroy:
+ irdma_cq_wq_destroy(rf, cq);
+cq_free_rsrc:
+ irdma_cq_free_rsrc(rf, iwcq);
+
+ return err_code;
+}
+
+/**
+ * irdma_get_mr_access - get hw MR access permissions from IB access flags
+ * @access: IB access flags
+ */
+static inline u16 irdma_get_mr_access(int access)
+{
+ u16 hw_access = 0;
+
+ hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
+ IRDMA_ACCESS_FLAGS_LOCALWRITE : 0;
+ hw_access |= (access & IB_ACCESS_REMOTE_WRITE) ?
+ IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
+ hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
+ IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
+ hw_access |= (access & IB_ACCESS_MW_BIND) ?
+ IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
+ hw_access |= (access & IB_ZERO_BASED) ?
+ IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
+ hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
+
+ return hw_access;
+}
+
+/**
+ * irdma_free_stag - free stag resource
+ * @iwdev: irdma device
+ * @stag: stag to free
+ */
+static void irdma_free_stag(struct irdma_device *iwdev, u32 stag)
+{
+ u32 stag_idx;
+
+ stag_idx = (stag & iwdev->rf->mr_stagmask) >> IRDMA_CQPSQ_STAG_IDX_S;
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_mrs, stag_idx);
+}
+
+/**
+ * irdma_create_stag - create random stag
+ * @iwdev: irdma device
+ */
+static u32 irdma_create_stag(struct irdma_device *iwdev)
+{
+ u32 stag = 0;
+ u32 stag_index = 0;
+ u32 next_stag_index;
+ u32 driver_key;
+ u32 random;
+ u8 consumer_key;
+ int ret;
+
+ get_random_bytes(&random, sizeof(random));
+ consumer_key = (u8)random;
+
+ driver_key = random & ~iwdev->rf->mr_stagmask;
+ next_stag_index = (random & iwdev->rf->mr_stagmask) >> 8;
+ next_stag_index %= iwdev->rf->max_mr;
+
+ ret = irdma_alloc_rsrc(iwdev->rf, iwdev->rf->allocated_mrs,
+ iwdev->rf->max_mr, &stag_index,
+ &next_stag_index);
+ if (ret)
+ return stag;
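+	/*
+	 * Compose the stag: the allocated index is shifted up by
+	 * IRDMA_CQPSQ_STAG_IDX_S, the driver key fills the bits outside the
+	 * stag mask, and the random consumer key occupies the low byte.
+	 */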
+ stag = stag_index << IRDMA_CQPSQ_STAG_IDX_S;
+ stag |= driver_key;
+ stag += (u32)consumer_key;
+
+ return stag;
+}
+
+/**
+ * irdma_next_pbl_addr - Get next pbl address
+ * @pbl: pointer to a pble
+ * @pinfo: info pointer
+ * @idx: index
+ */
+static inline u64 *irdma_next_pbl_addr(u64 *pbl, struct irdma_pble_info **pinfo,
+ u32 *idx)
+{
+ *idx += 1;
+ if (!(*pinfo) || *idx != (*pinfo)->cnt)
+ return ++pbl;
+ *idx = 0;
+ (*pinfo)++;
+
+ return (*pinfo)->addr;
+}
+
+/**
+ * irdma_copy_user_pgaddrs - copy user page addresses to pbles locally
+ * @iwmr: iwmr for IB's user page addresses
+ * @pbl: pbl pointer to save level 1 or level 0 pble
+ * @level: indicates level 0, 1 or 2
+ */
+static void irdma_copy_user_pgaddrs(struct irdma_mr *iwmr, u64 *pbl,
+ enum irdma_pble_level level)
+{
+ struct ib_umem *region = iwmr->region;
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_pble_info *pinfo;
+ struct ib_block_iter biter;
+ u32 idx = 0;
+ u32 pbl_cnt = 0;
+
+ pinfo = (level == PBLE_LEVEL_1) ? NULL : palloc->level2.leaf;
+
+ if (iwmr->type == IRDMA_MEMREG_TYPE_QP)
+ iwpbl->qp_mr.sq_page = sg_page(region->sgt_append.sgt.sgl);
+
+ rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
+ *pbl = rdma_block_iter_dma_address(&biter);
+ if (++pbl_cnt == palloc->total_cnt)
+ break;
+ pbl = irdma_next_pbl_addr(pbl, &pinfo, &idx);
+ }
+}
+
+/**
+ * irdma_check_mem_contiguous - check if pbls stored in arr are contiguous
+ * @arr: lvl1 pbl array
+ * @npages: page count
+ * @pg_size: page size
+ */
+static bool irdma_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
+{
+ u32 pg_idx;
+
+ for (pg_idx = 0; pg_idx < npages; pg_idx++) {
+ if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * irdma_check_mr_contiguous - check if MR is physically contiguous
+ * @palloc: pbl allocation struct
+ * @pg_size: page size
+ */
+static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
+ u32 pg_size)
+{
+ struct irdma_pble_level2 *lvl2 = &palloc->level2;
+ struct irdma_pble_info *leaf = lvl2->leaf;
+ u64 *arr = NULL;
+ u64 *start_addr = NULL;
+ int i;
+ bool ret;
+
+ if (palloc->level == PBLE_LEVEL_1) {
+ arr = palloc->level1.addr;
+ ret = irdma_check_mem_contiguous(arr, palloc->total_cnt,
+ pg_size);
+ return ret;
+ }
+
+ start_addr = leaf->addr;
+
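+	/*
+	 * For a two-level PBL, every leaf must be internally contiguous and
+	 * must continue the physical range where the previous leaf ended.
+	 */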
+ for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+ arr = leaf->addr;
+ if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
+ return false;
+ ret = irdma_check_mem_contiguous(arr, leaf->cnt, pg_size);
+ if (!ret)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * irdma_setup_pbles - copy user page addresses to pbles
+ * @rf: RDMA PCI function
+ * @iwmr: mr pointer for this memory registration
+ * @lvl: requested pble levels
+ */
+static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
+ u8 lvl)
+{
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_pble_info *pinfo;
+ u64 *pbl;
+ int status;
+ enum irdma_pble_level level = PBLE_LEVEL_1;
+
+ if (lvl) {
+ status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
+ lvl);
+ if (status)
+ return status;
+
+ iwpbl->pbl_allocated = true;
+ level = palloc->level;
+ pinfo = (level == PBLE_LEVEL_1) ? &palloc->level1 :
+ palloc->level2.leaf;
+ pbl = pinfo->addr;
+ } else {
+ pbl = iwmr->pgaddrmem;
+ }
+
+ irdma_copy_user_pgaddrs(iwmr, pbl, level);
+
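+	/*
+	 * Cache the first page address; if the region later turns out to be
+	 * physically contiguous, the PBL is freed and this address is used
+	 * to register the MR directly.
+	 */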
+ if (lvl)
+ iwmr->pgaddrmem[0] = *pbl;
+
+ return 0;
+}
+
+/**
+ * irdma_handle_q_mem - handle memory for qp and cq
+ * @iwdev: irdma device
+ * @req: information for q memory management
+ * @iwpbl: pble struct
+ * @lvl: pble level mask
+ */
+static int irdma_handle_q_mem(struct irdma_device *iwdev,
+ struct irdma_mem_reg_req *req,
+ struct irdma_pbl *iwpbl, u8 lvl)
+{
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_mr *iwmr = iwpbl->iwmr;
+ struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
+ struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
+ struct irdma_hmc_pble *hmc_p;
+ u64 *arr = iwmr->pgaddrmem;
+ u32 pg_size, total;
+ int err = 0;
+ bool ret = true;
+
+ pg_size = iwmr->page_size;
+ err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
+ if (err)
+ return err;
+
+ if (lvl)
+ arr = palloc->level1.addr;
+
+ switch (iwmr->type) {
+ case IRDMA_MEMREG_TYPE_QP:
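+		/*
+		 * The user buffer lays out SQ pages, then RQ pages, then one
+		 * trailing page used as the QP shadow area (arr[total]).
+		 */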
+ total = req->sq_pages + req->rq_pages;
+ hmc_p = &qpmr->sq_pbl;
+ qpmr->shadow = (dma_addr_t)arr[total];
+
+ if (lvl) {
+ ret = irdma_check_mem_contiguous(arr, req->sq_pages,
+ pg_size);
+ if (ret)
+ ret = irdma_check_mem_contiguous(&arr[req->sq_pages],
+ req->rq_pages,
+ pg_size);
+ }
+
+ if (!ret) {
+ hmc_p->idx = palloc->level1.idx;
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->idx = palloc->level1.idx + req->sq_pages;
+ } else {
+ hmc_p->addr = arr[0];
+ hmc_p = &qpmr->rq_pbl;
+ hmc_p->addr = arr[req->sq_pages];
+ }
+ break;
+ case IRDMA_MEMREG_TYPE_CQ:
+ hmc_p = &cqmr->cq_pbl;
+
+ if (!cqmr->split)
+ cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
+
+ if (lvl)
+ ret = irdma_check_mem_contiguous(arr, req->cq_pages,
+ pg_size);
+
+ if (!ret)
+ hmc_p->idx = palloc->level1.idx;
+ else
+ hmc_p->addr = arr[0];
+ break;
+ default:
+ ibdev_dbg(&iwdev->ibdev, "VERBS: MR type error\n");
+ err = -EINVAL;
+ }
+
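+	/*
+	 * If the queue memory turned out to be physically contiguous, the
+	 * level-1 PBL is not needed and can be released here.
+	 */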
+ if (lvl && ret) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ iwpbl->pbl_allocated = false;
+ }
+
+ return err;
+}
+
+/**
+ * irdma_hw_alloc_mw - create the hw memory window
+ * @iwdev: irdma device
+ * @iwmr: pointer to memory window info
+ */
+static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
+{
+ struct irdma_mw_alloc_info *info;
+ struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.mw_alloc.info;
+ memset(info, 0, sizeof(*info));
+ if (iwmr->ibmw.type == IB_MW_TYPE_1)
+ info->mw_wide = true;
+
+ info->page_size = PAGE_SIZE;
+ info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->remote_access = true;
+ cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_alloc_mw - Allocate memory window
+ * @ibmw: Memory Window
+ * @udata: user data pointer
+ */
+static int irdma_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibmw->device);
+ struct irdma_mr *iwmr = to_iwmw(ibmw);
+ int err_code;
+ u32 stag;
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag)
+ return -ENOMEM;
+
+ iwmr->stag = stag;
+ ibmw->rkey = stag;
+
+ err_code = irdma_hw_alloc_mw(iwdev, iwmr);
+ if (err_code) {
+ irdma_free_stag(iwdev, stag);
+ return err_code;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_dealloc_mw - Dealloc memory window
+ * @ibmw: memory window structure.
+ */
+static int irdma_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct ib_pd *ibpd = ibmw->pd;
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
+ struct irdma_device *iwdev = to_iwdev(ibmw->device);
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_dealloc_stag_info *info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->stag_idx = ibmw->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->mr = false;
+ cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ irdma_free_stag(iwdev, iwmr->stag);
+
+ return 0;
+}
+
+/**
+ * irdma_hw_alloc_stag - cqp command to allocate stag
+ * @iwdev: irdma device
+ * @iwmr: irdma mr pointer
+ */
+static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_allocate_stag_info *info;
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
+ int status;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.alloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->page_size = PAGE_SIZE;
+ info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->total_len = iwmr->len;
+ info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+ info->remote_access = true;
+ cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.alloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_alloc_mr - register stag for fast memory registration
+ * @pd: ibpd pointer
+ * @mr_type: memory type for stag registration
+ * @max_num_sg: max number of pages
+ */
+static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_pble_alloc *palloc;
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ u32 stag;
+ int err_code;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ err_code = -ENOMEM;
+ goto err;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ palloc = &iwpbl->pble_alloc;
+ iwmr->page_cnt = max_num_sg;
+ /* Use system PAGE_SIZE as the sg page sizes are unknown at this point */
+ iwmr->len = max_num_sg * PAGE_SIZE;
+ err_code = irdma_get_pble(iwdev->rf->pble_rsrc, palloc, iwmr->page_cnt,
+ false);
+ if (err_code)
+ goto err_get_pble;
+
+ err_code = irdma_hw_alloc_stag(iwdev, iwmr);
+ if (err_code)
+ goto err_alloc_stag;
+
+ iwpbl->pbl_allocated = true;
+
+ return &iwmr->ibmr;
+err_alloc_stag:
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+err_get_pble:
+ irdma_free_stag(iwdev, stag);
+err:
+ kfree(iwmr);
+
+ return ERR_PTR(err_code);
+}
+
+/**
+ * irdma_set_page - populate pbl list for fmr
+ * @ibmr: ib mem to access iwarp mr pointer
+ * @addr: page dma address for pbl list
+ */
+static int irdma_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct irdma_mr *iwmr = to_iwmr(ibmr);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ u64 *pbl;
+
+ if (unlikely(iwmr->npages == iwmr->page_cnt))
+ return -ENOMEM;
+
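+	/*
+	 * For a two-level PBL, the upper bits of npages select the leaf
+	 * (512 entries per leaf) and the low bits select the slot within it.
+	 */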
+ if (palloc->level == PBLE_LEVEL_2) {
+ struct irdma_pble_info *palloc_info =
+ palloc->level2.leaf + (iwmr->npages >> PBLE_512_SHIFT);
+
+ palloc_info->addr[iwmr->npages & (PBLE_PER_PAGE - 1)] = addr;
+ } else {
+ pbl = palloc->level1.addr;
+ pbl[iwmr->npages] = addr;
+ }
+ iwmr->npages++;
+
+ return 0;
+}
+
+/**
+ * irdma_map_mr_sg - map an sg list for fast memory registration
+ * @ibmr: ib mem to access iwarp mr pointer
+ * @sg: scatter gather list
+ * @sg_nents: number of sg entries
+ * @sg_offset: byte offset into the sg list
+ */
+static int irdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct irdma_mr *iwmr = to_iwmr(ibmr);
+
+ iwmr->npages = 0;
+
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, irdma_set_page);
+}
+
+/**
+ * irdma_hwreg_mr - send cqp command for memory registration
+ * @iwdev: irdma device
+ * @iwmr: irdma mr pointer
+ * @access: access for MR
+ */
+static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
+ u16 access)
+{
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_reg_ns_stag_info *stag_info;
+ struct ib_pd *pd = iwmr->ibmr.pd;
+ struct irdma_pd *iwpd = to_iwpd(pd);
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int ret;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
+ memset(stag_info, 0, sizeof(*stag_info));
+ stag_info->va = iwpbl->user_base;
+ stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
+ stag_info->stag_key = (u8)iwmr->stag;
+ stag_info->total_len = iwmr->len;
+ stag_info->access_rights = irdma_get_mr_access(access);
+ stag_info->pd_id = iwpd->sc_pd.pd_id;
+ stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+ if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
+ stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
+ else
+ stag_info->addr_type = IRDMA_ADDR_TYPE_VA_BASED;
+ stag_info->page_size = iwmr->page_size;
+
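+	/*
+	 * A one-level PBL is referenced by its level-1 index (chunk_size 1),
+	 * a two-level PBL by its root index (chunk_size 3); a contiguous MR
+	 * is registered by its physical base address instead.
+	 */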
+ if (iwpbl->pbl_allocated) {
+ if (palloc->level == PBLE_LEVEL_1) {
+ stag_info->first_pm_pbl_index = palloc->level1.idx;
+ stag_info->chunk_size = 1;
+ } else {
+ stag_info->first_pm_pbl_index = palloc->level2.root.idx;
+ stag_info->chunk_size = 3;
+ }
+ } else {
+ stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
+ }
+
+ cqp_info->cqp_cmd = IRDMA_OP_MR_REG_NON_SHARED;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
+ ret = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return ret;
+}
+
+static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ u32 stag;
+ u8 lvl;
+ int err;
+
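+	/* A single-page MR needs no PBL; otherwise request either PBL level. */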
+ lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
+
+ err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
+ if (err)
+ return err;
+
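+	/*
+	 * irdma_check_mr_contiguous() returning true means the pages are
+	 * physically contiguous, so the PBL can be dropped and the MR
+	 * registered by its base address.
+	 */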
+ if (lvl) {
+ err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
+ iwmr->page_size);
+ if (err) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+ iwpbl->pbl_allocated = false;
+ }
+ }
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ err = -ENOMEM;
+ goto free_pble;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ err = irdma_hwreg_mr(iwdev, iwmr, access);
+ if (err)
+ goto err_hwreg;
+
+ return 0;
+
+err_hwreg:
+ irdma_free_stag(iwdev, stag);
+
+free_pble:
+ if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+
+ return err;
+}
+
+static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
+ struct ib_pd *pd, u64 virt,
+ enum irdma_memreg_type reg_type)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ unsigned long pgsz_bitmap;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->region = region;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwmr->ibmr.iova = virt;
+ iwmr->type = reg_type;
+
+ pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
+ iwdev->rf->sc_dev.hw_attrs.page_size_cap : SZ_4K;
+
+ iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
+ if (unlikely(!iwmr->page_size)) {
+ kfree(iwmr);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ iwmr->len = region->length;
+ iwpbl->user_base = virt;
+ iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
+
+ return iwmr;
+}
+
+static void irdma_free_iwmr(struct irdma_mr *iwmr)
+{
+ kfree(iwmr);
+}
+
+static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext = NULL;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
+ /* iWarp: Catch page not starting on OS page boundary */
+ if (!rdma_protocol_roce(&iwdev->ibdev, 1) &&
+ ib_umem_offset(iwmr->region))
+ return -EINVAL;
+
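+	/* The extra page beyond the SQ and RQ pages holds the shadow area. */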
+ total = req.sq_pages + req.rq_pages + 1;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ total = req.sq_pages + req.rq_pages;
+ lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
+static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext = NULL;
+ u8 shadow_pgcnt = 1;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
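+	/*
+	 * Devices with CQ resize support register the shadow area as a
+	 * separate buffer, so no shadow page is expected in this buffer.
+	 */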
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+ shadow_pgcnt = 0;
+ total = req.cq_pages + shadow_pgcnt;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
+/**
+ * irdma_reg_user_mr - Register a user memory region
+ * @pd: ptr of pd
+ * @start: virtual start address
+ * @len: length of mr
+ * @virt: virtual address
+ * @access: access of mr
+ * @udata: user data
+ */
+static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
+ u64 virt, int access,
+ struct ib_udata *udata)
+{
+#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_mem_reg_req req = {};
+ struct ib_umem *region = NULL;
+ struct irdma_mr *iwmr = NULL;
+ int err;
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ if (udata->inlen < IRDMA_MEM_REG_MIN_REQ_LEN)
+ return ERR_PTR(-EINVAL);
+
+ region = ib_umem_get(pd->device, start, len, access);
+
+ if (IS_ERR(region)) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: Failed to create ib_umem region\n");
+ return (struct ib_mr *)region;
+ }
+
+ if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen))) {
+ ib_umem_release(region);
+ return ERR_PTR(-EFAULT);
+ }
+
+ iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
+ if (IS_ERR(iwmr)) {
+ ib_umem_release(region);
+ return (struct ib_mr *)iwmr;
+ }
+
+ switch (req.reg_type) {
+ case IRDMA_MEMREG_TYPE_QP:
+ err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
+ if (err)
+ goto error;
+
+ break;
+ case IRDMA_MEMREG_TYPE_CQ:
+ err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
+ if (err)
+ goto error;
+ break;
+ case IRDMA_MEMREG_TYPE_MEM:
+ err = irdma_reg_user_mr_type_mem(iwmr, access);
+ if (err)
+ goto error;
+
+ break;
+ default:
+ err = -EINVAL;
+ goto error;
+ }
+
+ return &iwmr->ibmr;
+error:
+ ib_umem_release(region);
+ irdma_free_iwmr(iwmr);
+
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
+ u64 len, u64 virt,
+ int fd, int access,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct irdma_mr *iwmr;
+ int err;
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
+ if (IS_ERR(umem_dmabuf)) {
+ err = PTR_ERR(umem_dmabuf);
+ ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+ return ERR_PTR(err);
+ }
+
+ iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
+ if (IS_ERR(iwmr)) {
+ err = PTR_ERR(iwmr);
+ goto err_release;
+ }
+
+ err = irdma_reg_user_mr_type_mem(iwmr, access);
+ if (err)
+ goto err_iwmr;
+
+ return &iwmr->ibmr;
+
+err_iwmr:
+ irdma_free_iwmr(iwmr);
+
+err_release:
+ ib_umem_release(&umem_dmabuf->umem);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * irdma_reg_phys_mr - register kernel physical memory
+ * @pd: ibpd pointer
+ * @addr: physical address of memory to register
+ * @size: size of memory to register
+ * @access: Access rights
+ * @iova_start: start of virtual address for physical buffers
+ */
+struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
+ u64 *iova_start)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_pbl *iwpbl;
+ struct irdma_mr *iwmr;
+ u32 stag;
+ int ret;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ iwpbl->user_base = *iova_start;
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.iova = *iova_start;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ iwmr->page_cnt = 1;
+ iwmr->pgaddrmem[0] = addr;
+ iwmr->len = size;
+ iwmr->page_size = SZ_4K;
+ ret = irdma_hwreg_mr(iwdev, iwmr, access);
+ if (ret) {
+ irdma_free_stag(iwdev, stag);
+ goto err;
+ }
+
+ return &iwmr->ibmr;
+
+err:
+ kfree(iwmr);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * irdma_get_dma_mr - register physical mem
+ * @pd: ptr of pd
+ * @acc: access for memory
+ */
+static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ u64 kva = 0;
+
+ return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
+}
+
+/**
+ * irdma_del_memlist - Deleting pbl list entries for CQ/QP
+ * @iwmr: iwmr for IB's user page addresses
+ * @ucontext: ptr to user context
+ */
+static void irdma_del_memlist(struct irdma_mr *iwmr,
+ struct irdma_ucontext *ucontext)
+{
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ unsigned long flags;
+
+ switch (iwmr->type) {
+ case IRDMA_MEMREG_TYPE_CQ:
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+ break;
+ case IRDMA_MEMREG_TYPE_QP:
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * irdma_dereg_mr - deregister mr
+ * @ib_mr: mr ptr for dereg
+ * @udata: user data
+ */
+static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
+{
+ struct ib_pd *ibpd = ib_mr->pd;
+ struct irdma_pd *iwpd = to_iwpd(ibpd);
+ struct irdma_mr *iwmr = to_iwmr(ib_mr);
+ struct irdma_device *iwdev = to_iwdev(ib_mr->device);
+ struct irdma_dealloc_stag_info *info;
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
+ if (iwmr->region) {
+ struct irdma_ucontext *ucontext;
+
+ ucontext = rdma_udata_to_drv_context(udata,
+ struct irdma_ucontext,
+ ibucontext);
+ irdma_del_memlist(iwmr, ucontext);
+ }
+ goto done;
+ }
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.dealloc_stag.info;
+ memset(info, 0, sizeof(*info));
+ info->pd_id = iwpd->sc_pd.pd_id;
+ info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
+ info->mr = true;
+ if (iwpbl->pbl_allocated)
+ info->dealloc_pbl = true;
+
+ cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
+ cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ return status;
+
+ irdma_free_stag(iwdev, iwmr->stag);
+done:
+ if (iwpbl->pbl_allocated)
+ irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
+ ib_umem_release(iwmr->region);
+ kfree(iwmr);
+
+ return 0;
+}
+
+/**
+ * irdma_post_send - post send wrs for a kernel application
+ * @ibqp: qp ptr for wr
+ * @ib_wr: work request ptr
+ * @bad_wr: return of bad wr if err
+ */
+static int irdma_post_send(struct ib_qp *ibqp,
+ const struct ib_send_wr *ib_wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_qp_uk *ukqp;
+ struct irdma_sc_dev *dev;
+ struct irdma_post_sq_info info;
+ int err = 0;
+ unsigned long flags;
+ bool inv_stag;
+ struct irdma_ah *ah;
+
+ iwqp = to_iwqp(ibqp);
+ ukqp = &iwqp->sc_qp.qp_uk;
+ dev = &iwqp->iwdev->rf->sc_dev;
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ while (ib_wr) {
+ memset(&info, 0, sizeof(info));
+ inv_stag = false;
+ info.wr_id = (ib_wr->wr_id);
+ if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
+ info.signaled = true;
+ if (ib_wr->send_flags & IB_SEND_FENCE)
+ info.read_fence = true;
+ switch (ib_wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
+ info.imm_data_valid = true;
+ info.imm_data = ntohl(ib_wr->ex.imm_data);
+ } else {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_INV:
+ if (ib_wr->opcode == IB_WR_SEND ||
+ ib_wr->opcode == IB_WR_SEND_WITH_IMM) {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_SEND_SOL;
+ else
+ info.op_type = IRDMA_OP_TYPE_SEND;
+ } else {
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_SEND_SOL_INV;
+ else
+ info.op_type = IRDMA_OP_TYPE_SEND_INV;
+ info.stag_to_inv = ib_wr->ex.invalidate_rkey;
+ }
+
+ info.op.send.num_sges = ib_wr->num_sge;
+ info.op.send.sg_list = ib_wr->sg_list;
+ if (iwqp->ibqp.qp_type == IB_QPT_UD ||
+ iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ ah = to_iwah(ud_wr(ib_wr)->ah);
+ info.op.send.ah_id = ah->sc_ah.ah_info.ah_idx;
+ info.op.send.qkey = ud_wr(ib_wr)->remote_qkey;
+ info.op.send.dest_qp = ud_wr(ib_wr)->remote_qpn;
+ }
+
+ if (ib_wr->send_flags & IB_SEND_INLINE)
+ err = irdma_uk_inline_send(ukqp, &info, false);
+ else
+ err = irdma_uk_send(ukqp, &info, false);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (ukqp->qp_caps & IRDMA_WRITE_WITH_IMM) {
+ info.imm_data_valid = true;
+ info.imm_data = ntohl(ib_wr->ex.imm_data);
+ } else {
+ err = -EINVAL;
+ break;
+ }
+ fallthrough;
+ case IB_WR_RDMA_WRITE:
+ if (ib_wr->send_flags & IB_SEND_SOLICITED)
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE_SOL;
+ else
+ info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
+
+ info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+ info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
+ info.op.rdma_write.rem_addr.addr =
+ rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
+ if (ib_wr->send_flags & IB_SEND_INLINE)
+ err = irdma_uk_inline_rdma_write(ukqp, &info, false);
+ else
+ err = irdma_uk_rdma_write(ukqp, &info, false);
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ inv_stag = true;
+ fallthrough;
+ case IB_WR_RDMA_READ:
+ if (ib_wr->num_sge >
+ dev->hw_attrs.uk_attrs.max_hw_read_sges) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_RDMA_READ;
+ info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
+ err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
+ break;
+ case IB_WR_LOCAL_INV:
+ info.op_type = IRDMA_OP_TYPE_INV_STAG;
+ info.local_fence = info.read_fence;
+ info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
+ err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
+ break;
+ case IB_WR_REG_MR: {
+ struct irdma_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
+ struct irdma_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
+ struct irdma_fast_reg_stag_info stag_info = {};
+
+ stag_info.signaled = info.signaled;
+ stag_info.read_fence = info.read_fence;
+ stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
+ stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
+ stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
+ stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
+ stag_info.wr_id = ib_wr->wr_id;
+ stag_info.addr_type = IRDMA_ADDR_TYPE_VA_BASED;
+ stag_info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
+ stag_info.total_len = iwmr->ibmr.length;
+ stag_info.reg_addr_pa = *palloc->level1.addr;
+ stag_info.first_pm_pbl_index = palloc->level1.idx;
+ stag_info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
+ if (iwmr->npages > IRDMA_MIN_PAGES_PER_FMR)
+ stag_info.chunk_size = 1;
+ err = irdma_sc_mr_fast_register(&iwqp->sc_qp, &stag_info,
+ true);
+ break;
+ }
+ default:
+ err = -EINVAL;
+ ibdev_dbg(&iwqp->iwdev->ibdev,
+ "VERBS: upost_send bad opcode = 0x%x\n",
+ ib_wr->opcode);
+ break;
+ }
+
+ if (err)
+ break;
+ ib_wr = ib_wr->next;
+ }
+
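+	/*
+	 * Ring the doorbell only while the QP is still operational; once a
+	 * flush has been issued, kick the deferred flush work so the posted
+	 * WRs get flush completions.
+	 */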
+ if (!iwqp->flush_issued) {
+ if (iwqp->hw_iwarp_state <= IRDMA_QP_STATE_RTS)
+ irdma_uk_qp_post_wr(ukqp);
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ } else {
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+ }
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
+
+/**
+ * irdma_post_recv - post receive wr for kernel application
+ * @ibqp: ib qp pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: bad wr caused an error
+ */
+static int irdma_post_recv(struct ib_qp *ibqp,
+ const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct irdma_qp *iwqp;
+ struct irdma_qp_uk *ukqp;
+ struct irdma_post_rq_info post_recv = {};
+ unsigned long flags;
+ int err = 0;
+
+ iwqp = to_iwqp(ibqp);
+ ukqp = &iwqp->sc_qp.qp_uk;
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ while (ib_wr) {
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ post_recv.sg_list = ib_wr->sg_list;
+ err = irdma_uk_post_receive(ukqp, &post_recv);
+ if (err) {
+ ibdev_dbg(&iwqp->iwdev->ibdev,
+ "VERBS: post_recv err %d\n", err);
+ goto out;
+ }
+
+ ib_wr = ib_wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ if (iwqp->flush_issued)
+ mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
+ msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
+
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
+
+/**
+ * irdma_flush_err_to_ib_wc_status - convert flush error code to IB WC status
+ * @opcode: iwarp flush code
+ */
+static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode opcode)
+{
+ switch (opcode) {
+ case FLUSH_PROT_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case FLUSH_REM_ACCESS_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case FLUSH_LOC_QP_OP_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case FLUSH_REM_OP_ERR:
+ return IB_WC_REM_OP_ERR;
+ case FLUSH_LOC_LEN_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case FLUSH_GENERAL_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case FLUSH_RETRY_EXC_ERR:
+ return IB_WC_RETRY_EXC_ERR;
+ case FLUSH_MW_BIND_ERR:
+ return IB_WC_MW_BIND_ERR;
+ case FLUSH_REM_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case FLUSH_FATAL_ERR:
+ default:
+ return IB_WC_FATAL_ERR;
+ }
+}
+
+/**
+ * irdma_process_cqe - process cqe info
+ * @entry: ib_wc entry to fill in
+ * @cq_poll_info: cqe info
+ */
+static void irdma_process_cqe(struct ib_wc *entry,
+ struct irdma_cq_poll_info *cq_poll_info)
+{
+ struct irdma_sc_qp *qp;
+
+ entry->wc_flags = 0;
+ entry->pkey_index = 0;
+ entry->wr_id = cq_poll_info->wr_id;
+
+ qp = cq_poll_info->qp_handle;
+ entry->qp = qp->qp_uk.back_qp;
+
+ if (cq_poll_info->error) {
+ entry->status = (cq_poll_info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) ?
+ irdma_flush_err_to_ib_wc_status(cq_poll_info->minor_err) : IB_WC_GENERAL_ERR;
+
+ entry->vendor_err = cq_poll_info->major_err << 16 |
+ cq_poll_info->minor_err;
+ } else {
+ entry->status = IB_WC_SUCCESS;
+ if (cq_poll_info->imm_valid) {
+ entry->ex.imm_data = htonl(cq_poll_info->imm_data);
+ entry->wc_flags |= IB_WC_WITH_IMM;
+ }
+ if (cq_poll_info->ud_smac_valid) {
+ ether_addr_copy(entry->smac, cq_poll_info->ud_smac);
+ entry->wc_flags |= IB_WC_WITH_SMAC;
+ }
+
+ if (cq_poll_info->ud_vlan_valid) {
+ u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
+ entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+ if (vlan) {
+ entry->vlan_id = vlan;
+ entry->wc_flags |= IB_WC_WITH_VLAN;
+ }
+ } else {
+ entry->sl = 0;
+ }
+ }
+
+ if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
+ set_ib_wc_op_sq(cq_poll_info, entry);
+ } else {
+ set_ib_wc_op_rq(cq_poll_info, entry,
+ qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
+ if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
+ cq_poll_info->stag_invalid_set) {
+ entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
+ entry->wc_flags |= IB_WC_WITH_INVALIDATE;
+ }
+ }
+
+ if (qp->qp_uk.qp_type == IRDMA_QP_TYPE_ROCE_UD) {
+ entry->src_qp = cq_poll_info->ud_src_qpn;
+ entry->slid = 0;
+ entry->wc_flags |=
+ (IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE);
+ entry->network_hdr_type = cq_poll_info->ipv4 ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ } else {
+ entry->src_qp = cq_poll_info->qp_id;
+ }
+
+ entry->byte_len = cq_poll_info->bytes_xfered;
+}
+
+/**
+ * irdma_poll_one - poll one entry of the CQ
+ * @ukcq: ukcq to poll
+ * @cur_cqe: current CQE info to be filled in
+ * @entry: ib_wc object to be filled for non-extended CQ or NULL for extended CQ
+ *
+ * Returns the internal irdma device error code or 0 on success
+ */
+static inline int irdma_poll_one(struct irdma_cq_uk *ukcq,
+ struct irdma_cq_poll_info *cur_cqe,
+ struct ib_wc *entry)
+{
+ int ret = irdma_uk_cq_poll_cmpl(ukcq, cur_cqe);
+
+ if (ret)
+ return ret;
+
+ irdma_process_cqe(entry, cur_cqe);
+
+ return 0;
+}
+
+/**
+ * __irdma_poll_cq - poll cq for completion (kernel apps)
+ * @iwcq: cq to poll
+ * @num_entries: number of entries to poll
+ * @entry: array of ib_wc entries to fill
+ */
+static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc *entry)
+{
+ struct list_head *tmp_node, *list_node;
+ struct irdma_cq_buf *last_buf = NULL;
+ struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
+ struct irdma_cq_buf *cq_buf;
+ int ret;
+ struct irdma_device *iwdev;
+ struct irdma_cq_uk *ukcq;
+ bool cq_new_cqe = false;
+ int resized_bufs = 0;
+ int npolled = 0;
+
+ iwdev = to_iwdev(iwcq->ibcq.device);
+ ukcq = &iwcq->sc_cq.cq_uk;
+
+ /* go through the list of previously resized CQ buffers */
+ list_for_each_safe(list_node, tmp_node, &iwcq->resize_list) {
+ cq_buf = container_of(list_node, struct irdma_cq_buf, list);
+ while (npolled < num_entries) {
+ ret = irdma_poll_one(&cq_buf->cq_uk, cur_cqe, entry + npolled);
+ if (!ret) {
+ ++npolled;
+ cq_new_cqe = true;
+ continue;
+ }
+ if (ret == -ENOENT)
+ break;
+ /* QP using the CQ is destroyed. Skip reporting this CQE */
+ if (ret == -EFAULT) {
+ cq_new_cqe = true;
+ continue;
+ }
+ goto error;
+ }
+
+ /* save the resized CQ buffer which received the last cqe */
+ if (cq_new_cqe)
+ last_buf = cq_buf;
+ cq_new_cqe = false;
+ }
+
+ /* check the current CQ for new cqes */
+ while (npolled < num_entries) {
+ ret = irdma_poll_one(ukcq, cur_cqe, entry + npolled);
+ if (ret == -ENOENT) {
+ ret = irdma_generated_cmpls(iwcq, cur_cqe);
+ if (!ret)
+ irdma_process_cqe(entry + npolled, cur_cqe);
+ }
+ if (!ret) {
+ ++npolled;
+ cq_new_cqe = true;
+ continue;
+ }
+
+ if (ret == -ENOENT)
+ break;
+ /* QP using the CQ is destroyed. Skip reporting this CQE */
+ if (ret == -EFAULT) {
+ cq_new_cqe = true;
+ continue;
+ }
+ goto error;
+ }
+
+ if (cq_new_cqe)
+ /* all previous CQ resizes are complete */
+ resized_bufs = irdma_process_resize_list(iwcq, iwdev, NULL);
+ else if (last_buf)
+ /* only CQ resizes up to the last_buf are complete */
+ resized_bufs = irdma_process_resize_list(iwcq, iwdev, last_buf);
+ if (resized_bufs)
+ /* report to the HW the number of complete CQ resizes */
+ irdma_uk_cq_set_resized_cnt(ukcq, resized_bufs);
+
+ return npolled;
+error:
+ ibdev_dbg(&iwdev->ibdev, "%s: Error polling CQ, irdma_err: %d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/**
+ * irdma_poll_cq - poll cq for completion (kernel apps)
+ * @ibcq: cq to poll
+ * @num_entries: number of entries to poll
+ * @entry: array of ib_wc entries to fill
+ */
+static int irdma_poll_cq(struct ib_cq *ibcq, int num_entries,
+ struct ib_wc *entry)
+{
+ struct irdma_cq *iwcq;
+ unsigned long flags;
+ int ret;
+
+ iwcq = to_iwcq(ibcq);
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ ret = __irdma_poll_cq(iwcq, num_entries, entry);
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ return ret;
+}
+
+/**
+ * irdma_req_notify_cq - arm the cq for a kernel application
+ * @ibcq: cq to arm
+ * @notify_flags: notification flags
+ */
+static int irdma_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags notify_flags)
+{
+ struct irdma_cq *iwcq;
+ struct irdma_cq_uk *ukcq;
+ unsigned long flags;
+ enum irdma_cmpl_notify cq_notify;
+ bool promo_event = false;
+ int ret = 0;
+
+ cq_notify = notify_flags == IB_CQ_SOLICITED ?
+ IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
+ iwcq = to_iwcq(ibcq);
+ ukcq = &iwcq->sc_cq.cq_uk;
+
+ spin_lock_irqsave(&iwcq->lock, flags);
+ /* Only promote to arm the CQ for any event if the last arm event was solicited. */
+ if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
+ promo_event = true;
+
+ if (!atomic_cmpxchg(&iwcq->armed, 0, 1) || promo_event) {
+ iwcq->last_notify = cq_notify;
+ irdma_uk_cq_request_notification(ukcq, cq_notify);
+ }
+
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
+ ret = 1;
+ spin_unlock_irqrestore(&iwcq->lock, flags);
+
+ return ret;
+}
+
+static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static const struct rdma_stat_desc irdma_hw_stat_names[] = {
+ /* gen1 - 32-bit */
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
+ [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
+ /* gen1 - 64-bit */
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets",
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS].name = "ip4InPkts",
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS].name = "ip4InReasmRqd",
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS].name = "ip4InMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS].name = "ip4OutOctets",
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS].name = "ip4OutPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS].name = "ip4OutSegRqd",
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS].name = "ip4OutMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS].name = "ip6InOctets",
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS].name = "ip6InPkts",
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS].name = "ip6InReasmRqd",
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS].name = "ip6InMcastPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS].name = "ip6OutOctets",
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd",
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts",
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs",
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs",
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd",
+ [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv",
+
+ /* gen2 - 32-bit */
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
+ /* gen2 - 64-bit */
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS].name = "ip4InMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS].name = "ip4OutMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS].name = "ip6InMcastOctets",
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS].name = "ip6OutMcastOctets",
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP",
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP",
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd",
+
+};
+
+static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
+{
+ struct irdma_device *iwdev = to_iwdev(dev);
+
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u",
+ irdma_fw_major_ver(&iwdev->rf->sc_dev),
+ irdma_fw_minor_ver(&iwdev->rf->sc_dev));
+}
+
+/**
+ * irdma_alloc_hw_port_stats - Allocate a hw stats structure
+ * @ibdev: device pointer from stack
+ * @port_num: port number
+ */
+static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
+ u32 port_num)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
+
+ int num_counters = dev->hw_attrs.max_stat_idx;
+ unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
+
+ return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
+ lifespan);
+}
+
+/**
+ * irdma_get_hw_stats - Populates the rdma_hw_stats structure
+ * @ibdev: device pointer from stack
+ * @stats: stats pointer from stack
+ * @port_num: port number
+ * @index: which hw counter the stack is requesting we update
+ */
+static int irdma_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats, u32 port_num,
+ int index)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+ struct irdma_dev_hw_stats *hw_stats = &iwdev->vsi.pestat->hw_stats;
+
+ if (iwdev->rf->rdma_ver >= IRDMA_GEN_2)
+ irdma_cqp_gather_stats_cmd(&iwdev->rf->sc_dev, iwdev->vsi.pestat, true);
+ else
+ irdma_cqp_gather_stats_gen1(&iwdev->rf->sc_dev, iwdev->vsi.pestat);
+
+ memcpy(&stats->value[0], hw_stats, sizeof(u64) * stats->num_counters);
+
+ return stats->num_counters;
+}
+
+/**
+ * irdma_query_gid - Query port GID
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: Entry index
+ * @gid: Global ID
+ */
+static int irdma_query_gid(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+
+ memset(gid->raw, 0, sizeof(gid->raw));
+ ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
+
+ return 0;
+}
+
+/**
+ * mcast_list_add - Add a new mcast item to list
+ * @rf: RDMA PCI function
+ * @new_elem: pointer to element to add
+ */
+static void mcast_list_add(struct irdma_pci_f *rf,
+ struct mc_table_list *new_elem)
+{
+ list_add(&new_elem->list, &rf->mc_qht_list.list);
+}
+
+/**
+ * mcast_list_del - Remove an mcast item from list
+ * @mc_qht_elem: pointer to mcast table list element
+ */
+static void mcast_list_del(struct mc_table_list *mc_qht_elem)
+{
+ if (mc_qht_elem)
+ list_del(&mc_qht_elem->list);
+}
+
+/**
+ * mcast_list_lookup_ip - Search mcast list for address
+ * @rf: RDMA PCI function
+ * @ip_mcast: pointer to mcast IP address
+ */
+static struct mc_table_list *mcast_list_lookup_ip(struct irdma_pci_f *rf,
+ u32 *ip_mcast)
+{
+ struct mc_table_list *mc_qht_el;
+ struct list_head *pos, *q;
+
+ list_for_each_safe (pos, q, &rf->mc_qht_list.list) {
+ mc_qht_el = list_entry(pos, struct mc_table_list, list);
+ if (!memcmp(mc_qht_el->mc_info.dest_ip, ip_mcast,
+ sizeof(mc_qht_el->mc_info.dest_ip)))
+ return mc_qht_el;
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_mcast_cqp_op - perform a mcast cqp operation
+ * @iwdev: irdma device
+ * @mc_grp_ctx: mcast group info
+ * @op: operation
+ *
+ * returns error status
+ */
+static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
+ struct irdma_mcast_grp_info *mc_grp_ctx, u8 op)
+{
+ struct cqp_cmds_info *cqp_info;
+ struct irdma_cqp_request *cqp_request;
+ int status;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_request->info.in.u.mc_create.info = *mc_grp_ctx;
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = op;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.mc_create.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.mc_create.cqp = &iwdev->rf->cqp.sc_cqp;
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+
+ return status;
+}
+
+/**
+ * irdma_mcast_mac - Get the multicast MAC for an IP address
+ * @ip_addr: IPv4 or IPv6 address
+ * @mac: pointer to result MAC address
+ * @ipv4: flag indicating IPv4 or IPv6
+ */
+void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4)
+{
+ u8 *ip = (u8 *)ip_addr;
+
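+	/*
+	 * Standard multicast MAC mapping: prefix 01:00:5E plus the low
+	 * 23 bits of the IPv4 group address (e.g. 239.1.2.3 ->
+	 * 01:00:5e:01:02:03), or prefix 33:33 plus the low 32 bits of the
+	 * IPv6 group address.
+	 */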
+ if (ipv4) {
+ unsigned char mac4[ETH_ALEN] = {0x01, 0x00, 0x5E, 0x00,
+ 0x00, 0x00};
+
+ mac4[3] = ip[2] & 0x7F;
+ mac4[4] = ip[1];
+ mac4[5] = ip[0];
+ ether_addr_copy(mac, mac4);
+ } else {
+ unsigned char mac6[ETH_ALEN] = {0x33, 0x33, 0x00, 0x00,
+ 0x00, 0x00};
+
+ mac6[2] = ip[3];
+ mac6[3] = ip[2];
+ mac6[4] = ip[1];
+ mac6[5] = ip[0];
+ ether_addr_copy(mac, mac6);
+ }
+}
+
+/**
+ * irdma_attach_mcast - attach a qp to a multicast group
+ * @ibqp: ptr to qp
+ * @ibgid: pointer to global ID
+ * @lid: local ID
+ *
+ * returns error status
+ */
+static int irdma_attach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct mc_table_list *mc_qht_elem;
+ struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
+ unsigned long flags;
+ u32 ip_addr[4] = {};
+ u32 mgn;
+ u32 no_mgs;
+ int ret = 0;
+ bool ipv4;
+ u16 vlan_id;
+ union irdma_sockaddr sgid_addr;
+ unsigned char dmac[ETH_ALEN];
+
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
+
+ if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
+ irdma_copy_ip_ntohl(ip_addr,
+ sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ irdma_get_vlan_mac_ipv6(ip_addr, &vlan_id, NULL);
+ ipv4 = false;
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: qp_id=%d, IP6address=%pI6\n", ibqp->qp_num,
+ ip_addr);
+ irdma_mcast_mac(ip_addr, dmac, false);
+ } else {
+ ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
+ ipv4 = true;
+ vlan_id = irdma_get_vlan_ipv4(ip_addr);
+ irdma_mcast_mac(ip_addr, dmac, true);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: qp_id=%d, IP4address=%pI4, MAC=%pM\n",
+ ibqp->qp_num, ip_addr, dmac);
+ }
+
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
+ if (!mc_qht_elem) {
+ struct irdma_dma_mem *dma_mem_mc;
+
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ mc_qht_elem = kzalloc(sizeof(*mc_qht_elem), GFP_KERNEL);
+ if (!mc_qht_elem)
+ return -ENOMEM;
+
+ mc_qht_elem->mc_info.ipv4_valid = ipv4;
+ memcpy(mc_qht_elem->mc_info.dest_ip, ip_addr,
+ sizeof(mc_qht_elem->mc_info.dest_ip));
+ ret = irdma_alloc_rsrc(rf, rf->allocated_mcgs, rf->max_mcg,
+ &mgn, &rf->next_mcg);
+ if (ret) {
+ kfree(mc_qht_elem);
+ return -ENOMEM;
+ }
+
+ mc_qht_elem->mc_info.mgn = mgn;
+ dma_mem_mc = &mc_qht_elem->mc_grp_ctx.dma_mem_mc;
+ dma_mem_mc->size = ALIGN(sizeof(u64) * IRDMA_MAX_MGS_PER_CTX,
+ IRDMA_HW_PAGE_SIZE);
+ dma_mem_mc->va = dma_alloc_coherent(rf->hw.device,
+ dma_mem_mc->size,
+ &dma_mem_mc->pa,
+ GFP_KERNEL);
+ if (!dma_mem_mc->va) {
+ irdma_free_rsrc(rf, rf->allocated_mcgs, mgn);
+ kfree(mc_qht_elem);
+ return -ENOMEM;
+ }
+
+ mc_qht_elem->mc_grp_ctx.mg_id = (u16)mgn;
+ memcpy(mc_qht_elem->mc_grp_ctx.dest_ip_addr, ip_addr,
+ sizeof(mc_qht_elem->mc_grp_ctx.dest_ip_addr));
+ mc_qht_elem->mc_grp_ctx.ipv4_valid = ipv4;
+ mc_qht_elem->mc_grp_ctx.vlan_id = vlan_id;
+ if (vlan_id < VLAN_N_VID)
+ mc_qht_elem->mc_grp_ctx.vlan_valid = true;
+ mc_qht_elem->mc_grp_ctx.hmc_fcn_id = iwdev->rf->sc_dev.hmc_fn_id;
+ mc_qht_elem->mc_grp_ctx.qs_handle =
+ iwqp->sc_qp.vsi->qos[iwqp->sc_qp.user_pri].qs_handle;
+ ether_addr_copy(mc_qht_elem->mc_grp_ctx.dest_mac_addr, dmac);
+
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mcast_list_add(rf, mc_qht_elem);
+ } else {
+ if (mc_qht_elem->mc_grp_ctx.no_of_mgs ==
+ IRDMA_MAX_MGS_PER_CTX) {
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ mcg_info.qp_id = iwqp->ibqp.qp_num;
+ no_mgs = mc_qht_elem->mc_grp_ctx.no_of_mgs;
+ irdma_sc_add_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+
+ /* Only if there is a change do we need to modify or create */
+ if (!no_mgs) {
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_CREATE);
+ } else if (no_mgs != mc_qht_elem->mc_grp_ctx.no_of_mgs) {
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_MODIFY);
+ } else {
+ return 0;
+ }
+
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
+ if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
+ mcast_list_del(mc_qht_elem);
+ dma_free_coherent(rf->hw.device,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
+ irdma_free_rsrc(rf, rf->allocated_mcgs,
+ mc_qht_elem->mc_grp_ctx.mg_id);
+ kfree(mc_qht_elem);
+ }
+
+ return ret;
+}
+
+/**
+ * irdma_detach_mcast - detach a qp from a multicast group
+ * @ibqp: ptr to qp
+ * @ibgid: pointer to global ID
+ * @lid: local ID
+ *
+ * returns error status
+ */
+static int irdma_detach_mcast(struct ib_qp *ibqp, union ib_gid *ibgid, u16 lid)
+{
+ struct irdma_qp *iwqp = to_iwqp(ibqp);
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ u32 ip_addr[4] = {};
+ struct mc_table_list *mc_qht_elem;
+ struct irdma_mcast_grp_ctx_entry_info mcg_info = {};
+ int ret;
+ unsigned long flags;
+ union irdma_sockaddr sgid_addr;
+
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, ibgid);
+ if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid))
+ irdma_copy_ip_ntohl(ip_addr,
+ sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ else
+ ip_addr[0] = ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
+
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mc_qht_elem = mcast_list_lookup_ip(rf, ip_addr);
+ if (!mc_qht_elem) {
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: address not found MCG\n");
+ return 0;
+ }
+
+ mcg_info.qp_id = iwqp->ibqp.qp_num;
+ irdma_sc_del_mcast_grp(&mc_qht_elem->mc_grp_ctx, &mcg_info);
+ if (!mc_qht_elem->mc_grp_ctx.no_of_mgs) {
+ mcast_list_del(mc_qht_elem);
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_DESTROY);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: failed MC_DESTROY MCG\n");
+ spin_lock_irqsave(&rf->qh_list_lock, flags);
+ mcast_list_add(rf, mc_qht_elem);
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ return -EAGAIN;
+ }
+
+ dma_free_coherent(rf->hw.device,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.size,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va,
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.pa);
+ mc_qht_elem->mc_grp_ctx.dma_mem_mc.va = NULL;
+ irdma_free_rsrc(rf, rf->allocated_mcgs,
+ mc_qht_elem->mc_grp_ctx.mg_id);
+ kfree(mc_qht_elem);
+ } else {
+ spin_unlock_irqrestore(&rf->qh_list_lock, flags);
+ ret = irdma_mcast_cqp_op(iwdev, &mc_qht_elem->mc_grp_ctx,
+ IRDMA_OP_MC_MODIFY);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev,
+ "VERBS: failed Modify MCG\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int irdma_create_hw_ah(struct irdma_device *iwdev, struct irdma_ah *ah, bool sleep)
+{
+ struct irdma_pci_f *rf = iwdev->rf;
+ int err;
+
+ err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah, &ah->sc_ah.ah_info.ah_idx,
+ &rf->next_ah);
+ if (err)
+ return err;
+
+ err = irdma_ah_cqp_op(rf, &ah->sc_ah, IRDMA_OP_AH_CREATE, sleep,
+ irdma_gsi_ud_qp_ah_cb, &ah->sc_ah);
+
+ if (err) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: CQP-OP Create AH fail");
+ goto err_ah_create;
+ }
+
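+	/*
+	 * When the caller cannot sleep, poll the CCQ directly until the AH
+	 * is marked valid by the CQP completion or the timeout expires.
+	 */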
+ if (!sleep) {
+ int cnt = CQP_COMPL_WAIT_TIME_MS * CQP_TIMEOUT_THRESHOLD;
+
+ do {
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+ mdelay(1);
+ } while (!ah->sc_ah.ah_info.ah_valid && --cnt);
+
+ if (!cnt) {
+ ibdev_dbg(&iwdev->ibdev, "VERBS: CQP create AH timed out");
+ err = -ETIMEDOUT;
+ goto err_ah_create;
+ }
+ }
+ return 0;
+
+err_ah_create:
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah->sc_ah.ah_info.ah_idx);
+
+ return err;
+}
+
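+/**
+ * irdma_setup_ah - parse AH attributes and fill in the HW AH info
+ * @ibah: address handle
+ * @attr: address handle init attributes
+ *
+ * Resolves source/destination addresses, loopback and multicast handling,
+ * VLAN tag and destination MAC (adding an ARP entry) before HW AH creation.
+ */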
+static int irdma_setup_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr)
+{
+ struct irdma_pd *pd = to_iwpd(ibah->pd);
+ struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+ struct rdma_ah_attr *ah_attr = attr->ah_attr;
+ const struct ib_gid_attr *sgid_attr;
+ struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_sc_ah *sc_ah;
+ struct irdma_ah_info *ah_info;
+ union irdma_sockaddr sgid_addr, dgid_addr;
+ int err;
+ u8 dmac[ETH_ALEN];
+
+ ah->pd = pd;
+ sc_ah = &ah->sc_ah;
+ sc_ah->ah_info.vsi = &iwdev->vsi;
+ irdma_sc_init_ah(&rf->sc_dev, sc_ah);
+ ah->sgid_index = ah_attr->grh.sgid_index;
+ sgid_attr = ah_attr->grh.sgid_attr;
+ memcpy(&ah->dgid, &ah_attr->grh.dgid, sizeof(ah->dgid));
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid_attr->gid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &ah_attr->grh.dgid);
+ ah->av.attrs = *ah_attr;
+ ah->av.net_type = rdma_gid_attr_network_type(sgid_attr);
+ ah_info = &sc_ah->ah_info;
+ ah_info->pd_idx = pd->sc_pd.pd_id;
+ if (ah_attr->ah_flags & IB_AH_GRH) {
+ ah_info->flow_label = ah_attr->grh.flow_label;
+ ah_info->hop_ttl = ah_attr->grh.hop_limit;
+ ah_info->tc_tos = ah_attr->grh.traffic_class;
+ }
+
+ ether_addr_copy(dmac, ah_attr->roce.dmac);
+ if (ah->av.net_type == RDMA_NETWORK_IPV4) {
+ ah_info->ipv4_valid = true;
+ ah_info->dest_ip_addr[0] =
+ ntohl(dgid_addr.saddr_in.sin_addr.s_addr);
+ ah_info->src_ip_addr[0] =
+ ntohl(sgid_addr.saddr_in.sin_addr.s_addr);
+ ah_info->do_lpbk = irdma_ipv4_is_lpb(ah_info->src_ip_addr[0],
+ ah_info->dest_ip_addr[0]);
+ if (ipv4_is_multicast(dgid_addr.saddr_in.sin_addr.s_addr)) {
+ ah_info->do_lpbk = true;
+ irdma_mcast_mac(ah_info->dest_ip_addr, dmac, true);
+ }
+ } else {
+ irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
+ dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ irdma_copy_ip_ntohl(ah_info->src_ip_addr,
+ sgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32);
+ ah_info->do_lpbk = irdma_ipv6_is_lpb(ah_info->src_ip_addr,
+ ah_info->dest_ip_addr);
+ if (rdma_is_multicast_addr(&dgid_addr.saddr_in6.sin6_addr)) {
+ ah_info->do_lpbk = true;
+ irdma_mcast_mac(ah_info->dest_ip_addr, dmac, false);
+ }
+ }
+
+ err = rdma_read_gid_l2_fields(sgid_attr, &ah_info->vlan_tag,
+ ah_info->mac_addr);
+ if (err)
+ return err;
+
+ ah_info->dst_arpindex = irdma_add_arp(iwdev->rf, ah_info->dest_ip_addr,
+ ah_info->ipv4_valid, dmac);
+
+ if (ah_info->dst_arpindex == -1)
+ return -EINVAL;
+
+ if (ah_info->vlan_tag >= VLAN_N_VID && iwdev->dcb_vlan_mode)
+ ah_info->vlan_tag = 0;
+
+ if (ah_info->vlan_tag < VLAN_N_VID) {
+ u8 prio = rt_tos2priority(ah_info->tc_tos);
+
+ prio = irdma_roce_get_vlan_prio(sgid_attr, prio);
+
+ ah_info->vlan_tag |= (u16)prio << VLAN_PRIO_SHIFT;
+ ah_info->insert_vlan_tag = true;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_ah_exists - Check for existing identical AH
+ * @iwdev: irdma device
+ * @new_ah: AH to check for
+ *
+ * returns true if AH is found, false if not found.
+ */
+static bool irdma_ah_exists(struct irdma_device *iwdev,
+ struct irdma_ah *new_ah)
+{
+ struct irdma_ah *ah;
+ u32 key = new_ah->sc_ah.ah_info.dest_ip_addr[0] ^
+ new_ah->sc_ah.ah_info.dest_ip_addr[1] ^
+ new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
+ new_ah->sc_ah.ah_info.dest_ip_addr[3];
+
+ hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
+		/* Set ah_valid and ah_idx the same so memcmp can work */
+ new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
+ new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
+ if (!memcmp(&ah->sc_ah.ah_info, &new_ah->sc_ah.ah_info,
+ sizeof(ah->sc_ah.ah_info))) {
+ refcount_inc(&ah->refcnt);
+ new_ah->parent_ah = ah;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * irdma_destroy_ah - Destroy address handle
+ * @ibah: pointer to address handle
+ * @ah_flags: destroy flags, e.g. RDMA_DESTROY_AH_SLEEPABLE
+ */
+static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
+{
+ struct irdma_device *iwdev = to_iwdev(ibah->device);
+ struct irdma_ah *ah = to_iwah(ibah);
+
+ if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
+ mutex_lock(&iwdev->ah_tbl_lock);
+ if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
+ mutex_unlock(&iwdev->ah_tbl_lock);
+ return 0;
+ }
+ hash_del(&ah->parent_ah->list);
+ kfree(ah->parent_ah);
+ mutex_unlock(&iwdev->ah_tbl_lock);
+ }
+
+ irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
+ false, NULL, ah);
+
+ irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
+ ah->sc_ah.ah_info.ah_idx);
+
+ return 0;
+}
+
+/**
+ * irdma_create_user_ah - create user address handle
+ * @ibah: address handle
+ * @attr: address handle attributes
+ * @udata: User data
+ *
+ * returns 0 on success, error otherwise
+ */
+static int irdma_create_user_ah(struct ib_ah *ibah,
+ struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata)
+{
+#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
+ struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+ struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+ struct irdma_create_ah_resp uresp;
+ struct irdma_ah *parent_ah;
+ int err;
+
+ if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+ return -EINVAL;
+
+ err = irdma_setup_ah(ibah, attr);
+ if (err)
+ return err;
+ mutex_lock(&iwdev->ah_tbl_lock);
+ if (!irdma_ah_exists(iwdev, ah)) {
+ err = irdma_create_hw_ah(iwdev, ah, true);
+ if (err) {
+ mutex_unlock(&iwdev->ah_tbl_lock);
+ return err;
+ }
+ /* Add new AH to list */
+ parent_ah = kmemdup(ah, sizeof(*ah), GFP_KERNEL);
+ if (parent_ah) {
+ u32 key = parent_ah->sc_ah.ah_info.dest_ip_addr[0] ^
+ parent_ah->sc_ah.ah_info.dest_ip_addr[1] ^
+ parent_ah->sc_ah.ah_info.dest_ip_addr[2] ^
+ parent_ah->sc_ah.ah_info.dest_ip_addr[3];
+
+ ah->parent_ah = parent_ah;
+ hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
+ refcount_set(&parent_ah->refcnt, 1);
+ }
+ }
+ mutex_unlock(&iwdev->ah_tbl_lock);
+
+ uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
+ err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
+ if (err)
+ irdma_destroy_ah(ibah, attr->flags);
+
+ return err;
+}
+
+/**
+ * irdma_create_ah - create address handle
+ * @ibah: address handle
+ * @attr: address handle attributes
+ * @udata: NULL
+ *
+ * returns 0 on success, error otherwise
+ */
+static int irdma_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct irdma_ah *ah = container_of(ibah, struct irdma_ah, ibah);
+ struct irdma_device *iwdev = to_iwdev(ibah->pd->device);
+ int err;
+
+ err = irdma_setup_ah(ibah, attr);
+ if (err)
+ return err;
+ err = irdma_create_hw_ah(iwdev, ah, attr->flags & RDMA_CREATE_AH_SLEEPABLE);
+
+ return err;
+}
+
+/**
+ * irdma_query_ah - Query address handle
+ * @ibah: pointer to address handle
+ * @ah_attr: address handle attributes
+ */
+static int irdma_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct irdma_ah *ah = to_iwah(ibah);
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ if (ah->av.attrs.ah_flags & IB_AH_GRH) {
+ ah_attr->ah_flags = IB_AH_GRH;
+ ah_attr->grh.flow_label = ah->sc_ah.ah_info.flow_label;
+ ah_attr->grh.traffic_class = ah->sc_ah.ah_info.tc_tos;
+ ah_attr->grh.hop_limit = ah->sc_ah.ah_info.hop_ttl;
+ ah_attr->grh.sgid_index = ah->sgid_index;
+ memcpy(&ah_attr->grh.dgid, &ah->dgid,
+ sizeof(ah_attr->grh.dgid));
+ }
+
+ return 0;
+}
+
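+/* irdma ports are always Ethernet; both RoCEv2 and iWARP run over Ethernet */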
+static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
+ u32 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static const struct ib_device_ops irdma_roce_dev_ops = {
+ .attach_mcast = irdma_attach_mcast,
+ .create_ah = irdma_create_ah,
+ .create_user_ah = irdma_create_user_ah,
+ .destroy_ah = irdma_destroy_ah,
+ .detach_mcast = irdma_detach_mcast,
+ .get_link_layer = irdma_get_link_layer,
+ .get_port_immutable = irdma_roce_port_immutable,
+ .modify_qp = irdma_modify_qp_roce,
+ .query_ah = irdma_query_ah,
+ .query_pkey = irdma_query_pkey,
+};
+
+static const struct ib_device_ops irdma_iw_dev_ops = {
+ .get_port_immutable = irdma_iw_port_immutable,
+ .iw_accept = irdma_accept,
+ .iw_add_ref = irdma_qp_add_ref,
+ .iw_connect = irdma_connect,
+ .iw_create_listen = irdma_create_listen,
+ .iw_destroy_listen = irdma_destroy_listen,
+ .iw_get_qp = irdma_get_qp,
+ .iw_reject = irdma_reject,
+ .iw_rem_ref = irdma_qp_rem_ref,
+ .modify_qp = irdma_modify_qp,
+ .query_gid = irdma_query_gid,
+};
+
+static const struct ib_device_ops irdma_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_IRDMA,
+ .uverbs_abi_ver = IRDMA_ABI_VER,
+
+ .alloc_hw_port_stats = irdma_alloc_hw_port_stats,
+ .alloc_mr = irdma_alloc_mr,
+ .alloc_mw = irdma_alloc_mw,
+ .alloc_pd = irdma_alloc_pd,
+ .alloc_ucontext = irdma_alloc_ucontext,
+ .create_cq = irdma_create_cq,
+ .create_qp = irdma_create_qp,
+ .dealloc_driver = irdma_ib_dealloc_device,
+ .dealloc_mw = irdma_dealloc_mw,
+ .dealloc_pd = irdma_dealloc_pd,
+ .dealloc_ucontext = irdma_dealloc_ucontext,
+ .dereg_mr = irdma_dereg_mr,
+ .destroy_cq = irdma_destroy_cq,
+ .destroy_qp = irdma_destroy_qp,
+ .disassociate_ucontext = irdma_disassociate_ucontext,
+ .get_dev_fw_str = irdma_get_dev_fw_str,
+ .get_dma_mr = irdma_get_dma_mr,
+ .get_hw_stats = irdma_get_hw_stats,
+ .map_mr_sg = irdma_map_mr_sg,
+ .mmap = irdma_mmap,
+ .mmap_free = irdma_mmap_free,
+ .poll_cq = irdma_poll_cq,
+ .post_recv = irdma_post_recv,
+ .post_send = irdma_post_send,
+ .query_device = irdma_query_device,
+ .query_port = irdma_query_port,
+ .query_qp = irdma_query_qp,
+ .reg_user_mr = irdma_reg_user_mr,
+ .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
+ .req_notify_cq = irdma_req_notify_cq,
+ .resize_cq = irdma_resize_cq,
+ INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, irdma_ucontext, ibucontext),
+ INIT_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
+ INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
+};
+
+/**
+ * irdma_init_roce_device - initialization of roce rdma device
+ * @iwdev: irdma device
+ */
+static void irdma_init_roce_device(struct irdma_device *iwdev)
+{
+ iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ iwdev->netdev->dev_addr);
+ ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
+}
+
+/**
+ * irdma_init_iw_device - initialization of iwarp rdma device
+ * @iwdev: irdma device
+ */
+static void irdma_init_iw_device(struct irdma_device *iwdev)
+{
+ struct net_device *netdev = iwdev->netdev;
+
+ iwdev->ibdev.node_type = RDMA_NODE_RNIC;
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ netdev->dev_addr);
+ memcpy(iwdev->ibdev.iw_ifname, netdev->name,
+ sizeof(iwdev->ibdev.iw_ifname));
+ ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
+}
+
+/**
+ * irdma_init_rdma_device - initialization of rdma device
+ * @iwdev: irdma device
+ */
+static void irdma_init_rdma_device(struct irdma_device *iwdev)
+{
+ struct pci_dev *pcidev = iwdev->rf->pcidev;
+
+ if (iwdev->roce_mode)
+ irdma_init_roce_device(iwdev);
+ else
+ irdma_init_iw_device(iwdev);
+
+ iwdev->ibdev.phys_port_cnt = 1;
+ iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
+ iwdev->ibdev.dev.parent = &pcidev->dev;
+ ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
+}
+
+/**
+ * irdma_port_ibevent - indicate port event
+ * @iwdev: irdma device
+ */
+void irdma_port_ibevent(struct irdma_device *iwdev)
+{
+ struct ib_event event;
+
+ event.device = &iwdev->ibdev;
+ event.element.port_num = 1;
+ event.event =
+ iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ ib_dispatch_event(&event);
+}
+
+/**
+ * irdma_ib_unregister_device - unregister rdma device from IB
+ * core
+ * @iwdev: irdma device
+ */
+void irdma_ib_unregister_device(struct irdma_device *iwdev)
+{
+ iwdev->iw_status = 0;
+ irdma_port_ibevent(iwdev);
+ ib_unregister_device(&iwdev->ibdev);
+}
+
+/**
+ * irdma_ib_register_device - register irdma device to IB core
+ * @iwdev: irdma device
+ */
+int irdma_ib_register_device(struct irdma_device *iwdev)
+{
+ int ret;
+
+ irdma_init_rdma_device(iwdev);
+
+ ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
+ if (ret)
+ goto error;
+ dma_set_max_seg_size(iwdev->rf->hw.device, UINT_MAX);
+ ret = ib_register_device(&iwdev->ibdev, "irdma%d", iwdev->rf->hw.device);
+ if (ret)
+ goto error;
+
+ iwdev->iw_status = 1;
+ irdma_port_ibevent(iwdev);
+
+ return 0;
+
+error:
+ if (ret)
+ ibdev_dbg(&iwdev->ibdev, "VERBS: Register RDMA device fail\n");
+
+ return ret;
+}
+
+/**
+ * irdma_ib_dealloc_device - deallocate ib device resources
+ * @ibdev: ib device
+ *
+ * Callback from ibdev dealloc_driver to deallocate resources
+ * under the irdma device
+ */
+void irdma_ib_dealloc_device(struct ib_device *ibdev)
+{
+ struct irdma_device *iwdev = to_iwdev(ibdev);
+
+ irdma_rt_deinit_hw(iwdev);
+ irdma_ctrl_deinit_hw(iwdev->rf);
+ kfree(iwdev->rf);
+}
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
new file mode 100644
index 0000000000..20297a14c9
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2021 Intel Corporation */
+#ifndef IRDMA_VERBS_H
+#define IRDMA_VERBS_H
+
+#define IRDMA_MAX_SAVED_PHY_PGADDR 4
+#define IRDMA_FLUSH_DELAY_MS 20
+
+#define IRDMA_PKEY_TBL_SZ 1
+#define IRDMA_DEFAULT_PKEY 0xFFFF
+
+struct irdma_ucontext {
+ struct ib_ucontext ibucontext;
+ struct irdma_device *iwdev;
+ struct rdma_user_mmap_entry *db_mmap_entry;
+ struct list_head cq_reg_mem_list;
+ spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
+ struct list_head qp_reg_mem_list;
+ spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
+ int abi_ver;
+ u8 legacy_mode : 1;
+ u8 use_raw_attrs : 1;
+};
+
+struct irdma_pd {
+ struct ib_pd ibpd;
+ struct irdma_sc_pd sc_pd;
+};
+
+union irdma_sockaddr {
+ struct sockaddr_in saddr_in;
+ struct sockaddr_in6 saddr_in6;
+};
+
+struct irdma_av {
+ u8 macaddr[16];
+ struct rdma_ah_attr attrs;
+ union irdma_sockaddr sgid_addr;
+ union irdma_sockaddr dgid_addr;
+ u8 net_type;
+};
+
+struct irdma_ah {
+ struct ib_ah ibah;
+ struct irdma_sc_ah sc_ah;
+ struct irdma_pd *pd;
+ struct irdma_av av;
+ u8 sgid_index;
+ union ib_gid dgid;
+ struct hlist_node list;
+ refcount_t refcnt;
+ struct irdma_ah *parent_ah; /* AH from cached list */
+};
+
+struct irdma_hmc_pble {
+ union {
+ u32 idx;
+ dma_addr_t addr;
+ };
+};
+
+struct irdma_cq_mr {
+ struct irdma_hmc_pble cq_pbl;
+ dma_addr_t shadow;
+ bool split;
+};
+
+struct irdma_qp_mr {
+ struct irdma_hmc_pble sq_pbl;
+ struct irdma_hmc_pble rq_pbl;
+ dma_addr_t shadow;
+ struct page *sq_page;
+};
+
+struct irdma_cq_buf {
+ struct irdma_dma_mem kmem_buf;
+ struct irdma_cq_uk cq_uk;
+ struct irdma_hw *hw;
+ struct list_head list;
+ struct work_struct work;
+};
+
+struct irdma_pbl {
+ struct list_head list;
+ union {
+ struct irdma_qp_mr qp_mr;
+ struct irdma_cq_mr cq_mr;
+ };
+
+ bool pbl_allocated:1;
+ bool on_list:1;
+ u64 user_base;
+ struct irdma_pble_alloc pble_alloc;
+ struct irdma_mr *iwmr;
+};
+
+struct irdma_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ };
+ struct ib_umem *region;
+ u16 type;
+ u32 page_cnt;
+ u64 page_size;
+ u32 npages;
+ u32 stag;
+ u64 len;
+ u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
+ struct irdma_pbl iwpbl;
+};
+
+struct irdma_cq {
+ struct ib_cq ibcq;
+ struct irdma_sc_cq sc_cq;
+ u16 cq_head;
+ u16 cq_size;
+ u16 cq_num;
+ bool user_mode;
+ atomic_t armed;
+ enum irdma_cmpl_notify last_notify;
+ u32 polled_cmpls;
+ u32 cq_mem_size;
+ struct irdma_dma_mem kmem;
+ struct irdma_dma_mem kmem_shadow;
+ struct completion free_cq;
+ refcount_t refcnt;
+ spinlock_t lock; /* for poll cq */
+ struct irdma_pbl *iwpbl;
+ struct irdma_pbl *iwpbl_shadow;
+ struct list_head resize_list;
+ struct irdma_cq_poll_info cur_cqe;
+ struct list_head cmpl_generated;
+};
+
+struct irdma_cmpl_gen {
+ struct list_head list;
+ struct irdma_cq_poll_info cpi;
+};
+
+struct disconn_work {
+ struct work_struct work;
+ struct irdma_qp *iwqp;
+};
+
+struct iw_cm_id;
+
+struct irdma_qp_kmode {
+ struct irdma_dma_mem dma_mem;
+ struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
+ u64 *rq_wrid_mem;
+};
+
+struct irdma_qp {
+ struct ib_qp ibqp;
+ struct irdma_sc_qp sc_qp;
+ struct irdma_device *iwdev;
+ struct irdma_cq *iwscq;
+ struct irdma_cq *iwrcq;
+ struct irdma_pd *iwpd;
+ struct rdma_user_mmap_entry *push_wqe_mmap_entry;
+ struct rdma_user_mmap_entry *push_db_mmap_entry;
+ struct irdma_qp_host_ctx_info ctx_info;
+ union {
+ struct irdma_iwarp_offload_info iwarp_info;
+ struct irdma_roce_offload_info roce_info;
+ };
+
+ union {
+ struct irdma_tcp_offload_info tcp_info;
+ struct irdma_udp_offload_info udp_info;
+ };
+
+ struct irdma_ah roce_ah;
+ struct list_head teardown_entry;
+ refcount_t refcnt;
+ struct iw_cm_id *cm_id;
+ struct irdma_cm_node *cm_node;
+ struct delayed_work dwork_flush;
+ struct ib_mr *lsmm_mr;
+ atomic_t hw_mod_qp_pend;
+ enum ib_qp_state ibqp_state;
+ u32 qp_mem_size;
+ u32 last_aeq;
+ int max_send_wr;
+ int max_recv_wr;
+ atomic_t close_timer_started;
+ spinlock_t lock; /* serialize posting WRs to SQ/RQ */
+ struct irdma_qp_context *iwqp_context;
+ void *pbl_vbase;
+ dma_addr_t pbl_pbase;
+ struct page *page;
+ u8 active_conn : 1;
+ u8 user_mode : 1;
+ u8 hte_added : 1;
+ u8 flush_issued : 1;
+ u8 sig_all : 1;
+ u8 pau_mode : 1;
+ u8 suspend_pending : 1;
+ u8 rsvd : 1;
+ u8 iwarp_state;
+ u16 term_sq_flush_code;
+ u16 term_rq_flush_code;
+ u8 hw_iwarp_state;
+ u8 hw_tcp_state;
+ struct irdma_qp_kmode kqp;
+ struct irdma_dma_mem host_ctx;
+ struct timer_list terminate_timer;
+ struct irdma_pbl *iwpbl;
+ struct irdma_dma_mem q2_ctx_mem;
+ struct irdma_dma_mem ietf_mem;
+ struct completion free_qp;
+ wait_queue_head_t waitq;
+ wait_queue_head_t mod_qp_waitq;
+ u8 rts_ae_rcvd;
+};
+
+enum irdma_mmap_flag {
+ IRDMA_MMAP_IO_NC,
+ IRDMA_MMAP_IO_WC,
+};
+
+struct irdma_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u64 bar_offset;
+ u8 mmap_flag;
+};
+
+static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
+{
+ return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
+}
+
+static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
+{
+ return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
+}
+
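+/* Map the irdma SQ completion op_type to the corresponding ib_wc opcode;
+ * an unrecognized op_type is reported as a general error on the completion.
+ */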
+static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry)
+{
+ switch (cq_poll_info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case IRDMA_OP_TYPE_RDMA_READ_INV_STAG:
+ case IRDMA_OP_TYPE_RDMA_READ:
+ entry->opcode = IB_WC_RDMA_READ;
+ break;
+ case IRDMA_OP_TYPE_SEND_SOL:
+ case IRDMA_OP_TYPE_SEND_SOL_INV:
+ case IRDMA_OP_TYPE_SEND_INV:
+ case IRDMA_OP_TYPE_SEND:
+ entry->opcode = IB_WC_SEND;
+ break;
+ case IRDMA_OP_TYPE_FAST_REG_NSMR:
+ entry->opcode = IB_WC_REG_MR;
+ break;
+ case IRDMA_OP_TYPE_INV_STAG:
+ entry->opcode = IB_WC_LOCAL_INV;
+ break;
+ default:
+ entry->status = IB_WC_GENERAL_ERR;
+ }
+}
+
+static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
+ struct ib_wc *entry, bool send_imm_support)
+{
+	/* iWARP does not support Send with Immediate, so any immediate
+	 * data present on the RQ must have come from RDMA Write with
+	 * Immediate.
+	 */
+ if (!send_imm_support) {
+ entry->opcode = cq_poll_info->imm_valid ?
+ IB_WC_RECV_RDMA_WITH_IMM :
+ IB_WC_RECV;
+ return;
+ }
+
+ switch (cq_poll_info->op_type) {
+ case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
+ case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
+ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ }
+}
+
+void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
+int irdma_ib_register_device(struct irdma_device *iwdev);
+void irdma_ib_unregister_device(struct irdma_device *iwdev);
+void irdma_ib_dealloc_device(struct ib_device *ibdev);
+void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
+void irdma_generate_flush_completions(struct irdma_qp *iwqp);
+void irdma_remove_cmpls_list(struct irdma_cq *iwcq);
+int irdma_generated_cmpls(struct irdma_cq *iwcq, struct irdma_cq_poll_info *cq_poll_info);
+#endif /* IRDMA_VERBS_H */
diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c
new file mode 100644
index 0000000000..20bc8d0d7f
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ws.c
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2017 - 2021 Intel Corporation */
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+
+#include "ws.h"
+
+/**
+ * irdma_alloc_node - Allocate a WS node and init
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ * @node_type: Type of node, leaf or parent
+ * @parent: parent node pointer
+ */
+static struct irdma_ws_node *irdma_alloc_node(struct irdma_sc_vsi *vsi,
+ u8 user_pri,
+ enum irdma_ws_node_type node_type,
+ struct irdma_ws_node *parent)
+{
+ struct irdma_virt_mem ws_mem;
+ struct irdma_ws_node *node;
+ u16 node_index = 0;
+
+ ws_mem.size = sizeof(struct irdma_ws_node);
+ ws_mem.va = kzalloc(ws_mem.size, GFP_KERNEL);
+ if (!ws_mem.va)
+ return NULL;
+
+ if (parent) {
+ node_index = irdma_alloc_ws_node_id(vsi->dev);
+ if (node_index == IRDMA_WS_NODE_INVALID) {
+ kfree(ws_mem.va);
+ return NULL;
+ }
+ }
+
+ node = ws_mem.va;
+ node->index = node_index;
+ node->vsi_index = vsi->vsi_idx;
+ INIT_LIST_HEAD(&node->child_list_head);
+ if (node_type == WS_NODE_TYPE_LEAF) {
+ node->type_leaf = true;
+ node->traffic_class = vsi->qos[user_pri].traffic_class;
+ node->user_pri = user_pri;
+ node->rel_bw = vsi->qos[user_pri].rel_bw;
+ if (!node->rel_bw)
+ node->rel_bw = 1;
+
+ node->lan_qs_handle = vsi->qos[user_pri].lan_qos_handle;
+ node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
+ } else {
+ node->rel_bw = 1;
+ node->prio_type = IRDMA_PRIO_WEIGHTED_RR;
+ node->enable = true;
+ }
+
+ node->parent = parent;
+
+ return node;
+}
+
+/**
+ * irdma_free_node - Free a WS node
+ * @vsi: VSI structure of device
+ * @node: Pointer to node to free
+ */
+static void irdma_free_node(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *node)
+{
+ struct irdma_virt_mem ws_mem;
+
+ if (node->index)
+ irdma_free_ws_node_id(vsi->dev, node->index);
+
+ ws_mem.va = node;
+ ws_mem.size = sizeof(struct irdma_ws_node);
+ kfree(ws_mem.va);
+}
+
+/**
+ * irdma_ws_cqp_cmd - Post CQP work scheduler node cmd
+ * @vsi: vsi pointer
+ * @node: pointer to node
+ * @cmd: add, remove or modify
+ */
+static int irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *node, u8 cmd)
+{
+ struct irdma_ws_node_info node_info = {};
+
+ node_info.id = node->index;
+ node_info.vsi = node->vsi_index;
+ if (node->parent)
+ node_info.parent_id = node->parent->index;
+ else
+ node_info.parent_id = node_info.id;
+
+ node_info.weight = node->rel_bw;
+ node_info.tc = node->traffic_class;
+ node_info.prio_type = node->prio_type;
+ node_info.type_leaf = node->type_leaf;
+ node_info.enable = node->enable;
+ if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) {
+ ibdev_dbg(to_ibdev(vsi->dev), "WS: CQP WS CMD failed\n");
+ return -ENOMEM;
+ }
+
+ if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
+ node->qs_handle = node_info.qs_handle;
+ vsi->qos[node->user_pri].qs_handle = node_info.qs_handle;
+ }
+
+ return 0;
+}
+
+/**
+ * ws_find_node - Find SC WS node based on VSI id or TC
+ * @parent: parent node of First VSI or TC node
+ * @match_val: value to match
+ * @type: match type VSI/TC
+ */
+static struct irdma_ws_node *ws_find_node(struct irdma_ws_node *parent,
+ u16 match_val,
+ enum irdma_ws_match_type type)
+{
+ struct irdma_ws_node *node;
+
+ switch (type) {
+ case WS_MATCH_TYPE_VSI:
+ list_for_each_entry(node, &parent->child_list_head, siblings) {
+ if (node->vsi_index == match_val)
+ return node;
+ }
+ break;
+ case WS_MATCH_TYPE_TC:
+ list_for_each_entry(node, &parent->child_list_head, siblings) {
+ if (node->traffic_class == match_val)
+ return node;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+/**
+ * irdma_tc_in_use - Checks to see if a leaf node is in use
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+static bool irdma_tc_in_use(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ int i;
+
+ mutex_lock(&vsi->qos[user_pri].qos_mutex);
+ if (!list_empty(&vsi->qos[user_pri].qplist)) {
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+ return true;
+ }
+
+	/* Check if the traffic class associated with the given user priority
+	 * is in use by any other user priority; if so, the TC is still in use
+	 * and there is nothing left to do.
+	 */
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->qos[i].traffic_class == vsi->qos[user_pri].traffic_class &&
+ !list_empty(&vsi->qos[i].qplist)) {
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+ return true;
+ }
+ }
+ mutex_unlock(&vsi->qos[user_pri].qos_mutex);
+
+ return false;
+}
+
+/**
+ * irdma_remove_leaf - Remove leaf node unconditionally
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+static void irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ struct irdma_ws_node *ws_tree_root, *vsi_node, *tc_node;
+ int i;
+ u16 traffic_class;
+
+ traffic_class = vsi->qos[user_pri].traffic_class;
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++)
+ if (vsi->qos[i].traffic_class == traffic_class)
+ vsi->qos[i].valid = false;
+
+ ws_tree_root = vsi->dev->ws_tree_root;
+ if (!ws_tree_root)
+ return;
+
+ vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
+ WS_MATCH_TYPE_VSI);
+ if (!vsi_node)
+ return;
+
+ tc_node = ws_find_node(vsi_node,
+ vsi->qos[user_pri].traffic_class,
+ WS_MATCH_TYPE_TC);
+ if (!tc_node)
+ return;
+
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+ vsi->unregister_qset(vsi, tc_node);
+ list_del(&tc_node->siblings);
+ irdma_free_node(vsi, tc_node);
+ /* Check if VSI node can be freed */
+ if (list_empty(&vsi_node->child_list_head)) {
+ irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE);
+ list_del(&vsi_node->siblings);
+ irdma_free_node(vsi, vsi_node);
+		/* Free the head node if there are no remaining VSI nodes */
+ if (list_empty(&ws_tree_root->child_list_head)) {
+ irdma_ws_cqp_cmd(vsi, ws_tree_root,
+ IRDMA_OP_WS_DELETE_NODE);
+ irdma_free_node(vsi, ws_tree_root);
+ vsi->dev->ws_tree_root = NULL;
+ }
+ }
+}
+
+/**
+ * irdma_ws_add - Build work scheduler tree, set RDMA qs_handle
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ struct irdma_ws_node *ws_tree_root;
+ struct irdma_ws_node *vsi_node;
+ struct irdma_ws_node *tc_node;
+ u16 traffic_class;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&vsi->dev->ws_mutex);
+ if (vsi->tc_change_pending) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ if (vsi->qos[user_pri].valid)
+ goto exit;
+
+ ws_tree_root = vsi->dev->ws_tree_root;
+ if (!ws_tree_root) {
+ ibdev_dbg(to_ibdev(vsi->dev), "WS: Creating root node\n");
+ ws_tree_root = irdma_alloc_node(vsi, user_pri,
+ WS_NODE_TYPE_PARENT, NULL);
+ if (!ws_tree_root) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ret = irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_ADD_NODE);
+ if (ret) {
+ irdma_free_node(vsi, ws_tree_root);
+ goto exit;
+ }
+
+ vsi->dev->ws_tree_root = ws_tree_root;
+ }
+
+ /* Find a second tier node that matches the VSI */
+ vsi_node = ws_find_node(ws_tree_root, vsi->vsi_idx,
+ WS_MATCH_TYPE_VSI);
+
+ /* If VSI node doesn't exist, add one */
+ if (!vsi_node) {
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Node not found matching VSI %d\n",
+ vsi->vsi_idx);
+ vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT,
+ ws_tree_root);
+ if (!vsi_node) {
+ ret = -ENOMEM;
+ goto vsi_add_err;
+ }
+
+ ret = irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_ADD_NODE);
+ if (ret) {
+ irdma_free_node(vsi, vsi_node);
+ goto vsi_add_err;
+ }
+
+ list_add(&vsi_node->siblings, &ws_tree_root->child_list_head);
+ }
+
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Using node %d which represents VSI %d\n",
+ vsi_node->index, vsi->vsi_idx);
+ traffic_class = vsi->qos[user_pri].traffic_class;
+ tc_node = ws_find_node(vsi_node, traffic_class,
+ WS_MATCH_TYPE_TC);
+ if (!tc_node) {
+ /* Add leaf node */
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Node not found matching VSI %d and TC %d\n",
+ vsi->vsi_idx, traffic_class);
+ tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF,
+ vsi_node);
+ if (!tc_node) {
+ ret = -ENOMEM;
+ goto leaf_add_err;
+ }
+
+ ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_ADD_NODE);
+ if (ret) {
+ irdma_free_node(vsi, tc_node);
+ goto leaf_add_err;
+ }
+
+ list_add(&tc_node->siblings, &vsi_node->child_list_head);
+ /*
+ * callback to LAN to update the LAN tree with our node
+ */
+ ret = vsi->register_qset(vsi, tc_node);
+ if (ret)
+ goto reg_err;
+
+ tc_node->enable = true;
+ ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
+ if (ret) {
+ vsi->unregister_qset(vsi, tc_node);
+ goto reg_err;
+ }
+ }
+ ibdev_dbg(to_ibdev(vsi->dev),
+ "WS: Using node %d which represents VSI %d TC %d\n",
+ tc_node->index, vsi->vsi_idx, traffic_class);
+ /*
+ * Iterate through other UPs and update the QS handle if they have
+ * a matching traffic class.
+ */
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ if (vsi->qos[i].traffic_class == traffic_class) {
+ vsi->qos[i].qs_handle = tc_node->qs_handle;
+ vsi->qos[i].lan_qos_handle = tc_node->lan_qs_handle;
+ vsi->qos[i].l2_sched_node_id = tc_node->l2_sched_node_id;
+ vsi->qos[i].valid = true;
+ }
+ }
+ goto exit;
+
+reg_err:
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+ list_del(&tc_node->siblings);
+ irdma_free_node(vsi, tc_node);
+leaf_add_err:
+ if (list_empty(&vsi_node->child_list_head)) {
+ if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
+ goto exit;
+ list_del(&vsi_node->siblings);
+ irdma_free_node(vsi, vsi_node);
+ }
+
+vsi_add_err:
+	/* Free the head node if there are no remaining VSI nodes */
+ if (list_empty(&ws_tree_root->child_list_head)) {
+ irdma_ws_cqp_cmd(vsi, ws_tree_root, IRDMA_OP_WS_DELETE_NODE);
+ vsi->dev->ws_tree_root = NULL;
+ irdma_free_node(vsi, ws_tree_root);
+ }
+
+exit:
+ mutex_unlock(&vsi->dev->ws_mutex);
+ return ret;
+}
+
+/**
+ * irdma_ws_remove - Free WS scheduler node, update WS tree
+ * @vsi: vsi pointer
+ * @user_pri: user priority
+ */
+void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri)
+{
+ mutex_lock(&vsi->dev->ws_mutex);
+ if (irdma_tc_in_use(vsi, user_pri))
+ goto exit;
+ irdma_remove_leaf(vsi, user_pri);
+exit:
+ mutex_unlock(&vsi->dev->ws_mutex);
+}
+
+/**
+ * irdma_ws_reset - Reset entire WS tree
+ * @vsi: vsi pointer
+ */
+void irdma_ws_reset(struct irdma_sc_vsi *vsi)
+{
+ u8 i;
+
+ mutex_lock(&vsi->dev->ws_mutex);
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; ++i)
+ irdma_remove_leaf(vsi, i);
+ mutex_unlock(&vsi->dev->ws_mutex);
+}
diff --git a/drivers/infiniband/hw/irdma/ws.h b/drivers/infiniband/hw/irdma/ws.h
new file mode 100644
index 0000000000..d431e3327d
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ws.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2020 Intel Corporation */
+#ifndef IRDMA_WS_H
+#define IRDMA_WS_H
+
+#include "osdep.h"
+
+enum irdma_ws_node_type {
+ WS_NODE_TYPE_PARENT,
+ WS_NODE_TYPE_LEAF,
+};
+
+enum irdma_ws_match_type {
+ WS_MATCH_TYPE_VSI,
+ WS_MATCH_TYPE_TC,
+};
+
+struct irdma_ws_node {
+ struct list_head siblings;
+ struct list_head child_list_head;
+ struct irdma_ws_node *parent;
+ u64 lan_qs_handle; /* opaque handle used by LAN */
+ u32 l2_sched_node_id;
+ u16 index;
+ u16 qs_handle;
+ u16 vsi_index;
+ u8 traffic_class;
+ u8 user_pri;
+ u8 rel_bw;
+ u8 abstraction_layer; /* used for splitting a TC */
+ u8 prio_type;
+ bool type_leaf:1;
+ bool enable:1;
+};
+
+struct irdma_sc_vsi;
+int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
+void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri);
+void irdma_ws_reset(struct irdma_sc_vsi *vsi);
+
+#endif /* IRDMA_WS_H */