Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--  drivers/infiniband/hw/mlx5/Kconfig | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/Makefile | 30
-rw-r--r--  drivers/infiniband/hw/mlx5/ah.c | 149
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.c | 234
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.h | 62
-rw-r--r--  drivers/infiniband/hw/mlx5/cong.c | 463
-rw-r--r--  drivers/infiniband/hw/mlx5/counters.c | 930
-rw-r--r--  drivers/infiniband/hw/mlx5/counters.h | 17
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 1441
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 3101
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.h | 45
-rw-r--r--  drivers/infiniband/hw/mlx5/dm.c | 607
-rw-r--r--  drivers/infiniband/hw/mlx5/dm.h | 68
-rw-r--r--  drivers/infiniband/hw/mlx5/doorbell.c | 105
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 2897
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.h | 45
-rw-r--r--  drivers/infiniband/hw/mlx5/gsi.c | 489
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.c | 236
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.h | 40
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_virt.c | 224
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c | 646
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 4444
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 295
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 1616
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 2344
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 1821
-rw-r--r--  drivers/infiniband/hw/mlx5/qos.c | 133
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 5752
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.h | 47
-rw-r--r--  drivers/infiniband/hw/mlx5/qpc.c | 646
-rw-r--r--  drivers/infiniband/hw/mlx5/restrack.c | 179
-rw-r--r--  drivers/infiniband/hw/mlx5/restrack.h | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 469
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.h | 69
-rw-r--r--  drivers/infiniband/hw/mlx5/srq_cmd.c | 774
-rw-r--r--  drivers/infiniband/hw/mlx5/std_types.c | 222
-rw-r--r--  drivers/infiniband/hw/mlx5/umr.c | 763
-rw-r--r--  drivers/infiniband/hw/mlx5/umr.h | 97
-rw-r--r--  drivers/infiniband/hw/mlx5/wr.c | 1284
-rw-r--r--  drivers/infiniband/hw/mlx5/wr.h | 136
40 files changed, 32942 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
new file mode 100644
index 000000000..ef1ff42ea
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config MLX5_INFINIBAND
+ tristate "Mellanox 5th generation network adapters (ConnectX series) support"
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ help
+ This driver provides low-level InfiniBand support for
+ Mellanox Connect-IB PCI Express host channel adapters (HCAs).
+ This is required to use InfiniBand protocols such as
+ IP-over-IB or SRP with these devices.
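
For orientation only (this note is not part of the change itself), selecting the driver as a module amounts to a .config fragment along these lines, assuming the NETDEVICES, ETHERNET, PCI and MLX5_CORE dependencies listed above are already enabled:

        CONFIG_MLX5_CORE=m
        CONFIG_MLX5_INFINIBAND=m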
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
new file mode 100644
index 000000000..612ee8190
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
+
+mlx5_ib-y := ah.o \
+ cmd.o \
+ cong.o \
+ counters.o \
+ cq.o \
+ dm.o \
+ doorbell.o \
+ gsi.o \
+ ib_virt.o \
+ mad.o \
+ main.o \
+ mem.o \
+ mr.o \
+ qp.o \
+ qpc.o \
+ restrack.o \
+ srq.o \
+ srq_cmd.o \
+ umr.o \
+ wr.o
+
+mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
+mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
+mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \
+ fs.o \
+ qos.o \
+ std_types.o
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
new file mode 100644
index 000000000..505bc47fd
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx5_ib.h"
+
+static __be16 mlx5_ah_get_udp_sport(const struct mlx5_ib_dev *dev,
+ const struct rdma_ah_attr *ah_attr)
+{
+ enum ib_gid_type gid_type = ah_attr->grh.sgid_attr->gid_type;
+ __be16 sport;
+
+ if ((gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
+ (ah_attr->grh.flow_label & IB_GRH_FLOWLABEL_MASK))
+ sport = cpu_to_be16(
+ rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
+ else
+ sport = mlx5_get_roce_udp_sport_min(dev,
+ ah_attr->grh.sgid_attr);
+
+ return sport;
+}
+
+static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
+ struct rdma_ah_init_attr *init_attr)
+{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+ enum ib_gid_type gid_type;
+
+ if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+
+ memcpy(ah->av.rgid, &grh->dgid, 16);
+ ah->av.grh_gid_fl = cpu_to_be32(grh->flow_label |
+ (1 << 30) |
+ grh->sgid_index << 20);
+ ah->av.hop_limit = grh->hop_limit;
+ ah->av.tclass = grh->traffic_class;
+ }
+
+ ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
+
+ if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ if (init_attr->xmit_slave)
+ ah->xmit_port =
+ mlx5_lag_get_slave_port(dev->mdev,
+ init_attr->xmit_slave);
+ gid_type = ah_attr->grh.sgid_attr->gid_type;
+
+ memcpy(ah->av.rmac, ah_attr->roce.dmac,
+ sizeof(ah_attr->roce.dmac));
+ ah->av.udp_sport = mlx5_ah_get_udp_sport(dev, ah_attr);
+ ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0x7) << 1;
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+#define MLX5_ECN_ENABLED BIT(1)
+ ah->av.tclass |= MLX5_ECN_ENABLED;
+ } else {
+ ah->av.rlid = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
+ ah->av.fl_mlid = rdma_ah_get_path_bits(ah_attr) & 0x7f;
+ ah->av.stat_rate_sl |= (rdma_ah_get_sl(ah_attr) & 0xf);
+ }
+}
+
+int mlx5_ib_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+
+{
+ struct rdma_ah_attr *ah_attr = init_attr->ah_attr;
+ struct mlx5_ib_ah *ah = to_mah(ibah);
+ struct mlx5_ib_dev *dev = to_mdev(ibah->device);
+ enum rdma_ah_attr_type ah_type = ah_attr->type;
+
+ if ((ah_type == RDMA_AH_ATTR_TYPE_ROCE) &&
+ !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
+ return -EINVAL;
+
+ if (ah_type == RDMA_AH_ATTR_TYPE_ROCE && udata) {
+ int err;
+ struct mlx5_ib_create_ah_resp resp = {};
+ u32 min_resp_len =
+ offsetofend(struct mlx5_ib_create_ah_resp, dmac);
+
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+
+ resp.response_length = min_resp_len;
+
+ memcpy(resp.dmac, ah_attr->roce.dmac, ETH_ALEN);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ return err;
+ }
+
+ create_ib_ah(dev, ah, init_attr);
+ return 0;
+}
+
+int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct mlx5_ib_ah *ah = to_mah(ibah);
+ u32 tmp;
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ ah_attr->type = ibah->type;
+
+ tmp = be32_to_cpu(ah->av.grh_gid_fl);
+ if (tmp & (1 << 30)) {
+ rdma_ah_set_grh(ah_attr, NULL,
+ tmp & 0xfffff,
+ (tmp >> 20) & 0xff,
+ ah->av.hop_limit,
+ ah->av.tclass);
+ rdma_ah_set_dgid_raw(ah_attr, ah->av.rgid);
+ }
+ rdma_ah_set_dlid(ah_attr, be16_to_cpu(ah->av.rlid));
+ rdma_ah_set_static_rate(ah_attr, ah->av.stat_rate_sl >> 4);
+ rdma_ah_set_sl(ah_attr, ah->av.stat_rate_sl & 0xf);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
new file mode 100644
index 000000000..ff3742b04
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2017-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include "cmd.h"
+
+int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ int err;
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
+ if (!err)
+ *mkey = MLX5_GET(query_special_contexts_out, out,
+ dump_fill_mkey);
+ return err;
+}
+
+int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ int err;
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
+ if (!err)
+ *null_mkey = MLX5_GET(query_special_contexts_out, out,
+ null_mkey);
+ return err;
+}
+
+int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
+ void *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = {};
+
+ MLX5_SET(query_cong_params_in, in, opcode,
+ MLX5_CMD_OP_QUERY_CONG_PARAMS);
+ MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);
+
+ return mlx5_cmd_exec_inout(dev, query_cong_params, in, out);
+}
+
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+ MLX5_SET(destroy_tir_in, in, uid, uid);
+ mlx5_cmd_exec_in(dev, destroy_tir, in);
+}
+
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+ MLX5_SET(destroy_tis_in, in, uid, uid);
+ mlx5_cmd_exec_in(dev, destroy_tis, in);
+}
+
+int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(destroy_rqt_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, destroy_rqt, in);
+}
+
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
+ int err;
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(alloc_transport_domain_in, in, uid, uid);
+
+ err = mlx5_cmd_exec_inout(dev, alloc_transport_domain, in, out);
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, uid, uid);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+ mlx5_cmd_exec_in(dev, dealloc_transport_domain, in);
+}
+
+int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ MLX5_SET(dealloc_pd_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, dealloc_pd, in);
+}
+
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+ MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+ MLX5_SET(attach_to_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec_in(dev, attach_to_mcg, in);
+}
+
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+ MLX5_SET(detach_from_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec_in(dev, detach_from_mcg, in);
+}
+
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ MLX5_SET(alloc_xrcd_in, in, uid, uid);
+ err = mlx5_cmd_exec_inout(dev, alloc_xrcd, in, out);
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
+ return err;
+}
+
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ MLX5_SET(dealloc_xrcd_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, dealloc_xrcd, in);
+}
+
+int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
+ u16 opmod, u8 port)
+{
+ int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
+ int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
+ int err = -ENOMEM;
+ void *data;
+ void *resp;
+ u32 *out;
+ u32 *in;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!in || !out)
+ goto out;
+
+ MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
+ MLX5_SET(mad_ifc_in, in, op_mod, opmod);
+ MLX5_SET(mad_ifc_in, in, port, port);
+
+ data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
+ memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
+
+ err = mlx5_cmd_exec_inout(dev, mad_ifc, in, out);
+ if (err)
+ goto out;
+
+ resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
+ memcpy(outb, resp,
+ MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
+
+out:
+ kfree(out);
+ kfree(in);
+ return err;
+}
+
+int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+ MLX5_SET(alloc_uar_in, in, uid, uid);
+ err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
+ if (err)
+ return err;
+
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
+ return 0;
+}
+
+int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
+
+ MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+ MLX5_SET(dealloc_uar_in, in, uar, uarn);
+ MLX5_SET(dealloc_uar_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, dealloc_uar, in);
+}
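
The helpers in this file are thin, uid-aware wrappers around mlx5_cmd_exec_in()/mlx5_cmd_exec_inout(). As a hedged sketch of how a caller pairs them, a UAR allocation/release sequence might look roughly like the following; the surrounding dev and uid variables and the error handling are illustrative assumptions, not code from the tree:

        u32 uarn;
        int err;

        /* allocate a UAR on behalf of the user context identified by 'uid' */
        err = mlx5_cmd_uar_alloc(dev->mdev, &uarn, uid);
        if (err)
                return err;

        /* ... map and use the UAR page ... */

        /* release it with the same uid on teardown */
        mlx5_cmd_uar_dealloc(dev->mdev, uarn, uid);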
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
new file mode 100644
index 000000000..ee46638db
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_IB_CMD_H
+#define MLX5_IB_CMD_H
+
+#include "mlx5_ib.h"
+#include <linux/kernel.h>
+#include <linux/mlx5/driver.h>
+
+int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
+int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
+int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
+ void *out);
+int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
+int mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid);
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid);
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid);
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
+int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
+ u16 opmod, u8 port);
+int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid);
+int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid);
+#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
new file mode 100644
index 000000000..290ea8ac3
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2013-2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+
+#include "mlx5_ib.h"
+#include "cmd.h"
+
+enum mlx5_ib_cong_node_type {
+ MLX5_IB_RROCE_ECN_RP = 1,
+ MLX5_IB_RROCE_ECN_NP = 2,
+};
+
+static const char * const mlx5_ib_dbg_cc_name[] = {
+ "rp_clamp_tgt_rate",
+ "rp_clamp_tgt_rate_ati",
+ "rp_time_reset",
+ "rp_byte_reset",
+ "rp_threshold",
+ "rp_ai_rate",
+ "rp_max_rate",
+ "rp_hai_rate",
+ "rp_min_dec_fac",
+ "rp_min_rate",
+ "rp_rate_to_set_on_first_cnp",
+ "rp_dce_tcp_g",
+ "rp_dce_tcp_rtt",
+ "rp_rate_reduce_monitor_period",
+ "rp_initial_alpha_value",
+ "rp_gd",
+ "np_min_time_between_cnps",
+ "np_cnp_dscp",
+ "np_cnp_prio_mode",
+ "np_cnp_prio",
+};
+
+#define MLX5_IB_RP_CLAMP_TGT_RATE_ATTR BIT(1)
+#define MLX5_IB_RP_CLAMP_TGT_RATE_ATI_ATTR BIT(2)
+#define MLX5_IB_RP_TIME_RESET_ATTR BIT(3)
+#define MLX5_IB_RP_BYTE_RESET_ATTR BIT(4)
+#define MLX5_IB_RP_THRESHOLD_ATTR BIT(5)
+#define MLX5_IB_RP_MAX_RATE_ATTR BIT(6)
+#define MLX5_IB_RP_AI_RATE_ATTR BIT(7)
+#define MLX5_IB_RP_HAI_RATE_ATTR BIT(8)
+#define MLX5_IB_RP_MIN_DEC_FAC_ATTR BIT(9)
+#define MLX5_IB_RP_MIN_RATE_ATTR BIT(10)
+#define MLX5_IB_RP_RATE_TO_SET_ON_FIRST_CNP_ATTR BIT(11)
+#define MLX5_IB_RP_DCE_TCP_G_ATTR BIT(12)
+#define MLX5_IB_RP_DCE_TCP_RTT_ATTR BIT(13)
+#define MLX5_IB_RP_RATE_REDUCE_MONITOR_PERIOD_ATTR BIT(14)
+#define MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR BIT(15)
+#define MLX5_IB_RP_GD_ATTR BIT(16)
+
+#define MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR BIT(2)
+#define MLX5_IB_NP_CNP_DSCP_ATTR BIT(3)
+#define MLX5_IB_NP_CNP_PRIO_MODE_ATTR BIT(4)
+
+static enum mlx5_ib_cong_node_type
+mlx5_ib_param_to_node(enum mlx5_ib_dbg_cc_types param_offset)
+{
+ if (param_offset >= MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE &&
+ param_offset <= MLX5_IB_DBG_CC_RP_GD)
+ return MLX5_IB_RROCE_ECN_RP;
+ else
+ return MLX5_IB_RROCE_ECN_NP;
+}
+
+static u32 mlx5_get_cc_param_val(void *field, int offset)
+{
+ switch (offset) {
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate);
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate_after_time_inc);
+ case MLX5_IB_DBG_CC_RP_TIME_RESET:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_time_reset);
+ case MLX5_IB_DBG_CC_RP_BYTE_RESET:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_byte_reset);
+ case MLX5_IB_DBG_CC_RP_THRESHOLD:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_threshold);
+ case MLX5_IB_DBG_CC_RP_AI_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_ai_rate);
+ case MLX5_IB_DBG_CC_RP_MAX_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_max_rate);
+ case MLX5_IB_DBG_CC_RP_HAI_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_hai_rate);
+ case MLX5_IB_DBG_CC_RP_MIN_DEC_FAC:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_dec_fac);
+ case MLX5_IB_DBG_CC_RP_MIN_RATE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_rate);
+ case MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rate_to_set_on_first_cnp);
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_G:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_g);
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_RTT:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_rtt);
+ case MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rate_reduce_monitor_period);
+ case MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ initial_alpha_value);
+ case MLX5_IB_DBG_CC_RP_GD:
+ return MLX5_GET(cong_control_r_roce_ecn_rp, field,
+ rpg_gd);
+ case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ min_time_between_cnps);
+ case MLX5_IB_DBG_CC_NP_CNP_DSCP:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ cnp_dscp);
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ cnp_prio_mode);
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO:
+ return MLX5_GET(cong_control_r_roce_ecn_np, field,
+ cnp_802p_prio);
+ default:
+ return 0;
+ }
+}
+
+static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
+ u32 var, u32 *attr_mask)
+{
+ switch (offset) {
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE:
+ *attr_mask |= MLX5_IB_RP_CLAMP_TGT_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI:
+ *attr_mask |= MLX5_IB_RP_CLAMP_TGT_RATE_ATI_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ clamp_tgt_rate_after_time_inc, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_TIME_RESET:
+ *attr_mask |= MLX5_IB_RP_TIME_RESET_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_time_reset, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_BYTE_RESET:
+ *attr_mask |= MLX5_IB_RP_BYTE_RESET_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_byte_reset, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_THRESHOLD:
+ *attr_mask |= MLX5_IB_RP_THRESHOLD_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_threshold, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_AI_RATE:
+ *attr_mask |= MLX5_IB_RP_AI_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_ai_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_MAX_RATE:
+ *attr_mask |= MLX5_IB_RP_MAX_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_max_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_HAI_RATE:
+ *attr_mask |= MLX5_IB_RP_HAI_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_hai_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_MIN_DEC_FAC:
+ *attr_mask |= MLX5_IB_RP_MIN_DEC_FAC_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_dec_fac, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_MIN_RATE:
+ *attr_mask |= MLX5_IB_RP_MIN_RATE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_min_rate, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP:
+ *attr_mask |= MLX5_IB_RP_RATE_TO_SET_ON_FIRST_CNP_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rate_to_set_on_first_cnp, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_G:
+ *attr_mask |= MLX5_IB_RP_DCE_TCP_G_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_g, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_DCE_TCP_RTT:
+ *attr_mask |= MLX5_IB_RP_DCE_TCP_RTT_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ dce_tcp_rtt, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD:
+ *attr_mask |= MLX5_IB_RP_RATE_REDUCE_MONITOR_PERIOD_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rate_reduce_monitor_period, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE:
+ *attr_mask |= MLX5_IB_RP_INITIAL_ALPHA_VALUE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ initial_alpha_value, var);
+ break;
+ case MLX5_IB_DBG_CC_RP_GD:
+ *attr_mask |= MLX5_IB_RP_GD_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_rp, field,
+ rpg_gd, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS:
+ *attr_mask |= MLX5_IB_NP_MIN_TIME_BETWEEN_CNPS_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field,
+ min_time_between_cnps, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_CNP_DSCP:
+ *attr_mask |= MLX5_IB_NP_CNP_DSCP_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_dscp, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE:
+ *attr_mask |= MLX5_IB_NP_CNP_PRIO_MODE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_prio_mode, var);
+ break;
+ case MLX5_IB_DBG_CC_NP_CNP_PRIO:
+ *attr_mask |= MLX5_IB_NP_CNP_PRIO_MODE_ATTR;
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_prio_mode, 0);
+ MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_802p_prio, var);
+ break;
+ }
+}
+
+static int mlx5_ib_get_cc_params(struct mlx5_ib_dev *dev, u32 port_num,
+ int offset, u32 *var)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_cong_params_out);
+ void *out;
+ void *field;
+ int err;
+ enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
+
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!out) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ node = mlx5_ib_param_to_node(offset);
+
+ err = mlx5_cmd_query_cong_params(mdev, node, out);
+ if (err)
+ goto free;
+
+ field = MLX5_ADDR_OF(query_cong_params_out, out, congestion_parameters);
+ *var = mlx5_get_cc_param_val(field, offset);
+
+free:
+ kvfree(out);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+ return err;
+}
+
+static int mlx5_ib_set_cc_params(struct mlx5_ib_dev *dev, u32 port_num,
+ int offset, u32 var)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_cong_params_in);
+ void *in;
+ void *field;
+ enum mlx5_ib_cong_node_type node;
+ struct mlx5_core_dev *mdev;
+ u32 attr_mask = 0;
+ int err;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return -ENODEV;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ MLX5_SET(modify_cong_params_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_CONG_PARAMS);
+
+ node = mlx5_ib_param_to_node(offset);
+ MLX5_SET(modify_cong_params_in, in, cong_protocol, node);
+
+ field = MLX5_ADDR_OF(modify_cong_params_in, in, congestion_parameters);
+ mlx5_ib_set_cc_param_mask_val(field, offset, var, &attr_mask);
+
+ field = MLX5_ADDR_OF(modify_cong_params_in, in, field_select);
+ MLX5_SET(field_select_r_roce_rp, field, field_select_r_roce_rp,
+ attr_mask);
+
+ err = mlx5_cmd_exec_in(dev->mdev, modify_cong_params, in);
+ kvfree(in);
+alloc_err:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+ return err;
+}
+
+static ssize_t set_param(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_ib_dbg_param *param = filp->private_data;
+ int offset = param->offset;
+ char lbuf[11] = { };
+ u32 var;
+ int ret;
+
+ if (count > sizeof(lbuf))
+ return -EINVAL;
+
+ if (copy_from_user(lbuf, buf, count))
+ return -EFAULT;
+
+ lbuf[sizeof(lbuf) - 1] = '\0';
+
+ if (kstrtou32(lbuf, 0, &var))
+ return -EINVAL;
+
+ ret = mlx5_ib_set_cc_params(param->dev, param->port_num, offset, var);
+ return ret ? ret : count;
+}
+
+static ssize_t get_param(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_ib_dbg_param *param = filp->private_data;
+ int offset = param->offset;
+ u32 var = 0;
+ int ret;
+ char lbuf[11];
+
+ ret = mlx5_ib_get_cc_params(param->dev, param->port_num, offset, &var);
+ if (ret)
+ return ret;
+
+ ret = snprintf(lbuf, sizeof(lbuf), "%d\n", var);
+ if (ret < 0)
+ return ret;
+
+ return simple_read_from_buffer(buf, count, pos, lbuf, ret);
+}
+
+static const struct file_operations dbg_cc_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = set_param,
+ .read = get_param,
+};
+
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ if (!mlx5_debugfs_root ||
+ !dev->port[port_num].dbg_cc_params ||
+ !dev->port[port_num].dbg_cc_params->root)
+ return;
+
+ debugfs_remove_recursive(dev->port[port_num].dbg_cc_params->root);
+ kfree(dev->port[port_num].dbg_cc_params);
+ dev->port[port_num].dbg_cc_params = NULL;
+}
+
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_core_dev *mdev;
+ int i;
+
+ if (!mlx5_debugfs_root)
+ return;
+
+ /* Takes a 1-based port number */
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num + 1, NULL);
+ if (!mdev)
+ return;
+
+ if (!MLX5_CAP_GEN(mdev, cc_query_allowed) ||
+ !MLX5_CAP_GEN(mdev, cc_modify_allowed))
+ goto put_mdev;
+
+ dbg_cc_params = kzalloc(sizeof(*dbg_cc_params), GFP_KERNEL);
+ if (!dbg_cc_params)
+ goto err;
+
+ dev->port[port_num].dbg_cc_params = dbg_cc_params;
+
+ dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev));
+
+ for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) {
+ dbg_cc_params->params[i].offset = i;
+ dbg_cc_params->params[i].dev = dev;
+ dbg_cc_params->params[i].port_num = port_num;
+ dbg_cc_params->params[i].dentry =
+ debugfs_create_file(mlx5_ib_dbg_cc_name[i],
+ 0600, dbg_cc_params->root,
+ &dbg_cc_params->params[i],
+ &dbg_cc_fops);
+ }
+
+put_mdev:
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+ return;
+
+err:
+ mlx5_ib_warn(dev, "cong debugfs failure\n");
+ mlx5_ib_cleanup_cong_debugfs(dev, port_num);
+ mlx5_ib_put_native_port_mdev(dev, port_num + 1);
+
+ /*
+ * We don't want to fail the driver when debugfs fails to initialize,
+ * so we do not forward the error to the caller.
+ */
+ return;
+}
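
Taken together: on a device that advertises both cc_query_allowed and cc_modify_allowed, each entry of mlx5_ib_dbg_cc_name[] becomes a 0600 file under a cc_params/ directory in the device's mlx5 debugfs root. Reading a file goes through get_param(), which issues a QUERY_CONG_PARAMS command for the corresponding RP or NP parameter block; writing a decimal value goes through set_param(), which issues MODIFY_CONG_PARAMS with only the touched field marked in field_select. The exact path depends on where debugfs is mounted and on mlx5_debugfs_get_dev_root(), so it is not reproduced here.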
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
new file mode 100644
index 000000000..3e1272695
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -0,0 +1,930 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include "mlx5_ib.h"
+#include <linux/mlx5/eswitch.h>
+#include "counters.h"
+#include "ib_rep.h"
+#include "qp.h"
+
+struct mlx5_ib_counter {
+ const char *name;
+ size_t offset;
+ u32 type;
+};
+
+#define INIT_Q_COUNTER(_name) \
+ { .name = #_name, .offset = MLX5_BYTE_OFF(query_q_counter_out, _name)}
+
+static const struct mlx5_ib_counter basic_q_cnts[] = {
+ INIT_Q_COUNTER(rx_write_requests),
+ INIT_Q_COUNTER(rx_read_requests),
+ INIT_Q_COUNTER(rx_atomic_requests),
+ INIT_Q_COUNTER(out_of_buffer),
+};
+
+static const struct mlx5_ib_counter out_of_seq_q_cnts[] = {
+ INIT_Q_COUNTER(out_of_sequence),
+};
+
+static const struct mlx5_ib_counter retrans_q_cnts[] = {
+ INIT_Q_COUNTER(duplicate_request),
+ INIT_Q_COUNTER(rnr_nak_retry_err),
+ INIT_Q_COUNTER(packet_seq_err),
+ INIT_Q_COUNTER(implied_nak_seq_err),
+ INIT_Q_COUNTER(local_ack_timeout_err),
+};
+
+#define INIT_CONG_COUNTER(_name) \
+ { .name = #_name, .offset = \
+ MLX5_BYTE_OFF(query_cong_statistics_out, _name ## _high)}
+
+static const struct mlx5_ib_counter cong_cnts[] = {
+ INIT_CONG_COUNTER(rp_cnp_ignored),
+ INIT_CONG_COUNTER(rp_cnp_handled),
+ INIT_CONG_COUNTER(np_ecn_marked_roce_packets),
+ INIT_CONG_COUNTER(np_cnp_sent),
+};
+
+static const struct mlx5_ib_counter extended_err_cnts[] = {
+ INIT_Q_COUNTER(resp_local_length_error),
+ INIT_Q_COUNTER(resp_cqe_error),
+ INIT_Q_COUNTER(req_cqe_error),
+ INIT_Q_COUNTER(req_remote_invalid_request),
+ INIT_Q_COUNTER(req_remote_access_errors),
+ INIT_Q_COUNTER(resp_remote_access_errors),
+ INIT_Q_COUNTER(resp_cqe_flush_error),
+ INIT_Q_COUNTER(req_cqe_flush_error),
+};
+
+static const struct mlx5_ib_counter roce_accl_cnts[] = {
+ INIT_Q_COUNTER(roce_adp_retrans),
+ INIT_Q_COUNTER(roce_adp_retrans_to),
+ INIT_Q_COUNTER(roce_slow_restart),
+ INIT_Q_COUNTER(roce_slow_restart_cnps),
+ INIT_Q_COUNTER(roce_slow_restart_trans),
+};
+
+#define INIT_EXT_PPCNT_COUNTER(_name) \
+ { .name = #_name, .offset = \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_extended_cntrs_grp_data_layout._name##_high)}
+
+static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
+ INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
+};
+
+#define INIT_OP_COUNTER(_name, _type) \
+ { .name = #_name, .type = MLX5_IB_OPCOUNTER_##_type}
+
+static const struct mlx5_ib_counter basic_op_cnts[] = {
+ INIT_OP_COUNTER(cc_rx_ce_pkts, CC_RX_CE_PKTS),
+};
+
+static const struct mlx5_ib_counter rdmarx_cnp_op_cnts[] = {
+ INIT_OP_COUNTER(cc_rx_cnp_pkts, CC_RX_CNP_PKTS),
+};
+
+static const struct mlx5_ib_counter rdmatx_cnp_op_cnts[] = {
+ INIT_OP_COUNTER(cc_tx_cnp_pkts, CC_TX_CNP_PKTS),
+};
+
+static int mlx5_ib_read_counters(struct ib_counters *counters,
+ struct ib_counters_read_attr *read_attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+ struct mlx5_read_counters_attr mread_attr = {};
+ struct mlx5_ib_flow_counters_desc *desc;
+ int ret, i;
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ if (mcounters->cntrs_max_index > read_attr->ncounters) {
+ ret = -EINVAL;
+ goto err_bound;
+ }
+
+ mread_attr.out = kcalloc(mcounters->counters_num, sizeof(u64),
+ GFP_KERNEL);
+ if (!mread_attr.out) {
+ ret = -ENOMEM;
+ goto err_bound;
+ }
+
+ mread_attr.hw_cntrs_hndl = mcounters->hw_cntrs_hndl;
+ mread_attr.flags = read_attr->flags;
+ ret = mcounters->read_counters(counters->device, &mread_attr);
+ if (ret)
+ goto err_read;
+
+ /* walk the counters data array and assign each value according to its
+ * description/index pair
+ */
+ desc = mcounters->counters_data;
+ for (i = 0; i < mcounters->ncounters; i++)
+ read_attr->counters_buff[desc[i].index] += mread_attr.out[desc[i].description];
+
+err_read:
+ kfree(mread_attr.out);
+err_bound:
+ mutex_unlock(&mcounters->mcntrs_mutex);
+ return ret;
+}
+
+static int mlx5_ib_destroy_counters(struct ib_counters *counters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+
+ mlx5_ib_counters_clear_description(counters);
+ if (mcounters->hw_cntrs_hndl)
+ mlx5_fc_destroy(to_mdev(counters->device)->mdev,
+ mcounters->hw_cntrs_hndl);
+ return 0;
+}
+
+static int mlx5_ib_create_counters(struct ib_counters *counters,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+
+ mutex_init(&mcounters->mcntrs_mutex);
+ return 0;
+}
+
+
+static const struct mlx5_ib_counters *get_counters(struct mlx5_ib_dev *dev,
+ u32 port_num)
+{
+ return is_mdev_switchdev_mode(dev->mdev) ? &dev->port[0].cnts :
+ &dev->port[port_num].cnts;
+}
+
+/**
+ * mlx5_ib_get_counters_id - Returns counters id to use for device+port
+ * @dev: Pointer to mlx5 IB device
+ * @port_num: Zero based port number
+ *
+ * mlx5_ib_get_counters_id() returns the counter set id to use for the
+ * given device/port combination, in both switchdev and non-switchdev
+ * modes of the parent device.
+ */
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ const struct mlx5_ib_counters *cnts = get_counters(dev, port_num);
+
+ return cnts->set_id;
+}
+
+static struct rdma_hw_stats *do_alloc_stats(const struct mlx5_ib_counters *cnts)
+{
+ struct rdma_hw_stats *stats;
+ u32 num_hw_counters;
+ int i;
+
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ stats = rdma_alloc_hw_stats_struct(cnts->descs,
+ num_hw_counters +
+ cnts->num_op_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ if (!stats)
+ return NULL;
+
+ for (i = 0; i < cnts->num_op_counters; i++)
+ set_bit(num_hw_counters + i, stats->is_disabled);
+
+ return stats;
+}
+
+static struct rdma_hw_stats *
+mlx5_ib_alloc_hw_device_stats(struct ib_device *ibdev)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts = &dev->port[0].cnts;
+
+ return do_alloc_stats(cnts);
+}
+
+static struct rdma_hw_stats *
+mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts;
+
+ return do_alloc_stats(cnts);
+}
+
+static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
+ const struct mlx5_ib_counters *cnts,
+ struct rdma_hw_stats *stats,
+ u16 set_id)
+{
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+ __be32 val;
+ int ret, i;
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, set_id);
+ ret = mlx5_cmd_exec_inout(mdev, query_q_counter, in, out);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cnts->num_q_counters; i++) {
+ val = *(__be32 *)((void *)out + cnts->offsets[i]);
+ stats->value[i] = (u64)be32_to_cpu(val);
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_query_ext_ppcnt_counters(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_counters *cnts,
+ struct rdma_hw_stats *stats)
+{
+ int offset = cnts->num_q_counters + cnts->num_cong_counters;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int ret, i;
+ void *out;
+
+ out = kvzalloc(sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+ ret = mlx5_core_access_reg(dev->mdev, in, sz, out, sz, MLX5_REG_PPCNT,
+ 0, 0);
+ if (ret)
+ goto free;
+
+ for (i = 0; i < cnts->num_ext_ppcnt_counters; i++)
+ stats->value[i + offset] =
+ be64_to_cpup((__be64 *)(out +
+ cnts->offsets[i + offset]));
+free:
+ kvfree(out);
+ return ret;
+}
+
+static int do_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
+ struct mlx5_core_dev *mdev;
+ int ret, num_counters;
+
+ if (!stats)
+ return -EINVAL;
+
+ num_counters = cnts->num_q_counters +
+ cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+
+ /* q_counters are per IB device, query the master mdev */
+ ret = mlx5_ib_query_q_counters(dev->mdev, cnts, stats, cnts->set_id);
+ if (ret)
+ return ret;
+
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ ret = mlx5_ib_query_ext_ppcnt_counters(dev, cnts, stats);
+ if (ret)
+ return ret;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+ if (!port_num)
+ port_num = 1;
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
+ if (!mdev) {
+ /* If the port is not affiliated yet, it is in the down state
+ * and has no counters yet, so the values would be zero.
+ * There is no need to read from the HCA.
+ */
+ goto done;
+ }
+ ret = mlx5_lag_query_cong_counters(dev->mdev,
+ stats->value +
+ cnts->num_q_counters,
+ cnts->num_cong_counters,
+ cnts->offsets +
+ cnts->num_q_counters);
+
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ if (ret)
+ return ret;
+ }
+
+done:
+ return num_counters;
+}
+
+static int do_get_op_stat(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+ const struct mlx5_ib_op_fc *opfcs;
+ u64 packets = 0, bytes;
+ u32 type;
+ int ret;
+
+ cnts = get_counters(dev, port_num - 1);
+ opfcs = cnts->opfcs;
+ type = *(u32 *)cnts->descs[index].priv;
+ if (type >= MLX5_IB_OPCOUNTER_MAX)
+ return -EINVAL;
+
+ if (!opfcs[type].fc)
+ goto out;
+
+ ret = mlx5_fc_query(dev->mdev, opfcs[type].fc,
+ &packets, &bytes);
+ if (ret)
+ return ret;
+
+out:
+ stats->value[index] = packets;
+ return index;
+}
+
+static int do_get_op_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+ int index, ret, num_hw_counters;
+
+ cnts = get_counters(dev, port_num - 1);
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ for (index = num_hw_counters;
+ index < (num_hw_counters + cnts->num_op_counters); index++) {
+ ret = do_get_op_stat(ibdev, stats, port_num, index);
+ if (ret != index)
+ return ret;
+ }
+
+ return cnts->num_op_counters;
+}
+
+static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ int num_counters, num_hw_counters, num_op_counters;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+
+ cnts = get_counters(dev, port_num - 1);
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ num_counters = num_hw_counters + cnts->num_op_counters;
+
+ if (index < 0 || index > num_counters)
+ return -EINVAL;
+ else if (index > 0 && index < num_hw_counters)
+ return do_get_hw_stats(ibdev, stats, port_num, index);
+ else if (index >= num_hw_counters && index < num_counters)
+ return do_get_op_stat(ibdev, stats, port_num, index);
+
+ num_hw_counters = do_get_hw_stats(ibdev, stats, port_num, index);
+ if (num_hw_counters < 0)
+ return num_hw_counters;
+
+ num_op_counters = do_get_op_stats(ibdev, stats, port_num);
+ if (num_op_counters < 0)
+ return num_op_counters;
+
+ return num_hw_counters + num_op_counters;
+}
+
+static struct rdma_hw_stats *
+mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ const struct mlx5_ib_counters *cnts =
+ get_counters(dev, counter->port - 1);
+
+ return do_alloc_stats(cnts);
+}
+
+static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ const struct mlx5_ib_counters *cnts =
+ get_counters(dev, counter->port - 1);
+
+ return mlx5_ib_query_q_counters(dev->mdev, cnts,
+ counter->stats, counter->id);
+}
+
+static int mlx5_ib_counter_dealloc(struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(counter->device);
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+
+ if (!counter->id)
+ return 0;
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter->id);
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+}
+
+static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
+ struct ib_qp *qp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ int err;
+
+ if (!counter->id) {
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
+
+ MLX5_SET(alloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ MLX5_SET(alloc_q_counter_in, in, uid, MLX5_SHARED_RESOURCE_UID);
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
+ if (err)
+ return err;
+ counter->id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ }
+
+ err = mlx5_ib_qp_set_counter(qp, counter);
+ if (err)
+ goto fail_set_counter;
+
+ return 0;
+
+fail_set_counter:
+ mlx5_ib_counter_dealloc(counter);
+ counter->id = 0;
+
+ return err;
+}
+
+static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
+{
+ return mlx5_ib_qp_set_counter(qp, NULL);
+}
+
+static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
+ struct rdma_stat_desc *descs, size_t *offsets)
+{
+ int i;
+ int j = 0;
+
+ for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
+ descs[j].name = basic_q_cnts[i].name;
+ offsets[j] = basic_q_cnts[i].offset;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
+ for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
+ descs[j].name = out_of_seq_q_cnts[i].name;
+ offsets[j] = out_of_seq_q_cnts[i].offset;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
+ for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
+ descs[j].name = retrans_q_cnts[i].name;
+ offsets[j] = retrans_q_cnts[i].offset;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
+ for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
+ descs[j].name = extended_err_cnts[i].name;
+ offsets[j] = extended_err_cnts[i].offset;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
+ for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) {
+ descs[j].name = roce_accl_cnts[i].name;
+ offsets[j] = roce_accl_cnts[i].offset;
+ }
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+ for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
+ descs[j].name = cong_cnts[i].name;
+ offsets[j] = cong_cnts[i].offset;
+ }
+ }
+
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
+ descs[j].name = ext_ppcnt_cnts[i].name;
+ offsets[j] = ext_ppcnt_cnts[i].offset;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(basic_op_cnts); i++, j++) {
+ descs[j].name = basic_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &basic_op_cnts[i].type;
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode)) {
+ for (i = 0; i < ARRAY_SIZE(rdmarx_cnp_op_cnts); i++, j++) {
+ descs[j].name = rdmarx_cnp_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &rdmarx_cnp_op_cnts[i].type;
+ }
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode)) {
+ for (i = 0; i < ARRAY_SIZE(rdmatx_cnp_op_cnts); i++, j++) {
+ descs[j].name = rdmatx_cnp_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &rdmatx_cnp_op_cnts[i].type;
+ }
+ }
+}
+
+
+static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_counters *cnts)
+{
+ u32 num_counters, num_op_counters;
+
+ num_counters = ARRAY_SIZE(basic_q_cnts);
+
+ if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt))
+ num_counters += ARRAY_SIZE(out_of_seq_q_cnts);
+
+ if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters))
+ num_counters += ARRAY_SIZE(retrans_q_cnts);
+
+ if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters))
+ num_counters += ARRAY_SIZE(extended_err_cnts);
+
+ if (MLX5_CAP_GEN(dev->mdev, roce_accl))
+ num_counters += ARRAY_SIZE(roce_accl_cnts);
+
+ cnts->num_q_counters = num_counters;
+
+ if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
+ cnts->num_cong_counters = ARRAY_SIZE(cong_cnts);
+ num_counters += ARRAY_SIZE(cong_cnts);
+ }
+ if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
+ cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
+ num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
+ }
+
+ num_op_counters = ARRAY_SIZE(basic_op_cnts);
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode))
+ num_op_counters += ARRAY_SIZE(rdmarx_cnp_op_cnts);
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode))
+ num_op_counters += ARRAY_SIZE(rdmatx_cnp_op_cnts);
+
+ cnts->num_op_counters = num_op_counters;
+ num_counters += num_op_counters;
+ cnts->descs = kcalloc(num_counters,
+ sizeof(struct rdma_stat_desc), GFP_KERNEL);
+ if (!cnts->descs)
+ return -ENOMEM;
+
+ cnts->offsets = kcalloc(num_counters,
+ sizeof(*cnts->offsets), GFP_KERNEL);
+ if (!cnts->offsets)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(cnts->descs);
+ cnts->descs = NULL;
+ return -ENOMEM;
+}
+
+static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
+ int num_cnt_ports;
+ int i, j;
+
+ num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+
+ for (i = 0; i < num_cnt_ports; i++) {
+ if (dev->port[i].cnts.set_id) {
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
+ dev->port[i].cnts.set_id);
+ mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
+ }
+ kfree(dev->port[i].cnts.descs);
+ kfree(dev->port[i].cnts.offsets);
+
+ for (j = 0; j < MLX5_IB_OPCOUNTER_MAX; j++) {
+ if (!dev->port[i].cnts.opfcs[j].fc)
+ continue;
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
+ mlx5_ib_fs_remove_op_fc(dev,
+ &dev->port[i].cnts.opfcs[j], j);
+ mlx5_fc_destroy(dev->mdev,
+ dev->port[i].cnts.opfcs[j].fc);
+ dev->port[i].cnts.opfcs[j].fc = NULL;
+ }
+ }
+}
+
+static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {};
+ int num_cnt_ports;
+ int err = 0;
+ int i;
+ bool is_shared;
+
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ is_shared = MLX5_CAP_GEN(dev->mdev, log_max_uctx) != 0;
+ num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
+
+ for (i = 0; i < num_cnt_ports; i++) {
+ err = __mlx5_ib_alloc_counters(dev, &dev->port[i].cnts);
+ if (err)
+ goto err_alloc;
+
+ mlx5_ib_fill_counters(dev, dev->port[i].cnts.descs,
+ dev->port[i].cnts.offsets);
+
+ MLX5_SET(alloc_q_counter_in, in, uid,
+ is_shared ? MLX5_SHARED_RESOURCE_UID : 0);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_q_counter, in, out);
+ if (err) {
+ mlx5_ib_warn(dev,
+ "couldn't allocate queue counter for port %d, err %d\n",
+ i + 1, err);
+ goto err_alloc;
+ }
+
+ dev->port[i].cnts.set_id =
+ MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ }
+ return 0;
+
+err_alloc:
+ mlx5_ib_dealloc_counters(dev);
+ return err;
+}
+
+static int read_flow_counters(struct ib_device *ibdev,
+ struct mlx5_read_counters_attr *read_attr)
+{
+ struct mlx5_fc *fc = read_attr->hw_cntrs_hndl;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ return mlx5_fc_query(dev->mdev, fc,
+ &read_attr->out[IB_COUNTER_PACKETS],
+ &read_attr->out[IB_COUNTER_BYTES]);
+}
+
+/* flow counters currently expose two counters: packets and bytes */
+#define FLOW_COUNTERS_NUM 2
+static int counters_set_description(
+ struct ib_counters *counters, enum mlx5_ib_counters_type counters_type,
+ struct mlx5_ib_flow_counters_desc *desc_data, u32 ncounters)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(counters);
+ u32 cntrs_max_index = 0;
+ int i;
+
+ if (counters_type != MLX5_IB_COUNTERS_FLOW)
+ return -EINVAL;
+
+ /* init the fields for the object */
+ mcounters->type = counters_type;
+ mcounters->read_counters = read_flow_counters;
+ mcounters->counters_num = FLOW_COUNTERS_NUM;
+ mcounters->ncounters = ncounters;
+ /* each counter entry has both a description and an index pair */
+ for (i = 0; i < ncounters; i++) {
+ if (desc_data[i].description > IB_COUNTER_BYTES)
+ return -EINVAL;
+
+ if (cntrs_max_index <= desc_data[i].index)
+ cntrs_max_index = desc_data[i].index + 1;
+ }
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ mcounters->counters_data = desc_data;
+ mcounters->cntrs_max_index = cntrs_max_index;
+ mutex_unlock(&mcounters->mcntrs_mutex);
+
+ return 0;
+}
+
+#define MAX_COUNTERS_NUM (USHRT_MAX / (sizeof(u32) * 2))
+int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters,
+ struct mlx5_ib_create_flow *ucmd)
+{
+ struct mlx5_ib_mcounters *mcounters = to_mcounters(ibcounters);
+ struct mlx5_ib_flow_counters_data *cntrs_data = NULL;
+ struct mlx5_ib_flow_counters_desc *desc_data = NULL;
+ bool hw_hndl = false;
+ int ret = 0;
+
+ if (ucmd && ucmd->ncounters_data != 0) {
+ cntrs_data = ucmd->data;
+ if (cntrs_data->ncounters > MAX_COUNTERS_NUM)
+ return -EINVAL;
+
+ desc_data = kcalloc(cntrs_data->ncounters,
+ sizeof(*desc_data),
+ GFP_KERNEL);
+ if (!desc_data)
+ return -ENOMEM;
+
+ if (copy_from_user(desc_data,
+ u64_to_user_ptr(cntrs_data->counters_data),
+ sizeof(*desc_data) * cntrs_data->ncounters)) {
+ ret = -EFAULT;
+ goto free;
+ }
+ }
+
+ if (!mcounters->hw_cntrs_hndl) {
+ mcounters->hw_cntrs_hndl = mlx5_fc_create(
+ to_mdev(ibcounters->device)->mdev, false);
+ if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+ ret = PTR_ERR(mcounters->hw_cntrs_hndl);
+ goto free;
+ }
+ hw_hndl = true;
+ }
+
+ if (desc_data) {
+ /* counters already bound to at least one flow */
+ if (mcounters->cntrs_max_index) {
+ ret = -EINVAL;
+ goto free_hndl;
+ }
+
+ ret = counters_set_description(ibcounters,
+ MLX5_IB_COUNTERS_FLOW,
+ desc_data,
+ cntrs_data->ncounters);
+ if (ret)
+ goto free_hndl;
+
+ } else if (!mcounters->cntrs_max_index) {
+ /* counters not bound yet, must have udata passed */
+ ret = -EINVAL;
+ goto free_hndl;
+ }
+
+ return 0;
+
+free_hndl:
+ if (hw_hndl) {
+ mlx5_fc_destroy(to_mdev(ibcounters->device)->mdev,
+ mcounters->hw_cntrs_hndl);
+ mcounters->hw_cntrs_hndl = NULL;
+ }
+free:
+ kfree(desc_data);
+ return ret;
+}
+
+void mlx5_ib_counters_clear_description(struct ib_counters *counters)
+{
+ struct mlx5_ib_mcounters *mcounters;
+
+ if (!counters || atomic_read(&counters->usecnt) != 1)
+ return;
+
+ mcounters = to_mcounters(counters);
+
+ mutex_lock(&mcounters->mcntrs_mutex);
+ kfree(mcounters->counters_data);
+ mcounters->counters_data = NULL;
+ mcounters->cntrs_max_index = 0;
+ mutex_unlock(&mcounters->mcntrs_mutex);
+}
+
+static int mlx5_ib_modify_stat(struct ib_device *device, u32 port,
+ unsigned int index, bool enable)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_counters *cnts;
+ struct mlx5_ib_op_fc *opfc;
+ u32 num_hw_counters, type;
+ int ret;
+
+ cnts = &dev->port[port - 1].cnts;
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ if (index < num_hw_counters ||
+ index >= (num_hw_counters + cnts->num_op_counters))
+ return -EINVAL;
+
+ if (!(cnts->descs[index].flags & IB_STAT_FLAG_OPTIONAL))
+ return -EINVAL;
+
+ type = *(u32 *)cnts->descs[index].priv;
+ if (type >= MLX5_IB_OPCOUNTER_MAX)
+ return -EINVAL;
+
+ opfc = &cnts->opfcs[type];
+
+ if (enable) {
+ if (opfc->fc)
+ return -EEXIST;
+
+ opfc->fc = mlx5_fc_create(dev->mdev, false);
+ if (IS_ERR(opfc->fc))
+ return PTR_ERR(opfc->fc);
+
+ ret = mlx5_ib_fs_add_op_fc(dev, port, opfc, type);
+ if (ret) {
+ mlx5_fc_destroy(dev->mdev, opfc->fc);
+ opfc->fc = NULL;
+ }
+ return ret;
+ }
+
+ if (!opfc->fc)
+ return -EINVAL;
+
+ mlx5_ib_fs_remove_op_fc(dev, opfc, type);
+ mlx5_fc_destroy(dev->mdev, opfc->fc);
+ opfc->fc = NULL;
+ return 0;
+}
+
+static const struct ib_device_ops hw_stats_ops = {
+ .alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats,
+ .get_hw_stats = mlx5_ib_get_hw_stats,
+ .counter_bind_qp = mlx5_ib_counter_bind_qp,
+ .counter_unbind_qp = mlx5_ib_counter_unbind_qp,
+ .counter_dealloc = mlx5_ib_counter_dealloc,
+ .counter_alloc_stats = mlx5_ib_counter_alloc_stats,
+ .counter_update_stats = mlx5_ib_counter_update_stats,
+ .modify_hw_stat = IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) ?
+ mlx5_ib_modify_stat : NULL,
+};
+
+static const struct ib_device_ops hw_switchdev_stats_ops = {
+ .alloc_hw_device_stats = mlx5_ib_alloc_hw_device_stats,
+ .get_hw_stats = mlx5_ib_get_hw_stats,
+ .counter_bind_qp = mlx5_ib_counter_bind_qp,
+ .counter_unbind_qp = mlx5_ib_counter_unbind_qp,
+ .counter_dealloc = mlx5_ib_counter_dealloc,
+ .counter_alloc_stats = mlx5_ib_counter_alloc_stats,
+ .counter_update_stats = mlx5_ib_counter_update_stats,
+};
+
+static const struct ib_device_ops counters_ops = {
+ .create_counters = mlx5_ib_create_counters,
+ .destroy_counters = mlx5_ib_destroy_counters,
+ .read_counters = mlx5_ib_read_counters,
+
+ INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
+};
+
+int mlx5_ib_counters_init(struct mlx5_ib_dev *dev)
+{
+ ib_set_device_ops(&dev->ib_dev, &counters_ops);
+
+ if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ return 0;
+
+ if (is_mdev_switchdev_mode(dev->mdev))
+ ib_set_device_ops(&dev->ib_dev, &hw_switchdev_stats_ops);
+ else
+ ib_set_device_ops(&dev->ib_dev, &hw_stats_ops);
+ return mlx5_ib_alloc_counters(dev);
+}
+
+void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
+ return;
+
+ mlx5_ib_dealloc_counters(dev);
+}
diff --git a/drivers/infiniband/hw/mlx5/counters.h b/drivers/infiniband/hw/mlx5/counters.h
new file mode 100644
index 000000000..6bcaaa52e
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/counters.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_COUNTERS_H
+#define _MLX5_IB_COUNTERS_H
+
+#include "mlx5_ib.h"
+
+int mlx5_ib_counters_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_counters_cleanup(struct mlx5_ib_dev *dev);
+void mlx5_ib_counters_clear_description(struct ib_counters *counters);
+int mlx5_ib_flow_counters_set_data(struct ib_counters *ibcounters,
+ struct mlx5_ib_create_flow *ucmd);
+u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num);
+#endif /* _MLX5_IB_COUNTERS_H */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
new file mode 100644
index 000000000..be189e052
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -0,0 +1,1441 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kref.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_cache.h>
+#include "mlx5_ib.h"
+#include "srq.h"
+#include "qp.h"
+
+static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
+{
+ struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
+
+ ibcq->comp_handler(ibcq, ibcq->cq_context);
+}
+
+static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
+{
+ struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct ib_cq *ibcq = &cq->ibcq;
+ struct ib_event event;
+
+ if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
+ mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
+ type, mcq->cqn);
+ return;
+ }
+
+ if (ibcq->event_handler) {
+ event.device = &dev->ib_dev;
+ event.event = IB_EVENT_CQ_ERR;
+ event.element.cq = ibcq;
+ ibcq->event_handler(&event, ibcq->cq_context);
+ }
+}
+
+static void *get_cqe(struct mlx5_ib_cq *cq, int n)
+{
+ return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
+}
+
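+/*
+ * CQE ownership: a CQE is treated as software owned when its opcode is not
+ * MLX5_CQE_INVALID and its owner bit matches the parity of the current pass
+ * over the ring.  For example, with 256 entries (ibcq.cqe == 255), consumer
+ * indexes 0..255 expect owner bit 0 and indexes 256..511 expect owner bit 1.
+ * sw_ownership_bit() computes that same parity for a given entry and ring
+ * size and is used when CQEs are copied into a resized buffer.
+ */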
+static u8 sw_ownership_bit(int n, int nent)
+{
+ return (n & nent) ? 1 : 0;
+}
+
+static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
+{
+ void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
+ struct mlx5_cqe64 *cqe64;
+
+ cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
+
+ if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
+ !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
+ return cqe;
+ } else {
+ return NULL;
+ }
+}
+
+static void *next_cqe_sw(struct mlx5_ib_cq *cq)
+{
+ return get_sw_cqe(cq, cq->mcq.cons_index);
+}
+
+static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
+{
+ switch (wq->wr_data[idx]) {
+ case MLX5_IB_WR_UMR:
+ return 0;
+
+ case IB_WR_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+
+ case IB_WR_REG_MR:
+ return IB_WC_REG_MR;
+
+ default:
+ pr_warn("unknown completion status\n");
+ return 0;
+ }
+}
+
+static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+ struct mlx5_ib_wq *wq, int idx)
+{
+ wc->wc_flags = 0;
+ switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
+ case MLX5_OPCODE_RDMA_WRITE_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ fallthrough;
+ case MLX5_OPCODE_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case MLX5_OPCODE_SEND_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ fallthrough;
+ case MLX5_OPCODE_SEND:
+ case MLX5_OPCODE_SEND_INVAL:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case MLX5_OPCODE_RDMA_READ:
+ wc->opcode = IB_WC_RDMA_READ;
+ wc->byte_len = be32_to_cpu(cqe->byte_cnt);
+ break;
+ case MLX5_OPCODE_ATOMIC_CS:
+ wc->opcode = IB_WC_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case MLX5_OPCODE_ATOMIC_FA:
+ wc->opcode = IB_WC_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
+ case MLX5_OPCODE_ATOMIC_MASKED_CS:
+ wc->opcode = IB_WC_MASKED_COMP_SWAP;
+ wc->byte_len = 8;
+ break;
+ case MLX5_OPCODE_ATOMIC_MASKED_FA:
+ wc->opcode = IB_WC_MASKED_FETCH_ADD;
+ wc->byte_len = 8;
+ break;
+ case MLX5_OPCODE_UMR:
+ wc->opcode = get_umr_comp(wq, idx);
+ break;
+ }
+}
+
+enum {
+ MLX5_GRH_IN_BUFFER = 1,
+ MLX5_GRH_IN_CQE = 2,
+};
+
+static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
+ struct mlx5_ib_qp *qp)
+{
+ enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+ struct mlx5_ib_srq *srq = NULL;
+ struct mlx5_ib_wq *wq;
+ u16 wqe_ctr;
+ u8 roce_packet_type;
+ bool vlan_present;
+ u8 g;
+
+ if (qp->ibqp.srq || qp->ibqp.xrcd) {
+ struct mlx5_core_srq *msrq = NULL;
+
+ if (qp->ibqp.xrcd) {
+ msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
+ if (msrq)
+ srq = to_mibsrq(msrq);
+ } else {
+ srq = to_msrq(qp->ibqp.srq);
+ }
+ if (srq) {
+ wqe_ctr = be16_to_cpu(cqe->wqe_counter);
+ wc->wr_id = srq->wrid[wqe_ctr];
+ mlx5_ib_free_srq_wqe(srq, wqe_ctr);
+ if (msrq)
+ mlx5_core_res_put(&msrq->common);
+ }
+ } else {
+ wq = &qp->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+ }
+ wc->byte_len = be32_to_cpu(cqe->byte_cnt);
+
+ switch (get_cqe_opcode(cqe)) {
+ case MLX5_CQE_RESP_WR_IMM:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->immediate;
+ break;
+ case MLX5_CQE_RESP_SEND:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_IP_CSUM_OK;
+ if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK))))
+ wc->wc_flags = 0;
+ break;
+ case MLX5_CQE_RESP_SEND_IMM:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->immediate;
+ break;
+ case MLX5_CQE_RESP_SEND_INV:
+ wc->opcode = IB_WC_RECV;
+ wc->wc_flags = IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
+ break;
+ }
+ wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
+ wc->dlid_path_bits = cqe->ml_path;
+ g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
+ wc->wc_flags |= g ? IB_WC_GRH : 0;
+ if (is_qp1(qp->type)) {
+ u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
+
+ ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
+ &wc->pkey_index);
+ } else {
+ wc->pkey_index = 0;
+ }
+
+ if (ll != IB_LINK_LAYER_ETHERNET) {
+ wc->slid = be16_to_cpu(cqe->slid);
+ wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
+ return;
+ }
+
+ wc->slid = 0;
+ vlan_present = cqe->l4_l3_hdr_type & 0x1;
+ roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
+ if (vlan_present) {
+ wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
+ wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ } else {
+ wc->sl = 0;
+ }
+
+ switch (roce_packet_type) {
+ case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
+ wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
+ break;
+ case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+ break;
+ case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ break;
+ }
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+}
+
+static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
+{
+ mlx5_ib_warn(dev, "dump error cqe\n");
+ mlx5_dump_err_cqe(dev->mdev, cqe);
+}
+
+static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
+ struct mlx5_err_cqe *cqe,
+ struct ib_wc *wc)
+{
+ int dump = 1;
+
+ switch (cqe->syndrome) {
+ case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
+ wc->status = IB_WC_LOC_LEN_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
+ wc->status = IB_WC_LOC_QP_OP_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
+ wc->status = IB_WC_LOC_PROT_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
+ dump = 0;
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_MW_BIND_ERR:
+ wc->status = IB_WC_MW_BIND_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
+ wc->status = IB_WC_BAD_RESP_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
+ wc->status = IB_WC_LOC_ACCESS_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
+ wc->status = IB_WC_REM_INV_REQ_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
+ wc->status = IB_WC_REM_ACCESS_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
+ wc->status = IB_WC_REM_OP_ERR;
+ break;
+ case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
+ wc->status = IB_WC_RETRY_EXC_ERR;
+ dump = 0;
+ break;
+ case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
+ wc->status = IB_WC_RNR_RETRY_EXC_ERR;
+ dump = 0;
+ break;
+ case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
+ wc->status = IB_WC_REM_ABORT_ERR;
+ break;
+ default:
+ wc->status = IB_WC_GENERAL_ERR;
+ break;
+ }
+
+ wc->vendor_err = cqe->vendor_err_synd;
+ if (dump) {
+ mlx5_ib_warn(dev, "WC error: %d, Message: %s\n", wc->status,
+ ib_wc_status_msg(wc->status));
+ dump_cqe(dev, cqe);
+ }
+}
+
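+/*
+ * Advance the send queue's last_poll cursor: follow the w_list from the
+ * previously polled position up to the WQE index reported by this
+ * completion, then remember the entry that follows it for the next poll.
+ */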
+static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
+ u16 tail, u16 head)
+{
+ u16 idx;
+
+ do {
+ idx = tail & (qp->sq.wqe_cnt - 1);
+ if (idx == head)
+ break;
+
+ tail = qp->sq.w_list[idx].next;
+ } while (1);
+ tail = qp->sq.w_list[idx].next;
+ qp->sq.last_poll = tail;
+}
+
+static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
+{
+ mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
+}
+
+static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
+ struct ib_sig_err *item)
+{
+ u16 syndrome = be16_to_cpu(cqe->syndrome);
+
+#define GUARD_ERR (1 << 13)
+#define APPTAG_ERR (1 << 12)
+#define REFTAG_ERR (1 << 11)
+
+ if (syndrome & GUARD_ERR) {
+ item->err_type = IB_SIG_BAD_GUARD;
+ item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
+ item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
+	} else if (syndrome & REFTAG_ERR) {
+ item->err_type = IB_SIG_BAD_REFTAG;
+ item->expected = be32_to_cpu(cqe->expected_reftag);
+ item->actual = be32_to_cpu(cqe->actual_reftag);
+	} else if (syndrome & APPTAG_ERR) {
+ item->err_type = IB_SIG_BAD_APPTAG;
+ item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
+ item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
+ } else {
+ pr_err("Got signature completion error with bad syndrome %04x\n",
+ syndrome);
+ }
+
+ item->sig_err_offset = be64_to_cpu(cqe->err_offset);
+ item->key = be32_to_cpu(cqe->mkey);
+}
+
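+/*
+ * Generate completions in software for WQEs that are still outstanding on a
+ * send or receive queue.  Each pending entry is reported with
+ * IB_WC_WR_FLUSH_ERR; mlx5_ib_poll_sw_comp() below relies on this when the
+ * device is in internal error and the hardware can no longer produce CQEs.
+ */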
+static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
+ int *npolled, bool is_send)
+{
+ struct mlx5_ib_wq *wq;
+ unsigned int cur;
+ int np;
+ int i;
+
+ wq = (is_send) ? &qp->sq : &qp->rq;
+ cur = wq->head - wq->tail;
+ np = *npolled;
+
+ if (cur == 0)
+ return;
+
+ for (i = 0; i < cur && np < num_entries; i++) {
+ unsigned int idx;
+
+ idx = (is_send) ? wq->last_poll : wq->tail;
+ idx &= (wq->wqe_cnt - 1);
+ wc->wr_id = wq->wrid[idx];
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ wq->tail++;
+ if (is_send)
+ wq->last_poll = wq->w_list[idx].next;
+ np++;
+ wc->qp = &qp->ibqp;
+ wc++;
+ }
+ *npolled = np;
+}
+
+static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
+ struct ib_wc *wc, int *npolled)
+{
+ struct mlx5_ib_qp *qp;
+
+ *npolled = 0;
+	/* Find uncompleted WQEs belonging to that cq and return flush completions for them */
+ list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
+ sw_comp(qp, num_entries, wc + *npolled, npolled, true);
+ if (*npolled >= num_entries)
+ return;
+ }
+
+ list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
+ sw_comp(qp, num_entries, wc + *npolled, npolled, false);
+ if (*npolled >= num_entries)
+ return;
+ }
+}
+
+static int mlx5_poll_one(struct mlx5_ib_cq *cq,
+ struct mlx5_ib_qp **cur_qp,
+ struct ib_wc *wc)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_err_cqe *err_cqe;
+ struct mlx5_cqe64 *cqe64;
+ struct mlx5_core_qp *mqp;
+ struct mlx5_ib_wq *wq;
+ uint8_t opcode;
+ uint32_t qpn;
+ u16 wqe_ctr;
+ void *cqe;
+ int idx;
+
+repoll:
+ cqe = next_cqe_sw(cq);
+ if (!cqe)
+ return -EAGAIN;
+
+ cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
+
+ ++cq->mcq.cons_index;
+
+ /* Make sure we read CQ entry contents after we've checked the
+ * ownership bit.
+ */
+ rmb();
+
+ opcode = get_cqe_opcode(cqe64);
+ if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
+ if (likely(cq->resize_buf)) {
+ free_cq_buf(dev, &cq->buf);
+ cq->buf = *cq->resize_buf;
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ goto repoll;
+ } else {
+ mlx5_ib_warn(dev, "unexpected resize cqe\n");
+ }
+ }
+
+ qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
+ if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
+ /* We do not have to take the QP table lock here,
+ * because CQs will be locked while QPs are removed
+ * from the table.
+ */
+ mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
+ *cur_qp = to_mibqp(mqp);
+ }
+
+ wc->qp = &(*cur_qp)->ibqp;
+ switch (opcode) {
+ case MLX5_CQE_REQ:
+ wq = &(*cur_qp)->sq;
+ wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
+ idx = wqe_ctr & (wq->wqe_cnt - 1);
+ handle_good_req(wc, cqe64, wq, idx);
+ handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
+ wc->wr_id = wq->wrid[idx];
+ wq->tail = wq->wqe_head[idx] + 1;
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case MLX5_CQE_RESP_WR_IMM:
+ case MLX5_CQE_RESP_SEND:
+ case MLX5_CQE_RESP_SEND_IMM:
+ case MLX5_CQE_RESP_SEND_INV:
+ handle_responder(wc, cqe64, *cur_qp);
+ wc->status = IB_WC_SUCCESS;
+ break;
+ case MLX5_CQE_RESIZE_CQ:
+ break;
+ case MLX5_CQE_REQ_ERR:
+ case MLX5_CQE_RESP_ERR:
+ err_cqe = (struct mlx5_err_cqe *)cqe64;
+ mlx5_handle_error_cqe(dev, err_cqe, wc);
+ mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
+ opcode == MLX5_CQE_REQ_ERR ?
+ "Requestor" : "Responder", cq->mcq.cqn);
+ mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
+ err_cqe->syndrome, err_cqe->vendor_err_synd);
+ if (wc->status != IB_WC_WR_FLUSH_ERR &&
+ (*cur_qp)->type == MLX5_IB_QPT_REG_UMR)
+ dev->umrc.state = MLX5_UMR_STATE_RECOVER;
+
+ if (opcode == MLX5_CQE_REQ_ERR) {
+ wq = &(*cur_qp)->sq;
+ wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
+ idx = wqe_ctr & (wq->wqe_cnt - 1);
+ wc->wr_id = wq->wrid[idx];
+ wq->tail = wq->wqe_head[idx] + 1;
+ } else {
+ struct mlx5_ib_srq *srq;
+
+ if ((*cur_qp)->ibqp.srq) {
+ srq = to_msrq((*cur_qp)->ibqp.srq);
+ wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
+ wc->wr_id = srq->wrid[wqe_ctr];
+ mlx5_ib_free_srq_wqe(srq, wqe_ctr);
+ } else {
+ wq = &(*cur_qp)->rq;
+ wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
+ ++wq->tail;
+ }
+ }
+ break;
+ case MLX5_CQE_SIG_ERR: {
+ struct mlx5_sig_err_cqe *sig_err_cqe =
+ (struct mlx5_sig_err_cqe *)cqe64;
+ struct mlx5_core_sig_ctx *sig;
+
+ xa_lock(&dev->sig_mrs);
+ sig = xa_load(&dev->sig_mrs,
+ mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+ get_sig_err_item(sig_err_cqe, &sig->err_item);
+ sig->sig_err_exists = true;
+ sig->sigerr_count++;
+
+ mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
+ cq->mcq.cqn, sig->err_item.key,
+ sig->err_item.err_type,
+ sig->err_item.sig_err_offset,
+ sig->err_item.expected,
+ sig->err_item.actual);
+
+ xa_unlock(&dev->sig_mrs);
+ goto repoll;
+ }
+ }
+
+ return 0;
+}
+
+static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
+ struct ib_wc *wc, bool is_fatal_err)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_ib_wc *soft_wc, *next;
+ int npolled = 0;
+
+ list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
+ if (npolled >= num_entries)
+ break;
+
+ mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
+ cq->mcq.cqn);
+
+ if (unlikely(is_fatal_err)) {
+ soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
+ soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
+ }
+ wc[npolled++] = soft_wc->wc;
+ list_del(&soft_wc->list);
+ kfree(soft_wc);
+ }
+
+ return npolled;
+}
+
+int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ struct mlx5_ib_qp *cur_qp = NULL;
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int soft_polled = 0;
+ int npolled;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		/* make sure no soft WQEs are waiting */
+ if (unlikely(!list_empty(&cq->wc_list)))
+ soft_polled = poll_soft_wc(cq, num_entries, wc, true);
+
+ mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
+ wc + soft_polled, &npolled);
+ goto out;
+ }
+
+ if (unlikely(!list_empty(&cq->wc_list)))
+ soft_polled = poll_soft_wc(cq, num_entries, wc, false);
+
+ for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
+ if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
+ break;
+ }
+
+ if (npolled)
+ mlx5_cq_set_ci(&cq->mcq);
+out:
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return soft_polled + npolled;
+}
+
+int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ void __iomem *uar_page = mdev->priv.uar->map;
+ unsigned long irq_flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cq->lock, irq_flags);
+ if (cq->notify_flags != IB_CQ_NEXT_COMP)
+ cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
+
+ if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
+ ret = 1;
+ spin_unlock_irqrestore(&cq->lock, irq_flags);
+
+ mlx5_cq_arm(&cq->mcq,
+ (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
+ MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
+ uar_page, to_mcq(ibcq)->mcq.cons_index);
+
+ return ret;
+}
+
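+/*
+ * Allocate the fragmented CQ buffer.  The buffer is nent * cqe_size bytes
+ * and the per-entry stride is 2^6 = 64 bytes for 64 byte CQEs or
+ * 2^7 = 128 bytes for 128 byte CQEs.  For example (illustrative sizes only):
+ *   nent = 512, cqe_size = 64  -> 32 KiB of CQ buffer
+ *   nent = 256, cqe_size = 128 -> 32 KiB of CQ buffer
+ */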
+static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_cq_buf *buf,
+ int nent,
+ int cqe_size)
+{
+ struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
+ u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
+ u8 log_wq_sz = ilog2(cqe_size);
+ int err;
+
+ err = mlx5_frag_buf_alloc_node(dev->mdev,
+ nent * cqe_size,
+ frag_buf,
+ dev->mdev->priv.numa_node);
+ if (err)
+ return err;
+
+ mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);
+
+ buf->cqe_size = cqe_size;
+ buf->nent = nent;
+
+ return 0;
+}
+
+enum {
+ MLX5_CQE_RES_FORMAT_HASH = 0,
+ MLX5_CQE_RES_FORMAT_CSUM = 1,
+ MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
+};
+
+static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
+{
+ switch (format) {
+ case MLX5_IB_CQE_RES_FORMAT_HASH:
+ return MLX5_CQE_RES_FORMAT_HASH;
+ case MLX5_IB_CQE_RES_FORMAT_CSUM:
+ return MLX5_CQE_RES_FORMAT_CSUM;
+ case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
+ if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+ return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
+ struct mlx5_ib_cq *cq, int entries, u32 **cqb,
+ int *cqe_size, int *index, int *inlen)
+{
+ struct mlx5_ib_create_cq ucmd = {};
+ unsigned long page_size;
+ unsigned int page_offset_quantized;
+ size_t ucmdlen;
+ __be64 *pas;
+ int ncont;
+ void *cqc;
+ int err;
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ ucmdlen = min(udata->inlen, sizeof(ucmd));
+ if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
+ return -EINVAL;
+
+ if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
+ return -EFAULT;
+
+ if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
+ MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX |
+ MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)))
+ return -EINVAL;
+
+ if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
+ ucmd.reserved0 || ucmd.reserved1)
+ return -EINVAL;
+
+ *cqe_size = ucmd.cqe_size;
+
+ cq->buf.umem =
+ ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+ entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->buf.umem)) {
+ err = PTR_ERR(cq->buf.umem);
+ return err;
+ }
+
+ page_size = mlx5_umem_find_best_cq_quantized_pgoff(
+ cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64, &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto err_umem;
+ }
+
+ err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);
+ if (err)
+ goto err_umem;
+
+ ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
+ mlx5_ib_dbg(
+ dev,
+ "addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
+ ucmd.buf_addr, entries * ucmd.cqe_size,
+ ib_umem_num_pages(cq->buf.umem), page_size, ncont);
+
+ *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
+ *cqb = kvzalloc(*inlen, GFP_KERNEL);
+ if (!*cqb) {
+ err = -ENOMEM;
+ goto err_db;
+ }
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+ mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
+
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
+ *index = ucmd.uar_page_index;
+ } else if (context->bfregi.lib_uar_dyn) {
+ err = -EINVAL;
+ goto err_cqb;
+ } else {
+ *index = context->bfregi.sys_pages[0];
+ }
+
+ if (ucmd.cqe_comp_en == 1) {
+ int mini_cqe_format;
+
+ if (!((*cqe_size == 128 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
+ (*cqe_size == 64 &&
+ MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
+ *cqe_size);
+ goto err_cqb;
+ }
+
+ mini_cqe_format =
+ mini_cqe_res_format_to_hw(dev,
+ ucmd.cqe_comp_res_format);
+ if (mini_cqe_format < 0) {
+ err = mini_cqe_format;
+ mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
+ ucmd.cqe_comp_res_format, err);
+ goto err_cqb;
+ }
+
+ MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
+ }
+
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
+ if (*cqe_size != 128 ||
+ !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
+ err = -EOPNOTSUPP;
+ mlx5_ib_warn(dev,
+ "CQE padding is not supported for CQE size of %dB!\n",
+ *cqe_size);
+ goto err_cqb;
+ }
+
+ cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
+ }
+
+ if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)
+ cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS;
+
+ MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
+ return 0;
+
+err_cqb:
+ kvfree(*cqb);
+
+err_db:
+ mlx5_ib_db_unmap_user(context, &cq->db);
+
+err_umem:
+ ib_umem_release(cq->buf.umem);
+ return err;
+}
+
+static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ mlx5_ib_db_unmap_user(context, &cq->db);
+ ib_umem_release(cq->buf.umem);
+}
+
+static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
+{
+ int i;
+ void *cqe;
+ struct mlx5_cqe64 *cqe64;
+
+ for (i = 0; i < buf->nent; i++) {
+ cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
+ cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
+ cqe64->op_own = MLX5_CQE_INVALID << 4;
+ }
+}
+
+static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, int cqe_size,
+ u32 **cqb, int *index, int *inlen)
+{
+ __be64 *pas;
+ void *cqc;
+ int err;
+
+ err = mlx5_db_alloc(dev->mdev, &cq->db);
+ if (err)
+ return err;
+
+ cq->mcq.set_ci_db = cq->db.db;
+ cq->mcq.arm_db = cq->db.db + 1;
+ cq->mcq.cqe_sz = cqe_size;
+
+ err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
+ if (err)
+ goto err_db;
+
+ init_cq_frag_buf(&cq->buf);
+
+ *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+ cq->buf.frag_buf.npages;
+ *cqb = kvzalloc(*inlen, GFP_KERNEL);
+ if (!*cqb) {
+ err = -ENOMEM;
+ goto err_buf;
+ }
+
+ pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
+ mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);
+
+ cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
+ MLX5_SET(cqc, cqc, log_page_size,
+ cq->buf.frag_buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+
+ *index = dev->mdev->priv.uar->index;
+
+ return 0;
+
+err_buf:
+ free_cq_buf(dev, &cq->buf);
+
+err_db:
+ mlx5_db_free(dev->mdev, &cq->db);
+ return err;
+}
+
+static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
+{
+ free_cq_buf(dev, &cq->buf);
+ mlx5_db_free(dev->mdev, &cq->db);
+}
+
+static void notify_soft_wc_handler(struct work_struct *work)
+{
+ struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
+ notify_work);
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ib_device *ibdev = ibcq->device;
+ int entries = attr->cqe;
+ int vector = attr->comp_vector;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ int index;
+ int inlen;
+ u32 *cqb = NULL;
+ void *cqc;
+ int cqe_size;
+ int eqn;
+ int err;
+
+ if (entries < 0 ||
+ (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
+ return -EINVAL;
+
+ if (check_cq_create_flags(attr->flags))
+ return -EOPNOTSUPP;
+
+ entries = roundup_pow_of_two(entries + 1);
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
+ return -EINVAL;
+
+ cq->ibcq.cqe = entries - 1;
+ mutex_init(&cq->resize_mutex);
+ spin_lock_init(&cq->lock);
+ cq->resize_buf = NULL;
+ cq->resize_umem = NULL;
+ cq->create_flags = attr->flags;
+ INIT_LIST_HEAD(&cq->list_send_qp);
+ INIT_LIST_HEAD(&cq->list_recv_qp);
+
+ if (udata) {
+ err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
+ &index, &inlen);
+ if (err)
+ return err;
+ } else {
+ cqe_size = cache_line_size() == 128 ? 128 : 64;
+ err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
+ &index, &inlen);
+ if (err)
+ return err;
+
+ INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
+ }
+
+ err = mlx5_vector2eqn(dev->mdev, vector, &eqn);
+ if (err)
+ goto err_cqb;
+
+ cq->cqe_size = cqe_size;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+ MLX5_SET(cqc, cqc, uar_page, index);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
+ if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
+ MLX5_SET(cqc, cqc, oi, 1);
+
+ err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
+ if (err)
+ goto err_cqb;
+
+ mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
+ if (udata)
+ cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
+ else
+ cq->mcq.comp = mlx5_ib_cq_comp;
+ cq->mcq.event = mlx5_ib_cq_event;
+
+ INIT_LIST_HEAD(&cq->wc_list);
+
+ if (udata)
+ if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
+ err = -EFAULT;
+ goto err_cmd;
+ }
+
+ kvfree(cqb);
+ return 0;
+
+err_cmd:
+ mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
+
+err_cqb:
+ kvfree(cqb);
+ if (udata)
+ destroy_cq_user(cq, udata);
+ else
+ destroy_cq_kernel(dev, cq);
+ return err;
+}
+
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->device);
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ int ret;
+
+ ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
+ if (ret)
+ return ret;
+
+ if (udata)
+ destroy_cq_user(mcq, udata);
+ else
+ destroy_cq_kernel(dev, mcq);
+ return 0;
+}
+
+static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
+{
+ return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
+}
+
+void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
+{
+ struct mlx5_cqe64 *cqe64, *dest64;
+ void *cqe, *dest;
+ u32 prod_index;
+ int nfreed = 0;
+ u8 owner_bit;
+
+ if (!cq)
+ return;
+
+ /* First we need to find the current producer index, so we
+ * know where to start cleaning from. It doesn't matter if HW
+ * adds new entries after this loop -- the QP we're worried
+ * about is already in RESET, so the new entries won't come
+ * from our QP and therefore don't need to be checked.
+ */
+ for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
+ if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
+ break;
+
+ /* Now sweep backwards through the CQ, removing CQ entries
+ * that match our QP by copying older entries on top of them.
+ */
+ while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
+ cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
+ cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
+ if (is_equal_rsn(cqe64, rsn)) {
+ if (srq && (ntohl(cqe64->srqn) & 0xffffff))
+ mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
+ ++nfreed;
+ } else if (nfreed) {
+ dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
+ dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
+ owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
+ memcpy(dest, cqe, cq->mcq.cqe_sz);
+ dest64->op_own = owner_bit |
+ (dest64->op_own & ~MLX5_CQE_OWNER_MASK);
+ }
+ }
+
+ if (nfreed) {
+ cq->mcq.cons_index += nfreed;
+ /* Make sure update of buffer contents is done before
+ * updating consumer index.
+ */
+ wmb();
+ mlx5_cq_set_ci(&cq->mcq);
+ }
+}
+
+void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
+{
+ if (!cq)
+ return;
+
+ spin_lock_irq(&cq->lock);
+ __mlx5_ib_cq_clean(cq, qpn, srq);
+ spin_unlock_irq(&cq->lock);
+}
+
+int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->device);
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ int err;
+
+ if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
+ return -EOPNOTSUPP;
+
+ if (cq_period > MLX5_MAX_CQ_PERIOD)
+ return -EINVAL;
+
+ err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
+ cq_period, cq_count);
+ if (err)
+ mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
+
+ return err;
+}
+
+static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, struct ib_udata *udata,
+ int *cqe_size)
+{
+ struct mlx5_ib_resize_cq ucmd;
+ struct ib_umem *umem;
+ int err;
+
+ err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
+ if (err)
+ return err;
+
+ if (ucmd.reserved0 || ucmd.reserved1)
+ return -EINVAL;
+
+ /* check multiplication overflow */
+ if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+ return -EINVAL;
+
+ umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
+ (size_t)ucmd.cqe_size * entries,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(umem)) {
+ err = PTR_ERR(umem);
+ return err;
+ }
+
+ cq->resize_umem = umem;
+ *cqe_size = ucmd.cqe_size;
+
+ return 0;
+}
+
+static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ int entries, int cqe_size)
+{
+ int err;
+
+ cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
+ if (!cq->resize_buf)
+ return -ENOMEM;
+
+ err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
+ if (err)
+ goto ex;
+
+ init_cq_frag_buf(cq->resize_buf);
+
+ return 0;
+
+ex:
+ kfree(cq->resize_buf);
+ return err;
+}
+
+static int copy_resize_cqes(struct mlx5_ib_cq *cq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_cqe64 *scqe64;
+ struct mlx5_cqe64 *dcqe64;
+ void *start_cqe;
+ void *scqe;
+ void *dcqe;
+ int ssize;
+ int dsize;
+ int i;
+ u8 sw_own;
+
+ ssize = cq->buf.cqe_size;
+ dsize = cq->resize_buf->cqe_size;
+ if (ssize != dsize) {
+ mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
+ return -EINVAL;
+ }
+
+ i = cq->mcq.cons_index;
+ scqe = get_sw_cqe(cq, i);
+ scqe64 = ssize == 64 ? scqe : scqe + 64;
+ start_cqe = scqe;
+ if (!scqe) {
+ mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+ return -EINVAL;
+ }
+
+ while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
+ dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
+ (i + 1) & cq->resize_buf->nent);
+ dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
+ sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
+ memcpy(dcqe, scqe, dsize);
+ dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
+
+ ++i;
+ scqe = get_sw_cqe(cq, i);
+ scqe64 = ssize == 64 ? scqe : scqe + 64;
+ if (!scqe) {
+ mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
+ return -EINVAL;
+ }
+
+ if (scqe == start_cqe) {
+ pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
+ cq->mcq.cqn);
+ return -ENOMEM;
+ }
+ }
+ ++cq->mcq.cons_index;
+ return 0;
+}
+
+int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ void *cqc;
+ u32 *in;
+ int err;
+ int npas;
+ __be64 *pas;
+ unsigned int page_offset_quantized = 0;
+ unsigned int page_shift;
+ int inlen;
+ int cqe_size;
+ unsigned long flags;
+
+ if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
+ pr_info("Firmware does not support resize CQ\n");
+ return -ENOSYS;
+ }
+
+ if (entries < 1 ||
+ entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
+ mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
+ entries,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
+ return -EINVAL;
+ }
+
+ entries = roundup_pow_of_two(entries + 1);
+ if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
+ return -EINVAL;
+
+ if (entries == ibcq->cqe + 1)
+ return 0;
+
+ mutex_lock(&cq->resize_mutex);
+ if (udata) {
+ unsigned long page_size;
+
+ err = resize_user(dev, cq, entries, udata, &cqe_size);
+ if (err)
+ goto ex;
+
+ page_size = mlx5_umem_find_best_cq_quantized_pgoff(
+ cq->resize_umem, cqc, log_page_size,
+ MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
+ &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto ex_resize;
+ }
+ npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
+ page_shift = order_base_2(page_size);
+ } else {
+ struct mlx5_frag_buf *frag_buf;
+
+ cqe_size = 64;
+ err = resize_kernel(dev, cq, entries, cqe_size);
+ if (err)
+ goto ex;
+ frag_buf = &cq->resize_buf->frag_buf;
+ npas = frag_buf->npages;
+ page_shift = frag_buf->page_shift;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
+ MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto ex_resize;
+ }
+
+ pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
+ if (udata)
+ mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
+ 0);
+ else
+ mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
+
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.resize_field_select.resize_field_select,
+ MLX5_MODIFY_CQ_MASK_LOG_SIZE |
+ MLX5_MODIFY_CQ_MASK_PG_OFFSET |
+ MLX5_MODIFY_CQ_MASK_PG_SIZE);
+
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+
+ MLX5_SET(cqc, cqc, log_page_size,
+ page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
+ MLX5_SET(cqc, cqc, cqe_sz,
+ cqe_sz_to_mlx_sz(cqe_size,
+ cq->private_flags &
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
+ MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
+
+ MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
+ MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
+
+ err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
+ if (err)
+ goto ex_alloc;
+
+ if (udata) {
+ cq->ibcq.cqe = entries - 1;
+ ib_umem_release(cq->buf.umem);
+ cq->buf.umem = cq->resize_umem;
+ cq->resize_umem = NULL;
+ } else {
+ struct mlx5_ib_cq_buf tbuf;
+ int resized = 0;
+
+ spin_lock_irqsave(&cq->lock, flags);
+ if (cq->resize_buf) {
+ err = copy_resize_cqes(cq);
+ if (!err) {
+ tbuf = cq->buf;
+ cq->buf = *cq->resize_buf;
+ kfree(cq->resize_buf);
+ cq->resize_buf = NULL;
+ resized = 1;
+ }
+ }
+ cq->ibcq.cqe = entries - 1;
+ spin_unlock_irqrestore(&cq->lock, flags);
+ if (resized)
+ free_cq_buf(dev, &tbuf);
+ }
+ mutex_unlock(&cq->resize_mutex);
+
+ kvfree(in);
+ return 0;
+
+ex_alloc:
+ kvfree(in);
+
+ex_resize:
+ ib_umem_release(cq->resize_umem);
+ if (!udata) {
+ free_cq_buf(dev, cq->resize_buf);
+ cq->resize_buf = NULL;
+ }
+ex:
+ mutex_unlock(&cq->resize_mutex);
+ return err;
+}
+
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
+{
+ struct mlx5_ib_cq *cq;
+
+ if (!ibcq)
+ return 128;
+
+ cq = to_mcq(ibcq);
+ return cq->cqe_size;
+}
+
+/* Called from atomic context */
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
+{
+ struct mlx5_ib_wc *soft_wc;
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+ unsigned long flags;
+
+ soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
+ if (!soft_wc)
+ return -ENOMEM;
+
+ soft_wc->wc = *wc;
+ spin_lock_irqsave(&cq->lock, flags);
+ list_add_tail(&soft_wc->list, &cq->wc_list);
+ if (cq->notify_flags == IB_CQ_NEXT_COMP ||
+ wc->status != IB_WC_SUCCESS) {
+ cq->notify_flags = 0;
+ schedule_work(&cq->notify_work);
+ }
+ spin_unlock_irqrestore(&cq->lock, flags);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
new file mode 100644
index 000000000..f8e2baed2
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -0,0 +1,3101 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/uverbs_std_types.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_ib.h"
+#include "devx.h"
+#include "qp.h"
+#include <linux/xarray.h>
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static void dispatch_event_fd(struct list_head *fd_list, const void *data);
+
+enum devx_obj_flags {
+ DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
+ DEVX_OBJ_FLAGS_DCT = 1 << 1,
+ DEVX_OBJ_FLAGS_CQ = 1 << 2,
+};
+
+struct devx_async_data {
+ struct mlx5_ib_dev *mdev;
+ struct list_head list;
+ struct devx_async_cmd_event_file *ev_file;
+ struct mlx5_async_work cb_work;
+ u16 cmd_out_len;
+ /* must be last field in this structure */
+ struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
+};
+
+struct devx_async_event_data {
+ struct list_head list; /* headed in ev_file->event_list */
+ struct mlx5_ib_uapi_devx_async_event_hdr hdr;
+};
+
+/* first level XA value data structure */
+struct devx_event {
+ struct xarray object_ids; /* second XA level, Key = object id */
+ struct list_head unaffiliated_list;
+};
+
+/* second level XA value data structure */
+struct devx_obj_event {
+ struct rcu_head rcu;
+ struct list_head obj_sub_list;
+};
+
+struct devx_event_subscription {
+ struct list_head file_list; /* headed in ev_file->
+ * subscribed_events_list
+ */
+ struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
+ * devx_obj_event->obj_sub_list
+ */
+ struct list_head obj_list; /* headed in devx_object */
+ struct list_head event_list; /* headed in ev_file->event_list or in
+ * temp list via subscription
+ */
+
+ u8 is_cleaned:1;
+ u32 xa_key_level1;
+ u32 xa_key_level2;
+ struct rcu_head rcu;
+ u64 cookie;
+ struct devx_async_event_file *ev_file;
+ struct eventfd_ctx *eventfd;
+};
+
+struct devx_async_event_file {
+ struct ib_uobject uobj;
+ /* Head of events that are subscribed to this FD */
+ struct list_head subscribed_events_list;
+ spinlock_t lock;
+ wait_queue_head_t poll_wait;
+ struct list_head event_list;
+ struct mlx5_ib_dev *dev;
+ u8 omit_data:1;
+ u8 is_overflow_err:1;
+ u8 is_destroyed:1;
+};
+
+struct devx_umem {
+ struct mlx5_core_dev *mdev;
+ struct ib_umem *umem;
+ u32 dinlen;
+ u32 dinbox[MLX5_ST_SZ_DW(destroy_umem_in)];
+};
+
+struct devx_umem_reg_cmd {
+ void *in;
+ u32 inlen;
+ u32 out[MLX5_ST_SZ_DW(create_umem_out)];
+};
+
+static struct mlx5_ib_ucontext *
+devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
+{
+ return to_mucontext(ib_uverbs_get_ucontext(attrs));
+}
+
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
+{
+ u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
+ void *uctx;
+ int err;
+ u16 uid;
+ u32 cap = 0;
+
+ /* 0 means not supported */
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
+ return -EINVAL;
+
+ uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
+ if (is_user && capable(CAP_NET_RAW) &&
+ (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
+ cap |= MLX5_UCTX_CAP_RAW_TX;
+ if (is_user && capable(CAP_SYS_RAWIO) &&
+ (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
+ MLX5_UCTX_CAP_INTERNAL_DEV_RES))
+ cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;
+
+ MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
+ MLX5_SET(uctx, uctx, cap, cap);
+
+ err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ uid = MLX5_GET(create_uctx_out, out, uid);
+ return uid;
+}
+
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
+
+ MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
+ MLX5_SET(destroy_uctx_in, in, uid, uid);
+
+ mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static bool is_legacy_unaffiliated_event_num(u16 event_num)
+{
+ switch (event_num) {
+ case MLX5_EVENT_TYPE_PORT_CHANGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_legacy_obj_event_num(u16 event_num)
+{
+ switch (event_num) {
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ case MLX5_EVENT_TYPE_COMP:
+ case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
+ case MLX5_EVENT_TYPE_XRQ_ERROR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u16 get_legacy_obj_type(u16 opcode)
+{
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_RQ:
+ return MLX5_EVENT_QUEUE_TYPE_RQ;
+ case MLX5_CMD_OP_CREATE_QP:
+ return MLX5_EVENT_QUEUE_TYPE_QP;
+ case MLX5_CMD_OP_CREATE_SQ:
+ return MLX5_EVENT_QUEUE_TYPE_SQ;
+ case MLX5_CMD_OP_CREATE_DCT:
+ return MLX5_EVENT_QUEUE_TYPE_DCT;
+ default:
+ return 0;
+ }
+}
+
+static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
+{
+ u16 opcode;
+
+ opcode = (obj->obj_id >> 32) & 0xffff;
+
+ if (is_legacy_obj_event_num(event_num))
+ return get_legacy_obj_type(opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ return (obj->obj_id >> 48);
+ case MLX5_CMD_OP_CREATE_RQ:
+ return MLX5_OBJ_TYPE_RQ;
+ case MLX5_CMD_OP_CREATE_QP:
+ return MLX5_OBJ_TYPE_QP;
+ case MLX5_CMD_OP_CREATE_SQ:
+ return MLX5_OBJ_TYPE_SQ;
+ case MLX5_CMD_OP_CREATE_DCT:
+ return MLX5_OBJ_TYPE_DCT;
+ case MLX5_CMD_OP_CREATE_TIR:
+ return MLX5_OBJ_TYPE_TIR;
+ case MLX5_CMD_OP_CREATE_TIS:
+ return MLX5_OBJ_TYPE_TIS;
+ case MLX5_CMD_OP_CREATE_PSV:
+ return MLX5_OBJ_TYPE_PSV;
+ case MLX5_OBJ_TYPE_MKEY:
+ return MLX5_OBJ_TYPE_MKEY;
+ case MLX5_CMD_OP_CREATE_RMP:
+ return MLX5_OBJ_TYPE_RMP;
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return MLX5_OBJ_TYPE_XRC_SRQ;
+ case MLX5_CMD_OP_CREATE_XRQ:
+ return MLX5_OBJ_TYPE_XRQ;
+ case MLX5_CMD_OP_CREATE_RQT:
+ return MLX5_OBJ_TYPE_RQT;
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ return MLX5_OBJ_TYPE_FLOW_COUNTER;
+ case MLX5_CMD_OP_CREATE_CQ:
+ return MLX5_OBJ_TYPE_CQ;
+ default:
+ return 0;
+ }
+}
+
+static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
+{
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ return eqe->data.qp_srq.type;
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ case MLX5_EVENT_TYPE_XRQ_ERROR:
+ return 0;
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
+ return MLX5_EVENT_QUEUE_TYPE_DCT;
+ default:
+ return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
+ }
+}
+
+static u32 get_dec_obj_id(u64 obj_id)
+{
+ return (obj_id & 0xffffffff);
+}
+
+/*
+ * As the obj_id in the firmware is not globally unique the object type
+ * must be considered upon checking for a valid object id.
+ * For that the opcode of the creator command is encoded as part of the obj_id.
+ */
+static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
+{
+ return ((u64)opcode << 32) | obj_id;
+}
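+
+/*
+ * For example, a QP created through MLX5_CMD_OP_CREATE_QP with qpn 0x1234
+ * (an arbitrary illustrative value) is tracked as
+ * ((u64)MLX5_CMD_OP_CREATE_QP << 32) | 0x1234, and get_dec_obj_id() above
+ * recovers the plain 0x1234 from that value.
+ */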
+
+static u32 devx_get_created_obj_id(const void *in, const void *out, u16 opcode)
+{
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ return MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ case MLX5_CMD_OP_CREATE_UMEM:
+ return MLX5_GET(create_umem_out, out, umem_id);
+ case MLX5_CMD_OP_CREATE_MKEY:
+ return MLX5_GET(create_mkey_out, out, mkey_index);
+ case MLX5_CMD_OP_CREATE_CQ:
+ return MLX5_GET(create_cq_out, out, cqn);
+ case MLX5_CMD_OP_ALLOC_PD:
+ return MLX5_GET(alloc_pd_out, out, pd);
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ return MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+ case MLX5_CMD_OP_CREATE_RMP:
+ return MLX5_GET(create_rmp_out, out, rmpn);
+ case MLX5_CMD_OP_CREATE_SQ:
+ return MLX5_GET(create_sq_out, out, sqn);
+ case MLX5_CMD_OP_CREATE_RQ:
+ return MLX5_GET(create_rq_out, out, rqn);
+ case MLX5_CMD_OP_CREATE_RQT:
+ return MLX5_GET(create_rqt_out, out, rqtn);
+ case MLX5_CMD_OP_CREATE_TIR:
+ return MLX5_GET(create_tir_out, out, tirn);
+ case MLX5_CMD_OP_CREATE_TIS:
+ return MLX5_GET(create_tis_out, out, tisn);
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ return MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ return MLX5_GET(create_flow_table_out, out, table_id);
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ return MLX5_GET(create_flow_group_out, out, group_id);
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ return MLX5_GET(set_fte_in, in, flow_index);
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ return MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
+ return MLX5_GET(alloc_packet_reformat_context_out, out,
+ packet_reformat_id);
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ return MLX5_GET(alloc_modify_header_context_out, out,
+ modify_header_id);
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ return MLX5_GET(create_scheduling_element_out, out,
+ scheduling_element_id);
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ return MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ return MLX5_GET(set_l2_table_entry_in, in, table_index);
+ case MLX5_CMD_OP_CREATE_QP:
+ return MLX5_GET(create_qp_out, out, qpn);
+ case MLX5_CMD_OP_CREATE_SRQ:
+ return MLX5_GET(create_srq_out, out, srqn);
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+ case MLX5_CMD_OP_CREATE_DCT:
+ return MLX5_GET(create_dct_out, out, dctn);
+ case MLX5_CMD_OP_CREATE_XRQ:
+ return MLX5_GET(create_xrq_out, out, xrqn);
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ return MLX5_GET(attach_to_mcg_in, in, qpn);
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return MLX5_GET(alloc_xrcd_out, out, xrcd);
+ case MLX5_CMD_OP_CREATE_PSV:
+ return MLX5_GET(create_psv_out, out, psv0_index);
+ default:
+		/* The entry must match one of the opcodes handled by devx_is_obj_create_cmd() */
+ WARN_ON(true);
+ return 0;
+ }
+}
+
+static u64 devx_get_obj_id(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ u64 obj_id;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_type) << 16,
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_id));
+ break;
+ case MLX5_CMD_OP_QUERY_MKEY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
+ MLX5_GET(query_mkey_in, in,
+ mkey_index));
+ break;
+ case MLX5_CMD_OP_QUERY_CQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(query_cq_in, in, cqn));
+ break;
+ case MLX5_CMD_OP_MODIFY_CQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(modify_cq_in, in, cqn));
+ break;
+ case MLX5_CMD_OP_QUERY_SQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(query_sq_in, in, sqn));
+ break;
+ case MLX5_CMD_OP_MODIFY_SQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(modify_sq_in, in, sqn));
+ break;
+ case MLX5_CMD_OP_QUERY_RQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(query_rq_in, in, rqn));
+ break;
+ case MLX5_CMD_OP_MODIFY_RQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(modify_rq_in, in, rqn));
+ break;
+ case MLX5_CMD_OP_QUERY_RMP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(query_rmp_in, in, rmpn));
+ break;
+ case MLX5_CMD_OP_MODIFY_RMP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(modify_rmp_in, in, rmpn));
+ break;
+ case MLX5_CMD_OP_QUERY_RQT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(query_rqt_in, in, rqtn));
+ break;
+ case MLX5_CMD_OP_MODIFY_RQT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(modify_rqt_in, in, rqtn));
+ break;
+ case MLX5_CMD_OP_QUERY_TIR:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(query_tir_in, in, tirn));
+ break;
+ case MLX5_CMD_OP_MODIFY_TIR:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(modify_tir_in, in, tirn));
+ break;
+ case MLX5_CMD_OP_QUERY_TIS:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(query_tis_in, in, tisn));
+ break;
+ case MLX5_CMD_OP_MODIFY_TIS:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(modify_tis_in, in, tisn));
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(query_flow_table_in, in,
+ table_id));
+ break;
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(modify_flow_table_in, in,
+ table_id));
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
+ MLX5_GET(query_flow_group_in, in,
+ group_id));
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(query_fte_in, in,
+ flow_index));
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(set_fte_in, in, flow_index));
+ break;
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
+ MLX5_GET(query_q_counter_in, in,
+ counter_set_id));
+ break;
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
+ MLX5_GET(query_flow_counter_in, in,
+ flow_counter_id));
+ break;
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
+ MLX5_GET(query_modify_header_context_in,
+ in, modify_header_id));
+ break;
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(query_scheduling_element_in,
+ in, scheduling_element_id));
+ break;
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(modify_scheduling_element_in,
+ in, scheduling_element_id));
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
+ MLX5_GET(add_vxlan_udp_dport_in, in,
+ vxlan_udp_port));
+ break;
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(query_l2_table_entry_in, in,
+ table_index));
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(set_l2_table_entry_in, in,
+ table_index));
+ break;
+ case MLX5_CMD_OP_QUERY_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(query_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rst2init_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2init_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2rtr_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rtr2rts_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rts2rts_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(sqerr2rts_qp_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2err_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_2RST_QP:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2rst_in, in, qpn));
+ break;
+ case MLX5_CMD_OP_QUERY_DCT:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(query_dct_in, in, dctn));
+ break;
+ case MLX5_CMD_OP_QUERY_XRQ:
+ case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(query_xrq_in, in, xrqn));
+ break;
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(query_xrc_srq_in, in,
+ xrc_srqn));
+ break;
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
+ break;
+ case MLX5_CMD_OP_QUERY_SRQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
+ MLX5_GET(query_srq_in, in, srqn));
+ break;
+ case MLX5_CMD_OP_ARM_RQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(arm_rq_in, in, srq_number));
+ break;
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(drain_dct_in, in, dctn));
+ break;
+ case MLX5_CMD_OP_ARM_XRQ:
+ case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+ case MLX5_CMD_OP_MODIFY_XRQ:
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(arm_xrq_in, in, xrqn));
+ break;
+ case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
+ obj_id = get_enc_obj_id
+ (MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
+ MLX5_GET(query_packet_reformat_context_in,
+ in, packet_reformat_id));
+ break;
+ default:
+ obj_id = 0;
+ }
+
+ return obj_id;
+}
+
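+/*
+ * Check that the object id encoded in the user-provided command mailbox
+ * refers to the same device object as the uobject the method was invoked
+ * on, so a command cannot be redirected to an object the caller does not
+ * own.
+ */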
+static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
+ struct ib_uobject *uobj, const void *in)
+{
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ u64 obj_id = devx_get_obj_id(in);
+
+ if (!obj_id)
+ return false;
+
+ switch (uobj_get_object_id(uobj)) {
+ case UVERBS_OBJECT_CQ:
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ to_mcq(uobj->object)->mcq.cqn) ==
+ obj_id;
+
+ case UVERBS_OBJECT_SRQ:
+ {
+ struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
+ u16 opcode;
+
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
+ break;
+ case MLX5_RES_XRQ:
+ opcode = MLX5_CMD_OP_CREATE_XRQ;
+ break;
+ default:
+ if (!dev->mdev->issi)
+ opcode = MLX5_CMD_OP_CREATE_SRQ;
+ else
+ opcode = MLX5_CMD_OP_CREATE_RMP;
+ }
+
+ return get_enc_obj_id(opcode,
+ to_msrq(uobj->object)->msrq.srqn) ==
+ obj_id;
+ }
+
+ case UVERBS_OBJECT_QP:
+ {
+ struct mlx5_ib_qp *qp = to_mqp(uobj->object);
+
+ if (qp->type == IB_QPT_RAW_PACKET ||
+ (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp =
+ &qp->raw_packet_qp;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+
+ return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ rq->base.mqp.qpn) == obj_id ||
+ get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ sq->base.mqp.qpn) == obj_id ||
+ get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ rq->tirn) == obj_id ||
+ get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ sq->tisn) == obj_id);
+ }
+
+ if (qp->type == MLX5_IB_QPT_DCT)
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ qp->dct.mdct.mqp.qpn) == obj_id;
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ qp->ibqp.qp_num) == obj_id;
+ }
+
+ case UVERBS_OBJECT_WQ:
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ to_mrwq(uobj->object)->core_qp.qpn) ==
+ obj_id;
+
+ case UVERBS_OBJECT_RWQ_IND_TBL:
+ return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ to_mrwq_ind_table(uobj->object)->rqtn) ==
+ obj_id;
+
+ case MLX5_IB_OBJECT_DEVX_OBJ:
+ {
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ struct devx_obj *devx_uobj = uobj->object;
+
+ if (opcode == MLX5_CMD_OP_QUERY_FLOW_COUNTER &&
+ devx_uobj->flow_counter_bulk_size) {
+ u64 end;
+
+ end = devx_uobj->obj_id +
+ devx_uobj->flow_counter_bulk_size;
+ return devx_uobj->obj_id <= obj_id && end > obj_id;
+ }
+
+ return devx_uobj->obj_id == obj_id;
+ }
+
+ default:
+ return false;
+ }
+}
+
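+/*
+ * For create/modify commands whose buffers and doorbell records are
+ * described by DEVX umem objects, set the corresponding *_umem_valid bits
+ * so that firmware interprets those fields as umem references.
+ */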
+static void devx_set_umem_valid(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ {
+ void *cqc;
+
+ MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
+ break;
+ }
+ case MLX5_CMD_OP_CREATE_QP:
+ {
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
+ MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_RQ:
+ {
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_SQ:
+ {
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_MODIFY_CQ:
+ MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
+ break;
+
+ case MLX5_CMD_OP_CREATE_RMP:
+ {
+ void *rmpc, *wq;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRQ:
+ {
+ void *xrqc, *wq;
+
+ xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
+ wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ {
+ void *xrc_srqc;
+
+ MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
+ xrc_srq_context_entry);
+ MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
+ break;
+ }
+
+ default:
+ return;
+ }
+}
+
+static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
+{
+ *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (*opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ case MLX5_CMD_OP_CREATE_MKEY:
+ case MLX5_CMD_OP_CREATE_CQ:
+ case MLX5_CMD_OP_ALLOC_PD:
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ case MLX5_CMD_OP_CREATE_RMP:
+ case MLX5_CMD_OP_CREATE_SQ:
+ case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RQT:
+ case MLX5_CMD_OP_CREATE_TIR:
+ case MLX5_CMD_OP_CREATE_TIS:
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_CREATE_QP:
+ case MLX5_CMD_OP_CREATE_SRQ:
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ case MLX5_CMD_OP_CREATE_DCT:
+ case MLX5_CMD_OP_CREATE_XRQ:
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+ if (op_mod == 0)
+ return true;
+ return false;
+ }
+ case MLX5_CMD_OP_CREATE_PSV:
+ {
+ u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);
+
+ if (num_psv == 1)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_modify_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_MODIFY_CQ:
+ case MLX5_CMD_OP_MODIFY_RMP:
+ case MLX5_CMD_OP_MODIFY_SQ:
+ case MLX5_CMD_OP_MODIFY_RQ:
+ case MLX5_CMD_OP_MODIFY_RQT:
+ case MLX5_CMD_OP_MODIFY_TIR:
+ case MLX5_CMD_OP_MODIFY_TIS:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_RST2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ case MLX5_CMD_OP_ARM_RQ:
+ case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+ case MLX5_CMD_OP_ARM_XRQ:
+ case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+ case MLX5_CMD_OP_MODIFY_XRQ:
+ return true;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ {
+ u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);
+
+ if (op_mod == 1)
+ return true;
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_obj_query_cmd(const void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_MKEY:
+ case MLX5_CMD_OP_QUERY_CQ:
+ case MLX5_CMD_OP_QUERY_RMP:
+ case MLX5_CMD_OP_QUERY_SQ:
+ case MLX5_CMD_OP_QUERY_RQ:
+ case MLX5_CMD_OP_QUERY_RQT:
+ case MLX5_CMD_OP_QUERY_TIR:
+ case MLX5_CMD_OP_QUERY_TIS:
+ case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+ case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+ case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
+ case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+ case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+ case MLX5_CMD_OP_QUERY_QP:
+ case MLX5_CMD_OP_QUERY_SRQ:
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ case MLX5_CMD_OP_QUERY_DCT:
+ case MLX5_CMD_OP_QUERY_XRQ:
+ case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
+ case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
+ case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool devx_is_whitelist_cmd(void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
+{
+ if (devx_is_whitelist_cmd(cmd_in)) {
+ struct mlx5_ib_dev *dev;
+
+ if (c->devx_uid)
+ return c->devx_uid;
+
+ dev = to_mdev(c->ibucontext.device);
+ if (dev->devx_whitelist_uid)
+ return dev->devx_whitelist_uid;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ return c->devx_uid;
+}
+
+static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ /* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
+ if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
+ MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
+ (opcode >= MLX5_CMD_OP_GENERAL_START &&
+ opcode < MLX5_CMD_OP_GENERAL_END))
+ return true;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_VPORT_STATE:
+ case MLX5_CMD_OP_QUERY_ADAPTER:
+ case MLX5_CMD_OP_QUERY_ISSI:
+ case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+ case MLX5_CMD_OP_QUERY_VNIC_ENV:
+ case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+ case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+ case MLX5_CMD_OP_NOP:
+ case MLX5_CMD_OP_QUERY_CONG_STATUS:
+ case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+ case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+ case MLX5_CMD_OP_QUERY_LAG:
+ case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ return true;
+ default:
+ return false;
+ }
+}
+
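+/* Translate a user completion vector index into the device EQ number. */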
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ int user_vector;
+ int dev_eqn;
+ int err;
+
+ if (uverbs_copy_from(&user_vector, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
+ return -EFAULT;
+
+ c = devx_ufile2uctx(attrs);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn);
+ if (err < 0)
+ return err;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ &dev_eqn, sizeof(dev_eqn)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * Security note:
+ * The hardware protection mechanism works like this: each device object that
+ * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
+ * the device specification manual) upon its creation. Then, upon a doorbell,
+ * hardware fetches the object context for which the doorbell was rung and
+ * validates that the UAR through which the doorbell was rung matches the
+ * UAR ID of the object.
+ * If they do not match, the doorbell is silently ignored by the hardware. Of
+ * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
+ * Now in devx, as the devx kernel code does not manipulate the QP/SQ/CQ
+ * command mailboxes (except tagging them with UID), we expose the UAR ID to
+ * the user, so it can embed it in these objects in the expected specification
+ * format. So the only thing the user can do is hurt itself by creating a
+ * QP/SQ/CQ with a UAR ID other than its own, in which case other users may
+ * ring a doorbell on its objects.
+ * The consequence of that is that another user can schedule a QP/SQ of the
+ * buggy user for execution (just insert it into the hardware schedule queue
+ * or arm its CQ for event generation); no further harm is expected.
+ */
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ u32 user_idx;
+ s32 dev_idx;
+
+ c = devx_ufile2uctx(attrs);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (uverbs_copy_from(&user_idx, attrs,
+ MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
+ return -EFAULT;
+
+ dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
+ if (dev_idx < 0)
+ return dev_idx;
+
+ if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ &dev_idx, sizeof(dev_idx)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ void *cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
+ void *cmd_out;
+ int err, err2;
+ int uid;
+
+ c = devx_ufile2uctx(attrs);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+	/* Only a white list of general HCA commands is allowed for this method. */
+ if (!devx_is_general_cmd(cmd_in, dev))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ err = mlx5_cmd_do(dev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err && err != -EREMOTEIO)
+ return err;
+
+ err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
+ cmd_out_len);
+
+ return err2 ?: err;
+}
+
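+/*
+ * Build, into 'din', the destroy/dealloc command that matches the create
+ * command in 'in', so the object can be torn down on cleanup. Also report
+ * the id of the created object and the length of the destroy mailbox.
+ */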
+static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
+ u32 *dinlen,
+ u32 *obj_id)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+ u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);
+
+ *obj_id = devx_get_created_obj_id(in, out, opcode);
+ *dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);
+ MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
+ MLX5_SET(general_obj_in_cmd_hdr, din, obj_type,
+ MLX5_GET(general_obj_in_cmd_hdr, in, obj_type));
+ break;
+
+ case MLX5_CMD_OP_CREATE_UMEM:
+ MLX5_SET(destroy_umem_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_UMEM);
+ MLX5_SET(destroy_umem_in, din, umem_id, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(destroy_mkey_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_MKEY);
+ MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+ MLX5_SET(destroy_cq_in, din, cqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_PD:
+ MLX5_SET(dealloc_pd_in, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, din, pd, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+ MLX5_SET(dealloc_transport_domain_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, din, transport_domain,
+ *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_RMP:
+ MLX5_SET(destroy_rmp_in, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, din, rmpn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_SQ:
+ MLX5_SET(destroy_sq_in, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, din, sqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_RQ:
+ MLX5_SET(destroy_rq_in, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, din, rqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_RQT:
+ MLX5_SET(destroy_rqt_in, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, din, rqtn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_TIR:
+ MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_TIS:
+ MLX5_SET(destroy_tis_in, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, din, tisn, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+ MLX5_SET(dealloc_q_counter_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, din, counter_set_id, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
+ MLX5_SET(destroy_flow_table_in, din, other_vport,
+ MLX5_GET(create_flow_table_in, in, other_vport));
+ MLX5_SET(destroy_flow_table_in, din, vport_number,
+ MLX5_GET(create_flow_table_in, in, vport_number));
+ MLX5_SET(destroy_flow_table_in, din, table_type,
+ MLX5_GET(create_flow_table_in, in, table_type));
+ MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
+ MLX5_SET(destroy_flow_table_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+ break;
+ case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
+ MLX5_SET(destroy_flow_group_in, din, other_vport,
+ MLX5_GET(create_flow_group_in, in, other_vport));
+ MLX5_SET(destroy_flow_group_in, din, vport_number,
+ MLX5_GET(create_flow_group_in, in, vport_number));
+ MLX5_SET(destroy_flow_group_in, din, table_type,
+ MLX5_GET(create_flow_group_in, in, table_type));
+ MLX5_SET(destroy_flow_group_in, din, table_id,
+ MLX5_GET(create_flow_group_in, in, table_id));
+ MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
+ MLX5_SET(destroy_flow_group_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ break;
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
+ MLX5_SET(delete_fte_in, din, other_vport,
+ MLX5_GET(set_fte_in, in, other_vport));
+ MLX5_SET(delete_fte_in, din, vport_number,
+ MLX5_GET(set_fte_in, in, vport_number));
+ MLX5_SET(delete_fte_in, din, table_type,
+ MLX5_GET(set_fte_in, in, table_type));
+ MLX5_SET(delete_fte_in, din, table_id,
+ MLX5_GET(set_fte_in, in, table_id));
+ MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
+ MLX5_SET(delete_fte_in, din, opcode,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ MLX5_SET(dealloc_flow_counter_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ MLX5_SET(dealloc_flow_counter_in, din, flow_counter_id,
+ *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
+ MLX5_SET(dealloc_packet_reformat_context_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, din,
+ packet_reformat_id, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
+ MLX5_SET(dealloc_modify_header_context_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(dealloc_modify_header_context_in, din,
+ modify_header_id, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+ *dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_hierarchy,
+ MLX5_GET(create_scheduling_element_in, in,
+ scheduling_hierarchy));
+ MLX5_SET(destroy_scheduling_element_in, din,
+ scheduling_element_id, *obj_id);
+ MLX5_SET(destroy_scheduling_element_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+ break;
+ case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
+ MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
+ MLX5_SET(delete_vxlan_udp_dport_in, din, opcode,
+ MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+ break;
+ case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+ *dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
+ MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
+ MLX5_SET(delete_l2_table_entry_in, din, opcode,
+ MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
+ break;
+ case MLX5_CMD_OP_CREATE_QP:
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_SRQ:
+ MLX5_SET(destroy_srq_in, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, din, srqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ MLX5_SET(destroy_xrc_srq_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, din, xrc_srqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_DCT:
+ MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, din, dctn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_XRQ:
+ MLX5_SET(destroy_xrq_in, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ MLX5_SET(destroy_xrq_in, din, xrqn, *obj_id);
+ break;
+ case MLX5_CMD_OP_ATTACH_TO_MCG:
+ *dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
+ MLX5_SET(detach_from_mcg_in, din, qpn,
+ MLX5_GET(attach_to_mcg_in, in, qpn));
+ memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
+ MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
+ MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
+ MLX5_SET(detach_from_mcg_in, din, opcode,
+ MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, din, qpn, *obj_id);
+ break;
+ case MLX5_CMD_OP_ALLOC_XRCD:
+ MLX5_SET(dealloc_xrcd_in, din, opcode,
+ MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, din, xrcd, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_PSV:
+ MLX5_SET(destroy_psv_in, din, opcode,
+ MLX5_CMD_OP_DESTROY_PSV);
+ MLX5_SET(destroy_psv_in, din, psvn, *obj_id);
+ break;
+ default:
+		/* The entry must match one of the opcodes handled in devx_is_obj_create_cmd() */
+ WARN_ON(true);
+ break;
+ }
+}
+
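+/*
+ * An indirect (KLM/KSM) DEVX mkey may reference ODP MRs, so record it in
+ * the odp_mkeys xarray to let the page-fault handler resolve it.
+ */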
+static int devx_handle_mkey_indirect(struct devx_obj *obj,
+ struct mlx5_ib_dev *dev,
+ void *in, void *out)
+{
+ struct mlx5_ib_mkey *mkey = &obj->mkey;
+ void *mkc;
+ u8 key;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ key = MLX5_GET(mkc, mkc, mkey_7_0);
+ mkey->key = mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, out, mkey_index)) | key;
+ mkey->type = MLX5_MKEY_INDIRECT_DEVX;
+ mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+ init_waitqueue_head(&mkey->wait);
+
+ return mlx5r_store_odp_mkey(dev, mkey);
+}
+
+static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
+ struct devx_obj *obj,
+ void *in, int in_len)
+{
+ int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
+ MLX5_FLD_SZ_BYTES(create_mkey_in,
+ memory_key_mkey_entry);
+ void *mkc;
+ u8 access_mode;
+
+ if (in_len < min_len)
+ return -EINVAL;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
+ access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
+ access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
+ return 0;
+ }
+
+ MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ return 0;
+}
+
+static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
+ struct devx_event_subscription *sub)
+{
+ struct devx_event *event;
+ struct devx_obj_event *xa_val_level2;
+
+ if (sub->is_cleaned)
+ return;
+
+ sub->is_cleaned = 1;
+ list_del_rcu(&sub->xa_list);
+
+ if (list_empty(&sub->obj_list))
+ return;
+
+ list_del_rcu(&sub->obj_list);
+	/* drop the level-2 entry under this level-1 key if its obj_sub_list is now empty */
+ event = xa_load(&dev->devx_event_table.event_xa,
+ sub->xa_key_level1);
+ WARN_ON(!event);
+
+ xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
+ if (list_empty(&xa_val_level2->obj_sub_list)) {
+ xa_erase(&event->object_ids,
+ sub->xa_key_level2);
+ kfree_rcu(xa_val_level2, rcu);
+ }
+}
+
+static int devx_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct mlx5_devx_event_table *devx_event_table;
+ struct devx_obj *obj = uobject->object;
+ struct devx_event_subscription *sub_entry, *tmp;
+ struct mlx5_ib_dev *dev;
+ int ret;
+
+ dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
+ xa_erase(&obj->ib_dev->odp_mkeys,
+ mlx5_base_mkey(obj->mkey.key)))
+		/*
+		 * pagefault_single_data_segment() issues commands against
+		 * the mmkey; we must wait for those to stop before freeing
+		 * the mkey, as another allocation could get the same mkey
+		 * number.
+		 */
+ mlx5r_deref_wait_odp_mkey(&obj->mkey);
+
+ if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
+ else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
+ ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
+ else
+ ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
+ obj->dinlen, out, sizeof(out));
+ if (ret)
+ return ret;
+
+ devx_event_table = &dev->devx_event_table;
+
+ mutex_lock(&devx_event_table->event_xa_lock);
+ list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
+ devx_cleanup_subscription(dev, sub_entry);
+ mutex_unlock(&devx_event_table->event_xa_lock);
+
+ kfree(obj);
+ return ret;
+}
+
+static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
+{
+ struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
+ struct mlx5_devx_event_table *table;
+ struct devx_event *event;
+ struct devx_obj_event *obj_event;
+ u32 obj_id = mcq->cqn;
+
+ table = &obj->ib_dev->devx_event_table;
+ rcu_read_lock();
+ event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
+ if (!event)
+ goto out;
+
+ obj_event = xa_load(&event->object_ids, obj_id);
+ if (!obj_event)
+ goto out;
+
+ dispatch_event_fd(&obj_event->obj_sub_list, eqe);
+out:
+ rcu_read_unlock();
+}
+
+static bool is_apu_cq(struct mlx5_ib_dev *dev, const void *in)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, apu) ||
+ !MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), apu_cq))
+ return false;
+
+ return true;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
+ int cmd_in_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
+ void *cmd_out;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ struct devx_obj *obj;
+ u16 obj_type = 0;
+ int err, err2 = 0;
+ int uid;
+ u32 obj_id;
+ u16 opcode;
+
+ if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
+ return -EINVAL;
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ if (!devx_is_obj_create_cmd(cmd_in, &opcode))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
+ err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
+ if (err)
+ goto obj_free;
+ } else {
+ devx_set_umem_valid(cmd_in);
+ }
+
+ if (opcode == MLX5_CMD_OP_CREATE_DCT) {
+ obj->flags |= DEVX_OBJ_FLAGS_DCT;
+ err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
+ cmd_in_len, cmd_out, cmd_out_len);
+ } else if (opcode == MLX5_CMD_OP_CREATE_CQ &&
+ !is_apu_cq(dev, cmd_in)) {
+ obj->flags |= DEVX_OBJ_FLAGS_CQ;
+ obj->core_cq.comp = devx_cq_comp;
+ err = mlx5_create_cq(dev->mdev, &obj->core_cq,
+ cmd_in, cmd_in_len, cmd_out,
+ cmd_out_len);
+ } else {
+ err = mlx5_cmd_do(dev->mdev, cmd_in, cmd_in_len,
+ cmd_out, cmd_out_len);
+ }
+
+ if (err == -EREMOTEIO)
+ err2 = uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+ cmd_out, cmd_out_len);
+ if (err)
+ goto obj_free;
+
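+	/*
+	 * For bulk flow counter allocations, remember the bulk size so that
+	 * devx_is_valid_obj_id() accepts queries for any counter id within
+	 * the allocated range.
+	 */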
+ if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
+ u32 bulk = MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk_log_size);
+
+ if (bulk)
+ bulk = 1 << bulk;
+ else
+ bulk = 128UL * MLX5_GET(alloc_flow_counter_in,
+ cmd_in,
+ flow_counter_bulk);
+ obj->flow_counter_bulk_size = bulk;
+ }
+
+ uobj->object = obj;
+ INIT_LIST_HEAD(&obj->event_sub);
+ obj->ib_dev = dev;
+ devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
+ &obj_id);
+ WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
+ if (err)
+ goto obj_destroy;
+
+ if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
+ obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
+ obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
+
+ if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+ err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
+ if (err)
+ goto obj_destroy;
+ }
+ return 0;
+
+obj_destroy:
+ if (obj->flags & DEVX_OBJ_FLAGS_DCT)
+ mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
+ else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
+ mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
+ else
+ mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
+ sizeof(out));
+obj_free:
+ kfree(obj);
+ return err2 ?: err;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
+ void *cmd_out;
+ int err, err2;
+ int uid;
+
+ if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
+ return -EINVAL;
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ if (!devx_is_obj_modify_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ devx_set_umem_valid(cmd_in);
+
+ err = mlx5_cmd_do(mdev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err && err != -EREMOTEIO)
+ return err;
+
+ err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ cmd_out, cmd_out_len);
+
+ return err2 ?: err;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
+ int cmd_out_len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ void *cmd_out;
+ int err, err2;
+ int uid;
+ struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
+
+ if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
+ return -EINVAL;
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ if (!devx_is_obj_query_cmd(cmd_in))
+ return -EINVAL;
+
+ if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
+ return -EINVAL;
+
+ cmd_out = uverbs_zalloc(attrs, cmd_out_len);
+ if (IS_ERR(cmd_out))
+ return PTR_ERR(cmd_out);
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ err = mlx5_cmd_do(mdev->mdev, cmd_in,
+ uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
+ cmd_out, cmd_out_len);
+ if (err && err != -EREMOTEIO)
+ return err;
+
+ err2 = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ cmd_out, cmd_out_len);
+
+ return err2 ?: err;
+}
+
+struct devx_async_event_queue {
+ spinlock_t lock;
+ wait_queue_head_t poll_wait;
+ struct list_head event_list;
+ atomic_t bytes_in_use;
+ u8 is_destroyed:1;
+};
+
+struct devx_async_cmd_event_file {
+ struct ib_uobject uobj;
+ struct devx_async_event_queue ev_queue;
+ struct mlx5_async_ctx async_ctx;
+};
+
+static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
+{
+ spin_lock_init(&ev_queue->lock);
+ INIT_LIST_HEAD(&ev_queue->event_list);
+ init_waitqueue_head(&ev_queue->poll_wait);
+ atomic_set(&ev_queue->bytes_in_use, 0);
+ ev_queue->is_destroyed = 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct devx_async_cmd_event_file *ev_file;
+
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
+ struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
+
+ ev_file = container_of(uobj, struct devx_async_cmd_event_file,
+ uobj);
+ devx_init_event_queue(&ev_file->ev_queue);
+ mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
+ struct devx_async_event_file *ev_file;
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ u32 flags;
+ int err;
+
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
+ MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
+
+ if (err)
+ return err;
+
+ ev_file = container_of(uobj, struct devx_async_event_file,
+ uobj);
+ spin_lock_init(&ev_file->lock);
+ INIT_LIST_HEAD(&ev_file->event_list);
+ init_waitqueue_head(&ev_file->poll_wait);
+ if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
+ ev_file->omit_data = 1;
+ INIT_LIST_HEAD(&ev_file->subscribed_events_list);
+ ev_file->dev = dev;
+ get_device(&dev->ib_dev.dev);
+ return 0;
+}
+
+static void devx_query_callback(int status, struct mlx5_async_work *context)
+{
+ struct devx_async_data *async_data =
+ container_of(context, struct devx_async_data, cb_work);
+ struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
+ struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
+ unsigned long flags;
+
+ /*
+ * Note that if the struct devx_async_cmd_event_file uobj begins to be
+ * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
+ * routine returns, ensuring that it always remains valid here.
+ */
+ spin_lock_irqsave(&ev_queue->lock, flags);
+ list_add_tail(&async_data->list, &ev_queue->event_list);
+ spin_unlock_irqrestore(&ev_queue->lock, flags);
+
+ wake_up_interruptible(&ev_queue->poll_wait);
+}
+
+#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
+
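+/*
+ * Issue an object query asynchronously: the command is posted via the async
+ * command context of the given FD and its completion is queued there, to be
+ * read back through devx_async_cmd_event_read().
+ */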
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
+ u16 cmd_out_len;
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct ib_uobject *fd_uobj;
+ int err;
+ int uid;
+ struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
+ struct devx_async_cmd_event_file *ev_file;
+ struct devx_async_data *async_data;
+
+ if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
+ return -EINVAL;
+
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
+
+ if (!devx_is_obj_query_cmd(cmd_in))
+ return -EINVAL;
+
+ err = uverbs_get_const(&cmd_out_len, attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
+ if (err)
+ return err;
+
+ if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
+ return -EINVAL;
+
+ fd_uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
+ if (IS_ERR(fd_uobj))
+ return PTR_ERR(fd_uobj);
+
+ ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
+ uobj);
+
+ if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
+ MAX_ASYNC_BYTES_IN_USE) {
+ atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
+ return -EAGAIN;
+ }
+
+ async_data = kvzalloc(struct_size(async_data, hdr.out_data,
+ cmd_out_len), GFP_KERNEL);
+ if (!async_data) {
+ err = -ENOMEM;
+ goto sub_bytes;
+ }
+
+ err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
+ if (err)
+ goto free_async;
+
+ async_data->cmd_out_len = cmd_out_len;
+ async_data->mdev = mdev;
+ async_data->ev_file = ev_file;
+
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
+ uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
+ async_data->hdr.out_data,
+ async_data->cmd_out_len,
+ devx_query_callback, &async_data->cb_work);
+
+ if (err)
+ goto free_async;
+
+ return 0;
+
+free_async:
+ kvfree(async_data);
+sub_bytes:
+ atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
+ return err;
+}
+
+static void
+subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
+ u32 key_level1,
+ bool is_level2,
+ u32 key_level2)
+{
+ struct devx_event *event;
+ struct devx_obj_event *xa_val_level2;
+
+ /* Level 1 is valid for future use, no need to free */
+ if (!is_level2)
+ return;
+
+ event = xa_load(&devx_event_table->event_xa, key_level1);
+ WARN_ON(!event);
+
+ xa_val_level2 = xa_load(&event->object_ids,
+ key_level2);
+ if (list_empty(&xa_val_level2->obj_sub_list)) {
+ xa_erase(&event->object_ids,
+ key_level2);
+ kfree_rcu(xa_val_level2, rcu);
+ }
+}
+
+static int
+subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
+ u32 key_level1,
+ bool is_level2,
+ u32 key_level2)
+{
+ struct devx_obj_event *obj_event;
+ struct devx_event *event;
+ int err;
+
+ event = xa_load(&devx_event_table->event_xa, key_level1);
+ if (!event) {
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&event->unaffiliated_list);
+ xa_init(&event->object_ids);
+
+ err = xa_insert(&devx_event_table->event_xa,
+ key_level1,
+ event,
+ GFP_KERNEL);
+ if (err) {
+ kfree(event);
+ return err;
+ }
+ }
+
+ if (!is_level2)
+ return 0;
+
+ obj_event = xa_load(&event->object_ids, key_level2);
+ if (!obj_event) {
+ obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
+ if (!obj_event)
+			/* Level 1 is valid for future use, no need to free */
+ return -ENOMEM;
+
+ err = xa_insert(&event->object_ids,
+ key_level2,
+ obj_event,
+ GFP_KERNEL);
+ if (err) {
+ kfree(obj_event);
+ return err;
+ }
+ INIT_LIST_HEAD(&obj_event->obj_sub_list);
+ }
+
+ return 0;
+}
+
+static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
+ struct devx_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < num_events; i++) {
+ if (obj) {
+ if (!is_legacy_obj_event_num(event_type_num_list[i]))
+ return false;
+ } else if (!is_legacy_unaffiliated_event_num(
+ event_type_num_list[i])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+#define MAX_SUPP_EVENT_NUM 255
+static bool is_valid_events(struct mlx5_core_dev *dev,
+ int num_events, u16 *event_type_num_list,
+ struct devx_obj *obj)
+{
+ __be64 *aff_events;
+ __be64 *unaff_events;
+ int mask_entry;
+ int mask_bit;
+ int i;
+
+ if (MLX5_CAP_GEN(dev, event_cap)) {
+ aff_events = MLX5_CAP_DEV_EVENT(dev,
+ user_affiliated_events);
+ unaff_events = MLX5_CAP_DEV_EVENT(dev,
+ user_unaffiliated_events);
+ } else {
+ return is_valid_events_legacy(num_events, event_type_num_list,
+ obj);
+ }
+
+ for (i = 0; i < num_events; i++) {
+ if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
+ return false;
+
+ mask_entry = event_type_num_list[i] / 64;
+ mask_bit = event_type_num_list[i] % 64;
+
+ if (obj) {
+ /* CQ completion */
+ if (event_type_num_list[i] == 0)
+ continue;
+
+ if (!(be64_to_cpu(aff_events[mask_entry]) &
+ (1ull << mask_bit)))
+ return false;
+
+ continue;
+ }
+
+ if (!(be64_to_cpu(unaff_events[mask_entry]) &
+ (1ull << mask_bit)))
+ return false;
+ }
+
+ return true;
+}
+
+#define MAX_NUM_EVENTS 16
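+/*
+ * Subscriptions are keyed in two levels: level 1 is the event type combined
+ * with the object type (event_type | obj_type << 16), level 2 is the object
+ * id for affiliated events. All XA entries are allocated first; only then
+ * are the subscriptions linked onto the relevant lists, so the linking step
+ * cannot fail.
+ */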
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
+ attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ struct ib_uobject *fd_uobj;
+ struct devx_obj *obj = NULL;
+ struct devx_async_event_file *ev_file;
+ struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
+ u16 *event_type_num_list;
+ struct devx_event_subscription *event_sub, *tmp_sub;
+ struct list_head sub_list;
+ int redirect_fd;
+ bool use_eventfd = false;
+ int num_events;
+ int num_alloc_xa_entries = 0;
+ u16 obj_type = 0;
+ u64 cookie = 0;
+ u32 obj_id = 0;
+ int err;
+ int i;
+
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ if (!IS_ERR(devx_uobj)) {
+ obj = (struct devx_obj *)devx_uobj->object;
+ if (obj)
+ obj_id = get_dec_obj_id(obj->obj_id);
+ }
+
+ fd_uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
+ if (IS_ERR(fd_uobj))
+ return PTR_ERR(fd_uobj);
+
+ ev_file = container_of(fd_uobj, struct devx_async_event_file,
+ uobj);
+
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
+ err = uverbs_copy_from(&redirect_fd, attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
+ if (err)
+ return err;
+
+ use_eventfd = true;
+ }
+
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
+ if (use_eventfd)
+ return -EINVAL;
+
+ err = uverbs_copy_from(&cookie, attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
+ if (err)
+ return err;
+ }
+
+ num_events = uverbs_attr_ptr_get_array_size(
+ attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
+ sizeof(u16));
+
+ if (num_events < 0)
+ return num_events;
+
+ if (num_events > MAX_NUM_EVENTS)
+ return -EINVAL;
+
+ event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
+
+ if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&sub_list);
+
+	/* Protect against concurrent subscriptions to the same XA entries so
+	 * that all of them can succeed
+	 */
+ mutex_lock(&devx_event_table->event_xa_lock);
+ for (i = 0; i < num_events; i++) {
+ u32 key_level1;
+
+ if (obj)
+ obj_type = get_dec_obj_type(obj,
+ event_type_num_list[i]);
+ key_level1 = event_type_num_list[i] | obj_type << 16;
+
+ err = subscribe_event_xa_alloc(devx_event_table,
+ key_level1,
+ obj,
+ obj_id);
+ if (err)
+ goto err;
+
+ num_alloc_xa_entries++;
+ event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
+ if (!event_sub) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ list_add_tail(&event_sub->event_list, &sub_list);
+ uverbs_uobject_get(&ev_file->uobj);
+ if (use_eventfd) {
+ event_sub->eventfd =
+ eventfd_ctx_fdget(redirect_fd);
+
+ if (IS_ERR(event_sub->eventfd)) {
+ err = PTR_ERR(event_sub->eventfd);
+ event_sub->eventfd = NULL;
+ goto err;
+ }
+ }
+
+ event_sub->cookie = cookie;
+ event_sub->ev_file = ev_file;
+		/* May be needed upon cleanup of the devx object/subscription */
+ event_sub->xa_key_level1 = key_level1;
+ event_sub->xa_key_level2 = obj_id;
+ INIT_LIST_HEAD(&event_sub->obj_list);
+ }
+
+	/* Once all the allocations and the XA data insertions are done, we
+	 * can go ahead and add all the subscriptions to the relevant lists
+	 * without concern of a failure.
+	 */
+ list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
+ struct devx_event *event;
+ struct devx_obj_event *obj_event;
+
+ list_del_init(&event_sub->event_list);
+
+ spin_lock_irq(&ev_file->lock);
+ list_add_tail_rcu(&event_sub->file_list,
+ &ev_file->subscribed_events_list);
+ spin_unlock_irq(&ev_file->lock);
+
+ event = xa_load(&devx_event_table->event_xa,
+ event_sub->xa_key_level1);
+ WARN_ON(!event);
+
+ if (!obj) {
+ list_add_tail_rcu(&event_sub->xa_list,
+ &event->unaffiliated_list);
+ continue;
+ }
+
+ obj_event = xa_load(&event->object_ids, obj_id);
+ WARN_ON(!obj_event);
+ list_add_tail_rcu(&event_sub->xa_list,
+ &obj_event->obj_sub_list);
+ list_add_tail_rcu(&event_sub->obj_list,
+ &obj->event_sub);
+ }
+
+ mutex_unlock(&devx_event_table->event_xa_lock);
+ return 0;
+
+err:
+ list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
+ list_del(&event_sub->event_list);
+
+ subscribe_event_xa_dealloc(devx_event_table,
+ event_sub->xa_key_level1,
+ obj,
+ obj_id);
+
+ if (event_sub->eventfd)
+ eventfd_ctx_put(event_sub->eventfd);
+ uverbs_uobject_put(&event_sub->ev_file->uobj);
+ kfree(event_sub);
+ }
+
+ mutex_unlock(&devx_event_table->event_xa_lock);
+ return err;
+}
+
+static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
+ struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj, u32 access_flags)
+{
+ u64 addr;
+ size_t size;
+ int err;
+
+ if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
+ uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
+ return -EFAULT;
+
+ err = ib_check_mr_access(&dev->ib_dev, access_flags);
+ if (err)
+ return err;
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD)) {
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int dmabuf_fd;
+
+ err = uverbs_get_raw_fd(&dmabuf_fd, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD);
+ if (err)
+ return -EFAULT;
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(
+ &dev->ib_dev, addr, size, dmabuf_fd, access_flags);
+ if (IS_ERR(umem_dmabuf))
+ return PTR_ERR(umem_dmabuf);
+ obj->umem = &umem_dmabuf->umem;
+ } else {
+ obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access_flags);
+ if (IS_ERR(obj->umem))
+ return PTR_ERR(obj->umem);
+ }
+ return 0;
+}
+
+static unsigned int devx_umem_find_best_pgsize(struct ib_umem *umem,
+ unsigned long pgsz_bitmap)
+{
+ unsigned long page_size;
+
+ /* Don't bother checking larger page sizes as offset must be zero and
+ * total DEVX umem length must be equal to total umem length.
+ */
+ pgsz_bitmap &= GENMASK_ULL(max_t(u64, order_base_2(umem->length),
+ PAGE_SHIFT),
+ MLX5_ADAPTER_PAGE_SHIFT);
+ if (!pgsz_bitmap)
+ return 0;
+
+ page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, U64_MAX);
+ if (!page_size)
+ return 0;
+
+ /* If the page_size is less than the CPU page size then we can use the
+ * offset and create a umem which is a subset of the page list.
+ * For larger page sizes we can't be sure the DMA list reflects the
+ * VA so we must ensure that the umem extent is exactly equal to the
+ * page list. Reduce the page size until one of these cases is true.
+ */
+ while ((ib_umem_dma_offset(umem, page_size) != 0 ||
+ (umem->length % page_size) != 0) &&
+ page_size > PAGE_SIZE)
+ page_size /= 2;
+
+ return page_size;
+}
+
+static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
+ struct uverbs_attr_bundle *attrs,
+ struct devx_umem *obj,
+ struct devx_umem_reg_cmd *cmd,
+ int access)
+{
+ unsigned long pgsz_bitmap;
+ unsigned int page_size;
+ __be64 *mtt;
+ void *umem;
+ int ret;
+
+ /*
+ * If the user does not pass in pgsz_bitmap then the user promises not
+ * to use umem_offset!=0 in any commands that allocate on top of the
+ * umem.
+ *
+ * If the user wants to use a umem_offset then it must pass in
+ * pgsz_bitmap which guides the maximum page size and thus maximum
+ * object alignment inside the umem. See the PRM.
+ *
+ * Users are not allowed to use IOVA here, mkeys are not supported on
+ * umem.
+ */
+ ret = uverbs_get_const_default(&pgsz_bitmap, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ GENMASK_ULL(63,
+ min(PAGE_SHIFT, MLX5_ADAPTER_PAGE_SHIFT)));
+ if (ret)
+ return ret;
+
+ page_size = devx_umem_find_best_pgsize(obj->umem, pgsz_bitmap);
+ if (!page_size)
+ return -EINVAL;
+
+ cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
+ (MLX5_ST_SZ_BYTES(mtt) *
+ ib_umem_num_dma_blocks(obj->umem, page_size));
+ cmd->in = uverbs_zalloc(attrs, cmd->inlen);
+ if (IS_ERR(cmd->in))
+ return PTR_ERR(cmd->in);
+
+ umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
+ mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
+
+ MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
+ MLX5_SET64(umem, umem, num_of_mtt,
+ ib_umem_num_dma_blocks(obj->umem, page_size));
+ MLX5_SET(umem, umem, log_page_size,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(umem, umem, page_offset,
+ ib_umem_dma_offset(obj->umem, page_size));
+
+ if (mlx5_umem_needs_ats(dev, obj->umem, access))
+ MLX5_SET(umem, umem, ats, 1);
+
+ mlx5_ib_populate_pas(obj->umem, page_size, mtt,
+ (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
+ MLX5_IB_MTT_READ);
+ return 0;
+}
+
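+/*
+ * Register a DEVX umem: pin the user memory (or import a dmabuf), build a
+ * CREATE_UMEM command describing it at the best supported page size and
+ * return the umem id to userspace.
+ */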
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct devx_umem_reg_cmd cmd;
+ struct devx_umem *obj;
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
+ u32 obj_id;
+ struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ int access_flags;
+ int err;
+
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ err = uverbs_get_flags32(&access_flags, attrs,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_RELAXED_ORDERING);
+ if (err)
+ return err;
+
+ obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ err = devx_umem_get(dev, &c->ibucontext, attrs, obj, access_flags);
+ if (err)
+ goto err_obj_free;
+
+ err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd, access_flags);
+ if (err)
+ goto err_umem_release;
+
+ MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
+ err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
+ sizeof(cmd.out));
+ if (err)
+ goto err_umem_release;
+
+ obj->mdev = dev->mdev;
+ uobj->object = obj;
+ devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
+ sizeof(obj_id));
+ return err;
+
+err_umem_release:
+ ib_umem_release(obj->umem);
+err_obj_free:
+ kfree(obj);
+ return err;
+}
+
+static int devx_umem_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct devx_umem *obj = uobject->object;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ int err;
+
+ err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
+ if (err)
+ return err;
+
+ ib_umem_release(obj->umem);
+ kfree(obj);
+ return 0;
+}
+
+static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
+ unsigned long event_type)
+{
+ __be64 *unaff_events;
+ int mask_entry;
+ int mask_bit;
+
+ if (!MLX5_CAP_GEN(dev, event_cap))
+ return is_legacy_unaffiliated_event_num(event_type);
+
+ unaff_events = MLX5_CAP_DEV_EVENT(dev,
+ user_unaffiliated_events);
+ WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
+
+ mask_entry = event_type / 64;
+ mask_bit = event_type % 64;
+
+ if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
+ return false;
+
+ return true;
+}
+
+static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
+{
+ struct mlx5_eqe *eqe = data;
+ u32 obj_id = 0;
+
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ break;
+ case MLX5_EVENT_TYPE_XRQ_ERROR:
+ obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
+ break;
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
+ obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
+ break;
+ case MLX5_EVENT_TYPE_CQ_ERROR:
+ obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
+ break;
+ default:
+ obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
+ break;
+ }
+
+ return obj_id;
+}
+
+static int deliver_event(struct devx_event_subscription *event_sub,
+ const void *data)
+{
+ struct devx_async_event_file *ev_file;
+ struct devx_async_event_data *event_data;
+ unsigned long flags;
+
+ ev_file = event_sub->ev_file;
+
+ if (ev_file->omit_data) {
+ spin_lock_irqsave(&ev_file->lock, flags);
+ if (!list_empty(&event_sub->event_list) ||
+ ev_file->is_destroyed) {
+ spin_unlock_irqrestore(&ev_file->lock, flags);
+ return 0;
+ }
+
+ list_add_tail(&event_sub->event_list, &ev_file->event_list);
+ spin_unlock_irqrestore(&ev_file->lock, flags);
+ wake_up_interruptible(&ev_file->poll_wait);
+ return 0;
+ }
+
+ event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
+ GFP_ATOMIC);
+ if (!event_data) {
+ spin_lock_irqsave(&ev_file->lock, flags);
+ ev_file->is_overflow_err = 1;
+ spin_unlock_irqrestore(&ev_file->lock, flags);
+ return -ENOMEM;
+ }
+
+ event_data->hdr.cookie = event_sub->cookie;
+ memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
+
+ spin_lock_irqsave(&ev_file->lock, flags);
+ if (!ev_file->is_destroyed)
+ list_add_tail(&event_data->list, &ev_file->event_list);
+ else
+ kfree(event_data);
+ spin_unlock_irqrestore(&ev_file->lock, flags);
+ wake_up_interruptible(&ev_file->poll_wait);
+
+ return 0;
+}
+
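+/*
+ * Deliver an event to every subscription on the list: either signal the
+ * subscriber's eventfd or queue the event data on its event FD.
+ */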
+static void dispatch_event_fd(struct list_head *fd_list,
+ const void *data)
+{
+ struct devx_event_subscription *item;
+
+ list_for_each_entry_rcu(item, fd_list, xa_list) {
+ if (item->eventfd)
+ eventfd_signal(item->eventfd, 1);
+ else
+ deliver_event(item, data);
+ }
+}
+
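+/*
+ * Firmware event notifier: look up subscribers by event type (and by object
+ * id for affiliated events) under RCU and dispatch the event to them.
+ */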
+static int devx_event_notifier(struct notifier_block *nb,
+ unsigned long event_type, void *data)
+{
+ struct mlx5_devx_event_table *table;
+ struct mlx5_ib_dev *dev;
+ struct devx_event *event;
+ struct devx_obj_event *obj_event;
+ u16 obj_type = 0;
+ bool is_unaffiliated;
+ u32 obj_id;
+
+	/* Explicitly filter out kernel-internal events which may occur frequently */
+ if (event_type == MLX5_EVENT_TYPE_CMD ||
+ event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
+ return NOTIFY_OK;
+
+ table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
+ dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
+ is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
+
+ if (!is_unaffiliated)
+ obj_type = get_event_obj_type(event_type, data);
+
+ rcu_read_lock();
+ event = xa_load(&table->event_xa, event_type | (obj_type << 16));
+ if (!event) {
+ rcu_read_unlock();
+ return NOTIFY_DONE;
+ }
+
+ if (is_unaffiliated) {
+ dispatch_event_fd(&event->unaffiliated_list, data);
+ rcu_read_unlock();
+ return NOTIFY_OK;
+ }
+
+ obj_id = devx_get_obj_id_from_event(event_type, data);
+ obj_event = xa_load(&event->object_ids, obj_id);
+ if (!obj_event) {
+ rcu_read_unlock();
+ return NOTIFY_DONE;
+ }
+
+ dispatch_event_fd(&obj_event->obj_sub_list, data);
+
+ rcu_read_unlock();
+ return NOTIFY_OK;
+}
+
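+/*
+ * Allocate the privileged whitelist uid and, on success, register for
+ * firmware events on behalf of DEVX subscribers.
+ */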
+int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_devx_event_table *table = &dev->devx_event_table;
+ int uid;
+
+ uid = mlx5_ib_devx_create(dev, false);
+ if (uid > 0) {
+ dev->devx_whitelist_uid = uid;
+ xa_init(&table->event_xa);
+ mutex_init(&table->event_xa_lock);
+ MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
+ mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
+ }
+
+ return 0;
+}
+
+void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_devx_event_table *table = &dev->devx_event_table;
+ struct devx_event_subscription *sub, *tmp;
+ struct devx_event *event;
+ void *entry;
+ unsigned long id;
+
+ if (dev->devx_whitelist_uid) {
+ mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
+ xa_for_each(&table->event_xa, id, entry) {
+ event = entry;
+ list_for_each_entry_safe(
+ sub, tmp, &event->unaffiliated_list, xa_list)
+ devx_cleanup_subscription(dev, sub);
+ kfree(entry);
+ }
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
+ xa_destroy(&table->event_xa);
+
+ mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
+ }
+}
+
+static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
+ struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+ struct devx_async_data *event;
+ int ret = 0;
+ size_t eventsz;
+
+ spin_lock_irq(&ev_queue->lock);
+
+ while (list_empty(&ev_queue->event_list)) {
+ spin_unlock_irq(&ev_queue->lock);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(
+ ev_queue->poll_wait,
+ (!list_empty(&ev_queue->event_list) ||
+ ev_queue->is_destroyed))) {
+ return -ERESTARTSYS;
+ }
+
+ spin_lock_irq(&ev_queue->lock);
+ if (ev_queue->is_destroyed) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -EIO;
+ }
+ }
+
+ event = list_entry(ev_queue->event_list.next,
+ struct devx_async_data, list);
+ eventsz = event->cmd_out_len +
+ sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
+
+ if (eventsz > count) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -ENOSPC;
+ }
+
+ list_del(ev_queue->event_list.next);
+ spin_unlock_irq(&ev_queue->lock);
+
+ if (copy_to_user(buf, &event->hdr, eventsz))
+ ret = -EFAULT;
+ else
+ ret = eventsz;
+
+ atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
+ kvfree(event);
+ return ret;
+}
+
+static __poll_t devx_async_cmd_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
+ struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+ __poll_t pollflags = 0;
+
+ poll_wait(filp, &ev_queue->poll_wait, wait);
+
+ spin_lock_irq(&ev_queue->lock);
+ if (ev_queue->is_destroyed)
+ pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ else if (!list_empty(&ev_queue->event_list))
+ pollflags = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irq(&ev_queue->lock);
+
+ return pollflags;
+}
+
+static const struct file_operations devx_async_cmd_event_fops = {
+ .owner = THIS_MODULE,
+ .read = devx_async_cmd_event_read,
+ .poll = devx_async_cmd_event_poll,
+ .release = uverbs_uobject_fd_release,
+ .llseek = no_llseek,
+};
+
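+/*
+ * read() handler for the async event FD. In omit_data mode only the 64-bit
+ * subscription cookie is returned; otherwise the full EQE is copied together
+ * with its header. A prior allocation failure in deliver_event() is reported
+ * once as -EOVERFLOW.
+ */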
+static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct devx_async_event_file *ev_file = filp->private_data;
+ struct devx_event_subscription *event_sub;
+ struct devx_async_event_data *event;
+ int ret = 0;
+ size_t eventsz;
+ bool omit_data;
+ void *event_data;
+
+ omit_data = ev_file->omit_data;
+
+ spin_lock_irq(&ev_file->lock);
+
+ if (ev_file->is_overflow_err) {
+ ev_file->is_overflow_err = 0;
+ spin_unlock_irq(&ev_file->lock);
+ return -EOVERFLOW;
+ }
+
+ while (list_empty(&ev_file->event_list)) {
+ spin_unlock_irq(&ev_file->lock);
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ if (wait_event_interruptible(ev_file->poll_wait,
+ (!list_empty(&ev_file->event_list) ||
+ ev_file->is_destroyed))) {
+ return -ERESTARTSYS;
+ }
+
+ spin_lock_irq(&ev_file->lock);
+ if (ev_file->is_destroyed) {
+ spin_unlock_irq(&ev_file->lock);
+ return -EIO;
+ }
+ }
+
+ if (omit_data) {
+ event_sub = list_first_entry(&ev_file->event_list,
+ struct devx_event_subscription,
+ event_list);
+ eventsz = sizeof(event_sub->cookie);
+ event_data = &event_sub->cookie;
+ } else {
+ event = list_first_entry(&ev_file->event_list,
+ struct devx_async_event_data, list);
+ eventsz = sizeof(struct mlx5_eqe) +
+ sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
+ event_data = &event->hdr;
+ }
+
+ if (eventsz > count) {
+ spin_unlock_irq(&ev_file->lock);
+ return -EINVAL;
+ }
+
+ if (omit_data)
+ list_del_init(&event_sub->event_list);
+ else
+ list_del(&event->list);
+
+ spin_unlock_irq(&ev_file->lock);
+
+ if (copy_to_user(buf, event_data, eventsz))
+ /* This points to an application issue, not a kernel concern */
+ ret = -EFAULT;
+ else
+ ret = eventsz;
+
+ if (!omit_data)
+ kfree(event);
+ return ret;
+}
+
+static __poll_t devx_async_event_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct devx_async_event_file *ev_file = filp->private_data;
+ __poll_t pollflags = 0;
+
+ poll_wait(filp, &ev_file->poll_wait, wait);
+
+ spin_lock_irq(&ev_file->lock);
+ if (ev_file->is_destroyed)
+ pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ else if (!list_empty(&ev_file->event_list))
+ pollflags = EPOLLIN | EPOLLRDNORM;
+ spin_unlock_irq(&ev_file->lock);
+
+ return pollflags;
+}
+
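+/*
+ * RCU callback: release the eventfd and event-file references held by a
+ * subscription and free it once no reader can still be traversing it.
+ */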
+static void devx_free_subscription(struct rcu_head *rcu)
+{
+ struct devx_event_subscription *event_sub =
+ container_of(rcu, struct devx_event_subscription, rcu);
+
+ if (event_sub->eventfd)
+ eventfd_ctx_put(event_sub->eventfd);
+ uverbs_uobject_put(&event_sub->ev_file->uobj);
+ kfree(event_sub);
+}
+
+static const struct file_operations devx_async_event_fops = {
+ .owner = THIS_MODULE,
+ .read = devx_async_event_read,
+ .poll = devx_async_event_poll,
+ .release = uverbs_uobject_fd_release,
+ .llseek = no_llseek,
+};
+
+static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ struct devx_async_cmd_event_file *comp_ev_file =
+ container_of(uobj, struct devx_async_cmd_event_file,
+ uobj);
+ struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
+ struct devx_async_data *entry, *tmp;
+
+ spin_lock_irq(&ev_queue->lock);
+ ev_queue->is_destroyed = 1;
+ spin_unlock_irq(&ev_queue->lock);
+ wake_up_interruptible(&ev_queue->poll_wait);
+
+ mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
+
+ spin_lock_irq(&comp_ev_file->ev_queue.lock);
+ list_for_each_entry_safe(entry, tmp,
+ &comp_ev_file->ev_queue.event_list, list) {
+ list_del(&entry->list);
+ kvfree(entry);
+ }
+ spin_unlock_irq(&comp_ev_file->ev_queue.lock);
+}
+
+static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
+ enum rdma_remove_reason why)
+{
+ struct devx_async_event_file *ev_file =
+ container_of(uobj, struct devx_async_event_file,
+ uobj);
+ struct devx_event_subscription *event_sub, *event_sub_tmp;
+ struct mlx5_ib_dev *dev = ev_file->dev;
+
+ spin_lock_irq(&ev_file->lock);
+ ev_file->is_destroyed = 1;
+
+	/* free the pending event allocations */
+ if (ev_file->omit_data) {
+ struct devx_event_subscription *event_sub, *tmp;
+
+ list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
+ event_list)
+ list_del_init(&event_sub->event_list);
+
+ } else {
+ struct devx_async_event_data *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
+ spin_unlock_irq(&ev_file->lock);
+ wake_up_interruptible(&ev_file->poll_wait);
+
+ mutex_lock(&dev->devx_event_table.event_xa_lock);
+ /* delete the subscriptions which are related to this FD */
+ list_for_each_entry_safe(event_sub, event_sub_tmp,
+ &ev_file->subscribed_events_list, file_list) {
+ devx_cleanup_subscription(dev, event_sub);
+ list_del_rcu(&event_sub->file_list);
+ /* subscription may not be used by the read API any more */
+ call_rcu(&event_sub->rcu, devx_free_subscription);
+ }
+ mutex_unlock(&dev->devx_event_table.event_xa_lock);
+
+ put_device(&dev->ib_dev.dev);
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_UMEM_REG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_RAW_FD(MLX5_IB_ATTR_DEVX_UMEM_REG_DMABUF_FD,
+ UA_OPTIONAL),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ enum ib_access_flags),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_PGSZ_BITMAP,
+ u64),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_UMEM_DEREG,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_EQN,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_QUERY_UAR,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OTHER,
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
+ UVERBS_IDR_ANY_OBJECT,
+ UVERBS_ACCESS_WRITE,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
+ UVERBS_IDR_ANY_OBJECT,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
+ UVERBS_IDR_ANY_OBJECT,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
+ UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
+ u16, UA_MANDATORY),
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
+ UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
+ devx_async_cmd_event_destroy_uobj,
+ &devx_async_cmd_event_fops, "[devx_async_cmd]",
+ O_RDONLY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
+ UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
+ enum mlx5_ib_uapi_devx_create_event_channel_flags,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
+ devx_async_event_destroy_uobj,
+ &devx_async_event_fops, "[devx_async_event]",
+ O_RDONLY),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
+
+static bool devx_is_supported(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
+}
+
+const struct uapi_definition mlx5_ib_devx_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
+ {},
+};
diff --git a/drivers/infiniband/hw/mlx5/devx.h b/drivers/infiniband/hw/mlx5/devx.h
new file mode 100644
index 000000000..ee2213275
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/devx.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_DEVX_H
+#define _MLX5_IB_DEVX_H
+
+#include "mlx5_ib.h"
+
+#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
+struct devx_obj {
+ struct mlx5_ib_dev *ib_dev;
+ u64 obj_id;
+ u32 dinlen; /* destroy inbox length */
+ u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
+ u32 flags;
+ union {
+ struct mlx5_ib_mkey mkey;
+ struct mlx5_core_dct core_dct;
+ struct mlx5_core_cq core_cq;
+ u32 flow_counter_bulk_size;
+ };
+ struct list_head event_sub; /* holds devx_event_subscription entries */
+};
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
+int mlx5_ib_devx_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev);
+#else
+static inline int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
+{
+ return -EOPNOTSUPP;
+}
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
+static inline int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
+{
+ return 0;
+}
+static inline void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
+{
+}
+#endif
+#endif /* _MLX5_IB_DEVX_H */
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
new file mode 100644
index 000000000..3669c90b2
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -0,0 +1,607 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_std_types.h>
+#include "dm.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
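+/*
+ * Allocate device memory (MEMIC) by scanning the page bitmap for a free
+ * range and asking firmware to allocate from it. -EAGAIN from firmware
+ * moves the search window forward and retries until the MEMIC bar is
+ * exhausted.
+ */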
+static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
+ u64 length, u32 alignment)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
+ >> PAGE_SHIFT;
+ u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+ u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
+ u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+ u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
+ u32 mlx5_alignment;
+ u64 page_idx = 0;
+ int ret = 0;
+
+ if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
+ return -EINVAL;
+
+	/* The mlx5 device expresses alignment as 64 * 2^driver_value,
+	 * so the requested alignment needs to be normalized.
+	 */
+ mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
+ alignment - MLX5_MEMIC_BASE_ALIGN;
+ if (mlx5_alignment > max_alignment)
+ return -EINVAL;
+
+ MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
+ MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
+ MLX5_SET(alloc_memic_in, in, memic_size, length);
+ MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
+ mlx5_alignment);
+
+ while (page_idx < num_memic_hw_pages) {
+ spin_lock(&dm->lock);
+ page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
+ num_memic_hw_pages,
+ page_idx,
+ num_pages, 0);
+
+ if (page_idx < num_memic_hw_pages)
+ bitmap_set(dm->memic_alloc_pages,
+ page_idx, num_pages);
+
+ spin_unlock(&dm->lock);
+
+ if (page_idx >= num_memic_hw_pages)
+ break;
+
+ MLX5_SET64(alloc_memic_in, in, range_start_addr,
+ hw_start_addr + (page_idx * PAGE_SIZE));
+
+ ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
+ if (ret) {
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
+ page_idx, num_pages);
+ spin_unlock(&dm->lock);
+
+ if (ret == -EAGAIN) {
+ page_idx++;
+ continue;
+ }
+
+ return ret;
+ }
+
+ *addr = dev->bar_addr +
+ MLX5_GET64(alloc_memic_out, out, memic_start_addr);
+
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+ u64 length)
+{
+ struct mlx5_core_dev *dev = dm->dev;
+ u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
+ u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+ u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
+ u64 start_page_idx;
+ int err;
+
+ addr -= dev->bar_addr;
+ start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
+
+ MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
+ MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
+ MLX5_SET(dealloc_memic_in, in, memic_size, length);
+
+ err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
+ if (err)
+ return;
+
+ spin_lock(&dm->lock);
+ bitmap_clear(dm->memic_alloc_pages,
+ start_page_idx, num_pages);
+ spin_unlock(&dm->lock);
+}
+
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+ struct mlx5_core_dev *dev = dm->dev;
+
+ MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+ MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
+ MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+ MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+ mlx5_cmd_exec_in(dev, modify_memic, in);
+}
+
+static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation, phys_addr_t *op_addr)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
+ struct mlx5_core_dev *dev = dm->dev;
+ int err;
+
+ MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
+ MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
+ MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
+ MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);
+
+ err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
+ if (err)
+ return err;
+
+ *op_addr = dev->bar_addr +
+ MLX5_GET64(modify_memic_out, out, memic_operation_addr);
+ return 0;
+}
+
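+/*
+ * Insert an mmap entry in the pgoff range reserved for
+ * MLX5_IB_MMAP_DEVICE_MEM so that user space can later mmap() the device
+ * memory through it.
+ */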
+static int add_dm_mmap_entry(struct ib_ucontext *context,
+ struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
+ size_t size, u64 address)
+{
+ mentry->mmap_flag = mmap_flag;
+ mentry->address = address;
+
+ return rdma_user_mmap_entry_insert_range(
+ context, &mentry->rdma_entry, size,
+ MLX5_IB_MMAP_DEVICE_MEM << 16,
+ (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
+}
+
+static void mlx5_ib_dm_memic_free(struct kref *kref)
+{
+ struct mlx5_ib_dm_memic *dm =
+ container_of(kref, struct mlx5_ib_dm_memic, ref);
+ struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);
+
+ mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
+ kfree(dm);
+}
+
+static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
+ struct uverbs_attr_bundle *attrs)
+{
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+
+ page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ start_offset = op_entry->op_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+}
+
+static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dm_op_entry *op_entry;
+
+ op_entry = xa_load(&dm->ops, op);
+ if (!op_entry)
+ return -ENOENT;
+
+ return copy_op_to_user(op_entry, attrs);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
+ struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ struct ib_dm *ibdm = uobj->object;
+ struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
+ struct mlx5_ib_dm_op_entry *op_entry;
+ int err;
+ u8 op;
+
+ err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
+ if (err)
+ return err;
+
+ if (op >= BITS_PER_TYPE(u32))
+ return -EOPNOTSUPP;
+
+ if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&dm->ops_xa_lock);
+ err = map_existing_op(dm, op, attrs);
+ if (!err || err != -ENOENT)
+ goto err_unlock;
+
+ op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
+	if (!op_entry) {
+		err = -ENOMEM;
+		goto err_unlock;
+	}
+
+ err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
+ &op_entry->op_addr);
+ if (err) {
+ kfree(op_entry);
+ goto err_unlock;
+ }
+ op_entry->op = op;
+ op_entry->dm = dm;
+
+ err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
+ MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
+ op_entry->op_addr & PAGE_MASK);
+ if (err) {
+ mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
+ kfree(op_entry);
+ goto err_unlock;
+ }
+ /* From this point, entry will be freed by mmap_free */
+ kref_get(&dm->ref);
+
+ err = copy_op_to_user(op_entry, attrs);
+ if (err)
+ goto err_remove;
+
+ err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
+ if (err)
+ goto err_remove;
+ mutex_unlock(&dm->ops_xa_lock);
+
+ return 0;
+
+err_remove:
+ rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
+err_unlock:
+ mutex_unlock(&dm->ops_xa_lock);
+
+ return err;
+}
+
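+/*
+ * Allocate a MEMIC region: round the requested length up to the MEMIC base
+ * granularity, reserve it on the device and expose it through an mmap entry,
+ * returning the page index and start offset to user space.
+ */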
+static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
+ struct mlx5_ib_dm_memic *dm;
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+ u64 address;
+
+ if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
+ dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
+ dm->base.ibdm.device = ctx->device;
+
+ kref_init(&dm->ref);
+ xa_init(&dm->ops);
+ mutex_init(&dm->ops_xa_lock);
+ dm->req_length = attr->length;
+
+ err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
+ dm->base.size, attr->alignment);
+ if (err) {
+ kfree(dm);
+ return ERR_PTR(err);
+ }
+
+ address = dm->base.dev_addr & PAGE_MASK;
+ err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
+ dm->base.size, address);
+ if (err) {
+ mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
+ kfree(dm);
+ return ERR_PTR(err);
+ }
+
+ page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ goto err_copy;
+
+ start_offset = dm->base.dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs,
+ MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+ if (err)
+ goto err_copy;
+
+ return &dm->base.ibdm;
+
+err_copy:
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+ return ERR_PTR(err);
+}
+
+static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
+{
+ switch (uapi_type) {
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ return MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ return MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ default:
+ return MLX5_SW_ICM_TYPE_STEERING;
+ }
+}
+
+static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs,
+ int type)
+{
+ struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
+ enum mlx5_sw_icm_type icm_type;
+ struct mlx5_ib_dm_icm *dm;
+ u64 act_size;
+ int err;
+
+ if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW))
+ return ERR_PTR(-EPERM);
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2)))
+ return ERR_PTR(-EOPNOTSUPP);
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
+ !MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))
+ return ERR_PTR(-EOPNOTSUPP);
+ break;
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ dm = kzalloc(sizeof(*dm), GFP_KERNEL);
+ if (!dm)
+ return ERR_PTR(-ENOMEM);
+
+ dm->base.type = type;
+ dm->base.ibdm.device = ctx->device;
+
+	/* Allocation size must be a multiple of the basic block size
+	 * and a power of 2.
+	 */
+ act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
+ act_size = roundup_pow_of_two(act_size);
+
+ dm->base.size = act_size;
+ icm_type = get_icm_type(type);
+
+ err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
+ to_mucontext(ctx)->devx_uid,
+ &dm->base.dev_addr, &dm->obj_id);
+ if (err)
+ goto free;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ &dm->base.dev_addr, sizeof(dm->base.dev_addr));
+ if (err) {
+ mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
+ to_mucontext(ctx)->devx_uid,
+ dm->base.dev_addr, dm->obj_id);
+ goto free;
+ }
+ return &dm->base.ibdm;
+free:
+ kfree(dm);
+ return ERR_PTR(err);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ enum mlx5_ib_uapi_dm_type type;
+ int err;
+
+ err = uverbs_get_const_default(&type, attrs,
+ MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ MLX5_IB_UAPI_DM_TYPE_MEMIC);
+ if (err)
+ return ERR_PTR(err);
+
+ mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
+ type, attr->length, attr->alignment);
+
+ switch (type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ return handle_alloc_dm_memic(context, attr, attrs);
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ return handle_alloc_dm_sw_icm(context, attr, attrs, type);
+ default:
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+}
+
+static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
+{
+ struct mlx5_ib_dm_op_entry *entry;
+ unsigned long idx;
+
+ mutex_lock(&dm->ops_xa_lock);
+ xa_for_each(&dm->ops, idx, entry) {
+ xa_erase(&dm->ops, idx);
+ rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
+ }
+ mutex_unlock(&dm->ops_xa_lock);
+}
+
+static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
+{
+ dm_memic_remove_ops(dm);
+ rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
+}
+
+static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
+ struct mlx5_ib_dm_icm *dm)
+{
+ enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
+ struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
+ int err;
+
+ err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
+ dm->base.dev_addr, dm->obj_id);
+ if (!err)
+ kfree(dm);
+ return 0;
+}
+
+static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
+ &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dm *dm = to_mdm(ibdm);
+
+ switch (dm->type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ mlx5_dm_memic_dealloc(to_memic(ibdm));
+ return 0;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_dm *ibdm =
+ uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
+ struct mlx5_ib_dm *dm = to_mdm(ibdm);
+ struct mlx5_ib_dm_memic *memic;
+ u64 start_offset;
+ u16 page_idx;
+ int err;
+
+ if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
+ return -EOPNOTSUPP;
+
+ memic = to_memic(ibdm);
+ page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ &page_idx, sizeof(page_idx));
+ if (err)
+ return err;
+
+ start_offset = memic->base.dev_addr & ~PAGE_MASK;
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ &start_offset, sizeof(start_offset));
+ if (err)
+ return err;
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+ &memic->req_length,
+ sizeof(memic->req_length));
+}
+
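+/*
+ * mmap_free callback for device-memory entries: drop the MEMIC reference for
+ * plain mappings and, for MEMIC-op mappings, also release the firmware
+ * operation address.
+ */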
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+ struct mlx5_user_mmap_entry *mentry)
+{
+ struct mlx5_ib_dm_op_entry *op_entry;
+ struct mlx5_ib_dm_memic *mdm;
+
+ switch (mentry->mmap_flag) {
+ case MLX5_IB_MMAP_TYPE_MEMIC:
+ mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
+ kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+ break;
+ case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+ op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
+ mentry);
+ mdm = op_entry->dm;
+ mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
+ op_entry->op);
+ kfree(op_entry);
+ kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
+ break;
+ default:
+ WARN_ON(true);
+ }
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DM_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ, UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
+ enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_DM_MAP_OP_ADDR,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
+ UVERBS_OBJECT_DM,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
+ UVERBS_ATTR_TYPE(u8),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
+ &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));
+
+const struct uapi_definition mlx5_ib_dm_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
+ {},
+};
+
+const struct ib_device_ops mlx5_ib_dev_dm_ops = {
+ .alloc_dm = mlx5_ib_alloc_dm,
+ .dealloc_dm = mlx5_ib_dealloc_dm,
+ .reg_dm_mr = mlx5_ib_reg_dm_mr,
+};
diff --git a/drivers/infiniband/hw/mlx5/dm.h b/drivers/infiniband/hw/mlx5/dm.h
new file mode 100644
index 000000000..9674a80d8
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/dm.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_DM_H
+#define _MLX5_IB_DM_H
+
+#include "mlx5_ib.h"
+
+extern const struct ib_device_ops mlx5_ib_dev_dm_ops;
+extern const struct uapi_definition mlx5_ib_dm_defs[];
+
+struct mlx5_ib_dm {
+ struct ib_dm ibdm;
+ u32 type;
+ phys_addr_t dev_addr;
+ size_t size;
+};
+
+struct mlx5_ib_dm_op_entry {
+ struct mlx5_user_mmap_entry mentry;
+ phys_addr_t op_addr;
+ struct mlx5_ib_dm_memic *dm;
+ u8 op;
+};
+
+struct mlx5_ib_dm_memic {
+ struct mlx5_ib_dm base;
+ struct mlx5_user_mmap_entry mentry;
+ struct xarray ops;
+ struct mutex ops_xa_lock;
+ struct kref ref;
+ size_t req_length;
+};
+
+struct mlx5_ib_dm_icm {
+ struct mlx5_ib_dm base;
+ u32 obj_id;
+};
+
+static inline struct mlx5_ib_dm *to_mdm(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm, ibdm);
+}
+
+static inline struct mlx5_ib_dm_memic *to_memic(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm_memic, base.ibdm);
+}
+
+static inline struct mlx5_ib_dm_icm *to_icm(struct ib_dm *ibdm)
+{
+ return container_of(ibdm, struct mlx5_ib_dm_icm, base.ibdm);
+}
+
+struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
+ struct ib_ucontext *context,
+ struct ib_dm_alloc_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
+ struct mlx5_user_mmap_entry *mentry);
+void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
+ u64 length);
+void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
+ u8 operation);
+
+#endif /* _MLX5_IB_DM_H */
diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c
new file mode 100644
index 000000000..e32111117
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/doorbell.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/sched/mm.h>
+#include <rdma/ib_umem.h>
+
+#include "mlx5_ib.h"
+
+struct mlx5_ib_user_db_page {
+ struct list_head list;
+ struct ib_umem *umem;
+ unsigned long user_virt;
+ int refcnt;
+ struct mm_struct *mm;
+};
+
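+/*
+ * Map a user doorbell record page. Doorbells from the same mm and page share
+ * a single pinned umem, tracked by a refcount on the ucontext's
+ * db_page_list.
+ */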
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
+ struct mlx5_db *db)
+{
+ struct mlx5_ib_user_db_page *page;
+ int err = 0;
+
+ mutex_lock(&context->db_page_mutex);
+
+ list_for_each_entry(page, &context->db_page_list, list)
+ if ((current->mm == page->mm) &&
+ (page->user_virt == (virt & PAGE_MASK)))
+ goto found;
+
+ page = kmalloc(sizeof(*page), GFP_KERNEL);
+ if (!page) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ page->user_virt = (virt & PAGE_MASK);
+ page->refcnt = 0;
+ page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK,
+ PAGE_SIZE, 0);
+ if (IS_ERR(page->umem)) {
+ err = PTR_ERR(page->umem);
+ kfree(page);
+ goto out;
+ }
+ mmgrab(current->mm);
+ page->mm = current->mm;
+
+ list_add(&page->list, &context->db_page_list);
+
+found:
+ db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
+ (virt & ~PAGE_MASK);
+ db->u.user_page = page;
+ ++page->refcnt;
+
+out:
+ mutex_unlock(&context->db_page_mutex);
+
+ return err;
+}
+
+void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db)
+{
+ mutex_lock(&context->db_page_mutex);
+
+ if (!--db->u.user_page->refcnt) {
+ list_del(&db->u.user_page->list);
+ mmdrop(db->u.user_page->mm);
+ ib_umem_release(db->u.user_page->umem);
+ kfree(db->u.user_page);
+ }
+
+ mutex_unlock(&context->db_page_mutex);
+}
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
new file mode 100644
index 000000000..1022cebd0
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -0,0 +1,2897 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/uverbs_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/uverbs_std_types.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <rdma/ib_hdrs.h>
+#include <rdma/ib_umem.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include <linux/mlx5/fs_helpers.h>
+#include <linux/mlx5/eswitch.h>
+#include <net/inet_ecn.h>
+#include "mlx5_ib.h"
+#include "counters.h"
+#include "devx.h"
+#include "fs.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+enum {
+ MATCH_CRITERIA_ENABLE_OUTER_BIT,
+ MATCH_CRITERIA_ENABLE_MISC_BIT,
+ MATCH_CRITERIA_ENABLE_INNER_BIT,
+ MATCH_CRITERIA_ENABLE_MISC2_BIT
+};
+
+#define HEADER_IS_ZERO(match_criteria, headers) \
+ !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
+ 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
+
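+/*
+ * Build the match_criteria_enable bitmask from whichever header groups
+ * (outer, misc, inner, misc2) actually carry a non-zero mask.
+ */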
+static u8 get_match_criteria_enable(u32 *match_criteria)
+{
+ u8 match_criteria_enable;
+
+ match_criteria_enable =
+ (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
+ MATCH_CRITERIA_ENABLE_OUTER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
+ MATCH_CRITERIA_ENABLE_MISC_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
+ MATCH_CRITERIA_ENABLE_INNER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
+ MATCH_CRITERIA_ENABLE_MISC2_BIT;
+
+ return match_criteria_enable;
+}
+
+static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
+{
+ u8 entry_mask;
+ u8 entry_val;
+ int err = 0;
+
+ if (!mask)
+ goto out;
+
+ entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
+ ip_protocol);
+ entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
+ ip_protocol);
+ if (!entry_mask) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
+ goto out;
+ }
+ /* Don't override existing ip protocol */
+ if (mask != entry_mask || val != entry_val)
+ err = -EINVAL;
+out:
+ return err;
+}
+
+static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
+ bool inner)
+{
+ if (inner) {
+ MLX5_SET(fte_match_set_misc,
+ misc_c, inner_ipv6_flow_label, mask);
+ MLX5_SET(fte_match_set_misc,
+ misc_v, inner_ipv6_flow_label, val);
+ } else {
+ MLX5_SET(fte_match_set_misc,
+ misc_c, outer_ipv6_flow_label, mask);
+ MLX5_SET(fte_match_set_misc,
+ misc_v, outer_ipv6_flow_label, val);
+ }
+}
+
+static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
+{
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
+}
+
+static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
+{
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_label) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_LABEL))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_exp) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_EXP))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_s_bos) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_S_BOS))
+ return -EOPNOTSUPP;
+
+ if (MLX5_GET(fte_match_mpls, set_mask, mpls_ttl) &&
+ !(field_support & MLX5_FIELD_SUPPORT_MPLS_TTL))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+#define LAST_ETH_FIELD vlan_tag
+#define LAST_IB_FIELD sl
+#define LAST_IPV4_FIELD tos
+#define LAST_IPV6_FIELD traffic_class
+#define LAST_TCP_UDP_FIELD src_port
+#define LAST_TUNNEL_FIELD tunnel_id
+#define LAST_FLOW_TAG_FIELD tag_id
+#define LAST_DROP_FIELD size
+#define LAST_COUNTERS_FIELD counters
+
+/* Field is the last supported field */
+#define FIELDS_NOT_SUPPORTED(filter, field) \
+ memchr_inv((void *)&filter.field + sizeof(filter.field), 0, \
+ sizeof(filter) - offsetofend(typeof(filter), field))
+
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action)
+{
+ switch (maction->ib_action.type) {
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ action->modify_hdr =
+ maction->flow_action_raw.modify_hdr;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_DECAP) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
+ if (action->action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ return -EINVAL;
+ action->action |=
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ action->pkt_reformat =
+ maction->flow_action_raw.pkt_reformat;
+ return 0;
+ }
+ fallthrough;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
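+/*
+ * Translate a single ib_flow_spec into the mlx5 fte_match_param layout,
+ * filling both the criteria (mask) and value halves of @spec and, for action
+ * specs, the corresponding mlx5_flow_act flags.
+ */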
+static int parse_flow_attr(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_spec *spec,
+ const union ib_flow_spec *ib_spec,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_act *action, u32 prev_type)
+{
+ struct mlx5_flow_context *flow_context = &spec->flow_context;
+ u32 *match_c = spec->match_criteria;
+ u32 *match_v = spec->match_value;
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters);
+ void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ misc_parameters);
+ void *misc_params2_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ misc_parameters_2);
+ void *misc_params2_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ misc_parameters_2);
+ void *headers_c;
+ void *headers_v;
+ int match_ipv;
+ int ret;
+
+ if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+ headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ inner_headers);
+ match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_ip_version);
+ } else {
+ headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ match_ipv = MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ }
+
+ switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
+ case IB_FLOW_SPEC_ETH:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
+ return -EOPNOTSUPP;
+
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dmac_47_16),
+ ib_spec->eth.mask.dst_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dmac_47_16),
+ ib_spec->eth.val.dst_mac);
+
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ smac_47_16),
+ ib_spec->eth.mask.src_mac);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ smac_47_16),
+ ib_spec->eth.val.src_mac);
+
+ if (ib_spec->eth.mask.vlan_tag) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ cvlan_tag, 1);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ first_vid, ntohs(ib_spec->eth.val.vlan_tag));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ first_cfi,
+ ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ first_cfi,
+ ntohs(ib_spec->eth.val.vlan_tag) >> 12);
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ first_prio,
+ ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ first_prio,
+ ntohs(ib_spec->eth.val.vlan_tag) >> 13);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ethertype, ntohs(ib_spec->eth.mask.ether_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ethertype, ntohs(ib_spec->eth.val.ether_type));
+ break;
+ case IB_FLOW_SPEC_IPV4:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
+ return -EOPNOTSUPP;
+
+ if (match_ipv) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ip_version, 0xf);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ip_version, MLX5_FS_IPV4_VERSION);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ethertype, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ethertype, ETH_P_IP);
+ }
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.mask.src_ip,
+ sizeof(ib_spec->ipv4.mask.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.val.src_ip,
+ sizeof(ib_spec->ipv4.val.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.mask.dst_ip,
+ sizeof(ib_spec->ipv4.mask.dst_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ib_spec->ipv4.val.dst_ip,
+ sizeof(ib_spec->ipv4.val.dst_ip));
+
+ set_tos(headers_c, headers_v,
+ ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
+
+ if (set_proto(headers_c, headers_v,
+ ib_spec->ipv4.mask.proto,
+ ib_spec->ipv4.val.proto))
+ return -EINVAL;
+ break;
+ case IB_FLOW_SPEC_IPV6:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
+ return -EOPNOTSUPP;
+
+ if (match_ipv) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ip_version, 0xf);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ip_version, MLX5_FS_IPV6_VERSION);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ ethertype, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ ethertype, ETH_P_IPV6);
+ }
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.mask.src_ip,
+ sizeof(ib_spec->ipv6.mask.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.val.src_ip,
+ sizeof(ib_spec->ipv6.val.src_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.mask.dst_ip,
+ sizeof(ib_spec->ipv6.mask.dst_ip));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &ib_spec->ipv6.val.dst_ip,
+ sizeof(ib_spec->ipv6.val.dst_ip));
+
+ set_tos(headers_c, headers_v,
+ ib_spec->ipv6.mask.traffic_class,
+ ib_spec->ipv6.val.traffic_class);
+
+ if (set_proto(headers_c, headers_v,
+ ib_spec->ipv6.mask.next_hdr,
+ ib_spec->ipv6.val.next_hdr))
+ return -EINVAL;
+
+ set_flow_label(misc_params_c, misc_params_v,
+ ntohl(ib_spec->ipv6.mask.flow_label),
+ ntohl(ib_spec->ipv6.val.flow_label),
+ ib_spec->type & IB_FLOW_SPEC_INNER);
+ break;
+ case IB_FLOW_SPEC_ESP:
+ return -EOPNOTSUPP;
+ case IB_FLOW_SPEC_TCP:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+ LAST_TCP_UDP_FIELD))
+ return -EOPNOTSUPP;
+
+ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
+ ntohs(ib_spec->tcp_udp.mask.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
+ ntohs(ib_spec->tcp_udp.val.src_port));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_dport,
+ ntohs(ib_spec->tcp_udp.mask.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
+ ntohs(ib_spec->tcp_udp.val.dst_port));
+ break;
+ case IB_FLOW_SPEC_UDP:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
+ LAST_TCP_UDP_FIELD))
+ return -EOPNOTSUPP;
+
+ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
+ ntohs(ib_spec->tcp_udp.mask.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
+ ntohs(ib_spec->tcp_udp.val.src_port));
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport,
+ ntohs(ib_spec->tcp_udp.mask.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ ntohs(ib_spec->tcp_udp.val.dst_port));
+ break;
+ case IB_FLOW_SPEC_GRE:
+ if (ib_spec->gre.mask.c_ks_res0_ver)
+ return -EOPNOTSUPP;
+
+ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
+ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ IPPROTO_GRE);
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, gre_protocol,
+ ntohs(ib_spec->gre.mask.protocol));
+ MLX5_SET(fte_match_set_misc, misc_params_v, gre_protocol,
+ ntohs(ib_spec->gre.val.protocol));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
+ gre_key.nvgre.hi),
+ &ib_spec->gre.mask.key,
+ sizeof(ib_spec->gre.mask.key));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
+ gre_key.nvgre.hi),
+ &ib_spec->gre.val.key,
+ sizeof(ib_spec->gre.val.key));
+ break;
+ case IB_FLOW_SPEC_MPLS:
+ switch (prev_type) {
+ case IB_FLOW_SPEC_UDP:
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls_over_udp),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls_over_udp),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls_over_udp),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ break;
+ case IB_FLOW_SPEC_GRE:
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls_over_gre),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls_over_gre),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls_over_gre),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ break;
+ default:
+ if (ib_spec->type & IB_FLOW_SPEC_INNER) {
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_first_mpls),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ inner_first_mpls),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ inner_first_mpls),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ } else {
+ if (check_mpls_supp_fields(MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_first_mpls),
+ &ib_spec->mpls.mask.tag))
+ return -EOPNOTSUPP;
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_v,
+ outer_first_mpls),
+ &ib_spec->mpls.val.tag,
+ sizeof(ib_spec->mpls.val.tag));
+ memcpy(MLX5_ADDR_OF(fte_match_set_misc2, misc_params2_c,
+ outer_first_mpls),
+ &ib_spec->mpls.mask.tag,
+ sizeof(ib_spec->mpls.mask.tag));
+ }
+ }
+ break;
+ case IB_FLOW_SPEC_VXLAN_TUNNEL:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->tunnel.mask,
+ LAST_TUNNEL_FIELD))
+ return -EOPNOTSUPP;
+
+ MLX5_SET(fte_match_set_misc, misc_params_c, vxlan_vni,
+ ntohl(ib_spec->tunnel.mask.tunnel_id));
+ MLX5_SET(fte_match_set_misc, misc_params_v, vxlan_vni,
+ ntohl(ib_spec->tunnel.val.tunnel_id));
+ break;
+ case IB_FLOW_SPEC_ACTION_TAG:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->flow_tag,
+ LAST_FLOW_TAG_FIELD))
+ return -EOPNOTSUPP;
+ if (ib_spec->flow_tag.tag_id >= BIT(24))
+ return -EINVAL;
+
+ flow_context->flow_tag = ib_spec->flow_tag.tag_id;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
+ break;
+ case IB_FLOW_SPEC_ACTION_DROP:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->drop,
+ LAST_DROP_FIELD))
+ return -EOPNOTSUPP;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ break;
+ case IB_FLOW_SPEC_ACTION_HANDLE:
+ ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
+ flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
+ if (ret)
+ return ret;
+ break;
+ case IB_FLOW_SPEC_ACTION_COUNT:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->flow_count,
+ LAST_COUNTERS_FIELD))
+ return -EOPNOTSUPP;
+
+ /* for now support only one counters spec per flow */
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ return -EINVAL;
+
+ action->counters = ib_spec->flow_count.counters;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* If a flow could catch both multicast and unicast packets,
+ * it won't fall into the multicast flow steering table and this rule
+ * could steal other multicast packets.
+ */
+static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
+{
+ union ib_flow_spec *flow_spec;
+
+ if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
+ ib_attr->num_of_specs < 1)
+ return false;
+
+ flow_spec = (union ib_flow_spec *)(ib_attr + 1);
+ if (flow_spec->type == IB_FLOW_SPEC_IPV4) {
+ struct ib_flow_spec_ipv4 *ipv4_spec;
+
+ ipv4_spec = (struct ib_flow_spec_ipv4 *)flow_spec;
+ if (ipv4_is_multicast(ipv4_spec->val.dst_ip))
+ return true;
+
+ return false;
+ }
+
+ if (flow_spec->type == IB_FLOW_SPEC_ETH) {
+ struct ib_flow_spec_eth *eth_spec;
+
+ eth_spec = (struct ib_flow_spec_eth *)flow_spec;
+ return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
+ is_multicast_ether_addr(eth_spec->val.dst_mac);
+ }
+
+ return false;
+}
+
+static bool is_valid_ethertype(struct mlx5_core_dev *mdev,
+ const struct ib_flow_attr *flow_attr,
+ bool check_inner)
+{
+ union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+ int match_ipv = check_inner ?
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.inner_ip_version) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version);
+ int inner_bit = check_inner ? IB_FLOW_SPEC_INNER : 0;
+ bool ipv4_spec_valid, ipv6_spec_valid;
+ unsigned int ip_spec_type = 0;
+ bool has_ethertype = false;
+ unsigned int spec_index;
+ bool mask_valid = true;
+ u16 eth_type = 0;
+ bool type_valid;
+
+ /* Validate that ethertype is correct */
+ for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
+ if ((ib_spec->type == (IB_FLOW_SPEC_ETH | inner_bit)) &&
+ ib_spec->eth.mask.ether_type) {
+ mask_valid = (ib_spec->eth.mask.ether_type ==
+ htons(0xffff));
+ has_ethertype = true;
+ eth_type = ntohs(ib_spec->eth.val.ether_type);
+ } else if ((ib_spec->type == (IB_FLOW_SPEC_IPV4 | inner_bit)) ||
+ (ib_spec->type == (IB_FLOW_SPEC_IPV6 | inner_bit))) {
+ ip_spec_type = ib_spec->type;
+ }
+ ib_spec = (void *)ib_spec + ib_spec->size;
+ }
+
+ type_valid = (!has_ethertype) || (!ip_spec_type);
+ if (!type_valid && mask_valid) {
+ ipv4_spec_valid = (eth_type == ETH_P_IP) &&
+ (ip_spec_type == (IB_FLOW_SPEC_IPV4 | inner_bit));
+ ipv6_spec_valid = (eth_type == ETH_P_IPV6) &&
+ (ip_spec_type == (IB_FLOW_SPEC_IPV6 | inner_bit));
+
+ type_valid = (ipv4_spec_valid) || (ipv6_spec_valid) ||
+ (((eth_type == ETH_P_MPLS_UC) ||
+ (eth_type == ETH_P_MPLS_MC)) && match_ipv);
+ }
+
+ return type_valid;
+}
+
+static bool is_valid_attr(struct mlx5_core_dev *mdev,
+ const struct ib_flow_attr *flow_attr)
+{
+ return is_valid_ethertype(mdev, flow_attr, false) &&
+ is_valid_ethertype(mdev, flow_attr, true);
+}
+
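+/* Drop a reference on the priority's flow table and destroy the table once
+ * the last reference is gone.
+ */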
+static void put_flow_table(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *prio, bool ft_added)
+{
+ prio->refcount -= !!ft_added;
+ if (!prio->refcount) {
+ mlx5_destroy_flow_table(prio->flow_table);
+ prio->flow_table = NULL;
+ }
+}
+
+static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
+{
+ struct mlx5_ib_flow_handler *handler = container_of(flow_id,
+ struct mlx5_ib_flow_handler,
+ ibflow);
+ struct mlx5_ib_flow_handler *iter, *tmp;
+ struct mlx5_ib_dev *dev = handler->dev;
+
+ mutex_lock(&dev->flow_db->lock);
+
+ list_for_each_entry_safe(iter, tmp, &handler->list, list) {
+ mlx5_del_flow_rules(iter->rule);
+ put_flow_table(dev, iter->prio, true);
+ list_del(&iter->list);
+ kfree(iter);
+ }
+
+ mlx5_del_flow_rules(handler->rule);
+ put_flow_table(dev, handler->prio, true);
+ mlx5_ib_counters_clear_description(handler->ibcounters);
+ mutex_unlock(&dev->flow_db->lock);
+ if (handler->flow_matcher)
+ atomic_dec(&handler->flow_matcher->usecnt);
+ kfree(handler);
+
+ return 0;
+}
+
+static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
+{
+ priority *= 2;
+ if (!dont_trap)
+ priority++;
+ return priority;
+}
+
+enum flow_table_type {
+ MLX5_IB_FT_RX,
+ MLX5_IB_FT_TX
+};
+
+#define MLX5_FS_MAX_TYPES 6
+#define MLX5_FS_MAX_ENTRIES BIT(16)
+
+static bool mlx5_ib_shared_ft_allowed(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return MLX5_CAP_GEN(dev->mdev, shared_object_to_user_object_allowed);
+}
+
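+/* Back the given priority with a newly created auto-grouped flow table. */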
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_namespace *ns,
+ struct mlx5_ib_flow_prio *prio,
+ int priority,
+ int num_entries, int num_groups,
+ u32 flags)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft;
+
+ ft_attr.prio = priority;
+ ft_attr.max_fte = num_entries;
+ ft_attr.flags = flags;
+ ft_attr.autogroup.max_num_groups = num_groups;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft))
+ return ERR_CAST(ft);
+
+ prio->flow_table = ft;
+ prio->refcount = 0;
+ return prio;
+}
+
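+/* Pick the flow steering priority for a verbs flow, based on the attribute
+ * type and the RX/TX direction, and create its flow table on first use.
+ */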
+static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
+ struct ib_flow_attr *flow_attr,
+ enum flow_table_type ft_type)
+{
+ bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
+ struct mlx5_flow_namespace *ns = NULL;
+ enum mlx5_flow_namespace_type fn_type;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_flow_table *ft;
+ int max_table_size;
+ int num_entries;
+ int num_groups;
+ bool esw_encap;
+ u32 flags = 0;
+ int priority;
+
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ log_max_ft_size));
+ esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ switch (flow_attr->type) {
+ case IB_FLOW_ATTR_NORMAL:
+ if (flow_is_multicast_only(flow_attr) && !dont_trap)
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = ib_prio_to_core_prio(flow_attr->priority,
+ dont_trap);
+ if (ft_type == MLX5_IB_FT_RX) {
+ fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
+ prio = &dev->flow_db->prios[priority];
+ if (!dev->is_rep && !esw_encap &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (!dev->is_rep && !esw_encap &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else {
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(
+ dev->mdev, log_max_ft_size));
+ fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ prio = &dev->flow_db->egress_prios[priority];
+ if (!dev->is_rep && !esw_encap &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ }
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
+ num_entries = MLX5_FS_MAX_ENTRIES;
+ num_groups = MLX5_FS_MAX_TYPES;
+ break;
+ case IB_FLOW_ATTR_ALL_DEFAULT:
+ case IB_FLOW_ATTR_MC_DEFAULT:
+ ns = mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_LEFTOVERS);
+ build_leftovers_ft_param(&priority, &num_entries, &num_groups);
+ prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
+ break;
+ case IB_FLOW_ATTR_SNIFFER:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ allow_sniffer_and_nic_rx_shared_tir))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ ns = mlx5_get_flow_namespace(
+ dev->mdev, ft_type == MLX5_IB_FT_RX ?
+ MLX5_FLOW_NAMESPACE_SNIFFER_RX :
+ MLX5_FLOW_NAMESPACE_SNIFFER_TX);
+
+ prio = &dev->flow_db->sniffer[ft_type];
+ priority = 0;
+ num_entries = 1;
+ num_groups = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (!ns)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ max_table_size = min_t(int, num_entries, max_table_size);
+
+ ft = prio->flow_table;
+ if (!ft)
+ return _get_prio(dev, ns, prio, priority, max_table_size,
+ num_groups, flags);
+
+ return prio;
+}
+
+enum {
+ RDMA_RX_ECN_OPCOUNTER_PRIO,
+ RDMA_RX_CNP_OPCOUNTER_PRIO,
+};
+
+enum {
+ RDMA_TX_CNP_OPCOUNTER_PRIO,
+};
+
+static int set_vhca_port_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec)
+{
+ if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
+ ft_field_support.source_vhca_port) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ ft_field_support.source_vhca_port))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, &spec->match_criteria,
+ misc_parameters.source_vhca_port);
+ MLX5_SET(fte_match_param, &spec->match_value,
+ misc_parameters.source_vhca_port, port_num);
+
+ return 0;
+}
+
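+/* Match packets of the given IP version that carry the ECN CE codepoint,
+ * optionally restricted to a single vhca port on multi-port devices.
+ */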
+static int set_ecn_ce_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec, int ipv)
+{
+ if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
+ ft_field_support.outer_ip_version))
+ return -EOPNOTSUPP;
+
+ if (mlx5_core_mp_enabled(dev->mdev) &&
+ set_vhca_port_spec(dev, port_num, spec))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_ecn);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_ecn,
+ INET_ECN_CE);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
+ ipv);
+
+ spec->match_criteria_enable =
+ get_match_criteria_enable(spec->match_criteria);
+
+ return 0;
+}
+
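+/* Match RoCE CNP packets by their BTH opcode, optionally per vhca port. */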
+static int set_cnp_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec)
+{
+ if (mlx5_core_mp_enabled(dev->mdev) &&
+ set_vhca_port_spec(dev, port_num, spec))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.bth_opcode);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.bth_opcode,
+ IB_BTH_OPCODE_CNP);
+
+ spec->match_criteria_enable =
+ get_match_criteria_enable(spec->match_criteria);
+
+ return 0;
+}
+
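+/* Install the steering rules that feed an optional (per-op) flow counter:
+ * ECN CE marked packets or CNP packets in the RDMA RX/TX counter namespaces.
+ */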
+int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type)
+{
+ enum mlx5_flow_namespace_type fn_type;
+ int priority, i, err, spec_num;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dst;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_flow_spec *spec;
+
+ spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS:
+ if (set_ecn_ce_spec(dev, port_num, &spec[0],
+ MLX5_FS_IPV4_VERSION) ||
+ set_ecn_ce_spec(dev, port_num, &spec[1],
+ MLX5_FS_IPV6_VERSION)) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 2;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_ECN_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_CNP_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_CNP_OPCOUNTER_PRIO;
+ break;
+
+ default:
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
+ if (!ns) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ prio = &dev->flow_db->opfcs[type];
+ if (!prio->flow_table) {
+ prio = _get_prio(dev, ns, prio, priority,
+ dev->num_ports * MAX_OPFC_RULES, 1, 0);
+ if (IS_ERR(prio)) {
+ err = PTR_ERR(prio);
+ goto free;
+ }
+ }
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dst.counter_id = mlx5_fc_id(opfc->fc);
+
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ for (i = 0; i < spec_num; i++) {
+ opfc->rule[i] = mlx5_add_flow_rules(prio->flow_table, &spec[i],
+ &flow_act, &dst, 1);
+ if (IS_ERR(opfc->rule[i])) {
+ err = PTR_ERR(opfc->rule[i]);
+ goto del_rules;
+ }
+ }
+ prio->refcount += spec_num;
+ kfree(spec);
+
+ return 0;
+
+del_rules:
+ for (i -= 1; i >= 0; i--)
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, prio, false);
+free:
+ kfree(spec);
+ return err;
+}
+
+void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type)
+{
+ int i;
+
+ for (i = 0; i < MAX_OPFC_RULES && opfc->rule[i]; i++) {
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, &dev->flow_db->opfcs[type], true);
+ }
+}
+
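+/* When the flow is attached to an IPoIB underlay QP, also match on the BTH
+ * destination QP number so the rule only applies to that underlay.
+ */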
+static void set_underlay_qp(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_spec *spec,
+ u32 underlay_qpn)
+{
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ misc_parameters);
+ void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ if (underlay_qpn &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ ft_field_support.bth_dst_qp)) {
+ MLX5_SET(fte_match_set_misc,
+ misc_params_v, bth_dst_qp, underlay_qpn);
+ MLX5_SET(fte_match_set_misc,
+ misc_params_c, bth_dst_qp, 0xffffff);
+ }
+}
+
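+/* For switchdev representors, restrict the rule to traffic coming from the
+ * rep's vport: match on metadata register C0 when vport match metadata is
+ * enabled, otherwise on the legacy source_port field.
+ */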
+static void mlx5_ib_set_rule_source_port(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ void *misc;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters_2);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(rep->esw,
+ rep->vport));
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters_2);
+
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+ } else {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ MLX5_SET(fte_match_set_misc, misc, source_port, rep->vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ }
+}
+
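+/* Translate a verbs flow attribute into an mlx5 flow rule in the given
+ * priority's table, wiring up counters, the flow tag and the destination.
+ */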
+static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst,
+ u32 underlay_qpn,
+ struct mlx5_ib_create_flow *ucmd)
+{
+ struct mlx5_flow_table *ft = ft_prio->flow_table;
+ struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_destination dest_arr[2] = {};
+ struct mlx5_flow_destination *rule_dst = dest_arr;
+ const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
+ unsigned int spec_index;
+ u32 prev_type = 0;
+ int err = 0;
+ int dest_num = 0;
+ bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
+
+ if (!is_valid_attr(dev->mdev, flow_attr))
+ return ERR_PTR(-EINVAL);
+
+ if (dev->is_rep && is_egress)
+ return ERR_PTR(-EINVAL);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler || !spec) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ INIT_LIST_HEAD(&handler->list);
+
+ for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
+ err = parse_flow_attr(dev->mdev, spec,
+ ib_flow, flow_attr, &flow_act,
+ prev_type);
+ if (err < 0)
+ goto free;
+
+ prev_type = ((union ib_flow_spec *)ib_flow)->type;
+ ib_flow += ((union ib_flow_spec *)ib_flow)->size;
+ }
+
+ if (dst && !(flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP)) {
+ memcpy(&dest_arr[0], dst, sizeof(*dst));
+ dest_num++;
+ }
+
+ if (!flow_is_multicast_only(flow_attr))
+ set_underlay_qp(dev, spec, underlay_qpn);
+
+ if (dev->is_rep && flow_attr->type != IB_FLOW_ATTR_SNIFFER) {
+ struct mlx5_eswitch_rep *rep;
+
+ rep = dev->port[flow_attr->port - 1].rep;
+ if (!rep) {
+ err = -EINVAL;
+ goto free;
+ }
+
+ mlx5_ib_set_rule_source_port(dev, spec, rep);
+ }
+
+ spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ struct mlx5_ib_mcounters *mcounters;
+
+ err = mlx5_ib_flow_counters_set_data(flow_act.counters, ucmd);
+ if (err)
+ goto free;
+
+ mcounters = to_mcounters(flow_act.counters);
+ handler->ibcounters = flow_act.counters;
+ dest_arr[dest_num].type =
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest_arr[dest_num].counter_id =
+ mlx5_fc_id(mcounters->hw_cntrs_hndl);
+ dest_num++;
+ }
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ if (!dest_num)
+ rule_dst = NULL;
+ } else {
+ if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)
+ flow_act.action |=
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+ if (is_egress)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ else if (dest_num)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ if ((spec->flow_context.flags & FLOW_CONTEXT_HAS_TAG) &&
+ (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
+ flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+		mlx5_ib_warn(dev, "Flow tag %u and attribute type %x aren't allowed in leftovers\n",
+ spec->flow_context.flow_tag, flow_attr->type);
+ err = -EINVAL;
+ goto free;
+ }
+ handler->rule = mlx5_add_flow_rules(ft, spec,
+ &flow_act,
+ rule_dst, dest_num);
+
+ if (IS_ERR(handler->rule)) {
+ err = PTR_ERR(handler->rule);
+ goto free;
+ }
+
+ ft_prio->refcount++;
+ handler->prio = ft_prio;
+ handler->dev = dev;
+
+ ft_prio->flow_table = ft;
+free:
+ if (err && handler) {
+ mlx5_ib_counters_clear_description(handler->ibcounters);
+ kfree(handler);
+ }
+ kvfree(spec);
+ return err ? ERR_PTR(err) : handler;
+}
+
+static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0, NULL);
+}
+
+enum {
+ LEFTOVERS_MC,
+ LEFTOVERS_UC,
+};
+
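+/* Install the catch-all leftovers rules: a multicast rule always, plus a
+ * unicast rule chained to it for IB_FLOW_ATTR_ALL_DEFAULT.
+ */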
+static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_ib_flow_handler *handler_ucast = NULL;
+ struct mlx5_ib_flow_handler *handler = NULL;
+
+ static struct {
+ struct ib_flow_attr flow_attr;
+ struct ib_flow_spec_eth eth_flow;
+ } leftovers_specs[] = {
+ [LEFTOVERS_MC] = {
+ .flow_attr = {
+ .num_of_specs = 1,
+ .size = sizeof(leftovers_specs[0])
+ },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = {.dst_mac = {0x1} },
+ .val = {.dst_mac = {0x1} }
+ }
+ },
+ [LEFTOVERS_UC] = {
+ .flow_attr = {
+ .num_of_specs = 1,
+ .size = sizeof(leftovers_specs[0])
+ },
+ .eth_flow = {
+ .type = IB_FLOW_SPEC_ETH,
+ .size = sizeof(struct ib_flow_spec_eth),
+ .mask = {.dst_mac = {0x1} },
+ .val = {.dst_mac = {} }
+ }
+ }
+ };
+
+ handler = create_flow_rule(dev, ft_prio,
+ &leftovers_specs[LEFTOVERS_MC].flow_attr,
+ dst);
+ if (!IS_ERR(handler) &&
+ flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
+ handler_ucast = create_flow_rule(dev, ft_prio,
+ &leftovers_specs[LEFTOVERS_UC].flow_attr,
+ dst);
+ if (IS_ERR(handler_ucast)) {
+ mlx5_del_flow_rules(handler->rule);
+ ft_prio->refcount--;
+ kfree(handler);
+ handler = handler_ucast;
+ } else {
+ list_add(&handler_ucast->list, &handler->list);
+ }
+ }
+
+ return handler;
+}
+
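+/* Sniffer flows need a rule in both the RX and the TX sniffer tables. */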
+static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_rx,
+ struct mlx5_ib_flow_prio *ft_tx,
+ struct mlx5_flow_destination *dst)
+{
+ struct mlx5_ib_flow_handler *handler_rx;
+ struct mlx5_ib_flow_handler *handler_tx;
+ int err;
+ static const struct ib_flow_attr flow_attr = {
+ .num_of_specs = 0,
+ .type = IB_FLOW_ATTR_SNIFFER,
+ .size = sizeof(flow_attr)
+ };
+
+ handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst);
+ if (IS_ERR(handler_rx)) {
+ err = PTR_ERR(handler_rx);
+ goto err;
+ }
+
+ handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst);
+ if (IS_ERR(handler_tx)) {
+ err = PTR_ERR(handler_tx);
+ goto err_tx;
+ }
+
+ list_add(&handler_tx->list, &handler_rx->list);
+
+ return handler_rx;
+
+err_tx:
+ mlx5_del_flow_rules(handler_rx->rule);
+ ft_rx->refcount--;
+ kfree(handler_rx);
+err:
+ return ERR_PTR(err);
+}
+
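+/* ib_create_flow() entry point: parse the optional user command, validate
+ * the flags, pick the RX or TX flow table and dispatch to the normal,
+ * leftovers or sniffer rule builders.
+ */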
+static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
+ struct ib_flow_attr *flow_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_flow_handler *handler = NULL;
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
+ struct mlx5_ib_flow_prio *ft_prio;
+ bool is_egress = flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS;
+ struct mlx5_ib_create_flow *ucmd = NULL, ucmd_hdr;
+ size_t min_ucmd_sz, required_ucmd_sz;
+ int err;
+ int underlay_qpn;
+
+ if (udata && udata->inlen) {
+ min_ucmd_sz = offsetofend(struct mlx5_ib_create_flow, reserved);
+ if (udata->inlen < min_ucmd_sz)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ err = ib_copy_from_udata(&ucmd_hdr, udata, min_ucmd_sz);
+ if (err)
+ return ERR_PTR(err);
+
+		/* currently only a single counters data element is supported */
+ if (ucmd_hdr.ncounters_data > 1)
+ return ERR_PTR(-EINVAL);
+
+ required_ucmd_sz = min_ucmd_sz +
+ sizeof(struct mlx5_ib_flow_counters_data) *
+ ucmd_hdr.ncounters_data;
+ if (udata->inlen > required_ucmd_sz &&
+ !ib_is_udata_cleared(udata, required_ucmd_sz,
+ udata->inlen - required_ucmd_sz))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ ucmd = kzalloc(required_ucmd_sz, GFP_KERNEL);
+ if (!ucmd)
+ return ERR_PTR(-ENOMEM);
+
+ err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
+ if (err)
+ goto free_ucmd;
+ }
+
+ if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
+
+ if (flow_attr->flags &
+ ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP | IB_FLOW_ATTR_FLAGS_EGRESS)) {
+ err = -EINVAL;
+ goto free_ucmd;
+ }
+
+ if (is_egress &&
+ (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
+ flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+ err = -EINVAL;
+ goto free_ucmd;
+ }
+
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
+
+ mutex_lock(&dev->flow_db->lock);
+
+ ft_prio = get_flow_table(dev, flow_attr,
+ is_egress ? MLX5_IB_FT_TX : MLX5_IB_FT_RX);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto unlock;
+ }
+ if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
+ ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX);
+ if (IS_ERR(ft_prio_tx)) {
+ err = PTR_ERR(ft_prio_tx);
+ ft_prio_tx = NULL;
+ goto destroy_ft;
+ }
+ }
+
+ if (is_egress) {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ } else {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ if (mqp->is_rss)
+ dst->tir_num = mqp->rss_qp.tirn;
+ else
+ dst->tir_num = mqp->raw_packet_qp.rq.tirn;
+ }
+
+ switch (flow_attr->type) {
+ case IB_FLOW_ATTR_NORMAL:
+ underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
+ mqp->underlay_qpn :
+ 0;
+ handler = _create_flow_rule(dev, ft_prio, flow_attr, dst,
+ underlay_qpn, ucmd);
+ break;
+ case IB_FLOW_ATTR_ALL_DEFAULT:
+ case IB_FLOW_ATTR_MC_DEFAULT:
+ handler = create_leftovers_rule(dev, ft_prio, flow_attr, dst);
+ break;
+ case IB_FLOW_ATTR_SNIFFER:
+ handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst);
+ break;
+ default:
+ err = -EINVAL;
+ goto destroy_ft;
+ }
+
+ if (IS_ERR(handler)) {
+ err = PTR_ERR(handler);
+ handler = NULL;
+ goto destroy_ft;
+ }
+
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(dst);
+ kfree(ucmd);
+
+ return &handler->ibflow;
+
+destroy_ft:
+ put_flow_table(dev, ft_prio, false);
+ if (ft_prio_tx)
+ put_flow_table(dev, ft_prio_tx, false);
+unlock:
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(dst);
+free_ucmd:
+ kfree(ucmd);
+ return ERR_PTR(err);
+}
+
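+/* Resolve the flow priority for a raw-fs namespace type and user priority,
+ * creating its flow table on first use.
+ */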
+static struct mlx5_ib_flow_prio *
+_get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
+ enum mlx5_flow_namespace_type ns_type,
+ bool mcast)
+{
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_ib_flow_prio *prio = NULL;
+ int max_table_size = 0;
+ bool esw_encap;
+ u32 flags = 0;
+ int priority;
+
+ if (mcast)
+ priority = MLX5_IB_FLOW_MCAST_PRIO;
+ else
+ priority = ib_prio_to_core_prio(user_priority, false);
+
+ esw_encap = mlx5_eswitch_get_encap_mode(dev->mdev) !=
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE;
+ switch (ns_type) {
+ case MLX5_FLOW_NAMESPACE_BYPASS:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap) && !esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2) &&
+ !esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat) &&
+ !esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ break;
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
+ max_table_size = BIT(
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size));
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev,
+ reformat_l3_tunnel_to_l2) &&
+ esw_encap)
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ priority = user_priority;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, log_max_ft_size));
+ priority = user_priority;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ max_table_size = BIT(
+ MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, log_max_ft_size));
+ priority = user_priority;
+ break;
+ default:
+ break;
+ }
+
+ max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES);
+
+ ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
+ if (!ns)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ switch (ns_type) {
+ case MLX5_FLOW_NAMESPACE_BYPASS:
+ prio = &dev->flow_db->prios[priority];
+ break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ prio = &dev->flow_db->egress_prios[priority];
+ break;
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
+ prio = &dev->flow_db->fdb[priority];
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
+ prio = &dev->flow_db->rdma_rx[priority];
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ prio = &dev->flow_db->rdma_tx[priority];
+ break;
+	default:
+		return ERR_PTR(-EINVAL);
+ }
+
+ if (!prio)
+ return ERR_PTR(-EINVAL);
+
+ if (prio->flow_table)
+ return prio;
+
+ return _get_prio(dev, ns, prio, priority, max_table_size,
+ MLX5_FS_MAX_TYPES, flags);
+}
+
+static struct mlx5_ib_flow_handler *
+_create_raw_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ struct mlx5_flow_destination *dst,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context,
+ struct mlx5_flow_act *flow_act,
+ void *cmd_in, int inlen,
+ int dst_num)
+{
+ struct mlx5_ib_flow_handler *handler;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft = ft_prio->flow_table;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ handler = kzalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler || !spec) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ INIT_LIST_HEAD(&handler->list);
+
+ memcpy(spec->match_value, cmd_in, inlen);
+ memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params,
+ fs_matcher->mask_len);
+ spec->match_criteria_enable = fs_matcher->match_criteria_enable;
+ spec->flow_context = *flow_context;
+
+ handler->rule = mlx5_add_flow_rules(ft, spec,
+ flow_act, dst, dst_num);
+
+ if (IS_ERR(handler->rule)) {
+ err = PTR_ERR(handler->rule);
+ goto free;
+ }
+
+ ft_prio->refcount++;
+ handler->prio = ft_prio;
+ handler->dev = dev;
+ ft_prio->flow_table = ft;
+
+free:
+ if (err)
+ kfree(handler);
+ kvfree(spec);
+ return err ? ERR_PTR(err) : handler;
+}
+
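+/* A raw flow is considered multicast when both the mask and the value of
+ * either the destination MAC or the destination IPv4 address are multicast.
+ */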
+static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
+ void *match_v)
+{
+ void *match_c;
+ void *match_v_set_lyr_2_4, *match_c_set_lyr_2_4;
+ void *dmac, *dmac_mask;
+ void *ipv4, *ipv4_mask;
+
+ if (!(fs_matcher->match_criteria_enable &
+ (1 << MATCH_CRITERIA_ENABLE_OUTER_BIT)))
+ return false;
+
+ match_c = fs_matcher->matcher_mask.match_params;
+ match_v_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ match_c_set_lyr_2_4 = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+
+ dmac = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dmac_47_16);
+ dmac_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dmac_47_16);
+
+ if (is_multicast_ether_addr(dmac) &&
+ is_multicast_ether_addr(dmac_mask))
+ return true;
+
+ ipv4 = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_v_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ ipv4_mask = MLX5_ADDR_OF(fte_match_set_lyr_2_4, match_c_set_lyr_2_4,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+
+ if (ipv4_is_multicast(*(__be32 *)(ipv4)) &&
+ ipv4_is_multicast(*(__be32 *)(ipv4_mask)))
+ return true;
+
+ return false;
+}
+
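+/* Add a raw (matcher based) steering rule: pick the flow table for the
+ * matcher's namespace and priority, build the destination list (TIR, flow
+ * table, port and/or counter) and insert the rule.
+ */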
+static struct mlx5_ib_flow_handler *raw_fs_rule_add(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_context *flow_context, struct mlx5_flow_act *flow_act,
+ u32 counter_id, void *cmd_in, int inlen, int dest_id, int dest_type)
+{
+ struct mlx5_flow_destination *dst;
+ struct mlx5_ib_flow_prio *ft_prio;
+ struct mlx5_ib_flow_handler *handler;
+ int dst_num = 0;
+ bool mcast;
+ int err;
+
+ if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
+ return ERR_PTR(-ENOMEM);
+
+ dst = kcalloc(2, sizeof(*dst), GFP_KERNEL);
+ if (!dst)
+ return ERR_PTR(-ENOMEM);
+
+ mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
+ mutex_lock(&dev->flow_db->lock);
+
+ ft_prio = _get_flow_table(dev, fs_matcher->priority,
+ fs_matcher->ns_type, mcast);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto unlock;
+ }
+
+ switch (dest_type) {
+ case MLX5_FLOW_DESTINATION_TYPE_TIR:
+ dst[dst_num].type = dest_type;
+ dst[dst_num++].tir_num = dest_id;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+ dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
+ dst[dst_num++].ft_num = dest_id;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_PORT:
+ dst[dst_num++].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ break;
+ default:
+ break;
+ }
+
+ if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dst[dst_num].counter_id = counter_id;
+ dst_num++;
+ }
+
+ handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
+ fs_matcher, flow_context, flow_act,
+ cmd_in, inlen, dst_num);
+
+ if (IS_ERR(handler)) {
+ err = PTR_ERR(handler);
+ goto destroy_ft;
+ }
+
+ mutex_unlock(&dev->flow_db->lock);
+ atomic_inc(&fs_matcher->usecnt);
+ handler->flow_matcher = fs_matcher;
+
+ kfree(dst);
+
+ return handler;
+
+destroy_ft:
+ put_flow_table(dev, ft_prio, false);
+unlock:
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(dst);
+
+ return ERR_PTR(err);
+}
+
+static void destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
+{
+ switch (maction->flow_action_raw.sub_type) {
+ case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
+ mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.modify_hdr);
+ break;
+ case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
+ mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.pkt_reformat);
+ break;
+ case MLX5_IB_FLOW_ACTION_DECAP:
+ break;
+ default:
+ break;
+ }
+}
+
+static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
+{
+ struct mlx5_ib_flow_action *maction = to_mflow_act(action);
+
+ switch (action->type) {
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ destroy_flow_action_raw(maction);
+ break;
+ default:
+ WARN_ON(true);
+ break;
+ }
+
+ kfree(maction);
+ return 0;
+}
+
+static int
+mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
+ enum mlx5_flow_namespace_type *namespace)
+{
+ switch (table_type) {
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_BYPASS;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_EGRESS;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB:
+ *namespace = MLX5_FLOW_NAMESPACE_FDB_BYPASS;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_RDMA_TX;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
+ [MLX5_IB_FLOW_TYPE_NORMAL] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ .u.ptr = {
+ .len = sizeof(u16), /* data is priority */
+ .min_len = sizeof(u16),
+ }
+ },
+ [MLX5_IB_FLOW_TYPE_SNIFFER] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_ALL_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+ [MLX5_IB_FLOW_TYPE_MC_DEFAULT] = {
+ .type = UVERBS_ATTR_TYPE_PTR_IN,
+ UVERBS_ATTR_NO_DATA(),
+ },
+};
+
+static bool is_flow_dest(void *obj, int *dest_id, int *dest_type)
+{
+ struct devx_obj *devx_obj = obj;
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_DESTROY_TIR:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ *dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
+ obj_id);
+ return true;
+
+ case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ *dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
+ table_id);
+ return true;
+ default:
+ return false;
+ }
+}
+
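+/* Decode the destination attributes of MLX5_IB_METHOD_CREATE_FLOW and check
+ * that the chosen destination is legal for the matcher's namespace.
+ */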
+static int get_dests(struct uverbs_attr_bundle *attrs,
+ struct mlx5_ib_flow_matcher *fs_matcher, int *dest_id,
+ int *dest_type, struct ib_qp **qp, u32 *flags)
+{
+ bool dest_devx, dest_qp;
+ void *devx_obj;
+ int err;
+
+ dest_devx = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+ dest_qp = uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+
+ *flags = 0;
+ err = uverbs_get_flags32(flags, attrs, MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS |
+ MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP);
+ if (err)
+ return err;
+
+	/* Specifying both flags together is not allowed */
+ if (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS &&
+ *flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
+ return -EINVAL;
+
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
+ if (dest_devx && (dest_qp || *flags))
+ return -EINVAL;
+ else if (dest_qp && *flags)
+ return -EINVAL;
+ }
+
+	/* For FDB, allow only a DEVX object or a drop action as the destination */
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS &&
+ !(dest_devx || (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)))
+ return -EINVAL;
+
+ /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */
+ if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
+ ((!dest_devx && !dest_qp) || (dest_devx && dest_qp)))
+ return -EINVAL;
+
+ *qp = NULL;
+ if (dest_devx) {
+ devx_obj =
+ uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX);
+
+ /* Verify that the given DEVX object is a flow
+ * steering destination.
+ */
+ if (!is_flow_dest(devx_obj, dest_id, dest_type))
+ return -EINVAL;
+ /* Allow only flow table as dest when inserting to FDB or RDMA_RX */
+ if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) &&
+ *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ return -EINVAL;
+ } else if (dest_qp) {
+ struct mlx5_ib_qp *mqp;
+
+ *qp = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
+ if (IS_ERR(*qp))
+ return PTR_ERR(*qp);
+
+ if ((*qp)->qp_type != IB_QPT_RAW_PACKET)
+ return -EINVAL;
+
+ mqp = to_mqp(*qp);
+ if (mqp->is_rss)
+ *dest_id = mqp->rss_qp.tirn;
+ else
+ *dest_id = mqp->raw_packet_qp.rq.tirn;
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
+ !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
+ *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ }
+
+ if (*dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+ (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
+ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX))
+ return -EINVAL;
+
+ return 0;
+}
+
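+/* Return true if the DEVX object is a flow counter bulk; on success set
+ * *counter_id to the counter at the requested offset within the bulk.
+ */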
+static bool is_flow_counter(void *obj, u32 offset, u32 *counter_id)
+{
+ struct devx_obj *devx_obj = obj;
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);
+
+ if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {
+ if (offset && offset >= devx_obj->flow_counter_bulk_size)
+ return false;
+
+ *counter_id = MLX5_GET(dealloc_flow_counter_in,
+ devx_obj->dinbox,
+ flow_counter_id);
+ *counter_id += offset;
+ return true;
+ }
+
+ return false;
+}
+
+#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
+static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_flow_context flow_context = {.flow_tag =
+ MLX5_FS_DEFAULT_FLOW_TAG};
+ u32 *offset_attr, offset = 0, counter_id = 0;
+ int dest_id, dest_type = -1, inlen, len, ret, i;
+ struct mlx5_ib_flow_handler *flow_handler;
+ struct mlx5_ib_flow_matcher *fs_matcher;
+ struct ib_uobject **arr_flow_actions;
+ struct ib_uflow_resources *uflow_res;
+ struct mlx5_flow_act flow_act = {};
+ struct ib_qp *qp = NULL;
+ void *devx_obj, *cmd_in;
+ struct ib_uobject *uobj;
+ struct mlx5_ib_dev *dev;
+ u32 flags;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ uobj = uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
+ dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+
+ if (get_dests(attrs, fs_matcher, &dest_id, &dest_type, &qp, &flags))
+ return -EINVAL;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DEFAULT_MISS)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+
+ if (flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ len = uverbs_attr_get_uobjs_arr(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX, &arr_flow_actions);
+ if (len) {
+ devx_obj = arr_flow_actions[0]->object;
+
+ if (uverbs_attr_is_valid(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET)) {
+ int num_offsets = uverbs_attr_ptr_get_array_size(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ sizeof(u32));
+
+ if (num_offsets != 1)
+ return -EINVAL;
+
+ offset_attr = uverbs_attr_get_alloced_ptr(
+ attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET);
+ offset = *offset_attr;
+ }
+
+ if (!is_flow_counter(devx_obj, offset, &counter_id))
+ return -EINVAL;
+
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ }
+
+ cmd_in = uverbs_attr_get_alloced_ptr(
+ attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+ inlen = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
+
+ uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
+ if (!uflow_res)
+ return -ENOMEM;
+
+ len = uverbs_attr_get_uobjs_arr(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
+ for (i = 0; i < len; i++) {
+ struct mlx5_ib_flow_action *maction =
+ to_mflow_act(arr_flow_actions[i]->object);
+
+ ret = parse_flow_flow_action(maction, false, &flow_act);
+ if (ret)
+ goto err_out;
+ flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
+ arr_flow_actions[i]->object);
+ }
+
+ ret = uverbs_copy_from(&flow_context.flow_tag, attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_TAG);
+ if (!ret) {
+ if (flow_context.flow_tag >= BIT(24)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+ flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
+ }
+
+ flow_handler =
+ raw_fs_rule_add(dev, fs_matcher, &flow_context, &flow_act,
+ counter_id, cmd_in, inlen, dest_id, dest_type);
+ if (IS_ERR(flow_handler)) {
+ ret = PTR_ERR(flow_handler);
+ goto err_out;
+ }
+
+ ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);
+
+ return 0;
+err_out:
+ ib_uverbs_flow_resources_free(uflow_res);
+ return ret;
+}
+
+static int flow_matcher_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_flow_matcher *obj = uobject->object;
+
+ if (atomic_read(&obj->usecnt))
+ return -EBUSY;
+
+ kfree(obj);
+ return 0;
+}
+
+static int steering_anchor_create_ft(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+
+ if (ft_prio->anchor.ft)
+ return 0;
+
+ ns = mlx5_get_flow_namespace(dev->mdev, ns_type);
+ if (!ns)
+ return -EOPNOTSUPP;
+
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
+ ft_attr.prio = 0;
+ ft_attr.max_fte = 2;
+ ft_attr.level = 1;
+
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ ft_prio->anchor.ft = ft;
+
+ return 0;
+}
+
+static void steering_anchor_destroy_ft(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.ft) {
+ mlx5_destroy_flow_table(ft_prio->anchor.ft);
+ ft_prio->anchor.ft = NULL;
+ }
+}
+
+static int
+steering_anchor_create_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ void *flow_group_in;
+ int err = 0;
+
+ if (ft_prio->anchor.fg_drop)
+ return 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ goto out;
+ }
+
+ ft_prio->anchor.fg_drop = fg;
+
+out:
+ kvfree(flow_group_in);
+
+ return err;
+}
+
+static void
+steering_anchor_destroy_fg_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.fg_drop) {
+ mlx5_destroy_flow_group(ft_prio->anchor.fg_drop);
+ ft_prio->anchor.fg_drop = NULL;
+ }
+}
+
+static int
+steering_anchor_create_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ void *flow_group_in;
+ int err = 0;
+
+ if (ft_prio->anchor.fg_goto_table)
+ return 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ fg = mlx5_create_flow_group(ft_prio->anchor.ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ goto out;
+ }
+ ft_prio->anchor.fg_goto_table = fg;
+
+out:
+ kvfree(flow_group_in);
+
+ return err;
+}
+
+static void
+steering_anchor_destroy_fg_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.fg_goto_table) {
+ mlx5_destroy_flow_group(ft_prio->anchor.fg_goto_table);
+ ft_prio->anchor.fg_goto_table = NULL;
+ }
+}
+
+static int
+steering_anchor_create_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *handle;
+
+ if (ft_prio->anchor.rule_drop)
+ return 0;
+
+ flow_act.fg = ft_prio->anchor.fg_drop;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+
+ handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
+ NULL, 0);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ ft_prio->anchor.rule_drop = handle;
+
+ return 0;
+}
+
+static void steering_anchor_destroy_rule_drop(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.rule_drop) {
+ mlx5_del_flow_rules(ft_prio->anchor.rule_drop);
+ ft_prio->anchor.rule_drop = NULL;
+ }
+}
+
+static int
+steering_anchor_create_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *handle;
+
+ if (ft_prio->anchor.rule_goto_table)
+ return 0;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ flow_act.fg = ft_prio->anchor.fg_goto_table;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = ft_prio->flow_table;
+
+ handle = mlx5_add_flow_rules(ft_prio->anchor.ft, NULL, &flow_act,
+ &dest, 1);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ ft_prio->anchor.rule_goto_table = handle;
+
+ return 0;
+}
+
+static void
+steering_anchor_destroy_rule_goto_table(struct mlx5_ib_flow_prio *ft_prio)
+{
+ if (ft_prio->anchor.rule_goto_table) {
+ mlx5_del_flow_rules(ft_prio->anchor.rule_goto_table);
+ ft_prio->anchor.rule_goto_table = NULL;
+ }
+}
+
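+/* Create the steering anchor resources: an unmanaged two-entry flow table
+ * holding a drop rule and a "goto" rule that forwards into the priority's
+ * regular flow table.
+ */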
+static int steering_anchor_create_res(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ int err;
+
+ err = steering_anchor_create_ft(dev, ft_prio, ns_type);
+ if (err)
+ return err;
+
+ err = steering_anchor_create_fg_drop(ft_prio);
+ if (err)
+ goto destroy_ft;
+
+ err = steering_anchor_create_fg_goto_table(ft_prio);
+ if (err)
+ goto destroy_fg_drop;
+
+ err = steering_anchor_create_rule_drop(ft_prio);
+ if (err)
+ goto destroy_fg_goto_table;
+
+ err = steering_anchor_create_rule_goto_table(ft_prio);
+ if (err)
+ goto destroy_rule_drop;
+
+ return 0;
+
+destroy_rule_drop:
+ steering_anchor_destroy_rule_drop(ft_prio);
+destroy_fg_goto_table:
+ steering_anchor_destroy_fg_goto_table(ft_prio);
+destroy_fg_drop:
+ steering_anchor_destroy_fg_drop(ft_prio);
+destroy_ft:
+ steering_anchor_destroy_ft(ft_prio);
+
+ return err;
+}
+
+static void mlx5_steering_anchor_destroy_res(struct mlx5_ib_flow_prio *ft_prio)
+{
+ steering_anchor_destroy_rule_goto_table(ft_prio);
+ steering_anchor_destroy_rule_drop(ft_prio);
+ steering_anchor_destroy_fg_goto_table(ft_prio);
+ steering_anchor_destroy_fg_drop(ft_prio);
+ steering_anchor_destroy_ft(ft_prio);
+}
+
+static int steering_anchor_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_steering_anchor *obj = uobject->object;
+
+ if (atomic_read(&obj->usecnt))
+ return -EBUSY;
+
+ mutex_lock(&obj->dev->flow_db->lock);
+ if (!--obj->ft_prio->anchor.rule_goto_table_ref)
+ steering_anchor_destroy_rule_goto_table(obj->ft_prio);
+
+ put_flow_table(obj->dev, obj->ft_prio, true);
+ mutex_unlock(&obj->dev->flow_db->lock);
+
+ kfree(obj);
+ return 0;
+}
+
+static void fs_cleanup_anchor(struct mlx5_ib_flow_prio *prio,
+ int count)
+{
+ while (count--)
+ mlx5_steering_anchor_destroy_res(&prio[count]);
+}
+
+void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev)
+{
+ fs_cleanup_anchor(dev->flow_db->prios, MLX5_IB_NUM_FLOW_FT);
+ fs_cleanup_anchor(dev->flow_db->egress_prios, MLX5_IB_NUM_FLOW_FT);
+ fs_cleanup_anchor(dev->flow_db->sniffer, MLX5_IB_NUM_SNIFFER_FTS);
+ fs_cleanup_anchor(dev->flow_db->egress, MLX5_IB_NUM_EGRESS_FTS);
+ fs_cleanup_anchor(dev->flow_db->fdb, MLX5_IB_NUM_FDB_FTS);
+ fs_cleanup_anchor(dev->flow_db->rdma_rx, MLX5_IB_NUM_FLOW_FT);
+ fs_cleanup_anchor(dev->flow_db->rdma_tx, MLX5_IB_NUM_FLOW_FT);
+}
+
+static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
+ struct mlx5_ib_flow_matcher *obj)
+{
+ enum mlx5_ib_uapi_flow_table_type ft_type =
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX;
+ u32 flags;
+ int err;
+
+	/* MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE is the preferred attribute and older
+	 * users should switch to it; MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS is kept
+	 * only so that existing userspace does not break.  Passing both is rejected.
+	 */
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE) &&
+ uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS))
+ return -EINVAL;
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE)) {
+ err = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE);
+ if (err)
+ return err;
+
+ err = mlx5_ib_ft_type_to_namespace(ft_type, &obj->ns_type);
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ if (uverbs_attr_is_valid(attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS)) {
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ IB_FLOW_ATTR_FLAGS_EGRESS);
+ if (err)
+ return err;
+
+ if (flags)
+ return mlx5_ib_ft_type_to_namespace(
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX,
+ &obj->ns_type);
+ }
+
+ obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ struct mlx5_ib_flow_matcher *obj;
+ int err;
+
+ obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ obj->mask_len = uverbs_attr_get_len(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ err = uverbs_copy_from(&obj->matcher_mask,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
+ if (err)
+ goto end;
+
+ obj->flow_type = uverbs_attr_get_enum_id(
+ attrs, MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+
+ if (obj->flow_type == MLX5_IB_FLOW_TYPE_NORMAL) {
+ err = uverbs_copy_from(&obj->priority,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE);
+ if (err)
+ goto end;
+ }
+
+ err = uverbs_copy_from(&obj->match_criteria_enable,
+ attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA);
+ if (err)
+ goto end;
+
+ err = mlx5_ib_matcher_ns(attrs, obj);
+ if (err)
+ goto end;
+
+ if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS &&
+ mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) {
+ err = -EINVAL;
+ goto end;
+ }
+
+ uobj->object = obj;
+ obj->mdev = dev->mdev;
+ atomic_set(&obj->usecnt, 0);
+ return 0;
+
+end:
+ kfree(obj);
+ return err;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE);
+ struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ enum mlx5_ib_uapi_flow_table_type ib_uapi_ft_type;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_ib_steering_anchor *obj;
+ struct mlx5_ib_flow_prio *ft_prio;
+ u16 priority;
+ u32 ft_id;
+ int err;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ err = uverbs_get_const(&ib_uapi_ft_type, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE);
+ if (err)
+ return err;
+
+ err = mlx5_ib_ft_type_to_namespace(ib_uapi_ft_type, &ns_type);
+ if (err)
+ return err;
+
+ err = uverbs_copy_from(&priority, attrs,
+ MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY);
+ if (err)
+ return err;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return -ENOMEM;
+
+ mutex_lock(&dev->flow_db->lock);
+
+ ft_prio = _get_flow_table(dev, priority, ns_type, 0);
+ if (IS_ERR(ft_prio)) {
+ err = PTR_ERR(ft_prio);
+ goto free_obj;
+ }
+
+ ft_prio->refcount++;
+
+ if (!ft_prio->anchor.rule_goto_table_ref) {
+ err = steering_anchor_create_res(dev, ft_prio, ns_type);
+ if (err)
+ goto put_flow_table;
+ }
+
+ ft_prio->anchor.rule_goto_table_ref++;
+
+ ft_id = mlx5_flow_table_id(ft_prio->anchor.ft);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ &ft_id, sizeof(ft_id));
+ if (err)
+ goto destroy_res;
+
+ mutex_unlock(&dev->flow_db->lock);
+
+ uobj->object = obj;
+ obj->dev = dev;
+ obj->ft_prio = ft_prio;
+ atomic_set(&obj->usecnt, 0);
+
+ return 0;
+
+destroy_res:
+ --ft_prio->anchor.rule_goto_table_ref;
+ mlx5_steering_anchor_destroy_res(ft_prio);
+put_flow_table:
+ put_flow_table(dev, ft_prio, true);
+free_obj:
+ mutex_unlock(&dev->flow_db->lock);
+ kfree(obj);
+
+ return err;
+}
+
+static struct ib_flow_action *
+mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
+ enum mlx5_ib_uapi_flow_table_type ft_type,
+ u8 num_actions, void *in)
+{
+ enum mlx5_flow_namespace_type namespace;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return ERR_PTR(-ENOMEM);
+
+ maction->flow_action_raw.modify_hdr =
+ mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in);
+
+ if (IS_ERR(maction->flow_action_raw.modify_hdr)) {
+ ret = PTR_ERR(maction->flow_action_raw.modify_hdr);
+ kfree(maction);
+ return ERR_PTR(ret);
+ }
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
+ maction->flow_action_raw.dev = dev;
+
+ return &maction->ib_action;
+}
+
+static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ max_modify_header_actions) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ max_modify_header_actions) ||
+ MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ max_modify_header_actions);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
+ struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct ib_flow_action *action;
+ int num_actions;
+ void *in;
+ int ret;
+
+ if (!mlx5_ib_modify_header_supported(mdev))
+ return -EOPNOTSUPP;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
+
+ num_actions = uverbs_attr_ptr_get_array_size(
+ attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto));
+ if (num_actions < 0)
+ return num_actions;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
+ if (ret)
+ return ret;
+ action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
+ if (IS_ERR(action))
+ return PTR_ERR(action);
+
+ uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
+ IB_FLOW_ACTION_UNSPECIFIED);
+
+ return 0;
+}
+
+static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
+ u8 packet_reformat_type,
+ u8 ft_type)
+{
+ switch (packet_reformat_type) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE(ibdev->mdev,
+ encap_general_header);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
+ reformat_l2_to_l3_tunnel);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
+ reformat_l3_tunnel_to_l2);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int mlx5_ib_dv_to_prm_packet_reformat_type(u8 dv_prt, u8 *prm_prt)
+{
+ switch (dv_prt) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_flow_action_create_packet_reformat_ctx(
+ struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_action *maction,
+ u8 ft_type, u8 dv_prt,
+ void *in, size_t len)
+{
+ struct mlx5_pkt_reformat_params reformat_params;
+ enum mlx5_flow_namespace_type namespace;
+ u8 prm_prt;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ret;
+
+	ret = mlx5_ib_dv_to_prm_packet_reformat_type(dv_prt, &prm_prt);
+ if (ret)
+ return ret;
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = prm_prt;
+ reformat_params.size = len;
+ reformat_params.data = in;
+ maction->flow_action_raw.pkt_reformat =
+ mlx5_packet_reformat_alloc(dev->mdev, &reformat_params,
+ namespace);
+ if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
+ ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
+ return ret;
+ }
+
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
+ maction->flow_action_raw.dev = dev;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
+ struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&dv_prt, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
+ if (ret)
+ return ret;
+
+ if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
+ return -EOPNOTSUPP;
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return -ENOMEM;
+
+ if (dv_prt ==
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_DECAP;
+ maction->flow_action_raw.dev = mdev;
+ } else {
+ void *in;
+ int len;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto free_maction;
+ }
+
+ len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+
+ ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
+ maction, ft_type, dv_prt, in, len);
+ if (ret)
+ goto free_maction;
+ }
+
+ uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
+ IB_FLOW_ACTION_UNSPECIFIED);
+ return 0;
+
+free_maction:
+ kfree(maction);
+ return ret;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_CREATE_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
+ UVERBS_OBJECT_QP,
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_READ, 1,
+ MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL),
+ UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ UVERBS_ACCESS_READ, 1, 1,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
+ UVERBS_ATTR_MIN_SIZE(sizeof(u32)),
+ UA_OPTIONAL,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_FLAGS,
+ enum mlx5_ib_create_flow_flags,
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_DESTROY_FLOW,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
+ UVERBS_OBJECT_FLOW,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_fs,
+ UVERBS_OBJECT_FLOW,
+ &UVERBS_METHOD(MLX5_IB_METHOD_CREATE_FLOW),
+ &UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
+ set_add_copy_action_in_auto)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
+ UVERBS_ATTR_MIN_SIZE(1),
+ UA_ALLOC_AND_COPY,
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(
+ mlx5_ib_flow_actions,
+ UVERBS_OBJECT_FLOW_ACTION,
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
+ UVERBS_ATTR_SIZE(1, sizeof(struct mlx5_ib_match_params)),
+ UA_MANDATORY),
+ UVERBS_ATTR_ENUM_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
+ mlx5_ib_flow_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
+ UVERBS_ATTR_TYPE(u8),
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ enum ib_flow_flags,
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_OPTIONAL));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
+ UVERBS_TYPE_ALLOC_IDR(flow_matcher_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_STEERING_ANCHOR_CREATE,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UVERBS_TYPE_ALLOC_IDR(steering_anchor_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE),
+ &UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
+
+const struct uapi_definition mlx5_ib_flow_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_FLOW_MATCHER),
+ UAPI_DEF_CHAIN_OBJ_TREE(
+ UVERBS_OBJECT_FLOW,
+ &mlx5_ib_fs),
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
+ &mlx5_ib_flow_actions),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_STEERING_ANCHOR,
+ UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
+ {},
+};
+
+static const struct ib_device_ops flow_ops = {
+ .create_flow = mlx5_ib_create_flow,
+ .destroy_flow = mlx5_ib_destroy_flow,
+ .destroy_flow_action = mlx5_ib_destroy_flow_action,
+};
+
+int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
+{
+ dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
+
+ if (!dev->flow_db)
+ return -ENOMEM;
+
+ mutex_init(&dev->flow_db->lock);
+
+ ib_set_device_ops(&dev->ib_dev, &flow_ops);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/fs.h b/drivers/infiniband/hw/mlx5/fs.h
new file mode 100644
index 000000000..b9734904f
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/fs.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_FS_H
+#define _MLX5_IB_FS_H
+
+#include "mlx5_ib.h"
+
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int mlx5_ib_fs_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev);
+#else
+static inline int mlx5_ib_fs_init(struct mlx5_ib_dev *dev)
+{
+ dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
+
+ if (!dev->flow_db)
+ return -ENOMEM;
+
+ mutex_init(&dev->flow_db->lock);
+ return 0;
+}
+
+static inline void mlx5_ib_fs_cleanup_anchor(struct mlx5_ib_dev *dev) {}
+#endif
+
+static inline void mlx5_ib_fs_cleanup(struct mlx5_ib_dev *dev)
+{
+ /* When a steering anchor is created, a special flow table is also
+ * created for the user to reference. Since the user can reference it,
+ * the kernel cannot trust that when the user destroys the steering
+ * anchor, they no longer reference the flow table.
+ *
+ * To address this issue, when a user destroys a steering anchor, only
+ * the flow steering rule in the table is destroyed, but the table
+ * itself is kept to deal with the above scenario. The remaining
+	 * resources are only removed when the RDMA device itself is destroyed,
+	 * at which point it is safe to assume that all references are gone.
+ */
+ mlx5_ib_fs_cleanup_anchor(dev);
+ kfree(dev->flow_db);
+}
+#endif /* _MLX5_IB_FS_H */
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
new file mode 100644
index 000000000..b804f2dd5
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx5_ib.h"
+
+struct mlx5_ib_gsi_wr {
+ struct ib_cqe cqe;
+ struct ib_wc wc;
+ bool completed:1;
+};
+
+static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
+}
+
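+/*
+ * Sends posted on the GSI QP may be fanned out to several hardware TX QPs,
+ * but their completions must reach the caller in posting order. Each posted
+ * WR therefore takes a slot in the outstanding_wrs ring at outstanding_pi,
+ * and completions are forwarded to the GSI CQ only once all earlier slots
+ * have completed (tracked by outstanding_ci).
+ */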
+/* Call with gsi->lock locked */
+static void generate_completions(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ struct ib_cq *gsi_cq = mqp->ibqp.send_cq;
+ struct mlx5_ib_gsi_wr *wr;
+ u32 index;
+
+ for (index = gsi->outstanding_ci; index != gsi->outstanding_pi;
+ index++) {
+ wr = &gsi->outstanding_wrs[index % gsi->cap.max_send_wr];
+
+ if (!wr->completed)
+ break;
+
+ WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
+ wr->completed = false;
+ }
+
+ gsi->outstanding_ci = index;
+}
+
+static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
+ struct mlx5_ib_gsi_wr *wr =
+ container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
+ struct mlx5_ib_qp *mqp = container_of(gsi, struct mlx5_ib_qp, gsi);
+ u64 wr_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ wr->completed = true;
+ wr_id = wr->wc.wr_id;
+ wr->wc = *wc;
+ wr->wc.wr_id = wr_id;
+ wr->wc.qp = &mqp->ibqp;
+
+ generate_completions(mqp);
+ spin_unlock_irqrestore(&gsi->lock, flags);
+}
+
+int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
+ struct ib_qp_init_attr *attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_gsi_qp *gsi;
+ struct ib_qp_init_attr hw_init_attr = *attr;
+ const u8 port_num = attr->port_num;
+ int num_qps = 0;
+ int ret;
+
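+	/*
+	 * When the HCA can override the DETH source QPN, GSI sends are spread
+	 * over dedicated TX QPs: one per P_Key table entry on IB ports, or one
+	 * per LAG port when LAG is active. Otherwise num_qps stays 0 and sends
+	 * are posted directly on the regular hardware GSI QP.
+	 */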
+ if (mlx5_ib_deth_sqpn_cap(dev)) {
+ if (MLX5_CAP_GEN(dev->mdev,
+ port_type) == MLX5_CAP_PORT_TYPE_IB)
+ num_qps = pd->device->attrs.max_pkeys;
+ else if (dev->lag_active)
+ num_qps = dev->lag_ports;
+ }
+
+ gsi = &mqp->gsi;
+ gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
+ if (!gsi->tx_qps)
+ return -ENOMEM;
+
+ gsi->outstanding_wrs =
+ kcalloc(attr->cap.max_send_wr, sizeof(*gsi->outstanding_wrs),
+ GFP_KERNEL);
+ if (!gsi->outstanding_wrs) {
+ ret = -ENOMEM;
+ goto err_free_tx;
+ }
+
+ if (dev->devr.ports[port_num - 1].gsi) {
+ mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
+ port_num);
+ ret = -EBUSY;
+ goto err_free_wrs;
+ }
+ gsi->num_qps = num_qps;
+ spin_lock_init(&gsi->lock);
+
+ gsi->cap = attr->cap;
+ gsi->port_num = port_num;
+
+ gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
+ IB_POLL_SOFTIRQ);
+ if (IS_ERR(gsi->cq)) {
+ mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
+ PTR_ERR(gsi->cq));
+ ret = PTR_ERR(gsi->cq);
+ goto err_free_wrs;
+ }
+
+ hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
+ hw_init_attr.send_cq = gsi->cq;
+ if (num_qps) {
+ hw_init_attr.cap.max_send_wr = 0;
+ hw_init_attr.cap.max_send_sge = 0;
+ hw_init_attr.cap.max_inline_data = 0;
+ }
+
+ gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
+ if (IS_ERR(gsi->rx_qp)) {
+ mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
+ PTR_ERR(gsi->rx_qp));
+ ret = PTR_ERR(gsi->rx_qp);
+ goto err_destroy_cq;
+ }
+
+ dev->devr.ports[attr->port_num - 1].gsi = gsi;
+ return 0;
+
+err_destroy_cq:
+ ib_free_cq(gsi->cq);
+err_free_wrs:
+ kfree(gsi->outstanding_wrs);
+err_free_tx:
+ kfree(gsi->tx_qps);
+ return ret;
+}
+
+int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ const int port_num = gsi->port_num;
+ int qp_index;
+ int ret;
+
+ ret = ib_destroy_qp(gsi->rx_qp);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
+ ret);
+ return ret;
+ }
+ dev->devr.ports[port_num - 1].gsi = NULL;
+ gsi->rx_qp = NULL;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
+ if (!gsi->tx_qps[qp_index])
+ continue;
+ WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
+ gsi->tx_qps[qp_index] = NULL;
+ }
+
+ ib_free_cq(gsi->cq);
+
+ kfree(gsi->outstanding_wrs);
+ kfree(gsi->tx_qps);
+ return 0;
+}
+
+static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
+{
+ struct ib_pd *pd = gsi->rx_qp->pd;
+ struct ib_qp_init_attr init_attr = {
+ .event_handler = gsi->rx_qp->event_handler,
+ .qp_context = gsi->rx_qp->qp_context,
+ .send_cq = gsi->cq,
+ .recv_cq = gsi->rx_qp->recv_cq,
+ .cap = {
+ .max_send_wr = gsi->cap.max_send_wr,
+ .max_send_sge = gsi->cap.max_send_sge,
+ .max_inline_data = gsi->cap.max_inline_data,
+ },
+ .qp_type = IB_QPT_UD,
+ .create_flags = MLX5_IB_QP_CREATE_SQPN_QP1,
+ };
+
+ return ib_create_qp(pd, &init_attr);
+}
+
+static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
+ u16 pkey_index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct ib_qp_attr attr;
+ int mask;
+ int ret;
+
+ mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
+ attr.qp_state = IB_QPS_INIT;
+ attr.pkey_index = pkey_index;
+ attr.qkey = IB_QP1_QKEY;
+ attr.port_num = gsi->port_num;
+ ret = ib_modify_qp(qp, &attr, mask);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ attr.qp_state = IB_QPS_RTR;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ attr.qp_state = IB_QPS_RTS;
+ attr.sq_psn = 0;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
+ if (ret) {
+ mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n",
+ qp->qp_num, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
+{
+ struct ib_device *device = gsi->rx_qp->device;
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ int pkey_index = qp_index;
+ struct mlx5_ib_qp *mqp;
+ struct ib_qp *qp;
+ unsigned long flags;
+ u16 pkey;
+ int ret;
+
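+	/* RoCE ports expose a single default P_Key at index 0, so the TX QPs
+	 * are distinguished by LAG port rather than by P_Key index.
+	 */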
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ pkey_index = 0;
+
+ ret = ib_query_pkey(device, gsi->port_num, pkey_index, &pkey);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ if (!pkey) {
+ mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d. Skipping.\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ qp = gsi->tx_qps[qp_index];
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ if (qp) {
+ mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n",
+ gsi->port_num, qp_index);
+ return;
+ }
+
+ qp = create_gsi_ud_qp(gsi);
+ if (IS_ERR(qp)) {
+ mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
+ PTR_ERR(qp));
+ return;
+ }
+
+ mqp = to_mqp(qp);
+ if (dev->lag_active)
+ mqp->gsi_lag_port = qp_index + 1;
+ ret = modify_to_rts(gsi, qp, pkey_index);
+ if (ret)
+ goto err_destroy_qp;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ WARN_ON_ONCE(gsi->tx_qps[qp_index]);
+ gsi->tx_qps[qp_index] = qp;
+ spin_unlock_irqrestore(&gsi->lock, flags);
+
+ return;
+
+err_destroy_qp:
+ WARN_ON_ONCE(qp);
+}
+
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ u16 qp_index;
+ int ret;
+
+ mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
+
+ ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
+ if (ret) {
+ mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
+ return ret;
+ }
+
+ if (to_mqp(gsi->rx_qp)->state != IB_QPS_RTS)
+ return 0;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+ setup_qp(gsi, qp_index);
+ return 0;
+}
+
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ int ret;
+
+ ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
+ qp_init_attr->cap = gsi->cap;
+ return ret;
+}
+
+/* Call with gsi->lock locked */
+static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_qp *mqp,
+ struct ib_ud_wr *wr, struct ib_wc *wc)
+{
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ struct mlx5_ib_gsi_wr *gsi_wr;
+
+ if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
+ mlx5_ib_warn(dev, "no available GSI work request.\n");
+ return -ENOMEM;
+ }
+
+ gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi %
+ gsi->cap.max_send_wr];
+ gsi->outstanding_pi++;
+
+ if (!wc) {
+ memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
+ gsi_wr->wc.pkey_index = wr->pkey_index;
+ gsi_wr->wc.wr_id = wr->wr.wr_id;
+ } else {
+ gsi_wr->wc = *wc;
+ gsi_wr->completed = true;
+ }
+
+ gsi_wr->cqe.done = &handle_single_completion;
+ wr->wr.wr_cqe = &gsi_wr->cqe;
+
+ return 0;
+}
+
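+/*
+ * Complete a send for which no TX QP exists (e.g. its P_Key index maps to an
+ * invalid P_Key) as a success without posting it to hardware, so the caller's
+ * completion accounting stays consistent while the MAD is silently dropped.
+ */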
+/* Call with gsi->lock locked */
+static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_qp *mqp, struct ib_ud_wr *wr)
+{
+ struct ib_wc wc = {
+ { .wr_id = wr->wr.wr_id },
+ .status = IB_WC_SUCCESS,
+ .opcode = IB_WC_SEND,
+ .qp = &mqp->ibqp,
+ };
+ int ret;
+
+ ret = mlx5_ib_add_outstanding_wr(mqp, wr, &wc);
+ if (ret)
+ return ret;
+
+ generate_completions(mqp);
+
+ return 0;
+}
+
+/* Call with gsi->lock locked */
+static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+ struct mlx5_ib_ah *ah = to_mah(wr->ah);
+ int qp_index = wr->pkey_index;
+
+ if (!gsi->num_qps)
+ return gsi->rx_qp;
+
+ if (dev->lag_active && ah->xmit_port)
+ qp_index = ah->xmit_port - 1;
+
+ if (qp_index >= gsi->num_qps)
+ return NULL;
+
+ return gsi->tx_qps[qp_index];
+}
+
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+ struct ib_qp *tx_qp;
+ unsigned long flags;
+ int ret;
+
+ for (; wr; wr = wr->next) {
+ struct ib_ud_wr cur_wr = *ud_wr(wr);
+
+ cur_wr.wr.next = NULL;
+
+ spin_lock_irqsave(&gsi->lock, flags);
+ tx_qp = get_tx_qp(gsi, &cur_wr);
+ if (!tx_qp) {
+ ret = mlx5_ib_gsi_silent_drop(mqp, &cur_wr);
+ if (ret)
+ goto err;
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ continue;
+ }
+
+ ret = mlx5_ib_add_outstanding_wr(mqp, &cur_wr, NULL);
+ if (ret)
+ goto err;
+
+ ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
+ if (ret) {
+ /* Undo the effect of adding the outstanding wr */
+ gsi->outstanding_pi--;
+ goto err;
+ }
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ }
+
+ return 0;
+
+err:
+ spin_unlock_irqrestore(&gsi->lock, flags);
+ *bad_wr = wr;
+ return ret;
+}
+
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_gsi_qp *gsi = &mqp->gsi;
+
+ return ib_post_recv(gsi->rx_qp, wr, bad_wr);
+}
+
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
+{
+ u16 qp_index;
+
+ for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+ setup_qp(gsi, qp_index);
+}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
new file mode 100644
index 000000000..528214853
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#include <linux/mlx5/vport.h>
+#include "ib_rep.h"
+#include "srq.h"
+
+static int
+mlx5_ib_set_vport_rep(struct mlx5_core_dev *dev,
+ struct mlx5_eswitch_rep *rep,
+ int vport_index)
+{
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = mlx5_eswitch_uplink_get_proto_dev(dev->priv.eswitch, REP_IB);
+ if (!ibdev)
+ return -EINVAL;
+
+ ibdev->port[vport_index].rep = rep;
+ rep->rep_data[REP_IB].priv = ibdev;
+ write_lock(&ibdev->port[vport_index].roce.netdev_lock);
+ ibdev->port[vport_index].roce.netdev =
+ mlx5_ib_get_rep_netdev(rep->esw, rep->vport);
+ write_unlock(&ibdev->port[vport_index].roce.netdev_lock);
+
+ return 0;
+}
+
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev);
+
+static int
+mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+{
+ u32 num_ports = mlx5_eswitch_get_total_vports(dev);
+ const struct mlx5_ib_profile *profile;
+ struct mlx5_core_dev *peer_dev;
+ struct mlx5_ib_dev *ibdev;
+ u32 peer_num_ports;
+ int vport_index;
+ int ret;
+
+ vport_index = rep->vport_index;
+
+ if (mlx5_lag_is_shared_fdb(dev)) {
+ peer_dev = mlx5_lag_get_peer_mdev(dev);
+ peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
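+		/* With a shared FDB a single IB device, created by the LAG
+		 * master, represents both devices; the slave's vports are
+		 * exposed as extra ports after the master's own ports.
+		 */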
+ if (mlx5_lag_is_master(dev)) {
+ /* Only 1 ib port is the representor for both uplinks */
+ num_ports += peer_num_ports - 1;
+ } else {
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ return 0;
+ vport_index += peer_num_ports;
+ dev = peer_dev;
+ }
+ }
+
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ profile = &raw_eth_profile;
+ else
+ return mlx5_ib_set_vport_rep(dev, rep, vport_index);
+
+ ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
+ if (!ibdev)
+ return -ENOMEM;
+
+ ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
+ GFP_KERNEL);
+ if (!ibdev->port) {
+ ret = -ENOMEM;
+ goto fail_port;
+ }
+
+ ibdev->is_rep = true;
+ vport_index = rep->vport_index;
+ ibdev->port[vport_index].rep = rep;
+ ibdev->port[vport_index].roce.netdev =
+ mlx5_ib_get_rep_netdev(dev->priv.eswitch, rep->vport);
+ ibdev->mdev = dev;
+ ibdev->num_ports = num_ports;
+
+ ret = __mlx5_ib_add(ibdev, profile);
+ if (ret)
+ goto fail_add;
+
+ rep->rep_data[REP_IB].priv = ibdev;
+ if (mlx5_lag_is_shared_fdb(dev))
+ mlx5_ib_register_peer_vport_reps(dev);
+
+ return 0;
+
+fail_add:
+ kfree(ibdev->port);
+fail_port:
+ ib_dealloc_device(&ibdev->ib_dev);
+ return ret;
+}
+
+static void *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
+{
+ return rep->rep_data[REP_IB].priv;
+}
+
+static void
+mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_core_dev *mdev = mlx5_eswitch_get_core_dev(rep->esw);
+ struct mlx5_ib_dev *dev = mlx5_ib_rep_to_dev(rep);
+ int vport_index = rep->vport_index;
+ struct mlx5_ib_port *port;
+
+ if (WARN_ON(!mdev))
+ return;
+
+ if (mlx5_lag_is_shared_fdb(mdev) &&
+ !mlx5_lag_is_master(mdev)) {
+ struct mlx5_core_dev *peer_mdev;
+
+ if (rep->vport == MLX5_VPORT_UPLINK)
+ return;
+ peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+ vport_index += mlx5_eswitch_get_total_vports(peer_mdev);
+ }
+
+ if (!dev)
+ return;
+
+ port = &dev->port[vport_index];
+ write_lock(&port->roce.netdev_lock);
+ port->roce.netdev = NULL;
+ write_unlock(&port->roce.netdev_lock);
+ rep->rep_data[REP_IB].priv = NULL;
+ port->rep = NULL;
+
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ struct mlx5_core_dev *peer_mdev;
+ struct mlx5_eswitch *esw;
+
+ if (mlx5_lag_is_shared_fdb(mdev)) {
+ peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+ esw = peer_mdev->priv.eswitch;
+ mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+ }
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+ }
+}
+
+static const struct mlx5_eswitch_rep_ops rep_ops = {
+ .load = mlx5_ib_vport_rep_load,
+ .unload = mlx5_ib_vport_rep_unload,
+ .get_proto_dev = mlx5_ib_rep_to_dev,
+};
+
+static void mlx5_ib_register_peer_vport_reps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *peer_mdev = mlx5_lag_get_peer_mdev(mdev);
+ struct mlx5_eswitch *esw;
+
+ if (!peer_mdev)
+ return;
+
+ esw = peer_mdev->priv.eswitch;
+ mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+}
+
+struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
+}
+
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u32 port)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep;
+
+ if (!dev->is_rep || !port)
+ return NULL;
+
+ if (!dev->port[port - 1].rep)
+ return ERR_PTR(-EINVAL);
+
+ rep = dev->port[port - 1].rep;
+
+ return mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sq->base.mqp.qpn);
+}
+
+static int mlx5r_rep_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = idev->mdev;
+ struct mlx5_eswitch *esw;
+
+ esw = mdev->priv.eswitch;
+ mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_IB);
+ return 0;
+}
+
+static void mlx5r_rep_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = idev->mdev;
+ struct mlx5_eswitch *esw;
+
+ esw = mdev->priv.eswitch;
+ mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
+}
+
+static const struct auxiliary_device_id mlx5r_rep_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".rdma-rep", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5r_rep_id_table);
+
+static struct auxiliary_driver mlx5r_rep_driver = {
+ .name = "rep",
+ .probe = mlx5r_rep_probe,
+ .remove = mlx5r_rep_remove,
+ .id_table = mlx5r_rep_id_table,
+};
+
+int mlx5r_rep_init(void)
+{
+ return auxiliary_driver_register(&mlx5r_rep_driver);
+}
+
+void mlx5r_rep_cleanup(void)
+{
+ auxiliary_driver_unregister(&mlx5r_rep_driver);
+}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
new file mode 100644
index 000000000..9c55e5c52
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __MLX5_IB_REP_H__
+#define __MLX5_IB_REP_H__
+
+#include <linux/mlx5/eswitch.h>
+#include "mlx5_ib.h"
+
+extern const struct mlx5_ib_profile raw_eth_profile;
+
+#ifdef CONFIG_MLX5_ESWITCH
+int mlx5r_rep_init(void);
+void mlx5r_rep_cleanup(void);
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u32 port);
+struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
+ u16 vport_num);
+#else /* CONFIG_MLX5_ESWITCH */
+static inline int mlx5r_rep_init(void) { return 0; }
+static inline void mlx5r_rep_cleanup(void) {}
+static inline
+struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u32 port)
+{
+ return NULL;
+}
+
+static inline
+struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ return NULL;
+}
+#endif
+#endif /* __MLX5_IB_REP_H__ */
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
new file mode 100644
index 000000000..afeb5e532
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/vport.h>
+#include "mlx5_ib.h"
+
+static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
+{
+ switch (mlx_policy) {
+ case MLX5_POLICY_DOWN:
+ return IFLA_VF_LINK_STATE_DISABLE;
+ case MLX5_POLICY_UP:
+ return IFLA_VF_LINK_STATE_ENABLE;
+ case MLX5_POLICY_FOLLOW:
+ return IFLA_VF_LINK_STATE_AUTO;
+ default:
+ return __IFLA_VF_LINK_STATE_MAX;
+ }
+}
+
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u32 port,
+ struct ifla_vf_info *info)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ int err;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep)
+ return -ENOMEM;
+
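+	/* vport 0 is the PF itself, so VF "vf" maps to vport index vf + 1. */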
+ err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n",
+ vf, err);
+ goto free;
+ }
+ memset(info, 0, sizeof(*info));
+ info->linkstate = mlx_to_net_policy(rep->policy);
+ if (info->linkstate == __IFLA_VF_LINK_STATE_MAX)
+ err = -EINVAL;
+
+free:
+ kfree(rep);
+ return err;
+}
+
+static inline enum port_state_policy net_to_mlx_policy(int policy)
+{
+ switch (policy) {
+ case IFLA_VF_LINK_STATE_DISABLE:
+ return MLX5_POLICY_DOWN;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ return MLX5_POLICY_UP;
+ case IFLA_VF_LINK_STATE_AUTO:
+ return MLX5_POLICY_FOLLOW;
+ default:
+ return MLX5_POLICY_INVALID;
+ }
+}
+
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+ u32 port, int state)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->policy = net_to_mlx_policy(state);
+ if (in->policy == MLX5_POLICY_INVALID) {
+ err = -EINVAL;
+ goto out;
+ }
+ in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ if (!err)
+ vfs_ctx[vf].policy = in->policy;
+
+out:
+ kfree(in);
+ return err;
+}
+
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u32 port, struct ifla_vf_stats *stats)
+{
+ int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *dev;
+ void *out;
+ int err;
+
+ dev = to_mdev(device);
+ mdev = dev->mdev;
+
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_vport_counter(mdev, true, vf, port, out);
+ if (err)
+ goto ex;
+
+ stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets);
+ stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets);
+ stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets);
+ stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets);
+ stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets);
+
+ex:
+ kfree(out);
+ return err;
+}
+
+static int set_vf_node_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
+ in->node_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ if (!err) {
+ vfs_ctx[vf].node_guid = guid;
+ vfs_ctx[vf].node_guid_valid = 1;
+ }
+ kfree(in);
+ return err;
+}
+
+static int set_vf_port_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *in;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
+ int err;
+
+ in = kzalloc(sizeof(*in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
+ in->port_guid = guid;
+ err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+ if (!err) {
+ vfs_ctx[vf].port_guid = guid;
+ vfs_ctx[vf].port_guid_valid = 1;
+ }
+ kfree(in);
+ return err;
+}
+
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid, int type)
+{
+ if (type == IFLA_VF_IB_NODE_GUID)
+ return set_vf_node_guid(device, vf, port, guid);
+ else if (type == IFLA_VF_IB_PORT_GUID)
+ return set_vf_port_guid(device, vf, port, guid);
+
+ return -EINVAL;
+}
+
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_vf_context *vfs_ctx = mdev->priv.sriov.vfs_ctx;
+
+ node_guid->guid =
+ vfs_ctx[vf].node_guid_valid ? vfs_ctx[vf].node_guid : 0;
+ port_guid->guid =
+ vfs_ctx[vf].port_guid_valid ? vfs_ctx[vf].port_guid : 0;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
new file mode 100644
index 000000000..9c8a7b206
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/vport.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
+#include "mlx5_ib.h"
+#include "cmd.h"
+
+enum {
+ MLX5_IB_VENDOR_CLASS1 = 0x9,
+ MLX5_IB_VENDOR_CLASS2 = 0xa
+};
+
+static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct ib_mad *in_mad)
+{
+ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
+ in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ return true;
+ return dev->port_caps[port_num - 1].has_smi;
+}
+
+static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
+ int ignore_bkey, u32 port, const struct ib_wc *in_wc,
+ const struct ib_grh *in_grh, const void *in_mad,
+ void *response_mad)
+{
+ u8 op_modifier = 0;
+
+ if (!can_do_mad_ifc(dev, port, (struct ib_mad *)in_mad))
+ return -EPERM;
+
+ /* Key check traps can't be generated unless we have in_wc to
+ * tell us where to send the trap.
+ */
+ if (ignore_mkey || !in_wc)
+ op_modifier |= 0x1;
+ if (ignore_bkey || !in_wc)
+ op_modifier |= 0x2;
+
+ return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier,
+ port);
+}
+
+static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
+ void *out)
+{
+#define MLX5_SUM_CNT(p, cntr1, cntr2) \
+ (MLX5_GET64(query_vport_counter_out, p, cntr1) + \
+ MLX5_GET64(query_vport_counter_out, p, cntr2))
+
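+	/* PortXmitData and PortRcvData are defined in units of four octets
+	 * (32-bit words), hence the octet counters are shifted right by two.
+	 */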
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
+ transmitted_ib_multicast.octets) >> 2);
+ pma_cnt_ext->port_rcv_data =
+ cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
+ received_ib_multicast.octets) >> 2);
+ pma_cnt_ext->port_xmit_packets =
+ cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
+ transmitted_ib_multicast.packets));
+ pma_cnt_ext->port_rcv_packets =
+ cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
+ received_ib_multicast.packets));
+ pma_cnt_ext->port_unicast_xmit_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, transmitted_ib_unicast.packets);
+ pma_cnt_ext->port_unicast_rcv_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, received_ib_unicast.packets);
+ pma_cnt_ext->port_multicast_xmit_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, transmitted_ib_multicast.packets);
+ pma_cnt_ext->port_multicast_rcv_packets =
+ MLX5_GET64_BE(query_vport_counter_out,
+ out, received_ib_multicast.packets);
+}
+
+static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
+ void *out)
+{
+ /* Traffic counters will be reported in
+ * their 64bit form via ib_pma_portcounters_ext by default.
+ */
+ void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
+ counter_set);
+
+#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) { \
+ counter_var = MLX5_GET_BE(typeof(counter_var), \
+ ib_port_cntrs_grp_data_layout, \
+ out_pma, counter_name); \
+ }
+
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
+ symbol_error_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
+ link_error_recovery_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
+ link_downed_counter);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
+ port_rcv_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
+ port_rcv_remote_physical_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
+ port_rcv_switch_relay_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
+ port_xmit_discards);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
+ port_xmit_constraint_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_wait,
+ port_xmit_wait);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
+ port_rcv_constraint_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
+ link_overrun_errors);
+ MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
+ vl_15_dropped);
+}
+
+static int query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out,
+ size_t sz)
+{
+ u32 *in;
+ int err;
+
+ in = kvzalloc(sz, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ MLX5_SET(ppcnt_reg, in, local_port, port_num);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
+ err = mlx5_core_access_reg(dev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+
+ kvfree(in);
+ return err;
+}
+
+static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
+ const struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct mlx5_core_dev *mdev;
+ bool native_port = true;
+ u32 mdev_port_num;
+ void *out_cnt;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev) {
+		/* Failed to get the native port, likely because the 2nd port
+		 * is still unaffiliated. In such a case, default to the 1st
+		 * port and the attached PF device.
+ */
+ native_port = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+		/* Set the local port to 1 for a Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
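+	/* PerfMgt MADs carry 40 reserved bytes after the common MAD header,
+	 * so the attribute payloads referenced below live at out_mad->data + 40.
+	 */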
+ /* Declaring support of extended counters */
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
+ struct ib_class_port_info cpi = {};
+
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+ err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ goto done;
+ }
+
+ if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
+ struct ib_pma_portcounters_ext *pma_cnt_ext =
+ (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+ int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
+ if (!out_cnt) {
+ err = IB_MAD_RESULT_FAILURE;
+ goto done;
+ }
+
+ err = mlx5_core_query_vport_counter(mdev, 0, 0, mdev_port_num,
+ out_cnt);
+ if (!err)
+ pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
+ } else {
+ struct ib_pma_portcounters *pma_cnt =
+ (struct ib_pma_portcounters *)(out_mad->data + 40);
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ out_cnt = kvzalloc(sz, GFP_KERNEL);
+ if (!out_cnt) {
+ err = IB_MAD_RESULT_FAILURE;
+ goto done;
+ }
+
+ err = query_ib_ppcnt(mdev, mdev_port_num, out_cnt, sz);
+ if (!err)
+ pma_cnt_assign(pma_cnt, out_cnt);
+ }
+ kvfree(out_cnt);
+ err = err ? IB_MAD_RESULT_FAILURE :
+ IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+done:
+ if (native_port)
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return err;
+}
+
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ u8 mgmt_class = in->mad_hdr.mgmt_class;
+ u8 method = in->mad_hdr.method;
+ u16 slid;
+ int err;
+
+ slid = in_wc ? ib_lid_cpu16(in_wc->slid) :
+ be16_to_cpu(IB_LID_PERMISSIVE);
+
+ if (method == IB_MGMT_METHOD_TRAP && !slid)
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+
+ switch (mgmt_class) {
+ case IB_MGMT_CLASS_SUBN_LID_ROUTED:
+ case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET &&
+ method != IB_MGMT_METHOD_TRAP_REPRESS)
+ return IB_MAD_RESULT_SUCCESS;
+
+ /* Don't process SMInfo queries -- the SMA can't handle them.
+ */
+ if (in->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ case IB_MGMT_CLASS_PERF_MGMT:
+ if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
+ method == IB_MGMT_METHOD_GET)
+ return process_pma_cmd(dev, port_num, in, out);
+ fallthrough;
+ case MLX5_IB_VENDOR_CLASS1:
+ case MLX5_IB_VENDOR_CLASS2:
+ case IB_MGMT_CLASS_CONG_MGMT: {
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET)
+ return IB_MAD_RESULT_SUCCESS;
+ } break;
+ default:
+ return IB_MAD_RESULT_SUCCESS;
+ }
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), mad_flags & IB_MAD_IGNORE_MKEY,
+ mad_flags & IB_MAD_IGNORE_BKEY, port_num, in_wc,
+ in_grh, in, out);
+ if (err)
+ return IB_MAD_RESULT_FAILURE;
+
+ /* set return bit in status of directed route responses */
+ if (mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ out->mad_hdr.status |= cpu_to_be16(1 << 15);
+
+ if (method == IB_MGMT_METHOD_TRAP_REPRESS)
+ /* no response for trap repress */
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
+
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+ u16 packet_error;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+
+ packet_error = be16_to_cpu(out_mad->status);
+
+ dev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
+ struct ib_smp *out_mad)
+{
+ struct ib_smp *in_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ if (!in_mad)
+ return -ENOMEM;
+
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
+ out_mad);
+
+ kfree(in_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(sys_image_guid, out_mad->data + 4, 8);
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id)
+{
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!out_mad)
+ return -ENOMEM;
+
+ err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
+ if (err)
+ goto out;
+
+ *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;
+
+out:
+ kfree(out_mad);
+
+ return err;
+}
+
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+ err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ memcpy(node_guid, out_mad->data + 12, 8);
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
+ in_mad->attr_mod = cpu_to_be32(index / 32);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid)
+{
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw, out_mad->data + 8, 8);
+
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
+ in_mad->attr_mod = cpu_to_be32(index / 8);
+
+ err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
+ out_mad);
+ if (err)
+ goto out;
+
+ memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+ return err;
+}
+
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct ib_smp *in_mad = NULL;
+ struct ib_smp *out_mad = NULL;
+ int ext_active_speed;
+ int err = -ENOMEM;
+
+ in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
+ out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
+ if (!in_mad || !out_mad)
+ goto out;
+
+	/* props is zeroed by the caller; avoid zeroing it here */
+
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
+ if (err) {
+ mlx5_ib_warn(dev, "err %d\n", err);
+ goto out;
+ }
+
+ props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
+ props->lmc = out_mad->data[34] & 0x7;
+ props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
+ props->sm_sl = out_mad->data[36] & 0xf;
+ props->state = out_mad->data[32] & 0xf;
+ props->phys_state = out_mad->data[33] >> 4;
+ props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
+ props->gid_tbl_len = out_mad->data[50];
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+ props->pkey_tbl_len = dev->pkey_table_len;
+ props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
+ props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
+ props->active_width = out_mad->data[31] & 0xf;
+ props->active_speed = out_mad->data[35] >> 4;
+ props->max_mtu = out_mad->data[41] & 0xf;
+ props->active_mtu = out_mad->data[36] >> 4;
+ props->subnet_timeout = out_mad->data[51] & 0x1f;
+ props->max_vl_num = out_mad->data[37] >> 4;
+ props->init_type_reply = out_mad->data[41] >> 4;
+
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) {
+ props->port_cap_flags2 =
+ be16_to_cpup((__be16 *)(out_mad->data + 60));
+
+ if (props->port_cap_flags2 & IB_PORT_LINK_WIDTH_2X_SUP)
+ props->active_width = out_mad->data[31] & 0x1f;
+ }
+
+ /* Check if extended speeds (EDR/FDR/...) are supported */
+ if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
+ ext_active_speed = out_mad->data[62] >> 4;
+
+ switch (ext_active_speed) {
+ case 1:
+ props->active_speed = 16; /* FDR */
+ break;
+ case 2:
+ props->active_speed = 32; /* EDR */
+ break;
+ case 4:
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+ props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
+ props->active_speed = IB_SPEED_HDR;
+ break;
+ case 8:
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+ props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP)
+ props->active_speed = IB_SPEED_NDR;
+ break;
+ }
+ }
+
+	/* If the reported active speed is QDR, check if it is FDR-10 */
+ if (props->active_speed == 4) {
+ if (dev->port_caps[port - 1].ext_port_cap &
+ MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
+ ib_init_query_mad(in_mad);
+ in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
+ in_mad->attr_mod = cpu_to_be32(port);
+
+ err = mlx5_MAD_IFC(dev, 1, 1, port,
+ NULL, NULL, in_mad, out_mad);
+ if (err)
+ goto out;
+
+ /* Checking LinkSpeedActive for FDR-10 */
+ if (out_mad->data[15] & 0x1)
+ props->active_speed = 8;
+ }
+ }
+
+out:
+ kfree(in_mad);
+ kfree(out_mad);
+
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
new file mode 100644
index 000000000..bce31e28e
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -0,0 +1,4444 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/bitmap.h>
+#include <linux/sched.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
+#include <linux/delay.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <linux/mlx5/port.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include <linux/mlx5/eswitch.h>
+#include <linux/list.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/lag.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include "mlx5_ib.h"
+#include "ib_rep.h"
+#include "cmd.h"
+#include "devx.h"
+#include "dm.h"
+#include "fs.h"
+#include "srq.h"
+#include "qp.h"
+#include "wr.h"
+#include "restrack.h"
+#include "counters.h"
+#include "umr.h"
+#include <rdma/uverbs_std_types.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
+MODULE_LICENSE("Dual BSD/GPL");
+
+struct mlx5_ib_event_work {
+ struct work_struct work;
+ union {
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_multiport_info *mpi;
+ };
+ bool is_slave;
+ unsigned int event;
+ void *param;
+};
+
+enum {
+ MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
+};
+
+static struct workqueue_struct *mlx5_ib_event_wq;
+static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
+static LIST_HEAD(mlx5_ib_dev_list);
+/*
+ * This mutex should be held when accessing either of the above lists
+ */
+static DEFINE_MUTEX(mlx5_ib_multiport_mutex);
+
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
+{
+ struct mlx5_ib_dev *dev;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ dev = mpi->ibdev;
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return dev;
+}
+
+static enum rdma_link_layer
+mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
+{
+ switch (port_type_cap) {
+ case MLX5_CAP_PORT_TYPE_IB:
+ return IB_LINK_LAYER_INFINIBAND;
+ case MLX5_CAP_PORT_TYPE_ETH:
+ return IB_LINK_LAYER_ETHERNET;
+ default:
+ return IB_LINK_LAYER_UNSPECIFIED;
+ }
+}
+
+static enum rdma_link_layer
+mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
+
+ return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+}
+
+static int get_port_state(struct ib_device *ibdev,
+ u32 port_num,
+ enum ib_port_state *state)
+{
+ struct ib_port_attr attr;
+ int ret;
+
+ memset(&attr, 0, sizeof(attr));
+ ret = ibdev->ops.query_port(ibdev, port_num, &attr);
+ if (!ret)
+ *state = attr.state;
+ return ret;
+}
+
+static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
+ struct net_device *ndev,
+ struct net_device *upper,
+ u32 *port_num)
+{
+ struct net_device *rep_ndev;
+ struct mlx5_ib_port *port;
+ int i;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ port = &dev->port[i];
+ if (!port->rep)
+ continue;
+
+ if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
+ *port_num = i + 1;
+ return &port->roce;
+ }
+
+ if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
+ continue;
+
+ read_lock(&port->roce.netdev_lock);
+ rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
+ port->rep->vport);
+ if (rep_ndev == ndev) {
+ read_unlock(&port->roce.netdev_lock);
+ *port_num = i + 1;
+ return &port->roce;
+ }
+ read_unlock(&port->roce.netdev_lock);
+ }
+
+ return NULL;
+}
+
+static int mlx5_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ u32 port_num = roce->native_port_num;
+ struct mlx5_core_dev *mdev;
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = roce->dev;
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ /* Should already be registered during the load */
+ if (ibdev->is_rep)
+ break;
+ write_lock(&roce->netdev_lock);
+ if (ndev->dev.parent == mdev->device)
+ roce->netdev = ndev;
+ write_unlock(&roce->netdev_lock);
+ break;
+
+ case NETDEV_UNREGISTER:
+ /* In case of reps, ib device goes away before the netdevs */
+ write_lock(&roce->netdev_lock);
+ if (roce->netdev == ndev)
+ roce->netdev = NULL;
+ write_unlock(&roce->netdev_lock);
+ break;
+
+ case NETDEV_CHANGE:
+ case NETDEV_UP:
+ case NETDEV_DOWN: {
+ struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
+ struct net_device *upper = NULL;
+
+ if (lag_ndev) {
+ upper = netdev_master_upper_dev_get(lag_ndev);
+ dev_put(lag_ndev);
+ }
+
+ if (ibdev->is_rep)
+ roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
+ if (!roce)
+ return NOTIFY_DONE;
+ if ((upper == ndev ||
+ ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
+ ibdev->ib_active) {
+ struct ib_event ibev = { };
+ enum ib_port_state port_state;
+
+ if (get_port_state(&ibdev->ib_dev, port_num,
+ &port_state))
+ goto done;
+
+ if (roce->last_port_state == port_state)
+ goto done;
+
+ roce->last_port_state = port_state;
+ ibev.device = &ibdev->ib_dev;
+ if (port_state == IB_PORT_DOWN)
+ ibev.event = IB_EVENT_PORT_ERR;
+ else if (port_state == IB_PORT_ACTIVE)
+ ibev.event = IB_EVENT_PORT_ACTIVE;
+ else
+ goto done;
+
+ ibev.element.port_num = port_num;
+ ib_dispatch_event(&ibev);
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+done:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
+ return NOTIFY_DONE;
+}
+
+static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
+ u32 port_num)
+{
+ struct mlx5_ib_dev *ibdev = to_mdev(device);
+ struct net_device *ndev;
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
+ if (!mdev)
+ return NULL;
+
+ ndev = mlx5_lag_get_roce_netdev(mdev);
+ if (ndev)
+ goto out;
+
+ /* Ensure ndev does not disappear before we invoke dev_hold() */
+ read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
+ ndev = ibdev->port[port_num - 1].roce.netdev;
+ if (ndev)
+ dev_hold(ndev);
+ read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
+
+out:
+ mlx5_ib_put_native_port_mdev(ibdev, port_num);
+ return ndev;
+}
+
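+/*
+ * Return the mlx5 core device backing the given IB port. For multiport
+ * RoCE the affiliated port's mdev is returned and a reference is taken on
+ * its multiport info (unless it is the master); callers must balance this
+ * with mlx5_ib_put_native_port_mdev().
+ */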
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
+ u32 ib_port_num,
+ u32 *native_port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ ib_port_num);
+ struct mlx5_core_dev *mdev = NULL;
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+ ll != IB_LINK_LAYER_ETHERNET) {
+ if (native_port_num)
+ *native_port_num = ib_port_num;
+ return ibdev->mdev;
+ }
+
+ if (native_port_num)
+ *native_port_num = 1;
+
+ port = &ibdev->port[ib_port_num - 1];
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[ib_port_num - 1].mp.mpi;
+ if (mpi && !mpi->unaffiliate) {
+ mdev = mpi->mdev;
+ /* If it's the master there is no need to refcount; it will
+ * exist as long as the ib_dev exists.
+ */
+ if (!mpi->is_master)
+ mpi->mdev_refcnt++;
+ }
+ spin_unlock(&port->mp.mpi_lock);
+
+ return mdev;
+}
+
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
+{
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
+ port_num);
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_port *port;
+
+ if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ port = &ibdev->port[port_num - 1];
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi = ibdev->port[port_num - 1].mp.mpi;
+ if (mpi->is_master)
+ goto out;
+
+ mpi->mdev_refcnt--;
+ if (mpi->unaffiliate)
+ complete(&mpi->unref_comp);
+out:
+ spin_unlock(&port->mp.mpi_lock);
+}
+
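+/*
+ * Map the PTYS eth_proto_oper bit reported by firmware to the closest
+ * IB speed/width pair, for the legacy and extended protocol layouts
+ * respectively.
+ */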
+static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
+ u16 *active_speed, u8 *active_width)
+{
+ switch (eth_proto_oper) {
+ case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
+ case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
+ case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
+ case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_SDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
+ case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
+ case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
+ case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
+ case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_FDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
+ case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
+ case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
+ case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
+ u8 *active_width)
+{
+ switch (eth_proto_oper) {
+ case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
+ case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_SDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_DDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_QDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_EDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
+ *active_width = IB_WIDTH_1X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
+ *active_width = IB_WIDTH_2X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_8):
+ *active_width = IB_WIDTH_8X;
+ *active_speed = IB_SPEED_HDR;
+ break;
+ case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
+ *active_width = IB_WIDTH_4X;
+ *active_speed = IB_SPEED_NDR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
+ u8 *active_width, bool ext)
+{
+ return ext ?
+ translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
+ active_width) :
+ translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
+ active_width);
+}
+
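+/*
+ * Fill ib_port_attr for a RoCE (Ethernet) port: speed and width are taken
+ * from the PTYS register, state and MTU from the associated netdev.
+ */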
+static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
+ struct ib_port_attr *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
+ struct mlx5_core_dev *mdev;
+ struct net_device *ndev, *upper;
+ enum ib_mtu ndev_ib_mtu;
+ bool put_mdev = true;
+ u32 eth_prot_oper;
+ u32 mdev_port_num;
+ bool ext;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev) {
+ /* This means the port isn't affiliated yet. Get the
+ * info for the master port instead.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ port_num = 1;
+ }
+
+ /* Possible bad flows are checked before filling out props, so in
+ * case of an error props is still zeroed out.
+ * Use the native port in the case of reps.
+ */
+ if (dev->is_rep)
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+ 1);
+ else
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
+ mdev_port_num);
+ if (err)
+ goto out;
+ ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
+ eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
+
+ props->active_width = IB_WIDTH_4X;
+ props->active_speed = IB_SPEED_QDR;
+
+ translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
+ &props->active_width, ext);
+
+ if (!dev->is_rep && dev->mdev->roce.roce_en) {
+ u16 qkey_viol_cntr;
+
+ props->port_cap_flags |= IB_PORT_CM_SUP;
+ props->ip_gids = true;
+ props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
+ roce_address_table_size);
+ mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
+ props->qkey_viol_cntr = qkey_viol_cntr;
+ }
+ props->max_mtu = IB_MTU_4096;
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
+ props->pkey_tbl_len = 1;
+ props->state = IB_PORT_DOWN;
+ props->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+
+ /* If this is a stub query for an unaffiliated port stop here */
+ if (!put_mdev)
+ goto out;
+
+ ndev = mlx5_ib_get_netdev(device, port_num);
+ if (!ndev)
+ goto out;
+
+ if (dev->lag_active) {
+ rcu_read_lock();
+ upper = netdev_master_upper_dev_get_rcu(ndev);
+ if (upper) {
+ dev_put(ndev);
+ ndev = upper;
+ dev_hold(ndev);
+ }
+ rcu_read_unlock();
+ }
+
+ if (netif_running(ndev) && netif_carrier_ok(ndev)) {
+ props->state = IB_PORT_ACTIVE;
+ props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ }
+
+ ndev_ib_mtu = iboe_get_mtu(ndev->mtu);
+
+ dev_put(ndev);
+
+ props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
+out:
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return err;
+}
+
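+/*
+ * Program a RoCE GID table entry in firmware, deriving the RoCE version
+ * and L3 type from the GID attributes and the MAC/VLAN from the GID's
+ * L2 fields.
+ */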
+static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr)
+{
+ enum ib_gid_type gid_type;
+ u16 vlan_id = 0xffff;
+ u8 roce_version = 0;
+ u8 roce_l3_type = 0;
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ gid_type = attr->gid_type;
+ if (gid) {
+ ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
+ if (ret)
+ return ret;
+ }
+
+ switch (gid_type) {
+ case IB_GID_TYPE_ROCE:
+ roce_version = MLX5_ROCE_VERSION_1;
+ break;
+ case IB_GID_TYPE_ROCE_UDP_ENCAP:
+ roce_version = MLX5_ROCE_VERSION_2;
+ if (gid && ipv6_addr_v4mapped((void *)gid))
+ roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
+ else
+ roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
+ break;
+
+ default:
+ mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
+ }
+
+ return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
+ roce_l3_type, gid->raw, mac,
+ vlan_id < VLAN_CFI_MASK, vlan_id,
+ port_num);
+}
+
+static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
+ __always_unused void **context)
+{
+ return set_roce_addr(to_mdev(attr->device), attr->port_num,
+ attr->index, &attr->gid, attr);
+}
+
+static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
+ __always_unused void **context)
+{
+ return set_roce_addr(to_mdev(attr->device), attr->port_num,
+ attr->index, NULL, attr);
+}
+
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr)
+{
+ if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
+ return 0;
+
+ return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
+}
+
+static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
+ return !MLX5_CAP_GEN(dev->mdev, ib_virt);
+ return 0;
+}
+
+enum {
+ MLX5_VPORT_ACCESS_METHOD_MAD,
+ MLX5_VPORT_ACCESS_METHOD_HCA,
+ MLX5_VPORT_ACCESS_METHOD_NIC,
+};
+
+static int mlx5_get_vport_access_method(struct ib_device *ibdev)
+{
+ if (mlx5_use_mad_ifc(to_mdev(ibdev)))
+ return MLX5_VPORT_ACCESS_METHOD_MAD;
+
+ if (mlx5_ib_port_link_layer(ibdev, 1) ==
+ IB_LINK_LAYER_ETHERNET)
+ return MLX5_VPORT_ACCESS_METHOD_NIC;
+
+ return MLX5_VPORT_ACCESS_METHOD_HCA;
+}
+
+static void get_atomic_caps(struct mlx5_ib_dev *dev,
+ u8 atomic_size_qp,
+ struct ib_device_attr *props)
+{
+ u8 tmp;
+ u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
+ u8 atomic_req_8B_endianness_mode =
+ MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
+
+ /* Check if HW supports standard 8-byte atomic operations and is
+ * capable of responding in host endianness.
+ */
+ tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
+ if (((atomic_operations & tmp) == tmp) &&
+ (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
+ (atomic_req_8B_endianness_mode)) {
+ props->atomic_cap = IB_ATOMIC_HCA;
+ } else {
+ props->atomic_cap = IB_ATOMIC_NONE;
+ }
+}
+
+static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
+ struct ib_device_attr *props)
+{
+ u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ get_atomic_caps(dev, atomic_size_qp, props);
+}
+
+static int mlx5_query_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u64 tmp;
+ int err;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_system_image_guid(ibdev,
+ sys_image_guid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+ break;
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!err)
+ *sys_image_guid = cpu_to_be64(tmp);
+
+ return err;
+}
+
+static int mlx5_query_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
+ pkey_table_size));
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
+ __be64 *node_guid)
+{
+ u64 tmp;
+ int err;
+
+ switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_node_guid(dev, node_guid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
+ break;
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!err)
+ *node_guid = cpu_to_be64(tmp);
+
+ return err;
+}
+
+struct mlx5_reg_node_desc {
+ u8 desc[IB_DEVICE_NODE_DESC_MAX];
+};
+
+static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
+{
+ struct mlx5_reg_node_desc in;
+
+ if (mlx5_use_mad_ifc(dev))
+ return mlx5_query_mad_ifc_node_desc(dev, node_desc);
+
+ memset(&in, 0, sizeof(in));
+
+ return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
+ sizeof(struct mlx5_reg_node_desc),
+ MLX5_REG_NODE_DESC, 0, 0);
+}
+
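+/*
+ * Fill ib_device_attr from the HCA capabilities. When invoked through the
+ * extended uverbs path (uhw), optional capability blocks are appended to
+ * the vendor response as long as they fit in the user buffer, growing
+ * resp.response_length accordingly.
+ */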
+static int mlx5_ib_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *props,
+ struct ib_udata *uhw)
+{
+ size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err = -ENOMEM;
+ int max_sq_desc;
+ int max_rq_sg;
+ int max_sq_sg;
+ u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+ bool raw_support = !mlx5_core_mp_enabled(mdev);
+ struct mlx5_ib_query_device_resp resp = {};
+ size_t resp_len;
+ u64 max_tso;
+
+ resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
+ if (uhw_outlen && uhw_outlen < resp_len)
+ return -EINVAL;
+
+ resp.response_length = resp_len;
+
+ if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
+ return -EINVAL;
+
+ memset(props, 0, sizeof(*props));
+ err = mlx5_query_system_image_guid(ibdev,
+ &props->sys_image_guid);
+ if (err)
+ return err;
+
+ props->max_pkeys = dev->pkey_table_len;
+
+ err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
+ if (err)
+ return err;
+
+ props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
+ (fw_rev_min(dev->mdev) << 16) |
+ fw_rev_sub(dev->mdev);
+ props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
+ IB_DEVICE_PORT_ACTIVE_EVENT |
+ IB_DEVICE_SYS_IMAGE_GUID |
+ IB_DEVICE_RC_RNR_NAK_GEN;
+
+ if (MLX5_CAP_GEN(mdev, pkv))
+ props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
+ if (MLX5_CAP_GEN(mdev, qkv))
+ props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
+ if (MLX5_CAP_GEN(mdev, apm))
+ props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
+ if (MLX5_CAP_GEN(mdev, xrc))
+ props->device_cap_flags |= IB_DEVICE_XRC;
+ if (MLX5_CAP_GEN(mdev, imaicl)) {
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_WINDOW_TYPE_2B;
+ props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+ /* We support 'Gappy' memory registration too */
+ props->kernel_cap_flags |= IBK_SG_GAPS_REG;
+ }
+ /* IB_WR_REG_MR always requires changing the entity size with UMR */
+ if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
+ if (MLX5_CAP_GEN(mdev, sho)) {
+ props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER;
+ /* At this stage no support for signature handover */
+ props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
+ IB_PROT_T10DIF_TYPE_2 |
+ IB_PROT_T10DIF_TYPE_3;
+ props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
+ IB_GUARD_T10DIF_CSUM;
+ }
+ if (MLX5_CAP_GEN(mdev, block_lb_mc))
+ props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;
+
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
+ if (MLX5_CAP_ETH(mdev, csum_cap)) {
+ /* Legacy bit to support old userspace libraries */
+ props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
+ props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
+ }
+
+ if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
+ props->raw_packet_caps |=
+ IB_RAW_PACKET_CAP_CVLAN_STRIPPING;
+
+ if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
+ max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
+ if (max_tso) {
+ resp.tso_caps.max_tso = 1 << max_tso;
+ resp.tso_caps.supported_qpts |=
+ 1 << IB_QPT_RAW_PACKET;
+ resp.response_length += sizeof(resp.tso_caps);
+ }
+ }
+
+ if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
+ resp.rss_caps.rx_hash_function =
+ MLX5_RX_HASH_FUNC_TOEPLITZ;
+ resp.rss_caps.rx_hash_fields_mask =
+ MLX5_RX_HASH_SRC_IPV4 |
+ MLX5_RX_HASH_DST_IPV4 |
+ MLX5_RX_HASH_SRC_IPV6 |
+ MLX5_RX_HASH_DST_IPV6 |
+ MLX5_RX_HASH_SRC_PORT_TCP |
+ MLX5_RX_HASH_DST_PORT_TCP |
+ MLX5_RX_HASH_SRC_PORT_UDP |
+ MLX5_RX_HASH_DST_PORT_UDP |
+ MLX5_RX_HASH_INNER;
+ resp.response_length += sizeof(resp.rss_caps);
+ }
+ } else {
+ if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
+ resp.response_length += sizeof(resp.tso_caps);
+ if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
+ resp.response_length += sizeof(resp.rss_caps);
+ }
+
+ if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
+ props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+ props->kernel_cap_flags |= IBK_UD_TSO;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
+ MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
+ raw_support)
+ props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;
+
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
+ MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
+ props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
+ raw_support) {
+ /* Legacy bit to support old userspace libraries */
+ props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
+ props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
+ }
+
+ if (MLX5_CAP_DEV_MEM(mdev, memic)) {
+ props->max_dm_size =
+ MLX5_CAP_DEV_MEM(mdev, max_memic_size);
+ }
+
+ if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
+
+ if (MLX5_CAP_GEN(mdev, end_pad))
+ props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;
+
+ props->vendor_part_id = mdev->pdev->device;
+ props->hw_ver = mdev->pdev->revision;
+
+ props->max_mr_size = ~0ull;
+ props->page_size_cap = ~(min_page_size - 1);
+ props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
+ props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+ sizeof(struct mlx5_wqe_data_seg);
+ max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
+ max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ props->max_send_sge = max_sq_sg;
+ props->max_recv_sge = max_rq_sg;
+ props->max_sge_rd = MLX5_MAX_SGE_RD;
+ props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+ props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
+ props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+ props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
+ props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
+ props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
+ props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
+ props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
+ props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
+ props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
+ props->max_srq_sge = max_rq_sg - 1;
+ props->max_fast_reg_page_list_len =
+ 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
+ props->max_pi_fast_reg_page_list_len =
+ props->max_fast_reg_page_list_len / 2;
+ props->max_sgl_rd =
+ MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
+ get_atomic_caps_qp(dev, props);
+ props->masked_atomic_cap = IB_ATOMIC_NONE;
+ props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
+ props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
+ props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
+ props->max_mcast_grp;
+ props->max_ah = INT_MAX;
+ props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
+ props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
+ props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
+ props->odp_caps = dev->odp_caps;
+ if (!uhw) {
+ /* ODP for kernel QPs is not implemented for receive
+ * WQEs and SRQ WQEs
+ */
+ props->odp_caps.per_transport_caps.rc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.uc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.ud_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ props->odp_caps.per_transport_caps.xrc_odp_caps &=
+ ~(IB_ODP_SUPPORT_READ |
+ IB_ODP_SUPPORT_SRQ_RECV);
+ }
+ }
+
+ if (mlx5_core_is_vf(mdev))
+ props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;
+
+ if (mlx5_ib_port_link_layer(ibdev, 1) ==
+ IB_LINK_LAYER_ETHERNET && raw_support) {
+ props->rss_caps.max_rwq_indirection_tables =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
+ props->rss_caps.max_rwq_indirection_table_size =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
+ props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
+ props->max_wq_type_rq =
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
+ }
+
+ if (MLX5_CAP_GEN(mdev, tag_matching)) {
+ props->tm_caps.max_num_tags =
+ (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
+ props->tm_caps.max_ops =
+ 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
+ }
+
+ if (MLX5_CAP_GEN(mdev, tag_matching) &&
+ MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
+ props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
+ props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
+ props->cq_caps.max_cq_moderation_count =
+ MLX5_MAX_CQ_COUNT;
+ props->cq_caps.max_cq_moderation_period =
+ MLX5_MAX_CQ_PERIOD;
+ }
+
+ if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.cqe_comp_caps);
+
+ if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
+ resp.cqe_comp_caps.max_num =
+ MLX5_CAP_GEN(dev->mdev,
+ cqe_compression_max_num);
+
+ resp.cqe_comp_caps.supported_format =
+ MLX5_IB_CQE_RES_FORMAT_HASH |
+ MLX5_IB_CQE_RES_FORMAT_CSUM;
+
+ if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
+ resp.cqe_comp_caps.supported_format |=
+ MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
+ }
+ }
+
+ if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
+ raw_support) {
+ if (MLX5_CAP_QOS(mdev, packet_pacing) &&
+ MLX5_CAP_GEN(mdev, qos)) {
+ resp.packet_pacing_caps.qp_rate_limit_max =
+ MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
+ resp.packet_pacing_caps.qp_rate_limit_min =
+ MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
+ resp.packet_pacing_caps.supported_qpts |=
+ 1 << IB_QPT_RAW_PACKET;
+ if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
+ MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
+ resp.packet_pacing_caps.cap_flags |=
+ MLX5_IB_PP_SUPPORT_BURST;
+ }
+ resp.response_length += sizeof(resp.packet_pacing_caps);
+ }
+
+ if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
+ uhw_outlen) {
+ if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
+ resp.mlx5_ib_support_multi_pkt_send_wqes =
+ MLX5_IB_ALLOW_MPW;
+
+ if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+ resp.mlx5_ib_support_multi_pkt_send_wqes |=
+ MLX5_IB_SUPPORT_EMPW;
+
+ resp.response_length +=
+ sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
+ }
+
+ if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.flags);
+
+ if (MLX5_CAP_GEN(mdev, cqe_compression_128))
+ resp.flags |=
+ MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;
+
+ if (MLX5_CAP_GEN(mdev, cqe_128_always))
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
+ if (MLX5_CAP_GEN(mdev, qp_packet_based))
+ resp.flags |=
+ MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
+
+ resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
+ }
+
+ if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.sw_parsing_caps);
+ if (MLX5_CAP_ETH(mdev, swp)) {
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING;
+
+ if (MLX5_CAP_ETH(mdev, swp_csum))
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING_CSUM;
+
+ if (MLX5_CAP_ETH(mdev, swp_lso))
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING_LSO;
+
+ if (resp.sw_parsing_caps.sw_parsing_offloads)
+ resp.sw_parsing_caps.supported_qpts =
+ BIT(IB_QPT_RAW_PACKET);
+ }
+ }
+
+ if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
+ raw_support) {
+ resp.response_length += sizeof(resp.striding_rq_caps);
+ if (MLX5_CAP_GEN(mdev, striding_rq)) {
+ resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
+ resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
+ if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ else
+ resp.striding_rq_caps
+ .min_single_wqe_log_num_of_strides =
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
+ resp.striding_rq_caps.supported_qpts =
+ BIT(IB_QPT_RAW_PACKET);
+ }
+ }
+
+ if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.tunnel_offloads_caps);
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GRE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
+ }
+
+ if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
+ resp.response_length += sizeof(resp.dci_streams_caps);
+
+ resp.dci_streams_caps.max_log_num_concurent =
+ MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
+
+ resp.dci_streams_caps.max_log_num_errored =
+ MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
+ }
+
+ if (uhw_outlen) {
+ err = ib_copy_to_udata(uhw, &resp, resp.response_length);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void translate_active_width(struct ib_device *ibdev, u16 active_width,
+ u8 *ib_width)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+
+ if (active_width & MLX5_PTYS_WIDTH_1X)
+ *ib_width = IB_WIDTH_1X;
+ else if (active_width & MLX5_PTYS_WIDTH_2X)
+ *ib_width = IB_WIDTH_2X;
+ else if (active_width & MLX5_PTYS_WIDTH_4X)
+ *ib_width = IB_WIDTH_4X;
+ else if (active_width & MLX5_PTYS_WIDTH_8X)
+ *ib_width = IB_WIDTH_8X;
+ else if (active_width & MLX5_PTYS_WIDTH_12X)
+ *ib_width = IB_WIDTH_12X;
+ else {
+ mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
+ active_width);
+ *ib_width = IB_WIDTH_4X;
+ }
+}
+
+static int mlx5_mtu_to_ib_mtu(int mtu)
+{
+ switch (mtu) {
+ case 256: return 1;
+ case 512: return 2;
+ case 1024: return 3;
+ case 2048: return 4;
+ case 4096: return 5;
+ default:
+ pr_warn("invalid mtu\n");
+ return -1;
+ }
+}
+
+enum ib_max_vl_num {
+ __IB_MAX_VL_0 = 1,
+ __IB_MAX_VL_0_1 = 2,
+ __IB_MAX_VL_0_3 = 3,
+ __IB_MAX_VL_0_7 = 4,
+ __IB_MAX_VL_0_14 = 5,
+};
+
+enum mlx5_vl_hw_cap {
+ MLX5_VL_HW_0 = 1,
+ MLX5_VL_HW_0_1 = 2,
+ MLX5_VL_HW_0_2 = 3,
+ MLX5_VL_HW_0_3 = 4,
+ MLX5_VL_HW_0_4 = 5,
+ MLX5_VL_HW_0_5 = 6,
+ MLX5_VL_HW_0_6 = 7,
+ MLX5_VL_HW_0_7 = 8,
+ MLX5_VL_HW_0_14 = 15
+};
+
+static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
+ u8 *max_vl_num)
+{
+ switch (vl_hw_cap) {
+ case MLX5_VL_HW_0:
+ *max_vl_num = __IB_MAX_VL_0;
+ break;
+ case MLX5_VL_HW_0_1:
+ *max_vl_num = __IB_MAX_VL_0_1;
+ break;
+ case MLX5_VL_HW_0_3:
+ *max_vl_num = __IB_MAX_VL_0_3;
+ break;
+ case MLX5_VL_HW_0_7:
+ *max_vl_num = __IB_MAX_VL_0_7;
+ break;
+ case MLX5_VL_HW_0_14:
+ *max_vl_num = __IB_MAX_VL_0_14;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
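+/* Query an IB (non-RoCE) port through the HCA vport context. */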
+static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_hca_vport_context *rep;
+ u16 max_mtu;
+ u16 oper_mtu;
+ int err;
+ u16 ib_link_width_oper;
+ u8 vl_hw_cap;
+
+ rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ if (!rep) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* props is zeroed by the caller, avoid zeroing it here */
+
+ err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
+ if (err)
+ goto out;
+
+ props->lid = rep->lid;
+ props->lmc = rep->lmc;
+ props->sm_lid = rep->sm_lid;
+ props->sm_sl = rep->sm_sl;
+ props->state = rep->vport_state;
+ props->phys_state = rep->port_physical_state;
+ props->port_cap_flags = rep->cap_mask1;
+ props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
+ props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+ props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
+ props->bad_pkey_cntr = rep->pkey_violation_counter;
+ props->qkey_viol_cntr = rep->qkey_violation_counter;
+ props->subnet_timeout = rep->subnet_timeout;
+ props->init_type_reply = rep->init_type_reply;
+
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
+ props->port_cap_flags2 = rep->cap_mask2;
+
+ err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
+ &props->active_speed, port);
+ if (err)
+ goto out;
+
+ translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
+
+ mlx5_query_port_max_mtu(mdev, &max_mtu, port);
+
+ props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
+
+ mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
+
+ props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
+
+ err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
+ if (err)
+ goto out;
+
+ err = translate_max_vl_num(ibdev, vl_hw_cap,
+ &props->max_vl_num);
+out:
+ kfree(rep);
+ return err;
+}
+
+int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ unsigned int count;
+ int ret;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ ret = mlx5_query_mad_ifc_port(ibdev, port, props);
+ break;
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ ret = mlx5_query_hca_port(ibdev, port, props);
+ break;
+
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ ret = mlx5_query_port_roce(ibdev, port, props);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ if (!ret && props) {
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
+ if (!mdev) {
+ /* If the port isn't affiliated yet query the master.
+ * The master and slave will have the same values.
+ */
+ mdev = dev->mdev;
+ port = 1;
+ put_mdev = false;
+ }
+ count = mlx5_core_reserved_gids_count(mdev);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
+ props->gid_tbl_len -= count;
+ }
+ return ret;
+}
+
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props)
+{
+ return mlx5_query_port_roce(ibdev, port, props);
+}
+
+static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ /* Default special Pkey for representor device port as per the
+ * IB specification 1.3 section 10.9.1.2.
+ */
+ *pkey = 0xffff;
+ return 0;
+}
+
+static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port,
+ u16 index, u16 *pkey)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_core_dev *mdev;
+ bool put_mdev = true;
+ u32 mdev_port_num;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
+ if (!mdev) {
+ /* The port isn't affiliated yet, get the PKey from the master
+ * port. For RoCE the PKey tables will be the same.
+ */
+ put_mdev = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
+ err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
+ index, pkey);
+ if (put_mdev)
+ mlx5_ib_put_native_port_mdev(dev, port);
+
+ return err;
+}
+
+static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ switch (mlx5_get_vport_access_method(ibdev)) {
+ case MLX5_VPORT_ACCESS_METHOD_MAD:
+ return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);
+
+ case MLX5_VPORT_ACCESS_METHOD_HCA:
+ case MLX5_VPORT_ACCESS_METHOD_NIC:
+ return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_reg_node_desc in;
+ struct mlx5_reg_node_desc out;
+ int err;
+
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
+ return 0;
+
+ /*
+ * If possible, pass the node desc to FW so it can generate
+ * a Trap 144. If the cmd fails, just ignore it.
+ */
+ memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
+ err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
+ sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
+ if (err)
+ return err;
+
+ memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
+
+ return err;
+}
+
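+/*
+ * Update a port's cap_mask1 bits through the HCA vport context, after
+ * verifying that the requested bits are modifiable per cap_mask1_perm.
+ */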
+static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
+ u32 value)
+{
+ struct mlx5_hca_vport_context ctx = {};
+ struct mlx5_core_dev *mdev;
+ u32 mdev_port_num;
+ int err;
+
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev)
+ return -ENODEV;
+
+ err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
+ if (err)
+ goto out;
+
+ if (~ctx.cap_mask1_perm & mask) {
+ mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n",
+ mask, ctx.cap_mask1_perm);
+ err = -EINVAL;
+ goto out;
+ }
+
+ ctx.cap_mask1 = value;
+ ctx.cap_mask1_perm = mask;
+ err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
+ 0, &ctx);
+
+out:
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+
+ return err;
+}
+
+static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
+ struct ib_port_modify *props)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct ib_port_attr attr;
+ u32 tmp;
+ int err;
+ u32 change_mask;
+ u32 value;
+ bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) ==
+ IB_LINK_LAYER_INFINIBAND);
+
+ /* CM layer calls ib_modify_port() regardless of the link layer. For
+ * Ethernet ports, qkey violation and Port capabilities are meaningless.
+ */
+ if (!is_ib)
+ return 0;
+
+ if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
+ change_mask = props->clr_port_cap_mask | props->set_port_cap_mask;
+ value = ~props->clr_port_cap_mask | props->set_port_cap_mask;
+ return set_port_caps_atomic(dev, port, change_mask, value);
+ }
+
+ mutex_lock(&dev->cap_mask_mutex);
+
+ err = ib_query_port(ibdev, port, &attr);
+ if (err)
+ goto out;
+
+ tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
+ ~props->clr_port_cap_mask;
+
+ err = mlx5_set_port_caps(dev->mdev, port, tmp);
+
+out:
+ mutex_unlock(&dev->cap_mask_mutex);
+ return err;
+}
+
+static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps)
+{
+ mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n",
+ caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n");
+}
+
+static u16 calc_dynamic_bfregs(int uars_per_sys_page)
+{
+ /* A large page size with non-4k UAR support might limit the dynamic size */
+ if (uars_per_sys_page == 1 && PAGE_SIZE > 4096)
+ return MLX5_MIN_DYN_BFREGS;
+
+ return MLX5_MAX_DYN_BFREGS;
+}
+
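+/*
+ * Validate the user's bfreg request and compute the static and dynamic
+ * bfreg/system-page layout recorded in bfregi. Updates
+ * req->total_num_bfregs to the value actually used (aligned to the number
+ * of bfregs per system page).
+ */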
+static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k,
+ struct mlx5_ib_alloc_ucontext_req_v2 *req,
+ struct mlx5_bfreg_info *bfregi)
+{
+ int uars_per_sys_page;
+ int bfregs_per_sys_page;
+ int ref_bfregs = req->total_num_bfregs;
+
+ if (req->total_num_bfregs == 0)
+ return -EINVAL;
+
+ BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE);
+ BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE);
+
+ if (req->total_num_bfregs > MLX5_MAX_BFREGS)
+ return -ENOMEM;
+
+ uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k);
+ bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR;
+ /* This holds the required static allocation requested by the user */
+ req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page);
+ if (req->num_low_latency_bfregs > req->total_num_bfregs - 1)
+ return -EINVAL;
+
+ bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page;
+ bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page);
+ bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs;
+ bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page;
+
+ mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n",
+ MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
+ lib_uar_4k ? "yes" : "no", ref_bfregs,
+ req->total_num_bfregs, bfregi->total_num_bfregs,
+ bfregi->num_sys_pages);
+
+ return 0;
+}
+
+static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+{
+ struct mlx5_bfreg_info *bfregi;
+ int err;
+ int i;
+
+ bfregi = &context->bfregi;
+ for (i = 0; i < bfregi->num_static_sys_pages; i++) {
+ err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
+ context->devx_uid);
+ if (err)
+ goto error;
+
+ mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]);
+ }
+
+ for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++)
+ bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX;
+
+ return 0;
+
+error:
+ for (--i; i >= 0; i--)
+ if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
+ context->devx_uid))
+ mlx5_ib_warn(dev, "failed to free uar %d\n", i);
+
+ return err;
+}
+
+static void deallocate_uars(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_ucontext *context)
+{
+ struct mlx5_bfreg_info *bfregi;
+ int i;
+
+ bfregi = &context->bfregi;
+ for (i = 0; i < bfregi->num_sys_pages; i++)
+ if (i < bfregi->num_static_sys_pages ||
+ bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
+ mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
+ context->devx_uid);
+}
+
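+/*
+ * Reference-count local loopback users: loopback is enabled in firmware
+ * once a second transport-domain user or the first QP user appears, and
+ * disabled again when the counts drop back (see mlx5_ib_disable_lb()).
+ */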
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+{
+ int err = 0;
+
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td++;
+ if (qp)
+ dev->lb.qps++;
+
+ if (dev->lb.user_td == 2 ||
+ dev->lb.qps == 1) {
+ if (!dev->lb.enabled) {
+ err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+ dev->lb.enabled = true;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+
+ return err;
+}
+
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+{
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td--;
+ if (qp)
+ dev->lb.qps--;
+
+ if (dev->lb.user_td == 1 &&
+ dev->lb.qps == 0) {
+ if (dev->lb.enabled) {
+ mlx5_nic_vport_update_local_lb(dev->mdev, false);
+ dev->lb.enabled = false;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+}
+
+static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
+ u16 uid)
+{
+ int err;
+
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return 0;
+
+ err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
+ if (err)
+ return err;
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ return err;
+
+ return mlx5_ib_enable_lb(dev, true, false);
+}
+
+static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
+ u16 uid)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
+ return;
+
+ mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
+ (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
+ !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ return;
+
+ mlx5_ib_disable_lb(dev, true, false);
+}
+
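+/*
+ * Fill the alloc_ucontext response from device capabilities and the
+ * context's bfreg layout; shared by alloc_ucontext and query_ucontext.
+ */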
+static int set_ucontext_resp(struct ib_ucontext *uctx,
+ struct mlx5_ib_alloc_ucontext_resp *resp)
+{
+ struct ib_device *ibdev = uctx->device;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_ucontext *context = to_mucontext(uctx);
+ struct mlx5_bfreg_info *bfregi = &context->bfregi;
+ int err;
+
+ if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
+ err = mlx5_cmd_dump_fill_mkey(dev->mdev,
+ &resp->dump_fill_mkey);
+ if (err)
+ return err;
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
+ }
+
+ resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
+ if (dev->wc_support)
+ resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_bf_reg_size);
+ resp->cache_line_size = cache_line_size();
+ resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
+ resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
+ resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+ resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+ resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+ resp->cqe_version = context->cqe_version;
+ resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT;
+ resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_CAP_GEN(dev->mdev,
+ num_of_uars_per_page) : 1;
+ resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 :
+ bfregi->total_num_bfregs - bfregi->num_dyn_bfregs;
+ resp->num_ports = dev->num_ports;
+ resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
+ MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
+
+ if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
+ mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
+ resp->eth_min_inline++;
+ }
+
+ if (dev->mdev->clock_info)
+ resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1);
+
+ /*
+ * We don't want to expose information from the PCI bar that is located
+ * after 4096 bytes, so if the arch only supports larger pages, let's
+ * pretend we don't support reading the HCA's core clock. This is also
+ * enforced by the mmap function.
+ */
+ if (PAGE_SIZE <= 4096) {
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
+ resp->hca_core_clock_offset =
+ offsetof(struct mlx5_init_seg,
+ internal_timer_h) % PAGE_SIZE;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE;
+
+ if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
+ rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
+ rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS;
+
+ resp->num_dyn_bfregs = bfregi->num_dyn_bfregs;
+
+ if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
+ resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS;
+
+ resp->comp_mask |=
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG;
+
+ return 0;
+}
+
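+/*
+ * Allocate a user context: parse the v0/v2 request, optionally create a
+ * DEVX uid, set up bfregs and UARs (unless the library asked for dynamic
+ * UARs only), allocate a transport domain and copy the response to
+ * userspace.
+ */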
+static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ struct ib_device *ibdev = uctx->device;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ struct mlx5_ib_alloc_ucontext_req_v2 req = {};
+ struct mlx5_ib_alloc_ucontext_resp resp = {};
+ struct mlx5_ib_ucontext *context = to_mucontext(uctx);
+ struct mlx5_bfreg_info *bfregi;
+ int ver;
+ int err;
+ size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
+ max_cqe_version);
+ bool lib_uar_4k;
+ bool lib_uar_dyn;
+
+ if (!dev->ib_active)
+ return -EAGAIN;
+
+ if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ ver = 0;
+ else if (udata->inlen >= min_req_v2)
+ ver = 2;
+ else
+ return -EINVAL;
+
+ err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
+ if (err)
+ return err;
+
+ if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX)
+ return -EOPNOTSUPP;
+
+ if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
+ return -EOPNOTSUPP;
+
+ req.total_num_bfregs = ALIGN(req.total_num_bfregs,
+ MLX5_NON_FP_BFREGS_PER_UAR);
+ if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
+ return -EINVAL;
+
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
+ err = mlx5_ib_devx_create(dev, true);
+ if (err < 0)
+ goto out_ctx;
+ context->devx_uid = err;
+ }
+
+ lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
+ lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
+ bfregi = &context->bfregi;
+
+ if (lib_uar_dyn) {
+ bfregi->lib_uar_dyn = lib_uar_dyn;
+ goto uar_done;
+ }
+
+ /* updates req->total_num_bfregs */
+ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
+ if (err)
+ goto out_devx;
+
+ mutex_init(&bfregi->lock);
+ bfregi->lib_uar_4k = lib_uar_4k;
+ bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count),
+ GFP_KERNEL);
+ if (!bfregi->count) {
+ err = -ENOMEM;
+ goto out_devx;
+ }
+
+ bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
+ sizeof(*bfregi->sys_pages),
+ GFP_KERNEL);
+ if (!bfregi->sys_pages) {
+ err = -ENOMEM;
+ goto out_count;
+ }
+
+ err = allocate_uars(dev, context);
+ if (err)
+ goto out_sys_pages;
+
+uar_done:
+ err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
+ context->devx_uid);
+ if (err)
+ goto out_uars;
+
+ INIT_LIST_HEAD(&context->db_page_list);
+ mutex_init(&context->db_page_mutex);
+
+ context->cqe_version = min_t(__u8,
+ (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
+ req.max_cqe_version);
+
+ err = set_ucontext_resp(uctx, &resp);
+ if (err)
+ goto out_mdev;
+
+ resp.response_length = min(udata->outlen, sizeof(resp));
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto out_mdev;
+
+ bfregi->ver = ver;
+ bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs;
+ context->lib_caps = req.lib_caps;
+ print_lib_caps(dev, context->lib_caps);
+
+ if (mlx5_ib_lag_should_assign_affinity(dev)) {
+ u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
+
+ atomic_set(&context->tx_port_affinity,
+ atomic_add_return(
+ 1, &dev->port[port].roce.tx_port_affinity));
+ }
+
+ return 0;
+
+out_mdev:
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+
+out_uars:
+ deallocate_uars(dev, context);
+
+out_sys_pages:
+ kfree(bfregi->sys_pages);
+
+out_count:
+ kfree(bfregi->count);
+
+out_devx:
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
+
+out_ctx:
+ return err;
+}
+
+static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_alloc_ucontext_resp uctx_resp = {};
+ int ret;
+
+ ret = set_ucontext_resp(ibcontext, &uctx_resp);
+ if (ret)
+ return ret;
+
+ uctx_resp.response_length =
+ min_t(size_t,
+ uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX),
+ sizeof(uctx_resp));
+
+ ret = uverbs_copy_to_struct_or_zero(attrs,
+ MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
+ &uctx_resp,
+ sizeof(uctx_resp));
+ return ret;
+}
+
+static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+ struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
+ struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
+ struct mlx5_bfreg_info *bfregi;
+
+ bfregi = &context->bfregi;
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+
+ deallocate_uars(dev, context);
+ kfree(bfregi->sys_pages);
+ kfree(bfregi->count);
+
+ if (context->devx_uid)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
+}
+
+static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
+ int uar_idx)
+{
+ int fw_uars_per_page;
+
+ fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
+
+ return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
+}
+
+static u64 uar_index2paddress(struct mlx5_ib_dev *dev,
+ int uar_idx)
+{
+ unsigned int fw_uars_per_page;
+
+ fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_UARS_IN_PAGE : 1;
+
+ return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
+}
+
+static int get_command(unsigned long offset)
+{
+ return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
+}
+
+static int get_arg(unsigned long offset)
+{
+ return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
+}
+
+static int get_index(unsigned long offset)
+{
+ return get_arg(offset);
+}
+
+/* Index resides in an extra byte to enable values larger than 255 */
+static int get_extended_index(unsigned long offset)
+{
+ return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
+}
+
+
+{
+}
+
+static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
+{
+ switch (cmd) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ return "WC";
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+ return "best effort WC";
+ case MLX5_IB_MMAP_NC_PAGE:
+ return "NC";
+ case MLX5_IB_MMAP_DEVICE_MEM:
+ return "Device Memory";
+ default:
+ return "Unknown";
+ }
+}
+
+static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
+{
+ if ((vma->vm_end - vma->vm_start != PAGE_SIZE) ||
+ !(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1)
+ return -EOPNOTSUPP;
+
+ if (vma->vm_flags & (VM_WRITE | VM_EXEC))
+ return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ if (!dev->mdev->clock_info)
+ return -EOPNOTSUPP;
+
+ return vm_insert_page(vma, vma->vm_start,
+ virt_to_page(dev->mdev->clock_info));
+}
+
+static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
+{
+ struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
+ struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
+ struct mlx5_var_table *var_table = &dev->var_table;
+ struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);
+
+ switch (mentry->mmap_flag) {
+ case MLX5_IB_MMAP_TYPE_MEMIC:
+ case MLX5_IB_MMAP_TYPE_MEMIC_OP:
+ mlx5_ib_dm_mmap_free(dev, mentry);
+ break;
+ case MLX5_IB_MMAP_TYPE_VAR:
+ mutex_lock(&var_table->bitmap_lock);
+ clear_bit(mentry->page_idx, var_table->bitmap);
+ mutex_unlock(&var_table->bitmap_lock);
+ kfree(mentry);
+ break;
+ case MLX5_IB_MMAP_TYPE_UAR_WC:
+ case MLX5_IB_MMAP_TYPE_UAR_NC:
+ mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
+ context->devx_uid);
+ kfree(mentry);
+ break;
+ default:
+ WARN_ON(true);
+ }
+}
+
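+/*
+ * Map a UAR page into userspace. For MLX5_IB_MMAP_ALLOC_WC a new UAR is
+ * allocated dynamically and its index recorded in bfregi->sys_pages;
+ * otherwise the statically allocated UAR for the requested index is used.
+ */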
+static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
+ struct vm_area_struct *vma,
+ struct mlx5_ib_ucontext *context)
+{
+ struct mlx5_bfreg_info *bfregi = &context->bfregi;
+ int err;
+ unsigned long idx;
+ phys_addr_t pfn;
+ pgprot_t prot;
+ u32 bfreg_dyn_idx = 0;
+ u32 uar_index;
+ int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC);
+ int max_valid_idx = dyn_uar ? bfregi->num_sys_pages :
+ bfregi->num_static_sys_pages;
+
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (dyn_uar)
+ idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages;
+ else
+ idx = get_index(vma->vm_pgoff);
+
+ if (idx >= max_valid_idx) {
+ mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n",
+ idx, max_valid_idx);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+ /* For MLX5_IB_MMAP_REGULAR_PAGE make a best-effort attempt to get WC */
+ prot = pgprot_writecombine(vma->vm_page_prot);
+ break;
+ case MLX5_IB_MMAP_NC_PAGE:
+ prot = pgprot_noncached(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dyn_uar) {
+ int uars_per_page;
+
+ uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k);
+ bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR);
+ if (bfreg_dyn_idx >= bfregi->total_num_bfregs) {
+ mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n",
+ bfreg_dyn_idx, bfregi->total_num_bfregs);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bfregi->lock);
+ /* Fail if the UAR is already allocated; the first bfreg index
+ * of each page holds its count.
+ */
+ if (bfregi->count[bfreg_dyn_idx]) {
+ mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx);
+ mutex_unlock(&bfregi->lock);
+ return -EINVAL;
+ }
+
+ bfregi->count[bfreg_dyn_idx]++;
+ mutex_unlock(&bfregi->lock);
+
+ err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
+ context->devx_uid);
+ if (err) {
+ mlx5_ib_warn(dev, "UAR alloc failed\n");
+ goto free_bfreg;
+ }
+ } else {
+ uar_index = bfregi->sys_pages[idx];
+ }
+
+ pfn = uar_index2pfn(dev, uar_index);
+ mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
+
+ err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
+ prot, NULL);
+ if (err) {
+ mlx5_ib_err(dev,
+ "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
+ err, mmap_cmd2str(cmd));
+ goto err;
+ }
+
+ if (dyn_uar)
+ bfregi->sys_pages[idx] = uar_index;
+ return 0;
+
+err:
+ if (!dyn_uar)
+ return err;
+
+ mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
+
+free_bfreg:
+ mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
+
+ return err;
+}
+
+static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma)
+{
+ unsigned long idx;
+ u8 command;
+
+ command = get_command(vma->vm_pgoff);
+ idx = get_extended_index(vma->vm_pgoff);
+
+ return (command << 16 | idx);
+}
+
+static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
+ struct vm_area_struct *vma,
+ struct ib_ucontext *ucontext)
+{
+ struct mlx5_user_mmap_entry *mentry;
+ struct rdma_user_mmap_entry *entry;
+ unsigned long pgoff;
+ pgprot_t prot;
+ phys_addr_t pfn;
+ int ret;
+
+ pgoff = mlx5_vma_to_pgoff(vma);
+ entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff);
+ if (!entry)
+ return -EINVAL;
+
+ mentry = to_mmmap(entry);
+ pfn = (mentry->address >> PAGE_SHIFT);
+ if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR ||
+ mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC)
+ prot = pgprot_noncached(vma->vm_page_prot);
+ else
+ prot = pgprot_writecombine(vma->vm_page_prot);
+ ret = rdma_user_mmap_io(ucontext, vma, pfn,
+ entry->npages * PAGE_SIZE,
+ prot,
+ entry);
+ rdma_user_mmap_entry_put(&mentry->rdma_entry);
+ return ret;
+}
+
+static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
+{
+ u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
+ u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
+
+ return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
+ (index & 0xFF)) << PAGE_SHIFT;
+}
+
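+/*
+ * Dispatch userspace mmap requests according to the command encoded in
+ * the page offset (UAR pages, core clock, clock info, or an
+ * rdma_user_mmap_entry based mapping).
+ */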
+static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
+{
+ struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
+ struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
+ unsigned long command;
+ phys_addr_t pfn;
+
+ command = get_command(vma->vm_pgoff);
+ switch (command) {
+ case MLX5_IB_MMAP_WC_PAGE:
+ case MLX5_IB_MMAP_ALLOC_WC:
+ if (!dev->wc_support)
+ return -EPERM;
+ fallthrough;
+ case MLX5_IB_MMAP_NC_PAGE:
+ case MLX5_IB_MMAP_REGULAR_PAGE:
+ return uar_mmap(dev, command, vma, context);
+
+ case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
+ return -ENOSYS;
+
+ case MLX5_IB_MMAP_CORE_CLOCK:
+ if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+ return -EINVAL;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ /* Don't expose to user-space information it shouldn't have */
+ if (PAGE_SIZE > 4096)
+ return -EOPNOTSUPP;
+
+ pfn = (dev->mdev->iseg_base +
+ offsetof(struct mlx5_init_seg, internal_timer_h)) >>
+ PAGE_SHIFT;
+ return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot),
+ NULL);
+ case MLX5_IB_MMAP_CLOCK_INFO:
+ return mlx5_ib_mmap_clock_info_page(dev, vma, context);
+
+ default:
+ return mlx5_ib_mmap_offset(dev, vma, ibcontext);
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct mlx5_ib_pd *pd = to_mpd(ibpd);
+ struct ib_device *ibdev = ibpd->device;
+ struct mlx5_ib_alloc_pd_resp resp;
+ int err;
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ u16 uid = 0;
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ uid = context ? context->devx_uid : 0;
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ MLX5_SET(alloc_pd_in, in, uid, uid);
+ err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
+ if (err)
+ return err;
+
+ pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
+ pd->uid = uid;
+ if (udata) {
+ resp.pdn = pd->pdn;
+ if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+ mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *mdev = to_mdev(pd->device);
+ struct mlx5_ib_pd *mpd = to_mpd(pd);
+
+ return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
+}
+
+static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(ibqp);
+ int err;
+ u16 uid;
+
+ uid = ibqp->pd ?
+ to_mpd(ibqp->pd)->uid : 0;
+
+ if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
+		mlx5_ib_dbg(dev, "Attaching a multicast group to an underlay QP is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
+ if (err)
+ mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
+ ibqp->qp_num, gid->raw);
+
+ return err;
+}
+
+static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ int err;
+ u16 uid;
+
+ uid = ibqp->pd ?
+ to_mpd(ibqp->pd)->uid : 0;
+ err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
+ if (err)
+ mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
+ ibqp->qp_num, gid->raw);
+
+ return err;
+}
+
+static int init_node_data(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
+ if (err)
+ return err;
+
+ dev->mdev->rev_id = dev->mdev->pdev->revision;
+
+ return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
+}
+
+static ssize_t fw_pages_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
+}
+static DEVICE_ATTR_RO(fw_pages);
+
+static ssize_t reg_pages_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
+}
+static DEVICE_ATTR_RO(reg_pages);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct mlx5_ib_dev *dev =
+ rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
+
+ return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
+ dev->mdev->board_id);
+}
+static DEVICE_ATTR_RO(board_id);
+
+static struct attribute *mlx5_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_fw_pages.attr,
+ &dev_attr_reg_pages.attr,
+ NULL,
+};
+
+static const struct attribute_group mlx5_attr_group = {
+ .attrs = mlx5_class_attributes,
+};
+
+static void pkey_change_handler(struct work_struct *work)
+{
+ struct mlx5_ib_port_resources *ports =
+ container_of(work, struct mlx5_ib_port_resources,
+ pkey_change_work);
+
+ if (!ports->gsi)
+		/*
+		 * We got this event before the device was fully configured,
+		 * and the MAD registration code hasn't run or finished yet.
+		 */
+ return;
+
+ mlx5_ib_gsi_pkey_change(ports->gsi);
+}
+
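+/*
+ * On a device fatal error, walk all QPs and collect the CQs of queues that
+ * still have outstanding work, then invoke their completion handlers so
+ * consumers can flush and tear down gracefully.
+ */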
+static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
+{
+ struct mlx5_ib_qp *mqp;
+ struct mlx5_ib_cq *send_mcq, *recv_mcq;
+ struct mlx5_core_cq *mcq;
+ struct list_head cq_armed_list;
+ unsigned long flags_qp;
+ unsigned long flags_cq;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&cq_armed_list);
+
+	/* Walk the QP list of this ibdev, synchronized against QP create/destroy. */
+ spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+ list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
+ spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+ if (mqp->sq.tail != mqp->sq.head) {
+ send_mcq = to_mcq(mqp->ibqp.send_cq);
+ spin_lock_irqsave(&send_mcq->lock, flags_cq);
+ if (send_mcq->mcq.comp &&
+ mqp->ibqp.send_cq->comp_handler) {
+ if (!send_mcq->mcq.reset_notify_added) {
+ send_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&send_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+ }
+ spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+ spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+ /* no handling is needed for SRQ */
+ if (!mqp->ibqp.srq) {
+ if (mqp->rq.tail != mqp->rq.head) {
+ recv_mcq = to_mcq(mqp->ibqp.recv_cq);
+ spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+ if (recv_mcq->mcq.comp &&
+ mqp->ibqp.recv_cq->comp_handler) {
+ if (!recv_mcq->mcq.reset_notify_added) {
+ recv_mcq->mcq.reset_notify_added = 1;
+ list_add_tail(&recv_mcq->mcq.reset_notify,
+ &cq_armed_list);
+ }
+ }
+ spin_unlock_irqrestore(&recv_mcq->lock,
+ flags_cq);
+ }
+ }
+ spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+ }
+	/*
+	 * At this point all in-flight post sends have been covered by the
+	 * lock/unlock of the locks above. Now invoke the completion handler
+	 * of every CQ that was collected.
+	 */
+ list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
+ mcq->comp(mcq, NULL);
+ }
+ spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+}
+
+static void delay_drop_handler(struct work_struct *work)
+{
+ int err;
+ struct mlx5_ib_delay_drop *delay_drop =
+ container_of(work, struct mlx5_ib_delay_drop,
+ delay_drop_work);
+
+ atomic_inc(&delay_drop->events_cnt);
+
+ mutex_lock(&delay_drop->lock);
+ err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
+ if (err) {
+ mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
+ delay_drop->timeout);
+ delay_drop->activate = false;
+ }
+ mutex_unlock(&delay_drop->lock);
+}
+
+static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
+ struct ib_event *ibev)
+{
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
+
+ switch (eqe->sub_type) {
+ case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ schedule_work(&ibdev->delay_drop.delay_drop_work);
+ break;
+ default: /* do nothing */
+ return;
+ }
+}
+
+static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
+ struct ib_event *ibev)
+{
+ u32 port = (eqe->data.port.port >> 4) & 0xf;
+
+ ibev->element.port_num = port;
+
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
+ /* In RoCE, port up/down events are handled in
+ * mlx5_netdev_event().
+ */
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ return -EINVAL;
+
+ ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ?
+ IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+ break;
+
+ case MLX5_PORT_CHANGE_SUBTYPE_LID:
+ ibev->event = IB_EVENT_LID_CHANGE;
+ break;
+
+ case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
+ ibev->event = IB_EVENT_PKEY_CHANGE;
+ schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
+ break;
+
+ case MLX5_PORT_CHANGE_SUBTYPE_GUID:
+ ibev->event = IB_EVENT_GID_CHANGE;
+ break;
+
+ case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+ ibev->event = IB_EVENT_CLIENT_REREGISTER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
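+/*
+ * Workqueue handler that translates mlx5 core events into IB events and
+ * dispatches them; a fatal system error also flushes outstanding work and
+ * marks the device inactive.
+ */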
+static void mlx5_ib_handle_event(struct work_struct *_work)
+{
+ struct mlx5_ib_event_work *work =
+ container_of(_work, struct mlx5_ib_event_work, work);
+ struct mlx5_ib_dev *ibdev;
+ struct ib_event ibev;
+ bool fatal = false;
+
+ if (work->is_slave) {
+ ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi);
+ if (!ibdev)
+ goto out;
+ } else {
+ ibdev = work->dev;
+ }
+
+ switch (work->event) {
+ case MLX5_DEV_EVENT_SYS_ERROR:
+ ibev.event = IB_EVENT_DEVICE_FATAL;
+ mlx5_ib_handle_internal_error(ibdev);
+ ibev.element.port_num = (u8)(unsigned long)work->param;
+ fatal = true;
+ break;
+ case MLX5_EVENT_TYPE_PORT_CHANGE:
+ if (handle_port_change(ibdev, work->param, &ibev))
+ goto out;
+ break;
+ case MLX5_EVENT_TYPE_GENERAL_EVENT:
+ handle_general_event(ibdev, work->param, &ibev);
+ fallthrough;
+ default:
+ goto out;
+ }
+
+ ibev.device = &ibdev->ib_dev;
+
+ if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
+ mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
+ goto out;
+ }
+
+ if (ibdev->ib_active)
+ ib_dispatch_event(&ibev);
+
+ if (fatal)
+ ibdev->ib_active = false;
+out:
+ kfree(work);
+}
+
+static int mlx5_ib_event(struct notifier_block *nb,
+ unsigned long event, void *param)
+{
+ struct mlx5_ib_event_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return NOTIFY_DONE;
+
+ INIT_WORK(&work->work, mlx5_ib_handle_event);
+ work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events);
+ work->is_slave = false;
+ work->param = param;
+ work->event = event;
+
+ queue_work(mlx5_ib_event_wq, &work->work);
+
+ return NOTIFY_OK;
+}
+
+static int mlx5_ib_event_slave_port(struct notifier_block *nb,
+ unsigned long event, void *param)
+{
+ struct mlx5_ib_event_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return NOTIFY_DONE;
+
+ INIT_WORK(&work->work, mlx5_ib_handle_event);
+ work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events);
+ work->is_slave = true;
+ work->param = param;
+ work->event = event;
+ queue_work(mlx5_ib_event_wq, &work->work);
+
+ return NOTIFY_OK;
+}
+
+static int set_has_smi_cap(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_hca_vport_context vport_ctx;
+ int err;
+ int port;
+
+ if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
+ return 0;
+
+ for (port = 1; port <= dev->num_ports; port++) {
+ if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+ dev->port_caps[port - 1].has_smi = true;
+ continue;
+ }
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
+ &vport_ctx);
+ if (err) {
+ mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n",
+ port, err);
+ return err;
+ }
+ dev->port_caps[port - 1].has_smi = vport_ctx.has_smi;
+ }
+
+ return 0;
+}
+
+static void get_ext_port_caps(struct mlx5_ib_dev *dev)
+{
+ unsigned int port;
+
+ rdma_for_each_port (&dev->ib_dev, port)
+ mlx5_query_ext_port_caps(dev, port);
+}
+
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+ switch (umr_fence_cap) {
+ case MLX5_CAP_UMR_FENCE_NONE:
+ return MLX5_FENCE_MODE_NONE;
+ case MLX5_CAP_UMR_FENCE_SMALL:
+ return MLX5_FENCE_MODE_INITIATOR_SMALL;
+ default:
+ return MLX5_FENCE_MODE_STRONG_ORDERING;
+ }
+}
+
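+/*
+ * Allocate the shared device resources: PD p0, CQ c0, two XRC domains and two
+ * SRQs (one XRC, one basic), and set up the per-port P_Key change work items.
+ */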
+static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_resources *devr = &dev->devr;
+ struct ib_srq_init_attr attr;
+ struct ib_device *ibdev;
+ struct ib_cq_init_attr cq_attr = {.cqe = 1};
+ int port;
+ int ret = 0;
+
+ ibdev = &dev->ib_dev;
+
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ return -EOPNOTSUPP;
+
+ devr->p0 = ib_alloc_pd(ibdev, 0);
+ if (IS_ERR(devr->p0))
+ return PTR_ERR(devr->p0);
+
+ devr->c0 = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+ if (IS_ERR(devr->c0)) {
+ ret = PTR_ERR(devr->c0);
+ goto error1;
+ }
+
+ ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
+ if (ret)
+ goto error2;
+
+ ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
+ if (ret)
+ goto error3;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.attr.max_sge = 1;
+ attr.attr.max_wr = 1;
+ attr.srq_type = IB_SRQT_XRC;
+ attr.ext.cq = devr->c0;
+
+ devr->s0 = ib_create_srq(devr->p0, &attr);
+ if (IS_ERR(devr->s0)) {
+ ret = PTR_ERR(devr->s0);
+ goto err_create;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.attr.max_sge = 1;
+ attr.attr.max_wr = 1;
+ attr.srq_type = IB_SRQT_BASIC;
+
+ devr->s1 = ib_create_srq(devr->p0, &attr);
+ if (IS_ERR(devr->s1)) {
+ ret = PTR_ERR(devr->s1);
+ goto error6;
+ }
+
+ for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
+ INIT_WORK(&devr->ports[port].pkey_change_work,
+ pkey_change_handler);
+
+ return 0;
+
+error6:
+ ib_destroy_srq(devr->s0);
+err_create:
+ mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
+error3:
+ mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
+error2:
+ ib_destroy_cq(devr->c0);
+error1:
+ ib_dealloc_pd(devr->p0);
+ return ret;
+}
+
+static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int port;
+
+	/*
+	 * Make sure no P_Key change work items are still executing.
+	 *
+	 * At this stage the mlx5_ib_event notifier should already be
+	 * unregistered, which ensures that no new work items are queued.
+	 */
+ for (port = 0; port < ARRAY_SIZE(devr->ports); ++port)
+ cancel_work_sync(&devr->ports[port].pkey_change_work);
+
+ ib_destroy_srq(devr->s1);
+ ib_destroy_srq(devr->s0);
+ mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
+ mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
+ ib_destroy_cq(devr->c0);
+ ib_dealloc_pd(devr->p0);
+}
+
+static u32 get_core_cap_flags(struct ib_device *ibdev,
+ struct mlx5_hca_vport_context *rep)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
+ u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
+ u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
+ bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
+ u32 ret = 0;
+
+ if (rep->grh_required)
+ ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
+
+ if (ll == IB_LINK_LAYER_INFINIBAND)
+ return ret | RDMA_CORE_PORT_IBA_IB;
+
+ if (raw_support)
+ ret |= RDMA_CORE_PORT_RAW_PACKET;
+
+ if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
+ return ret;
+
+ if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
+ return ret;
+
+ if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
+ ret |= RDMA_CORE_PORT_IBA_ROCE;
+
+ if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
+ ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ return ret;
+}
+
+static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
+ struct mlx5_hca_vport_context rep = {0};
+ int err;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ if (ll == IB_LINK_LAYER_INFINIBAND) {
+ err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
+ &rep);
+ if (err)
+ return err;
+ }
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+ return 0;
+}
+
+static void get_dev_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct mlx5_ib_dev *dev =
+ container_of(ibdev, struct mlx5_ib_dev, ib_dev);
+ snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d",
+ fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
+ fw_rev_sub(dev->mdev));
+}
+
+static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
+ MLX5_FLOW_NAMESPACE_LAG);
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (!ns || !mlx5_lag_is_active(mdev))
+ return 0;
+
+ err = mlx5_cmd_create_vport_lag(mdev);
+ if (err)
+ return err;
+
+ ft = mlx5_create_lag_demux_flow_table(ns, 0, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_destroy_vport_lag;
+ }
+
+ dev->flow_db->lag_demux_ft = ft;
+ dev->lag_ports = mlx5_lag_get_num_ports(mdev);
+ dev->lag_active = true;
+ return 0;
+
+err_destroy_vport_lag:
+ mlx5_cmd_destroy_vport_lag(mdev);
+ return err;
+}
+
+static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ if (dev->lag_active) {
+ dev->lag_active = false;
+
+ mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
+ dev->flow_db->lag_demux_ft = NULL;
+
+ mlx5_cmd_destroy_vport_lag(mdev);
+ }
+}
+
+static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ int err;
+
+ dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
+ if (err) {
+ dev->port[port_num].roce.nb.notifier_call = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ if (dev->port[port_num].roce.nb.notifier_call) {
+ unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
+ dev->port[port_num].roce.nb.notifier_call = NULL;
+ }
+}
+
+static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ if (!dev->is_rep && dev->profile != &raw_eth_profile) {
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
+ }
+
+ err = mlx5_eth_lag_init(dev);
+ if (err)
+ goto err_disable_roce;
+
+ return 0;
+
+err_disable_roce:
+ if (!dev->is_rep && dev->profile != &raw_eth_profile)
+ mlx5_nic_vport_disable_roce(dev->mdev);
+
+ return err;
+}
+
+static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
+{
+ mlx5_eth_lag_cleanup(dev);
+ if (!dev->is_rep && dev->profile != &raw_eth_profile)
+ mlx5_nic_vport_disable_roce(dev->mdev);
+}
+
+static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
+ enum rdma_netdev_t type,
+ struct rdma_netdev_alloc_params *params)
+{
+ if (type != RDMA_NETDEV_IPOIB)
+ return -EOPNOTSUPP;
+
+ return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
+}
+
+static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+ char lbuf[20];
+ int len;
+
+ len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout);
+ return simple_read_from_buffer(buf, count, pos, lbuf, len);
+}
+
+static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_ib_delay_drop *delay_drop = filp->private_data;
+ u32 timeout;
+ u32 var;
+
+ if (kstrtouint_from_user(buf, count, 0, &var))
+ return -EFAULT;
+
+ timeout = min_t(u32, roundup(var, 100), MLX5_MAX_DELAY_DROP_TIMEOUT_MS *
+ 1000);
+ if (timeout != var)
+ mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n",
+ timeout);
+
+ delay_drop->timeout = timeout;
+
+ return count;
+}
+
+static const struct file_operations fops_delay_drop_timeout = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = delay_drop_timeout_write,
+ .read = delay_drop_timeout_read,
+};
+
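+/*
+ * Detach a slave port from its IB device: clear the mpi<->ibdev linkage,
+ * unregister the notifiers, wait for outstanding references on the slave
+ * mdev to drop, and finally unaffiliate the vport in firmware.
+ */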
+static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
+{
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ struct mlx5_ib_port *port = &ibdev->port[port_num];
+ int comps;
+ int err;
+ int i;
+
+ lockdep_assert_held(&mlx5_ib_multiport_mutex);
+
+ mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
+
+ spin_lock(&port->mp.mpi_lock);
+ if (!mpi->ibdev) {
+ spin_unlock(&port->mp.mpi_lock);
+ return;
+ }
+
+ mpi->ibdev = NULL;
+
+ spin_unlock(&port->mp.mpi_lock);
+ if (mpi->mdev_events.notifier_call)
+ mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+ mpi->mdev_events.notifier_call = NULL;
+ mlx5_remove_netdev_notifier(ibdev, port_num);
+ spin_lock(&port->mp.mpi_lock);
+
+ comps = mpi->mdev_refcnt;
+ if (comps) {
+ mpi->unaffiliate = true;
+ init_completion(&mpi->unref_comp);
+ spin_unlock(&port->mp.mpi_lock);
+
+ for (i = 0; i < comps; i++)
+ wait_for_completion(&mpi->unref_comp);
+
+ spin_lock(&port->mp.mpi_lock);
+ mpi->unaffiliate = false;
+ }
+
+ port->mp.mpi = NULL;
+
+ spin_unlock(&port->mp.mpi_lock);
+
+ err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
+
+ mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
+	/* Just log an error on failure; the pointers still need to be cleaned
+	 * up and the mpi added back to the list.
+	 */
+ if (err)
+ mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n",
+ port_num + 1);
+
+ ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
+}
+
+static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
+ struct mlx5_ib_multiport_info *mpi)
+{
+ u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
+ int err;
+
+ lockdep_assert_held(&mlx5_ib_multiport_mutex);
+
+ spin_lock(&ibdev->port[port_num].mp.mpi_lock);
+ if (ibdev->port[port_num].mp.mpi) {
+ mlx5_ib_dbg(ibdev, "port %u already affiliated.\n",
+ port_num + 1);
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+ return false;
+ }
+
+ ibdev->port[port_num].mp.mpi = mpi;
+ mpi->ibdev = ibdev;
+ mpi->mdev_events.notifier_call = NULL;
+ spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
+
+ err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
+ if (err)
+ goto unbind;
+
+ err = mlx5_add_netdev_notifier(ibdev, port_num);
+ if (err) {
+ mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
+ port_num + 1);
+ goto unbind;
+ }
+
+ mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
+ mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
+
+ mlx5_ib_init_cong_debugfs(ibdev, port_num);
+
+ return true;
+
+unbind:
+ mlx5_ib_unbind_slave_port(ibdev, mpi);
+ return false;
+}
+
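+/*
+ * On a multi-port master device with an Ethernet link layer, enable RoCE,
+ * install a stub multiport info entry for the native port and bind any
+ * unaffiliated slave ports that share this device's system image GUID.
+ */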
+static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
+{
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ struct mlx5_ib_multiport_info *mpi;
+ int err;
+ u32 i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return 0;
+
+ err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
+ &dev->sys_image_guid);
+ if (err)
+ return err;
+
+ err = mlx5_nic_vport_enable_roce(dev->mdev);
+ if (err)
+ return err;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ bool bound = false;
+
+ /* build a stub multiport info struct for the native port. */
+ if (i == port_num) {
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi) {
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ mlx5_nic_vport_disable_roce(dev->mdev);
+ return -ENOMEM;
+ }
+
+ mpi->is_master = true;
+ mpi->mdev = dev->mdev;
+ mpi->sys_image_guid = dev->sys_image_guid;
+ dev->port[i].mp.mpi = mpi;
+ mpi->ibdev = dev;
+ mpi = NULL;
+ continue;
+ }
+
+ list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
+ list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+ (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+ }
+
+ if (bound) {
+ dev_dbg(mpi->mdev->device,
+ "removing port from unaffiliated list.\n");
+ mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
+ list_del(&mpi->list);
+ break;
+ }
+ }
+ if (!bound)
+ mlx5_ib_dbg(dev, "no free port found for port %d\n",
+ i + 1);
+ }
+
+ list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ return err;
+}
+
+static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
+{
+ u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev,
+ port_num + 1);
+ u32 i;
+
+ if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
+ return;
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ for (i = 0; i < dev->num_ports; i++) {
+ if (dev->port[i].mp.mpi) {
+ /* Destroy the native port stub */
+ if (i == port_num) {
+ kfree(dev->port[i].mp.mpi);
+ dev->port[i].mp.mpi = NULL;
+ } else {
+ mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
+ i + 1);
+ list_add_tail(&dev->port[i].mp.mpi->list,
+ &mlx5_ib_unaffiliated_port_list);
+ mlx5_ib_unbind_slave_port(dev,
+ dev->port[i].mp.mpi);
+ }
+ }
+ }
+
+ mlx5_ib_dbg(dev, "removing from devlist\n");
+ list_del(&dev->ib_dev_list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+
+ mlx5_nic_vport_disable_roce(dev->mdev);
+}
+
+static int mmap_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_user_mmap_entry *obj = uobject->object;
+
+ rdma_user_mmap_entry_remove(&obj->rdma_entry);
+ return 0;
+}
+
+static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c,
+ struct mlx5_user_mmap_entry *entry,
+ size_t length)
+{
+ return rdma_user_mmap_entry_insert_range(
+ &c->ibucontext, &entry->rdma_entry, length,
+ (MLX5_IB_MMAP_OFFSET_START << 16),
+ ((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1));
+}
+
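+/*
+ * Reserve one entry of the VAR doorbell table: grab a free slot from the
+ * bitmap, derive its HW address from the table start and stride, and insert
+ * an mmap entry so user space can map it.
+ */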
+static struct mlx5_user_mmap_entry *
+alloc_var_entry(struct mlx5_ib_ucontext *c)
+{
+ struct mlx5_user_mmap_entry *entry;
+ struct mlx5_var_table *var_table;
+ u32 page_idx;
+ int err;
+
+ var_table = &to_mdev(c->ibucontext.device)->var_table;
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&var_table->bitmap_lock);
+ page_idx = find_first_zero_bit(var_table->bitmap,
+ var_table->num_var_hw_entries);
+ if (page_idx >= var_table->num_var_hw_entries) {
+ err = -ENOSPC;
+ mutex_unlock(&var_table->bitmap_lock);
+ goto end;
+ }
+
+ set_bit(page_idx, var_table->bitmap);
+ mutex_unlock(&var_table->bitmap_lock);
+
+ entry->address = var_table->hw_start_addr +
+ (page_idx * var_table->stride_size);
+ entry->page_idx = page_idx;
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR;
+
+ err = mlx5_rdma_user_mmap_entry_insert(c, entry,
+ var_table->stride_size);
+ if (err)
+ goto err_insert;
+
+ return entry;
+
+err_insert:
+ mutex_lock(&var_table->bitmap_lock);
+ clear_bit(page_idx, var_table->bitmap);
+ mutex_unlock(&var_table->bitmap_lock);
+end:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_user_mmap_entry *entry;
+ u64 mmap_offset;
+ u32 length;
+ int err;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ entry = alloc_var_entry(c);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+
+ mmap_offset = mlx5_entry_to_mmap_offset(entry);
+ length = entry->rdma_entry.npages * PAGE_SIZE;
+ uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+ &mmap_offset, sizeof(mmap_offset));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+ &entry->page_idx, sizeof(entry->page_idx));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+ &length, sizeof(length));
+ return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_VAR_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_VAR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_VAR_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_VAR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR,
+ UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY));
+
+static bool var_is_supported(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q);
+}
+
+static struct mlx5_user_mmap_entry *
+alloc_uar_entry(struct mlx5_ib_ucontext *c,
+ enum mlx5_ib_uapi_uar_alloc_type alloc_type)
+{
+ struct mlx5_user_mmap_entry *entry;
+ struct mlx5_ib_dev *dev;
+ u32 uar_index;
+ int err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ dev = to_mdev(c->ibucontext.device);
+ err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
+ if (err)
+ goto end;
+
+ entry->page_idx = uar_index;
+ entry->address = uar_index2paddress(dev, uar_index);
+ if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC;
+ else
+ entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC;
+
+ err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE);
+ if (err)
+ goto err_insert;
+
+ return entry;
+
+err_insert:
+ mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
+end:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
+ enum mlx5_ib_uapi_uar_alloc_type alloc_type;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_user_mmap_entry *entry;
+ u64 mmap_offset;
+ u32 length;
+ int err;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ err = uverbs_get_const(&alloc_type, attrs,
+ MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE);
+ if (err)
+ return err;
+
+ if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF &&
+ alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC)
+ return -EOPNOTSUPP;
+
+ if (!to_mdev(c->ibucontext.device)->wc_support &&
+ alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF)
+ return -EOPNOTSUPP;
+
+ entry = alloc_uar_entry(c, alloc_type);
+ if (IS_ERR(entry))
+ return PTR_ERR(entry);
+
+ mmap_offset = mlx5_entry_to_mmap_offset(entry);
+ length = entry->rdma_entry.npages * PAGE_SIZE;
+ uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ &mmap_offset, sizeof(mmap_offset));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+ &entry->page_idx, sizeof(entry->page_idx));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ &length, sizeof(length));
+ return err;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_UAR_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_UAR,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE,
+ enum mlx5_ib_uapi_uar_alloc_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_UAR_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_UAR,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR,
+ UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY));
+
+ADD_UVERBS_ATTRIBUTES_SIMPLE(
+ mlx5_ib_query_context,
+ UVERBS_OBJECT_DEVICE,
+ UVERBS_METHOD_QUERY_CONTEXT,
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX,
+ UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp,
+ dump_fill_mkey),
+ UA_MANDATORY));
+
+static const struct uapi_definition mlx5_ib_defs[] = {
+ UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_qos_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_std_types_defs),
+ UAPI_DEF_CHAIN(mlx5_ib_dm_defs),
+
+ UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR,
+ UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR),
+ {}
+};
+
+static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_multiport_master(dev);
+ WARN_ON(!xa_empty(&dev->odp_mkeys));
+ mutex_destroy(&dev->cap_mask_mutex);
+ WARN_ON(!xa_empty(&dev->sig_mrs));
+ WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
+}
+
+static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err;
+ int i;
+
+ dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
+ dev->ib_dev.phys_port_cnt = dev->num_ports;
+ dev->ib_dev.dev.parent = mdev->device;
+ dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ spin_lock_init(&dev->port[i].mp.mpi_lock);
+ rwlock_init(&dev->port[i].roce.netdev_lock);
+ dev->port[i].roce.dev = dev;
+ dev->port[i].roce.native_port_num = i + 1;
+ dev->port[i].roce.last_port_state = IB_PORT_DOWN;
+ }
+
+ err = mlx5_ib_init_multiport_master(dev);
+ if (err)
+ return err;
+
+ err = set_has_smi_cap(dev);
+ if (err)
+ goto err_mp;
+
+ err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
+ if (err)
+ goto err_mp;
+
+ if (mlx5_use_mad_ifc(dev))
+ get_ext_port_caps(dev);
+
+ dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
+
+ mutex_init(&dev->cap_mask_mutex);
+ INIT_LIST_HEAD(&dev->qp_list);
+ spin_lock_init(&dev->reset_flow_resource_lock);
+ xa_init(&dev->odp_mkeys);
+ xa_init(&dev->sig_mrs);
+ atomic_set(&dev->mkey_var, 0);
+
+ spin_lock_init(&dev->dm.lock);
+ dev->dm.dev = mdev;
+ return 0;
+
+err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
+ return err;
+}
+
+static int mlx5_ib_enable_driver(struct ib_device *dev)
+{
+ struct mlx5_ib_dev *mdev = to_mdev(dev);
+ int ret;
+
+ ret = mlx5_ib_test_wc(mdev);
+ mlx5_ib_dbg(mdev, "Write-Combining %s",
+ mdev->wc_support ? "supported" : "not supported");
+
+ return ret;
+}
+
+static const struct ib_device_ops mlx5_ib_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_MLX5,
+ .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION,
+
+ .add_gid = mlx5_ib_add_gid,
+ .alloc_mr = mlx5_ib_alloc_mr,
+ .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity,
+ .alloc_pd = mlx5_ib_alloc_pd,
+ .alloc_ucontext = mlx5_ib_alloc_ucontext,
+ .attach_mcast = mlx5_ib_mcg_attach,
+ .check_mr_status = mlx5_ib_check_mr_status,
+ .create_ah = mlx5_ib_create_ah,
+ .create_cq = mlx5_ib_create_cq,
+ .create_qp = mlx5_ib_create_qp,
+ .create_srq = mlx5_ib_create_srq,
+ .create_user_ah = mlx5_ib_create_ah,
+ .dealloc_pd = mlx5_ib_dealloc_pd,
+ .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
+ .del_gid = mlx5_ib_del_gid,
+ .dereg_mr = mlx5_ib_dereg_mr,
+ .destroy_ah = mlx5_ib_destroy_ah,
+ .destroy_cq = mlx5_ib_destroy_cq,
+ .destroy_qp = mlx5_ib_destroy_qp,
+ .destroy_srq = mlx5_ib_destroy_srq,
+ .detach_mcast = mlx5_ib_mcg_detach,
+ .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
+ .drain_rq = mlx5_ib_drain_rq,
+ .drain_sq = mlx5_ib_drain_sq,
+ .device_group = &mlx5_attr_group,
+ .enable_driver = mlx5_ib_enable_driver,
+ .get_dev_fw_str = get_dev_fw_str,
+ .get_dma_mr = mlx5_ib_get_dma_mr,
+ .get_link_layer = mlx5_ib_port_link_layer,
+ .map_mr_sg = mlx5_ib_map_mr_sg,
+ .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi,
+ .mmap = mlx5_ib_mmap,
+ .mmap_free = mlx5_ib_mmap_free,
+ .modify_cq = mlx5_ib_modify_cq,
+ .modify_device = mlx5_ib_modify_device,
+ .modify_port = mlx5_ib_modify_port,
+ .modify_qp = mlx5_ib_modify_qp,
+ .modify_srq = mlx5_ib_modify_srq,
+ .poll_cq = mlx5_ib_poll_cq,
+ .post_recv = mlx5_ib_post_recv_nodrain,
+ .post_send = mlx5_ib_post_send_nodrain,
+ .post_srq_recv = mlx5_ib_post_srq_recv,
+ .process_mad = mlx5_ib_process_mad,
+ .query_ah = mlx5_ib_query_ah,
+ .query_device = mlx5_ib_query_device,
+ .query_gid = mlx5_ib_query_gid,
+ .query_pkey = mlx5_ib_query_pkey,
+ .query_qp = mlx5_ib_query_qp,
+ .query_srq = mlx5_ib_query_srq,
+ .query_ucontext = mlx5_ib_query_ucontext,
+ .reg_user_mr = mlx5_ib_reg_user_mr,
+ .reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf,
+ .req_notify_cq = mlx5_ib_arm_cq,
+ .rereg_user_mr = mlx5_ib_rereg_user_mr,
+ .resize_cq = mlx5_ib_resize_cq,
+
+ INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs),
+ INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq),
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext),
+};
+
+static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
+ .rdma_netdev_get_params = mlx5_ib_rn_get_params,
+};
+
+static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
+ .get_vf_config = mlx5_ib_get_vf_config,
+ .get_vf_guid = mlx5_ib_get_vf_guid,
+ .get_vf_stats = mlx5_ib_get_vf_stats,
+ .set_vf_guid = mlx5_ib_set_vf_guid,
+ .set_vf_link_state = mlx5_ib_set_vf_link_state,
+};
+
+static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
+ .alloc_mw = mlx5_ib_alloc_mw,
+ .dealloc_mw = mlx5_ib_dealloc_mw,
+
+ INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw),
+};
+
+static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
+ .alloc_xrcd = mlx5_ib_alloc_xrcd,
+ .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
+
+ INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd),
+};
+
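+/*
+ * Size the VAR table from the VDPA emulation caps: the doorbell BAR size
+ * divided by the doorbell stride gives the number of HW entries tracked in
+ * the allocation bitmap.
+ */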
+static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_var_table *var_table = &dev->var_table;
+ u8 log_doorbell_bar_size;
+ u8 log_doorbell_stride;
+ u64 bar_size;
+
+ log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
+ log_doorbell_bar_size);
+ log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
+ log_doorbell_stride);
+ var_table->hw_start_addr = dev->mdev->bar_addr +
+ MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
+ doorbell_bar_offset);
+ bar_size = (1ULL << log_doorbell_bar_size) * 4096;
+ var_table->stride_size = 1ULL << log_doorbell_stride;
+ var_table->num_var_hw_entries = div_u64(bar_size,
+ var_table->stride_size);
+ mutex_init(&var_table->bitmap_lock);
+ var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
+ GFP_KERNEL);
+ return (var_table->bitmap) ? 0 : -ENOMEM;
+}
+
+static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev)
+{
+ bitmap_free(dev->var_table.bitmap);
+}
+
+static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
+ IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
+ ib_set_device_ops(&dev->ib_dev,
+ &mlx5_ib_dev_ipoib_enhanced_ops);
+
+ if (mlx5_core_is_pf(mdev))
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
+
+ dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
+ if (MLX5_CAP_GEN(mdev, imaicl))
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
+
+ if (MLX5_CAP_GEN(mdev, xrc))
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
+
+ if (MLX5_CAP_DEV_MEM(mdev, memic) ||
+ MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM)
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
+
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
+ dev->ib_dev.driver_def = mlx5_ib_defs;
+
+ err = init_node_data(dev);
+ if (err)
+ return err;
+
+ if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
+ MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
+ mutex_init(&dev->lb.mutex);
+
+ if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
+ err = mlx5_ib_init_var_table(dev);
+ if (err)
+ return err;
+ }
+
+ dev->ib_dev.use_cq_dim = true;
+
+ return 0;
+}
+
+static const struct ib_device_ops mlx5_ib_dev_port_ops = {
+ .get_port_immutable = mlx5_port_immutable,
+ .query_port = mlx5_ib_query_port,
+};
+
+static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
+{
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
+ return 0;
+}
+
+static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
+ .get_port_immutable = mlx5_port_rep_immutable,
+ .query_port = mlx5_ib_rep_query_port,
+ .query_pkey = mlx5_ib_rep_query_pkey,
+};
+
+static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
+{
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
+ return 0;
+}
+
+static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
+ .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
+ .create_wq = mlx5_ib_create_wq,
+ .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
+ .destroy_wq = mlx5_ib_destroy_wq,
+ .get_netdev = mlx5_ib_get_netdev,
+ .modify_wq = mlx5_ib_modify_wq,
+
+ INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table,
+ ib_rwq_ind_tbl),
+};
+
+static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u32 port_num = 0;
+ int err;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+
+ /* Register only for native ports */
+ err = mlx5_add_netdev_notifier(dev, port_num);
+ if (err)
+ return err;
+
+ err = mlx5_enable_eth(dev);
+ if (err)
+ goto cleanup;
+ }
+
+ return 0;
+cleanup:
+ mlx5_remove_netdev_notifier(dev, port_num);
+ return err;
+}
+
+static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ u32 port_num;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET) {
+ mlx5_disable_eth(dev);
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ mlx5_remove_netdev_notifier(dev, port_num);
+ }
+}
+
+static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_init_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+ return 0;
+}
+
+static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_cleanup_cong_debugfs(dev,
+ mlx5_core_native_port_num(dev->mdev) - 1);
+}
+
+static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
+{
+ dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
+ return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
+}
+
+static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
+}
+
+static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
+ if (err)
+ return err;
+
+ err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
+ if (err)
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+
+ return err;
+}
+
+static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
+ mlx5_free_bfreg(dev->mdev, &dev->bfreg);
+}
+
+static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+{
+ const char *name;
+
+ if (!mlx5_lag_is_active(dev->mdev))
+ name = "mlx5_%d";
+ else
+ name = "mlx5_bond_%d";
+ return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
+}
+
+static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
+{
+ int err;
+
+ err = mlx5_mkey_cache_cleanup(dev);
+ if (err)
+ mlx5_ib_warn(dev, "mr cache cleanup failed\n");
+
+ mlx5r_umr_resource_cleanup(dev);
+}
+
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+{
+ ib_unregister_device(&dev->ib_dev);
+}
+
+static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
+{
+ int ret;
+
+ ret = mlx5r_umr_resource_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mlx5_mkey_cache_init(dev);
+ if (ret)
+ mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
+ return ret;
+}
+
+static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
+{
+ struct dentry *root;
+
+ if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+ return 0;
+
+ mutex_init(&dev->delay_drop.lock);
+ dev->delay_drop.dev = dev;
+ dev->delay_drop.activate = false;
+ dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000;
+ INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler);
+ atomic_set(&dev->delay_drop.rqs_cnt, 0);
+ atomic_set(&dev->delay_drop.events_cnt, 0);
+
+ if (!mlx5_debugfs_root)
+ return 0;
+
+ root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
+ dev->delay_drop.dir_debugfs = root;
+
+ debugfs_create_atomic_t("num_timeout_events", 0400, root,
+ &dev->delay_drop.events_cnt);
+ debugfs_create_atomic_t("num_rqs", 0400, root,
+ &dev->delay_drop.rqs_cnt);
+ debugfs_create_file("timeout", 0600, root, &dev->delay_drop,
+ &fops_delay_drop_timeout);
+ return 0;
+}
+
+static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP))
+ return;
+
+ cancel_work_sync(&dev->delay_drop.delay_drop_work);
+ if (!dev->delay_drop.dir_debugfs)
+ return;
+
+ debugfs_remove_recursive(dev->delay_drop.dir_debugfs);
+ dev->delay_drop.dir_debugfs = NULL;
+}
+
+static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
+{
+ dev->mdev_events.notifier_call = mlx5_ib_event;
+ mlx5_notifier_register(dev->mdev, &dev->mdev_events);
+ return 0;
+}
+
+static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
+}
+
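+/*
+ * Device setup is driven by a profile: an ordered array of stages, each with
+ * optional init and cleanup callbacks. __mlx5_ib_add() runs the init
+ * callbacks in order and unwinds on failure; __mlx5_ib_remove() runs the
+ * cleanup callbacks in reverse order, starting from the given stage.
+ */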
+void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage)
+{
+ dev->ib_active = false;
+
+ /* Number of stages to cleanup */
+ while (stage) {
+ stage--;
+ if (profile->stage[stage].cleanup)
+ profile->stage[stage].cleanup(dev);
+ }
+
+ kfree(dev->port);
+ ib_dealloc_device(&dev->ib_dev);
+}
+
+int __mlx5_ib_add(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile)
+{
+ int err;
+ int i;
+
+ dev->profile = profile;
+
+ for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
+ if (profile->stage[i].init) {
+ err = profile->stage[i].init(dev);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ dev->ib_active = true;
+ return 0;
+
+err_out:
+ /* Clean up stages which were initialized */
+ while (i) {
+ i--;
+ if (profile->stage[i].cleanup)
+ profile->stage[i].cleanup(dev);
+ }
+	return err;
+}
+
+static const struct mlx5_ib_profile pf_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_FS,
+ mlx5_ib_fs_init,
+ mlx5_ib_fs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ mlx5_ib_stage_caps_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+ mlx5_ib_stage_non_default_cb,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_roce_init,
+ mlx5_ib_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
+ STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+ mlx5_init_srq_table,
+ mlx5_cleanup_srq_table),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_dev_res_init,
+ mlx5_ib_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ mlx5_ib_stage_dev_notifier_init,
+ mlx5_ib_stage_dev_notifier_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_ODP,
+ mlx5_ib_odp_init_one,
+ mlx5_ib_odp_cleanup_one),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_counters_init,
+ mlx5_ib_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ mlx5_ib_stage_uar_init,
+ mlx5_ib_stage_uar_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+ NULL,
+ mlx5_ib_stage_pre_ib_reg_umr_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
+ mlx5_ib_devx_init,
+ mlx5_ib_devx_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+ mlx5_ib_stage_post_ib_reg_umr_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
+ mlx5_ib_restrack_init,
+ NULL),
+};
+
+const struct mlx5_ib_profile raw_eth_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_FS,
+ mlx5_ib_fs_init,
+ mlx5_ib_fs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ mlx5_ib_stage_caps_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+ mlx5_ib_stage_raw_eth_non_default_cb,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_roce_init,
+ mlx5_ib_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_QP,
+ mlx5_init_qp_table,
+ mlx5_cleanup_qp_table),
+ STAGE_CREATE(MLX5_IB_STAGE_SRQ,
+ mlx5_init_srq_table,
+ mlx5_cleanup_srq_table),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_dev_res_init,
+ mlx5_ib_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ mlx5_ib_stage_dev_notifier_init,
+ mlx5_ib_stage_dev_notifier_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_counters_init,
+ mlx5_ib_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS,
+ mlx5_ib_stage_cong_debugfs_init,
+ mlx5_ib_stage_cong_debugfs_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ mlx5_ib_stage_uar_init,
+ mlx5_ib_stage_uar_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+ NULL,
+ mlx5_ib_stage_pre_ib_reg_umr_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
+ mlx5_ib_devx_init,
+ mlx5_ib_devx_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+ mlx5_ib_stage_post_ib_reg_umr_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
+ mlx5_ib_stage_delay_drop_init,
+ mlx5_ib_stage_delay_drop_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_RESTRACK,
+ mlx5_ib_restrack_init,
+ NULL),
+};
+
+static int mlx5r_mp_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = idev->mdev;
+ struct mlx5_ib_multiport_info *mpi;
+ struct mlx5_ib_dev *dev;
+ bool bound = false;
+ int err;
+
+ mpi = kzalloc(sizeof(*mpi), GFP_KERNEL);
+ if (!mpi)
+ return -ENOMEM;
+
+ mpi->mdev = mdev;
+ err = mlx5_query_nic_vport_system_image_guid(mdev,
+ &mpi->sys_image_guid);
+ if (err) {
+ kfree(mpi);
+ return err;
+ }
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid)
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+
+ if (bound) {
+ rdma_roce_rescan_device(&dev->ib_dev);
+ mpi->ibdev->ib_active = true;
+ break;
+ }
+ }
+
+ if (!bound) {
+ list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
+ dev_dbg(mdev->device,
+ "no suitable IB device found to bind to, added to unaffiliated list.\n");
+ }
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+
+ auxiliary_set_drvdata(adev, mpi);
+ return 0;
+}
+
+static void mlx5r_mp_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_ib_multiport_info *mpi;
+
+ mpi = auxiliary_get_drvdata(adev);
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ if (mpi->ibdev)
+ mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
+ else
+ list_del(&mpi->list);
+ mutex_unlock(&mlx5_ib_multiport_mutex);
+ kfree(mpi);
+}
+
+static int mlx5r_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = idev->mdev;
+ const struct mlx5_ib_profile *profile;
+ int port_type_cap, num_ports, ret;
+ enum rdma_link_layer ll;
+ struct mlx5_ib_dev *dev;
+
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+ MLX5_CAP_GEN(mdev, num_vhca_ports));
+ dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
+ if (!dev)
+ return -ENOMEM;
+ dev->port = kcalloc(num_ports, sizeof(*dev->port),
+ GFP_KERNEL);
+ if (!dev->port) {
+ ib_dealloc_device(&dev->ib_dev);
+ return -ENOMEM;
+ }
+
+ dev->mdev = mdev;
+ dev->num_ports = num_ports;
+
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
+ profile = &raw_eth_profile;
+ else
+ profile = &pf_profile;
+
+ ret = __mlx5_ib_add(dev, profile);
+ if (ret) {
+ kfree(dev->port);
+ ib_dealloc_device(&dev->ib_dev);
+ return ret;
+ }
+
+ auxiliary_set_drvdata(adev, dev);
+ return 0;
+}
+
+static void mlx5r_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_ib_dev *dev;
+
+ dev = auxiliary_get_drvdata(adev);
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+}
+
+static const struct auxiliary_device_id mlx5r_mp_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".multiport", },
+ {},
+};
+
+static const struct auxiliary_device_id mlx5r_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table);
+MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table);
+
+static struct auxiliary_driver mlx5r_mp_driver = {
+ .name = "multiport",
+ .probe = mlx5r_mp_probe,
+ .remove = mlx5r_mp_remove,
+ .id_table = mlx5r_mp_id_table,
+};
+
+static struct auxiliary_driver mlx5r_driver = {
+ .name = "rdma",
+ .probe = mlx5r_probe,
+ .remove = mlx5r_remove,
+ .id_table = mlx5r_id_table,
+};
+
+static int __init mlx5_ib_init(void)
+{
+ int ret;
+
+ xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!xlt_emergency_page)
+ return -ENOMEM;
+
+ mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
+ if (!mlx5_ib_event_wq) {
+ free_page((unsigned long)xlt_emergency_page);
+ return -ENOMEM;
+ }
+
+ mlx5_ib_odp_init();
+ ret = mlx5r_rep_init();
+ if (ret)
+ goto rep_err;
+ ret = auxiliary_driver_register(&mlx5r_mp_driver);
+ if (ret)
+ goto mp_err;
+ ret = auxiliary_driver_register(&mlx5r_driver);
+ if (ret)
+ goto drv_err;
+ return 0;
+
+drv_err:
+ auxiliary_driver_unregister(&mlx5r_mp_driver);
+mp_err:
+ mlx5r_rep_cleanup();
+rep_err:
+ destroy_workqueue(mlx5_ib_event_wq);
+ free_page((unsigned long)xlt_emergency_page);
+ return ret;
+}
+
+static void __exit mlx5_ib_cleanup(void)
+{
+ auxiliary_driver_unregister(&mlx5r_driver);
+ auxiliary_driver_unregister(&mlx5r_mp_driver);
+ mlx5r_rep_cleanup();
+
+ destroy_workqueue(mlx5_ib_event_wq);
+ free_page((unsigned long)xlt_emergency_page);
+}
+
+module_init(mlx5_ib_init);
+module_exit(mlx5_ib_cleanup);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
new file mode 100644
index 000000000..96ffbbaf0
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem_odp.h>
+#include "mlx5_ib.h"
+#include <linux/jiffies.h>
+
+/*
+ * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be
+ * filled in the pas array.
+ */
+void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
+ u64 access_flags)
+{
+ struct ib_block_iter biter;
+
+ rdma_umem_for_each_dma_block (umem, &biter, page_size) {
+ *pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
+ access_flags);
+ pas++;
+ }
+}
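/*
 * A minimal caller sketch, not part of the patch above; the helper name is
 * hypothetical. Per the comment on mlx5_ib_populate_pas(), the pas array has
 * to hold ib_umem_num_dma_blocks() entries for the chosen page size.
 */
static __be64 *example_build_pas(struct ib_umem *umem, size_t page_size)
{
	size_t nblocks = ib_umem_num_dma_blocks(umem, page_size);
	__be64 *pas;

	pas = kvcalloc(nblocks, sizeof(*pas), GFP_KERNEL);
	if (!pas)
		return NULL;

	/* One entry per DMA block, each tagged with the MTT access bits. */
	mlx5_ib_populate_pas(umem, page_size, pas, MLX5_IB_MTT_PRESENT);
	return pas;
}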
+
+/*
+ * Compute the page shift and page_offset for mailboxes that use a quantized
+ * page_offset. The granularity of the page offset scales according to page
+ * size.
+ */
+unsigned long __mlx5_umem_find_best_quantized_pgoff(
+ struct ib_umem *umem, unsigned long pgsz_bitmap,
+ unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
+ unsigned int *page_offset_quantized)
+{
+ const u64 page_offset_mask = (1UL << page_offset_bits) - 1;
+ unsigned long page_size;
+ u64 page_offset;
+
+ page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);
+ if (!page_size)
+ return 0;
+
+ /*
+ * page size is the largest possible page size.
+ *
+ * Reduce the page_size, and thus the page_offset and quanta, until the
+ * page_offset fits into the mailbox field. Once page_size < scale this
+ * loop is guaranteed to terminate.
+ */
+ page_offset = ib_umem_dma_offset(umem, page_size);
+ while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {
+ page_size /= 2;
+ page_offset = ib_umem_dma_offset(umem, page_size);
+ }
+
+ /*
+ * The address is not aligned, or otherwise cannot be represented by the
+ * page_offset.
+ */
+ if (!(pgsz_bitmap & page_size))
+ return 0;
+
+ *page_offset_quantized =
+ (unsigned long)page_offset / (page_size / scale);
+ if (WARN_ON(*page_offset_quantized > page_offset_mask))
+ return 0;
+ return page_size;
+}
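/*
 * A worked example with assumed numbers, not part of the patch: with
 * scale = 64, page_size = 4096 and a umem DMA offset of 512 bytes,
 *
 *	quantum               = page_size / scale = 4096 / 64 = 64
 *	page_offset_quantized = 512 / 64          = 8
 *
 * and the device reconstructs page_offset as
 * page_offset_quantized * (page_size / scale) = 8 * 64 = 512. If the offset
 * were not a multiple of the quantum, the loop above would halve page_size
 * until it becomes representable; the function returns 0 if the reduced size
 * is no longer present in pgsz_bitmap.
 */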
+
+#define WR_ID_BF 0xBF
+#define WR_ID_END 0xBAD
+#define TEST_WC_NUM_WQES 255
+#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
+ bool signaled)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ struct mlx5_bf *bf = &qp->bf;
+ __be32 mmio_wqe[16] = {};
+ unsigned long flags;
+ unsigned int idx;
+ int i;
+
+ if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ return -EIO;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+
+ memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg));
+ ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0;
+ ctrl->opmod_idx_opcode =
+ cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP);
+ ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) |
+ (qp->trans_qp.base.mqp.qpn << 8));
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP;
+ qp->sq.wqe_head[idx] = qp->sq.head + 1;
+ qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg),
+ MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+ qp->sq.head++;
+
+ memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
+ ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
+ MLX5_WQE_CTRL_CQ_UPDATE;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell
+ */
+ wmb();
+ for (i = 0; i < 8; i++)
+ mlx5_write64(&mmio_wqe[i * 2],
+ bf->bfreg->map + bf->offset + i * 8);
+ io_stop_wc();
+
+ bf->offset ^= bf->buf_size;
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return 0;
+}
+
+static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq)
+{
+ int ret;
+ struct ib_wc wc = {};
+ unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
+
+ do {
+ ret = ib_poll_cq(cq, 1, &wc);
+ if (ret < 0 || wc.status)
+ return ret < 0 ? ret : -EINVAL;
+ if (ret)
+ break;
+ } while (!time_after(jiffies, end));
+
+ if (!ret)
+ return -ETIMEDOUT;
+
+ if (wc.wr_id != WR_ID_BF)
+ ret = 0;
+
+ return ret;
+}
+
+static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp)
+{
+ int err, i;
+
+ for (i = 0; i < TEST_WC_NUM_WQES; i++) {
+ err = post_send_nop(dev, qp, WR_ID_BF, false);
+ if (err)
+ return err;
+ }
+
+ return post_send_nop(dev, qp, WR_ID_END, true);
+}
+
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev)
+{
+ struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 };
+ int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
+ struct ib_qp_init_attr qp_init_attr = {
+ .cap = { .max_send_wr = TEST_WC_NUM_WQES },
+ .qp_type = IB_QPT_UD,
+ .sq_sig_type = IB_SIGNAL_REQ_WR,
+ .create_flags = MLX5_IB_QP_CREATE_WC_TEST,
+ };
+ struct ib_qp_attr qp_attr = { .port_num = 1 };
+ struct ib_device *ibdev = &dev->ib_dev;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ struct ib_pd *pd;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev->mdev, bf))
+ return 0;
+
+ if (!dev->mdev->roce.roce_en &&
+ port_type_cap == MLX5_CAP_PORT_TYPE_ETH) {
+ if (mlx5_core_is_pf(dev->mdev))
+ dev->wc_support = arch_can_pci_mmap_wc();
+ return 0;
+ }
+
+ ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false);
+ if (ret)
+ goto print_err;
+
+ if (!dev->wc_bfreg.wc)
+ goto out1;
+
+ pd = ib_alloc_pd(ibdev, 0);
+ if (IS_ERR(pd)) {
+ ret = PTR_ERR(pd);
+ goto out1;
+ }
+
+ cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
+ if (IS_ERR(cq)) {
+ ret = PTR_ERR(cq);
+ goto out2;
+ }
+
+ qp_init_attr.recv_cq = cq;
+ qp_init_attr.send_cq = cq;
+ qp = ib_create_qp(pd, &qp_init_attr);
+ if (IS_ERR(qp)) {
+ ret = PTR_ERR(qp);
+ goto out3;
+ }
+
+ qp_attr.qp_state = IB_QPS_INIT;
+ ret = ib_modify_qp(qp, &qp_attr,
+ IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX |
+ IB_QP_QKEY);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
+ if (ret)
+ goto out4;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
+ if (ret)
+ goto out4;
+
+ ret = test_wc_do_send(dev, qp);
+ if (ret < 0)
+ goto out4;
+
+ ret = test_wc_poll_cq_result(dev, cq);
+ if (ret > 0) {
+ dev->wc_support = true;
+ ret = 0;
+ }
+
+out4:
+ ib_destroy_qp(qp);
+out3:
+ ib_destroy_cq(cq);
+out2:
+ ib_dealloc_pd(pd);
+out1:
+ mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg);
+print_err:
+ if (ret)
+ mlx5_ib_err(
+ dev,
+ "Error %d while trying to test write-combining support\n",
+ ret);
+ return ret;
+}
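/*
 * How the test above decides, as far as can be inferred from the code: each
 * of the TEST_WC_NUM_WQES unsignaled NOPs requests a completion only in the
 * BlueFlame (write-combining) copy of the WQE, not in the copy written to
 * memory. A completion carrying WR_ID_BF can therefore only have come from a
 * WC write that reached the HCA intact, in which case
 * test_wc_poll_cq_result() returns a positive value; if only the final
 * signaled WQE (WR_ID_END) completes, the WC path is assumed broken and
 * wc_support stays false.
 */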
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
new file mode 100644
index 000000000..8d94e6834
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -0,0 +1,1616 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef MLX5_IB_H
+#define MLX5_IB_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_smi.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cq.h>
+#include <linux/mlx5/fs.h>
+#include <linux/mlx5/qp.h>
+#include <linux/types.h>
+#include <linux/mlx5/transobj.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/mlx5-abi.h>
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+
+#include "srq.h"
+
+#define mlx5_ib_dbg(_dev, format, arg...) \
+ dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
+
+#define mlx5_ib_err(_dev, format, arg...) \
+ dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
+
+#define mlx5_ib_warn(_dev, format, arg...) \
+ dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
+
+#define MLX5_IB_DEFAULT_UIDX 0xffffff
+#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
+
+static __always_inline unsigned long
+__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
+ unsigned int pgsz_shift)
+{
+ unsigned int largest_pg_shift =
+ min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
+ BITS_PER_LONG - 1);
+
+ /*
+ * Despite a command allowing it, the device does not support lower than
+ * 4k page size.
+ */
+ pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
+ return GENMASK(largest_pg_shift, pgsz_shift);
+}
+
+/*
+ * For mkc users, instead of a page_offset the command has a start_iova which
+ * specifies both the page_offset and the on-the-wire IOVA
+ */
+#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova) \
+ ib_umem_find_best_pgsz(umem, \
+ __mlx5_log_page_size_to_bitmap( \
+ __mlx5_bit_sz(typ, log_pgsz_fld), \
+ pgsz_shift), \
+ iova)
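/*
 * A usage sketch, not part of the patch: for an mkc the page-size field is
 * log_page_size and there is no extra pgsz_shift, so an MR registration path
 * would typically do something like
 *
 *	page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, 0, iova);
 *	if (!page_size)
 *		return ERR_PTR(-EINVAL);
 *
 * where a return of 0 means no supported page size can represent both the
 * umem layout and the requested IOVA.
 */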
+
+static __always_inline unsigned long
+__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
+ unsigned int offset_shift)
+{
+ unsigned int largest_offset_shift =
+ min_t(unsigned long, page_offset_bits - 1 + offset_shift,
+ BITS_PER_LONG - 1);
+
+ return GENMASK(largest_offset_shift, offset_shift);
+}
+
+/*
+ * QP/CQ/WQ/etc type commands take a page offset that satisfies:
+ * page_offset_quantized * (page_size/scale) = page_offset
+ * which restricts the allowed page sizes to ones that satisfy the above.
+ */
+unsigned long __mlx5_umem_find_best_quantized_pgoff(
+ struct ib_umem *umem, unsigned long pgsz_bitmap,
+ unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
+ unsigned int *page_offset_quantized);
+#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld, \
+ pgsz_shift, page_offset_fld, \
+ scale, page_offset_quantized) \
+ __mlx5_umem_find_best_quantized_pgoff( \
+ umem, \
+ __mlx5_log_page_size_to_bitmap( \
+ __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \
+ __mlx5_bit_sz(typ, page_offset_fld), \
+ GENMASK(31, order_base_2(scale)), scale, \
+ page_offset_quantized)
+
+#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld, \
+ pgsz_shift, page_offset_fld, \
+ scale, page_offset_quantized) \
+ __mlx5_umem_find_best_quantized_pgoff( \
+ umem, \
+ __mlx5_log_page_size_to_bitmap( \
+ __mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift), \
+ __mlx5_bit_sz(typ, page_offset_fld), 0, scale, \
+ page_offset_quantized)
+
+enum {
+ MLX5_IB_MMAP_OFFSET_START = 9,
+ MLX5_IB_MMAP_OFFSET_END = 255,
+};
+
+enum {
+ MLX5_IB_MMAP_CMD_SHIFT = 8,
+ MLX5_IB_MMAP_CMD_MASK = 0xff,
+};
+
+enum {
+ MLX5_RES_SCAT_DATA32_CQE = 0x1,
+ MLX5_RES_SCAT_DATA64_CQE = 0x2,
+ MLX5_REQ_SCAT_DATA32_CQE = 0x11,
+ MLX5_REQ_SCAT_DATA64_CQE = 0x22,
+};
+
+enum mlx5_ib_mad_ifc_flags {
+ MLX5_MAD_IFC_IGNORE_MKEY = 1,
+ MLX5_MAD_IFC_IGNORE_BKEY = 2,
+ MLX5_MAD_IFC_NET_VIEW = 4,
+};
+
+enum {
+ MLX5_CROSS_CHANNEL_BFREG = 0,
+};
+
+enum {
+ MLX5_CQE_VERSION_V0,
+ MLX5_CQE_VERSION_V1,
+};
+
+enum {
+ MLX5_TM_MAX_RNDV_MSG_SIZE = 64,
+ MLX5_TM_MAX_SGE = 1,
+};
+
+enum {
+ MLX5_IB_INVALID_UAR_INDEX = BIT(31),
+ MLX5_IB_INVALID_BFREG = BIT(31),
+};
+
+enum {
+ MLX5_MAX_MEMIC_PAGES = 0x100,
+ MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
+};
+
+enum {
+ MLX5_MEMIC_BASE_ALIGN = 6,
+ MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
+};
+
+enum mlx5_ib_mmap_type {
+ MLX5_IB_MMAP_TYPE_MEMIC = 1,
+ MLX5_IB_MMAP_TYPE_VAR = 2,
+ MLX5_IB_MMAP_TYPE_UAR_WC = 3,
+ MLX5_IB_MMAP_TYPE_UAR_NC = 4,
+ MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
+};
+
+struct mlx5_bfreg_info {
+ u32 *sys_pages;
+ int num_low_latency_bfregs;
+ unsigned int *count;
+
+ /*
+ * protect bfreg allocation data structs
+ */
+ struct mutex lock;
+ u32 ver;
+ u8 lib_uar_4k : 1;
+ u8 lib_uar_dyn : 1;
+ u32 num_sys_pages;
+ u32 num_static_sys_pages;
+ u32 total_num_bfregs;
+ u32 num_dyn_bfregs;
+};
+
+struct mlx5_ib_ucontext {
+ struct ib_ucontext ibucontext;
+ struct list_head db_page_list;
+
+ /* protect doorbell record alloc/free
+ */
+ struct mutex db_page_mutex;
+ struct mlx5_bfreg_info bfregi;
+ u8 cqe_version;
+ /* Transport Domain number */
+ u32 tdn;
+
+ u64 lib_caps;
+ u16 devx_uid;
+ /* For RoCE LAG TX affinity */
+ atomic_t tx_port_affinity;
+};
+
+static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
+{
+ return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
+}
+
+struct mlx5_ib_pd {
+ struct ib_pd ibpd;
+ u32 pdn;
+ u16 uid;
+};
+
+enum {
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
+ MLX5_IB_FLOW_ACTION_DECAP,
+};
+
+#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
+#define MLX5_IB_FLOW_LAST_PRIO (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
+#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
+#error "Invalid number of bypass priorities"
+#endif
+#define MLX5_IB_FLOW_LEFTOVERS_PRIO (MLX5_IB_FLOW_MCAST_PRIO + 1)
+
+#define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
+#define MLX5_IB_NUM_SNIFFER_FTS 2
+#define MLX5_IB_NUM_EGRESS_FTS 1
+#define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS
+
+struct mlx5_ib_anchor {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg_goto_table;
+ struct mlx5_flow_group *fg_drop;
+ struct mlx5_flow_handle *rule_goto_table;
+ struct mlx5_flow_handle *rule_drop;
+ unsigned int rule_goto_table_ref;
+};
+
+struct mlx5_ib_flow_prio {
+ struct mlx5_flow_table *flow_table;
+ struct mlx5_ib_anchor anchor;
+ unsigned int refcount;
+};
+
+struct mlx5_ib_flow_handler {
+ struct list_head list;
+ struct ib_flow ibflow;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_flow_handle *rule;
+ struct ib_counters *ibcounters;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_flow_matcher *flow_matcher;
+};
+
+struct mlx5_ib_flow_matcher {
+ struct mlx5_ib_match_params matcher_mask;
+ int mask_len;
+ enum mlx5_ib_flow_type flow_type;
+ enum mlx5_flow_namespace_type ns_type;
+ u16 priority;
+ struct mlx5_core_dev *mdev;
+ atomic_t usecnt;
+ u8 match_criteria_enable;
+};
+
+struct mlx5_ib_steering_anchor {
+ struct mlx5_ib_flow_prio *ft_prio;
+ struct mlx5_ib_dev *dev;
+ atomic_t usecnt;
+};
+
+struct mlx5_ib_pp {
+ u16 index;
+ struct mlx5_core_dev *mdev;
+};
+
+enum mlx5_ib_optional_counter_type {
+ MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
+ MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
+ MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,
+
+ MLX5_IB_OPCOUNTER_MAX,
+};
+
+struct mlx5_ib_flow_db {
+ struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
+ struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
+ struct mlx5_ib_flow_prio fdb[MLX5_IB_NUM_FDB_FTS];
+ struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
+ struct mlx5_flow_table *lag_demux_ft;
+ /* Protect the flow steering bypass flow tables
+ * when adding/deleting flow rules.
+ * Only a single addition/removal of a flow steering rule can be
+ * performed at a time.
+ */
+ struct mutex lock;
+};
+
+/* Use macros here so that we don't have to duplicate
+ * enum ib_qp_type for low-level driver
+ */
+
+#define MLX5_IB_QPT_REG_UMR IB_QPT_RESERVED1
+/*
+ * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
+ * creates the actual hardware QP.
+ */
+#define MLX5_IB_QPT_HW_GSI IB_QPT_RESERVED2
+#define MLX5_IB_QPT_DCI IB_QPT_RESERVED3
+#define MLX5_IB_QPT_DCT IB_QPT_RESERVED4
+#define MLX5_IB_WR_UMR IB_WR_RESERVED1
+
+#define MLX5_IB_UPD_XLT_ZAP BIT(0)
+#define MLX5_IB_UPD_XLT_ENABLE BIT(1)
+#define MLX5_IB_UPD_XLT_ATOMIC BIT(2)
+#define MLX5_IB_UPD_XLT_ADDR BIT(3)
+#define MLX5_IB_UPD_XLT_PD BIT(4)
+#define MLX5_IB_UPD_XLT_ACCESS BIT(5)
+#define MLX5_IB_UPD_XLT_INDIRECT BIT(6)
+
+/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
+ *
+ * These flags are intended for internal use by the mlx5_ib driver, and they
+ * rely on the range reserved for that use in the ib_qp_create_flags enum.
+ */
+#define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START
+#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1)
+
+struct wr_list {
+ u16 opcode;
+ u16 next;
+};
+
+enum mlx5_ib_rq_flags {
+ MLX5_IB_RQ_CVLAN_STRIPPING = 1 << 0,
+ MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
+};
+
+struct mlx5_ib_wq {
+ struct mlx5_frag_buf_ctrl fbc;
+ u64 *wrid;
+ u32 *wr_data;
+ struct wr_list *w_list;
+ unsigned *wqe_head;
+ u16 unsig_count;
+
+ /* serialize post to the work queue
+ */
+ spinlock_t lock;
+ int wqe_cnt;
+ int max_post;
+ int max_gs;
+ int offset;
+ int wqe_shift;
+ unsigned head;
+ unsigned tail;
+ u16 cur_post;
+ u16 last_poll;
+ void *cur_edge;
+};
+
+enum mlx5_ib_wq_flags {
+ MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
+ MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
+};
+
+#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
+#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
+#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
+#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
+#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
+
+struct mlx5_ib_rwq {
+ struct ib_wq ibwq;
+ struct mlx5_core_qp core_qp;
+ u32 rq_num_pas;
+ u32 log_rq_stride;
+ u32 log_rq_size;
+ u32 rq_page_offset;
+ u32 log_page_size;
+ u32 log_num_strides;
+ u32 two_byte_shift_en;
+ u32 single_stride_log_num_of_bytes;
+ struct ib_umem *umem;
+ size_t buf_size;
+ unsigned int page_shift;
+ struct mlx5_db db;
+ u32 user_index;
+ u32 wqe_count;
+ u32 wqe_shift;
+ int wq_sig;
+ u32 create_flags; /* Use enum mlx5_ib_wq_flags */
+};
+
+struct mlx5_ib_rwq_ind_table {
+ struct ib_rwq_ind_table ib_rwq_ind_tbl;
+ u32 rqtn;
+ u16 uid;
+};
+
+struct mlx5_ib_ubuffer {
+ struct ib_umem *umem;
+ int buf_size;
+ u64 buf_addr;
+};
+
+struct mlx5_ib_qp_base {
+ struct mlx5_ib_qp *container_mibqp;
+ struct mlx5_core_qp mqp;
+ struct mlx5_ib_ubuffer ubuffer;
+};
+
+struct mlx5_ib_qp_trans {
+ struct mlx5_ib_qp_base base;
+ u16 xrcdn;
+ u32 alt_port;
+ u8 atomic_rd_en;
+ u8 resp_depth;
+};
+
+struct mlx5_ib_rss_qp {
+ u32 tirn;
+};
+
+struct mlx5_ib_rq {
+ struct mlx5_ib_qp_base base;
+ struct mlx5_ib_wq *rq;
+ struct mlx5_ib_ubuffer ubuffer;
+ struct mlx5_db *doorbell;
+ u32 tirn;
+ u8 state;
+ u32 flags;
+};
+
+struct mlx5_ib_sq {
+ struct mlx5_ib_qp_base base;
+ struct mlx5_ib_wq *sq;
+ struct mlx5_ib_ubuffer ubuffer;
+ struct mlx5_db *doorbell;
+ struct mlx5_flow_handle *flow_rule;
+ u32 tisn;
+ u8 state;
+};
+
+struct mlx5_ib_raw_packet_qp {
+ struct mlx5_ib_sq sq;
+ struct mlx5_ib_rq rq;
+};
+
+struct mlx5_bf {
+ int buf_size;
+ unsigned long offset;
+ struct mlx5_sq_bfreg *bfreg;
+};
+
+struct mlx5_ib_dct {
+ struct mlx5_core_dct mdct;
+ u32 *in;
+};
+
+struct mlx5_ib_gsi_qp {
+ struct ib_qp *rx_qp;
+ u32 port_num;
+ struct ib_qp_cap cap;
+ struct ib_cq *cq;
+ struct mlx5_ib_gsi_wr *outstanding_wrs;
+ u32 outstanding_pi, outstanding_ci;
+ int num_qps;
+ /* Protects access to the tx_qps. Post send operations synchronize
+ * with tx_qp creation in setup_qp(). Also protects the
+ * outstanding_wrs array and indices.
+ */
+ spinlock_t lock;
+ struct ib_qp **tx_qps;
+};
+
+struct mlx5_ib_qp {
+ struct ib_qp ibqp;
+ union {
+ struct mlx5_ib_qp_trans trans_qp;
+ struct mlx5_ib_raw_packet_qp raw_packet_qp;
+ struct mlx5_ib_rss_qp rss_qp;
+ struct mlx5_ib_dct dct;
+ struct mlx5_ib_gsi_qp gsi;
+ };
+ struct mlx5_frag_buf buf;
+
+ struct mlx5_db db;
+ struct mlx5_ib_wq rq;
+
+ u8 sq_signal_bits;
+ u8 next_fence;
+ struct mlx5_ib_wq sq;
+
+ /* serialize qp state modifications
+ */
+ struct mutex mutex;
+ /* cached variant of create_flags from struct ib_qp_init_attr */
+ u32 flags;
+ u32 port;
+ u8 state;
+ int max_inline_data;
+ struct mlx5_bf bf;
+ u8 has_rq:1;
+ u8 is_rss:1;
+
+ /* only for user space QPs. For kernel
+ * we have it from the bf object
+ */
+ int bfregn;
+
+ struct list_head qps_list;
+ struct list_head cq_recv_list;
+ struct list_head cq_send_list;
+ struct mlx5_rate_limit rl;
+ u32 underlay_qpn;
+ u32 flags_en;
+ /*
+ * IB/core doesn't store low-level QP types, so
+ * store both MLX and IBTA types in the field below.
+ */
+ enum ib_qp_type type;
+ /* A flag to indicate that a new counter has been configured
+ * but has not yet taken effect
+ */
+ u32 counter_pending;
+ u16 gsi_lag_port;
+};
+
+struct mlx5_ib_cq_buf {
+ struct mlx5_frag_buf_ctrl fbc;
+ struct mlx5_frag_buf frag_buf;
+ struct ib_umem *umem;
+ int cqe_size;
+ int nent;
+};
+
+enum mlx5_ib_cq_pr_flags {
+ MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD = 1 << 0,
+ MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
+};
+
+struct mlx5_ib_cq {
+ struct ib_cq ibcq;
+ struct mlx5_core_cq mcq;
+ struct mlx5_ib_cq_buf buf;
+ struct mlx5_db db;
+
+ /* serialize access to the CQ
+ */
+ spinlock_t lock;
+
+ /* protect resize cq
+ */
+ struct mutex resize_mutex;
+ struct mlx5_ib_cq_buf *resize_buf;
+ struct ib_umem *resize_umem;
+ int cqe_size;
+ struct list_head list_send_qp;
+ struct list_head list_recv_qp;
+ u32 create_flags;
+ struct list_head wc_list;
+ enum ib_cq_notify_flags notify_flags;
+ struct work_struct notify_work;
+ u16 private_flags; /* Use mlx5_ib_cq_pr_flags */
+};
+
+struct mlx5_ib_wc {
+ struct ib_wc wc;
+ struct list_head list;
+};
+
+struct mlx5_ib_srq {
+ struct ib_srq ibsrq;
+ struct mlx5_core_srq msrq;
+ struct mlx5_frag_buf buf;
+ struct mlx5_db db;
+ struct mlx5_frag_buf_ctrl fbc;
+ u64 *wrid;
+ /* protect SRQ handling
+ */
+ spinlock_t lock;
+ int head;
+ int tail;
+ u16 wqe_ctr;
+ struct ib_umem *umem;
+ /* serialize arming a SRQ
+ */
+ struct mutex mutex;
+ int wq_sig;
+};
+
+struct mlx5_ib_xrcd {
+ struct ib_xrcd ibxrcd;
+ u32 xrcdn;
+};
+
+enum mlx5_ib_mtt_access_flags {
+ MLX5_IB_MTT_READ = (1 << 0),
+ MLX5_IB_MTT_WRITE = (1 << 1),
+};
+
+struct mlx5_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ u8 mmap_flag;
+ u64 address;
+ u32 page_idx;
+};
+
+enum mlx5_mkey_type {
+ MLX5_MKEY_MR = 1,
+ MLX5_MKEY_MW,
+ MLX5_MKEY_INDIRECT_DEVX,
+};
+
+struct mlx5_ib_mkey {
+ u32 key;
+ enum mlx5_mkey_type type;
+ unsigned int ndescs;
+ struct wait_queue_head wait;
+ refcount_t usecount;
+ struct mlx5_cache_ent *cache_ent;
+};
+
+#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
+
+#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
+ IB_ACCESS_REMOTE_WRITE |\
+ IB_ACCESS_REMOTE_READ |\
+ IB_ACCESS_REMOTE_ATOMIC |\
+ IB_ZERO_BASED)
+
+#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
+ IB_ACCESS_REMOTE_WRITE |\
+ IB_ACCESS_REMOTE_READ |\
+ IB_ZERO_BASED)
+
+#define mlx5_update_odp_stats(mr, counter_name, value) \
+ atomic64_add(value, &((mr)->odp_stats.counter_name))
+
+struct mlx5_ib_mr {
+ struct ib_mr ibmr;
+ struct mlx5_ib_mkey mmkey;
+
+ struct ib_umem *umem;
+
+ union {
+ /* Used only by kernel MRs (umem == NULL) */
+ struct {
+ void *descs;
+ void *descs_alloc;
+ dma_addr_t desc_map;
+ int max_descs;
+ int desc_size;
+ int access_mode;
+
+ /* For Kernel IB_MR_TYPE_INTEGRITY */
+ struct mlx5_core_sig_ctx *sig;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr *klm_mr;
+ struct mlx5_ib_mr *mtt_mr;
+ u64 data_iova;
+ u64 pi_iova;
+ int meta_ndescs;
+ int meta_length;
+ int data_length;
+ };
+
+ /* Used only by User MRs (umem != NULL) */
+ struct {
+ unsigned int page_shift;
+ /* Current access_flags */
+ int access_flags;
+
+ /* For User ODP */
+ struct mlx5_ib_mr *parent;
+ struct xarray implicit_children;
+ union {
+ struct work_struct work;
+ } odp_destroy;
+ struct ib_odp_counters odp_stats;
+ bool is_odp_implicit;
+ };
+ };
+};
+
+static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_odp;
+}
+
+static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
+ mr->umem->is_dmabuf;
+}
+
+struct mlx5_ib_mw {
+ struct ib_mw ibmw;
+ struct mlx5_ib_mkey mmkey;
+};
+
+struct mlx5_ib_umr_context {
+ struct ib_cqe cqe;
+ enum ib_wc_status status;
+ struct completion done;
+};
+
+enum {
+ MLX5_UMR_STATE_UNINIT,
+ MLX5_UMR_STATE_ACTIVE,
+ MLX5_UMR_STATE_RECOVER,
+ MLX5_UMR_STATE_ERR,
+};
+
+struct umr_common {
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+ /* Protects from UMR QP overflow
+ */
+ struct semaphore sem;
+ /* Protects from using UMR while the UMR is not active
+ */
+ struct mutex lock;
+ unsigned int state;
+};
+
+struct mlx5_cache_ent {
+ struct xarray mkeys;
+ unsigned long stored;
+ unsigned long reserved;
+
+ char name[4];
+ u32 order;
+ u32 access_mode;
+ u32 page;
+ unsigned int ndescs;
+
+ u8 disabled:1;
+ u8 fill_to_high_water:1;
+
+ /*
+ * - limit is the low water mark for stored mkeys, 2* limit is the
+ * upper water mark.
+ */
+ u32 in_use;
+ u32 limit;
+
+ /* Statistics */
+ u32 miss;
+
+ struct mlx5_ib_dev *dev;
+ struct delayed_work dwork;
+};
+
+struct mlx5r_async_create_mkey {
+ union {
+ u32 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
+ u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
+ };
+ struct mlx5_async_work cb_work;
+ struct mlx5_cache_ent *ent;
+ u32 mkey;
+};
+
+struct mlx5_mkey_cache {
+ struct workqueue_struct *wq;
+ struct mlx5_cache_ent ent[MAX_MKEY_CACHE_ENTRIES];
+ struct dentry *root;
+ unsigned long last_add;
+};
+
+struct mlx5_ib_port_resources {
+ struct mlx5_ib_gsi_qp *gsi;
+ struct work_struct pkey_change_work;
+};
+
+struct mlx5_ib_resources {
+ struct ib_cq *c0;
+ u32 xrcdn0;
+ u32 xrcdn1;
+ struct ib_pd *p0;
+ struct ib_srq *s0;
+ struct ib_srq *s1;
+ struct mlx5_ib_port_resources ports[2];
+};
+
+#define MAX_OPFC_RULES 2
+
+struct mlx5_ib_op_fc {
+ struct mlx5_fc *fc;
+ struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
+};
+
+struct mlx5_ib_counters {
+ struct rdma_stat_desc *descs;
+ size_t *offsets;
+ u32 num_q_counters;
+ u32 num_cong_counters;
+ u32 num_ext_ppcnt_counters;
+ u32 num_op_counters;
+ u16 set_id;
+ struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
+};
+
+int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type);
+
+void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type);
+
+struct mlx5_ib_multiport_info;
+
+struct mlx5_ib_multiport {
+ struct mlx5_ib_multiport_info *mpi;
+ /* To be held when accessing the multiport info */
+ spinlock_t mpi_lock;
+};
+
+struct mlx5_roce {
+ /* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
+ * netdev pointer
+ */
+ rwlock_t netdev_lock;
+ struct net_device *netdev;
+ struct notifier_block nb;
+ atomic_t tx_port_affinity;
+ enum ib_port_state last_port_state;
+ struct mlx5_ib_dev *dev;
+ u32 native_port_num;
+};
+
+struct mlx5_ib_port {
+ struct mlx5_ib_counters cnts;
+ struct mlx5_ib_multiport mp;
+ struct mlx5_ib_dbg_cc_params *dbg_cc_params;
+ struct mlx5_roce roce;
+ struct mlx5_eswitch_rep *rep;
+};
+
+struct mlx5_ib_dbg_param {
+ int offset;
+ struct mlx5_ib_dev *dev;
+ struct dentry *dentry;
+ u32 port_num;
+};
+
+enum mlx5_ib_dbg_cc_types {
+ MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
+ MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
+ MLX5_IB_DBG_CC_RP_TIME_RESET,
+ MLX5_IB_DBG_CC_RP_BYTE_RESET,
+ MLX5_IB_DBG_CC_RP_THRESHOLD,
+ MLX5_IB_DBG_CC_RP_AI_RATE,
+ MLX5_IB_DBG_CC_RP_MAX_RATE,
+ MLX5_IB_DBG_CC_RP_HAI_RATE,
+ MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
+ MLX5_IB_DBG_CC_RP_MIN_RATE,
+ MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
+ MLX5_IB_DBG_CC_RP_DCE_TCP_G,
+ MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
+ MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
+ MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
+ MLX5_IB_DBG_CC_RP_GD,
+ MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
+ MLX5_IB_DBG_CC_NP_CNP_DSCP,
+ MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
+ MLX5_IB_DBG_CC_NP_CNP_PRIO,
+ MLX5_IB_DBG_CC_MAX,
+};
+
+struct mlx5_ib_dbg_cc_params {
+ struct dentry *root;
+ struct mlx5_ib_dbg_param params[MLX5_IB_DBG_CC_MAX];
+};
+
+enum {
+ MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
+};
+
+struct mlx5_ib_delay_drop {
+ struct mlx5_ib_dev *dev;
+ struct work_struct delay_drop_work;
+ /* serialize setting of delay drop */
+ struct mutex lock;
+ u32 timeout;
+ bool activate;
+ atomic_t events_cnt;
+ atomic_t rqs_cnt;
+ struct dentry *dir_debugfs;
+};
+
+enum mlx5_ib_stages {
+ MLX5_IB_STAGE_INIT,
+ MLX5_IB_STAGE_FS,
+ MLX5_IB_STAGE_CAPS,
+ MLX5_IB_STAGE_NON_DEFAULT_CB,
+ MLX5_IB_STAGE_ROCE,
+ MLX5_IB_STAGE_QP,
+ MLX5_IB_STAGE_SRQ,
+ MLX5_IB_STAGE_DEVICE_RESOURCES,
+ MLX5_IB_STAGE_DEVICE_NOTIFIER,
+ MLX5_IB_STAGE_ODP,
+ MLX5_IB_STAGE_COUNTERS,
+ MLX5_IB_STAGE_CONG_DEBUGFS,
+ MLX5_IB_STAGE_UAR,
+ MLX5_IB_STAGE_BFREG,
+ MLX5_IB_STAGE_PRE_IB_REG_UMR,
+ MLX5_IB_STAGE_WHITELIST_UID,
+ MLX5_IB_STAGE_IB_REG,
+ MLX5_IB_STAGE_POST_IB_REG_UMR,
+ MLX5_IB_STAGE_DELAY_DROP,
+ MLX5_IB_STAGE_RESTRACK,
+ MLX5_IB_STAGE_MAX,
+};
+
+struct mlx5_ib_stage {
+ int (*init)(struct mlx5_ib_dev *dev);
+ void (*cleanup)(struct mlx5_ib_dev *dev);
+};
+
+#define STAGE_CREATE(_stage, _init, _cleanup) \
+ .stage[_stage] = {.init = _init, .cleanup = _cleanup}
+
+struct mlx5_ib_profile {
+ struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
+};
+
+struct mlx5_ib_multiport_info {
+ struct list_head list;
+ struct mlx5_ib_dev *ibdev;
+ struct mlx5_core_dev *mdev;
+ struct notifier_block mdev_events;
+ struct completion unref_comp;
+ u64 sys_image_guid;
+ u32 mdev_refcnt;
+ bool is_master;
+ bool unaffiliate;
+};
+
+struct mlx5_ib_flow_action {
+ struct ib_flow_action ib_action;
+ union {
+ struct {
+ u64 ib_flags;
+ struct mlx5_accel_esp_xfrm *ctx;
+ } esp_aes_gcm;
+ struct {
+ struct mlx5_ib_dev *dev;
+ u32 sub_type;
+ union {
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ };
+ } flow_action_raw;
+ };
+};
+
+struct mlx5_dm {
+ struct mlx5_core_dev *dev;
+ /* This lock is used to protect the access to the shared
+ * allocation map when concurrent requests by different
+ * processes are handled.
+ */
+ spinlock_t lock;
+ DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
+};
+
+struct mlx5_read_counters_attr {
+ struct mlx5_fc *hw_cntrs_hndl;
+ u64 *out;
+ u32 flags;
+};
+
+enum mlx5_ib_counters_type {
+ MLX5_IB_COUNTERS_FLOW,
+};
+
+struct mlx5_ib_mcounters {
+ struct ib_counters ibcntrs;
+ enum mlx5_ib_counters_type type;
+ /* number of counters supported for this counters type */
+ u32 counters_num;
+ struct mlx5_fc *hw_cntrs_hndl;
+ /* read function for this counters type */
+ int (*read_counters)(struct ib_device *ibdev,
+ struct mlx5_read_counters_attr *read_attr);
+ /* max index set as part of create_flow */
+ u32 cntrs_max_index;
+ /* number of counters data entries (<description,index> pair) */
+ u32 ncounters;
+ /* counters data array for descriptions and indexes */
+ struct mlx5_ib_flow_counters_desc *counters_data;
+ /* protects access to mcounters internal data */
+ struct mutex mcntrs_mutex;
+};
+
+static inline struct mlx5_ib_mcounters *
+to_mcounters(struct ib_counters *ibcntrs)
+{
+ return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
+}
+
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action);
+struct mlx5_ib_lb_state {
+ /* protect the user_td */
+ struct mutex mutex;
+ u32 user_td;
+ int qps;
+ bool enabled;
+};
+
+struct mlx5_ib_pf_eq {
+ struct notifier_block irq_nb;
+ struct mlx5_ib_dev *dev;
+ struct mlx5_eq *core;
+ struct work_struct work;
+ spinlock_t lock; /* Pagefaults spinlock */
+ struct workqueue_struct *wq;
+ mempool_t *pool;
+};
+
+struct mlx5_devx_event_table {
+ struct mlx5_nb devx_nb;
+ /* serialize updating the event_xa */
+ struct mutex event_xa_lock;
+ struct xarray event_xa;
+};
+
+struct mlx5_var_table {
+ /* serialize updating the bitmap */
+ struct mutex bitmap_lock;
+ unsigned long *bitmap;
+ u64 hw_start_addr;
+ u32 stride_size;
+ u64 num_var_hw_entries;
+};
+
+struct mlx5_port_caps {
+ bool has_smi;
+ u8 ext_port_cap;
+};
+
+struct mlx5_ib_dev {
+ struct ib_device ib_dev;
+ struct mlx5_core_dev *mdev;
+ struct notifier_block mdev_events;
+ int num_ports;
+ /* serialize update of capability mask
+ */
+ struct mutex cap_mask_mutex;
+ u8 ib_active:1;
+ u8 is_rep:1;
+ u8 lag_active:1;
+ u8 wc_support:1;
+ u8 fill_delay;
+ struct umr_common umrc;
+ /* sync used page count stats
+ */
+ struct mlx5_ib_resources devr;
+
+ atomic_t mkey_var;
+ struct mlx5_mkey_cache cache;
+ struct timer_list delay_timer;
+ /* Prevents soft lock on massive reg MRs */
+ struct mutex slow_path_mutex;
+ struct ib_odp_caps odp_caps;
+ u64 odp_max_size;
+ struct mutex odp_eq_mutex;
+ struct mlx5_ib_pf_eq odp_pf_eq;
+
+ struct xarray odp_mkeys;
+
+ u32 null_mkey;
+ struct mlx5_ib_flow_db *flow_db;
+ /* protect resources needed as part of reset flow */
+ spinlock_t reset_flow_resource_lock;
+ struct list_head qp_list;
+ /* Array with num_ports elements */
+ struct mlx5_ib_port *port;
+ struct mlx5_sq_bfreg bfreg;
+ struct mlx5_sq_bfreg wc_bfreg;
+ struct mlx5_sq_bfreg fp_bfreg;
+ struct mlx5_ib_delay_drop delay_drop;
+ const struct mlx5_ib_profile *profile;
+
+ struct mlx5_ib_lb_state lb;
+ u8 umr_fence;
+ struct list_head ib_dev_list;
+ u64 sys_image_guid;
+ struct mlx5_dm dm;
+ u16 devx_whitelist_uid;
+ struct mlx5_srq_table srq_table;
+ struct mlx5_qp_table qp_table;
+ struct mlx5_async_ctx async_ctx;
+ struct mlx5_devx_event_table devx_event_table;
+ struct mlx5_var_table var_table;
+
+ struct xarray sig_mrs;
+ struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
+ u16 pkey_table_len;
+ u8 lag_ports;
+};
+
+static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
+{
+ return container_of(mcq, struct mlx5_ib_cq, mcq);
+}
+
+static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
+{
+ return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
+}
+
+static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
+}
+
+static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
+{
+ return to_mdev(mr->ibmr.device);
+}
+
+static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ return to_mdev(context->ibucontext.device);
+}
+
+static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct mlx5_ib_cq, ibcq);
+}
+
+static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
+{
+ return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
+}
+
+static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
+{
+ return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
+}
+
+static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct mlx5_ib_pd, ibpd);
+}
+
+static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
+}
+
+static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct mlx5_ib_qp, ibqp);
+}
+
+static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
+{
+ return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
+}
+
+static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
+}
+
+static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
+{
+ return container_of(msrq, struct mlx5_ib_srq, msrq);
+}
+
+static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct mlx5_ib_mr, ibmr);
+}
+
+static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct mlx5_ib_mw, ibmw);
+}
+
+static inline struct mlx5_ib_flow_action *
+to_mflow_act(struct ib_flow_action *ibact)
+{
+ return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
+}
+
+static inline struct mlx5_user_mmap_entry *
+to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry,
+ struct mlx5_user_mmap_entry, rdma_entry);
+}
+
+int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
+ struct mlx5_db *db);
+void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
+void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
+void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
+void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
+int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+ return 0;
+}
+int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
+int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata);
+int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
+void mlx5_ib_drain_sq(struct ib_qp *qp);
+void mlx5_ib_drain_rq(struct ib_qp *qp);
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc);
+int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
+int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
+int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
+struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
+struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata);
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata);
+int mlx5_ib_advise_mr(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags,
+ struct ib_sge *sg_list,
+ u32 num_sge,
+ struct uverbs_attr_bundle *attrs);
+int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
+int mlx5_ib_dealloc_mw(struct ib_mw *mw);
+struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
+ int access_flags);
+void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
+void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
+struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 virt_addr, int access_flags,
+ struct ib_pd *pd, struct ib_udata *udata);
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
+ u32 max_num_sg,
+ u32 max_num_meta_sg);
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset);
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
+ const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+ const struct ib_mad *in, struct ib_mad *out,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
+int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
+int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
+int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
+ __be64 *sys_image_guid);
+int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
+ u16 *max_pkeys);
+int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
+ u32 *vendor_id);
+int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
+int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
+int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey);
+int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
+ union ib_gid *gid);
+int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
+int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *props);
+void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
+ u64 access_flags);
+void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
+
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ struct mlx5_cache_ent *ent,
+ int access_flags);
+
+int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
+ struct ib_mr_status *mr_status);
+struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
+int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata);
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata);
+int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
+struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
+ struct ib_dm_mr_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
+void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
+int __init mlx5_ib_odp_init(void);
+void mlx5_ib_odp_cleanup(void);
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags);
+
+int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge);
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
+int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
+#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
+static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_pf_eq *eq)
+{
+ return 0;
+}
+static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
+static inline int mlx5_ib_odp_init(void) { return 0; }
+static inline void mlx5_ib_odp_cleanup(void) {}
+static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags) {}
+
+static inline int
+mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice, u32 flags,
+ struct ib_sge *sg_list, u32 num_sge)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+
+extern const struct mmu_interval_notifier_ops mlx5_mn_ops;
+
+/* Needed for rep profile */
+void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage);
+int __mlx5_ib_add(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile);
+
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
+ u32 port, struct ifla_vf_info *info);
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+ u32 port, int state);
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+ u32 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
+ u64 guid, int type);
+
+__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
+ const struct ib_gid_attr *attr);
+
+void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
+void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
+
+/* GSI QP helper functions */
+int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
+ struct ib_qp_init_attr *attr);
+int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+ int attr_mask);
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr);
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
+
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
+ int bfregn);
+struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
+struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
+ u32 ib_port_num,
+ u32 *native_port_num);
+void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
+ u32 port_num);
+
+extern const struct uapi_definition mlx5_ib_devx_defs[];
+extern const struct uapi_definition mlx5_ib_flow_defs[];
+extern const struct uapi_definition mlx5_ib_qos_defs[];
+extern const struct uapi_definition mlx5_ib_std_types_defs[];
+
+static inline int is_qp1(enum ib_qp_type qp_type)
+{
+ return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
+}
+
+static inline u32 check_cq_create_flags(u32 flags)
+{
+ /*
+ * Return a non-zero value for unsupported CQ
+ * create flags; otherwise return zero.
+ */
+ return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
+ IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
+}
+
+static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
+ u32 *user_index)
+{
+ if (cqe_version) {
+ if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
+ (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
+ return -EINVAL;
+ *user_index = cmd_uidx;
+ } else {
+ *user_index = MLX5_IB_DEFAULT_UIDX;
+ }
+
+ return 0;
+}
+
+static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
+ struct mlx5_ib_create_qp *ucmd,
+ int inlen,
+ u32 *user_index)
+{
+ u8 cqe_version = ucontext->cqe_version;
+
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+ (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ return 0;
+
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
+ return -EINVAL;
+
+ return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
+
+static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
+ struct mlx5_ib_create_srq *ucmd,
+ int inlen,
+ u32 *user_index)
+{
+ u8 cqe_version = ucontext->cqe_version;
+
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
+ (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
+ return 0;
+
+ if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
+ return -EINVAL;
+
+ return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
+}
+
+static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
+{
+ return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
+ MLX5_UARS_IN_PAGE : 1;
+}
+
+extern void *xlt_emergency_page;
+
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg);
+
+static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mmkey)
+{
+ refcount_set(&mmkey->usecount, 1);
+
+ return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
+ mmkey, GFP_KERNEL));
+}
+
+/* deref an mkey that can participate in ODP flow */
+static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
+{
+ if (refcount_dec_and_test(&mmkey->usecount))
+ wake_up(&mmkey->wait);
+}
+
+/* deref an mkey that can participate in ODP flow and wait for release */
+static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
+{
+ mlx5r_deref_odp_mkey(mmkey);
+ wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
+}
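/*
 * A lifecycle sketch, not part of the patch; the function below is
 * hypothetical. mlx5r_store_odp_mkey() sets usecount to 1 for the
 * registration itself, lookups in the ODP fault path take and drop extra
 * references with mlx5r_deref_odp_mkey(), and teardown drops the
 * registration reference and waits for in-flight users:
 */
static void example_destroy_odp_mkey(struct mlx5_ib_dev *dev,
				     struct mlx5_ib_mkey *mmkey)
{
	xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key));
	/* Drop the registration ref and sleep until all users are gone. */
	mlx5r_deref_wait_odp_mkey(mmkey);
}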
+
+int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
+
+static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
+{
+ /*
+ * If the driver is in hash mode and the port_select_flow_table_bypass cap
+ * is supported, it means that the driver no longer needs to assign the port
+ * affinity by default. If a user wants to set the port affinity explicitly,
+ * the user has a dedicated API to do that, so there is no need to assign
+ * the port affinity by default.
+ */
+ if (dev->lag_active &&
+ mlx5_lag_mode_is_hash(dev->mdev) &&
+ MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
+ return 0;
+
+ if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
+ return 0;
+
+ return dev->lag_active ||
+ (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
+ MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
+}
+
+static inline bool rt_supported(int ts_cap)
+{
+ return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
+ ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+}
+
+/*
+ * PCI Peer to Peer is a trainwreck. If no switch is present then things
+ * sometimes work, depending on the pci_distance_p2p logic for excluding broken
+ * root complexes. However if a switch is present in the path, then things get
+ * really ugly depending on how the switch is set up. This table assumes that the
+ * root complex is strict and validates that all req/reps are matched
+ * perfectly - so any scenario where it sees only half the transaction is a
+ * failure.
+ *
+ * CR/RR/DT ATS RO P2P
+ * 00X X X OK
+ * 010 X X fails (request is routed to root but root never sees comp)
+ * 011 0 X fails (request is routed to root but root never sees comp)
+ * 011 1 X OK
+ * 10X X 1 OK
+ * 101 X 0 fails (completion is routed to root but root didn't see req)
+ * 110 X 0 SLOW
+ * 111 0 0 SLOW
+ * 111 1 0 fails (completion is routed to root but root didn't see req)
+ * 111 1 1 OK
+ *
+ * Unfortunately we cannot reliably know if a switch is present or what the
+ * CR/RR/DT ACS settings are, as in a VM that is all hidden. Assume that
+ * CR/RR/DT is 111 if the ATS cap is enabled and follow the last three rows.
+ *
+ * For now assume if the umem is a dma_buf then it is P2P.
+ */
+static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
+ struct ib_umem *umem, int access_flags)
+{
+ if (!MLX5_CAP_GEN(dev->mdev, ats) || !umem->is_dmabuf)
+ return false;
+ return access_flags & IB_ACCESS_RELAXED_ORDERING;
+}
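/*
 * A usage sketch, not part of the patch, with the mkc field name assumed:
 * based on the table above, a registration path would only enable ATS
 * translation for relaxed-ordering dma-buf umems, e.g.
 *
 *	if (mlx5_umem_needs_ats(dev, umem, access_flags))
 *		MLX5_SET(mkc, mkc, ma_translation_mode, 1);
 */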
+
+#endif /* MLX5_IB_H */
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
new file mode 100644
index 000000000..410cc5fd2
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -0,0 +1,2344 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2020, Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <linux/kref.h>
+#include <linux/random.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+#include <rdma/ib_umem_odp.h>
+#include "dm.h"
+#include "mlx5_ib.h"
+#include "umr.h"
+
+enum {
+ MAX_PENDING_REG_MR = 8,
+};
+
+#define MLX5_UMR_ALIGN 2048
+
+static void
+create_mkey_callback(int status, struct mlx5_async_work *context);
+static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
+ u64 iova, int access_flags,
+ unsigned int page_size, bool populate);
+
+static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
+ struct ib_pd *pd)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+
+ MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, mkc, lr, 1);
+
+ if ((acc & IB_ACCESS_RELAXED_ORDERING) &&
+ pcie_relaxed_ordering_enabled(dev->mdev->pdev)) {
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write))
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read))
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
+ }
+
+ MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET64(mkc, mkc, start_addr, start_addr);
+}
+
+static void assign_mkey_variant(struct mlx5_ib_dev *dev, u32 *mkey, u32 *in)
+{
+ u8 key = atomic_inc_return(&dev->mkey_var);
+ void *mkc;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, mkey_7_0, key);
+ *mkey = key;
+}
+
+static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
+{
+ int ret;
+
+ assign_mkey_variant(dev, &mkey->key, in);
+ ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
+ if (!ret)
+ init_waitqueue_head(&mkey->wait);
+
+ return ret;
+}
+
+static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
+{
+ struct mlx5_ib_dev *dev = async_create->ent->dev;
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ size_t outlen = MLX5_ST_SZ_BYTES(create_mkey_out);
+
+ MLX5_SET(create_mkey_in, async_create->in, opcode,
+ MLX5_CMD_OP_CREATE_MKEY);
+ assign_mkey_variant(dev, &async_create->mkey, async_create->in);
+ return mlx5_cmd_exec_cb(&dev->async_ctx, async_create->in, inlen,
+ async_create->out, outlen, create_mkey_callback,
+ &async_create->cb_work);
+}
+
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
+
+static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
+{
+ WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
+
+ return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
+}
+
+static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
+{
+ if (status == -ENXIO) /* core driver is not available */
+ return;
+
+ mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
+ if (status != -EREMOTEIO) /* driver specific failure */
+ return;
+
+ /* Failed in FW, print cmd out failure details */
+ mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
+}
+
+
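+/*
+ * Reserve one slot in the entry's xarray, optionally storing an mkey into it
+ * right away. The xarray memory is allocated outside the lock via
+ * xas_nomem(). Returns -EAGAIN when limit_pendings is set and too many mkey
+ * creations are already pending for this entry.
+ */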
+static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
+ void *to_store)
+{
+ XA_STATE(xas, &ent->mkeys, 0);
+ void *curr;
+
+ xa_lock_irq(&ent->mkeys);
+ if (limit_pendings &&
+ (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) {
+ xa_unlock_irq(&ent->mkeys);
+ return -EAGAIN;
+ }
+ while (1) {
+ /*
+		 * This is cmpxchg(NULL, XA_ZERO_ENTRY); however, this version
+		 * doesn't transparently unlock. Instead we set the xas index to
+		 * the current value of reserved every iteration.
+ */
+ xas_set(&xas, ent->reserved);
+ curr = xas_load(&xas);
+ if (!curr) {
+ if (to_store && ent->stored == ent->reserved)
+ xas_store(&xas, to_store);
+ else
+ xas_store(&xas, XA_ZERO_ENTRY);
+ if (xas_valid(&xas)) {
+ ent->reserved++;
+ if (to_store) {
+ if (ent->stored != ent->reserved)
+ __xa_store(&ent->mkeys,
+ ent->stored,
+ to_store,
+ GFP_KERNEL);
+ ent->stored++;
+ queue_adjust_cache_locked(ent);
+ WRITE_ONCE(ent->dev->cache.last_add,
+ jiffies);
+ }
+ }
+ }
+ xa_unlock_irq(&ent->mkeys);
+
+ /*
+ * Notice xas_nomem() must always be called as it cleans
+ * up any cached allocation.
+ */
+ if (!xas_nomem(&xas, GFP_KERNEL))
+ break;
+ xa_lock_irq(&ent->mkeys);
+ }
+ if (xas_error(&xas))
+ return xas_error(&xas);
+ if (WARN_ON(curr))
+ return -EINVAL;
+ return 0;
+}
+
+static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
+{
+ void *old;
+
+ ent->reserved--;
+ old = __xa_erase(&ent->mkeys, ent->reserved);
+ WARN_ON(old);
+}
+
+static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey)
+{
+ void *old;
+
+ old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0);
+ WARN_ON(old);
+ ent->stored++;
+}
+
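+/*
+ * Remove the most recently stored mkey from the entry. When reserved slots
+ * exist above the stored ones, leave a ZERO placeholder at the vacated
+ * stored index and erase the top reserved slot instead, keeping the reserved
+ * placeholders contiguous above the stored mkeys.
+ */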
+static u32 pop_stored_mkey(struct mlx5_cache_ent *ent)
+{
+ void *old, *xa_mkey;
+
+ ent->stored--;
+ ent->reserved--;
+
+ if (ent->stored == ent->reserved) {
+ xa_mkey = __xa_erase(&ent->mkeys, ent->stored);
+ WARN_ON(!xa_mkey);
+ return (u32)xa_to_value(xa_mkey);
+ }
+
+ xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY,
+ GFP_KERNEL);
+ WARN_ON(!xa_mkey || xa_is_err(xa_mkey));
+ old = __xa_erase(&ent->mkeys, ent->reserved);
+ WARN_ON(old);
+ return (u32)xa_to_value(xa_mkey);
+}
+
+static void create_mkey_callback(int status, struct mlx5_async_work *context)
+{
+ struct mlx5r_async_create_mkey *mkey_out =
+ container_of(context, struct mlx5r_async_create_mkey, cb_work);
+ struct mlx5_cache_ent *ent = mkey_out->ent;
+ struct mlx5_ib_dev *dev = ent->dev;
+ unsigned long flags;
+
+ if (status) {
+ create_mkey_warn(dev, status, mkey_out->out);
+ kfree(mkey_out);
+ xa_lock_irqsave(&ent->mkeys, flags);
+ undo_push_reserve_mkey(ent);
+ WRITE_ONCE(dev->fill_delay, 1);
+ xa_unlock_irqrestore(&ent->mkeys, flags);
+ mod_timer(&dev->delay_timer, jiffies + HZ);
+ return;
+ }
+
+ mkey_out->mkey |= mlx5_idx_to_mkey(
+ MLX5_GET(create_mkey_out, mkey_out->out, mkey_index));
+ WRITE_ONCE(dev->cache.last_add, jiffies);
+
+ xa_lock_irqsave(&ent->mkeys, flags);
+ push_to_reserved(ent, mkey_out->mkey);
+ /* If we are doing fill_to_high_water then keep going. */
+ queue_adjust_cache_locked(ent);
+ xa_unlock_irqrestore(&ent->mkeys, flags);
+ kfree(mkey_out);
+}
+
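+/*
+ * Number of 16-byte octowords needed to hold ndescs translation descriptors
+ * for the given access mode (MTT or KSM).
+ */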
+static int get_mkc_octo_size(unsigned int access_mode, unsigned int ndescs)
+{
+ int ret = 0;
+
+ switch (access_mode) {
+ case MLX5_MKC_ACCESS_MODE_MTT:
+ ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
+ sizeof(struct mlx5_mtt));
+ break;
+ case MLX5_MKC_ACCESS_MODE_KSM:
+ ret = DIV_ROUND_UP(ndescs, MLX5_IB_UMR_OCTOWORD /
+ sizeof(struct mlx5_klm));
+ break;
+ default:
+ WARN_ON(1);
+ }
+ return ret;
+}
+
+static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
+{
+ set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
+
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_mkc_octo_size(ent->access_mode, ent->ndescs));
+ MLX5_SET(mkc, mkc, log_page_size, ent->page);
+}
+
+/* Asynchronously schedule new MRs to be populated in the cache. */
+static int add_keys(struct mlx5_cache_ent *ent, unsigned int num)
+{
+ struct mlx5r_async_create_mkey *async_create;
+ void *mkc;
+ int err = 0;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ async_create = kzalloc(sizeof(struct mlx5r_async_create_mkey),
+ GFP_KERNEL);
+ if (!async_create)
+ return -ENOMEM;
+ mkc = MLX5_ADDR_OF(create_mkey_in, async_create->in,
+ memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
+ async_create->ent = ent;
+
+ err = push_mkey(ent, true, NULL);
+ if (err)
+ goto free_async_create;
+
+ err = mlx5_ib_create_mkey_cb(async_create);
+ if (err) {
+ mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err);
+ goto err_undo_reserve;
+ }
+ }
+
+ return 0;
+
+err_undo_reserve:
+ xa_lock_irq(&ent->mkeys);
+ undo_push_reserve_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+free_async_create:
+ kfree(async_create);
+ return err;
+}
+
+/* Synchronously create an MR in the cache */
+static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey)
+{
+ size_t inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ void *mkc;
+ u32 *in;
+ int err;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ set_cache_mkc(ent, mkc);
+
+ err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen);
+ if (err)
+ goto free_in;
+
+ WRITE_ONCE(ent->dev->cache.last_add, jiffies);
+free_in:
+ kfree(in);
+ return err;
+}
+
+static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
+{
+ u32 mkey;
+
+ lockdep_assert_held(&ent->mkeys.xa_lock);
+ if (!ent->stored)
+ return;
+ mkey = pop_stored_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+ mlx5_core_destroy_mkey(ent->dev->mdev, mkey);
+ xa_lock_irq(&ent->mkeys);
+}
+
+static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target,
+ bool limit_fill)
+ __acquires(&ent->mkeys) __releases(&ent->mkeys)
+{
+ int err;
+
+ lockdep_assert_held(&ent->mkeys.xa_lock);
+
+ while (true) {
+ if (limit_fill)
+ target = ent->limit * 2;
+ if (target == ent->reserved)
+ return 0;
+ if (target > ent->reserved) {
+ u32 todo = target - ent->reserved;
+
+ xa_unlock_irq(&ent->mkeys);
+ err = add_keys(ent, todo);
+ if (err == -EAGAIN)
+ usleep_range(3000, 5000);
+ xa_lock_irq(&ent->mkeys);
+ if (err) {
+ if (err != -EAGAIN)
+ return err;
+ } else
+ return 0;
+ } else {
+ remove_cache_mr_locked(ent);
+ }
+ }
+}
+
+static ssize_t size_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_cache_ent *ent = filp->private_data;
+ u32 target;
+ int err;
+
+ err = kstrtou32_from_user(buf, count, 0, &target);
+ if (err)
+ return err;
+
+ /*
+	 * Target is the new value of total_mrs the user requests; however, we
+	 * cannot free MRs that are in use. Compute the target value for stored
+	 * mkeys.
+ */
+ xa_lock_irq(&ent->mkeys);
+ if (target < ent->in_use) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ target = target - ent->in_use;
+ if (target < ent->limit || target > ent->limit*2) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ err = resize_available_mrs(ent, target, false);
+ if (err)
+ goto err_unlock;
+ xa_unlock_irq(&ent->mkeys);
+
+ return count;
+
+err_unlock:
+ xa_unlock_irq(&ent->mkeys);
+ return err;
+}
+
+static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_cache_ent *ent = filp->private_data;
+ char lbuf[20];
+ int err;
+
+ err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use);
+ if (err < 0)
+ return err;
+
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
+}
+
+static const struct file_operations size_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = size_write,
+ .read = size_read,
+};
+
+static ssize_t limit_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mlx5_cache_ent *ent = filp->private_data;
+ u32 var;
+ int err;
+
+ err = kstrtou32_from_user(buf, count, 0, &var);
+ if (err)
+ return err;
+
+ /*
+ * Upon set we immediately fill the cache to high water mark implied by
+ * the limit.
+ */
+ xa_lock_irq(&ent->mkeys);
+ ent->limit = var;
+ err = resize_available_mrs(ent, 0, true);
+ xa_unlock_irq(&ent->mkeys);
+ if (err)
+ return err;
+ return count;
+}
+
+static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct mlx5_cache_ent *ent = filp->private_data;
+ char lbuf[20];
+ int err;
+
+ err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
+ if (err < 0)
+ return err;
+
+ return simple_read_from_buffer(buf, count, pos, lbuf, err);
+}
+
+static const struct file_operations limit_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = limit_write,
+ .read = limit_read,
+};
+
+static bool someone_adding(struct mlx5_mkey_cache *cache)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
+ struct mlx5_cache_ent *ent = &cache->ent[i];
+ bool ret;
+
+ xa_lock_irq(&ent->mkeys);
+ ret = ent->stored < ent->limit;
+ xa_unlock_irq(&ent->mkeys);
+ if (ret)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Check if the bucket is outside the high/low water mark and schedule an async
+ * update. The cache refill has hysteresis: once the low water mark is hit, it
+ * is refilled up to the high mark.
+ */
+static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
+{
+ lockdep_assert_held(&ent->mkeys.xa_lock);
+
+ if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
+ return;
+ if (ent->stored < ent->limit) {
+ ent->fill_to_high_water = true;
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
+ } else if (ent->fill_to_high_water &&
+ ent->reserved < 2 * ent->limit) {
+ /*
+		 * Once we start populating due to hitting a low water mark,
+		 * continue until we pass the high water mark.
+ */
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
+ } else if (ent->stored == 2 * ent->limit) {
+ ent->fill_to_high_water = false;
+ } else if (ent->stored > 2 * ent->limit) {
+ /* Queue deletion of excess entries */
+ ent->fill_to_high_water = false;
+ if (ent->stored != ent->reserved)
+ queue_delayed_work(ent->dev->cache.wq, &ent->dwork,
+ msecs_to_jiffies(1000));
+ else
+ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0);
+ }
+}
+
+static void __cache_work_func(struct mlx5_cache_ent *ent)
+{
+ struct mlx5_ib_dev *dev = ent->dev;
+ struct mlx5_mkey_cache *cache = &dev->cache;
+ int err;
+
+ xa_lock_irq(&ent->mkeys);
+ if (ent->disabled)
+ goto out;
+
+ if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit &&
+ !READ_ONCE(dev->fill_delay)) {
+ xa_unlock_irq(&ent->mkeys);
+ err = add_keys(ent, 1);
+ xa_lock_irq(&ent->mkeys);
+ if (ent->disabled)
+ goto out;
+ if (err) {
+ /*
+ * EAGAIN only happens if there are pending MRs, so we
+ * will be rescheduled when storing them. The only
+ * failure path here is ENOMEM.
+ */
+ if (err != -EAGAIN) {
+ mlx5_ib_warn(
+ dev,
+ "command failed order %d, err %d\n",
+ ent->order, err);
+ queue_delayed_work(cache->wq, &ent->dwork,
+ msecs_to_jiffies(1000));
+ }
+ }
+ } else if (ent->stored > 2 * ent->limit) {
+ bool need_delay;
+
+ /*
+		 * The remove_cache_mr_locked() logic is performed as a garbage
+		 * collection task. Such a task is intended to run when no
+		 * other active processes are running.
+		 *
+		 * need_resched() returns true if there are user tasks to be
+		 * activated in the near future.
+		 *
+		 * In that case, we don't execute remove_cache_mr_locked() and
+		 * postpone the garbage collection work to the next cycle, in
+		 * order to free CPU resources for other tasks.
+ */
+ xa_unlock_irq(&ent->mkeys);
+ need_delay = need_resched() || someone_adding(cache) ||
+ !time_after(jiffies,
+ READ_ONCE(cache->last_add) + 300 * HZ);
+ xa_lock_irq(&ent->mkeys);
+ if (ent->disabled)
+ goto out;
+ if (need_delay) {
+ queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
+ goto out;
+ }
+ remove_cache_mr_locked(ent);
+ queue_adjust_cache_locked(ent);
+ }
+out:
+ xa_unlock_irq(&ent->mkeys);
+}
+
+static void delayed_cache_work_func(struct work_struct *work)
+{
+ struct mlx5_cache_ent *ent;
+
+ ent = container_of(work, struct mlx5_cache_ent, dwork.work);
+ __cache_work_func(ent);
+}
+
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ struct mlx5_cache_ent *ent,
+ int access_flags)
+{
+ struct mlx5_ib_mr *mr;
+ int err;
+
+ if (!mlx5r_umr_can_reconfig(dev, 0, access_flags))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ xa_lock_irq(&ent->mkeys);
+ ent->in_use++;
+
+ if (!ent->stored) {
+ queue_adjust_cache_locked(ent);
+ ent->miss++;
+ xa_unlock_irq(&ent->mkeys);
+ err = create_cache_mkey(ent, &mr->mmkey.key);
+ if (err) {
+ xa_lock_irq(&ent->mkeys);
+ ent->in_use--;
+ xa_unlock_irq(&ent->mkeys);
+ kfree(mr);
+ return ERR_PTR(err);
+ }
+ } else {
+ mr->mmkey.key = pop_stored_mkey(ent);
+ queue_adjust_cache_locked(ent);
+ xa_unlock_irq(&ent->mkeys);
+ }
+ mr->mmkey.cache_ent = ent;
+ mr->mmkey.type = MLX5_MKEY_MR;
+ init_waitqueue_head(&mr->mmkey.wait);
+ return mr;
+}
+
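+/* Destroy every mkey still stored in one cache entry during teardown. */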
+static void clean_keys(struct mlx5_ib_dev *dev, int c)
+{
+ struct mlx5_mkey_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent = &cache->ent[c];
+ u32 mkey;
+
+ cancel_delayed_work(&ent->dwork);
+ xa_lock_irq(&ent->mkeys);
+ while (ent->stored) {
+ mkey = pop_stored_mkey(ent);
+ xa_unlock_irq(&ent->mkeys);
+ mlx5_core_destroy_mkey(dev->mdev, mkey);
+ xa_lock_irq(&ent->mkeys);
+ }
+ xa_unlock_irq(&ent->mkeys);
+}
+
+static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (!mlx5_debugfs_root || dev->is_rep)
+ return;
+
+ debugfs_remove_recursive(dev->cache.root);
+ dev->cache.root = NULL;
+}
+
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_mkey_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent;
+ struct dentry *dir;
+ int i;
+
+ if (!mlx5_debugfs_root || dev->is_rep)
+ return;
+
+ cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
+
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
+ ent = &cache->ent[i];
+ sprintf(ent->name, "%d", ent->order);
+ dir = debugfs_create_dir(ent->name, cache->root);
+ debugfs_create_file("size", 0600, dir, ent, &size_fops);
+ debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
+ debugfs_create_ulong("cur", 0400, dir, &ent->stored);
+ debugfs_create_u32("miss", 0600, dir, &ent->miss);
+ }
+}
+
+static void delay_time_func(struct timer_list *t)
+{
+ struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
+
+ WRITE_ONCE(dev->fill_delay, 0);
+}
+
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_mkey_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent;
+ int i;
+
+ mutex_init(&dev->slow_path_mutex);
+ cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
+ if (!cache->wq) {
+ mlx5_ib_warn(dev, "failed to create work queue\n");
+ return -ENOMEM;
+ }
+
+ mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
+ timer_setup(&dev->delay_timer, delay_time_func, 0);
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
+ ent = &cache->ent[i];
+ xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
+ ent->order = i + 2;
+ ent->dev = dev;
+ ent->limit = 0;
+
+ INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
+
+ if (i > MKEY_CACHE_LAST_STD_ENTRY) {
+ mlx5_odp_init_mkey_cache_entry(ent);
+ continue;
+ }
+
+ if (ent->order > mkey_cache_max_order(dev))
+ continue;
+
+ ent->page = PAGE_SHIFT;
+ ent->ndescs = 1 << ent->order;
+ ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+ if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
+ !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
+ mlx5r_umr_can_load_pas(dev, 0))
+ ent->limit = dev->mdev->profile.mr_cache[i].limit;
+ else
+ ent->limit = 0;
+ xa_lock_irq(&ent->mkeys);
+ queue_adjust_cache_locked(ent);
+ xa_unlock_irq(&ent->mkeys);
+ }
+
+ mlx5_mkey_cache_debugfs_init(dev);
+
+ return 0;
+}
+
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
+{
+ unsigned int i;
+
+ if (!dev->cache.wq)
+ return 0;
+
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
+ struct mlx5_cache_ent *ent = &dev->cache.ent[i];
+
+ xa_lock_irq(&ent->mkeys);
+ ent->disabled = true;
+ xa_unlock_irq(&ent->mkeys);
+ cancel_delayed_work_sync(&ent->dwork);
+ }
+
+ mlx5_mkey_cache_debugfs_cleanup(dev);
+ mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
+
+ for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
+ clean_keys(dev, i);
+
+ destroy_workqueue(dev->cache.wq);
+ del_timer_sync(&dev->delay_timer);
+
+ return 0;
+}
+
+struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ struct mlx5_ib_mr *mr;
+ void *mkc;
+ u32 *in;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+ MLX5_SET(mkc, mkc, length64, 1);
+ set_mkc_access_pd_addr_fields(mkc, acc | IB_ACCESS_RELAXED_ORDERING, 0,
+ pd);
+
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err)
+ goto err_in;
+
+ kfree(in);
+ mr->mmkey.type = MLX5_MKEY_MR;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+err_in:
+ kfree(in);
+
+err_free:
+ kfree(mr);
+
+ return ERR_PTR(err);
+}
+
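+/*
+ * Number of 16-byte octowords needed for the MTT entries covering
+ * [addr, addr + len) at the given page size (two 8-byte MTTs per octoword).
+ */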
+static int get_octo_len(u64 addr, u64 len, int page_shift)
+{
+ u64 page_size = 1ULL << page_shift;
+ u64 offset;
+ int npages;
+
+ offset = addr & (page_size - 1);
+ npages = ALIGN(len + offset, page_size) >> page_shift;
+ return (npages + 1) / 2;
+}
+
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
+{
+ if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ return MKEY_CACHE_LAST_STD_ENTRY + 2;
+ return MLX5_MAX_UMR_SHIFT;
+}
+
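+/*
+ * Map a translation order to its cache entry: orders below the smallest
+ * entry round up to it, and orders above the last standard entry are not
+ * served from the cache.
+ */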
+static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
+ unsigned int order)
+{
+ struct mlx5_mkey_cache *cache = &dev->cache;
+
+ if (order < cache->ent[0].order)
+ return &cache->ent[0];
+ order = order - cache->ent[0].order;
+ if (order > MKEY_CACHE_LAST_STD_ENTRY)
+ return NULL;
+ return &cache->ent[order];
+}
+
+static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
+ u64 length, int access_flags, u64 iova)
+{
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+ mr->ibmr.length = length;
+ mr->ibmr.device = &dev->ib_dev;
+ mr->ibmr.iova = iova;
+ mr->access_flags = access_flags;
+}
+
+static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem,
+ u64 iova)
+{
+ /*
+ * The alignment of iova has already been checked upon entering
+ * UVERBS_METHOD_REG_DMABUF_MR
+ */
+ umem->iova = iova;
+ return PAGE_SIZE;
+}
+
+static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
+ struct ib_umem *umem, u64 iova,
+ int access_flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_cache_ent *ent;
+ struct mlx5_ib_mr *mr;
+ unsigned int page_size;
+
+ if (umem->is_dmabuf)
+ page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
+ else
+ page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
+ 0, iova);
+ if (WARN_ON(!page_size))
+ return ERR_PTR(-EINVAL);
+ ent = mkey_cache_ent_from_order(
+ dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
+ /*
+	 * Matches the access check in mlx5_mr_cache_alloc(). If the MR can't
+	 * come from the cache, then synchronously create an uncached one.
+ */
+ if (!ent || ent->limit == 0 ||
+ !mlx5r_umr_can_reconfig(dev, 0, access_flags) ||
+ mlx5_umem_needs_ats(dev, umem, access_flags)) {
+ mutex_lock(&dev->slow_path_mutex);
+ mr = reg_create(pd, umem, iova, access_flags, page_size, false);
+ mutex_unlock(&dev->slow_path_mutex);
+ return mr;
+ }
+
+ mr = mlx5_mr_cache_alloc(dev, ent, access_flags);
+ if (IS_ERR(mr))
+ return mr;
+
+ mr->ibmr.pd = pd;
+ mr->umem = umem;
+ mr->page_shift = order_base_2(page_size);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
+
+ return mr;
+}
+
+/*
+ * reg_create is the slow path for MR registration: it builds and executes a
+ * CREATE_MKEY command directly, optionally populating the page list (PAS) in
+ * the command instead of via a later UMR.
+ */
+static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
+ u64 iova, int access_flags,
+ unsigned int page_size, bool populate)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr;
+ __be64 *pas;
+ void *mkc;
+ int inlen;
+ u32 *in;
+ int err;
+ bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
+
+ if (!page_size)
+ return ERR_PTR(-EINVAL);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->ibmr.pd = pd;
+ mr->access_flags = access_flags;
+ mr->page_shift = order_base_2(page_size);
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ if (populate)
+ inlen += sizeof(*pas) *
+ roundup(ib_umem_num_dma_blocks(umem, page_size), 2);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_1;
+ }
+ pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
+ if (populate) {
+ if (WARN_ON(access_flags & IB_ACCESS_ON_DEMAND)) {
+ err = -EINVAL;
+ goto err_2;
+ }
+ mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
+ pg_cap ? MLX5_IB_MTT_PRESENT : 0);
+ }
+
+ /* The pg_access bit allows setting the access flags
+ * in the page list submitted with the command. */
+ MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ set_mkc_access_pd_addr_fields(mkc, access_flags, iova,
+ populate ? pd : dev->umrc.pd);
+ MLX5_SET(mkc, mkc, free, !populate);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+
+ MLX5_SET64(mkc, mkc, len, umem->length);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 0);
+ MLX5_SET(mkc, mkc, translations_octword_size,
+ get_octo_len(iova, umem->length, mr->page_shift));
+ MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
+ if (mlx5_umem_needs_ats(dev, umem, access_flags))
+ MLX5_SET(mkc, mkc, ma_translation_mode, 1);
+ if (populate) {
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ get_octo_len(iova, umem->length, mr->page_shift));
+ }
+
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err) {
+ mlx5_ib_warn(dev, "create mkey failed\n");
+ goto err_2;
+ }
+ mr->mmkey.type = MLX5_MKEY_MR;
+ mr->umem = umem;
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
+ kvfree(in);
+
+ mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
+
+ return mr;
+
+err_2:
+ kvfree(in);
+err_1:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
+ u64 length, int acc, int mode)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ struct mlx5_ib_mr *mr;
+ void *mkc;
+ u32 *in;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
+ MLX5_SET64(mkc, mkc, len, length);
+ set_mkc_access_pd_addr_fields(mkc, acc, start_addr, pd);
+
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err)
+ goto err_in;
+
+ kfree(in);
+
+ set_mr_fields(dev, mr, length, acc, start_addr);
+
+ return &mr->ibmr;
+
+err_in:
+ kfree(in);
+
+err_free:
+ kfree(mr);
+
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_advise_mr(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags,
+ struct ib_sge *sg_list,
+ u32 num_sge,
+ struct uverbs_attr_bundle *attrs)
+{
+ if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
+ sg_list, num_sge);
+}
+
+struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
+ struct ib_dm_mr_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_dm *mdm = to_mdm(dm);
+ struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
+ u64 start_addr = mdm->dev_addr + attr->offset;
+ int mode;
+
+ switch (mdm->type) {
+ case MLX5_IB_UAPI_DM_TYPE_MEMIC:
+ if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
+ return ERR_PTR(-EINVAL);
+
+ mode = MLX5_MKC_ACCESS_MODE_MEMIC;
+ start_addr -= pci_resource_start(dev->pdev, 0);
+ break;
+ case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
+ case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
+ if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
+ return ERR_PTR(-EINVAL);
+
+ mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
+ attr->access_flags, mode);
+}
+
+static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
+ u64 iova, int access_flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+ bool xlt_with_umr;
+ int err;
+
+ xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
+ if (xlt_with_umr) {
+ mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
+ } else {
+ unsigned int page_size = mlx5_umem_find_best_pgsz(
+ umem, mkc, log_page_size, 0, iova);
+
+ mutex_lock(&dev->slow_path_mutex);
+ mr = reg_create(pd, umem, iova, access_flags, page_size, true);
+ mutex_unlock(&dev->slow_path_mutex);
+ }
+ if (IS_ERR(mr)) {
+ ib_umem_release(umem);
+ return ERR_CAST(mr);
+ }
+
+ mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
+
+ atomic_add(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
+
+ if (xlt_with_umr) {
+ /*
+ * If the MR was created with reg_create then it will be
+ * configured properly but left disabled. It is safe to go ahead
+ * and configure it again via UMR while enabling it.
+ */
+ err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
+ if (err) {
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+ return ERR_PTR(err);
+ }
+ }
+ return &mr->ibmr;
+}
+
+static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 iova, int access_flags,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem_odp *odp;
+ struct mlx5_ib_mr *mr;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ err = mlx5r_odp_create_eq(dev, &dev->odp_pf_eq);
+ if (err)
+ return ERR_PTR(err);
+ if (!start && length == U64_MAX) {
+ if (iova != 0)
+ return ERR_PTR(-EINVAL);
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+ return ERR_PTR(-EINVAL);
+
+ mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
+ if (IS_ERR(mr))
+ return ERR_CAST(mr);
+ return &mr->ibmr;
+ }
+
+ /* ODP requires xlt update via umr to work. */
+ if (!mlx5r_umr_can_load_pas(dev, length))
+ return ERR_PTR(-EINVAL);
+
+ odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
+ &mlx5_mn_ops);
+ if (IS_ERR(odp))
+ return ERR_CAST(odp);
+
+ mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
+ if (IS_ERR(mr)) {
+ ib_umem_release(&odp->umem);
+ return ERR_CAST(mr);
+ }
+ xa_init(&mr->implicit_children);
+
+ odp->private = mr;
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+ if (err)
+ goto err_dereg_mr;
+
+ err = mlx5_ib_init_odp_mr(mr);
+ if (err)
+ goto err_dereg_mr;
+ return &mr->ibmr;
+
+err_dereg_mr:
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+ return ERR_PTR(err);
+}
+
+struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 iova, int access_flags,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct ib_umem *umem;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mlx5_ib_dbg(dev, "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ start, iova, length, access_flags);
+
+ if (access_flags & IB_ACCESS_ON_DEMAND)
+ return create_user_odp_mr(pd, start, length, iova, access_flags,
+ udata);
+ umem = ib_umem_get(&dev->ib_dev, start, length, access_flags);
+ if (IS_ERR(umem))
+ return ERR_CAST(umem);
+ return create_real_mr(pd, umem, iova, access_flags);
+}
+
+static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+ struct mlx5_ib_mr *mr = umem_dmabuf->private;
+
+ dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+ if (!umem_dmabuf->sgt)
+ return;
+
+ mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+}
+
+static struct dma_buf_attach_ops mlx5_ib_dmabuf_attach_ops = {
+ .allow_peer2peer = 1,
+ .move_notify = mlx5_ib_dmabuf_invalidate_cb,
+};
+
+struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM) ||
+ !IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mlx5_ib_dbg(dev,
+ "offset 0x%llx, virt_addr 0x%llx, length 0x%llx, fd %d, access_flags 0x%x\n",
+ offset, virt_addr, length, fd, access_flags);
+
+ /* dmabuf requires xlt update via umr to work. */
+ if (!mlx5r_umr_can_load_pas(dev, length))
+ return ERR_PTR(-EINVAL);
+
+ umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
+ access_flags,
+ &mlx5_ib_dmabuf_attach_ops);
+ if (IS_ERR(umem_dmabuf)) {
+ mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
+ PTR_ERR(umem_dmabuf));
+ return ERR_CAST(umem_dmabuf);
+ }
+
+ mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
+ access_flags);
+ if (IS_ERR(mr)) {
+ ib_umem_release(&umem_dmabuf->umem);
+ return ERR_CAST(mr);
+ }
+
+ mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
+
+ atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
+ umem_dmabuf->private = mr;
+ err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
+ if (err)
+ goto err_dereg_mr;
+
+ err = mlx5_ib_init_dmabuf_mr(mr);
+ if (err)
+ goto err_dereg_mr;
+ return &mr->ibmr;
+
+err_dereg_mr:
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+ return ERR_PTR(err);
+}
+
+/*
+ * True if the change in access flags can be done via UMR; only some access
+ * flags can be updated.
+ */
+static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
+ unsigned int current_access_flags,
+ unsigned int target_access_flags)
+{
+ unsigned int diffs = current_access_flags ^ target_access_flags;
+
+ if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
+ return false;
+ return mlx5r_umr_can_reconfig(dev, current_access_flags,
+ target_access_flags);
+}
+
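+/*
+ * A UMR can rewrite the page list in place only for cache-backed MRs and
+ * only when the new mapping fits within the translation space allocated for
+ * the MR's cache entry.
+ */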
+static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
+ struct ib_umem *new_umem,
+ int new_access_flags, u64 iova,
+ unsigned long *page_size)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+
+ /* We only track the allocated sizes of MRs from the cache */
+ if (!mr->mmkey.cache_ent)
+ return false;
+ if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
+ return false;
+
+ *page_size =
+ mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
+ if (WARN_ON(!*page_size))
+ return false;
+ return (1ULL << mr->mmkey.cache_ent->order) >=
+ ib_umem_num_dma_blocks(new_umem, *page_size);
+}
+
+static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+ int access_flags, int flags, struct ib_umem *new_umem,
+ u64 iova, unsigned long page_size)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+ int upd_flags = MLX5_IB_UPD_XLT_ADDR | MLX5_IB_UPD_XLT_ENABLE;
+ struct ib_umem *old_umem = mr->umem;
+ int err;
+
+ /*
+	 * To keep everything simple, the MR is revoked before we start to mess
+	 * with it. This ensures the change is atomic relative to any use of
+	 * the MR.
+ */
+ err = mlx5r_umr_revoke_mr(mr);
+ if (err)
+ return err;
+
+ if (flags & IB_MR_REREG_PD) {
+ mr->ibmr.pd = pd;
+ upd_flags |= MLX5_IB_UPD_XLT_PD;
+ }
+ if (flags & IB_MR_REREG_ACCESS) {
+ mr->access_flags = access_flags;
+ upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
+ }
+
+ mr->ibmr.iova = iova;
+ mr->ibmr.length = new_umem->length;
+ mr->page_shift = order_base_2(page_size);
+ mr->umem = new_umem;
+ err = mlx5r_umr_update_mr_pas(mr, upd_flags);
+ if (err) {
+ /*
+		 * The MR is revoked at this point, so there is no problem with
+		 * freeing new_umem.
+ */
+ mr->umem = old_umem;
+ return err;
+ }
+
+ atomic_sub(ib_umem_num_pages(old_umem), &dev->mdev->priv.reg_pages);
+ ib_umem_release(old_umem);
+ atomic_add(ib_umem_num_pages(new_umem), &dev->mdev->priv.reg_pages);
+ return 0;
+}
+
+struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+ u64 length, u64 iova, int new_access_flags,
+ struct ib_pd *new_pd,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
+ struct mlx5_ib_mr *mr = to_mmr(ib_mr);
+ int err;
+
+ if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mlx5_ib_dbg(
+ dev,
+ "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
+ start, iova, length, new_access_flags);
+
+ if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (!(flags & IB_MR_REREG_ACCESS))
+ new_access_flags = mr->access_flags;
+ if (!(flags & IB_MR_REREG_PD))
+ new_pd = ib_mr->pd;
+
+ if (!(flags & IB_MR_REREG_TRANS)) {
+ struct ib_umem *umem;
+
+ /* Fast path for PD/access change */
+ if (can_use_umr_rereg_access(dev, mr->access_flags,
+ new_access_flags)) {
+ err = mlx5r_umr_rereg_pd_access(mr, new_pd,
+ new_access_flags);
+ if (err)
+ return ERR_PTR(err);
+ return NULL;
+ }
+		/* DM or ODP MRs don't have a normal umem, so we can't reuse it */
+ if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
+ goto recreate;
+
+ /*
+		 * Only one active MR can refer to a umem at a time; revoke
+		 * the old MR before assigning the umem to the new one.
+ */
+ err = mlx5r_umr_revoke_mr(mr);
+ if (err)
+ return ERR_PTR(err);
+ umem = mr->umem;
+ mr->umem = NULL;
+ atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
+
+ return create_real_mr(new_pd, umem, mr->ibmr.iova,
+ new_access_flags);
+ }
+
+ /*
+	 * DM doesn't have a PAS list, so we can't reuse it; ODP/dmabuf do,
+	 * but the logic around releasing the umem is different.
+ */
+ if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
+ goto recreate;
+
+ if (!(new_access_flags & IB_ACCESS_ON_DEMAND) &&
+ can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
+ struct ib_umem *new_umem;
+ unsigned long page_size;
+
+ new_umem = ib_umem_get(&dev->ib_dev, start, length,
+ new_access_flags);
+ if (IS_ERR(new_umem))
+ return ERR_CAST(new_umem);
+
+ /* Fast path for PAS change */
+ if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
+ &page_size)) {
+ err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
+ new_umem, iova, page_size);
+ if (err) {
+ ib_umem_release(new_umem);
+ return ERR_PTR(err);
+ }
+ return NULL;
+ }
+ return create_real_mr(new_pd, new_umem, iova, new_access_flags);
+ }
+
+ /*
+	 * Everything else has no state we can preserve; just create a new MR
+	 * from scratch.
+ */
+recreate:
+ return mlx5_ib_reg_user_mr(new_pd, start, length, iova,
+ new_access_flags, udata);
+}
+
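+/*
+ * Allocate and DMA-map the descriptor buffer used by fast-register MRs;
+ * the allocation is padded so the descriptors can be aligned to
+ * MLX5_UMR_ALIGN.
+ */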
+static int
+mlx5_alloc_priv_descs(struct ib_device *device,
+ struct mlx5_ib_mr *mr,
+ int ndescs,
+ int desc_size)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct device *ddev = &dev->mdev->pdev->dev;
+ int size = ndescs * desc_size;
+ int add_size;
+ int ret;
+
+ add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+
+ mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
+ if (!mr->descs_alloc)
+ return -ENOMEM;
+
+ mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
+
+ mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, mr->desc_map)) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+err:
+ kfree(mr->descs_alloc);
+
+ return ret;
+}
+
+static void
+mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+{
+ if (!mr->umem && mr->descs) {
+ struct ib_device *device = mr->ibmr.device;
+ int size = mr->max_descs * mr->desc_size;
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+ DMA_TO_DEVICE);
+ kfree(mr->descs_alloc);
+ mr->descs = NULL;
+ }
+}
+
+int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
+ int rc;
+
+ /*
+	 * Any async use of the MR must hold the refcount. Once the refcount
+	 * goes to zero, no other thread (such as ODP page faults, prefetch, or
+	 * any UMR activity) can touch the mkey, so it is safe to destroy it.
+ */
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+ refcount_read(&mr->mmkey.usecount) != 0 &&
+ xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
+
+ if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
+ xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, NULL, GFP_KERNEL);
+
+ if (mr->mtt_mr) {
+ rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
+ if (rc)
+ return rc;
+ mr->mtt_mr = NULL;
+ }
+ if (mr->klm_mr) {
+ rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
+ if (rc)
+ return rc;
+ mr->klm_mr = NULL;
+ }
+
+ if (mlx5_core_destroy_psv(dev->mdev,
+ mr->sig->psv_memory.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+ mr->sig->psv_memory.psv_idx);
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+ mr->sig->psv_wire.psv_idx);
+ kfree(mr->sig);
+ mr->sig = NULL;
+ }
+
+ /* Stop DMA */
+ if (mr->mmkey.cache_ent) {
+ xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+ mr->mmkey.cache_ent->in_use--;
+ xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
+
+ if (mlx5r_umr_revoke_mr(mr) ||
+ push_mkey(mr->mmkey.cache_ent, false,
+ xa_mk_value(mr->mmkey.key)))
+ mr->mmkey.cache_ent = NULL;
+ }
+ if (!mr->mmkey.cache_ent) {
+ rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
+ if (rc)
+ return rc;
+ }
+
+ if (mr->umem) {
+ bool is_odp = is_odp_mr(mr);
+
+ if (!is_odp)
+ atomic_sub(ib_umem_num_pages(mr->umem),
+ &dev->mdev->priv.reg_pages);
+ ib_umem_release(mr->umem);
+ if (is_odp)
+ mlx5_ib_free_odp_mr(mr);
+ }
+
+ if (!mr->mmkey.cache_ent)
+ mlx5_free_priv_descs(mr);
+
+ kfree(mr);
+ return 0;
+}
+
+static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
+ int access_mode, int page_shift)
+{
+ void *mkc;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ /* This is only used from the kernel, so setting the PD is OK. */
+ set_mkc_access_pd_addr_fields(mkc, IB_ACCESS_RELAXED_ORDERING, 0, pd);
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+ MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, log_page_size, page_shift);
+}
+
+static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int ndescs, int desc_size, int page_shift,
+ int access_mode, u32 *in, int inlen)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int err;
+
+ mr->access_mode = access_mode;
+ mr->desc_size = desc_size;
+ mr->max_descs = ndescs;
+
+ err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
+ if (err)
+ return err;
+
+ mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);
+
+ err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
+ if (err)
+ goto err_free_descs;
+
+ mr->mmkey.type = MLX5_MKEY_MR;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+
+ return 0;
+
+err_free_descs:
+ mlx5_free_priv_descs(mr);
+ return err;
+}
+
+static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
+ u32 max_num_sg, u32 max_num_meta_sg,
+ int desc_size, int access_mode)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
+ int page_shift = 0;
+ struct mlx5_ib_mr *mr;
+ u32 *in;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->ibmr.pd = pd;
+ mr->ibmr.device = pd->device;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ page_shift = PAGE_SHIFT;
+
+ err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
+ access_mode, in, inlen);
+ if (err)
+ goto err_free_in;
+
+ mr->umem = NULL;
+ kfree(in);
+
+ return mr;
+
+err_free_in:
+ kfree(in);
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int ndescs, u32 *in, int inlen)
+{
+ return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
+ PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
+ inlen);
+}
+
+static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int ndescs, u32 *in, int inlen)
+{
+ return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
+ 0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
+}
+
+static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
+ int max_num_sg, int max_num_meta_sg,
+ u32 *in, int inlen)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ u32 psv_index[2];
+ void *mkc;
+ int err;
+
+ mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
+ if (!mr->sig)
+ return -ENOMEM;
+
+ /* create mem & wire PSVs */
+ err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
+ if (err)
+ goto err_free_sig;
+
+ mr->sig->psv_memory.psv_idx = psv_index[0];
+ mr->sig->psv_wire.psv_idx = psv_index[1];
+
+ mr->sig->sig_status_checked = true;
+ mr->sig->sig_err_exists = false;
+ /* Next UMR, Arm SIGERR */
+ ++mr->sig->sigerr_count;
+ mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
+ sizeof(struct mlx5_klm),
+ MLX5_MKC_ACCESS_MODE_KLMS);
+ if (IS_ERR(mr->klm_mr)) {
+ err = PTR_ERR(mr->klm_mr);
+ goto err_destroy_psv;
+ }
+ mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
+ sizeof(struct mlx5_mtt),
+ MLX5_MKC_ACCESS_MODE_MTT);
+ if (IS_ERR(mr->mtt_mr)) {
+ err = PTR_ERR(mr->mtt_mr);
+ goto err_free_klm_mr;
+ }
+
+ /* Set bsf descriptors for mkey */
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, bsf_en, 1);
+ MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);
+
+ err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
+ MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
+ if (err)
+ goto err_free_mtt_mr;
+
+ err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
+ mr->sig, GFP_KERNEL));
+ if (err)
+ goto err_free_descs;
+ return 0;
+
+err_free_descs:
+ destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
+err_free_mtt_mr:
+ mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
+ mr->mtt_mr = NULL;
+err_free_klm_mr:
+ mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
+ mr->klm_mr = NULL;
+err_destroy_psv:
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
+ mr->sig->psv_memory.psv_idx);
+ if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
+ mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
+ mr->sig->psv_wire.psv_idx);
+err_free_sig:
+ kfree(mr->sig);
+
+ return err;
+}
+
+static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type, u32 max_num_sg,
+ u32 max_num_meta_sg)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ int ndescs = ALIGN(max_num_sg, 4);
+ struct mlx5_ib_mr *mr;
+ u32 *in;
+ int err;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ mr->ibmr.device = pd->device;
+ mr->umem = NULL;
+
+ switch (mr_type) {
+ case IB_MR_TYPE_MEM_REG:
+ err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
+ break;
+ case IB_MR_TYPE_SG_GAPS:
+ err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
+ break;
+ case IB_MR_TYPE_INTEGRITY:
+ err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
+ max_num_meta_sg, in, inlen);
+ break;
+ default:
+ mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
+ err = -EINVAL;
+ }
+
+ if (err)
+ goto err_free_in;
+
+ kfree(in);
+
+ return &mr->ibmr;
+
+err_free_in:
+ kfree(in);
+err_free:
+ kfree(mr);
+ return ERR_PTR(err);
+}
+
+struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
+}
+
+struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
+ u32 max_num_sg, u32 max_num_meta_sg)
+{
+ return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
+ max_num_meta_sg);
+}
+
+int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
+ int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ struct mlx5_ib_mw *mw = to_mmw(ibmw);
+ unsigned int ndescs;
+ u32 *in = NULL;
+ void *mkc;
+ int err;
+ struct mlx5_ib_alloc_mw req = {};
+ struct {
+ __u32 comp_mask;
+ __u32 response_length;
+ } resp = {};
+
+ err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
+ if (err)
+ return err;
+
+ if (req.comp_mask || req.reserved1 || req.reserved2)
+ return -EOPNOTSUPP;
+
+ if (udata->inlen > sizeof(req) &&
+ !ib_is_udata_cleared(udata, sizeof(req),
+ udata->inlen - sizeof(req)))
+ return -EOPNOTSUPP;
+
+ ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
+ MLX5_SET(mkc, mkc, pd, to_mpd(ibmw->pd)->pdn);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, en_rinval, !!((ibmw->type == IB_MW_TYPE_2)));
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+ err = mlx5_ib_create_mkey(dev, &mw->mmkey, in, inlen);
+ if (err)
+ goto free;
+
+ mw->mmkey.type = MLX5_MKEY_MW;
+ ibmw->rkey = mw->mmkey.key;
+ mw->mmkey.ndescs = ndescs;
+
+ resp.response_length =
+ min(offsetofend(typeof(resp), response_length), udata->outlen);
+ if (resp.response_length) {
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto free_mkey;
+ }
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+ err = mlx5r_store_odp_mkey(dev, &mw->mmkey);
+ if (err)
+ goto free_mkey;
+ }
+
+ kfree(in);
+ return 0;
+
+free_mkey:
+ mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
+free:
+ kfree(in);
+ return err;
+}
+
+int mlx5_ib_dealloc_mw(struct ib_mw *mw)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mw->device);
+ struct mlx5_ib_mw *mmw = to_mmw(mw);
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) &&
+ xa_erase(&dev->odp_mkeys, mlx5_base_mkey(mmw->mmkey.key)))
+ /*
+ * pagefault_single_data_segment() may be accessing mmw
+ * if the user bound an ODP MR to this MW.
+ */
+ mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
+
+ return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
+}
+
+int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
+ struct ib_mr_status *mr_status)
+{
+ struct mlx5_ib_mr *mmr = to_mmr(ibmr);
+ int ret = 0;
+
+ if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
+ pr_err("Invalid status check mask\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ mr_status->fail_status = 0;
+ if (check_mask & IB_MR_CHECK_SIG_STATUS) {
+ if (!mmr->sig) {
+ ret = -EINVAL;
+ pr_err("signature status check requested on a non-signature enabled MR\n");
+ goto done;
+ }
+
+ mmr->sig->sig_status_checked = true;
+ if (!mmr->sig->sig_err_exists)
+ goto done;
+
+ if (ibmr->lkey == mmr->sig->err_item.key)
+ memcpy(&mr_status->sig_err, &mmr->sig->err_item,
+ sizeof(mr_status->sig_err));
+ else {
+ mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
+ mr_status->sig_err.sig_err_offset = 0;
+ mr_status->sig_err.key = mmr->sig->err_item.key;
+ }
+
+ mmr->sig->sig_err_exists = false;
+ mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
+ }
+
+done:
+ return ret;
+}
+
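+/*
+ * Map a single data SGE and, optionally, a single metadata SGE directly by
+ * their DMA addresses with no descriptor translation. Only possible when
+ * each scatterlist has exactly one entry.
+ */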
+static int
+mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ unsigned int sg_offset = 0;
+ int n = 0;
+
+ mr->meta_length = 0;
+ if (data_sg_nents == 1) {
+ n++;
+ mr->mmkey.ndescs = 1;
+ if (data_sg_offset)
+ sg_offset = *data_sg_offset;
+ mr->data_length = sg_dma_len(data_sg) - sg_offset;
+ mr->data_iova = sg_dma_address(data_sg) + sg_offset;
+ if (meta_sg_nents == 1) {
+ n++;
+ mr->meta_ndescs = 1;
+ if (meta_sg_offset)
+ sg_offset = *meta_sg_offset;
+ else
+ sg_offset = 0;
+ mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
+ mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
+ }
+ ibmr->length = mr->data_length + mr->meta_length;
+ }
+
+ return n;
+}
+
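+/*
+ * Convert the data and metadata scatterlists into KLM descriptors that
+ * reference the PD's local_dma_lkey, returning the number of descriptors
+ * written.
+ */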
+static int
+mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
+ struct scatterlist *sgl,
+ unsigned short sg_nents,
+ unsigned int *sg_offset_p,
+ struct scatterlist *meta_sgl,
+ unsigned short meta_sg_nents,
+ unsigned int *meta_sg_offset_p)
+{
+ struct scatterlist *sg = sgl;
+ struct mlx5_klm *klms = mr->descs;
+ unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
+ u32 lkey = mr->ibmr.pd->local_dma_lkey;
+ int i, j = 0;
+
+ mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
+ mr->ibmr.length = 0;
+
+ for_each_sg(sgl, sg, sg_nents, i) {
+ if (unlikely(i >= mr->max_descs))
+ break;
+ klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
+ klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
+ klms[i].key = cpu_to_be32(lkey);
+ mr->ibmr.length += sg_dma_len(sg) - sg_offset;
+
+ sg_offset = 0;
+ }
+
+ if (sg_offset_p)
+ *sg_offset_p = sg_offset;
+
+ mr->mmkey.ndescs = i;
+ mr->data_length = mr->ibmr.length;
+
+ if (meta_sg_nents) {
+ sg = meta_sgl;
+ sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
+ for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
+ if (unlikely(i + j >= mr->max_descs))
+ break;
+ klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
+ sg_offset);
+ klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
+ sg_offset);
+ klms[i + j].key = cpu_to_be32(lkey);
+ mr->ibmr.length += sg_dma_len(sg) - sg_offset;
+
+ sg_offset = 0;
+ }
+ if (meta_sg_offset_p)
+ *meta_sg_offset_p = sg_offset;
+
+ mr->meta_ndescs = j;
+ mr->meta_length = mr->ibmr.length - mr->data_length;
+ }
+
+ return i + j;
+}
+
+static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ __be64 *descs;
+
+ if (unlikely(mr->mmkey.ndescs == mr->max_descs))
+ return -ENOMEM;
+
+ descs = mr->descs;
+ descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+
+ return 0;
+}
+
+static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ __be64 *descs;
+
+ if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
+ return -ENOMEM;
+
+ descs = mr->descs;
+ descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
+ cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+
+ return 0;
+}
+
+static int
+mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
+ int n;
+
+ pi_mr->mmkey.ndescs = 0;
+ pi_mr->meta_ndescs = 0;
+ pi_mr->meta_length = 0;
+
+ ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ pi_mr->ibmr.page_size = ibmr->page_size;
+ n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
+ mlx5_set_page);
+ if (n != data_sg_nents)
+ return n;
+
+ pi_mr->data_iova = pi_mr->ibmr.iova;
+ pi_mr->data_length = pi_mr->ibmr.length;
+ pi_mr->ibmr.length = pi_mr->data_length;
+ ibmr->length = pi_mr->data_length;
+
+ if (meta_sg_nents) {
+ u64 page_mask = ~((u64)ibmr->page_size - 1);
+ u64 iova = pi_mr->data_iova;
+
+ n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
+ meta_sg_offset, mlx5_set_page_pi);
+
+ pi_mr->meta_length = pi_mr->ibmr.length;
+ /*
+		 * The PI address for the HW is the offset of the metadata
+		 * address relative to the first data page address. It equals
+		 * the first data page address + the size of the data pages +
+		 * the metadata offset within the first metadata page.
+ */
+ pi_mr->pi_iova = (iova & page_mask) +
+ pi_mr->mmkey.ndescs * ibmr->page_size +
+ (pi_mr->ibmr.iova & ~page_mask);
+ /*
+		 * In order to use one MTT MR for data and metadata, we also
+		 * register the gaps between the end of the data and the start
+		 * of the metadata (the sig MR will verify that the HW accesses
+		 * the right addresses). This mapping is safe because we use an
+		 * internal mkey for the registration.
+ */
+ pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
+ pi_mr->ibmr.iova = iova;
+ ibmr->length += pi_mr->meta_length;
+ }
+
+ ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ return n;
+}
+
+static int
+mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_mr *pi_mr = mr->klm_mr;
+ int n;
+
+ pi_mr->mmkey.ndescs = 0;
+ pi_mr->meta_ndescs = 0;
+ pi_mr->meta_length = 0;
+
+ ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+ n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
+ meta_sg, meta_sg_nents, meta_sg_offset);
+
+ ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
+ pi_mr->desc_size * pi_mr->max_descs,
+ DMA_TO_DEVICE);
+
+	/* This is a zero-based memory region */
+ pi_mr->data_iova = 0;
+ pi_mr->ibmr.iova = 0;
+ pi_mr->pi_iova = pi_mr->data_length;
+ ibmr->length = pi_mr->ibmr.length;
+
+ return n;
+}
+
+int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
+ int data_sg_nents, unsigned int *data_sg_offset,
+ struct scatterlist *meta_sg, int meta_sg_nents,
+ unsigned int *meta_sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct mlx5_ib_mr *pi_mr = NULL;
+ int n;
+
+ WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
+
+ mr->mmkey.ndescs = 0;
+ mr->data_length = 0;
+ mr->data_iova = 0;
+ mr->meta_ndescs = 0;
+ mr->pi_iova = 0;
+ /*
+	 * As a performance optimization, if possible, there is no need to
+	 * perform a UMR operation to register the data/metadata buffers.
+	 * First try to map the sg lists to PA descriptors with local_dma_lkey;
+	 * fall back to UMR only in case of a failure.
+ */
+ n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg, meta_sg_nents,
+ meta_sg_offset);
+ if (n == data_sg_nents + meta_sg_nents)
+ goto out;
+ /*
+	 * As a performance optimization, if possible, there is no need to map
+	 * the sg lists to KLM descriptors. First try to map the sg lists to
+	 * MTT descriptors and fall back to KLM only in case of a failure.
+	 * It's more efficient for the HW to work with MTT descriptors
+	 * (especially under high load).
+	 * Use KLM (indirect access) only if it's mandatory.
+ */
+ pi_mr = mr->mtt_mr;
+ n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg, meta_sg_nents,
+ meta_sg_offset);
+ if (n == data_sg_nents + meta_sg_nents)
+ goto out;
+
+ pi_mr = mr->klm_mr;
+ n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
+ data_sg_offset, meta_sg, meta_sg_nents,
+ meta_sg_offset);
+ if (unlikely(n != data_sg_nents + meta_sg_nents))
+ return -ENOMEM;
+
+out:
+	/* This is a zero-based memory region */
+ ibmr->iova = 0;
+ mr->pi_mr = pi_mr;
+ if (pi_mr)
+ ibmr->sig_attrs->meta_length = pi_mr->meta_length;
+ else
+ ibmr->sig_attrs->meta_length = mr->meta_length;
+
+ return 0;
+}
+
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ int n;
+
+ mr->mmkey.ndescs = 0;
+
+ ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
+ mr->desc_size * mr->max_descs,
+ DMA_TO_DEVICE);
+
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
+ n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
+ NULL);
+ else
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+ mlx5_set_page);
+
+ ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
+ mr->desc_size * mr->max_descs,
+ DMA_TO_DEVICE);
+
+ return n;
+}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
new file mode 100644
index 000000000..bc9795881
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -0,0 +1,1821 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <rdma/ib_umem_odp.h>
+#include <linux/kernel.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+
+#include "mlx5_ib.h"
+#include "cmd.h"
+#include "umr.h"
+#include "qp.h"
+
+#include <linux/mlx5/eq.h>
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+ u32 bytes_committed;
+ u32 token;
+ u8 event_subtype;
+ u8 type;
+ union {
+ /* Initiator or send message responder pagefault details. */
+ struct {
+ /* Received packet size, only valid for responders. */
+ u32 packet_size;
+ /*
+			 * Number of the resource holding the WQE;
+			 * interpretation depends on the type.
+ */
+ u32 wq_num;
+ /*
+ * WQE index. Refers to either the send queue or
+ * receive queue, according to event_subtype.
+ */
+ u16 wqe_index;
+ } wqe;
+ /* RDMA responder pagefault details */
+ struct {
+ u32 r_key;
+ /*
+			 * Received packet size; the minimal size whose page
+			 * fault resolution is required for forward progress.
+ */
+ u32 packet_size;
+ u32 rdma_op_len;
+ u64 rdma_va;
+ } rdma;
+ };
+
+ struct mlx5_ib_pf_eq *eq;
+ struct work_struct work;
+};
+
+#define MAX_PREFETCH_LEN (4*1024*1024U)
+
+/* Timeout in ms to wait for an active mmu notifier to complete when handling
+ * a pagefault. */
+#define MMU_NOTIFIER_TIMEOUT 1000
+
+#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
+#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
+#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
+#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
+#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
+
+#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
+
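+/*
+ * Number of KSM entries in the top-level mkey of an implicit ODP MR; sized to
+ * cover the task address space in MLX5_IMR_MTT_SIZE chunks, set in
+ * mlx5_ib_odp_init().
+ */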
+static u64 mlx5_imr_ksm_entries;
+
+static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *imr, int flags)
+{
+ struct mlx5_klm *end = pklm + nentries;
+
+ if (flags & MLX5_IB_UPD_XLT_ZAP) {
+ for (; pklm != end; pklm++, idx++) {
+ pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
+ pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
+ pklm->va = 0;
+ }
+ return;
+ }
+
+ /*
+ * The locking here is pretty subtle. Ideally the implicit_children
+ * xarray would be protected by the umem_mutex, however that is not
+ * possible. Instead this uses a weaker update-then-lock pattern:
+ *
+ * xa_store()
+ * mutex_lock(umem_mutex)
+ * mlx5r_umr_update_xlt()
+ * mutex_unlock(umem_mutex)
+ * destroy lkey
+ *
+ * ie any change the xarray must be followed by the locked update_xlt
+ * before destroying.
+ *
+ * The umem_mutex provides the acquire/release semantic needed to make
+ * the xa_store() visible to a racing thread.
+ */
+ lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
+
+ for (; pklm != end; pklm++, idx++) {
+ struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
+
+ pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
+ if (mtt) {
+ pklm->key = cpu_to_be32(mtt->ibmr.lkey);
+ pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
+ } else {
+ pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
+ pklm->va = 0;
+ }
+ }
+}
+
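+/*
+ * Translate an ODP DMA list entry (DMA address plus access bits) into the
+ * MTT format expected by the HW.
+ */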
+static u64 umem_dma_to_mtt(dma_addr_t umem_dma)
+{
+ u64 mtt_entry = umem_dma & ODP_DMA_ADDR_MASK;
+
+ if (umem_dma & ODP_READ_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_READ;
+ if (umem_dma & ODP_WRITE_ALLOWED_BIT)
+ mtt_entry |= MLX5_IB_MTT_WRITE;
+
+ return mtt_entry;
+}
+
+static void populate_mtt(__be64 *pas, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ dma_addr_t pa;
+ size_t i;
+
+ if (flags & MLX5_IB_UPD_XLT_ZAP)
+ return;
+
+ for (i = 0; i < nentries; i++) {
+ pa = odp->dma_list[idx + i];
+ pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
+ }
+}
+
+void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
+ struct mlx5_ib_mr *mr, int flags)
+{
+ if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
+ populate_klm(xlt, idx, nentries, mr, flags);
+ } else {
+ populate_mtt(xlt, idx, nentries, mr, flags);
+ }
+}
+
+/*
+ * This must be called after the mr has been removed from implicit_children.
+ * NOTE: The MR does not necessarily have to be empty here; parallel page
+ * faults could have raced with the free process and added pages to it.
+ */
+static void free_implicit_child_mr_work(struct work_struct *work)
+{
+ struct mlx5_ib_mr *mr =
+ container_of(work, struct mlx5_ib_mr, odp_destroy.work);
+ struct mlx5_ib_mr *imr = mr->parent;
+ struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ mlx5r_deref_wait_odp_mkey(&mr->mmkey);
+
+ mutex_lock(&odp_imr->umem_mutex);
+ mlx5r_umr_update_xlt(mr->parent,
+ ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
+ MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+
+ mlx5r_deref_odp_mkey(&imr->mmkey);
+}
+
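+/*
+ * An invalidation has emptied this child MR; detach it from the parent's
+ * xarray and defer its destruction to a workqueue, since deregistration
+ * sleeps.
+ */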
+static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
+ struct mlx5_ib_mr *imr = mr->parent;
+
+ if (!refcount_inc_not_zero(&imr->mmkey.usecount))
+ return;
+
+ xa_erase(&imr->implicit_children, idx);
+
+ /* Freeing a MR is a sleeping operation, so bounce to a work queue */
+ INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
+ queue_work(system_unbound_wq, &mr->odp_destroy.work);
+}
+
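+/*
+ * MMU interval notifier callback: zap the MTTs that cover the invalidated
+ * range so the HW faults on its next access, then unmap the DMA pages.
+ */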
+static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct ib_umem_odp *umem_odp =
+ container_of(mni, struct ib_umem_odp, notifier);
+ struct mlx5_ib_mr *mr;
+ const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
+ sizeof(struct mlx5_mtt)) - 1;
+ u64 idx = 0, blk_start_idx = 0;
+ u64 invalidations = 0;
+ unsigned long start;
+ unsigned long end;
+ int in_block = 0;
+ u64 addr;
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ mutex_lock(&umem_odp->umem_mutex);
+ mmu_interval_set_seq(mni, cur_seq);
+ /*
+	 * If npages is zero then umem_odp->private may not be set up yet. This
+ * does not complete until after the first page is mapped for DMA.
+ */
+ if (!umem_odp->npages)
+ goto out;
+ mr = umem_odp->private;
+
+ start = max_t(u64, ib_umem_start(umem_odp), range->start);
+ end = min_t(u64, ib_umem_end(umem_odp), range->end);
+
+ /*
+ * Iteration one - zap the HW's MTTs. The notifiers_count ensures that
+ * while we are doing the invalidation, no page fault will attempt to
+	 * overwrite the same MTTs. Concurrent invalidations might race us,
+ * but they will write 0s as well, so no difference in the end result.
+ */
+ for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
+ idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
+ /*
+ * Strive to write the MTTs in chunks, but avoid overwriting
+		 * non-existing MTTs. The heuristic here can be improved to
+		 * estimate the cost of another UMR vs. the cost of a bigger
+		 * UMR.
+ */
+ if (umem_odp->dma_list[idx] &
+ (ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
+ if (!in_block) {
+ blk_start_idx = idx;
+ in_block = 1;
+ }
+
+ /* Count page invalidations */
+ invalidations += idx - blk_start_idx + 1;
+ } else {
+ u64 umr_offset = idx & umr_block_mask;
+
+ if (in_block && umr_offset == 0) {
+ mlx5r_umr_update_xlt(mr, blk_start_idx,
+ idx - blk_start_idx, 0,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ATOMIC);
+ in_block = 0;
+ }
+ }
+ }
+ if (in_block)
+ mlx5r_umr_update_xlt(mr, blk_start_idx,
+ idx - blk_start_idx + 1, 0,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ATOMIC);
+
+ mlx5_update_odp_stats(mr, invalidations, invalidations);
+
+ /*
+ * We are now sure that the device will not access the
+ * memory. We can safely unmap it, and mark it as dirty if
+ * needed.
+ */
+
+ ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
+
+ if (unlikely(!umem_odp->npages && mr->parent))
+ destroy_unused_implicit_child_mr(mr);
+out:
+ mutex_unlock(&umem_odp->umem_mutex);
+ return true;
+}
+
+const struct mmu_interval_notifier_ops mlx5_mn_ops = {
+ .invalidate = mlx5_ib_invalidate_range,
+};
+
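+/*
+ * Translate the HCA ODP capability bits into the ib_odp_caps reported to
+ * the IB core.
+ */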
+static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
+{
+ struct ib_odp_caps *caps = &dev->odp_caps;
+
+ memset(caps, 0, sizeof(*caps));
+
+ if (!MLX5_CAP_GEN(dev->mdev, pg) || !mlx5r_umr_can_load_pas(dev, 0))
+ return;
+
+ caps->general_caps = IB_ODP_SUPPORT;
+
+ if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
+ dev->odp_max_size = U64_MAX;
+ else
+ dev->odp_max_size = BIT_ULL(MLX5_MAX_UMR_SHIFT + PAGE_SHIFT);
+
+ if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+ caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+ if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.srq_receive))
+ caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.atomic))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+
+ if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.srq_receive))
+ caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.send))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.receive))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_RECV;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.write))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.read))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_READ;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.atomic))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_ATOMIC;
+
+ if (MLX5_CAP_ODP(dev->mdev, xrc_odp_caps.srq_receive))
+ caps->per_transport_caps.xrc_odp_caps |= IB_ODP_SUPPORT_SRQ_RECV;
+
+ if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
+ MLX5_CAP_GEN(dev->mdev, null_mkey) &&
+ MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+ !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
+ caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
+}
+
+static void mlx5_ib_page_fault_resume(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault,
+ int error)
+{
+ int wq_num = pfault->event_subtype == MLX5_PFAULT_SUBTYPE_WQE ?
+ pfault->wqe.wq_num : pfault->token;
+ u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {};
+ int err;
+
+ MLX5_SET(page_fault_resume_in, in, opcode, MLX5_CMD_OP_PAGE_FAULT_RESUME);
+ MLX5_SET(page_fault_resume_in, in, page_fault_type, pfault->type);
+ MLX5_SET(page_fault_resume_in, in, token, pfault->token);
+ MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
+ MLX5_SET(page_fault_resume_in, in, error, !!error);
+
+ err = mlx5_cmd_exec_in(dev->mdev, page_fault_resume, in);
+ if (err)
+ mlx5_ib_err(dev, "Failed to resolve the page fault on WQ 0x%x err %d\n",
+ wq_num, err);
+}
+
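+/*
+ * Create the child MR that backs one MLX5_IMR_MTT_SIZE slot of an implicit
+ * ODP MR, or return the existing one if another thread raced us. The
+ * returned MR carries a usecount reference for the caller.
+ */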
+static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
+ unsigned long idx)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(imr);
+ struct ib_umem_odp *odp;
+ struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *ret;
+ int err;
+
+ odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
+ idx * MLX5_IMR_MTT_SIZE,
+ MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
+ if (IS_ERR(odp))
+ return ERR_CAST(odp);
+
+ mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
+ imr->access_flags);
+ if (IS_ERR(mr)) {
+ ib_umem_odp_release(odp);
+ return mr;
+ }
+
+ mr->access_flags = imr->access_flags;
+ mr->ibmr.pd = imr->ibmr.pd;
+ mr->ibmr.device = &mr_to_mdev(imr)->ib_dev;
+ mr->umem = &odp->umem;
+ mr->ibmr.lkey = mr->mmkey.key;
+ mr->ibmr.rkey = mr->mmkey.key;
+ mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->parent = imr;
+ odp->private = mr;
+
+ /*
+	 * The first refcount is owned by the xarray and the second refcount
+	 * is returned to the caller.
+ */
+ refcount_set(&mr->mmkey.usecount, 2);
+
+ err = mlx5r_umr_update_xlt(mr, 0,
+ MLX5_IMR_MTT_ENTRIES,
+ PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto out_mr;
+ }
+
+ xa_lock(&imr->implicit_children);
+ ret = __xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
+ GFP_KERNEL);
+ if (unlikely(ret)) {
+ if (xa_is_err(ret)) {
+ ret = ERR_PTR(xa_err(ret));
+ goto out_lock;
+ }
+ /*
+ * Another thread beat us to creating the child mr, use
+ * theirs.
+ */
+ refcount_inc(&ret->mmkey.usecount);
+ goto out_lock;
+ }
+ xa_unlock(&imr->implicit_children);
+
+ mlx5_ib_dbg(mr_to_mdev(imr), "key %x mr %p\n", mr->mmkey.key, mr);
+ return mr;
+
+out_lock:
+ xa_unlock(&imr->implicit_children);
+out_mr:
+ mlx5_ib_dereg_mr(&mr->ibmr, NULL);
+ return ret;
+}
+
+struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
+ int access_flags)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
+ struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *imr;
+ int err;
+
+ if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
+ if (IS_ERR(umem_odp))
+ return ERR_CAST(umem_odp);
+
+ imr = mlx5_mr_cache_alloc(dev,
+ &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY],
+ access_flags);
+ if (IS_ERR(imr)) {
+ ib_umem_odp_release(umem_odp);
+ return imr;
+ }
+
+ imr->access_flags = access_flags;
+ imr->ibmr.pd = &pd->ibpd;
+ imr->ibmr.iova = 0;
+ imr->umem = &umem_odp->umem;
+ imr->ibmr.lkey = imr->mmkey.key;
+ imr->ibmr.rkey = imr->mmkey.key;
+ imr->ibmr.device = &dev->ib_dev;
+ imr->is_odp_implicit = true;
+ xa_init(&imr->implicit_children);
+
+ err = mlx5r_umr_update_xlt(imr, 0,
+ mlx5_imr_ksm_entries,
+ MLX5_KSM_PAGE_SHIFT,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ZAP |
+ MLX5_IB_UPD_XLT_ENABLE);
+ if (err)
+ goto out_mr;
+
+ err = mlx5r_store_odp_mkey(dev, &imr->mmkey);
+ if (err)
+ goto out_mr;
+
+ mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
+ return imr;
+out_mr:
+ mlx5_ib_err(dev, "Failed to register MKEY %d\n", err);
+ mlx5_ib_dereg_mr(&imr->ibmr, NULL);
+ return ERR_PTR(err);
+}
+
+void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_mr *mtt;
+ unsigned long idx;
+
+ /*
+ * If this is an implicit MR it is already invalidated so we can just
+ * delete the children mkeys.
+ */
+ xa_for_each(&mr->implicit_children, idx, mtt) {
+ xa_erase(&mr->implicit_children, idx);
+ mlx5_ib_dereg_mr(&mtt->ibmr, NULL);
+ }
+}
+
+#define MLX5_PF_FLAGS_DOWNGRADE BIT(1)
+#define MLX5_PF_FLAGS_SNAPSHOT BIT(2)
+#define MLX5_PF_FLAGS_ENABLE BIT(3)
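+
+/*
+ * Fault in the user pages of a direct (non-implicit) ODP MR and push the
+ * updated translation entries to the HW with a UMR.
+ */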
+static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp,
+ u64 user_va, size_t bcnt, u32 *bytes_mapped,
+ u32 flags)
+{
+ int page_shift, ret, np;
+ bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE;
+ u64 access_mask;
+ u64 start_idx;
+ bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT);
+ u32 xlt_flags = MLX5_IB_UPD_XLT_ATOMIC;
+
+ if (flags & MLX5_PF_FLAGS_ENABLE)
+ xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+
+ page_shift = odp->page_shift;
+ start_idx = (user_va - ib_umem_start(odp)) >> page_shift;
+ access_mask = ODP_READ_ALLOWED_BIT;
+
+ if (odp->umem.writable && !downgrade)
+ access_mask |= ODP_WRITE_ALLOWED_BIT;
+
+ np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
+ if (np < 0)
+ return np;
+
+ /*
+ * No need to check whether the MTTs really belong to this MR, since
+ * ib_umem_odp_map_dma_and_lock already checks this.
+ */
+ ret = mlx5r_umr_update_xlt(mr, start_idx, np, page_shift, xlt_flags);
+ mutex_unlock(&odp->umem_mutex);
+
+ if (ret < 0) {
+ if (ret != -EAGAIN)
+ mlx5_ib_err(mr_to_mdev(mr),
+ "Failed to update mkey page tables\n");
+ goto out;
+ }
+
+ if (bytes_mapped) {
+ u32 new_mappings = (np << page_shift) -
+ (user_va - round_down(user_va, 1 << page_shift));
+
+ *bytes_mapped += min_t(u32, new_mappings, bcnt);
+ }
+
+ return np << (page_shift - PAGE_SHIFT);
+
+out:
+ return ret;
+}
+
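+/*
+ * Resolve a fault on an implicit ODP MR: fault each child MR that intersects
+ * the range, creating children on demand, and update the parent KSM if any
+ * were added.
+ */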
+static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
+ struct ib_umem_odp *odp_imr, u64 user_va,
+ size_t bcnt, u32 *bytes_mapped, u32 flags)
+{
+ unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
+ unsigned long upd_start_idx = end_idx + 1;
+ unsigned long upd_len = 0;
+ unsigned long npages = 0;
+ int err;
+ int ret;
+
+ if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
+ mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
+ return -EFAULT;
+
+ /* Fault each child mr that intersects with our interval. */
+ while (bcnt) {
+ unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
+ struct ib_umem_odp *umem_odp;
+ struct mlx5_ib_mr *mtt;
+ u64 len;
+
+ xa_lock(&imr->implicit_children);
+ mtt = xa_load(&imr->implicit_children, idx);
+ if (unlikely(!mtt)) {
+ xa_unlock(&imr->implicit_children);
+ mtt = implicit_get_child_mr(imr, idx);
+ if (IS_ERR(mtt)) {
+ ret = PTR_ERR(mtt);
+ goto out;
+ }
+ upd_start_idx = min(upd_start_idx, idx);
+ upd_len = idx - upd_start_idx + 1;
+ } else {
+ refcount_inc(&mtt->mmkey.usecount);
+ xa_unlock(&imr->implicit_children);
+ }
+
+ umem_odp = to_ib_umem_odp(mtt->umem);
+ len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
+ user_va;
+
+ ret = pagefault_real_mr(mtt, umem_odp, user_va, len,
+ bytes_mapped, flags);
+
+ mlx5r_deref_odp_mkey(&mtt->mmkey);
+
+ if (ret < 0)
+ goto out;
+ user_va += len;
+ bcnt -= len;
+ npages += ret;
+ }
+
+ ret = npages;
+
+ /*
+ * Any time the implicit_children are changed we must perform an
+ * update of the xlt before exiting to ensure the HW and the
+ * implicit_children remains synchronized.
+ */
+out:
+ if (likely(!upd_len))
+ return ret;
+
+ /*
+	 * Note that this is not strictly ordered: the KSM is updated after
+	 * the implicit_children xarray, so a parallel page fault could see
+	 * an MR that is not yet visible in the KSM. This is similar to a
+	 * parallel page fault seeing an MR that is being concurrently removed
+ * from the KSM. Both of these improbable situations are resolved
+ * safely by resuming the HW and then taking another page fault. The
+ * next pagefault handler will see the new information.
+ */
+ mutex_lock(&odp_imr->umem_mutex);
+ err = mlx5r_umr_update_xlt(imr, upd_start_idx, upd_len, 0,
+ MLX5_IB_UPD_XLT_INDIRECT |
+ MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&odp_imr->umem_mutex);
+ if (err) {
+ mlx5_ib_err(mr_to_mdev(imr), "Failed to update PAS\n");
+ return err;
+ }
+ return ret;
+}
+
+static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
+ u32 *bytes_mapped, u32 flags)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
+ u32 xlt_flags = 0;
+ int err;
+ unsigned int page_size;
+
+ if (flags & MLX5_PF_FLAGS_ENABLE)
+ xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
+
+ dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+ err = ib_umem_dmabuf_map_pages(umem_dmabuf);
+ if (err) {
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+ return err;
+ }
+
+ page_size = mlx5_umem_find_best_pgsz(&umem_dmabuf->umem, mkc,
+ log_page_size, 0,
+ umem_dmabuf->umem.iova);
+ if (unlikely(page_size < PAGE_SIZE)) {
+ ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ err = -EINVAL;
+ } else {
+ err = mlx5r_umr_update_mr_pas(mr, xlt_flags);
+ }
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+
+ if (err)
+ return err;
+
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+
+ return ib_umem_num_pages(mr->umem);
+}
+
+/*
+ * Returns:
+ * -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
+ * not accessible, or the MR is no longer valid.
+ * -EAGAIN/-ENOMEM: The operation should be retried
+ *
+ * -EINVAL/others: General internal malfunction
+ * >0: Number of pages mapped
+ */
+static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
+ u32 *bytes_mapped, u32 flags)
+{
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+
+ if (unlikely(io_virt < mr->ibmr.iova))
+ return -EFAULT;
+
+ if (mr->umem->is_dmabuf)
+ return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
+
+ if (!odp->is_implicit_odp) {
+ u64 user_va;
+
+ if (check_add_overflow(io_virt - mr->ibmr.iova,
+ (u64)odp->umem.address, &user_va))
+ return -EFAULT;
+ if (unlikely(user_va >= ib_umem_end(odp) ||
+ ib_umem_end(odp) - user_va < bcnt))
+ return -EFAULT;
+ return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
+ flags);
+ }
+ return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
+ flags);
+}
+
+int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
+{
+ int ret;
+
+ ret = pagefault_real_mr(mr, to_ib_umem_odp(mr->umem), mr->umem->address,
+ mr->umem->length, NULL,
+ MLX5_PF_FLAGS_SNAPSHOT | MLX5_PF_FLAGS_ENABLE);
+ return ret >= 0 ? 0 : ret;
+}
+
+int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
+{
+ int ret;
+
+ ret = pagefault_dmabuf_mr(mr, mr->umem->length, NULL,
+ MLX5_PF_FLAGS_ENABLE);
+
+ return ret >= 0 ? 0 : ret;
+}
+
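+/*
+ * Stack frame used by pagefault_single_data_segment() to walk indirect mkeys
+ * (memory windows and DEVX indirect mkeys) without recursion.
+ */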
+struct pf_frame {
+ struct pf_frame *next;
+ u32 key;
+ u64 io_virt;
+ size_t bcnt;
+ int depth;
+};
+
+static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
+{
+ if (!mmkey)
+ return false;
+ if (mmkey->type == MLX5_MKEY_MW ||
+ mmkey->type == MLX5_MKEY_INDIRECT_DEVX)
+ return mlx5_base_mkey(mmkey->key) == mlx5_base_mkey(key);
+ return mmkey->key == key;
+}
+
+/*
+ * Handle a single data segment in a page-fault WQE or RDMA region.
+ *
+ * Returns number of OS pages retrieved on success. The caller may continue to
+ * the next data segment.
+ * Can return the following error codes:
+ * -EAGAIN to designate a temporary error. The caller will abort handling the
+ * page fault and resolve it.
+ * -EFAULT when there's an error mapping the requested pages. The caller will
+ * abort the page fault handling.
+ */
+static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
+ struct ib_pd *pd, u32 key,
+ u64 io_virt, size_t bcnt,
+ u32 *bytes_committed,
+ u32 *bytes_mapped)
+{
+ int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
+ struct pf_frame *head = NULL, *frame;
+ struct mlx5_ib_mkey *mmkey;
+ struct mlx5_ib_mr *mr;
+ struct mlx5_klm *pklm;
+ u32 *out = NULL;
+ size_t offset;
+
+ io_virt += *bytes_committed;
+ bcnt -= *bytes_committed;
+
+next_mr:
+ xa_lock(&dev->odp_mkeys);
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(key));
+ if (!mmkey) {
+ xa_unlock(&dev->odp_mkeys);
+ mlx5_ib_dbg(
+ dev,
+ "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+ key);
+ if (bytes_mapped)
+ *bytes_mapped += bcnt;
+ /*
+		 * The user could specify an SGL with multiple lkeys, only some
+		 * of which are ODP. Treat the non-ODP ones as fully
+ * faulted.
+ */
+ ret = 0;
+ goto end;
+ }
+ refcount_inc(&mmkey->usecount);
+ xa_unlock(&dev->odp_mkeys);
+
+ if (!mkey_is_eq(mmkey, key)) {
+ mlx5_ib_dbg(dev, "failed to find mkey %x\n", key);
+ ret = -EFAULT;
+ goto end;
+ }
+
+ switch (mmkey->type) {
+ case MLX5_MKEY_MR:
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+
+ ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
+ if (ret < 0)
+ goto end;
+
+ mlx5_update_odp_stats(mr, faults, ret);
+
+ npages += ret;
+ ret = 0;
+ break;
+
+ case MLX5_MKEY_MW:
+ case MLX5_MKEY_INDIRECT_DEVX:
+ if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
+ mlx5_ib_dbg(dev, "indirection level exceeded\n");
+ ret = -EFAULT;
+ goto end;
+ }
+
+ outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
+ sizeof(*pklm) * (mmkey->ndescs - 2);
+
+ if (outlen > cur_outlen) {
+ kfree(out);
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ cur_outlen = outlen;
+ }
+
+ pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
+ bsf0_klm0_pas_mtt0_1);
+
+ ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
+ if (ret)
+ goto end;
+
+ offset = io_virt - MLX5_GET64(query_mkey_out, out,
+ memory_key_mkey_entry.start_addr);
+
+ for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
+ if (offset >= be32_to_cpu(pklm->bcount)) {
+ offset -= be32_to_cpu(pklm->bcount);
+ continue;
+ }
+
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ frame->key = be32_to_cpu(pklm->key);
+ frame->io_virt = be64_to_cpu(pklm->va) + offset;
+ frame->bcnt = min_t(size_t, bcnt,
+ be32_to_cpu(pklm->bcount) - offset);
+ frame->depth = depth + 1;
+ frame->next = head;
+ head = frame;
+
+ bcnt -= frame->bcnt;
+ offset = 0;
+ }
+ break;
+
+ default:
+ mlx5_ib_dbg(dev, "wrong mkey type %d\n", mmkey->type);
+ ret = -EFAULT;
+ goto end;
+ }
+
+ if (head) {
+ frame = head;
+ head = frame->next;
+
+ key = frame->key;
+ io_virt = frame->io_virt;
+ bcnt = frame->bcnt;
+ depth = frame->depth;
+ kfree(frame);
+
+ mlx5r_deref_odp_mkey(mmkey);
+ goto next_mr;
+ }
+
+end:
+ if (mmkey)
+ mlx5r_deref_odp_mkey(mmkey);
+ while (head) {
+ frame = head;
+ head = frame->next;
+ kfree(frame);
+ }
+ kfree(out);
+
+ *bytes_committed = 0;
+ return ret ? ret : npages;
+}
+
+/*
+ * Parse a series of data segments for page fault handling.
+ *
+ * @dev: Pointer to mlx5 IB device
+ * @pfault: contains page fault information.
+ * @wqe: points at the first data segment in the WQE.
+ * @wqe_end: points after the end of the WQE.
+ * @bytes_mapped: receives the number of bytes that the function was able to
+ * map. This allows the caller to decide intelligently whether
+ * enough memory was mapped to resolve the page fault
+ * successfully (e.g. enough for the next MTU, or the entire
+ * WQE).
+ * @total_wqe_bytes: receives the total data size of this WQE in bytes (minus
+ * the committed bytes).
+ * @receive_queue: true if this is a receive-queue WQE (its scatter list may
+ *		   end with a zero-length sentinel entry)
+ *
+ * Returns the number of pages loaded if positive, zero for an empty WQE, or a
+ * negative error code.
+ */
+static int pagefault_data_segments(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault,
+ void *wqe,
+ void *wqe_end, u32 *bytes_mapped,
+ u32 *total_wqe_bytes, bool receive_queue)
+{
+ int ret = 0, npages = 0;
+ u64 io_virt;
+ u32 key;
+ u32 byte_count;
+ size_t bcnt;
+ int inline_segment;
+
+ if (bytes_mapped)
+ *bytes_mapped = 0;
+ if (total_wqe_bytes)
+ *total_wqe_bytes = 0;
+
+ while (wqe < wqe_end) {
+ struct mlx5_wqe_data_seg *dseg = wqe;
+
+ io_virt = be64_to_cpu(dseg->addr);
+ key = be32_to_cpu(dseg->lkey);
+ byte_count = be32_to_cpu(dseg->byte_count);
+ inline_segment = !!(byte_count & MLX5_INLINE_SEG);
+ bcnt = byte_count & ~MLX5_INLINE_SEG;
+
+ if (inline_segment) {
+ bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
+ wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
+ 16);
+ } else {
+ wqe += sizeof(*dseg);
+ }
+
+ /* receive WQE end of sg list. */
+ if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
+ io_virt == 0)
+ break;
+
+ if (!inline_segment && total_wqe_bytes) {
+ *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
+ pfault->bytes_committed);
+ }
+
+ /* A zero length data segment designates a length of 2GB. */
+ if (bcnt == 0)
+ bcnt = 1U << 31;
+
+ if (inline_segment || bcnt <= pfault->bytes_committed) {
+ pfault->bytes_committed -=
+ min_t(size_t, bcnt,
+ pfault->bytes_committed);
+ continue;
+ }
+
+ ret = pagefault_single_data_segment(dev, NULL, key,
+ io_virt, bcnt,
+ &pfault->bytes_committed,
+ bytes_mapped);
+ if (ret < 0)
+ break;
+ npages += ret;
+ }
+
+ return ret < 0 ? ret : npages;
+}
+
+/*
+ * Parse initiator WQE. Advances the wqe pointer to point at the
+ * scatter-gather list, and sets wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_initiator_pfault_handler(
+ struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault,
+ struct mlx5_ib_qp *qp, void **wqe, void **wqe_end, int wqe_length)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
+ u16 wqe_index = pfault->wqe.wqe_index;
+ struct mlx5_base_av *av;
+ unsigned ds, opcode;
+ u32 qpn = qp->trans_qp.base.mqp.qpn;
+
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
+ mlx5_ib_err(dev, "Unable to read the complete WQE. ds = 0x%x, ret = 0x%x\n",
+ ds, wqe_length);
+ return -EFAULT;
+ }
+
+ if (ds == 0) {
+ mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
+ wqe_index, qpn);
+ return -EFAULT;
+ }
+
+ *wqe_end = *wqe + ds * MLX5_WQE_DS_UNITS;
+ *wqe += sizeof(*ctrl);
+
+ opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
+ MLX5_WQE_CTRL_OPCODE_MASK;
+
+ if (qp->type == IB_QPT_XRC_INI)
+ *wqe += sizeof(struct mlx5_wqe_xrc_seg);
+
+ if (qp->type == IB_QPT_UD || qp->type == MLX5_IB_QPT_DCI) {
+ av = *wqe;
+ if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
+ *wqe += sizeof(struct mlx5_av);
+ else
+ *wqe += sizeof(struct mlx5_base_av);
+ }
+
+ switch (opcode) {
+ case MLX5_OPCODE_RDMA_WRITE:
+ case MLX5_OPCODE_RDMA_WRITE_IMM:
+ case MLX5_OPCODE_RDMA_READ:
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ break;
+ case MLX5_OPCODE_ATOMIC_CS:
+ case MLX5_OPCODE_ATOMIC_FA:
+ *wqe += sizeof(struct mlx5_wqe_raddr_seg);
+ *wqe += sizeof(struct mlx5_wqe_atomic_seg);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Parse responder WQE and set wqe_end to the end of the WQE.
+ */
+static int mlx5_ib_mr_responder_pfault_handler_srq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_srq *srq,
+ void **wqe, void **wqe_end,
+ int wqe_length)
+{
+ int wqe_size = 1 << srq->msrq.wqe_shift;
+
+ if (wqe_size > wqe_length) {
+ mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
+ return -EFAULT;
+ }
+
+ *wqe_end = *wqe + wqe_size;
+ *wqe += sizeof(struct mlx5_wqe_srq_next_seg);
+
+ return 0;
+}
+
+static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ void *wqe, void **wqe_end,
+ int wqe_length)
+{
+ struct mlx5_ib_wq *wq = &qp->rq;
+ int wqe_size = 1 << wq->wqe_shift;
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
+ mlx5_ib_err(dev, "ODP fault with WQE signatures is not supported\n");
+ return -EFAULT;
+ }
+
+ if (wqe_size > wqe_length) {
+ mlx5_ib_err(dev, "Couldn't read all of the receive WQE's content\n");
+ return -EFAULT;
+ }
+
+ *wqe_end = wqe + wqe_size;
+
+ return 0;
+}
+
+static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
+ u32 wq_num, int pf_type)
+{
+ struct mlx5_core_rsc_common *common = NULL;
+ struct mlx5_core_srq *srq;
+
+ switch (pf_type) {
+ case MLX5_WQE_PF_TYPE_RMP:
+ srq = mlx5_cmd_get_srq(dev, wq_num);
+ if (srq)
+ common = &srq->common;
+ break;
+ case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
+ case MLX5_WQE_PF_TYPE_RESP:
+ case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
+ common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
+ break;
+ default:
+ break;
+ }
+
+ return common;
+}
+
+static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
+{
+ struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
+
+ return to_mibqp(mqp);
+}
+
+static inline struct mlx5_ib_srq *res_to_srq(struct mlx5_core_rsc_common *res)
+{
+ struct mlx5_core_srq *msrq =
+ container_of(res, struct mlx5_core_srq, common);
+
+ return to_mibsrq(msrq);
+}
+
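+/*
+ * Handle a WQE page fault: read the faulting WQE from the QP or SRQ buffer,
+ * fault in the MRs referenced by its data segments, then resume the HW.
+ */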
+static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault)
+{
+ bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
+ u16 wqe_index = pfault->wqe.wqe_index;
+ void *wqe, *wqe_start = NULL, *wqe_end = NULL;
+ u32 bytes_mapped, total_wqe_bytes;
+ struct mlx5_core_rsc_common *res;
+ int resume_with_error = 1;
+ struct mlx5_ib_qp *qp;
+ size_t bytes_copied;
+ int ret = 0;
+
+ res = odp_get_rsc(dev, pfault->wqe.wq_num, pfault->type);
+ if (!res) {
+ mlx5_ib_dbg(dev, "wqe page fault for missing resource %d\n", pfault->wqe.wq_num);
+ return;
+ }
+
+ if (res->res != MLX5_RES_QP && res->res != MLX5_RES_SRQ &&
+ res->res != MLX5_RES_XSRQ) {
+ mlx5_ib_err(dev, "wqe page fault for unsupported type %d\n",
+ pfault->type);
+ goto resolve_page_fault;
+ }
+
+ wqe_start = (void *)__get_free_page(GFP_KERNEL);
+ if (!wqe_start) {
+ mlx5_ib_err(dev, "Error allocating memory for IO page fault handling.\n");
+ goto resolve_page_fault;
+ }
+
+ wqe = wqe_start;
+ qp = (res->res == MLX5_RES_QP) ? res_to_qp(res) : NULL;
+ if (qp && sq) {
+ ret = mlx5_ib_read_wqe_sq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
+ if (ret)
+ goto read_user;
+ ret = mlx5_ib_mr_initiator_pfault_handler(
+ dev, pfault, qp, &wqe, &wqe_end, bytes_copied);
+ } else if (qp && !sq) {
+ ret = mlx5_ib_read_wqe_rq(qp, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
+ if (ret)
+ goto read_user;
+ ret = mlx5_ib_mr_responder_pfault_handler_rq(
+ dev, qp, wqe, &wqe_end, bytes_copied);
+ } else if (!qp) {
+ struct mlx5_ib_srq *srq = res_to_srq(res);
+
+ ret = mlx5_ib_read_wqe_srq(srq, wqe_index, wqe, PAGE_SIZE,
+ &bytes_copied);
+ if (ret)
+ goto read_user;
+ ret = mlx5_ib_mr_responder_pfault_handler_srq(
+ dev, srq, &wqe, &wqe_end, bytes_copied);
+ }
+
+ if (ret < 0 || wqe >= wqe_end)
+ goto resolve_page_fault;
+
+ ret = pagefault_data_segments(dev, pfault, wqe, wqe_end, &bytes_mapped,
+ &total_wqe_bytes, !sq);
+ if (ret == -EAGAIN)
+ goto out;
+
+ if (ret < 0 || total_wqe_bytes > bytes_mapped)
+ goto resolve_page_fault;
+
+out:
+ ret = 0;
+ resume_with_error = 0;
+
+read_user:
+ if (ret)
+ mlx5_ib_err(
+ dev,
+ "Failed reading a WQE following page fault, error %d, wqe_index %x, qpn %x\n",
+ ret, wqe_index, pfault->token);
+
+resolve_page_fault:
+ mlx5_ib_page_fault_resume(dev, pfault, resume_with_error);
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, type: 0x%x\n",
+ pfault->wqe.wq_num, resume_with_error,
+ pfault->type);
+ mlx5_core_res_put(res);
+ free_page((unsigned long)wqe_start);
+}
+
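+/* Number of PAGE_SIZE pages spanned by [address, address + length). */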
+static int pages_in_range(u64 address, u32 length)
+{
+ return (ALIGN(address + length, PAGE_SIZE) -
+ (address & PAGE_MASK)) >> PAGE_SHIFT;
+}
+
+static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev,
+ struct mlx5_pagefault *pfault)
+{
+ u64 address;
+ u32 length;
+ u32 prefetch_len = pfault->bytes_committed;
+ int prefetch_activated = 0;
+ u32 rkey = pfault->rdma.r_key;
+ int ret;
+
+ /* The RDMA responder handler handles the page fault in two parts.
+ * First it brings the necessary pages for the current packet
+ * (and uses the pfault context), and then (after resuming the QP)
+ * prefetches more pages. The second operation cannot use the pfault
+ * context and therefore uses the dummy_pfault context allocated on
+ * the stack */
+ pfault->rdma.rdma_va += pfault->bytes_committed;
+ pfault->rdma.rdma_op_len -= min(pfault->bytes_committed,
+ pfault->rdma.rdma_op_len);
+ pfault->bytes_committed = 0;
+
+ address = pfault->rdma.rdma_va;
+ length = pfault->rdma.rdma_op_len;
+
+ /* For some operations, the hardware cannot tell the exact message
+ * length, and in those cases it reports zero. Use prefetch
+ * logic. */
+ if (length == 0) {
+ prefetch_activated = 1;
+ length = pfault->rdma.packet_size;
+ prefetch_len = min(MAX_PREFETCH_LEN, prefetch_len);
+ }
+
+ ret = pagefault_single_data_segment(dev, NULL, rkey, address, length,
+ &pfault->bytes_committed, NULL);
+ if (ret == -EAGAIN) {
+ /* We're racing with an invalidation, don't prefetch */
+ prefetch_activated = 0;
+ } else if (ret < 0 || pages_in_range(address, length) > ret) {
+ mlx5_ib_page_fault_resume(dev, pfault, 1);
+ if (ret != -ENOENT)
+ mlx5_ib_dbg(dev, "PAGE FAULT error %d. QP 0x%x, type: 0x%x\n",
+ ret, pfault->token, pfault->type);
+ return;
+ }
+
+ mlx5_ib_page_fault_resume(dev, pfault, 0);
+ mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x, type: 0x%x, prefetch_activated: %d\n",
+ pfault->token, pfault->type,
+ prefetch_activated);
+
+ /* At this point, there might be a new pagefault already arriving in
+ * the eq, switch to the dummy pagefault for the rest of the
+ * processing. We're still OK with the objects being alive as the
+ * work-queue is being fenced. */
+
+ if (prefetch_activated) {
+ u32 bytes_committed = 0;
+
+ ret = pagefault_single_data_segment(dev, NULL, rkey, address,
+ prefetch_len,
+ &bytes_committed, NULL);
+ if (ret < 0 && ret != -EAGAIN) {
+ mlx5_ib_dbg(dev, "Prefetch failed. ret: %d, QP 0x%x, address: 0x%.16llx, length = 0x%.16x\n",
+ ret, pfault->token, address, prefetch_len);
+ }
+ }
+}
+
+static void mlx5_ib_pfault(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault)
+{
+ u8 event_subtype = pfault->event_subtype;
+
+ switch (event_subtype) {
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ mlx5_ib_mr_wqe_pfault_handler(dev, pfault);
+ break;
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ mlx5_ib_mr_rdma_pfault_handler(dev, pfault);
+ break;
+ default:
+ mlx5_ib_err(dev, "Invalid page fault event subtype: 0x%x\n",
+ event_subtype);
+ mlx5_ib_page_fault_resume(dev, pfault, 1);
+ }
+}
+
+static void mlx5_ib_eqe_pf_action(struct work_struct *work)
+{
+ struct mlx5_pagefault *pfault = container_of(work,
+ struct mlx5_pagefault,
+ work);
+ struct mlx5_ib_pf_eq *eq = pfault->eq;
+
+ mlx5_ib_pfault(eq->dev, pfault);
+ mempool_free(pfault, eq->pool);
+}
+
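+/*
+ * Drain pending page-fault EQEs: decode each one into a mlx5_pagefault taken
+ * from the mempool and queue it to the page-fault workqueue.
+ */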
+static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
+{
+ struct mlx5_eqe_page_fault *pf_eqe;
+ struct mlx5_pagefault *pfault;
+ struct mlx5_eqe *eqe;
+ int cc = 0;
+
+ while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {
+ pfault = mempool_alloc(eq->pool, GFP_ATOMIC);
+ if (!pfault) {
+ schedule_work(&eq->work);
+ break;
+ }
+
+ pf_eqe = &eqe->data.page_fault;
+ pfault->event_subtype = eqe->sub_type;
+ pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
+
+ mlx5_ib_dbg(eq->dev,
+ "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
+ eqe->sub_type, pfault->bytes_committed);
+
+ switch (eqe->sub_type) {
+ case MLX5_PFAULT_SUBTYPE_RDMA:
+ /* RDMA based event */
+ pfault->type =
+ be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
+ pfault->token =
+ be32_to_cpu(pf_eqe->rdma.pftype_token) &
+ MLX5_24BIT_MASK;
+ pfault->rdma.r_key =
+ be32_to_cpu(pf_eqe->rdma.r_key);
+ pfault->rdma.packet_size =
+ be16_to_cpu(pf_eqe->rdma.packet_length);
+ pfault->rdma.rdma_op_len =
+ be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+ pfault->rdma.rdma_va =
+ be64_to_cpu(pf_eqe->rdma.rdma_va);
+ mlx5_ib_dbg(eq->dev,
+ "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
+ pfault->type, pfault->token,
+ pfault->rdma.r_key);
+ mlx5_ib_dbg(eq->dev,
+ "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
+ pfault->rdma.rdma_op_len,
+ pfault->rdma.rdma_va);
+ break;
+
+ case MLX5_PFAULT_SUBTYPE_WQE:
+ /* WQE based event */
+ pfault->type =
+ (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
+ pfault->token =
+ be32_to_cpu(pf_eqe->wqe.token);
+ pfault->wqe.wq_num =
+ be32_to_cpu(pf_eqe->wqe.pftype_wq) &
+ MLX5_24BIT_MASK;
+ pfault->wqe.wqe_index =
+ be16_to_cpu(pf_eqe->wqe.wqe_index);
+ pfault->wqe.packet_size =
+ be16_to_cpu(pf_eqe->wqe.packet_length);
+ mlx5_ib_dbg(eq->dev,
+ "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
+ pfault->type, pfault->token,
+ pfault->wqe.wq_num,
+ pfault->wqe.wqe_index);
+ break;
+
+ default:
+ mlx5_ib_warn(eq->dev,
+ "Unsupported page fault event sub-type: 0x%02hhx\n",
+ eqe->sub_type);
+ /* Unsupported page faults should still be
+ * resolved by the page fault handler
+ */
+ }
+
+ pfault->eq = eq;
+ INIT_WORK(&pfault->work, mlx5_ib_eqe_pf_action);
+ queue_work(eq->wq, &pfault->work);
+
+ cc = mlx5_eq_update_cc(eq->core, ++cc);
+ }
+
+ mlx5_eq_update_ci(eq->core, cc, 1);
+}
+
+static int mlx5_ib_eq_pf_int(struct notifier_block *nb, unsigned long type,
+ void *data)
+{
+ struct mlx5_ib_pf_eq *eq =
+ container_of(nb, struct mlx5_ib_pf_eq, irq_nb);
+ unsigned long flags;
+
+ if (spin_trylock_irqsave(&eq->lock, flags)) {
+ mlx5_ib_eq_pf_process(eq);
+ spin_unlock_irqrestore(&eq->lock, flags);
+ } else {
+ schedule_work(&eq->work);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* mempool_refill() was proposed but unfortunately wasn't accepted
+ * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
+ * Cheap workaround.
+ */
+static void mempool_refill(mempool_t *pool)
+{
+ while (pool->curr_nr < pool->min_nr)
+ mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
+}
+
+static void mlx5_ib_eq_pf_action(struct work_struct *work)
+{
+ struct mlx5_ib_pf_eq *eq =
+ container_of(work, struct mlx5_ib_pf_eq, work);
+
+ mempool_refill(eq->pool);
+
+ spin_lock_irq(&eq->lock);
+ mlx5_ib_eq_pf_process(eq);
+ spin_unlock_irq(&eq->lock);
+}
+
+enum {
+ MLX5_IB_NUM_PF_EQE = 0x1000,
+ MLX5_IB_NUM_PF_DRAIN = 64,
+};
+
+int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+{
+ struct mlx5_eq_param param = {};
+ int err = 0;
+
+ mutex_lock(&dev->odp_eq_mutex);
+ if (eq->core)
+ goto unlock;
+ INIT_WORK(&eq->work, mlx5_ib_eq_pf_action);
+ spin_lock_init(&eq->lock);
+ eq->dev = dev;
+
+ eq->pool = mempool_create_kmalloc_pool(MLX5_IB_NUM_PF_DRAIN,
+ sizeof(struct mlx5_pagefault));
+ if (!eq->pool) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ eq->wq = alloc_workqueue("mlx5_ib_page_fault",
+ WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM,
+ MLX5_NUM_CMD_EQE);
+ if (!eq->wq) {
+ err = -ENOMEM;
+ goto err_mempool;
+ }
+
+ eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
+ param = (struct mlx5_eq_param) {
+ .nent = MLX5_IB_NUM_PF_EQE,
+ };
+ param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
+ eq->core = mlx5_eq_create_generic(dev->mdev, &param);
+ if (IS_ERR(eq->core)) {
+ err = PTR_ERR(eq->core);
+ goto err_wq;
+ }
+ err = mlx5_eq_enable(dev->mdev, eq->core, &eq->irq_nb);
+ if (err) {
+ mlx5_ib_err(dev, "failed to enable odp EQ %d\n", err);
+ goto err_eq;
+ }
+
+ mutex_unlock(&dev->odp_eq_mutex);
+ return 0;
+err_eq:
+ mlx5_eq_destroy_generic(dev->mdev, eq->core);
+err_wq:
+ eq->core = NULL;
+ destroy_workqueue(eq->wq);
+err_mempool:
+ mempool_destroy(eq->pool);
+unlock:
+ mutex_unlock(&dev->odp_eq_mutex);
+ return err;
+}
+
+static int
+mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
+{
+ int err;
+
+ if (!eq->core)
+ return 0;
+ mlx5_eq_disable(dev->mdev, eq->core, &eq->irq_nb);
+ err = mlx5_eq_destroy_generic(dev->mdev, eq->core);
+ cancel_work_sync(&eq->work);
+ destroy_workqueue(eq->wq);
+ mempool_destroy(eq->pool);
+
+ return err;
+}
+
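+/*
+ * Configure the MR cache entries used by implicit ODP: MTT child MRs and the
+ * top-level KSM mkey.
+ */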
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
+{
+ if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+ return;
+
+ switch (ent->order - 2) {
+ case MLX5_IMR_MTT_CACHE_ENTRY:
+ ent->page = PAGE_SHIFT;
+ ent->ndescs = MLX5_IMR_MTT_ENTRIES;
+ ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
+ ent->limit = 0;
+ break;
+
+ case MLX5_IMR_KSM_CACHE_ENTRY:
+ ent->page = MLX5_KSM_PAGE_SHIFT;
+ ent->ndescs = mlx5_imr_ksm_entries;
+ ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
+ ent->limit = 0;
+ break;
+ }
+}
+
+static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
+ .advise_mr = mlx5_ib_advise_mr,
+};
+
+int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
+{
+ int ret = 0;
+
+ internal_fill_odp_caps(dev);
+
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+ return ret;
+
+ ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
+
+ if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
+ ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
+ if (ret) {
+ mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
+ return ret;
+ }
+ }
+
+ mutex_init(&dev->odp_eq_mutex);
+ return ret;
+}
+
+void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
+{
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
+ return;
+
+ mlx5_ib_odp_destroy_eq(dev, &dev->odp_pf_eq);
+}
+
+int mlx5_ib_odp_init(void)
+{
+ mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
+ MLX5_IMR_MTT_BITS);
+
+ return 0;
+}
+
+struct prefetch_mr_work {
+ struct work_struct work;
+ u32 pf_flags;
+ u32 num_sge;
+ struct {
+ u64 io_virt;
+ struct mlx5_ib_mr *mr;
+ size_t length;
+ } frags[];
+};
+
+static void destroy_prefetch_work(struct prefetch_mr_work *work)
+{
+ u32 i;
+
+ for (i = 0; i < work->num_sge; ++i)
+ mlx5r_deref_odp_mkey(&work->frags[i].mr->mmkey);
+
+ kvfree(work);
+}
+
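+/*
+ * Look up the MR for @lkey and check that it may be prefetched under @advice
+ * (same PD, writable when prefetching for write); takes a usecount reference
+ * on success.
+ */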
+static struct mlx5_ib_mr *
+get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 lkey)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_mr *mr = NULL;
+ struct mlx5_ib_mkey *mmkey;
+
+ xa_lock(&dev->odp_mkeys);
+ mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
+ if (!mmkey || mmkey->key != lkey) {
+ mr = ERR_PTR(-ENOENT);
+ goto end;
+ }
+ if (mmkey->type != MLX5_MKEY_MR) {
+ mr = ERR_PTR(-EINVAL);
+ goto end;
+ }
+
+ mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
+
+ if (mr->ibmr.pd != pd) {
+ mr = ERR_PTR(-EPERM);
+ goto end;
+ }
+
+ /* prefetch with write-access must be supported by the MR */
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
+ !mr->umem->writable) {
+ mr = ERR_PTR(-EPERM);
+ goto end;
+ }
+
+ refcount_inc(&mmkey->usecount);
+end:
+ xa_unlock(&dev->odp_mkeys);
+ return mr;
+}
+
+static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
+{
+ struct prefetch_mr_work *work =
+ container_of(w, struct prefetch_mr_work, work);
+ u32 bytes_mapped = 0;
+ int ret;
+ u32 i;
+
+	/* We rely on IB/core to execute this work only when num_sge != 0. */
+ WARN_ON(!work->num_sge);
+ for (i = 0; i < work->num_sge; ++i) {
+ ret = pagefault_mr(work->frags[i].mr, work->frags[i].io_virt,
+ work->frags[i].length, &bytes_mapped,
+ work->pf_flags);
+ if (ret <= 0)
+ continue;
+ mlx5_update_odp_stats(work->frags[i].mr, prefetch, ret);
+ }
+
+ destroy_prefetch_work(work);
+}
+
+static int init_prefetch_work(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct prefetch_mr_work *work,
+ struct ib_sge *sg_list, u32 num_sge)
+{
+ u32 i;
+
+ INIT_WORK(&work->work, mlx5_ib_prefetch_mr_work);
+ work->pf_flags = pf_flags;
+
+ for (i = 0; i < num_sge; ++i) {
+ struct mlx5_ib_mr *mr;
+
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (IS_ERR(mr)) {
+ work->num_sge = i;
+ return PTR_ERR(mr);
+ }
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr = mr;
+ }
+ work->num_sge = num_sge;
+ return 0;
+}
+
+static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 pf_flags, struct ib_sge *sg_list,
+ u32 num_sge)
+{
+ u32 bytes_mapped = 0;
+ int ret = 0;
+ u32 i;
+
+ for (i = 0; i < num_sge; ++i) {
+ struct mlx5_ib_mr *mr;
+
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
+ ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
+ &bytes_mapped, pf_flags);
+ if (ret < 0) {
+ mlx5r_deref_odp_mkey(&mr->mmkey);
+ return ret;
+ }
+ mlx5_update_odp_stats(mr, prefetch, ret);
+ mlx5r_deref_odp_mkey(&mr->mmkey);
+ }
+
+ return 0;
+}
+
+int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
+ enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge)
+{
+ u32 pf_flags = 0;
+ struct prefetch_mr_work *work;
+ int rc;
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
+ pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
+
+ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT)
+ pf_flags |= MLX5_PF_FLAGS_SNAPSHOT;
+
+ if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH)
+ return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list,
+ num_sge);
+
+ work = kvzalloc(struct_size(work, frags, num_sge), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
+ if (rc) {
+ destroy_prefetch_work(work);
+ return rc;
+ }
+ queue_work(system_unbound_wq, &work->work);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/qos.c b/drivers/infiniband/hw/mlx5/qos.c
new file mode 100644
index 000000000..dce925541
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qos.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
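+/*
+ * Packet pacing objects are exposed only when the device supports QoS,
+ * packet pacing and per-UID packet pacing indexes.
+ */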
+static bool pp_is_supported(struct ib_device *device)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+
+ return (MLX5_CAP_GEN(dev->mdev, qos) &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing) &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing_uid));
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
+ struct uverbs_attr_bundle *attrs)
+{
+ u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_pp *pp_entry;
+ void *in_ctx;
+ u16 uid;
+ int inlen;
+ u32 flags;
+ int err;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ /* The allocated entry can be used only by a DEVX context */
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ dev = to_mdev(c->ibucontext.device);
+ pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL);
+ if (!pp_entry)
+ return -ENOMEM;
+
+ in_ctx = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
+ inlen = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
+ memcpy(rl_raw, in_ctx, inlen);
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX);
+ if (err)
+ goto err;
+
+ uid = (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX) ?
+ c->devx_uid : MLX5_SHARED_RESOURCE_UID;
+
+ err = mlx5_rl_add_rate_raw(dev->mdev, rl_raw, uid,
+ (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX),
+ &pp_entry->index);
+ if (err)
+ goto err;
+
+ pp_entry->mdev = dev->mdev;
+ uobj->object = pp_entry;
+ uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
+
+ err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ &pp_entry->index, sizeof(pp_entry->index));
+ return err;
+
+err:
+ kfree(pp_entry);
+ return err;
+}
+
+static int pp_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_pp *pp_entry = uobject->object;
+
+ mlx5_rl_remove_rate_raw(pp_entry->mdev, pp_entry->index);
+ kfree(pp_entry);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_PP_OBJ_ALLOC,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE,
+ MLX5_IB_OBJECT_PP,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(
+ MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
+ UVERBS_ATTR_SIZE(1,
+ MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
+ enum mlx5_ib_uapi_pp_alloc_flags,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
+ UVERBS_ATTR_TYPE(u16),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+ MLX5_IB_METHOD_PP_OBJ_DESTROY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE,
+ MLX5_IB_OBJECT_PP,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_PP,
+ UVERBS_TYPE_ALLOC_IDR(pp_obj_cleanup),
+ &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_ALLOC),
+ &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_DESTROY));
+
+const struct uapi_definition mlx5_ib_qos_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+ MLX5_IB_OBJECT_PP,
+ UAPI_DEF_IS_OBJ_SUPPORTED(pp_is_supported)),
+ {},
+};
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
new file mode 100644
index 000000000..e0df3017e
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -0,0 +1,5752 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/rdma_counter.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_ib.h"
+#include "ib_rep.h"
+#include "counters.h"
+#include "cmd.h"
+#include "umr.h"
+#include "qp.h"
+#include "wr.h"
+
+enum {
+ MLX5_IB_ACK_REQ_FREQ = 8,
+};
+
+enum {
+ MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
+ MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
+ MLX5_IB_LINK_TYPE_IB = 0,
+ MLX5_IB_LINK_TYPE_ETH = 1
+};
+
+enum raw_qp_set_mask_map {
+ MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
+ MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
+};
+
+struct mlx5_modify_raw_qp_param {
+ u16 operation;
+
+ u32 set_mask; /* raw_qp_set_mask_map */
+
+ struct mlx5_rate_limit rl;
+
+ u8 rq_q_ctr_id;
+ u32 port;
+};
+
+static void get_cqs(enum ib_qp_type qp_type,
+ struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
+ struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
+
+static int is_qp0(enum ib_qp_type qp_type)
+{
+ return qp_type == IB_QPT_SMI;
+}
+
+static int is_sqp(enum ib_qp_type qp_type)
+{
+ return is_qp0(qp_type) || is_qp1(qp_type);
+}
+
+/**
+ * mlx5_ib_read_user_wqe_common() - Copy a WQE (or part of one) from a user
+ * WQ to a kernel buffer
+ *
+ * @umem: User space memory where the WQ is
+ * @buffer: buffer to copy to
+ * @buflen: buffer length
+ * @wqe_index: index of WQE to copy from
+ * @wq_offset: offset to start of WQ
+ * @wq_wqe_cnt: number of WQEs in WQ
+ * @wq_wqe_shift: log2 of WQE size
+ * @bcnt: number of bytes to copy
+ * @bytes_copied: number of bytes actually copied (output)
+ *
+ * Copies at most bcnt bytes from the start of the WQE.
+ * Does not guarantee that the entire WQE is copied.
+ *
+ * Return: zero on success, or an error code.
+ */
+static int mlx5_ib_read_user_wqe_common(struct ib_umem *umem, void *buffer,
+ size_t buflen, int wqe_index,
+ int wq_offset, int wq_wqe_cnt,
+ int wq_wqe_shift, int bcnt,
+ size_t *bytes_copied)
+{
+ size_t offset = wq_offset + ((wqe_index % wq_wqe_cnt) << wq_wqe_shift);
+ size_t wq_end = wq_offset + (wq_wqe_cnt << wq_wqe_shift);
+ size_t copy_length;
+ int ret;
+
+ /* don't copy more than requested, more than buffer length or
+ * beyond WQ end
+ */
+ copy_length = min_t(u32, buflen, wq_end - offset);
+ copy_length = min_t(u32, copy_length, bcnt);
+
+ ret = ib_umem_copy_from(buffer, umem, offset, copy_length);
+ if (ret)
+ return ret;
+
+	if (bytes_copied)
+		*bytes_copied = copy_length;
+
+ return 0;
+}
+
+static int mlx5_ib_read_kernel_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ size_t bytes_copied = 0;
+ size_t wqe_length;
+ void *p;
+ int ds;
+
+ wqe_index = wqe_index & qp->sq.fbc.sz_m1;
+
+ /* read the control segment first */
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ ctrl = p;
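+	/* the DS field holds the WQE size in 16-byte units */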
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+	/* read the rest of the WQE if it spans more than one stride */
+ while (bytes_copied < wqe_length) {
+ size_t copy_length =
+ min_t(size_t, buflen - bytes_copied, MLX5_SEND_WQE_BB);
+
+ if (!copy_length)
+ break;
+
+ memcpy(buffer + bytes_copied, p, copy_length);
+ bytes_copied += copy_length;
+
+ wqe_index = (wqe_index + 1) & qp->sq.fbc.sz_m1;
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, wqe_index);
+ }
+ *bc = bytes_copied;
+ return 0;
+}
+
+static int mlx5_ib_read_user_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->sq;
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ size_t bytes_copied;
+ size_t bytes_copied2;
+ size_t wqe_length;
+ int ret;
+ int ds;
+
+	/* first, read as much as possible */
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
+ &bytes_copied);
+ if (ret)
+ return ret;
+
+ /* we need at least control segment size to proceed */
+ if (bytes_copied < sizeof(*ctrl))
+ return -EINVAL;
+
+ ctrl = buffer;
+ ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
+ wqe_length = ds * MLX5_WQE_DS_UNITS;
+
+ /* if we copied enough then we are done */
+ if (bytes_copied >= wqe_length) {
+ *bc = bytes_copied;
+ return 0;
+ }
+
+	/* otherwise this is a wrapped-around WQE,
+	 * so read the remaining bytes starting
+	 * from wqe_index 0
+	 */
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer + bytes_copied,
+ buflen - bytes_copied, 0, wq->offset,
+ wq->wqe_cnt, wq->wqe_shift,
+ wqe_length - bytes_copied,
+ &bytes_copied2);
+
+ if (ret)
+ return ret;
+ *bc = bytes_copied + bytes_copied2;
+ return 0;
+}
+
+int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+
+ if (buflen < sizeof(struct mlx5_wqe_ctrl_seg))
+ return -EINVAL;
+
+ if (!umem)
+ return mlx5_ib_read_kernel_wqe_sq(qp, wqe_index, buffer,
+ buflen, bc);
+
+ return mlx5_ib_read_user_wqe_sq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->rq;
+ size_t bytes_copied;
+ int ret;
+
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index,
+ wq->offset, wq->wqe_cnt,
+ wq->wqe_shift, buflen,
+ &bytes_copied);
+
+ if (ret)
+ return ret;
+ *bc = bytes_copied;
+ return 0;
+}
+
+int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct ib_umem *umem = base->ubuffer.umem;
+ struct mlx5_ib_wq *wq = &qp->rq;
+ size_t wqe_size = 1 << wq->wqe_shift;
+
+ if (buflen < wqe_size)
+ return -EINVAL;
+
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_rq(qp, wqe_index, buffer, buflen, bc);
+}
+
+static int mlx5_ib_read_user_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index,
+ void *buffer, size_t buflen, size_t *bc)
+{
+ struct ib_umem *umem = srq->umem;
+ size_t bytes_copied;
+ int ret;
+
+ ret = mlx5_ib_read_user_wqe_common(umem, buffer, buflen, wqe_index, 0,
+ srq->msrq.max, srq->msrq.wqe_shift,
+ buflen, &bytes_copied);
+
+ if (ret)
+ return ret;
+ *bc = bytes_copied;
+ return 0;
+}
+
+int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
+ size_t buflen, size_t *bc)
+{
+ struct ib_umem *umem = srq->umem;
+ size_t wqe_size = 1 << srq->msrq.wqe_shift;
+
+ if (buflen < wqe_size)
+ return -EINVAL;
+
+ if (!umem)
+ return -EOPNOTSUPP;
+
+ return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
+}
+
+static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
+{
+ struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+ struct ib_event event;
+
+ if (type == MLX5_EVENT_TYPE_PATH_MIG) {
+ /* This event is only valid for trans_qps */
+ to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
+ }
+
+ if (ibqp->event_handler) {
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ switch (type) {
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case MLX5_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
+ return;
+ }
+
+ ibqp->event_handler(&event, ibqp->qp_context);
+ }
+}
+
+static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
+ int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
+{
+ int wqe_size;
+ int wq_size;
+
+ /* Sanity check RQ size before proceeding */
+ if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
+ return -EINVAL;
+
+ if (!has_rq) {
+ qp->rq.max_gs = 0;
+ qp->rq.wqe_cnt = 0;
+ qp->rq.wqe_shift = 0;
+ cap->max_recv_wr = 0;
+ cap->max_recv_sge = 0;
+ } else {
+ int wq_sig = !!(qp->flags_en & MLX5_QP_FLAG_SIGNATURE);
+
+ if (ucmd) {
+ qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+ if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+ return -EINVAL;
+ qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+ if ((1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) <
+ wq_sig)
+ return -EINVAL;
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
+ qp->rq.max_post = qp->rq.wqe_cnt;
+ } else {
+ wqe_size =
+ wq_sig ? sizeof(struct mlx5_wqe_signature_seg) :
+ 0;
+ wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
+ wqe_size = roundup_pow_of_two(wqe_size);
+ wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
+ wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
+ qp->rq.wqe_cnt = wq_size / wqe_size;
+ if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
+ mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
+ wqe_size,
+ MLX5_CAP_GEN(dev->mdev,
+ max_wqe_sz_rq));
+ return -EINVAL;
+ }
+ qp->rq.wqe_shift = ilog2(wqe_size);
+ qp->rq.max_gs =
+ (1 << qp->rq.wqe_shift) /
+ sizeof(struct mlx5_wqe_data_seg) -
+ wq_sig;
+ qp->rq.max_post = qp->rq.wqe_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static int sq_overhead(struct ib_qp_init_attr *attr)
+{
+ int size = 0;
+
+ switch (attr->qp_type) {
+ case IB_QPT_XRC_INI:
+ size += sizeof(struct mlx5_wqe_xrc_seg);
+ fallthrough;
+ case IB_QPT_RC:
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
+ max(sizeof(struct mlx5_wqe_atomic_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg),
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg) +
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD /
+ MLX5_IB_UMR_OCTOWORD);
+ break;
+
+ case IB_QPT_XRC_TGT:
+ return 0;
+
+ case IB_QPT_UC:
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
+ max(sizeof(struct mlx5_wqe_raddr_seg),
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg));
+ break;
+
+ case IB_QPT_UD:
+ if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+ size += sizeof(struct mlx5_wqe_eth_pad) +
+ sizeof(struct mlx5_wqe_eth_seg);
+ fallthrough;
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_datagram_seg);
+ break;
+
+ case MLX5_IB_QPT_REG_UMR:
+ size += sizeof(struct mlx5_wqe_ctrl_seg) +
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return size;
+}
+
+static int calc_send_wqe(struct ib_qp_init_attr *attr)
+{
+ int inl_size = 0;
+ int size;
+
+ size = sq_overhead(attr);
+ if (size < 0)
+ return size;
+
+ if (attr->cap.max_inline_data) {
+ inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
+ attr->cap.max_inline_data;
+ }
+
+ size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
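+	/* integrity-enabled QPs must leave room for a full signature WQE */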
+ if (attr->create_flags & IB_QP_CREATE_INTEGRITY_EN &&
+ ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
+ return MLX5_SIG_WQE_SIZE;
+ else
+ return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
+}
+
+static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
+{
+ int max_sge;
+
+ if (attr->qp_type == IB_QPT_RC)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else if (attr->qp_type == IB_QPT_XRC_INI)
+ max_sge = (min_t(int, wqe_size, 512) -
+ sizeof(struct mlx5_wqe_ctrl_seg) -
+ sizeof(struct mlx5_wqe_xrc_seg) -
+ sizeof(struct mlx5_wqe_raddr_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ else
+ max_sge = (wqe_size - sq_overhead(attr)) /
+ sizeof(struct mlx5_wqe_data_seg);
+
+ return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
+ sizeof(struct mlx5_wqe_data_seg));
+}
+
+static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ struct mlx5_ib_qp *qp)
+{
+ int wqe_size;
+ int wq_size;
+
+ if (!attr->cap.max_send_wr)
+ return 0;
+
+ wqe_size = calc_send_wqe(attr);
+ mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
+ if (wqe_size < 0)
+ return wqe_size;
+
+ if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
+ mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+ wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
+ return -EINVAL;
+ }
+
+ qp->max_inline_data = wqe_size - sq_overhead(attr) -
+ sizeof(struct mlx5_wqe_inline_seg);
+ attr->cap.max_inline_data = qp->max_inline_data;
+
+ wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
+ qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+ if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
+ mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
+ attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
+ qp->sq.wqe_cnt,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
+ return -ENOMEM;
+ }
+ qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
+ qp->sq.max_gs = get_send_sge(attr, wqe_size);
+ if (qp->sq.max_gs < attr->cap.max_send_sge)
+ return -ENOMEM;
+
+ attr->cap.max_send_sge = qp->sq.max_gs;
+ qp->sq.max_post = wq_size / wqe_size;
+ attr->cap.max_send_wr = qp->sq.max_post;
+
+ return wq_size;
+}
+
+static int set_user_buf_size(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_ib_create_qp *ucmd,
+ struct mlx5_ib_qp_base *base,
+ struct ib_qp_init_attr *attr)
+{
+ int desc_sz = 1 << qp->sq.wqe_shift;
+
+ if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
+ mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
+ desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
+ return -EINVAL;
+ }
+
+ if (ucmd->sq_wqe_count && !is_power_of_2(ucmd->sq_wqe_count)) {
+ mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
+ ucmd->sq_wqe_count);
+ return -EINVAL;
+ }
+
+ qp->sq.wqe_cnt = ucmd->sq_wqe_count;
+
+ if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
+ mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
+ qp->sq.wqe_cnt,
+ 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
+ return -EINVAL;
+ }
+
+ if (attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
+ qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
+ } else {
+ base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+ (qp->sq.wqe_cnt << 6);
+ }
+
+ return 0;
+}
+
+static int qp_has_rq(struct ib_qp_init_attr *attr)
+{
+ if (attr->qp_type == IB_QPT_XRC_INI ||
+ attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
+ attr->qp_type == MLX5_IB_QPT_REG_UMR ||
+ !attr->cap.max_recv_wr)
+ return 0;
+
+ return 1;
+}
+
+enum {
+	/* this is the first blue flame register in the array of bfregs assigned
+	 * to a process. Since we do not use it for blue flame but rather for
+	 * regular 64-bit doorbells, we do not need a lock for maintaining
+	 * "odd/even" order
+	 */
+ NUM_NON_BLUE_FLAME_BFREGS = 1,
+};
+
+static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
+{
+ return get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
+ bfregi->num_static_sys_pages * MLX5_NON_FP_BFREGS_PER_UAR;
+}
+
+static int num_med_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
+{
+ int n;
+
+ n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
+ NUM_NON_BLUE_FLAME_BFREGS;
+
+ return n >= 0 ? n : 0;
+}
+
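+/* bfreg 0 is reserved for regular doorbells (see NUM_NON_BLUE_FLAME_BFREGS),
+ * so the medium-latency class starts at index 1 when any medium bfregs exist.
+ */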
+static int first_med_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
+{
+ return num_med_bfreg(dev, bfregi) ? 1 : -ENOMEM;
+}
+
+static int first_hi_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
+{
+ int med;
+
+ med = num_med_bfreg(dev, bfregi);
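+	/* high-class bfregs follow immediately after the medium-class range */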
+ return ++med;
+}
+
+static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
+{
+ int i;
+
+ for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
+ if (!bfregi->count[i]) {
+ bfregi->count[i]++;
+ return i;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
+{
+ int minidx = first_med_bfreg(dev, bfregi);
+ int i;
+
+ if (minidx < 0)
+ return minidx;
+
+ for (i = minidx; i < first_hi_bfreg(dev, bfregi); i++) {
+ if (bfregi->count[i] < bfregi->count[minidx])
+ minidx = i;
+ if (!bfregi->count[minidx])
+ break;
+ }
+
+ bfregi->count[minidx]++;
+ return minidx;
+}
+
+static int alloc_bfreg(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi)
+{
+ int bfregn = -ENOMEM;
+
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
+ mutex_lock(&bfregi->lock);
+ if (bfregi->ver >= 2) {
+ bfregn = alloc_high_class_bfreg(dev, bfregi);
+ if (bfregn < 0)
+ bfregn = alloc_med_class_bfreg(dev, bfregi);
+ }
+
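+	/* fall back to the shared, non blue-flame bfreg 0 */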
+ if (bfregn < 0) {
+ BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
+ bfregn = 0;
+ bfregi->count[bfregn]++;
+ }
+ mutex_unlock(&bfregi->lock);
+
+ return bfregn;
+}
+
+void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
+{
+ mutex_lock(&bfregi->lock);
+ bfregi->count[bfregn]--;
+ mutex_unlock(&bfregi->lock);
+}
+
+static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET: return MLX5_QP_STATE_RST;
+ case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
+ case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
+ case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
+ case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
+ case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
+ case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
+ default: return -1;
+ }
+}
+
+static int to_mlx5_st(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_RC: return MLX5_QP_ST_RC;
+ case IB_QPT_UC: return MLX5_QP_ST_UC;
+ case IB_QPT_UD: return MLX5_QP_ST_UD;
+ case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
+ case IB_QPT_XRC_INI:
+ case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
+ case IB_QPT_SMI: return MLX5_QP_ST_QP0;
+ case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
+ case MLX5_IB_QPT_DCI: return MLX5_QP_ST_DCI;
+ case IB_QPT_RAW_PACKET: return MLX5_QP_ST_RAW_ETHERTYPE;
+ default: return -EINVAL;
+ }
+}
+
+static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq);
+
+int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, u32 bfregn,
+ bool dyn_bfreg)
+{
+ unsigned int bfregs_per_sys_page;
+ u32 index_of_sys_page;
+ u32 offset;
+
+ if (bfregi->lib_uar_dyn)
+ return -EINVAL;
+
+ bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
+ MLX5_NON_FP_BFREGS_PER_UAR;
+ index_of_sys_page = bfregn / bfregs_per_sys_page;
+
+ if (dyn_bfreg) {
+ index_of_sys_page += bfregi->num_static_sys_pages;
+
+ if (index_of_sys_page >= bfregi->num_sys_pages)
+ return -EINVAL;
+
+ if (bfregn > bfregi->num_dyn_bfregs ||
+ bfregi->sys_pages[index_of_sys_page] == MLX5_IB_INVALID_UAR_INDEX) {
+ mlx5_ib_dbg(dev, "Invalid dynamic uar index\n");
+ return -EINVAL;
+ }
+ }
+
+ offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
+ return bfregi->sys_pages[index_of_sys_page] + offset;
+}
+
+static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_rwq *rwq, struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *context =
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx5_ib_ucontext,
+ ibucontext);
+
+ if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
+ atomic_dec(&dev->delay_drop.rqs_cnt);
+
+ mlx5_ib_db_unmap_user(context, &rwq->db);
+ ib_umem_release(rwq->umem);
+}
+
+static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct ib_udata *udata, struct mlx5_ib_rwq *rwq,
+ struct mlx5_ib_create_wq *ucmd)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ unsigned long page_size = 0;
+ u32 offset = 0;
+ int err;
+
+ if (!ucmd->buf_addr)
+ return -EINVAL;
+
+ rwq->umem = ib_umem_get(&dev->ib_dev, ucmd->buf_addr, rwq->buf_size, 0);
+ if (IS_ERR(rwq->umem)) {
+ mlx5_ib_dbg(dev, "umem_get failed\n");
+ err = PTR_ERR(rwq->umem);
+ return err;
+ }
+
+ page_size = mlx5_umem_find_best_quantized_pgoff(
+ rwq->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64, &rwq->rq_page_offset);
+ if (!page_size) {
+ mlx5_ib_warn(dev, "bad offset\n");
+ err = -EINVAL;
+ goto err_umem;
+ }
+
+ rwq->rq_num_pas = ib_umem_num_dma_blocks(rwq->umem, page_size);
+ rwq->page_shift = order_base_2(page_size);
+ rwq->log_page_size = rwq->page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
+
+ mlx5_ib_dbg(
+ dev,
+ "addr 0x%llx, size %zd, npages %zu, page_size %ld, ncont %d, offset %d\n",
+ (unsigned long long)ucmd->buf_addr, rwq->buf_size,
+ ib_umem_num_pages(rwq->umem), page_size, rwq->rq_num_pas,
+ offset);
+
+ err = mlx5_ib_db_map_user(ucontext, ucmd->db_addr, &rwq->db);
+ if (err) {
+ mlx5_ib_dbg(dev, "map failed\n");
+ goto err_umem;
+ }
+
+ return 0;
+
+err_umem:
+ ib_umem_release(rwq->umem);
+ return err;
+}
+
+static int adjust_bfregn(struct mlx5_ib_dev *dev,
+ struct mlx5_bfreg_info *bfregi, int bfregn)
+{
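+	/* remap from the driver's non-fast-path-only numbering to the
+	 * hardware numbering, which also counts fast-path bfregs per UAR
+	 */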
+ return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
+ bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
+}
+
+static int _create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp, struct ib_udata *udata,
+ struct ib_qp_init_attr *attr, u32 **in,
+ struct mlx5_ib_create_qp_resp *resp, int *inlen,
+ struct mlx5_ib_qp_base *base,
+ struct mlx5_ib_create_qp *ucmd)
+{
+ struct mlx5_ib_ucontext *context;
+ struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
+ unsigned int page_offset_quantized = 0;
+ unsigned long page_size = 0;
+ int uar_index = 0;
+ int bfregn;
+ int ncont = 0;
+ __be64 *pas;
+ void *qpc;
+ int err;
+ u16 uid;
+ u32 uar_flags;
+
+ context = rdma_udata_to_drv_context(udata, struct mlx5_ib_ucontext,
+ ibucontext);
+ uar_flags = qp->flags_en &
+ (MLX5_QP_FLAG_UAR_PAGE_INDEX | MLX5_QP_FLAG_BFREG_INDEX);
+ switch (uar_flags) {
+ case MLX5_QP_FLAG_UAR_PAGE_INDEX:
+ uar_index = ucmd->bfreg_index;
+ bfregn = MLX5_IB_INVALID_BFREG;
+ break;
+ case MLX5_QP_FLAG_BFREG_INDEX:
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi,
+ ucmd->bfreg_index, true);
+ if (uar_index < 0)
+ return uar_index;
+ bfregn = MLX5_IB_INVALID_BFREG;
+ break;
+ case 0:
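+		/* neither UAR nor bfreg index given: allocate a bfreg here */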
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ return -EINVAL;
+ bfregn = alloc_bfreg(dev, &context->bfregi);
+ if (bfregn < 0)
+ return bfregn;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn,
+ false);
+
+ qp->rq.offset = 0;
+ qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
+ qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
+
+ err = set_user_buf_size(dev, qp, ucmd, base, attr);
+ if (err)
+ goto err_bfreg;
+
+ if (ucmd->buf_addr && ubuffer->buf_size) {
+ ubuffer->buf_addr = ucmd->buf_addr;
+ ubuffer->umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
+ ubuffer->buf_size, 0);
+ if (IS_ERR(ubuffer->umem)) {
+ err = PTR_ERR(ubuffer->umem);
+ goto err_bfreg;
+ }
+ page_size = mlx5_umem_find_best_quantized_pgoff(
+ ubuffer->umem, qpc, log_page_size,
+ MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
+ &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto err_umem;
+ }
+ ncont = ib_umem_num_dma_blocks(ubuffer->umem, page_size);
+ } else {
+ ubuffer->umem = NULL;
+ }
+
+ *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
+ *in = kvzalloc(*inlen, GFP_KERNEL);
+ if (!*in) {
+ err = -ENOMEM;
+ goto err_umem;
+ }
+
+ uid = (attr->qp_type != IB_QPT_XRC_INI) ? to_mpd(pd)->uid : 0;
+ MLX5_SET(create_qp_in, *in, uid, uid);
+ qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
+ pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
+ if (ubuffer->umem) {
+ mlx5_ib_populate_pas(ubuffer->umem, page_size, pas, 0);
+ MLX5_SET(qpc, qpc, log_page_size,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(qpc, qpc, page_offset, page_offset_quantized);
+ }
+ MLX5_SET(qpc, qpc, uar_page, uar_index);
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
+ else
+ resp->bfreg_index = MLX5_IB_INVALID_BFREG;
+ qp->bfregn = bfregn;
+
+ err = mlx5_ib_db_map_user(context, ucmd->db_addr, &qp->db);
+ if (err) {
+ mlx5_ib_dbg(dev, "map failed\n");
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kvfree(*in);
+
+err_umem:
+ ib_umem_release(ubuffer->umem);
+
+err_bfreg:
+ if (bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, bfregn);
+ return err;
+}
+
+static void destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_ib_qp_base *base, struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ if (udata) {
+ /* User QP */
+ mlx5_ib_db_unmap_user(context, &qp->db);
+ ib_umem_release(base->ubuffer.umem);
+
+ /*
+ * Free only the BFREGs which are handled by the kernel.
+ * BFREGs of UARs allocated dynamically are handled by user.
+ */
+ if (qp->bfregn != MLX5_IB_INVALID_BFREG)
+ mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
+ return;
+ }
+
+ /* Kernel QP */
+ kvfree(qp->sq.wqe_head);
+ kvfree(qp->sq.w_list);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->sq.wr_data);
+ kvfree(qp->rq.wrid);
+ if (qp->db.db)
+ mlx5_db_free(dev->mdev, &qp->db);
+ if (qp->buf.frags)
+ mlx5_frag_buf_free(dev->mdev, &qp->buf);
+}
+
+static int _create_kernel_qp(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_qp *qp, u32 **in, int *inlen,
+ struct mlx5_ib_qp_base *base)
+{
+ int uar_index;
+ void *qpc;
+ int err;
+
+ if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
+ qp->bf.bfreg = &dev->fp_bfreg;
+ else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
+ qp->bf.bfreg = &dev->wc_bfreg;
+ else
+ qp->bf.bfreg = &dev->bfreg;
+
+	/* We need to divide by two since each register consists of
+	 * two buffers of identical size, namely odd and even
+	 */
+ qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
+ uar_index = qp->bf.bfreg->index;
+
+ err = calc_sq_size(dev, init_attr, qp);
+ if (err < 0) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ qp->rq.offset = 0;
+ qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
+ base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
+
+ err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
+ &qp->buf, dev->mdev->priv.numa_node);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ if (qp->rq.wqe_cnt)
+ mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
+ ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);
+
+ if (qp->sq.wqe_cnt) {
+ int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) /
+ MLX5_SEND_WQE_BB;
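+
+		/* the SQ may start mid-page, so pass its stride offset within
+		 * the first fragment to the frag-buf helper
+		 */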
+ mlx5_init_fbc_offset(qp->buf.frags +
+ (qp->sq.offset / PAGE_SIZE),
+ ilog2(MLX5_SEND_WQE_BB),
+ ilog2(qp->sq.wqe_cnt),
+ sq_strides_offset, &qp->sq.fbc);
+
+ qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
+ }
+
+ *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+ MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
+ *in = kvzalloc(*inlen, GFP_KERNEL);
+ if (!*in) {
+ err = -ENOMEM;
+ goto err_buf;
+ }
+
+ qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
+ MLX5_SET(qpc, qpc, uar_page, uar_index);
+ MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
+ MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
+ /* Set "fast registration enabled" for all kernel QPs */
+ MLX5_SET(qpc, qpc, fre, 1);
+ MLX5_SET(qpc, qpc, rlky, 1);
+
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ MLX5_SET(qpc, qpc, deth_sqpn, 1);
+
+ mlx5_fill_page_frag_array(&qp->buf,
+ (__be64 *)MLX5_ADDR_OF(create_qp_in,
+ *in, pas));
+
+ err = mlx5_db_alloc(dev->mdev, &qp->db);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ goto err_free;
+ }
+
+ qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.wrid), GFP_KERNEL);
+ qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.wr_data), GFP_KERNEL);
+ qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
+ sizeof(*qp->rq.wrid), GFP_KERNEL);
+ qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.w_list), GFP_KERNEL);
+ qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
+ sizeof(*qp->sq.wqe_head), GFP_KERNEL);
+
+ if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
+ !qp->sq.w_list || !qp->sq.wqe_head) {
+ err = -ENOMEM;
+ goto err_wrid;
+ }
+
+ return 0;
+
+err_wrid:
+ kvfree(qp->sq.wqe_head);
+ kvfree(qp->sq.w_list);
+ kvfree(qp->sq.wrid);
+ kvfree(qp->sq.wr_data);
+ kvfree(qp->rq.wrid);
+ mlx5_db_free(dev->mdev, &qp->db);
+
+err_free:
+ kvfree(*in);
+
+err_buf:
+ mlx5_frag_buf_free(dev->mdev, &qp->buf);
+ return err;
+}
+
+static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
+{
+ if (attr->srq || (qp->type == IB_QPT_XRC_TGT) ||
+ (qp->type == MLX5_IB_QPT_DCI) || (qp->type == IB_QPT_XRC_INI))
+ return MLX5_SRQ_RQ;
+ else if (!qp->has_rq)
+ return MLX5_ZERO_LEN_RQ;
+
+ return MLX5_NON_ZERO_RQ;
+}
+
+static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_ib_sq *sq, u32 tdn,
+ struct ib_pd *pd)
+{
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
+ MLX5_SET(tisc, tisc, transport_domain, tdn);
+ if (!mlx5_ib_lag_should_assign_affinity(dev) &&
+ mlx5_lag_is_lacp_owner(dev->mdev))
+ MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
+
+ return mlx5_core_create_tis(dev->mdev, in, &sq->tisn);
+}
+
+static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq, struct ib_pd *pd)
+{
+ mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
+}
+
+static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq)
+{
+ if (sq->flow_rule)
+ mlx5_del_flow_rules(sq->flow_rule);
+ sq->flow_rule = NULL;
+}
+
+static bool fr_supported(int ts_cap)
+{
+ return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
+ ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
+}
+
+static int get_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
+ bool fr_sup, bool rt_sup)
+{
+ if (cq->private_flags & MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS) {
+ if (!rt_sup) {
+ mlx5_ib_dbg(dev,
+ "Real time TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return MLX5_TIMESTAMP_FORMAT_REAL_TIME;
+ }
+ if (cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION) {
+ if (!fr_sup) {
+ mlx5_ib_dbg(dev,
+ "Free running TS format is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ return MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ }
+ return fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_TIMESTAMP_FORMAT_DEFAULT;
+}
+
+static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *recv_cq)
+{
+ u8 ts_cap = MLX5_CAP_GEN(dev->mdev, rq_ts_format);
+
+ return get_ts_format(dev, recv_cq, fr_supported(ts_cap),
+ rt_supported(ts_cap));
+}
+
+static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
+{
+ u8 ts_cap = MLX5_CAP_GEN(dev->mdev, sq_ts_format);
+
+ return get_ts_format(dev, send_cq, fr_supported(ts_cap),
+ rt_supported(ts_cap));
+}
+
+static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
+ struct mlx5_ib_cq *recv_cq)
+{
+ u8 ts_cap = MLX5_CAP_ROCE(dev->mdev, qp_ts_format);
+ bool fr_sup = fr_supported(ts_cap);
+ bool rt_sup = rt_supported(ts_cap);
+ u8 default_ts = fr_sup ? MLX5_TIMESTAMP_FORMAT_FREE_RUNNING :
+ MLX5_TIMESTAMP_FORMAT_DEFAULT;
+ int send_ts_format =
+ send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) :
+ default_ts;
+ int recv_ts_format =
+ recv_cq ? get_ts_format(dev, recv_cq, fr_sup, rt_sup) :
+ default_ts;
+
+ if (send_ts_format < 0 || recv_ts_format < 0)
+ return -EOPNOTSUPP;
+
+ if (send_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
+ recv_ts_format != MLX5_TIMESTAMP_FORMAT_DEFAULT &&
+ send_ts_format != recv_ts_format) {
+ mlx5_ib_dbg(
+ dev,
+ "The send ts_format does not match the receive ts_format\n");
+ return -EOPNOTSUPP;
+ }
+
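+	/* prefer whichever CQ explicitly requested a non-default format */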
+ return send_ts_format == default_ts ? recv_ts_format : send_ts_format;
+}
+
+static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
+ struct ib_udata *udata,
+ struct mlx5_ib_sq *sq, void *qpin,
+ struct ib_pd *pd, struct mlx5_ib_cq *cq)
+{
+ struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
+ __be64 *pas;
+ void *in;
+ void *sqc;
+ void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
+ void *wq;
+ int inlen;
+ int err;
+ unsigned int page_offset_quantized;
+ unsigned long page_size;
+ int ts_format;
+
+ ts_format = get_sq_ts_format(dev, cq);
+ if (ts_format < 0)
+ return ts_format;
+
+ sq->ubuffer.umem = ib_umem_get(&dev->ib_dev, ubuffer->buf_addr,
+ ubuffer->buf_size, 0);
+ if (IS_ERR(sq->ubuffer.umem))
+ return PTR_ERR(sq->ubuffer.umem);
+ page_size = mlx5_umem_find_best_quantized_pgoff(
+ ubuffer->umem, wq, log_wq_pg_sz, MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64, &page_offset_quantized);
+ if (!page_size) {
+ err = -EINVAL;
+ goto err_umem;
+ }
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) *
+ ib_umem_num_dma_blocks(sq->ubuffer.umem, page_size);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto err_umem;
+ }
+
+ MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+ if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
+ MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+ MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
+ MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
+ MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, swp))
+ MLX5_SET(sqc, sqc, allow_swp, 1);
+
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
+ MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
+ MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
+ MLX5_SET(wq, wq, log_wq_pg_sz,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(wq, wq, page_offset, page_offset_quantized);
+
+ pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
+ mlx5_ib_populate_pas(sq->ubuffer.umem, page_size, pas, 0);
+
+ err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);
+
+ kvfree(in);
+
+ if (err)
+ goto err_umem;
+
+ return 0;
+
+err_umem:
+ ib_umem_release(sq->ubuffer.umem);
+ sq->ubuffer.umem = NULL;
+
+ return err;
+}
+
+static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq)
+{
+ destroy_flow_rule_vport_sq(sq);
+ mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp);
+ ib_umem_release(sq->ubuffer.umem);
+}
+
+static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq, void *qpin,
+ struct ib_pd *pd, struct mlx5_ib_cq *cq)
+{
+ struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
+ __be64 *pas;
+ void *in;
+ void *rqc;
+ void *wq;
+ void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
+ struct ib_umem *umem = rq->base.ubuffer.umem;
+ unsigned int page_offset_quantized;
+ unsigned long page_size = 0;
+ int ts_format;
+ size_t inlen;
+ int err;
+
+ ts_format = get_rq_ts_format(dev, cq);
+ if (ts_format < 0)
+ return ts_format;
+
+ page_size = mlx5_umem_find_best_quantized_pgoff(umem, wq, log_wq_pg_sz,
+ MLX5_ADAPTER_PAGE_SHIFT,
+ page_offset, 64,
+ &page_offset_quantized);
+ if (!page_size)
+ return -EINVAL;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+ sizeof(u64) * ib_umem_num_dma_blocks(umem, page_size);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
+ MLX5_SET(rqc, rqc, vsd, 1);
+ MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, ts_format, ts_format);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
+ MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
+
+ if (mqp->flags & IB_QP_CREATE_SCATTER_FCS)
+ MLX5_SET(rqc, rqc, scatter_fcs, 1);
+
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ if (rq->flags & MLX5_IB_RQ_PCI_WRITE_END_PADDING)
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, page_offset, page_offset_quantized);
+ MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
+ MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
+ MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
+ MLX5_SET(wq, wq, log_wq_pg_sz,
+ order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));
+
+ pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
+ mlx5_ib_populate_pas(umem, page_size, pas, 0);
+
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq)
+{
+ mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
+}
+
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq,
+ u32 qp_flags_en,
+ struct ib_pd *pd)
+{
+ if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
+}
+
+static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq, u32 tdn,
+ u32 *qp_flags_en, struct ib_pd *pd,
+ u32 *out)
+{
+ u8 lb_flag = 0;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+ if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ if (dev->is_rep) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ *qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
+ rq->tirn = MLX5_GET(create_tir_out, out, tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ destroy_raw_packet_qp_tir(dev, rq, 0, pd);
+ }
+ kvfree(in);
+
+ return err;
+}
+
+static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ u32 *in, size_t inlen, struct ib_pd *pd,
+ struct ib_udata *udata,
+ struct mlx5_ib_create_qp_resp *resp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ int err;
+ u32 tdn = mucontext->tdn;
+ u16 uid = to_mpd(pd)->uid;
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
+
+ if (!qp->sq.wqe_cnt && !qp->rq.wqe_cnt)
+ return -EINVAL;
+ if (qp->sq.wqe_cnt) {
+ err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
+ if (err)
+ return err;
+
+ err = create_raw_packet_qp_sq(dev, udata, sq, in, pd,
+ to_mcq(init_attr->send_cq));
+ if (err)
+ goto err_destroy_tis;
+
+ if (uid) {
+ resp->tisn = sq->tisn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
+ resp->sqn = sq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
+ }
+
+ sq->base.container_mibqp = qp;
+ sq->base.mqp.event = mlx5_ib_qp_event;
+ }
+
+ if (qp->rq.wqe_cnt) {
+ rq->base.container_mibqp = qp;
+
+ if (qp->flags & IB_QP_CREATE_CVLAN_STRIPPING)
+ rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING)
+ rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
+ err = create_raw_packet_qp_rq(dev, rq, in, pd,
+ to_mcq(init_attr->recv_cq));
+ if (err)
+ goto err_destroy_sq;
+
+ err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd,
+ out);
+ if (err)
+ goto err_destroy_rq;
+
+ if (uid) {
+ resp->rqn = rq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
+ resp->tirn = rq->tirn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
+ resp->tir_icm_addr = MLX5_GET(
+ create_tir_out, out, icm_address_31_0);
+ resp->tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_39_32)
+ << 32;
+ resp->tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_63_40)
+ << 40;
+ resp->comp_mask |=
+ MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
+ }
+ }
+ }
+
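+	/* expose the SQ number (or the RQ number when there is no SQ) as the
+	 * QP number of this raw packet QP
+	 */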
+ qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
+ rq->base.mqp.qpn;
+ return 0;
+
+err_destroy_rq:
+ destroy_raw_packet_qp_rq(dev, rq);
+err_destroy_sq:
+ if (!qp->sq.wqe_cnt)
+ return err;
+ destroy_raw_packet_qp_sq(dev, sq);
+err_destroy_tis:
+ destroy_raw_packet_qp_tis(dev, sq, pd);
+
+ return err;
+}
+
+static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp)
+{
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+
+ if (qp->rq.wqe_cnt) {
+ destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
+ destroy_raw_packet_qp_rq(dev, rq);
+ }
+
+ if (qp->sq.wqe_cnt) {
+ destroy_raw_packet_qp_sq(dev, sq);
+ destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
+ }
+}
+
+static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp)
+{
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+
+ sq->sq = &qp->sq;
+ rq->rq = &qp->rq;
+ sq->doorbell = &qp->db;
+ rq->doorbell = &qp->db;
+}
+
+static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
+{
+ if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(qp->ibqp.pd)->uid);
+}
+
+struct mlx5_create_qp_params {
+ struct ib_udata *udata;
+ size_t inlen;
+ size_t outlen;
+ size_t ucmd_size;
+ void *ucmd;
+ u8 is_rss_raw : 1;
+ struct ib_qp_init_attr *attr;
+ u32 uidx;
+ struct mlx5_ib_create_qp_resp resp;
+};
+
+static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp_rss *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
+ struct mlx5_ib_ucontext *mucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ int inlen;
+ int outlen;
+ int err;
+ u32 *in;
+ u32 *out;
+ void *tirc;
+ void *hfso;
+ u32 selected_fields = 0;
+ u32 outer_l4;
+ u32 tdn = mucontext->tdn;
+ u8 lb_flag = 0;
+
+ if (ucmd->comp_mask) {
+ mlx5_ib_dbg(dev, "invalid comp mask\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER &&
+ !(ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)) {
+ mlx5_ib_dbg(dev, "Tunnel offloads must be set for inner RSS\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->is_rep)
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+ if (qp->flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ outlen = MLX5_ST_SZ_BYTES(create_tir_out);
+ in = kvzalloc(inlen + outlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ out = in + MLX5_ST_SZ_DW(create_tir_in);
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ init_attr->rwq_ind_tbl->ind_tbl_num);
+ MLX5_SET(tirc, tirc, transport_domain, tdn);
+
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ if (ucmd->flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
+ MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
+
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_INNER)
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
+ else
+ hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+ switch (ucmd->rx_hash_function) {
+ case MLX5_RX_HASH_FUNC_TOEPLITZ:
+ {
+ void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
+ size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
+
+ if (len != ucmd->rx_key_len) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
+ memcpy(rss_key, ucmd->rx_hash_key, len);
+ break;
+ }
+ default:
+ err = -EOPNOTSUPP;
+ goto err;
+ }
+
+ if (!ucmd->rx_hash_fields_mask) {
+		/* special case: this TIR serves as a steering entry without hashing */
+ if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
+ goto create_tir;
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+
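+
+	/* build a bitmap of the requested L4 hash types so we can verify that
+	 * at most one of TCP, UDP and IPsec SPI is selected
+	 */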
+ outer_l4 = ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ << 0 |
+ ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ << 1 |
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI) << 2;
+
+ /* Check that only one l4 protocol is set */
+ if (outer_l4 & (outer_l4 - 1)) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ else if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
+ selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
+
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
+ selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
+
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
+ selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
+
+ if ((ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
+ (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
+ selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
+
+ if (ucmd->rx_hash_fields_mask & MLX5_RX_HASH_IPSEC_SPI)
+ selected_fields |= MLX5_HASH_FIELD_SEL_IPSEC_SPI;
+
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
+
+create_tir:
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+ err = mlx5_cmd_exec_inout(dev->mdev, create_tir, in, out);
+
+ qp->rss_qp.tirn = MLX5_GET(create_tir_out, out, tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(pd)->uid);
+ }
+
+ if (err)
+ goto err;
+
+ if (mucontext->devx_uid) {
+ params->resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ params->resp.tirn = qp->rss_qp.tirn;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner) ||
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, sw_owner_v2)) {
+ params->resp.tir_icm_addr =
+ MLX5_GET(create_tir_out, out, icm_address_31_0);
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_39_32)
+ << 32;
+ params->resp.tir_icm_addr |=
+ (u64)MLX5_GET(create_tir_out, out,
+ icm_address_63_40)
+ << 40;
+ params->resp.comp_mask |=
+ MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR;
+ }
+ }
+
+ kvfree(in);
+ /* qpn is reserved for that QP */
+ qp->trans_qp.base.mqp.qpn = 0;
+ qp->is_rss = true;
+ return 0;
+
+err:
+ kvfree(in);
+ return err;
+}
+
+static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ void *qpc)
+{
+ int scqe_sz;
+ bool allow_scat_cqe = false;
+
+ allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+
+ if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+ return;
+
+ scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
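+	/* 128B send CQEs have room to scatter 64B of data into the CQE;
+	 * smaller CQEs can scatter at most 32B
+	 */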
+ if (scqe_sz == 128) {
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
+ return;
+ }
+
+ if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
+ MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
+}
+
+static int atomic_size_to_mode(int size_mask)
+{
+ /* driver does not support atomic_size > 256B
+ * and does not know how to translate bigger sizes
+ */
+ int supported_size_mask = size_mask & 0x1ff;
+ int log_max_size;
+
+ if (!supported_size_mask)
+ return -EOPNOTSUPP;
+
+ log_max_size = __fls(supported_size_mask);
+
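+	/* for sizes above 8 bytes the mode is simply log2 of the largest
+	 * supported size
+	 */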
+ if (log_max_size > 3)
+ return log_max_size;
+
+ return MLX5_ATOMIC_MODE_8B;
+}
+
+static int get_atomic_mode(struct mlx5_ib_dev *dev,
+ enum ib_qp_type qp_type)
+{
+ u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
+ u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
+ int atomic_mode = -EOPNOTSUPP;
+ int atomic_size_mask;
+
+ if (!atomic)
+ return -EOPNOTSUPP;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+ else
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
+ (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
+ atomic_mode = atomic_size_to_mode(atomic_size_mask);
+
+ if (atomic_mode <= 0 &&
+ (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
+ atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
+ atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
+
+ return atomic_mode;
+}
+
+static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp_base *base;
+ unsigned long flags;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, MLX5_QP_ST_XRC);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(devr->p0)->pdn);
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
+
+ MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
+ MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+ MLX5_SET(qpc, qpc, xrcd, to_mxrcd(attr->xrcd)->xrcdn);
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+		/* Special case: clear the flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
+ }
+
+ base = &qp->trans_qp.base;
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+ kvfree(in);
+ if (err)
+ return err;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ qp->trans_qp.xrcdn = to_mxrcd(attr->xrcd)->xrcdn;
+ return 0;
+}
+
+static int create_dci(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct ib_udata *udata = params->udata;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int ts_format;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ base = &qp->trans_qp.base;
+
+ qp->has_rq = qp_has_rq(init_attr);
+ err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+ return -EINVAL;
+
+ if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+ return -EINVAL;
+
+ ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+
+ if (ts_format < 0)
+ return ts_format;
+
+ err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+ &inlen, base, ucmd);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ MLX5_SET(qpc, qpc, wq_signature, 1);
+
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE)
+ configure_requester_scat_cqe(dev, qp, init_attr, qpc);
+
+ if (qp->rq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
+ }
+
+ if (qp->flags_en & MLX5_QP_FLAG_DCI_STREAM) {
+ MLX5_SET(qpc, qpc, log_num_dci_stream_channels,
+ ucmd->dci_streams.log_num_concurent);
+ MLX5_SET(qpc, qpc, log_num_dci_errored_streams,
+ ucmd->dci_streams.log_num_errored);
+ }
+
+ MLX5_SET(qpc, qpc, ts_format, ts_format);
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
+
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+
+ /* Set default resources */
+ if (init_attr->srq) {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(init_attr->srq)->msrq.srqn);
+ } else {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(devr->s1)->msrq.srqn);
+ }
+
+ if (init_attr->send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd,
+ to_mcq(init_attr->send_cq)->mcq.cqn);
+
+ if (init_attr->recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv,
+ to_mcq(init_attr->recv_cq)->mcq.cqn);
+
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clear the flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
+ }
+
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+
+ kvfree(in);
+ if (err)
+ goto err_create;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device to QPs access, needed for further handling via reset
+ * flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling via reset flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+
+err_create:
+ destroy_qp(dev, qp, base, udata);
+ return err;
+}
+
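+/*
+ * Create a QP on behalf of user space: size the work queues from the ucmd,
+ * build the create_qp mailbox, hand RAW_PACKET / source-QPN QPs off to
+ * create_raw_packet_qp(), and link the new QP into the device and CQ lists
+ * used by the reset flow.
+ */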
+static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *init_attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ struct ib_udata *udata = params->udata;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int ts_format;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ qp->underlay_qpn = init_attr->source_qpn;
+
+ base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
+ &qp->raw_packet_qp.rq.base :
+ &qp->trans_qp.base;
+
+ qp->has_rq = qp_has_rq(init_attr);
+ err = set_rq_size(dev, &init_attr->cap, qp->has_rq, qp, ucmd);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ if (ucmd->rq_wqe_shift != qp->rq.wqe_shift ||
+ ucmd->rq_wqe_count != qp->rq.wqe_cnt)
+ return -EINVAL;
+
+ if (ucmd->sq_wqe_count > (1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)))
+ return -EINVAL;
+
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ ts_format = get_qp_ts_format(dev, to_mcq(init_attr->send_cq),
+ to_mcq(init_attr->recv_cq));
+ if (ts_format < 0)
+ return ts_format;
+ }
+
+ err = _create_user_qp(dev, pd, qp, udata, init_attr, &in, &params->resp,
+ &inlen, base, ucmd);
+ if (err)
+ return err;
+
+ if (is_sqp(init_attr->qp_type))
+ qp->port = init_attr->port_num;
+
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd)->pdn);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ MLX5_SET(qpc, qpc, wq_signature, 1);
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+
+ if (qp->flags & IB_QP_CREATE_CROSS_CHANNEL)
+ MLX5_SET(qpc, qpc, cd_master, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_SEND)
+ MLX5_SET(qpc, qpc, cd_slave_send, 1);
+ if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
+ MLX5_SET(qpc, qpc, cd_slave_receive, 1);
+ if (qp->flags_en & MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)
+ MLX5_SET(qpc, qpc, req_e2e_credit_mode, 1);
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (init_attr->qp_type == IB_QPT_RC ||
+ init_attr->qp_type == IB_QPT_UC)) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+ MLX5_SET(qpc, qpc, cs_res,
+ rcqe_sz == 128 ? MLX5_RES_SCAT_DATA64_CQE :
+ MLX5_RES_SCAT_DATA32_CQE);
+ }
+ if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
+ (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
+ configure_requester_scat_cqe(dev, qp, init_attr, qpc);
+
+ if (qp->rq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
+ }
+
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET)
+ MLX5_SET(qpc, qpc, ts_format, ts_format);
+
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
+
+ if (qp->sq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+ } else {
+ MLX5_SET(qpc, qpc, no_sq, 1);
+ if (init_attr->srq &&
+ init_attr->srq->srq_type == IB_SRQT_TM)
+ MLX5_SET(qpc, qpc, offload_type,
+ MLX5_QPC_OFFLOAD_TYPE_RNDV);
+ }
+
+ /* Set default resources */
+ switch (init_attr->qp_type) {
+ case IB_QPT_XRC_INI:
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
+ break;
+ default:
+ if (init_attr->srq) {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
+ } else {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
+ }
+ }
+
+ if (init_attr->send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
+
+ if (init_attr->recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
+
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ if (qp->flags & IB_QP_CREATE_PCI_WRITE_END_PADDING &&
+ init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ MLX5_SET(qpc, qpc, end_padding_mode,
+ MLX5_WQ_END_PAD_MODE_ALIGN);
+ /* Special case to clear the flag */
+ qp->flags &= ~IB_QP_CREATE_PCI_WRITE_END_PADDING;
+ }
+
+ if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd->sq_buf_addr;
+ raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
+ &params->resp, init_attr);
+ } else
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+
+ kvfree(in);
+ if (err)
+ goto err_create;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+ if (MLX5_CAP_GEN(mdev, ece_support))
+ params->resp.ece_options = MLX5_GET(create_qp_out, out, ece);
+
+ get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device to QPs access, needed for further handling via reset
+ * flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling via reset flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+
+err_create:
+ destroy_qp(dev, qp, base, udata);
+ return err;
+}
+
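+/*
+ * Create a kernel-owned QP (no udata): buffers and doorbells are allocated
+ * in the kernel by _create_kernel_qp() and the QP is created directly via
+ * the QPC command interface.
+ */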
+static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ u32 uidx = params->uidx;
+ struct mlx5_ib_resources *devr = &dev->devr;
+ u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+ int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_cq *send_cq;
+ struct mlx5_ib_cq *recv_cq;
+ unsigned long flags;
+ struct mlx5_ib_qp_base *base;
+ int mlx5_st;
+ void *qpc;
+ u32 *in;
+ int err;
+
+ spin_lock_init(&qp->sq.lock);
+ spin_lock_init(&qp->rq.lock);
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+ qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ base = &qp->trans_qp.base;
+
+ qp->has_rq = qp_has_rq(attr);
+ err = set_rq_size(dev, &attr->cap, qp->has_rq, qp, NULL);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ err = _create_kernel_qp(dev, attr, qp, &in, &inlen, base);
+ if (err)
+ return err;
+
+ if (is_sqp(attr->qp_type))
+ qp->port = attr->port_num;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+
+ if (attr->qp_type != MLX5_IB_QPT_REG_UMR)
+ MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
+ else
+ MLX5_SET(qpc, qpc, latency_sensitive, 1);
+
+ if (qp->flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
+ MLX5_SET(qpc, qpc, block_lb_mc, 1);
+
+ if (qp->rq.wqe_cnt) {
+ MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
+ MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
+ }
+
+ MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, attr));
+
+ if (qp->sq.wqe_cnt)
+ MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
+ else
+ MLX5_SET(qpc, qpc, no_sq, 1);
+
+ if (attr->srq) {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn0);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(attr->srq)->msrq.srqn);
+ } else {
+ MLX5_SET(qpc, qpc, xrcd, devr->xrcdn1);
+ MLX5_SET(qpc, qpc, srqn_rmpn_xrqn,
+ to_msrq(devr->s1)->msrq.srqn);
+ }
+
+ if (attr->send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, to_mcq(attr->send_cq)->mcq.cqn);
+
+ if (attr->recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(attr->recv_cq)->mcq.cqn);
+
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ /* 0xffffff means we ask to work with cqe version 0 */
+ if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
+ MLX5_SET(qpc, qpc, user_index, uidx);
+
+ /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB QP */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO)
+ MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
+
+ err = mlx5_qpc_create_qp(dev, &base->mqp, in, inlen, out);
+ kvfree(in);
+ if (err)
+ goto err_create;
+
+ base->container_mibqp = qp;
+ base->mqp.event = mlx5_ib_qp_event;
+
+ get_cqs(qp->type, attr->send_cq, attr->recv_cq,
+ &send_cq, &recv_cq);
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* Maintain device to QPs access, needed for further handling via reset
+ * flow
+ */
+ list_add_tail(&qp->qps_list, &dev->qp_list);
+ /* Maintain CQ to QPs access, needed for further handling via reset flow
+ */
+ if (send_cq)
+ list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+ if (recv_cq)
+ list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ return 0;
+
+err_create:
+ destroy_qp(dev, qp, base, NULL);
+ return err;
+}
+
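+/*
+ * Lock the send and receive CQs in a consistent order (by ascending CQN) so
+ * that concurrent callers cannot deadlock; the __acquire() annotations keep
+ * sparse happy when one of the locks is not actually taken.
+ */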
+static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
+ __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
+{
+ if (send_cq) {
+ if (recv_cq) {
+ if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ spin_lock(&send_cq->lock);
+ spin_lock_nested(&recv_cq->lock,
+ SINGLE_DEPTH_NESTING);
+ } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
+ spin_lock(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ } else {
+ spin_lock(&recv_cq->lock);
+ spin_lock_nested(&send_cq->lock,
+ SINGLE_DEPTH_NESTING);
+ }
+ } else {
+ spin_lock(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ }
+ } else if (recv_cq) {
+ spin_lock(&recv_cq->lock);
+ __acquire(&send_cq->lock);
+ } else {
+ __acquire(&send_cq->lock);
+ __acquire(&recv_cq->lock);
+ }
+}
+
+static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
+ __releases(&send_cq->lock) __releases(&recv_cq->lock)
+{
+ if (send_cq) {
+ if (recv_cq) {
+ if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+ spin_unlock(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
+ __release(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ } else {
+ spin_unlock(&send_cq->lock);
+ spin_unlock(&recv_cq->lock);
+ }
+ } else {
+ __release(&recv_cq->lock);
+ spin_unlock(&send_cq->lock);
+ }
+ } else if (recv_cq) {
+ __release(&send_cq->lock);
+ spin_unlock(&recv_cq->lock);
+ } else {
+ __release(&recv_cq->lock);
+ __release(&send_cq->lock);
+ }
+}
+
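+/*
+ * Resolve which CQs are relevant for a given QP type: XRC_TGT has none,
+ * send-only types (REG_UMR, XRC_INI) have just a send CQ, and the rest use
+ * both send and receive CQs when provided.
+ */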
+static void get_cqs(enum ib_qp_type qp_type,
+ struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
+ struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
+{
+ switch (qp_type) {
+ case IB_QPT_XRC_TGT:
+ *send_cq = NULL;
+ *recv_cq = NULL;
+ break;
+ case MLX5_IB_QPT_REG_UMR:
+ case IB_QPT_XRC_INI:
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
+ *recv_cq = NULL;
+ break;
+
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_UD:
+ case IB_QPT_RAW_PACKET:
+ *send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
+ *recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
+ break;
+ default:
+ *send_cq = NULL;
+ *recv_cq = NULL;
+ break;
+ }
+}
+
+static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param,
+ u8 lag_tx_affinity);
+
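+/*
+ * Common teardown path: move the QP to RESET if needed, unlink it from the
+ * device and CQ lists, clean stale CQEs for kernel QPs, and destroy either
+ * the raw packet resources or the regular QP object.
+ */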
+static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_cq *send_cq, *recv_cq;
+ struct mlx5_ib_qp_base *base;
+ unsigned long flags;
+ int err;
+
+ if (qp->is_rss) {
+ destroy_rss_raw_qp_tir(dev, qp);
+ return;
+ }
+
+ base = (qp->type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
+ &qp->raw_packet_qp.rq.base :
+ &qp->trans_qp.base;
+
+ if (qp->state != IB_QPS_RESET) {
+ if (qp->type != IB_QPT_RAW_PACKET &&
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
+ err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
+ NULL, &base->mqp, NULL);
+ } else {
+ struct mlx5_modify_raw_qp_param raw_qp_param = {
+ .operation = MLX5_CMD_OP_2RST_QP
+ };
+
+ err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
+ }
+ if (err)
+ mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
+ base->mqp.qpn);
+ }
+
+ get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+ &recv_cq);
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+ mlx5_ib_lock_cqs(send_cq, recv_cq);
+ /* del from lists under both locks above to protect reset flow paths */
+ list_del(&qp->qps_list);
+ if (send_cq)
+ list_del(&qp->cq_send_list);
+
+ if (recv_cq)
+ list_del(&qp->cq_recv_list);
+
+ if (!udata) {
+ __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
+ qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+ if (send_cq != recv_cq)
+ __mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
+ NULL);
+ }
+ mlx5_ib_unlock_cqs(send_cq, recv_cq);
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (qp->type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ destroy_raw_packet_qp(dev, qp);
+ } else {
+ err = mlx5_core_destroy_qp(dev, &base->mqp);
+ if (err)
+ mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
+ base->mqp.qpn);
+ }
+
+ destroy_qp(dev, qp, base, udata);
+}
+
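+/*
+ * A DCT is only prepared here: the create_dct_in mailbox is filled from the
+ * ucmd and kept in qp->dct.in, and the firmware object itself is created
+ * later, when the QP is modified to RTR.
+ */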
+static int create_dct(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_qp_init_attr *attr = params->attr;
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ u32 uidx = params->uidx;
+ void *dctc;
+
+ if (mlx5_lag_is_active(dev->mdev) && !MLX5_CAP_GEN(dev->mdev, lag_dct))
+ return -EOPNOTSUPP;
+
+ qp->dct.in = kzalloc(MLX5_ST_SZ_BYTES(create_dct_in), GFP_KERNEL);
+ if (!qp->dct.in)
+ return -ENOMEM;
+
+ MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
+ MLX5_SET(dctc, dctc, srqn_xrqn, to_msrq(attr->srq)->msrq.srqn);
+ MLX5_SET(dctc, dctc, cqn, to_mcq(attr->recv_cq)->mcq.cqn);
+ MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
+ MLX5_SET(dctc, dctc, user_index, uidx);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
+ if (qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) {
+ int rcqe_sz = mlx5_ib_get_cqe_size(attr->recv_cq);
+
+ if (rcqe_sz == 128)
+ MLX5_SET(dctc, dctc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ }
+
+ qp->state = IB_QPS_RESET;
+ return 0;
+}
+
+static int check_qp_type(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
+ enum ib_qp_type *type)
+{
+ if (attr->qp_type == IB_QPT_DRIVER && !MLX5_CAP_GEN(dev->mdev, dct))
+ goto out;
+
+ switch (attr->qp_type) {
+ case IB_QPT_XRC_TGT:
+ case IB_QPT_XRC_INI:
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ goto out;
+ fallthrough;
+ case IB_QPT_RC:
+ case IB_QPT_UC:
+ case IB_QPT_SMI:
+ case MLX5_IB_QPT_HW_GSI:
+ case IB_QPT_DRIVER:
+ case IB_QPT_GSI:
+ case IB_QPT_RAW_PACKET:
+ case IB_QPT_UD:
+ case MLX5_IB_QPT_REG_UMR:
+ break;
+ default:
+ goto out;
+ }
+
+ *type = attr->qp_type;
+ return 0;
+
+out:
+ mlx5_ib_dbg(dev, "Unsupported QP type %d\n", attr->qp_type);
+ return -EOPNOTSUPP;
+}
+
+static int check_valid_flow(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ if (!udata) {
+ /* Kernel create_qp callers */
+ if (attr->rwq_ind_tbl)
+ return -EOPNOTSUPP;
+
+ switch (attr->qp_type) {
+ case IB_QPT_RAW_PACKET:
+ case IB_QPT_DRIVER:
+ return -EOPNOTSUPP;
+ default:
+ return 0;
+ }
+ }
+
+ /* Userspace create_qp callers */
+ if (attr->qp_type == IB_QPT_RAW_PACKET && !ucontext->cqe_version) {
+ mlx5_ib_dbg(dev,
+ "Raw Packet QP is only supported for CQE version > 0\n");
+ return -EINVAL;
+ }
+
+ if (attr->qp_type != IB_QPT_RAW_PACKET && attr->rwq_ind_tbl) {
+ mlx5_ib_dbg(dev,
+ "Wrong QP type %d for the RWQ indirect table\n",
+ attr->qp_type);
+ return -EINVAL;
+ }
+
+ /*
+ * We don't need to see this warning, it means that kernel code is
+ * missing an ib_pd. Placed here to catch developers' mistakes.
+ */
+ WARN_ONCE(!pd && attr->qp_type != IB_QPT_XRC_TGT,
+ "There is a missing PD pointer assignment\n");
+ return 0;
+}
+
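+/*
+ * Accept a single vendor (mlx5-specific) creation flag when the matching
+ * capability is present; otherwise leave it set in *flags so the caller can
+ * reject it, except for the scatter-CQE flags, which are silently dropped.
+ */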
+static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
+
+ if (cond) {
+ qp->flags_en |= flag;
+ *flags &= ~flag;
+ return;
+ }
+
+ switch (flag) {
+ case MLX5_QP_FLAG_SCATTER_CQE:
+ case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
+ /*
+ * We don't return an error if these flags were provided
+ * but mlx5 doesn't have the right capability.
+ */
+ *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
+ return;
+ default:
+ break;
+ }
+ mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
+}
+
+static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ void *ucmd, struct ib_qp_init_attr *attr)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ bool cond;
+ int flags;
+
+ if (attr->rwq_ind_tbl)
+ flags = ((struct mlx5_ib_create_qp_rss *)ucmd)->flags;
+ else
+ flags = ((struct mlx5_ib_create_qp *)ucmd)->flags;
+
+ switch (flags & (MLX5_QP_FLAG_TYPE_DCT | MLX5_QP_FLAG_TYPE_DCI)) {
+ case MLX5_QP_FLAG_TYPE_DCI:
+ qp->type = MLX5_IB_QPT_DCI;
+ break;
+ case MLX5_QP_FLAG_TYPE_DCT:
+ qp->type = MLX5_IB_QPT_DCT;
+ break;
+ default:
+ if (qp->type != IB_QPT_DRIVER)
+ break;
+ /*
+ * It is IB_QPT_DRIVER and either no subtype or a
+ * wrong subtype was provided.
+ */
+ return -EINVAL;
+ }
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCI, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TYPE_DCT, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_DCI_STREAM,
+ MLX5_CAP_GEN(mdev, log_max_dci_stream_channels),
+ qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+
+ if (qp->type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_gre) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_TUNNEL_OFFLOADS,
+ cond, qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC, true,
+ qp);
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC, true,
+ qp);
+ }
+
+ if (qp->type == IB_QPT_RC)
+ process_vendor_flag(dev, &flags,
+ MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE,
+ MLX5_CAP_GEN(mdev, qp_packet_based), qp);
+
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_BFREG_INDEX, true, qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_UAR_PAGE_INDEX, true, qp);
+
+ cond = qp->flags_en & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC);
+ if (attr->rwq_ind_tbl && cond) {
+ mlx5_ib_dbg(dev, "RSS RAW QP has unsupported flags 0x%X\n",
+ cond);
+ return -EINVAL;
+ }
+
+ if (flags)
+ mlx5_ib_dbg(dev, "udata has unsupported flags 0x%X\n", flags);
+
+ return (flags) ? -EINVAL : 0;
+}
+
+static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
+ bool cond, struct mlx5_ib_qp *qp)
+{
+ if (!(*flags & flag))
+ return;
+
+ if (cond) {
+ qp->flags |= flag;
+ *flags &= ~flag;
+ return;
+ }
+
+ if (flag == MLX5_IB_QP_CREATE_WC_TEST) {
+ /*
+ * Special case: if the condition wasn't met, it isn't an error,
+ * just a different in-kernel flow.
+ */
+ *flags &= ~MLX5_IB_QP_CREATE_WC_TEST;
+ return;
+ }
+ mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag);
+}
+
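+/*
+ * Validate the standard ib_qp_init_attr create_flags against device
+ * capabilities; any flag left unconsumed at the end is reported and the
+ * QP creation fails with -EOPNOTSUPP.
+ */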
+static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
+{
+ enum ib_qp_type qp_type = qp->type;
+ struct mlx5_core_dev *mdev = dev->mdev;
+ int create_flags = attr->create_flags;
+ bool cond;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return (create_flags) ? -EINVAL : 0;
+
+ if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
+ return (create_flags) ? -EINVAL : 0;
+
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+ mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_BYPASS),
+ qp);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_INTEGRITY_EN,
+ MLX5_CAP_GEN(mdev, sho), qp);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+ MLX5_CAP_GEN(mdev, block_lb_mc), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_CROSS_CHANNEL,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_SEND,
+ MLX5_CAP_GEN(mdev, cd), qp);
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_MANAGED_RECV,
+ MLX5_CAP_GEN(mdev, cd), qp);
+
+ if (qp_type == IB_QPT_UD) {
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_IPOIB_UD_LSO,
+ MLX5_CAP_GEN(mdev, ipoib_basic_offloads),
+ qp);
+ cond = MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_IB;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_SOURCE_QPN,
+ cond, qp);
+ }
+
+ if (qp_type == IB_QPT_RAW_PACKET) {
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, scatter_fcs);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_SCATTER_FCS, cond, qp);
+
+ cond = MLX5_CAP_GEN(mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(mdev, vlan_cap);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_CVLAN_STRIPPING, cond, qp);
+ }
+
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING,
+ MLX5_CAP_GEN(mdev, end_pad), qp);
+
+ process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST,
+ qp_type != MLX5_IB_QPT_REG_UMR, qp);
+ process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1,
+ true, qp);
+
+ if (create_flags) {
+ mlx5_ib_dbg(dev, "Create QP has unsupported flags 0x%X\n",
+ create_flags);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
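+/*
+ * Derive how many bytes of the user command and response are meaningful for
+ * this flow (regular vs. RSS RAW QP), tolerating older rdma-core layouts
+ * that predate the ECE fields.
+ */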
+static int process_udata_size(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ size_t ucmd = sizeof(struct mlx5_ib_create_qp);
+ struct ib_udata *udata = params->udata;
+ size_t outlen = udata->outlen;
+ size_t inlen = udata->inlen;
+
+ params->outlen = min(outlen, sizeof(struct mlx5_ib_create_qp_resp));
+ params->ucmd_size = ucmd;
+ if (!params->is_rss_raw) {
+ /* User has old rdma-core, which doesn't support ECE */
+ size_t min_inlen =
+ offsetof(struct mlx5_ib_create_qp, ece_options);
+
+ /*
+ * We will check in check_ucmd_data() that the user
+ * cleared everything after inlen.
+ */
+ params->inlen = (inlen < min_inlen) ? 0 : min(inlen, ucmd);
+ goto out;
+ }
+
+ /* RSS RAW QP */
+ if (inlen < offsetofend(struct mlx5_ib_create_qp_rss, flags))
+ return -EINVAL;
+
+ if (outlen < offsetofend(struct mlx5_ib_create_qp_resp, bfreg_index))
+ return -EINVAL;
+
+ ucmd = sizeof(struct mlx5_ib_create_qp_rss);
+ params->ucmd_size = ucmd;
+ if (inlen > ucmd && !ib_is_udata_cleared(udata, ucmd, inlen - ucmd))
+ return -EINVAL;
+
+ params->inlen = min(ucmd, inlen);
+out:
+ if (!params->inlen)
+ mlx5_ib_dbg(dev, "udata is too small\n");
+
+ return (params->inlen) ? 0 : -EINVAL;
+}
+
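+/*
+ * Dispatch QP creation to the type-specific helper (RSS TIR, DCT, DCI,
+ * XRC_TGT, GSI, user or kernel QP) and fill in the resulting QP number.
+ */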
+static int create_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
+ struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ int err;
+
+ if (params->is_rss_raw) {
+ err = create_rss_raw_qp_tir(dev, pd, qp, params);
+ goto out;
+ }
+
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
+ err = create_dct(dev, pd, qp, params);
+ rdma_restrack_no_track(&qp->ibqp.res);
+ break;
+ case MLX5_IB_QPT_DCI:
+ err = create_dci(dev, pd, qp, params);
+ break;
+ case IB_QPT_XRC_TGT:
+ err = create_xrc_tgt_qp(dev, qp, params);
+ break;
+ case IB_QPT_GSI:
+ err = mlx5_ib_create_gsi(pd, qp, params->attr);
+ break;
+ case MLX5_IB_QPT_HW_GSI:
+ case MLX5_IB_QPT_REG_UMR:
+ rdma_restrack_no_track(&qp->ibqp.res);
+ fallthrough;
+ default:
+ if (params->udata)
+ err = create_user_qp(dev, pd, qp, params);
+ else
+ err = create_kernel_qp(dev, pd, qp, params);
+ }
+
+out:
+ if (err) {
+ mlx5_ib_err(dev, "Create QP type %d failed\n", qp->type);
+ return err;
+ }
+
+ if (is_qp0(qp->type))
+ qp->ibqp.qp_num = 0;
+ else if (is_qp1(qp->type))
+ qp->ibqp.qp_num = 1;
+ else
+ qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
+
+ mlx5_ib_dbg(dev,
+ "QP type %d, ib qpn 0x%X, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x, ece 0x%x\n",
+ qp->type, qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
+ params->attr->recv_cq ? to_mcq(params->attr->recv_cq)->mcq.cqn :
+ -1,
+ params->attr->send_cq ? to_mcq(params->attr->send_cq)->mcq.cqn :
+ -1,
+ params->resp.ece_options);
+
+ return 0;
+}
+
+static int check_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_init_attr *attr)
+{
+ int ret = 0;
+
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
+ ret = (!attr->srq || !attr->recv_cq) ? -EINVAL : 0;
+ break;
+ case MLX5_IB_QPT_DCI:
+ ret = (attr->cap.max_recv_wr || attr->cap.max_recv_sge) ?
+ -EINVAL :
+ 0;
+ break;
+ case IB_QPT_RAW_PACKET:
+ ret = (attr->rwq_ind_tbl && attr->send_cq) ? -EINVAL : 0;
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ mlx5_ib_dbg(dev, "QP type %d has wrong attributes\n", qp->type);
+
+ return ret;
+}
+
+static int get_qp_uidx(struct mlx5_ib_qp *qp,
+ struct mlx5_create_qp_params *params)
+{
+ struct mlx5_ib_create_qp *ucmd = params->ucmd;
+ struct ib_udata *udata = params->udata;
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+
+ if (params->is_rss_raw)
+ return 0;
+
+ return get_qp_user_index(ucontext, ucmd, sizeof(*ucmd), &params->uidx);
+}
+
+static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
+
+ if (mqp->state == IB_QPS_RTR) {
+ int err;
+
+ err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
+ if (err) {
+ mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
+ return err;
+ }
+ }
+
+ kfree(mqp->dct.in);
+ return 0;
+}
+
+static int check_ucmd_data(struct mlx5_ib_dev *dev,
+ struct mlx5_create_qp_params *params)
+{
+ struct ib_udata *udata = params->udata;
+ size_t size, last;
+ int ret;
+
+ if (params->is_rss_raw)
+ /*
+ * These QPs don't have a "reserved" field in their
+ * create_qp input struct, so their data is always valid.
+ */
+ last = sizeof(struct mlx5_ib_create_qp_rss);
+ else
+ last = offsetof(struct mlx5_ib_create_qp, reserved);
+
+ if (udata->inlen <= last)
+ return 0;
+
+ /*
+ * The user provides different create_qp structures based on the
+ * flow, and we need to know whether the memory after our
+ * create_qp struct was cleared.
+ */
+ size = udata->inlen - last;
+ ret = ib_is_udata_cleared(params->udata, last, size);
+ if (!ret)
+ mlx5_ib_dbg(
+ dev,
+ "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
+ udata->inlen, params->ucmd_size, last, size);
+ return ret ? 0 : -EINVAL;
+}
+
+int mlx5_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_create_qp_params params = {};
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct ib_pd *pd = ibqp->pd;
+ enum ib_qp_type type;
+ int err;
+
+ err = check_qp_type(dev, attr, &type);
+ if (err)
+ return err;
+
+ err = check_valid_flow(dev, pd, attr, udata);
+ if (err)
+ return err;
+
+ params.udata = udata;
+ params.uidx = MLX5_IB_DEFAULT_UIDX;
+ params.attr = attr;
+ params.is_rss_raw = !!attr->rwq_ind_tbl;
+
+ if (udata) {
+ err = process_udata_size(dev, &params);
+ if (err)
+ return err;
+
+ err = check_ucmd_data(dev, &params);
+ if (err)
+ return err;
+
+ params.ucmd = kzalloc(params.ucmd_size, GFP_KERNEL);
+ if (!params.ucmd)
+ return -ENOMEM;
+
+ err = ib_copy_from_udata(params.ucmd, udata, params.inlen);
+ if (err)
+ goto free_ucmd;
+ }
+
+ mutex_init(&qp->mutex);
+ qp->type = type;
+ if (udata) {
+ err = process_vendor_flags(dev, qp, params.ucmd, attr);
+ if (err)
+ goto free_ucmd;
+
+ err = get_qp_uidx(qp, &params);
+ if (err)
+ goto free_ucmd;
+ }
+ err = process_create_flags(dev, qp, attr);
+ if (err)
+ goto free_ucmd;
+
+ err = check_qp_attr(dev, qp, attr);
+ if (err)
+ goto free_ucmd;
+
+ err = create_qp(dev, pd, qp, &params);
+ if (err)
+ goto free_ucmd;
+
+ kfree(params.ucmd);
+ params.ucmd = NULL;
+
+ if (udata)
+ /*
+ * It is safe to copy the response for all user create QP flows,
+ * including MLX5_IB_QPT_DCT, which doesn't need it.
+ * In that case, resp will be filled with zeros.
+ */
+ err = ib_copy_to_udata(udata, &params.resp, params.outlen);
+ if (err)
+ goto destroy_qp;
+
+ return 0;
+
+destroy_qp:
+ switch (qp->type) {
+ case MLX5_IB_QPT_DCT:
+ mlx5_ib_destroy_dct(qp);
+ break;
+ case IB_QPT_GSI:
+ mlx5_ib_destroy_gsi(qp);
+ break;
+ default:
+ destroy_qp_common(dev, qp, udata);
+ }
+
+free_ucmd:
+ kfree(params.ucmd);
+ return err;
+}
+
+int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+
+ if (mqp->type == IB_QPT_GSI)
+ return mlx5_ib_destroy_gsi(mqp);
+
+ if (mqp->type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_destroy_dct(mqp);
+
+ destroy_qp_common(dev, mqp, udata);
+ return 0;
+}
+
+static int set_qpc_atomic_flags(struct mlx5_ib_qp *qp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ void *qpc)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+ u8 dest_rd_atomic;
+ u32 access_flags;
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ dest_rd_atomic = attr->max_dest_rd_atomic;
+ else
+ dest_rd_atomic = qp->trans_qp.resp_depth;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ access_flags = attr->qp_access_flags;
+ else
+ access_flags = qp->trans_qp.atomic_rd_en;
+
+ if (!dest_rd_atomic)
+ access_flags &= IB_ACCESS_REMOTE_WRITE;
+
+ MLX5_SET(qpc, qpc, rre, !!(access_flags & IB_ACCESS_REMOTE_READ));
+
+ if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, qp->type);
+ if (atomic_mode < 0)
+ return -EOPNOTSUPP;
+
+ MLX5_SET(qpc, qpc, rae, 1);
+ MLX5_SET(qpc, qpc, atomic_mode, atomic_mode);
+ }
+
+ MLX5_SET(qpc, qpc, rwe, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ return 0;
+}
+
+enum {
+ MLX5_PATH_FLAG_FL = 1 << 0,
+ MLX5_PATH_FLAG_FREE_AR = 1 << 1,
+ MLX5_PATH_FLAG_COUNTER = 1 << 2,
+};
+
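+/*
+ * Translate the device's static rate encoding back to enum ib_rate: low
+ * encodings come from a small lookup table, higher ones map linearly via
+ * MLX5_STAT_RATE_OFFSET.
+ */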
+static int mlx5_to_ib_rate_map(u8 rate)
+{
+ static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
+ IB_RATE_25_GBPS, IB_RATE_100_GBPS,
+ IB_RATE_200_GBPS, IB_RATE_50_GBPS,
+ IB_RATE_400_GBPS };
+
+ if (rate < ARRAY_SIZE(rates))
+ return rates[rate];
+
+ return rate - MLX5_STAT_RATE_OFFSET;
+}
+
+static int ib_to_mlx5_rate_map(u8 rate)
+{
+ switch (rate) {
+ case IB_RATE_PORT_CURRENT:
+ return 0;
+ case IB_RATE_56_GBPS:
+ return 1;
+ case IB_RATE_25_GBPS:
+ return 2;
+ case IB_RATE_100_GBPS:
+ return 3;
+ case IB_RATE_200_GBPS:
+ return 4;
+ case IB_RATE_50_GBPS:
+ return 5;
+ case IB_RATE_400_GBPS:
+ return 6;
+ default:
+ return rate + MLX5_STAT_RATE_OFFSET;
+ }
+
+ return 0;
+}
+
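+/*
+ * Convert a verbs static rate to the device encoding, stepping down to the
+ * nearest rate that the stat_rate_support capability reports as supported.
+ */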
+static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+{
+ u32 stat_rate_support;
+
+ if (rate == IB_RATE_PORT_CURRENT)
+ return 0;
+
+ if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS)
+ return -EINVAL;
+
+ stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support);
+ while (rate != IB_RATE_PORT_CURRENT &&
+ !(1 << ib_to_mlx5_rate_map(rate) & stat_rate_support))
+ --rate;
+
+ return ib_to_mlx5_rate_map(rate);
+}
+
+static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
+ struct mlx5_ib_sq *sq, u8 sl,
+ struct ib_pd *pd)
+{
+ void *in;
+ void *tisc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
+
+ tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
+ MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
+
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
+ struct mlx5_ib_sq *sq, u8 tx_affinity,
+ struct ib_pd *pd)
+{
+ void *in;
+ void *tisc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
+
+ tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
+ MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
+
+ err = mlx5_core_modify_tis(dev, sq->tisn, in);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_set_path_udp_sport(void *path, const struct rdma_ah_attr *ah,
+ u32 lqpn, u32 rqpn)
+{
+ u32 fl = ah->grh.flow_label;
+
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ MLX5_SET(ads, path, udp_sport, rdma_flow_label_to_udp_sport(fl));
+}
+
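+/*
+ * Fill an address path (ads) entry from an rdma_ah_attr: RoCE paths get a
+ * DMAC, UDP source port and DSCP, IB paths get LID/SL/GRH fields, and RAW
+ * PACKET QPs translate the SL into the TIS priority instead.
+ */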
+static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct rdma_ah_attr *ah, void *path, u8 port,
+ int attr_mask, u32 path_flags,
+ const struct ib_qp_attr *attr, bool alt)
+{
+ const struct ib_global_route *grh = rdma_ah_read_grh(ah);
+ int err;
+ enum ib_gid_type gid_type;
+ u8 ah_flags = rdma_ah_get_ah_flags(ah);
+ u8 sl = rdma_ah_get_sl(ah);
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ MLX5_SET(ads, path, pkey_index,
+ alt ? attr->alt_pkey_index : attr->pkey_index);
+
+ if (ah_flags & IB_AH_GRH) {
+ const struct ib_port_immutable *immutable;
+
+ immutable = ib_port_immutable_read(&dev->ib_dev, port);
+ if (grh->sgid_index >= immutable->gid_tbl_len) {
+ pr_err("sgid_index (%u) too large. max is %d\n",
+ grh->sgid_index,
+ immutable->gid_tbl_len);
+ return -EINVAL;
+ }
+ }
+
+ if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ if (!(ah_flags & IB_AH_GRH))
+ return -EINVAL;
+
+ ether_addr_copy(MLX5_ADDR_OF(ads, path, rmac_47_32),
+ ah->roce.dmac);
+ if ((qp->type == IB_QPT_RC ||
+ qp->type == IB_QPT_UC ||
+ qp->type == IB_QPT_XRC_INI ||
+ qp->type == IB_QPT_XRC_TGT) &&
+ (grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) &&
+ (attr_mask & IB_QP_DEST_QPN))
+ mlx5_set_path_udp_sport(path, ah,
+ qp->ibqp.qp_num,
+ attr->dest_qp_num);
+ MLX5_SET(ads, path, eth_prio, sl & 0x7);
+ gid_type = ah->grh.sgid_attr->gid_type;
+ if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ MLX5_SET(ads, path, dscp, grh->traffic_class >> 2);
+ } else {
+ MLX5_SET(ads, path, fl, !!(path_flags & MLX5_PATH_FLAG_FL));
+ MLX5_SET(ads, path, free_ar,
+ !!(path_flags & MLX5_PATH_FLAG_FREE_AR));
+ MLX5_SET(ads, path, rlid, rdma_ah_get_dlid(ah));
+ MLX5_SET(ads, path, mlid, rdma_ah_get_path_bits(ah));
+ MLX5_SET(ads, path, grh, !!(ah_flags & IB_AH_GRH));
+ MLX5_SET(ads, path, sl, sl);
+ }
+
+ if (ah_flags & IB_AH_GRH) {
+ MLX5_SET(ads, path, src_addr_index, grh->sgid_index);
+ MLX5_SET(ads, path, hop_limit, grh->hop_limit);
+ MLX5_SET(ads, path, tclass, grh->traffic_class);
+ MLX5_SET(ads, path, flow_label, grh->flow_label);
+ memcpy(MLX5_ADDR_OF(ads, path, rgid_rip), grh->dgid.raw,
+ sizeof(grh->dgid.raw));
+ }
+
+ err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
+ if (err < 0)
+ return err;
+ MLX5_SET(ads, path, stat_rate, err);
+ MLX5_SET(ads, path, vhca_port_num, port);
+
+ if (attr_mask & IB_QP_TIMEOUT)
+ MLX5_SET(ads, path, ack_timeout,
+ alt ? attr->alt_timeout : attr->timeout);
+
+ if ((qp->type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
+ return modify_raw_packet_eth_prio(dev->mdev,
+ &qp->raw_packet_qp.sq,
+ sl & 0xf, qp->ibqp.pd);
+
+ return 0;
+}
+
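+/*
+ * For each (current state, new state, service type) transition, the set of
+ * optional QPC parameters that the modify command is allowed to carry.
+ */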
+static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
+ [MLX5_QP_STATE_INIT] = {
+ [MLX5_QP_STATE_INIT] = {
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_Q_KEY |
+ MLX5_QP_OPTPAR_PRI_PORT,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_PRI_PORT |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
+ },
+ [MLX5_QP_STATE_RTR] = {
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_LAG_TX_AFF,
+ },
+ },
+ [MLX5_QP_STATE_RTR] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE,
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+ MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT,
+ },
+ },
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
+ MLX5_QP_OPTPAR_SRQN |
+ MLX5_QP_OPTPAR_CQN_RCV,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_PM_STATE |
+ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
+ },
+ },
+ [MLX5_QP_STATE_SQER] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
+ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
+ },
+ },
+ [MLX5_QP_STATE_SQD] = {
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+ [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
+ },
+ },
+};
+
+static int ib_nr_to_mlx5_nr(int ib_mask)
+{
+ switch (ib_mask) {
+ case IB_QP_STATE:
+ return 0;
+ case IB_QP_CUR_STATE:
+ return 0;
+ case IB_QP_EN_SQD_ASYNC_NOTIFY:
+ return 0;
+ case IB_QP_ACCESS_FLAGS:
+ return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
+ MLX5_QP_OPTPAR_RAE;
+ case IB_QP_PKEY_INDEX:
+ return MLX5_QP_OPTPAR_PKEY_INDEX;
+ case IB_QP_PORT:
+ return MLX5_QP_OPTPAR_PRI_PORT;
+ case IB_QP_QKEY:
+ return MLX5_QP_OPTPAR_Q_KEY;
+ case IB_QP_AV:
+ return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
+ MLX5_QP_OPTPAR_PRI_PORT;
+ case IB_QP_PATH_MTU:
+ return 0;
+ case IB_QP_TIMEOUT:
+ return MLX5_QP_OPTPAR_ACK_TIMEOUT;
+ case IB_QP_RETRY_CNT:
+ return MLX5_QP_OPTPAR_RETRY_COUNT;
+ case IB_QP_RNR_RETRY:
+ return MLX5_QP_OPTPAR_RNR_RETRY;
+ case IB_QP_RQ_PSN:
+ return 0;
+ case IB_QP_MAX_QP_RD_ATOMIC:
+ return MLX5_QP_OPTPAR_SRA_MAX;
+ case IB_QP_ALT_PATH:
+ return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
+ case IB_QP_MIN_RNR_TIMER:
+ return MLX5_QP_OPTPAR_RNR_TIMEOUT;
+ case IB_QP_SQ_PSN:
+ return 0;
+ case IB_QP_MAX_DEST_RD_ATOMIC:
+ return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
+ case IB_QP_PATH_MIG_STATE:
+ return MLX5_QP_OPTPAR_PM_STATE;
+ case IB_QP_CAP:
+ return 0;
+ case IB_QP_DEST_QPN:
+ return 0;
+ }
+ return 0;
+}
+
+static int ib_mask_to_mlx5_opt(int ib_mask)
+{
+ int result = 0;
+ int i;
+
+ for (i = 0; i < 8 * sizeof(int); i++) {
+ if ((1 << i) & ib_mask)
+ result |= ib_nr_to_mlx5_nr(1 << i);
+ }
+
+ return result;
+}
+
+static int modify_raw_packet_qp_rq(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
+{
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_rq_in, in, rq_state, rq->state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+ MLX5_SET(rqc, rqc, state, new_state);
+
+ if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
+ if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
+ MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
+ } else
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "RAW PACKET QP counters are not supported on current FW\n");
+ }
+
+ err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in);
+ if (err)
+ goto out;
+
+ rq->state = new_state;
+
+out:
+ kvfree(in);
+ return err;
+}
+
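+/*
+ * Modify the SQ of a RAW PACKET QP, including packet-pacing rate limits:
+ * a new rate is added to the rate table before the SQ is modified, and the
+ * old one is only released once the new state is in place.
+ */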
+static int modify_raw_packet_qp_sq(
+ struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
+{
+ struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
+ struct mlx5_rate_limit old_rl = ibqp->rl;
+ struct mlx5_rate_limit new_rl = old_rl;
+ bool new_rate_added = false;
+ u16 rl_index = 0;
+ void *in;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
+ MLX5_SET(modify_sq_in, in, sq_state, sq->state);
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, new_state);
+
+ if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
+ if (new_state != MLX5_SQC_STATE_RDY)
+ pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
+ __func__);
+ else
+ new_rl = raw_qp_param->rl;
+ }
+
+ if (!mlx5_rl_are_equal(&old_rl, &new_rl)) {
+ if (new_rl.rate) {
+ err = mlx5_rl_add_rate(dev, &rl_index, &new_rl);
+ if (err) {
+ pr_err("Failed configuring rate limit(err %d): \
+ rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
+ err, new_rl.rate, new_rl.max_burst_sz,
+ new_rl.typical_pkt_sz);
+
+ goto out;
+ }
+ new_rate_added = true;
+ }
+
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+ /* index 0 means no limit */
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+ }
+
+ err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in);
+ if (err) {
+ /* Remove new rate from table if failed */
+ if (new_rate_added)
+ mlx5_rl_remove_rate(dev, &new_rl);
+ goto out;
+ }
+
+ /* Only remove the old rate after new rate was set */
+ if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+ (new_state != MLX5_SQC_STATE_RDY)) {
+ mlx5_rl_remove_rate(dev, &old_rl);
+ if (new_state != MLX5_SQC_STATE_RDY)
+ memset(&new_rl, 0, sizeof(new_rl));
+ }
+
+ ibqp->rl = new_rl;
+ sq->state = new_state;
+
+out:
+ kvfree(in);
+ return err;
+}
+
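+/*
+ * RAW PACKET QPs are modified by driving their RQ and SQ state machines
+ * separately, since there is no monolithic QP object in firmware for them.
+ */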
+static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param,
+ u8 tx_affinity)
+{
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ int modify_rq = !!qp->rq.wqe_cnt;
+ int modify_sq = !!qp->sq.wqe_cnt;
+ int rq_state;
+ int sq_state;
+ int err;
+
+ switch (raw_qp_param->operation) {
+ case MLX5_CMD_OP_RST2INIT_QP:
+ rq_state = MLX5_RQC_STATE_RDY;
+ sq_state = MLX5_SQC_STATE_RST;
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ rq_state = MLX5_RQC_STATE_ERR;
+ sq_state = MLX5_SQC_STATE_ERR;
+ break;
+ case MLX5_CMD_OP_2RST_QP:
+ rq_state = MLX5_RQC_STATE_RST;
+ sq_state = MLX5_SQC_STATE_RST;
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ if (raw_qp_param->set_mask & ~MLX5_RAW_QP_RATE_LIMIT)
+ return -EINVAL;
+
+ modify_rq = 0;
+ sq_state = MLX5_SQC_STATE_RDY;
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ if (raw_qp_param->set_mask)
+ return -EINVAL;
+ else
+ return 0;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (modify_rq) {
+ err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
+ qp->ibqp.pd);
+ if (err)
+ return err;
+ }
+
+ if (modify_sq) {
+ struct mlx5_flow_handle *flow_rule;
+
+ if (tx_affinity) {
+ err = modify_raw_packet_tx_affinity(dev->mdev, sq,
+ tx_affinity,
+ qp->ibqp.pd);
+ if (err)
+ return err;
+ }
+
+ flow_rule = create_flow_rule_vport_sq(dev, sq,
+ raw_qp_param->port);
+ if (IS_ERR(flow_rule))
+ return PTR_ERR(flow_rule);
+
+ err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
+ raw_qp_param, qp->ibqp.pd);
+ if (err) {
+ if (flow_rule)
+ mlx5_del_flow_rules(flow_rule);
+ return err;
+ }
+
+ if (flow_rule) {
+ destroy_flow_rule_vport_sq(sq);
+ sq->flow_rule = flow_rule;
+ }
+
+ return err;
+ }
+
+ return 0;
+}
+
+static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ atomic_t *tx_port_affinity;
+
+ if (ucontext)
+ tx_port_affinity = &ucontext->tx_port_affinity;
+ else
+ tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
+
+ return (unsigned int)atomic_add_return(1, tx_port_affinity) %
+ (dev->lag_active ? dev->lag_ports : MLX5_CAP_GEN(dev->mdev, num_lag_ports)) + 1;
+}
+
+static bool qp_supports_affinity(struct mlx5_ib_qp *qp)
+{
+ if ((qp->type == IB_QPT_RC) || (qp->type == IB_QPT_UD) ||
+ (qp->type == IB_QPT_UC) || (qp->type == IB_QPT_RAW_PACKET) ||
+ (qp->type == IB_QPT_XRC_INI) || (qp->type == IB_QPT_XRC_TGT) ||
+ (qp->type == MLX5_IB_QPT_DCI))
+ return true;
+ return false;
+}
+
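+/*
+ * Pick the LAG tx port affinity for this transition: QP1 keeps its stored
+ * port, RESET->INIT picks one round-robin, and later transitions follow the
+ * xmit_slave netdev supplied in the AV attributes.
+ */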
+static unsigned int get_tx_affinity(struct ib_qp *qp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, u8 init,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_qp_base *qp_base;
+ unsigned int tx_affinity;
+
+ if (!(mlx5_ib_lag_should_assign_affinity(dev) &&
+ qp_supports_affinity(mqp)))
+ return 0;
+
+ if (mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ tx_affinity = mqp->gsi_lag_port;
+ else if (init)
+ tx_affinity = get_tx_affinity_rr(dev, udata);
+ else if ((attr_mask & IB_QP_AV) && attr->xmit_slave)
+ tx_affinity =
+ mlx5_lag_get_slave_port(dev->mdev, attr->xmit_slave);
+ else
+ return 0;
+
+ qp_base = &mqp->trans_qp.base;
+ if (ucontext)
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
+ tx_affinity, qp_base->mqp.qpn, ucontext);
+ else
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
+ tx_affinity, qp_base->mqp.qpn);
+ return tx_affinity;
+}
+
+static int __mlx5_ib_qp_set_raw_qp_counter(struct mlx5_ib_qp *qp, u32 set_id,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ u32 in[MLX5_ST_SZ_DW(modify_rq_in)] = {};
+ void *rqc;
+
+ if (!qp->rq.wqe_cnt)
+ return 0;
+
+ MLX5_SET(modify_rq_in, in, rq_state, rq->state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(qp->ibqp.pd)->uid);
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
+ MLX5_SET(rqc, rqc, counter_set_id, set_id);
+
+ return mlx5_core_modify_rq(mdev, rq->base.mqp.qpn, in);
+}
+
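+/*
+ * Bind a QP to a counter set: RAW PACKET QPs update the RQ counter_set_id,
+ * all others use an RTS2RTS modify with the COUNTER_SET_ID optional param.
+ */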
+static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
+ struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ u32 in[MLX5_ST_SZ_DW(rts2rts_qp_in)] = {};
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ struct mlx5_ib_qp_base *base;
+ u32 set_id;
+ u32 *qpc;
+
+ if (counter)
+ set_id = counter->id;
+ else
+ set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
+
+ if (mqp->type == IB_QPT_RAW_PACKET)
+ return __mlx5_ib_qp_set_raw_qp_counter(mqp, set_id, dev->mdev);
+
+ base = &mqp->trans_qp.base;
+ MLX5_SET(rts2rts_qp_in, in, opcode, MLX5_CMD_OP_RTS2RTS_QP);
+ MLX5_SET(rts2rts_qp_in, in, qpn, base->mqp.qpn);
+ MLX5_SET(rts2rts_qp_in, in, uid, base->mqp.uid);
+ MLX5_SET(rts2rts_qp_in, in, opt_param_mask,
+ MLX5_QP_OPTPAR_COUNTER_SET_ID);
+
+ qpc = MLX5_ADDR_OF(rts2rts_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
+ return mlx5_cmd_exec_in(dev->mdev, rts2rts_qp, in);
+}
+
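+/*
+ * Core modify-QP implementation: translate the verbs attributes into a QPC
+ * mailbox, pick the firmware opcode from the state transition table, mask
+ * the optional parameters, then either drive the raw packet path or issue
+ * the QP modify command.
+ */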
+static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr, int attr_mask,
+ enum ib_qp_state cur_state,
+ enum ib_qp_state new_state,
+ const struct mlx5_ib_modify_qp *ucmd,
+ struct mlx5_ib_modify_qp_resp *resp,
+ struct ib_udata *udata)
+{
+ static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
+ [MLX5_QP_STATE_RST] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
+ },
+ [MLX5_QP_STATE_INIT] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
+ [MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
+ },
+ [MLX5_QP_STATE_RTR] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
+ },
+ [MLX5_QP_STATE_RTS] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
+ },
+ [MLX5_QP_STATE_SQD] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD_RTS_QP,
+ },
+ [MLX5_QP_STATE_SQER] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
+ },
+ [MLX5_QP_STATE_ERR] = {
+ [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
+ [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
+ }
+ };
+
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
+ struct mlx5_ib_cq *send_cq, *recv_cq;
+ struct mlx5_ib_pd *pd;
+ enum mlx5_qp_state mlx5_cur, mlx5_new;
+ void *qpc, *pri_path, *alt_path;
+ enum mlx5_qp_optpar optpar = 0;
+ u32 set_id = 0;
+ int mlx5_st;
+ int err;
+ u16 op;
+ u8 tx_affinity = 0;
+
+ mlx5_st = to_mlx5_st(qp->type);
+ if (mlx5_st < 0)
+ return -EINVAL;
+
+ qpc = kzalloc(MLX5_ST_SZ_BYTES(qpc), GFP_KERNEL);
+ if (!qpc)
+ return -ENOMEM;
+
+ pd = to_mpd(qp->ibqp.pd);
+ MLX5_SET(qpc, qpc, st, mlx5_st);
+
+ if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ } else {
+ switch (attr->path_mig_state) {
+ case IB_MIG_MIGRATED:
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+ break;
+ case IB_MIG_REARM:
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_REARM);
+ break;
+ case IB_MIG_ARMED:
+ MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_ARMED);
+ break;
+ }
+ }
+
+ tx_affinity = get_tx_affinity(ibqp, attr, attr_mask,
+ cur_state == IB_QPS_RESET &&
+ new_state == IB_QPS_INIT, udata);
+
+ MLX5_SET(qpc, qpc, lag_tx_port_affinity, tx_affinity);
+ if (tx_affinity && new_state == IB_QPS_RTR &&
+ MLX5_CAP_GEN(dev->mdev, init2_lag_tx_port_affinity))
+ optpar |= MLX5_QP_OPTPAR_LAG_TX_AFF;
+
+ if (is_sqp(qp->type)) {
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_256);
+ MLX5_SET(qpc, qpc, log_msg_max, 8);
+ } else if ((qp->type == IB_QPT_UD &&
+ !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) ||
+ qp->type == MLX5_IB_QPT_REG_UMR) {
+ MLX5_SET(qpc, qpc, mtu, IB_MTU_4096);
+ MLX5_SET(qpc, qpc, log_msg_max, 12);
+ } else if (attr_mask & IB_QP_PATH_MTU) {
+ if (attr->path_mtu < IB_MTU_256 ||
+ attr->path_mtu > IB_MTU_4096) {
+ mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
+ err = -EINVAL;
+ goto out;
+ }
+ MLX5_SET(qpc, qpc, mtu, attr->path_mtu);
+ MLX5_SET(qpc, qpc, log_msg_max,
+ MLX5_CAP_GEN(dev->mdev, log_max_msg));
+ }
+
+ if (attr_mask & IB_QP_DEST_QPN)
+ MLX5_SET(qpc, qpc, remote_qpn, attr->dest_qp_num);
+
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
+
+ if (attr_mask & IB_QP_PKEY_INDEX)
+ MLX5_SET(ads, pri_path, pkey_index, attr->pkey_index);
+
+ /* TODO: implement counter_index functionality */
+
+ if (is_sqp(qp->type))
+ MLX5_SET(ads, pri_path, vhca_port_num, qp->port);
+
+ if (attr_mask & IB_QP_PORT)
+ MLX5_SET(ads, pri_path, vhca_port_num, attr->port_num);
+
+ if (attr_mask & IB_QP_AV) {
+ err = mlx5_set_path(dev, qp, &attr->ah_attr, pri_path,
+ attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port,
+ attr_mask, 0, attr, false);
+ if (err)
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_TIMEOUT)
+ MLX5_SET(ads, pri_path, ack_timeout, attr->timeout);
+
+ if (attr_mask & IB_QP_ALT_PATH) {
+ err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, alt_path,
+ attr->alt_port_num,
+ attr_mask | IB_QP_PKEY_INDEX |
+ IB_QP_TIMEOUT,
+ 0, attr, true);
+ if (err)
+ goto out;
+ }
+
+ get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
+ &send_cq, &recv_cq);
+
+ MLX5_SET(qpc, qpc, pd, pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
+ if (send_cq)
+ MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn);
+ if (recv_cq)
+ MLX5_SET(qpc, qpc, cqn_rcv, recv_cq->mcq.cqn);
+
+ MLX5_SET(qpc, qpc, log_ack_req_freq, MLX5_IB_ACK_REQ_FREQ);
+
+ if (attr_mask & IB_QP_RNR_RETRY)
+ MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
+
+ if (attr_mask & IB_QP_RETRY_CNT)
+ MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && attr->max_rd_atomic)
+ MLX5_SET(qpc, qpc, log_sra_max, ilog2(attr->max_rd_atomic));
+
+ if (attr_mask & IB_QP_SQ_PSN)
+ MLX5_SET(qpc, qpc, next_send_psn, attr->sq_psn);
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && attr->max_dest_rd_atomic)
+ MLX5_SET(qpc, qpc, log_rra_max,
+ ilog2(attr->max_dest_rd_atomic));
+
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+ err = set_qpc_atomic_flags(qp, attr, attr_mask, qpc);
+ if (err)
+ goto out;
+ }
+
+ if (attr_mask & IB_QP_MIN_RNR_TIMER)
+ MLX5_SET(qpc, qpc, min_rnr_nak, attr->min_rnr_timer);
+
+ if (attr_mask & IB_QP_RQ_PSN)
+ MLX5_SET(qpc, qpc, next_rcv_psn, attr->rq_psn);
+
+ if (attr_mask & IB_QP_QKEY)
+ MLX5_SET(qpc, qpc, q_key, attr->qkey);
+
+ if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
+ qp->port) - 1;
+
+ /* Underlay port should be used - index 0 function per port */
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN)
+ port_num = 0;
+
+ if (ibqp->counter)
+ set_id = ibqp->counter->id;
+ else
+ set_id = mlx5_ib_get_counters_id(dev, port_num);
+ MLX5_SET(qpc, qpc, counter_set_id, set_id);
+ }
+
+ if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
+ MLX5_SET(qpc, qpc, rlky, 1);
+
+ if (qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)
+ MLX5_SET(qpc, qpc, deth_sqpn, 1);
+
+ mlx5_cur = to_mlx5_state(cur_state);
+ mlx5_new = to_mlx5_state(new_state);
+
+ if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
+ !optab[mlx5_cur][mlx5_new]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ op = optab[mlx5_cur][mlx5_new];
+ optpar |= ib_mask_to_mlx5_opt(attr_mask);
+ optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
+
+ if (qp->type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ struct mlx5_modify_raw_qp_param raw_qp_param = {};
+
+ raw_qp_param.operation = op;
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ raw_qp_param.rq_q_ctr_id = set_id;
+ raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
+ }
+
+ if (attr_mask & IB_QP_PORT)
+ raw_qp_param.port = attr->port_num;
+
+ if (attr_mask & IB_QP_RATE_LIMIT) {
+ raw_qp_param.rl.rate = attr->rate_limit;
+
+ if (ucmd->burst_info.max_burst_sz) {
+ if (attr->rate_limit &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing_burst_bound)) {
+ raw_qp_param.rl.max_burst_sz =
+ ucmd->burst_info.max_burst_sz;
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (ucmd->burst_info.typical_pkt_sz) {
+ if (attr->rate_limit &&
+ MLX5_CAP_QOS(dev->mdev, packet_pacing_typical_size)) {
+ raw_qp_param.rl.typical_pkt_sz =
+ ucmd->burst_info.typical_pkt_sz;
+ } else {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
+ }
+
+ err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
+ } else {
+ if (udata) {
+ /* For the kernel flows, the resp will stay zero */
+ resp->ece_options =
+ MLX5_CAP_GEN(dev->mdev, ece_support) ?
+ ucmd->ece_options : 0;
+ resp->response_length = sizeof(*resp);
+ }
+ err = mlx5_core_qp_modify(dev, op, optpar, qpc, &base->mqp,
+ &resp->ece_options);
+ }
+
+ if (err)
+ goto out;
+
+ qp->state = new_state;
+
+ if (attr_mask & IB_QP_ACCESS_FLAGS)
+ qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
+ if (attr_mask & IB_QP_PORT)
+ qp->port = attr->port_num;
+ if (attr_mask & IB_QP_ALT_PATH)
+ qp->trans_qp.alt_port = attr->alt_port_num;
+
+ /*
+ * If we moved a kernel QP to RESET, clean up all old CQ
+ * entries and reinitialize the QP.
+ */
+ if (new_state == IB_QPS_RESET &&
+ !ibqp->uobject && qp->type != IB_QPT_XRC_TGT) {
+ mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
+ ibqp->srq ? to_msrq(ibqp->srq) : NULL);
+ if (send_cq != recv_cq)
+ mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);
+
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+ qp->sq.cur_post = 0;
+ if (qp->sq.wqe_cnt)
+ qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
+ qp->sq.last_poll = 0;
+ qp->db.db[MLX5_RCV_DBR] = 0;
+ qp->db.db[MLX5_SND_DBR] = 0;
+ }
+
+ if ((new_state == IB_QPS_RTS) && qp->counter_pending) {
+ err = __mlx5_ib_qp_set_counter(ibqp, ibqp->counter);
+ if (!err)
+ qp->counter_pending = 0;
+ }
+
+out:
+ kfree(qpc);
+ return err;
+}
+
+static inline bool is_valid_mask(int mask, int req, int opt)
+{
+ if ((mask & req) != req)
+ return false;
+
+ if (mask & ~(req | opt))
+ return false;
+
+ return true;
+}
+
+/* Check valid transitions for driver QP types.
+ * For now, the only QP type that this function supports is DCI.
+ */
+static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new_state,
+ enum ib_qp_attr_mask attr_mask)
+{
+ int req = IB_QP_STATE;
+ int opt = 0;
+
+ if (new_state == IB_QPS_RESET) {
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+ opt = IB_QP_PKEY_INDEX | IB_QP_PORT;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ req |= IB_QP_PATH_MTU;
+ opt = IB_QP_PKEY_INDEX | IB_QP_AV;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+ req |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
+ IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN;
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RTS && new_state == IB_QPS_RTS) {
+ opt = IB_QP_MIN_RNR_TIMER;
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state != IB_QPS_RESET && new_state == IB_QPS_ERR) {
+ return is_valid_mask(attr_mask, req, opt);
+ }
+ return false;
+}
+
+/* mlx5_ib_modify_dct: modify a DCT QP
+ * valid transitions are:
+ * RESET to INIT: must set access_flags, pkey_index and port
+ * INIT to RTR : must set min_rnr_timer, tclass, flow_label,
+ * mtu, gid_index and hop_limit
+ * Other transitions and attributes are illegal
+ */
+static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct mlx5_ib_modify_qp *ucmd,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ enum ib_qp_state cur_state, new_state;
+ int required = IB_QP_STATE;
+ void *dctc;
+ int err;
+
+ if (!(attr_mask & IB_QP_STATE))
+ return -EINVAL;
+
+ cur_state = qp->state;
+ new_state = attr->qp_state;
+
+ dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
+ if (MLX5_CAP_GEN(dev->mdev, ece_support) && ucmd->ece_options)
+		/*
+		 * A DCT doesn't initialize its QP until the modify command is
+		 * executed, so we need to overwrite a previously set ECE field
+		 * if the user provided any value except zero, which means not
+		 * set/not valid.
+		 */
+ MLX5_SET(dctc, dctc, ece, ucmd->ece_options);
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ u16 set_id;
+
+ required |= IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+
+ if (attr->port_num == 0 ||
+ attr->port_num > dev->num_ports) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
+ return -EINVAL;
+ }
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+ MLX5_SET(dctc, dctc, rre, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+ MLX5_SET(dctc, dctc, rwe, 1);
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
+ if (atomic_mode < 0)
+ return -EOPNOTSUPP;
+
+ MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
+ MLX5_SET(dctc, dctc, rae, 1);
+ }
+ MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
+ if (mlx5_lag_is_active(dev->mdev))
+ MLX5_SET(dctc, dctc, port,
+ get_tx_affinity_rr(dev, udata));
+ else
+ MLX5_SET(dctc, dctc, port, attr->port_num);
+
+ set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
+ MLX5_SET(dctc, dctc, counter_set_id, set_id);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ struct mlx5_ib_modify_qp_resp resp = {};
+ u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
+ u32 min_resp_len = offsetofend(typeof(resp), dctn);
+
+ if (udata->outlen < min_resp_len)
+ return -EINVAL;
+ /*
+ * If we don't have enough space for the ECE options,
+ * simply indicate it with resp.response_length.
+ */
+ resp.response_length = (udata->outlen < sizeof(resp)) ?
+ min_resp_len :
+ sizeof(resp);
+
+ required |= IB_QP_MIN_RNR_TIMER | IB_QP_AV | IB_QP_PATH_MTU;
+ if (!is_valid_mask(attr_mask, required, 0))
+ return -EINVAL;
+ MLX5_SET(dctc, dctc, min_rnr_nak, attr->min_rnr_timer);
+ MLX5_SET(dctc, dctc, tclass, attr->ah_attr.grh.traffic_class);
+ MLX5_SET(dctc, dctc, flow_label, attr->ah_attr.grh.flow_label);
+ MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
+ MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
+ MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
+
+ err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
+ MLX5_ST_SZ_BYTES(create_dct_in), out,
+ sizeof(out));
+ err = mlx5_cmd_check(dev->mdev, err, qp->dct.in, out);
+ if (err)
+ return err;
+ resp.dctn = qp->dct.mdct.mqp.qpn;
+ if (MLX5_CAP_GEN(dev->mdev, ece_support))
+ resp.ece_options = MLX5_GET(create_dct_out, out, ece);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err) {
+ mlx5_core_destroy_dct(dev, &qp->dct.mdct);
+ return err;
+ }
+ } else {
+ mlx5_ib_warn(dev, "Modify DCT: Invalid transition from %d to %d\n", cur_state, new_state);
+ return -EINVAL;
+ }
+
+ qp->state = new_state;
+ return 0;
+}
+
+static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp)
+{
+ if (dev->profile != &raw_eth_profile)
+ return true;
+
+ if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR)
+ return true;
+
+	/* Internal QP used for write-combining (WC) testing, with NOPs in the WQ */
+ if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST)
+ return true;
+
+ return false;
+}
+
+static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
+ int attr_mask, enum ib_qp_type qp_type)
+{
+ int log_max_ra_res;
+ int log_max_ra_req;
+
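+	/*
+	 * Despite the "log_" prefix, these variables hold the maximum counts
+	 * (1 << capability), so they are compared directly against the
+	 * requested values below.
+	 */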
+ if (qp_type == MLX5_IB_QPT_DCI) {
+ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_res_dc);
+ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_req_dc);
+ } else {
+ log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_res_qp);
+ log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+ log_max_ra_req_qp);
+ }
+
+ if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+ attr->max_rd_atomic > log_max_ra_res) {
+ mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+ attr->max_rd_atomic);
+ return false;
+ }
+
+ if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+ attr->max_dest_rd_atomic > log_max_ra_req) {
+ mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+ attr->max_dest_rd_atomic);
+ return false;
+ }
+ return true;
+}
+
+int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_modify_qp_resp resp = {};
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_ib_modify_qp ucmd = {};
+ enum ib_qp_type qp_type;
+ enum ib_qp_state cur_state, new_state;
+ int err = -EINVAL;
+
+ if (!mlx5_ib_modify_qp_allowed(dev, qp))
+ return -EOPNOTSUPP;
+
+ if (attr_mask & ~(IB_QP_ATTR_STANDARD_BITS | IB_QP_RATE_LIMIT))
+ return -EOPNOTSUPP;
+
+ if (ibqp->rwq_ind_tbl)
+ return -ENOSYS;
+
+ if (udata && udata->inlen) {
+ if (udata->inlen < offsetofend(typeof(ucmd), ece_options))
+ return -EINVAL;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&ucmd, udata,
+ min(udata->inlen, sizeof(ucmd))))
+ return -EFAULT;
+
+ if (ucmd.comp_mask ||
+ memchr_inv(&ucmd.burst_info.reserved, 0,
+ sizeof(ucmd.burst_info.reserved)))
+ return -EOPNOTSUPP;
+
+ }
+
+ if (qp->type == IB_QPT_GSI)
+ return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
+
+ qp_type = (qp->type == MLX5_IB_QPT_HW_GSI) ? IB_QPT_GSI : qp->type;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ return mlx5_ib_modify_dct(ibqp, attr, attr_mask, &ucmd, udata);
+
+ mutex_lock(&qp->mutex);
+
+ cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
+ new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
+
+ if (qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ if (attr_mask & ~(IB_QP_STATE | IB_QP_CUR_STATE)) {
+ mlx5_ib_dbg(dev, "invalid attr_mask 0x%x when underlay QP is used\n",
+ attr_mask);
+ goto out;
+ }
+ } else if (qp_type != MLX5_IB_QPT_REG_UMR &&
+ qp_type != MLX5_IB_QPT_DCI &&
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
+ attr_mask)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, qp->type, attr_mask);
+ goto out;
+ } else if (qp_type == MLX5_IB_QPT_DCI &&
+ !modify_dci_qp_is_ok(cur_state, new_state, attr_mask)) {
+ mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+ cur_state, new_state, qp_type, attr_mask);
+ goto out;
+ }
+
+ if ((attr_mask & IB_QP_PORT) &&
+ (attr->port_num == 0 ||
+ attr->port_num > dev->num_ports)) {
+ mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+ attr->port_num, dev->num_ports);
+ goto out;
+ }
+
+ if ((attr_mask & IB_QP_PKEY_INDEX) &&
+ attr->pkey_index >= dev->pkey_table_len) {
+ mlx5_ib_dbg(dev, "invalid pkey index %d\n", attr->pkey_index);
+ goto out;
+ }
+
+ if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
+ goto out;
+
+ if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+ err = 0;
+ goto out;
+ }
+
+ err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state,
+ new_state, &ucmd, &resp, udata);
+
+	/* resp.response_length is set in ECE-supported flows only */
+ if (!err && resp.response_length &&
+ udata->outlen >= resp.response_length)
+		/* Return -EFAULT to the user and expect them to destroy the QP. */
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+
+out:
+ mutex_unlock(&qp->mutex);
+ return err;
+}
+
+static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
+{
+ switch (mlx5_state) {
+ case MLX5_QP_STATE_RST: return IB_QPS_RESET;
+ case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
+ case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
+ case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
+ case MLX5_QP_STATE_SQ_DRAINING:
+ case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
+ case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
+ case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
+ default: return -1;
+ }
+}
+
+static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
+{
+ switch (mlx5_mig_state) {
+ case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
+ case MLX5_QP_PM_REARM: return IB_MIG_REARM;
+ case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
+ default: return -1;
+ }
+}
+
+static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
+ struct rdma_ah_attr *ah_attr, void *path)
+{
+ int port = MLX5_GET(ads, path, vhca_port_num);
+ int static_rate;
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+
+ if (!port || port > ibdev->num_ports)
+ return;
+
+ ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port);
+
+ rdma_ah_set_port_num(ah_attr, port);
+ rdma_ah_set_sl(ah_attr, MLX5_GET(ads, path, sl));
+
+ rdma_ah_set_dlid(ah_attr, MLX5_GET(ads, path, rlid));
+ rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
+
+ static_rate = MLX5_GET(ads, path, stat_rate);
+ rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
+ if (MLX5_GET(ads, path, grh) ||
+ ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
+ rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
+ MLX5_GET(ads, path, src_addr_index),
+ MLX5_GET(ads, path, hop_limit),
+ MLX5_GET(ads, path, tclass));
+ rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
+ }
+}
+
+static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq,
+ u8 *sq_state)
+{
+ int err;
+
+ err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
+ if (err)
+ goto out;
+ sq->state = *sq_state;
+
+out:
+ return err;
+}
+
+static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq,
+ u8 *rq_state)
+{
+ void *out;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(query_rq_out);
+ out = kvzalloc(inlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
+ if (err)
+ goto out;
+
+ rqc = MLX5_ADDR_OF(query_rq_out, out, rq_context);
+ *rq_state = MLX5_GET(rqc, rqc, state);
+ rq->state = *rq_state;
+
+out:
+ kvfree(out);
+ return err;
+}
+
+static int sqrq_state_to_qp_state(u8 sq_state, u8 rq_state,
+ struct mlx5_ib_qp *qp, u8 *qp_state)
+{
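+	/*
+	 * Map the (RQ state, SQ state) pair of a Raw Packet QP to an IB QP
+	 * state. MLX5_QP_STATE means "keep the currently cached QP state" and
+	 * MLX5_QP_STATE_BAD marks a combination that should never happen.
+	 */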
+ static const u8 sqrq_trans[MLX5_RQ_NUM_STATE][MLX5_SQ_NUM_STATE] = {
+ [MLX5_RQC_STATE_RST] = {
+ [MLX5_SQC_STATE_RST] = IB_QPS_RESET,
+ [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD,
+ [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE_BAD,
+ [MLX5_SQ_STATE_NA] = IB_QPS_RESET,
+ },
+ [MLX5_RQC_STATE_RDY] = {
+ [MLX5_SQC_STATE_RST] = MLX5_QP_STATE,
+ [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE,
+ [MLX5_SQC_STATE_ERR] = IB_QPS_SQE,
+ [MLX5_SQ_STATE_NA] = MLX5_QP_STATE,
+ },
+ [MLX5_RQC_STATE_ERR] = {
+ [MLX5_SQC_STATE_RST] = MLX5_QP_STATE_BAD,
+ [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE_BAD,
+ [MLX5_SQC_STATE_ERR] = IB_QPS_ERR,
+ [MLX5_SQ_STATE_NA] = IB_QPS_ERR,
+ },
+ [MLX5_RQ_STATE_NA] = {
+ [MLX5_SQC_STATE_RST] = MLX5_QP_STATE,
+ [MLX5_SQC_STATE_RDY] = MLX5_QP_STATE,
+ [MLX5_SQC_STATE_ERR] = MLX5_QP_STATE,
+ [MLX5_SQ_STATE_NA] = MLX5_QP_STATE_BAD,
+ },
+ };
+
+ *qp_state = sqrq_trans[rq_state][sq_state];
+
+ if (*qp_state == MLX5_QP_STATE_BAD) {
+ WARN(1, "Buggy Raw Packet QP state, SQ 0x%x state: 0x%x, RQ 0x%x state: 0x%x",
+ qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
+ qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
+ return -EINVAL;
+ }
+
+ if (*qp_state == MLX5_QP_STATE)
+ *qp_state = qp->state;
+
+ return 0;
+}
+
+static int query_raw_packet_qp_state(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ u8 *raw_packet_qp_state)
+{
+ struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
+ struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+ struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
+ int err;
+ u8 sq_state = MLX5_SQ_STATE_NA;
+ u8 rq_state = MLX5_RQ_STATE_NA;
+
+ if (qp->sq.wqe_cnt) {
+ err = query_raw_packet_qp_sq_state(dev, sq, &sq_state);
+ if (err)
+ return err;
+ }
+
+ if (qp->rq.wqe_cnt) {
+ err = query_raw_packet_qp_rq_state(dev, rq, &rq_state);
+ if (err)
+ return err;
+ }
+
+ return sqrq_state_to_qp_state(sq_state, rq_state, qp,
+ raw_packet_qp_state);
+}
+
+static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ struct ib_qp_attr *qp_attr)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
+ void *qpc, *pri_path, *alt_path;
+ u32 *outb;
+ int err;
+
+ outb = kzalloc(outlen, GFP_KERNEL);
+ if (!outb)
+ return -ENOMEM;
+
+ err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen);
+ if (err)
+ goto out;
+
+ qpc = MLX5_ADDR_OF(query_qp_out, outb, qpc);
+
+ qp->state = to_ib_qp_state(MLX5_GET(qpc, qpc, state));
+ if (MLX5_GET(qpc, qpc, state) == MLX5_QP_STATE_SQ_DRAINING)
+ qp_attr->sq_draining = 1;
+
+ qp_attr->path_mtu = MLX5_GET(qpc, qpc, mtu);
+ qp_attr->path_mig_state = to_ib_mig_state(MLX5_GET(qpc, qpc, pm_state));
+ qp_attr->qkey = MLX5_GET(qpc, qpc, q_key);
+ qp_attr->rq_psn = MLX5_GET(qpc, qpc, next_rcv_psn);
+ qp_attr->sq_psn = MLX5_GET(qpc, qpc, next_send_psn);
+ qp_attr->dest_qp_num = MLX5_GET(qpc, qpc, remote_qpn);
+
+ if (MLX5_GET(qpc, qpc, rre))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(qpc, qpc, rwe))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(qpc, qpc, rae))
+ qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ qp_attr->max_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_sra_max);
+ qp_attr->max_dest_rd_atomic = 1 << MLX5_GET(qpc, qpc, log_rra_max);
+ qp_attr->min_rnr_timer = MLX5_GET(qpc, qpc, min_rnr_nak);
+ qp_attr->retry_cnt = MLX5_GET(qpc, qpc, retry_count);
+ qp_attr->rnr_retry = MLX5_GET(qpc, qpc, rnr_retry);
+
+ pri_path = MLX5_ADDR_OF(qpc, qpc, primary_address_path);
+ alt_path = MLX5_ADDR_OF(qpc, qpc, secondary_address_path);
+
+ if (qp->type == IB_QPT_RC || qp->type == IB_QPT_UC ||
+ qp->type == IB_QPT_XRC_INI || qp->type == IB_QPT_XRC_TGT) {
+ to_rdma_ah_attr(dev, &qp_attr->ah_attr, pri_path);
+ to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, alt_path);
+ qp_attr->alt_pkey_index = MLX5_GET(ads, alt_path, pkey_index);
+ qp_attr->alt_port_num = MLX5_GET(ads, alt_path, vhca_port_num);
+ }
+
+ qp_attr->pkey_index = MLX5_GET(ads, pri_path, pkey_index);
+ qp_attr->port_num = MLX5_GET(ads, pri_path, vhca_port_num);
+ qp_attr->timeout = MLX5_GET(ads, pri_path, ack_timeout);
+ qp_attr->alt_timeout = MLX5_GET(ads, alt_path, ack_timeout);
+
+out:
+ kfree(outb);
+ return err;
+}
+
+static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
+ struct ib_qp_attr *qp_attr, int qp_attr_mask,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_core_dct *dct = &mqp->dct.mdct;
+ u32 *out;
+ u32 access_flags = 0;
+ int outlen = MLX5_ST_SZ_BYTES(query_dct_out);
+ void *dctc;
+ int err;
+ int supported_mask = IB_QP_STATE |
+ IB_QP_ACCESS_FLAGS |
+ IB_QP_PORT |
+ IB_QP_MIN_RNR_TIMER |
+ IB_QP_AV |
+ IB_QP_PATH_MTU |
+ IB_QP_PKEY_INDEX;
+
+ if (qp_attr_mask & ~supported_mask)
+ return -EINVAL;
+ if (mqp->state != IB_QPS_RTR)
+ return -EINVAL;
+
+ out = kzalloc(outlen, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_dct_query(dev, dct, out, outlen);
+ if (err)
+ goto out;
+
+ dctc = MLX5_ADDR_OF(query_dct_out, out, dct_context_entry);
+
+ if (qp_attr_mask & IB_QP_STATE)
+ qp_attr->qp_state = IB_QPS_RTR;
+
+ if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
+ if (MLX5_GET(dctc, dctc, rre))
+ access_flags |= IB_ACCESS_REMOTE_READ;
+ if (MLX5_GET(dctc, dctc, rwe))
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+ if (MLX5_GET(dctc, dctc, rae))
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+ qp_attr->qp_access_flags = access_flags;
+ }
+
+ if (qp_attr_mask & IB_QP_PORT)
+ qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
+ qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
+ if (qp_attr_mask & IB_QP_AV) {
+ qp_attr->ah_attr.grh.traffic_class = MLX5_GET(dctc, dctc, tclass);
+ qp_attr->ah_attr.grh.flow_label = MLX5_GET(dctc, dctc, flow_label);
+ qp_attr->ah_attr.grh.sgid_index = MLX5_GET(dctc, dctc, my_addr_index);
+ qp_attr->ah_attr.grh.hop_limit = MLX5_GET(dctc, dctc, hop_limit);
+ }
+ if (qp_attr_mask & IB_QP_PATH_MTU)
+ qp_attr->path_mtu = MLX5_GET(dctc, dctc, mtu);
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+ qp_attr->pkey_index = MLX5_GET(dctc, dctc, pkey_index);
+out:
+ kfree(out);
+ return err;
+}
+
+int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ int err = 0;
+ u8 raw_packet_qp_state;
+
+ if (ibqp->rwq_ind_tbl)
+ return -ENOSYS;
+
+ if (qp->type == IB_QPT_GSI)
+ return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
+ qp_init_attr);
+
+	/* Not all of the output fields are applicable; make sure to zero them */
+ memset(qp_init_attr, 0, sizeof(*qp_init_attr));
+ memset(qp_attr, 0, sizeof(*qp_attr));
+
+ if (unlikely(qp->type == MLX5_IB_QPT_DCT))
+ return mlx5_ib_dct_query_qp(dev, qp, qp_attr,
+ qp_attr_mask, qp_init_attr);
+
+ mutex_lock(&qp->mutex);
+
+ if (qp->type == IB_QPT_RAW_PACKET ||
+ qp->flags & IB_QP_CREATE_SOURCE_QPN) {
+ err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
+ if (err)
+ goto out;
+ qp->state = raw_packet_qp_state;
+ qp_attr->port_num = 1;
+ } else {
+ err = query_qp_attr(dev, qp, qp_attr);
+ if (err)
+ goto out;
+ }
+
+ qp_attr->qp_state = qp->state;
+ qp_attr->cur_qp_state = qp_attr->qp_state;
+ qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
+ qp_attr->cap.max_recv_sge = qp->rq.max_gs;
+
+ if (!ibqp->uobject) {
+ qp_attr->cap.max_send_wr = qp->sq.max_post;
+ qp_attr->cap.max_send_sge = qp->sq.max_gs;
+ qp_init_attr->qp_context = ibqp->qp_context;
+ } else {
+ qp_attr->cap.max_send_wr = 0;
+ qp_attr->cap.max_send_sge = 0;
+ }
+
+ qp_init_attr->qp_type = qp->type;
+ qp_init_attr->recv_cq = ibqp->recv_cq;
+ qp_init_attr->send_cq = ibqp->send_cq;
+ qp_init_attr->srq = ibqp->srq;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
+
+ qp_init_attr->cap = qp_attr->cap;
+
+ qp_init_attr->create_flags = qp->flags;
+
+ qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+
+out:
+ mutex_unlock(&qp->mutex);
+ return err;
+}
+
+int mlx5_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibxrcd->device);
+ struct mlx5_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
+
+ if (!MLX5_CAP_GEN(dev->mdev, xrc))
+ return -EOPNOTSUPP;
+
+ return mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, 0);
+}
+
+int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
+ u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
+
+ return mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, 0);
+}
+
+static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
+{
+ struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
+ struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
+ struct ib_event event;
+
+ if (rwq->ibwq.event_handler) {
+ event.device = rwq->ibwq.device;
+ event.element.wq = &rwq->ibwq;
+ switch (type) {
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_WQ_FATAL;
+ break;
+ default:
+ mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
+ return;
+ }
+
+ rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
+ }
+}
+
+static int set_delay_drop(struct mlx5_ib_dev *dev)
+{
+ int err = 0;
+
+ mutex_lock(&dev->delay_drop.lock);
+ if (dev->delay_drop.activate)
+ goto out;
+
+ err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
+ if (err)
+ goto out;
+
+ dev->delay_drop.activate = true;
+out:
+ mutex_unlock(&dev->delay_drop.lock);
+
+ if (!err)
+ atomic_inc(&dev->delay_drop.rqs_cnt);
+ return err;
+}
+
+static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr)
+{
+ struct mlx5_ib_dev *dev;
+ int has_net_offloads;
+ __be64 *rq_pas0;
+ int ts_format;
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ dev = to_mdev(pd->device);
+
+ ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
+ if (ts_format < 0)
+ return ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ MLX5_SET(rqc, rqc, mem_rq_type,
+ MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+ MLX5_SET(rqc, rqc, ts_format, ts_format);
+ MLX5_SET(rqc, rqc, user_index, rwq->user_index);
+ MLX5_SET(rqc, rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, wq_type,
+ rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ ?
+ MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ : MLX5_WQ_TYPE_CYCLIC);
+ if (init_attr->create_flags & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ if (!MLX5_CAP_GEN(dev->mdev, end_pad)) {
+ mlx5_ib_dbg(dev, "Scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ } else {
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ }
+ }
+ MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
+ if (rwq->create_flags & MLX5_IB_WQ_FLAGS_STRIDING_RQ) {
+		/*
+		 * In firmware, the number of strides in each WQE is
+		 * "512 * 2^single_wqe_log_num_of_strides".
+		 * Values 3 to 8 are accepted as 10 to 15; values 9 to 18 are
+		 * accepted as 0 to 9.
+		 */
+ static const u8 fw_map[] = { 10, 11, 12, 13, 14, 15, 0, 1,
+ 2, 3, 4, 5, 6, 7, 8, 9 };
+ MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ rwq->single_stride_log_num_of_bytes -
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES);
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ fw_map[rwq->log_num_strides -
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES]);
+ }
+ MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
+ MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
+ MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
+ MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
+ MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
+ has_net_offloads = MLX5_CAP_GEN(dev->mdev, eth_net_offloads);
+ if (init_attr->create_flags & IB_WQ_FLAGS_CVLAN_STRIPPING) {
+ if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
+ mlx5_ib_dbg(dev, "VLAN offloads are not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ } else {
+ MLX5_SET(rqc, rqc, vsd, 1);
+ }
+ if (init_attr->create_flags & IB_WQ_FLAGS_SCATTER_FCS) {
+ if (!(has_net_offloads && MLX5_CAP_ETH(dev->mdev, scatter_fcs))) {
+ mlx5_ib_dbg(dev, "Scatter FCS is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ MLX5_SET(rqc, rqc, scatter_fcs, 1);
+ }
+ if (init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
+ if (!(dev->ib_dev.attrs.raw_packet_caps &
+ IB_RAW_PACKET_CAP_DELAY_DROP)) {
+ mlx5_ib_dbg(dev, "Delay drop is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ MLX5_SET(rqc, rqc, delay_drop_en, 1);
+ }
+ rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
+ mlx5_ib_populate_pas(rwq->umem, 1UL << rwq->page_shift, rq_pas0, 0);
+ err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
+ if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
+ err = set_delay_drop(dev);
+ if (err) {
+ mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
+ err);
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+ } else {
+ rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
+ }
+ }
+out:
+ kvfree(in);
+ return err;
+}
+
+static int set_user_rq_size(struct mlx5_ib_dev *dev,
+ struct ib_wq_init_attr *wq_init_attr,
+ struct mlx5_ib_create_wq *ucmd,
+ struct mlx5_ib_rwq *rwq)
+{
+ /* Sanity check RQ size before proceeding */
+ if (wq_init_attr->max_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_wq_sz)))
+ return -EINVAL;
+
+ if (!ucmd->rq_wqe_count)
+ return -EINVAL;
+
+ rwq->wqe_count = ucmd->rq_wqe_count;
+ rwq->wqe_shift = ucmd->rq_wqe_shift;
+ if (check_shl_overflow(rwq->wqe_count, rwq->wqe_shift, &rwq->buf_size))
+ return -EINVAL;
+
+ rwq->log_rq_stride = rwq->wqe_shift;
+ rwq->log_rq_size = ilog2(rwq->wqe_count);
+ return 0;
+}
+
+static bool log_of_strides_valid(struct mlx5_ib_dev *dev, u32 log_num_strides)
+{
+ if ((log_num_strides > MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES) ||
+ (log_num_strides < MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ if (!MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) &&
+ (log_num_strides < MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES))
+ return false;
+
+ return true;
+}
+
+static int prepare_user_rq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct mlx5_ib_rwq *rwq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_create_wq ucmd = {};
+ int err;
+ size_t required_cmd_sz;
+
+ required_cmd_sz = offsetofend(struct mlx5_ib_create_wq,
+ single_stride_log_num_of_bytes);
+ if (udata->inlen < required_cmd_sz) {
+ mlx5_ib_dbg(dev, "invalid inlen\n");
+ return -EINVAL;
+ }
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd))) {
+ mlx5_ib_dbg(dev, "inlen is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
+ mlx5_ib_dbg(dev, "copy failed\n");
+ return -EFAULT;
+ }
+
+ if (ucmd.comp_mask & (~MLX5_IB_CREATE_WQ_STRIDING_RQ)) {
+ mlx5_ib_dbg(dev, "invalid comp mask\n");
+ return -EOPNOTSUPP;
+ } else if (ucmd.comp_mask & MLX5_IB_CREATE_WQ_STRIDING_RQ) {
+ if (!MLX5_CAP_GEN(dev->mdev, striding_rq)) {
+ mlx5_ib_dbg(dev, "Striding RQ is not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if ((ucmd.single_stride_log_num_of_bytes <
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES) ||
+ (ucmd.single_stride_log_num_of_bytes >
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES)) {
+ mlx5_ib_dbg(dev, "Invalid log stride size (%u. Range is %u - %u)\n",
+ ucmd.single_stride_log_num_of_bytes,
+ MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES,
+ MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES);
+ return -EINVAL;
+ }
+ if (!log_of_strides_valid(dev,
+ ucmd.single_wqe_log_num_of_strides)) {
+ mlx5_ib_dbg(
+ dev,
+ "Invalid log num strides (%u. Range is %u - %u)\n",
+ ucmd.single_wqe_log_num_of_strides,
+ MLX5_CAP_GEN(dev->mdev, ext_stride_num_range) ?
+ MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES :
+ MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES,
+ MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES);
+ return -EINVAL;
+ }
+ rwq->single_stride_log_num_of_bytes =
+ ucmd.single_stride_log_num_of_bytes;
+ rwq->log_num_strides = ucmd.single_wqe_log_num_of_strides;
+ rwq->two_byte_shift_en = !!ucmd.two_byte_shift_en;
+ rwq->create_flags |= MLX5_IB_WQ_FLAGS_STRIDING_RQ;
+ }
+
+ err = set_user_rq_size(dev, init_attr, &ucmd, rwq);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ err = create_user_rq(dev, pd, udata, rwq, &ucmd);
+ if (err) {
+ mlx5_ib_dbg(dev, "err %d\n", err);
+ return err;
+ }
+
+ rwq->user_index = ucmd.user_index;
+ return 0;
+}
+
+struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
+ struct ib_wq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev;
+ struct mlx5_ib_rwq *rwq;
+ struct mlx5_ib_create_wq_resp resp = {};
+ size_t min_resp_len;
+ int err;
+
+ if (!udata)
+ return ERR_PTR(-ENOSYS);
+
+ min_resp_len = offsetofend(struct mlx5_ib_create_wq_resp, reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return ERR_PTR(-EINVAL);
+
+ if (!capable(CAP_SYS_RAWIO) &&
+ init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
+ return ERR_PTR(-EPERM);
+
+ dev = to_mdev(pd->device);
+ switch (init_attr->wq_type) {
+ case IB_WQT_RQ:
+ rwq = kzalloc(sizeof(*rwq), GFP_KERNEL);
+ if (!rwq)
+ return ERR_PTR(-ENOMEM);
+ err = prepare_user_rq(pd, init_attr, udata, rwq);
+ if (err)
+ goto err;
+ err = create_rq(rwq, pd, init_attr);
+ if (err)
+ goto err_user_rq;
+ break;
+ default:
+ mlx5_ib_dbg(dev, "unsupported wq type %d\n",
+ init_attr->wq_type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ rwq->ibwq.wq_num = rwq->core_qp.qpn;
+ rwq->ibwq.state = IB_WQS_RESET;
+ if (udata->outlen) {
+ resp.response_length = offsetofend(
+ struct mlx5_ib_create_wq_resp, response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+ }
+
+ rwq->core_qp.event = mlx5_ib_wq_event;
+ rwq->ibwq.event_handler = init_attr->event_handler;
+ return &rwq->ibwq;
+
+err_copy:
+ mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+err_user_rq:
+ destroy_user_rq(dev, pd, rwq, udata);
+err:
+ kfree(rwq);
+ return ERR_PTR(err);
+}
+
+int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+ int ret;
+
+ ret = mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
+ if (ret)
+ return ret;
+ destroy_user_rq(dev, wq->pd, rwq, udata);
+ kfree(rwq);
+ return 0;
+}
+
+int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
+ struct ib_rwq_ind_table_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl =
+ to_mrwq_ind_table(ib_rwq_ind_table);
+ struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_table->device);
+ int sz = 1 << init_attr->log_ind_tbl_size;
+ struct mlx5_ib_create_rwq_ind_tbl_resp resp = {};
+ size_t min_resp_len;
+ int inlen;
+ int err;
+ int i;
+ u32 *in;
+ void *rqtc;
+
+ if (udata->inlen > 0 &&
+ !ib_is_udata_cleared(udata, 0,
+ udata->inlen))
+ return -EOPNOTSUPP;
+
+ if (init_attr->log_ind_tbl_size >
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) {
+ mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n",
+ init_attr->log_ind_tbl_size,
+ MLX5_CAP_GEN(dev->mdev, log_max_rqt_size));
+ return -EINVAL;
+ }
+
+ min_resp_len =
+ offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp, reserved);
+ if (udata->outlen && udata->outlen < min_resp_len)
+ return -EINVAL;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ for (i = 0; i < sz; i++)
+ MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
+
+ rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
+ MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
+
+ err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
+ kvfree(in);
+ if (err)
+ return err;
+
+ rwq_ind_tbl->ib_rwq_ind_tbl.ind_tbl_num = rwq_ind_tbl->rqtn;
+ if (udata->outlen) {
+ resp.response_length =
+ offsetofend(struct mlx5_ib_create_rwq_ind_tbl_resp,
+ response_length);
+ err = ib_copy_to_udata(udata, &resp, resp.response_length);
+ if (err)
+ goto err_copy;
+ }
+
+ return 0;
+
+err_copy:
+ mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
+ return err;
+}
+
+int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
+{
+ struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
+ struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
+
+ return mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
+}
+
+int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
+ u32 wq_attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(wq->device);
+ struct mlx5_ib_rwq *rwq = to_mrwq(wq);
+ struct mlx5_ib_modify_wq ucmd = {};
+ size_t required_cmd_sz;
+ int curr_wq_state;
+ int wq_state;
+ int inlen;
+ int err;
+ void *rqc;
+ void *in;
+
+ required_cmd_sz = offsetofend(struct mlx5_ib_modify_wq, reserved);
+ if (udata->inlen < required_cmd_sz)
+ return -EINVAL;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
+ return -EOPNOTSUPP;
+
+ if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen)))
+ return -EFAULT;
+
+ if (ucmd.comp_mask || ucmd.reserved)
+ return -EOPNOTSUPP;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ curr_wq_state = wq_attr->curr_wq_state;
+ wq_state = wq_attr->wq_state;
+ if (curr_wq_state == IB_WQS_ERR)
+ curr_wq_state = MLX5_RQC_STATE_ERR;
+ if (wq_state == IB_WQS_ERR)
+ wq_state = MLX5_RQC_STATE_ERR;
+ MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
+ MLX5_SET(rqc, rqc, state, wq_state);
+
+ if (wq_attr_mask & IB_WQ_FLAGS) {
+ if (wq_attr->flags_mask & IB_WQ_FLAGS_CVLAN_STRIPPING) {
+ if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, vlan_cap))) {
+ mlx5_ib_dbg(dev, "VLAN offloads are not "
+ "supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
+ MLX5_SET(rqc, rqc, vsd,
+ (wq_attr->flags & IB_WQ_FLAGS_CVLAN_STRIPPING) ? 0 : 1);
+ }
+
+ if (wq_attr->flags_mask & IB_WQ_FLAGS_PCI_WRITE_END_PADDING) {
+ mlx5_ib_dbg(dev, "Modifying scatter end padding is not supported\n");
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ if (curr_wq_state == IB_WQS_RESET && wq_state == IB_WQS_RDY) {
+ u16 set_id;
+
+ set_id = mlx5_ib_get_counters_id(dev, 0);
+ if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
+ MLX5_SET64(modify_rq_in, in, modify_bitmask,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
+ MLX5_SET(rqc, rqc, counter_set_id, set_id);
+ } else
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "Receive WQ counters are not supported on current FW\n");
+ }
+
+ err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in);
+ if (!err)
+ rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
+
+out:
+ kvfree(in);
+ return err;
+}
+
+struct mlx5_ib_drain_cqe {
+ struct ib_cqe cqe;
+ struct completion done;
+};
+
+static void mlx5_ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
+ struct mlx5_ib_drain_cqe,
+ cqe);
+
+ complete(&cqe->done);
+}
+
+/* This function returns only once the drain WR has completed */
+static void handle_drain_completion(struct ib_cq *cq,
+ struct mlx5_ib_drain_cqe *sdrain,
+ struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ if (cq->poll_ctx == IB_POLL_DIRECT) {
+ while (wait_for_completion_timeout(&sdrain->done, HZ / 10) <= 0)
+ ib_process_cq_direct(cq, -1);
+ return;
+ }
+
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ struct mlx5_ib_cq *mcq = to_mcq(cq);
+ bool triggered = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+		/* Make sure that the CQ handler won't run if it hasn't run yet */
+ if (!mcq->mcq.reset_notify_added)
+ mcq->mcq.reset_notify_added = 1;
+ else
+ triggered = true;
+ spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+
+ if (triggered) {
+			/* Wait for any scheduled/running task to end */
+ switch (cq->poll_ctx) {
+ case IB_POLL_SOFTIRQ:
+ irq_poll_disable(&cq->iop);
+ irq_poll_enable(&cq->iop);
+ break;
+ case IB_POLL_WORKQUEUE:
+ cancel_work_sync(&cq->work);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+ }
+
+		/* Run the CQ handler - this makes sure that the drain WR will
+		 * be processed if it wasn't processed yet.
+		 */
+ mcq->mcq.comp(&mcq->mcq, NULL);
+ }
+
+ wait_for_completion(&sdrain->done);
+}
+
+void mlx5_ib_drain_sq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->send_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe sdrain;
+ const struct ib_send_wr *bad_swr;
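+	/*
+	 * A zero-length RDMA WRITE is posted as the drain marker; once its
+	 * completion is observed, all previously posted send WRs have been
+	 * flushed.
+	 */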
+ struct ib_rdma_wr swr = {
+ .wr = {
+ .next = NULL,
+ { .wr_cqe = &sdrain.cqe, },
+ .opcode = IB_WR_RDMA_WRITE,
+ },
+ };
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ sdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&sdrain.done);
+
+ ret = mlx5_ib_post_send_drain(qp, &swr.wr, &bad_swr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &sdrain, dev);
+}
+
+void mlx5_ib_drain_rq(struct ib_qp *qp)
+{
+ struct ib_cq *cq = qp->recv_cq;
+ struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+ struct mlx5_ib_drain_cqe rdrain;
+ struct ib_recv_wr rwr = {};
+ const struct ib_recv_wr *bad_rwr;
+ int ret;
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret && mdev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
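+	/* An empty receive WR serves as the drain marker for the RQ */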
+ rwr.wr_cqe = &rdrain.cqe;
+ rdrain.cqe.done = mlx5_ib_drain_qp_done;
+ init_completion(&rdrain.done);
+
+ ret = mlx5_ib_post_recv_drain(qp, &rwr, &bad_rwr);
+ if (ret) {
+ WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+ return;
+ }
+
+ handle_drain_completion(cq, &rdrain, dev);
+}
+
+/*
+ * Bind a QP to a counter. If @counter is NULL then bind the QP to
+ * the default counter.
+ */
+int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
+{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ struct mlx5_ib_qp *mqp = to_mqp(qp);
+ int err = 0;
+
+ mutex_lock(&mqp->mutex);
+ if (mqp->state == IB_QPS_RESET) {
+ qp->counter = counter;
+ goto out;
+ }
+
+ if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (mqp->state == IB_QPS_RTS) {
+ err = __mlx5_ib_qp_set_counter(qp, counter);
+ if (!err)
+ qp->counter = counter;
+
+ goto out;
+ }
+
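+	/*
+	 * The QP is in an intermediate state; remember the counter and bind
+	 * it once the QP reaches RTS (see __mlx5_ib_modify_qp()).
+	 */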
+ mqp->counter_pending = 1;
+ qp->counter = counter;
+
+out:
+ mutex_unlock(&mqp->mutex);
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
new file mode 100644
index 000000000..5d4e140db
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_QP_H
+#define _MLX5_IB_QP_H
+
+#include "mlx5_ib.h"
+
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev);
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev);
+
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *qp,
+ u32 *in, int inlen, u32 *out, int outlen);
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out);
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece);
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct);
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *out, int outlen);
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen);
+
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
+
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *sq);
+
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq);
+
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type);
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
+
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn);
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
+int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+#endif /* _MLX5_IB_QP_H */
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
new file mode 100644
index 000000000..d4e7864c5
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_ib.h"
+#include "qp.h"
+
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
+ struct mlx5_core_dct *dct);
+
+static struct mlx5_core_rsc_common *
+mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
+{
+ struct mlx5_core_rsc_common *common;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+
+ common = radix_tree_lookup(&table->tree, rsn);
+ if (common)
+ refcount_inc(&common->refcount);
+
+ spin_unlock_irqrestore(&table->lock, flags);
+
+ return common;
+}
+
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
+{
+ if (refcount_dec_and_test(&common->refcount))
+ complete(&common->free);
+}
+
+static u64 qp_allowed_event_types(void)
+{
+ u64 mask;
+
+ mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
+ BIT(MLX5_EVENT_TYPE_COMM_EST) |
+ BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
+ BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
+ BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
+ BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
+ BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
+ BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
+
+ return mask;
+}
+
+static u64 rq_allowed_event_types(void)
+{
+ u64 mask;
+
+ mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
+ BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
+
+ return mask;
+}
+
+static u64 sq_allowed_event_types(void)
+{
+ return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
+}
+
+static u64 dct_allowed_event_types(void)
+{
+ return BIT(MLX5_EVENT_TYPE_DCT_DRAINED);
+}
+
+static bool is_event_type_allowed(int rsc_type, int event_type)
+{
+ switch (rsc_type) {
+ case MLX5_EVENT_QUEUE_TYPE_QP:
+ return BIT(event_type) & qp_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_RQ:
+ return BIT(event_type) & rq_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_SQ:
+ return BIT(event_type) & sq_allowed_event_types();
+ case MLX5_EVENT_QUEUE_TYPE_DCT:
+ return BIT(event_type) & dct_allowed_event_types();
+ default:
+ WARN(1, "Event arrived for unknown resource type");
+ return false;
+ }
+}
+
+static int rsc_event_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_core_rsc_common *common;
+ struct mlx5_qp_table *table;
+ struct mlx5_core_dct *dct;
+ u8 event_type = (u8)type;
+ struct mlx5_core_qp *qp;
+ struct mlx5_eqe *eqe;
+ u32 rsn;
+
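+	/* Rebuild the radix tree key: resource number | (type << MLX5_USER_INDEX_LEN) */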
+ switch (event_type) {
+ case MLX5_EVENT_TYPE_DCT_DRAINED:
+ eqe = data;
+ rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
+ rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN);
+ break;
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ case MLX5_EVENT_TYPE_COMM_EST:
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ eqe = data;
+ rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+ rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ table = container_of(nb, struct mlx5_qp_table, nb);
+ common = mlx5_get_rsc(table, rsn);
+ if (!common)
+ return NOTIFY_OK;
+
+ if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
+ goto out;
+
+ switch (common->res) {
+ case MLX5_RES_QP:
+ case MLX5_RES_RQ:
+ case MLX5_RES_SQ:
+ qp = (struct mlx5_core_qp *)common;
+ qp->event(qp, event_type);
+ break;
+ case MLX5_RES_DCT:
+ dct = (struct mlx5_core_dct *)common;
+ if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
+ complete(&dct->drained);
+ break;
+ default:
+ break;
+ }
+out:
+ mlx5_core_put_rsc(common);
+
+ return NOTIFY_OK;
+}
+
+static int create_resource_common(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp, int rsc_type)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ int err;
+
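+	/*
+	 * Index the resource by its number with the resource type encoded in
+	 * the bits above MLX5_USER_INDEX_LEN; rsc_event_notifier() rebuilds
+	 * the same key when looking up the resource for an event.
+	 */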
+ qp->common.res = rsc_type;
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree,
+ qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
+ qp);
+ spin_unlock_irq(&table->lock);
+ if (err)
+ return err;
+
+ refcount_set(&qp->common.refcount, 1);
+ init_completion(&qp->common.free);
+ qp->pid = current->pid;
+
+ return 0;
+}
+
+static void destroy_resource_common(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *qp)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+ unsigned long flags;
+
+ spin_lock_irqsave(&table->lock, flags);
+ radix_tree_delete(&table->tree,
+ qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
+ spin_unlock_irqrestore(&table->lock, flags);
+ mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
+ wait_for_completion(&qp->common.free);
+}
+
+static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
+ struct mlx5_core_dct *dct, bool need_cleanup)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ err = mlx5_core_drain_dct(dev, dct);
+ if (err) {
+ if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ goto destroy;
+
+ return err;
+ }
+ wait_for_completion(&dct->drained);
+destroy:
+ if (need_cleanup)
+ destroy_resource_common(dev, &dct->mqp);
+ MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
+ MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(destroy_dct_in, in, uid, qp->uid);
+ err = mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
+ return err;
+}
+
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *in, int inlen, u32 *out, int outlen)
+{
+ struct mlx5_core_qp *qp = &dct->mqp;
+ int err;
+
+ init_completion(&dct->drained);
+ MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
+
+ err = mlx5_cmd_do(dev->mdev, in, inlen, out, outlen);
+ if (err)
+ return err;
+
+ qp->qpn = MLX5_GET(create_dct_out, out, dctn);
+ qp->uid = MLX5_GET(create_dct_in, in, uid);
+ err = create_resource_common(dev, qp, MLX5_RES_DCT);
+ if (err)
+ goto err_cmd;
+
+ return 0;
+err_cmd:
+ _mlx5_core_destroy_dct(dev, dct, false);
+ return err;
+}
+
+int mlx5_qpc_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *in, int inlen, u32 *out)
+{
+ u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+ int err;
+
+ MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out,
+ MLX5_ST_SZ_BYTES(create_qp_out));
+ if (err)
+ return err;
+
+ qp->uid = MLX5_GET(create_qp_in, in, uid);
+ qp->qpn = MLX5_GET(create_qp_out, out, qpn);
+
+ err = create_resource_common(dev, qp, MLX5_RES_QP);
+ if (err)
+ goto err_cmd;
+
+ mlx5_debug_qp_add(dev->mdev, qp);
+
+ return 0;
+
+err_cmd:
+ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, din, uid, qp->uid);
+ mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
+ return err;
+}
+
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
+ MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
+ MLX5_SET(drain_dct_in, in, uid, qp->uid);
+ return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
+}
+
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
+ struct mlx5_core_dct *dct)
+{
+ return _mlx5_core_destroy_dct(dev, dct, true);
+}
+
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+ mlx5_debug_qp_remove(dev->mdev, qp);
+
+ destroy_resource_common(dev, qp);
+
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, in, uid, qp->uid);
+ return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
+}
+
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
+ u32 timeout_usec)
+{
+ u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};
+
+ MLX5_SET(set_delay_drop_params_in, in, opcode,
+ MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
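+	/* The delay_drop_timeout field is in units of 100 usec */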
+ MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
+ timeout_usec / 100);
+ return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
+}
+
+struct mbox_info {
+ u32 *in;
+ u32 *out;
+ int inlen;
+ int outlen;
+};
+
+static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
+{
+ mbox->inlen = inlen;
+ mbox->outlen = outlen;
+ mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
+ mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
+ if (!mbox->in || !mbox->out) {
+ kfree(mbox->in);
+ kfree(mbox->out);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void mbox_free(struct mbox_info *mbox)
+{
+ kfree(mbox->in);
+ kfree(mbox->out);
+}
+
+static int get_ece_from_mbox(void *out, u16 opcode)
+{
+ int ece = 0;
+
+ switch (opcode) {
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ ece = MLX5_GET(init2init_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ ece = MLX5_GET(init2rtr_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ ece = MLX5_GET(rtr2rts_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ ece = MLX5_GET(rts2rts_qp_out, out, ece);
+ break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ ece = MLX5_GET(rst2init_qp_out, out, ece);
+ break;
+ default:
+ break;
+ }
+
+ return ece;
+}
+
+static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
+ u32 opt_param_mask, void *qpc,
+ struct mbox_info *mbox, u16 uid, u32 ece)
+{
+ mbox->out = NULL;
+ mbox->in = NULL;
+
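+/*
+ * Helpers to allocate the command mailbox and to fill the fields common to
+ * all modify-QP commands (opcode, qpn, uid) and, for transitions that carry
+ * a QP context, the opt_param_mask and qpc payload.
+ */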
+#define MBOX_ALLOC(mbox, typ) \
+ mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
+
+#define MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid) \
+ do { \
+ MLX5_SET(typ##_in, in, opcode, _opcode); \
+ MLX5_SET(typ##_in, in, qpn, _qpn); \
+ MLX5_SET(typ##_in, in, uid, _uid); \
+ } while (0)
+
+#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc, _uid) \
+ do { \
+ MOD_QP_IN_SET(typ, in, _opcode, _qpn, _uid); \
+ MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
+ memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, \
+ MLX5_ST_SZ_BYTES(qpc)); \
+ } while (0)
+
+ switch (opcode) {
+ /* 2RST & 2ERR */
+ case MLX5_CMD_OP_2RST_QP:
+ if (MBOX_ALLOC(mbox, qp_2rst))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn, uid);
+ break;
+ case MLX5_CMD_OP_2ERR_QP:
+ if (MBOX_ALLOC(mbox, qp_2err))
+ return -ENOMEM;
+ MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn, uid);
+ break;
+
+ /* MODIFY with QPC */
+ case MLX5_CMD_OP_RST2INIT_QP:
+ if (MBOX_ALLOC(mbox, rst2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
+ break;
+ case MLX5_CMD_OP_INIT2RTR_QP:
+ if (MBOX_ALLOC(mbox, init2rtr_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(init2rtr_qp_in, mbox->in, ece, ece);
+ break;
+ case MLX5_CMD_OP_RTR2RTS_QP:
+ if (MBOX_ALLOC(mbox, rtr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(rtr2rts_qp_in, mbox->in, ece, ece);
+ break;
+ case MLX5_CMD_OP_RTS2RTS_QP:
+ if (MBOX_ALLOC(mbox, rts2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(rts2rts_qp_in, mbox->in, ece, ece);
+ break;
+ case MLX5_CMD_OP_SQERR2RTS_QP:
+ if (MBOX_ALLOC(mbox, sqerr2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ break;
+ case MLX5_CMD_OP_SQD_RTS_QP:
+ if (MBOX_ALLOC(mbox, sqd2rts_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(sqd2rts_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ break;
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ if (MBOX_ALLOC(mbox, init2init_qp))
+ return -ENOMEM;
+ MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
+ opt_param_mask, qpc, uid);
+ MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
+ void *qpc, struct mlx5_core_qp *qp, u32 *ece)
+{
+ struct mbox_info mbox;
+ int err;
+
+ err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn, opt_param_mask,
+ qpc, &mbox, qp->uid, (ece) ? *ece : 0);
+ if (err)
+ return err;
+
+ err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
+ mbox.outlen);
+
+ if (ece)
+ *ece = get_ece_from_mbox(mbox.out, opcode);
+
+ mbox_free(&mbox);
+ return err;
+}
+
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+
+ spin_lock_init(&table->lock);
+ INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+ mlx5_qp_debugfs_init(dev->mdev);
+
+ table->nb.notifier_call = rsc_event_notifier;
+ mlx5_notifier_register(dev->mdev, &table->nb);
+
+ return 0;
+}
+
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_qp_table *table = &dev->qp_table;
+
+ mlx5_notifier_unregister(dev->mdev, &table->nb);
+ mlx5_qp_debugfs_cleanup(dev->mdev);
+}
+
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
+
+ MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+ MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+ return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+ u32 *out, int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
+ struct mlx5_core_qp *qp = &dct->mqp;
+
+ MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
+ MLX5_SET(query_dct_in, in, dctn, qp->qpn);
+
+ return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
+ outlen);
+}
+
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
+ return err;
+}
+
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
+}
+
+static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+ MLX5_SET(destroy_rq_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
+}
+
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq)
+{
+ int err;
+ u32 rqn;
+
+ err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
+ if (err)
+ return err;
+
+ rq->uid = MLX5_GET(create_rq_in, in, uid);
+ rq->qpn = rqn;
+ err = create_resource_common(dev, rq, MLX5_RES_RQ);
+ if (err)
+ goto err_destroy_rq;
+
+ return 0;
+
+err_destroy_rq:
+ destroy_rq_tracked(dev, rq->qpn, rq->uid);
+
+ return err;
+}
+
+int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq)
+{
+ destroy_resource_common(dev, rq);
+ return destroy_rq_tracked(dev, rq->qpn, rq->uid);
+}
+
+static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+ MLX5_SET(destroy_sq_in, in, uid, uid);
+ mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
+}
+
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq)
+{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
+ int err;
+
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+ err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
+ if (err)
+ return err;
+
+ sq->qpn = MLX5_GET(create_sq_out, out, sqn);
+ sq->uid = MLX5_GET(create_sq_in, in, uid);
+ err = create_resource_common(dev, sq, MLX5_RES_SQ);
+ if (err)
+ goto err_destroy_sq;
+
+ return 0;
+
+err_destroy_sq:
+ destroy_sq_tracked(dev, sq->qpn, sq->uid);
+
+ return err;
+}
+
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *sq)
+{
+ destroy_resource_common(dev, sq);
+ destroy_sq_tracked(dev, sq->qpn, sq->uid);
+}
+
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
+ int res_num,
+ enum mlx5_res_type res_type)
+{
+ u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
+ struct mlx5_qp_table *table = &dev->qp_table;
+
+ return mlx5_get_rsc(table, rsn);
+}
+
+void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
+{
+ mlx5_core_put_rsc(res);
+}
diff --git a/drivers/infiniband/hw/mlx5/restrack.c b/drivers/infiniband/hw/mlx5/restrack.c
new file mode 100644
index 000000000..4ac429e72
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/restrack.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019-2020, Mellanox Technologies Ltd. All rights reserved.
+ */
+
+#include <uapi/rdma/rdma_netlink.h>
+#include <linux/mlx5/rsc_dump.h>
+#include <rdma/ib_umem_odp.h>
+#include <rdma/restrack.h>
+#include "mlx5_ib.h"
+#include "restrack.h"
+
+#define MAX_DUMP_SIZE 1024
+
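+/*
+ * Stream the firmware resource dump of a single object of the given segment
+ * type into @data, one page at a time, up to MAX_DUMP_SIZE bytes.
+ */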
+static int dump_rsc(struct mlx5_core_dev *dev, enum mlx5_sgmt_type type,
+ int index, void *data, int *data_len)
+{
+ struct mlx5_core_dev *mdev = dev;
+ struct mlx5_rsc_dump_cmd *cmd;
+ struct mlx5_rsc_key key = {};
+ struct page *page;
+ int offset = 0;
+ int err = 0;
+ int cmd_err;
+ int size;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ key.size = PAGE_SIZE;
+ key.rsc = type;
+ key.index1 = index;
+ key.num_of_obj1 = 1;
+
+ cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto free_page;
+ }
+
+ do {
+ cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
+ if (cmd_err < 0 || size + offset > MAX_DUMP_SIZE) {
+ err = cmd_err;
+ goto destroy_cmd;
+ }
+ memcpy(data + offset, page_address(page), size);
+ offset += size;
+ } while (cmd_err > 0);
+ *data_len = offset;
+
+destroy_cmd:
+ mlx5_rsc_dump_cmd_destroy(cmd);
+free_page:
+ __free_page(page);
+ return err;
+}
+
+static int fill_res_raw(struct sk_buff *msg, struct mlx5_ib_dev *dev,
+ enum mlx5_sgmt_type type, u32 key)
+{
+ int len = 0;
+ void *data;
+ int err;
+
+ data = kzalloc(MAX_DUMP_SIZE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ err = dump_rsc(dev->mdev, type, key, data, &len);
+ if (err)
+ goto out;
+
+ err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);
+out:
+ kfree(data);
+ return err;
+}
+
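+/*
+ * Report the ODP page fault, invalidation and prefetch counters of an MR as
+ * driver hw counters through the restrack netlink interface.
+ */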
+static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+
+ if (!table_attr)
+ goto err;
+
+ if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
+ atomic64_read(&mr->odp_stats.faults)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(
+ msg, "page_invalidations",
+ atomic64_read(&mr->odp_stats.invalidations)))
+ goto err_table;
+ if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
+ atomic64_read(&mr->odp_stats.prefetch)))
+ goto err_table;
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err_table:
+ nla_nest_cancel(msg, table_attr);
+err:
+ return -EMSGSIZE;
+}
+
+static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+
+ return fill_res_raw(msg, mr_to_mdev(mr), MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
+ mlx5_mkey_to_idx(mr->mmkey.key));
+}
+
+static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
+{
+ struct mlx5_ib_mr *mr = to_mmr(ibmr);
+ struct nlattr *table_attr;
+
+ if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
+ return 0;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr)
+ goto err;
+
+ if (mr->is_odp_implicit) {
+ if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
+ goto err;
+ } else {
+ if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
+ goto err;
+ }
+
+ nla_nest_end(msg, table_attr);
+ return 0;
+
+err:
+ nla_nest_cancel(msg, table_attr);
+ return -EMSGSIZE;
+}
+
+static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
+ struct mlx5_ib_cq *cq = to_mcq(ibcq);
+
+ return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
+}
+
+static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+
+ return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_QP,
+ ibqp->qp_num);
+}
+
+static const struct ib_device_ops restrack_ops = {
+ .fill_res_cq_entry_raw = fill_res_cq_entry_raw,
+ .fill_res_mr_entry = fill_res_mr_entry,
+ .fill_res_mr_entry_raw = fill_res_mr_entry_raw,
+ .fill_res_qp_entry_raw = fill_res_qp_entry_raw,
+ .fill_stat_mr_entry = fill_stat_mr_entry,
+};
+
+int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev)
+{
+ ib_set_device_ops(&dev->ib_dev, &restrack_ops);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/restrack.h b/drivers/infiniband/hw/mlx5/restrack.h
new file mode 100644
index 000000000..e8d81270f
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/restrack.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2020, Mellanox Technologies Ltd. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_RESTRACK_H
+#define _MLX5_IB_RESTRACK_H
+
+#include "mlx5_ib.h"
+
+int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev);
+
+#endif /* _MLX5_IB_RESTRACK_H */
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
new file mode 100644
index 000000000..09b365a98
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/mlx5/qp.h>
+#include <linux/slab.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
+#include "mlx5_ib.h"
+#include "srq.h"
+
+static void *get_wqe(struct mlx5_ib_srq *srq, int n)
+{
+ return mlx5_frag_buf_get_wqe(&srq->fbc, n);
+}
+
+static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
+{
+ struct ib_event event;
+ struct ib_srq *ibsrq = &to_mibsrq(srq)->ibsrq;
+
+ if (ibsrq->event_handler) {
+ event.device = ibsrq->device;
+ event.element.srq = ibsrq;
+ switch (type) {
+ case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ break;
+ case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+ event.event = IB_EVENT_SRQ_ERR;
+ break;
+ default:
+ pr_warn("mlx5_ib: Unexpected event type %d on SRQ %06x\n",
+ type, srq->srqn);
+ return;
+ }
+
+ ibsrq->event_handler(&event, ibsrq->srq_context);
+ }
+}
+
+static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
+ struct mlx5_srq_attr *in,
+ struct ib_udata *udata, int buf_size)
+{
+ struct mlx5_ib_dev *dev = to_mdev(pd->device);
+ struct mlx5_ib_create_srq ucmd = {};
+ struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
+ udata, struct mlx5_ib_ucontext, ibucontext);
+ size_t ucmdlen;
+ int err;
+ u32 uidx = MLX5_IB_DEFAULT_UIDX;
+
+ ucmdlen = min(udata->inlen, sizeof(ucmd));
+
+ if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
+ mlx5_ib_dbg(dev, "failed copy udata\n");
+ return -EFAULT;
+ }
+
+ if (ucmd.reserved0 || ucmd.reserved1)
+ return -EINVAL;
+
+ if (udata->inlen > sizeof(ucmd) &&
+ !ib_is_udata_cleared(udata, sizeof(ucmd),
+ udata->inlen - sizeof(ucmd)))
+ return -EINVAL;
+
+ if (in->type != IB_SRQT_BASIC) {
+ err = get_srq_user_index(ucontext, &ucmd, udata->inlen, &uidx);
+ if (err)
+ return err;
+ }
+
+ srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
+
+ srq->umem = ib_umem_get(pd->device, ucmd.buf_addr, buf_size, 0);
+ if (IS_ERR(srq->umem)) {
+ mlx5_ib_dbg(dev, "failed umem get, size %d\n", buf_size);
+ err = PTR_ERR(srq->umem);
+ return err;
+ }
+ in->umem = srq->umem;
+
+ err = mlx5_ib_db_map_user(ucontext, ucmd.db_addr, &srq->db);
+ if (err) {
+ mlx5_ib_dbg(dev, "map doorbell failed\n");
+ goto err_umem;
+ }
+
+ in->uid = (in->type != IB_SRQT_XRC) ? to_mpd(pd)->uid : 0;
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
+ in->type != IB_SRQT_BASIC)
+ in->user_index = uidx;
+
+ return 0;
+
+err_umem:
+ ib_umem_release(srq->umem);
+
+ return err;
+}
+
+static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
+ struct mlx5_srq_attr *in, int buf_size)
+{
+ int err;
+ int i;
+ struct mlx5_wqe_srq_next_seg *next;
+
+ err = mlx5_db_alloc(dev->mdev, &srq->db);
+ if (err) {
+ mlx5_ib_warn(dev, "alloc dbell rec failed\n");
+ return err;
+ }
+
+ if (mlx5_frag_buf_alloc_node(dev->mdev, buf_size, &srq->buf,
+ dev->mdev->priv.numa_node)) {
+ mlx5_ib_dbg(dev, "buf alloc failed\n");
+ err = -ENOMEM;
+ goto err_db;
+ }
+
+ mlx5_init_fbc(srq->buf.frags, srq->msrq.wqe_shift, ilog2(srq->msrq.max),
+ &srq->fbc);
+
+ srq->head = 0;
+ srq->tail = srq->msrq.max - 1;
+ srq->wqe_ctr = 0;
+
+ for (i = 0; i < srq->msrq.max; i++) {
+ next = get_wqe(srq, i);
+ next->next_wqe_index =
+ cpu_to_be16((i + 1) & (srq->msrq.max - 1));
+ }
+
+ mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
+ in->pas = kvcalloc(srq->buf.npages, sizeof(*in->pas), GFP_KERNEL);
+ if (!in->pas) {
+ err = -ENOMEM;
+ goto err_buf;
+ }
+ mlx5_fill_page_frag_array(&srq->buf, in->pas);
+
+ srq->wrid = kvmalloc_array(srq->msrq.max, sizeof(u64), GFP_KERNEL);
+ if (!srq->wrid) {
+ err = -ENOMEM;
+ goto err_in;
+ }
+ srq->wq_sig = 0;
+
+ in->log_page_size = srq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
+ if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
+ in->type != IB_SRQT_BASIC)
+ in->user_index = MLX5_IB_DEFAULT_UIDX;
+
+ return 0;
+
+err_in:
+ kvfree(in->pas);
+
+err_buf:
+ mlx5_frag_buf_free(dev->mdev, &srq->buf);
+
+err_db:
+ mlx5_db_free(dev->mdev, &srq->db);
+ return err;
+}
+
+static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
+ struct ib_udata *udata)
+{
+ mlx5_ib_db_unmap_user(
+ rdma_udata_to_drv_context(
+ udata,
+ struct mlx5_ib_ucontext,
+ ibucontext),
+ &srq->db);
+ ib_umem_release(srq->umem);
+}
+
+static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
+{
+ kvfree(srq->wrid);
+ mlx5_frag_buf_free(dev->mdev, &srq->buf);
+ mlx5_db_free(dev->mdev, &srq->db);
+}
+
+int mlx5_ib_create_srq(struct ib_srq *ib_srq,
+ struct ib_srq_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ib_srq->device);
+ struct mlx5_ib_srq *srq = to_msrq(ib_srq);
+ size_t desc_size;
+ size_t buf_size;
+ int err;
+ struct mlx5_srq_attr in = {};
+ __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+
+ if (init_attr->srq_type != IB_SRQT_BASIC &&
+ init_attr->srq_type != IB_SRQT_XRC &&
+ init_attr->srq_type != IB_SRQT_TM)
+ return -EOPNOTSUPP;
+
+ /* Sanity check SRQ size before proceeding */
+ if (init_attr->attr.max_wr >= max_srq_wqes) {
+ mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
+ init_attr->attr.max_wr,
+ max_srq_wqes);
+ return -EINVAL;
+ }
+
+ mutex_init(&srq->mutex);
+ spin_lock_init(&srq->lock);
+ srq->msrq.max = roundup_pow_of_two(init_attr->attr.max_wr + 1);
+ srq->msrq.max_gs = init_attr->attr.max_sge;
+
+ desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
+ srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+ return -EINVAL;
+
+ desc_size = roundup_pow_of_two(desc_size);
+ desc_size = max_t(size_t, 32, desc_size);
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+ return -EINVAL;
+
+ srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ srq->msrq.wqe_shift = ilog2(desc_size);
+ buf_size = srq->msrq.max * desc_size;
+ if (buf_size < desc_size)
+ return -EINVAL;
+
+ in.type = init_attr->srq_type;
+
+ if (udata)
+ err = create_srq_user(ib_srq->pd, srq, &in, udata, buf_size);
+ else
+ err = create_srq_kernel(dev, srq, &in, buf_size);
+
+ if (err) {
+ mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
+ udata ? "user" : "kernel", err);
+ return err;
+ }
+
+ in.log_size = ilog2(srq->msrq.max);
+ in.wqe_shift = srq->msrq.wqe_shift - 4;
+ if (srq->wq_sig)
+ in.flags |= MLX5_SRQ_FLAG_WQ_SIG;
+
+ if (init_attr->srq_type == IB_SRQT_XRC && init_attr->ext.xrc.xrcd)
+ in.xrcd = to_mxrcd(init_attr->ext.xrc.xrcd)->xrcdn;
+ else
+ in.xrcd = dev->devr.xrcdn0;
+
+ if (init_attr->srq_type == IB_SRQT_TM) {
+ in.tm_log_list_size =
+ ilog2(init_attr->ext.tag_matching.max_num_tags) + 1;
+ if (in.tm_log_list_size >
+ MLX5_CAP_GEN(dev->mdev, log_tag_matching_list_sz)) {
+ mlx5_ib_dbg(dev, "TM SRQ max_num_tags exceeding limit\n");
+ err = -EINVAL;
+ goto err_usr_kern_srq;
+ }
+ in.flags |= MLX5_SRQ_FLAG_RNDV;
+ }
+
+ if (ib_srq_has_cq(init_attr->srq_type))
+ in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;
+ else
+ in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;
+
+ in.pd = to_mpd(ib_srq->pd)->pdn;
+ in.db_record = srq->db.dma;
+ err = mlx5_cmd_create_srq(dev, &srq->msrq, &in);
+ kvfree(in.pas);
+ if (err) {
+ mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
+ goto err_usr_kern_srq;
+ }
+
+ mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
+
+ srq->msrq.event = mlx5_ib_srq_event;
+ srq->ibsrq.ext.xrc.srq_num = srq->msrq.srqn;
+
+ if (udata) {
+ struct mlx5_ib_create_srq_resp resp = {
+ .srqn = srq->msrq.srqn,
+ };
+
+ if (ib_copy_to_udata(udata, &resp, min(udata->outlen,
+ sizeof(resp)))) {
+ mlx5_ib_dbg(dev, "copy to user failed\n");
+ err = -EFAULT;
+ goto err_core;
+ }
+ }
+
+ init_attr->attr.max_wr = srq->msrq.max - 1;
+
+ return 0;
+
+err_core:
+ mlx5_cmd_destroy_srq(dev, &srq->msrq);
+
+err_usr_kern_srq:
+ if (udata)
+ destroy_srq_user(ib_srq->pd, srq, udata);
+ else
+ destroy_srq_kernel(dev, srq);
+
+ return err;
+}
+
+int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx5_ib_srq *srq = to_msrq(ibsrq);
+ int ret;
+
+ /* We don't support resizing SRQs yet */
+ if (attr_mask & IB_SRQ_MAX_WR)
+ return -EINVAL;
+
+ if (attr_mask & IB_SRQ_LIMIT) {
+ if (attr->srq_limit >= srq->msrq.max)
+ return -EINVAL;
+
+ mutex_lock(&srq->mutex);
+ ret = mlx5_cmd_arm_srq(dev, &srq->msrq, attr->srq_limit, 1);
+ mutex_unlock(&srq->mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx5_ib_srq *srq = to_msrq(ibsrq);
+ int ret;
+ struct mlx5_srq_attr *out;
+
+ out = kzalloc(sizeof(*out), GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ ret = mlx5_cmd_query_srq(dev, &srq->msrq, out);
+ if (ret)
+ goto out_box;
+
+ srq_attr->srq_limit = out->lwm;
+ srq_attr->max_wr = srq->msrq.max - 1;
+ srq_attr->max_sge = srq->msrq.max_gs;
+
+out_box:
+ kfree(out);
+ return ret;
+}
+
+int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
+{
+ struct mlx5_ib_dev *dev = to_mdev(srq->device);
+ struct mlx5_ib_srq *msrq = to_msrq(srq);
+ int ret;
+
+ ret = mlx5_cmd_destroy_srq(dev, &msrq->msrq);
+ if (ret)
+ return ret;
+
+ if (udata)
+ destroy_srq_user(srq->pd, msrq, udata);
+ else
+ destroy_srq_kernel(dev, msrq);
+ return 0;
+}
+
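+/* Return a completed receive WQE to the tail of the SRQ free list. */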
+void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index)
+{
+ struct mlx5_wqe_srq_next_seg *next;
+
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+ next = get_wqe(srq, srq->tail);
+ next->next_wqe_index = cpu_to_be16(wqe_index);
+ srq->tail = wqe_index;
+
+ spin_unlock(&srq->lock);
+}
+
+int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct mlx5_ib_srq *srq = to_msrq(ibsrq);
+ struct mlx5_wqe_srq_next_seg *next;
+ struct mlx5_wqe_data_seg *scat;
+ struct mlx5_ib_dev *dev = to_mdev(ibsrq->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int i;
+
+ spin_lock_irqsave(&srq->lock, flags);
+
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+ err = -EIO;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ break;
+ }
+
+ if (unlikely(srq->head == srq->tail)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ break;
+ }
+
+ srq->wrid[srq->head] = wr->wr_id;
+
+ next = get_wqe(srq, srq->head);
+ srq->head = be16_to_cpu(next->next_wqe_index);
+ scat = (struct mlx5_wqe_data_seg *)(next + 1);
+
+ for (i = 0; i < wr->num_sge; i++) {
+ scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
+ scat[i].lkey = cpu_to_be32(wr->sg_list[i].lkey);
+ scat[i].addr = cpu_to_be64(wr->sg_list[i].addr);
+ }
+
+ if (i < srq->msrq.max_avail_gather) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].addr = 0;
+ }
+ }
+
+ if (likely(nreq)) {
+ srq->wqe_ctr += nreq;
+
+ /* Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *srq->db.db = cpu_to_be32(srq->wqe_ctr);
+ }
+out:
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/srq.h b/drivers/infiniband/hw/mlx5/srq.h
new file mode 100644
index 000000000..a7e3dc556
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/srq.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2013-2018, Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef MLX5_IB_SRQ_H
+#define MLX5_IB_SRQ_H
+
+enum {
+ MLX5_SRQ_FLAG_ERR = (1 << 0),
+ MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
+ MLX5_SRQ_FLAG_RNDV = (1 << 2),
+};
+
+struct mlx5_srq_attr {
+ u32 type;
+ u32 flags;
+ u32 log_size;
+ u32 wqe_shift;
+ u32 log_page_size;
+ u32 wqe_cnt;
+ u32 srqn;
+ u32 xrcd;
+ u32 page_offset;
+ u32 cqn;
+ u32 pd;
+ u32 lwm;
+ u32 user_index;
+ u64 db_record;
+ __be64 *pas;
+ struct ib_umem *umem;
+ u32 tm_log_list_size;
+ u32 tm_next_tag;
+ u32 tm_hw_phase_cnt;
+ u32 tm_sw_phase_cnt;
+ u16 uid;
+};
+
+struct mlx5_ib_dev;
+
+struct mlx5_core_srq {
+ struct mlx5_core_rsc_common common; /* must be first */
+ u32 srqn;
+ int max;
+ size_t max_gs;
+ size_t max_avail_gather;
+ int wqe_shift;
+ void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
+
+ u16 uid;
+};
+
+struct mlx5_srq_table {
+ struct notifier_block nb;
+ struct xarray array;
+};
+
+int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in);
+int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq);
+int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out);
+int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq);
+struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn);
+
+int mlx5_init_srq_table(struct mlx5_ib_dev *dev);
+void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev);
+#endif /* MLX5_IB_SRQ_H */
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
new file mode 100644
index 000000000..8b3385396
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -0,0 +1,774 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mlx5/driver.h>
+#include "mlx5_ib.h"
+#include "srq.h"
+#include "qp.h"
+
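+/*
+ * Size in bytes of the PAS array needed to cover the SRQ buffer: the WQE ring
+ * plus the page offset (counted in quanta of page_size / 64), rounded up to
+ * whole pages, with one 64-bit address per page.
+ */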
+static int get_pas_size(struct mlx5_srq_attr *in)
+{
+ u32 log_page_size = in->log_page_size + 12;
+ u32 log_srq_size = in->log_size;
+ u32 log_rq_stride = in->wqe_shift;
+ u32 page_offset = in->page_offset;
+ u32 po_quanta = 1 << (log_page_size - 6);
+ u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
+ u32 page_size = 1 << log_page_size;
+ u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
+ u32 rq_num_pas = DIV_ROUND_UP(rq_sz_po, page_size);
+
+ return rq_num_pas * sizeof(u64);
+}
+
+static void set_wq(void *wq, struct mlx5_srq_attr *in)
+{
+ MLX5_SET(wq, wq, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
+ MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
+ MLX5_SET(wq, wq, log_wq_sz, in->log_size);
+ MLX5_SET(wq, wq, page_offset, in->page_offset);
+ MLX5_SET(wq, wq, lwm, in->lwm);
+ MLX5_SET(wq, wq, pd, in->pd);
+ MLX5_SET64(wq, wq, dbr_addr, in->db_record);
+}
+
+static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ MLX5_SET(srqc, srqc, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
+ MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
+ MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
+ MLX5_SET(srqc, srqc, page_offset, in->page_offset);
+ MLX5_SET(srqc, srqc, lwm, in->lwm);
+ MLX5_SET(srqc, srqc, pd, in->pd);
+ MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
+ MLX5_SET(srqc, srqc, xrcd, in->xrcd);
+ MLX5_SET(srqc, srqc, cqn, in->cqn);
+}
+
+static void get_wq(void *wq, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(wq, wq, wq_signature))
+ in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
+ in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
+ in->log_size = MLX5_GET(wq, wq, log_wq_sz);
+ in->page_offset = MLX5_GET(wq, wq, page_offset);
+ in->lwm = MLX5_GET(wq, wq, lwm);
+ in->pd = MLX5_GET(wq, wq, pd);
+ in->db_record = MLX5_GET64(wq, wq, dbr_addr);
+}
+
+static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(srqc, srqc, wq_signature))
+ in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
+ in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
+ in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
+ in->page_offset = MLX5_GET(srqc, srqc, page_offset);
+ in->lwm = MLX5_GET(srqc, srqc, lwm);
+ in->pd = MLX5_GET(srqc, srqc, pd);
+ in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
+}
+
+struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+ struct mlx5_core_srq *srq;
+
+ xa_lock_irq(&table->array);
+ srq = xa_load(&table->array, srqn);
+ if (srq)
+ refcount_inc(&srq->common.refcount);
+ xa_unlock_irq(&table->array);
+
+ return srq;
+}
+
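+/*
+ * Record the page size chosen for a user umem in the SRQ attributes and check
+ * that it produces exactly the PAS array size that get_pas_size() expects.
+ */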
+static int __set_srq_page_size(struct mlx5_srq_attr *in,
+ unsigned long page_size)
+{
+ if (!page_size)
+ return -EINVAL;
+ in->log_page_size = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;
+
+ if (WARN_ON(get_pas_size(in) !=
+ ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64)))
+ return -EINVAL;
+ return 0;
+}
+
+#define set_srq_page_size(in, typ, log_pgsz_fld) \
+ __set_srq_page_size(in, mlx5_umem_find_best_quantized_pgoff( \
+ (in)->umem, typ, log_pgsz_fld, \
+ MLX5_ADAPTER_PAGE_SHIFT, page_offset, \
+ 64, &(in)->page_offset))
+
+static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
+ void *create_in;
+ void *srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ if (in->umem) {
+ err = set_srq_page_size(in, srqc, log_page_size);
+ if (err)
+ return err;
+ }
+
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
+ create_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!create_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_srq_in, create_in, uid, in->uid);
+ srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
+ pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
+
+ set_srqc(srqc, in);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
+
+ MLX5_SET(create_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_SRQ);
+
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
+ sizeof(create_out));
+ kvfree(create_in);
+ if (!err) {
+ srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
+ srq->uid = in->uid;
+ }
+
+ return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};
+
+ MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
+ MLX5_SET(destroy_srq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
+}
+
+static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
+
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+ MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
+}
+
+static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
+ u32 *srq_out;
+ void *srqc;
+ int err;
+
+ srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
+ if (!srq_out)
+ return -ENOMEM;
+
+ MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
+ MLX5_SET(query_srq_in, in, srqn, srq->srqn);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
+ if (err)
+ goto out;
+
+ srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
+ get_srqc(srqc, out);
+ if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+out:
+ kvfree(srq_out);
+ return err;
+}
+
+static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
+ void *create_in;
+ void *xrc_srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ if (in->umem) {
+ err = set_srq_page_size(in, xrc_srqc, log_page_size);
+ if (err)
+ return err;
+ }
+
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+ create_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!create_in)
+ return -ENOMEM;
+
+ MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+ xrc_srq_context_entry);
+ pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+ set_srqc(xrc_srqc, in);
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
+ MLX5_SET(create_xrc_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+ memset(create_out, 0, sizeof(create_out));
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
+ sizeof(create_out));
+ if (err)
+ goto out;
+
+ srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+ srq->uid = in->uid;
+out:
+ kvfree(create_in);
+ return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
+ struct mlx5_core_srq *srq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};
+
+ MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
+ MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};
+
+ MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, op_mod,
+ MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
+ MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
+}
+
+static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
+ struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
+ u32 *xrcsrq_out;
+ void *xrc_srqc;
+ int err;
+
+ xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
+ if (!xrcsrq_out)
+ return -ENOMEM;
+
+ MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
+ MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
+ if (err)
+ goto out;
+
+ xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+ xrc_srq_context_entry);
+ get_srqc(xrc_srqc, out);
+ if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+
+out:
+ kvfree(xrcsrq_out);
+ return err;
+}
+
+static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ void *create_out = NULL;
+ void *create_in = NULL;
+ void *rmpc;
+ void *wq;
+ void *pas;
+ int pas_size;
+ int outlen;
+ int inlen;
+ int err;
+
+ if (in->umem) {
+ err = set_srq_page_size(in, wq, log_wq_pg_sz);
+ if (err)
+ return err;
+ }
+
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+ outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
+ create_in = kvzalloc(inlen, GFP_KERNEL);
+ create_out = kvzalloc(outlen, GFP_KERNEL);
+ if (!create_in || !create_out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(create_rmp_in, create_in, uid, in->uid);
+ pas = MLX5_ADDR_OF(rmpc, rmpc, wq.pas);
+
+ set_wq(wq, in);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
+
+ MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
+ if (!err) {
+ srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
+ srq->uid = in->uid;
+ }
+
+out:
+ kvfree(create_in);
+ kvfree(create_out);
+ return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};
+
+ MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+ MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
+ return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
+}
+
+static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ void *out = NULL;
+ void *in = NULL;
+ void *rmpc;
+ void *wq;
+ void *bitmask;
+ int outlen;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
+ outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ out = kvzalloc(outlen, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
+ bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+ MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
+ MLX5_SET(modify_rmp_in, in, uid, srq->uid);
+ MLX5_SET(wq, wq, lwm, lwm);
+ MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);
+
+out:
+ kvfree(in);
+ kvfree(out);
+ return err;
+}
+
+static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ u32 *rmp_out = NULL;
+ u32 *rmp_in = NULL;
+ void *rmpc;
+ int outlen;
+ int inlen;
+ int err;
+
+ outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+ inlen = MLX5_ST_SZ_BYTES(query_rmp_in);
+
+ rmp_out = kvzalloc(outlen, GFP_KERNEL);
+ rmp_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!rmp_out || !rmp_in) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
+ MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
+ err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
+ if (err)
+ goto out;
+
+ rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+ get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
+ if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+
+out:
+ kvfree(rmp_out);
+ kvfree(rmp_in);
+ return err;
+}
+
+static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
+ void *create_in;
+ void *xrqc;
+ void *wq;
+ void *pas;
+ int pas_size;
+ int inlen;
+ int err;
+
+ if (in->umem) {
+ err = set_srq_page_size(in, wq, log_wq_pg_sz);
+ if (err)
+ return err;
+ }
+
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
+ create_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!create_in)
+ return -ENOMEM;
+
+ xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
+ wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
+ pas = MLX5_ADDR_OF(xrqc, xrqc, wq.pas);
+
+ set_wq(wq, in);
+ if (in->umem)
+ mlx5_ib_populate_pas(
+ in->umem,
+ 1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
+ pas, 0);
+ else
+ memcpy(pas, in->pas, pas_size);
+
+ if (in->type == IB_SRQT_TM) {
+ MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
+ if (in->flags & MLX5_SRQ_FLAG_RNDV)
+ MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
+ MLX5_SET(xrqc, xrqc,
+ tag_matching_topology_context.log_matching_list_sz,
+ in->tm_log_list_size);
+ }
+ MLX5_SET(xrqc, xrqc, user_index, in->user_index);
+ MLX5_SET(xrqc, xrqc, cqn, in->cqn);
+ MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
+ MLX5_SET(create_xrq_in, create_in, uid, in->uid);
+ err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
+ sizeof(create_out));
+ kvfree(create_in);
+ if (!err) {
+ srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
+ srq->uid = in->uid;
+ }
+
+ return err;
+}
+
+static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};
+
+ MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
+ MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
+ MLX5_SET(destroy_xrq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
+}
+
+static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
+ struct mlx5_core_srq *srq,
+ u16 lwm)
+{
+ u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};
+
+ MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
+ MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
+ MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
+ MLX5_SET(arm_rq_in, in, lwm, lwm);
+ MLX5_SET(arm_rq_in, in, uid, srq->uid);
+
+ return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
+}
+
+static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
+ u32 *xrq_out;
+ int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
+ void *xrqc;
+ int err;
+
+ xrq_out = kvzalloc(outlen, GFP_KERNEL);
+ if (!xrq_out)
+ return -ENOMEM;
+
+ MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
+ MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);
+
+ err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
+ if (err)
+ goto out;
+
+ xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
+ get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
+ if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+ out->tm_next_tag =
+ MLX5_GET(xrqc, xrqc,
+ tag_matching_topology_context.append_next_index);
+ out->tm_hw_phase_cnt =
+ MLX5_GET(xrqc, xrqc,
+ tag_matching_topology_context.hw_phase_cnt);
+ out->tm_sw_phase_cnt =
+ MLX5_GET(xrqc, xrqc,
+ tag_matching_topology_context.sw_phase_cnt);
+
+out:
+ kvfree(xrq_out);
+ return err;
+}
+
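+/*
+ * With ISSI 0 firmware the legacy SRQ commands are used for every SRQ type;
+ * otherwise the resource type chosen at creation time (XRC SRQ, XRQ or RMP)
+ * selects the command set.
+ */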
+static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ if (!dev->mdev->issi)
+ return create_srq_cmd(dev, srq, in);
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ return create_xrc_srq_cmd(dev, srq, in);
+ case MLX5_RES_XRQ:
+ return create_xrq_cmd(dev, srq, in);
+ default:
+ return create_rmp_cmd(dev, srq, in);
+ }
+}
+
+static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ if (!dev->mdev->issi)
+ return destroy_srq_cmd(dev, srq);
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ return destroy_xrc_srq_cmd(dev, srq);
+ case MLX5_RES_XRQ:
+ return destroy_xrq_cmd(dev, srq);
+ default:
+ return destroy_rmp_cmd(dev, srq);
+ }
+}
+
+int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *in)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+ int err;
+
+ switch (in->type) {
+ case IB_SRQT_XRC:
+ srq->common.res = MLX5_RES_XSRQ;
+ break;
+ case IB_SRQT_TM:
+ srq->common.res = MLX5_RES_XRQ;
+ break;
+ default:
+ srq->common.res = MLX5_RES_SRQ;
+ }
+
+ err = create_srq_split(dev, srq, in);
+ if (err)
+ return err;
+
+ refcount_set(&srq->common.refcount, 1);
+ init_completion(&srq->common.free);
+
+ err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
+ if (err)
+ goto err_destroy_srq_split;
+
+ return 0;
+
+err_destroy_srq_split:
+ destroy_srq_split(dev, srq);
+
+ return err;
+}
+
+int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+ struct mlx5_core_srq *tmp;
+ int err;
+
+ /* Delete entry, but leave index occupied */
+ tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
+ if (WARN_ON(tmp != srq))
+ return xa_err(tmp) ?: -EINVAL;
+
+ err = destroy_srq_split(dev, srq);
+ if (err) {
+		/*
+		 * We don't need to check the returned result for an error,
+		 * because we are restoring the entry into its pre-allocated
+		 * xarray slot and this can't fail at this stage.
+		 */
+ xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
+ return err;
+ }
+ xa_erase_irq(&table->array, srq->srqn);
+
+ mlx5_core_res_put(&srq->common);
+ wait_for_completion(&srq->common.free);
+ return 0;
+}
+
+int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ struct mlx5_srq_attr *out)
+{
+ if (!dev->mdev->issi)
+ return query_srq_cmd(dev, srq, out);
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ return query_xrc_srq_cmd(dev, srq, out);
+ case MLX5_RES_XRQ:
+ return query_xrq_cmd(dev, srq, out);
+ default:
+ return query_rmp_cmd(dev, srq, out);
+ }
+}
+
+int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
+ u16 lwm, int is_srq)
+{
+ if (!dev->mdev->issi)
+ return arm_srq_cmd(dev, srq, lwm, is_srq);
+ switch (srq->common.res) {
+ case MLX5_RES_XSRQ:
+ return arm_xrc_srq_cmd(dev, srq, lwm);
+ case MLX5_RES_XRQ:
+ return arm_xrq_cmd(dev, srq, lwm);
+ default:
+ return arm_rmp_cmd(dev, srq, lwm);
+ }
+}
+
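+/*
+ * Forward SRQ asynchronous events (catastrophic error and limit reached) from
+ * the EQ notifier chain to the per-SRQ event callback, holding a reference on
+ * the SRQ across the callback.
+ */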
+static int srq_event_notifier(struct notifier_block *nb,
+ unsigned long type, void *data)
+{
+ struct mlx5_srq_table *table;
+ struct mlx5_core_srq *srq;
+ struct mlx5_eqe *eqe;
+ u32 srqn;
+
+ if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
+ type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
+ return NOTIFY_DONE;
+
+ table = container_of(nb, struct mlx5_srq_table, nb);
+
+ eqe = data;
+ srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+
+ xa_lock(&table->array);
+ srq = xa_load(&table->array, srqn);
+ if (srq)
+ refcount_inc(&srq->common.refcount);
+ xa_unlock(&table->array);
+
+ if (!srq)
+ return NOTIFY_OK;
+
+ srq->event(srq, eqe->type);
+
+ mlx5_core_res_put(&srq->common);
+
+ return NOTIFY_OK;
+}
+
+int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+
+ memset(table, 0, sizeof(*table));
+ xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);
+
+ table->nb.notifier_call = srq_event_notifier;
+ mlx5_notifier_register(dev->mdev, &table->nb);
+
+ return 0;
+}
+
+void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_srq_table *table = &dev->srq_table;
+
+ mlx5_notifier_unregister(dev->mdev, &table->nb);
+}
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
new file mode 100644
index 000000000..bbfcce3bd
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <rdma/uverbs_ioctl.h>
+#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_ib.h"
+
+#define UVERBS_MODULE_NAME mlx5_ib
+#include <rdma/uverbs_named_ioctl.h>
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_PD_QUERY)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_pd *pd =
+ uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_PD_HANDLE);
+ struct mlx5_ib_pd *mpd = to_mpd(pd);
+
+ return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
+ &mpd->pdn, sizeof(mpd->pdn));
+}
+
+static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
+ bool sw_owner_supp;
+ u64 icm_rx;
+ u64 icm_tx;
+ int err;
+
+ sw_owner_supp = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
+
+ if (vport == MLX5_VPORT_UPLINK) {
+ icm_rx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_rx);
+ icm_tx = MLX5_CAP64_ESW_FLOWTABLE(mdev,
+ sw_steering_uplink_icm_address_tx);
+ } else {
+ MLX5_SET(query_esw_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+ MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
+ MLX5_SET(query_esw_vport_context_in, in, other_vport, true);
+
+ err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in,
+ out);
+
+ if (err)
+ return err;
+
+ icm_rx = MLX5_GET64(
+ query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_rx);
+
+ icm_tx = MLX5_GET64(
+ query_esw_vport_context_out, out,
+ esw_vport_context.sw_steering_vport_icm_address_tx);
+ }
+
+ if (sw_owner_supp && icm_rx) {
+ info->vport_steering_icm_rx = icm_rx;
+ info->flags |=
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX;
+ }
+
+ if (sw_owner_supp && icm_tx) {
+ info->vport_steering_icm_tx = icm_tx;
+ info->flags |=
+ MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX;
+ }
+
+ return 0;
+}
+
+static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ size_t out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ void *out;
+ int err;
+
+ out = kzalloc(out_sz, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, other_function, true);
+ MLX5_SET(query_hca_cap_in, in, function_id, vport);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
+ HCA_CAP_OPMOD_GET_CUR);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_sz);
+ if (err)
+ goto out;
+
+ info->vport_vhca_id = MLX5_GET(query_hca_cap_out, out,
+ capability.cmd_hca_cap.vhca_id);
+
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
+out:
+ kfree(out);
+ return err;
+}
+
+static int fill_switchdev_info(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_uapi_query_port *info)
+{
+ struct mlx5_eswitch_rep *rep;
+ struct mlx5_core_dev *mdev;
+ int err;
+
+ rep = dev->port[port_num - 1].rep;
+ if (!rep)
+ return -EOPNOTSUPP;
+
+ mdev = mlx5_eswitch_get_core_dev(rep->esw);
+ if (!mdev)
+ return -EINVAL;
+
+ info->vport = rep->vport;
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT;
+
+ if (rep->vport != MLX5_VPORT_UPLINK) {
+ err = fill_vport_vhca_id(mdev, rep->vport, info);
+ if (err)
+ return err;
+ }
+
+ info->esw_owner_vhca_id = MLX5_CAP_GEN(mdev, vhca_id);
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID;
+
+ err = fill_vport_icm_addr(mdev, rep->vport, info);
+ if (err)
+ return err;
+
+ if (mlx5_eswitch_vport_match_metadata_enabled(rep->esw)) {
+ info->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(
+ rep->esw, rep->vport);
+ info->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
+ info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0;
+ }
+
+ return 0;
+}
+
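+/*
+ * Handler for the MLX5_IB_METHOD_QUERY_PORT uverbs method. When the E-Switch
+ * is in switchdev (offloads) mode the response also carries the representor's
+ * vport, vhca IDs, steering ICM addresses and reg_c0 match metadata.
+ */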
+static int UVERBS_HANDLER(MLX5_IB_METHOD_QUERY_PORT)(
+ struct uverbs_attr_bundle *attrs)
+{
+ struct mlx5_ib_uapi_query_port info = {};
+ struct mlx5_ib_ucontext *c;
+ struct mlx5_ib_dev *dev;
+ u32 port_num;
+ int ret;
+
+ if (uverbs_copy_from(&port_num, attrs,
+ MLX5_IB_ATTR_QUERY_PORT_PORT_NUM))
+ return -EFAULT;
+
+ c = to_mucontext(ib_uverbs_get_ucontext(attrs));
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+ dev = to_mdev(c->ibucontext.device);
+
+ if (!rdma_is_port_valid(&dev->ib_dev, port_num))
+ return -EINVAL;
+
+ if (mlx5_eswitch_mode(dev->mdev) == MLX5_ESWITCH_OFFLOADS) {
+ ret = fill_switchdev_info(dev, port_num, &info);
+ if (ret)
+ return ret;
+ }
+
+ return uverbs_copy_to_struct_or_zero(attrs, MLX5_IB_ATTR_QUERY_PORT, &info,
+ sizeof(info));
+}
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_QUERY_PORT,
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_QUERY_PORT_PORT_NUM,
+ UVERBS_ATTR_TYPE(u32), UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(
+ MLX5_IB_ATTR_QUERY_PORT,
+ UVERBS_ATTR_STRUCT(struct mlx5_ib_uapi_query_port,
+ reg_c0),
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_device,
+ UVERBS_OBJECT_DEVICE,
+ &UVERBS_METHOD(MLX5_IB_METHOD_QUERY_PORT));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_PD_QUERY,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_PD_HANDLE,
+ UVERBS_OBJECT_PD,
+ UVERBS_ACCESS_READ,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_PD_RESP_PDN,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(mlx5_ib_pd,
+ UVERBS_OBJECT_PD,
+ &UVERBS_METHOD(MLX5_IB_METHOD_PD_QUERY));
+
+const struct uapi_definition mlx5_ib_std_types_defs[] = {
+ UAPI_DEF_CHAIN_OBJ_TREE(
+ UVERBS_OBJECT_PD,
+ &mlx5_ib_pd),
+ UAPI_DEF_CHAIN_OBJ_TREE(
+ UVERBS_OBJECT_DEVICE,
+ &mlx5_ib_device),
+ {},
+};
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
new file mode 100644
index 000000000..cb5cee3de
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -0,0 +1,763 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <rdma/ib_umem_odp.h>
+#include "mlx5_ib.h"
+#include "umr.h"
+#include "wr.h"
+
+/*
+ * We can't use a static array for xlt_emergency_page because dma_map_single
+ * doesn't work on memory that belongs to kernel modules.
+ */
+void *xlt_emergency_page;
+static DEFINE_MUTEX(xlt_emergency_page_mutex);
+
+static __be64 get_umr_enable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_disable_mr_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_FREE;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_translation_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_access_mask(struct mlx5_ib_dev *dev)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW;
+
+ if (MLX5_CAP_GEN(dev->mdev, atomic))
+ result |= MLX5_MKEY_MASK_A;
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+ result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_pd_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_PD;
+
+ return cpu_to_be64(result);
+}
+
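+/*
+ * Reject mkey mask bits that the device capabilities do not allow UMR to
+ * modify.
+ */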
+static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
+{
+ if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ return -EPERM;
+
+ if (mask & MLX5_MKEY_MASK_A &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ return -EPERM;
+
+ if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+ return -EPERM;
+
+ if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ return -EPERM;
+
+ return 0;
+}
+
+enum {
+ MAX_UMR_WR = 128,
+};
+
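+/* Move the UMR QP from RESET through INIT and RTR to RTS so it can post WQEs. */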
+static int mlx5r_umr_qp_rst2rts(struct mlx5_ib_dev *dev, struct ib_qp *qp)
+{
+ struct ib_qp_attr attr = {};
+ int ret;
+
+ attr.qp_state = IB_QPS_INIT;
+ attr.port_num = 1;
+ ret = ib_modify_qp(qp, &attr,
+ IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
+ if (ret) {
+ mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
+ return ret;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.qp_state = IB_QPS_RTR;
+
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
+ return ret;
+ }
+
+ memset(&attr, 0, sizeof(attr));
+ attr.qp_state = IB_QPS_RTS;
+ ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+ if (ret) {
+ mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
+{
+ struct ib_qp_init_attr init_attr = {};
+ struct ib_pd *pd;
+ struct ib_cq *cq;
+ struct ib_qp *qp;
+ int ret;
+
+ pd = ib_alloc_pd(&dev->ib_dev, 0);
+ if (IS_ERR(pd)) {
+ mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
+ return PTR_ERR(pd);
+ }
+
+ cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
+ if (IS_ERR(cq)) {
+ mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
+ ret = PTR_ERR(cq);
+ goto destroy_pd;
+ }
+
+ init_attr.send_cq = cq;
+ init_attr.recv_cq = cq;
+ init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+ init_attr.cap.max_send_wr = MAX_UMR_WR;
+ init_attr.cap.max_send_sge = 1;
+ init_attr.qp_type = MLX5_IB_QPT_REG_UMR;
+ init_attr.port_num = 1;
+ qp = ib_create_qp(pd, &init_attr);
+ if (IS_ERR(qp)) {
+ mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
+ ret = PTR_ERR(qp);
+ goto destroy_cq;
+ }
+
+ ret = mlx5r_umr_qp_rst2rts(dev, qp);
+ if (ret)
+ goto destroy_qp;
+
+ dev->umrc.qp = qp;
+ dev->umrc.cq = cq;
+ dev->umrc.pd = pd;
+
+ sema_init(&dev->umrc.sem, MAX_UMR_WR);
+ mutex_init(&dev->umrc.lock);
+ dev->umrc.state = MLX5_UMR_STATE_ACTIVE;
+
+ return 0;
+
+destroy_qp:
+ ib_destroy_qp(qp);
+destroy_cq:
+ ib_free_cq(cq);
+destroy_pd:
+ ib_dealloc_pd(pd);
+ return ret;
+}
+
+void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
+{
+ if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
+ return;
+ ib_destroy_qp(dev->umrc.qp);
+ ib_free_cq(dev->umrc.cq);
+ ib_dealloc_pd(dev->umrc.pd);
+}
+
+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_qp_attr attr;
+ int err;
+
+ attr.qp_state = IB_QPS_RESET;
+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+ if (err) {
+ mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
+ goto err;
+ }
+
+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+ if (err)
+ goto err;
+
+ umrc->state = MLX5_UMR_STATE_ACTIVE;
+ return 0;
+
+err:
+ umrc->state = MLX5_UMR_STATE_ERR;
+ return err;
+}
+
+static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
+ struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+ unsigned int wqe_size =
+ with_data ? sizeof(struct mlx5r_umr_wqe) :
+ sizeof(struct mlx5r_umr_wqe) -
+ sizeof(struct mlx5_wqe_data_seg);
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ union {
+ struct ib_cqe *ib_cqe;
+ u64 wr_id;
+ } id;
+ void *cur_edge, *seg;
+ unsigned long flags;
+ unsigned int idx;
+ int size, err;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
+ return -EIO;
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge, 0,
+ cpu_to_be32(mkey), false, false);
+ if (WARN_ON(err))
+ goto out;
+
+ qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
+
+ mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size);
+
+ id.ib_cqe = cqe;
+ mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
+ MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);
+
+ mlx5r_ring_db(qp, 1, ctrl);
+
+out:
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return err;
+}
+
+static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ struct mlx5_ib_umr_context *context =
+ container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
+
+ context->status = wc->status;
+ complete(&context->done);
+}
+
+static inline void mlx5r_umr_init_context(struct mlx5r_umr_context *context)
+{
+ context->cqe.done = mlx5r_umr_done;
+ init_completion(&context->done);
+}
+
+static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
+ struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct mlx5r_umr_context umr_context;
+ int err;
+
+ err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask));
+ if (WARN_ON(err))
+ return err;
+
+ mlx5r_umr_init_context(&umr_context);
+
+ down(&umrc->sem);
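+	/*
+	 * Retry loop: WQEs completed with IB_WC_WR_FLUSH_ERR are reposted
+	 * once the QP has been recovered; any other completion error resets
+	 * the QP back to RTS via mlx5r_umr_recover() and fails the request.
+	 */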
+ while (true) {
+ mutex_lock(&umrc->lock);
+ if (umrc->state == MLX5_UMR_STATE_ERR) {
+ mutex_unlock(&umrc->lock);
+ err = -EFAULT;
+ break;
+ }
+
+ if (umrc->state == MLX5_UMR_STATE_RECOVER) {
+ mutex_unlock(&umrc->lock);
+ usleep_range(3000, 5000);
+ continue;
+ }
+
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
+ with_data);
+ mutex_unlock(&umrc->lock);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
+ err);
+ break;
+ }
+
+ wait_for_completion(&umr_context.done);
+
+ if (umr_context.status == IB_WC_SUCCESS)
+ break;
+
+ if (umr_context.status == IB_WC_WR_FLUSH_ERR)
+ continue;
+
+ WARN_ON_ONCE(1);
+ mlx5_ib_warn(dev,
+ "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
+ umr_context.status);
+ mutex_lock(&umrc->lock);
+ err = mlx5r_umr_recover(dev);
+ mutex_unlock(&umrc->lock);
+ if (err)
+ mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
+ err);
+ err = -EFAULT;
+ break;
+ }
+ up(&umrc->sem);
+ return err;
+}
+
+/**
+ * mlx5r_umr_revoke_mr - Fence all DMA on the MR
+ * @mr: The MR to fence
+ *
+ * Upon return the NIC will not be doing any DMA to the pages under the MR,
+ * and any DMA in progress will be completed. Failure of this function
+ * indicates the HW has failed catastrophically.
+ */
+int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct mlx5r_umr_wqe wqe = {};
+
+ if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return 0;
+
+ wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
+ wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
+ wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
+
+ MLX5_SET(mkc, &wqe.mkey_seg, free, 1);
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(dev->umrc.pd)->pdn);
+ MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+ mlx5_mkey_variant(mr->mmkey.key));
+
+ return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+}
+
+static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
+ struct mlx5_mkey_seg *seg,
+ unsigned int access_flags)
+{
+ bool ro_read = (access_flags & IB_ACCESS_RELAXED_ORDERING) &&
+ pcie_relaxed_ordering_enabled(dev->mdev->pdev);
+
+ MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, seg, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, seg, lr, 1);
+ MLX5_SET(mkc, seg, relaxed_ordering_write,
+ !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+ MLX5_SET(mkc, seg, relaxed_ordering_read, ro_read);
+}
+
+int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+ int access_flags)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct mlx5r_umr_wqe wqe = {};
+ int err;
+
+ wqe.ctrl_seg.mkey_mask = get_umr_update_access_mask(dev);
+ wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
+ wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE;
+ wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
+
+ mlx5r_umr_set_access_flags(dev, &wqe.mkey_seg, access_flags);
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+ mlx5_mkey_variant(mr->mmkey.key));
+
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+ if (err)
+ return err;
+
+ mr->access_flags = access_flags;
+ return 0;
+}
+
+#define MLX5_MAX_UMR_CHUNK \
+ ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_MTT_ALIGNMENT)
+#define MLX5_SPARE_UMR_CHUNK 0x10000
+
+/*
+ * Allocate a temporary buffer to hold the per-page information to transfer to
+ * HW. For efficiency this should be as large as it can be, but buffer
+ * allocation failure is not allowed, so try smaller sizes.
+ */
+static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
+{
+ const size_t xlt_chunk_align = MLX5_UMR_MTT_ALIGNMENT / ent_size;
+ size_t size;
+ void *res = NULL;
+
+ static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
+
+	/*
+	 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context, just that
+	 * the allocation can't trigger any kind of reclaim.
+	 */
+ might_sleep();
+
+ gfp_mask |= __GFP_ZERO | __GFP_NORETRY;
+
+ /*
+ * If the system already has a suitable high order page then just use
+ * that, but don't try hard to create one. This max is about 1M, so a
+ * free x86 huge page will satisfy it.
+ */
+ size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
+ MLX5_MAX_UMR_CHUNK);
+ *nents = size / ent_size;
+ res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
+ get_order(size));
+ if (res)
+ return res;
+
+ if (size > MLX5_SPARE_UMR_CHUNK) {
+ size = MLX5_SPARE_UMR_CHUNK;
+ *nents = size / ent_size;
+ res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
+ get_order(size));
+ if (res)
+ return res;
+ }
+
+ *nents = PAGE_SIZE / ent_size;
+ res = (void *)__get_free_page(gfp_mask);
+ if (res)
+ return res;
+
+ mutex_lock(&xlt_emergency_page_mutex);
+ memset(xlt_emergency_page, 0, PAGE_SIZE);
+ return xlt_emergency_page;
+}
+
+static void mlx5r_umr_free_xlt(void *xlt, size_t length)
+{
+ if (xlt == xlt_emergency_page) {
+ mutex_unlock(&xlt_emergency_page_mutex);
+ return;
+ }
+
+ free_pages((unsigned long)xlt, get_order(length));
+}
+
+static void mlx5r_umr_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
+ struct ib_sge *sg)
+{
+ struct device *ddev = &dev->mdev->pdev->dev;
+
+ dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
+ mlx5r_umr_free_xlt(xlt, sg->length);
+}
+
+/*
+ * Create an XLT buffer ready for submission.
+ */
+static void *mlx5r_umr_create_xlt(struct mlx5_ib_dev *dev, struct ib_sge *sg,
+ size_t nents, size_t ent_size,
+ unsigned int flags)
+{
+ struct device *ddev = &dev->mdev->pdev->dev;
+ dma_addr_t dma;
+ void *xlt;
+
+ xlt = mlx5r_umr_alloc_xlt(&nents, ent_size,
+ flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
+ GFP_KERNEL);
+ sg->length = nents * ent_size;
+ dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma)) {
+ mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
+ mlx5r_umr_free_xlt(xlt, sg->length);
+ return NULL;
+ }
+ sg->addr = dma;
+ sg->lkey = dev->umrc.pd->local_dma_lkey;
+
+ return xlt;
+}
+
+static void
+mlx5r_umr_set_update_xlt_ctrl_seg(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
+ unsigned int flags, struct ib_sge *sg)
+{
+ if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
+ /* fail if free */
+ ctrl_seg->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ ctrl_seg->flags = MLX5_UMR_CHECK_NOT_FREE;
+ ctrl_seg->xlt_octowords =
+ cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
+}
+
+static void mlx5r_umr_set_update_xlt_mkey_seg(struct mlx5_ib_dev *dev,
+ struct mlx5_mkey_seg *mkey_seg,
+ struct mlx5_ib_mr *mr,
+ unsigned int page_shift)
+{
+ mlx5r_umr_set_access_flags(dev, mkey_seg, mr->access_flags);
+ MLX5_SET(mkc, mkey_seg, pd, to_mpd(mr->ibmr.pd)->pdn);
+ MLX5_SET64(mkc, mkey_seg, start_addr, mr->ibmr.iova);
+ MLX5_SET64(mkc, mkey_seg, len, mr->ibmr.length);
+ MLX5_SET(mkc, mkey_seg, log_page_size, page_shift);
+ MLX5_SET(mkc, mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, mkey_seg, mkey_7_0, mlx5_mkey_variant(mr->mmkey.key));
+}
+
+static void
+mlx5r_umr_set_update_xlt_data_seg(struct mlx5_wqe_data_seg *data_seg,
+ struct ib_sge *sg)
+{
+ data_seg->byte_count = cpu_to_be32(sg->length);
+ data_seg->lkey = cpu_to_be32(sg->lkey);
+ data_seg->addr = cpu_to_be64(sg->addr);
+}
+
+static void mlx5r_umr_update_offset(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
+ u64 offset)
+{
+ u64 octo_offset = mlx5r_umr_get_xlt_octo(offset);
+
+ ctrl_seg->xlt_offset = cpu_to_be16(octo_offset & 0xffff);
+ ctrl_seg->xlt_offset_47_16 = cpu_to_be32(octo_offset >> 16);
+ ctrl_seg->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
+}
+
+static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
+ struct mlx5r_umr_wqe *wqe,
+ struct mlx5_ib_mr *mr, struct ib_sge *sg,
+ unsigned int flags)
+{
+ bool update_pd_access, update_translation;
+
+ if (flags & MLX5_IB_UPD_XLT_ENABLE)
+ wqe->ctrl_seg.mkey_mask |= get_umr_enable_mr_mask();
+
+ update_pd_access = flags & MLX5_IB_UPD_XLT_ENABLE ||
+ flags & MLX5_IB_UPD_XLT_PD ||
+ flags & MLX5_IB_UPD_XLT_ACCESS;
+
+ if (update_pd_access) {
+ wqe->ctrl_seg.mkey_mask |= get_umr_update_access_mask(dev);
+ wqe->ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
+ }
+
+ update_translation =
+ flags & MLX5_IB_UPD_XLT_ENABLE || flags & MLX5_IB_UPD_XLT_ADDR;
+
+ if (update_translation) {
+ wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask();
+ if (!mr->ibmr.length)
+ MLX5_SET(mkc, &wqe->mkey_seg, length64, 1);
+ }
+
+ wqe->ctrl_seg.xlt_octowords =
+ cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
+ wqe->data_seg.byte_count = cpu_to_be32(sg->length);
+}
+
+/*
+ * Send the DMA list to the HW for a normal MR using UMR.
+ * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
+ * flag may be used.
+ */
+int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct device *ddev = &dev->mdev->pdev->dev;
+ struct mlx5r_umr_wqe wqe = {};
+ struct ib_block_iter biter;
+ struct mlx5_mtt *cur_mtt;
+ size_t orig_sg_length;
+ struct mlx5_mtt *mtt;
+ size_t final_size;
+ struct ib_sge sg;
+ u64 offset = 0;
+ int err = 0;
+
+ if (WARN_ON(mr->umem->is_odp))
+ return -EINVAL;
+
+ mtt = mlx5r_umr_create_xlt(
+ dev, &sg, ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
+ sizeof(*mtt), flags);
+ if (!mtt)
+ return -ENOMEM;
+
+ orig_sg_length = sg.length;
+
+ mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
+ mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr,
+ mr->page_shift);
+ mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);
+
+ cur_mtt = mtt;
+ rdma_for_each_block(mr->umem->sgt_append.sgt.sgl, &biter,
+ mr->umem->sgt_append.sgt.nents,
+ BIT(mr->page_shift)) {
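+		/*
+		 * The XLT buffer typically holds only part of the MTT list.
+		 * Once it is full, push this chunk to HW at the current
+		 * translation offset and refill it from the start.
+		 */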
+ if (cur_mtt == (void *)mtt + sg.length) {
+ dma_sync_single_for_device(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe,
+ true);
+ if (err)
+ goto err;
+ dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+ offset += sg.length;
+ mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);
+
+ cur_mtt = mtt;
+ }
+
+ cur_mtt->ptag =
+ cpu_to_be64(rdma_block_iter_dma_address(&biter) |
+ MLX5_IB_MTT_PRESENT);
+
+ if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
+ cur_mtt->ptag = 0;
+
+ cur_mtt++;
+ }
+
+ final_size = (void *)cur_mtt - (void *)mtt;
+ sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
+ memset(cur_mtt, 0, sg.length - final_size);
+ mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
+
+ dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
+
+err:
+ sg.length = orig_sg_length;
+ mlx5r_umr_unmap_free_xlt(dev, mtt, &sg);
+ return err;
+}
+
+static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
+{
+ return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
+}
+
+int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
+ int page_shift, int flags)
+{
+ int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
+ ? sizeof(struct mlx5_klm)
+ : sizeof(struct mlx5_mtt);
+ const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct device *ddev = &dev->mdev->pdev->dev;
+ const int page_mask = page_align - 1;
+ struct mlx5r_umr_wqe wqe = {};
+ size_t pages_mapped = 0;
+ size_t pages_to_map = 0;
+ size_t size_to_map = 0;
+ size_t orig_sg_length;
+ size_t pages_iter;
+ struct ib_sge sg;
+ int err = 0;
+ void *xlt;
+
+ if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
+ !umr_can_use_indirect_mkey(dev))
+ return -EPERM;
+
+ if (WARN_ON(!mr->umem->is_odp))
+ return -EINVAL;
+
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
+ * so we need to align the offset and length accordingly
+ */
+ if (idx & page_mask) {
+ npages += idx & page_mask;
+ idx &= ~page_mask;
+ }
+ pages_to_map = ALIGN(npages, page_align);
+
+ xlt = mlx5r_umr_create_xlt(dev, &sg, npages, desc_size, flags);
+ if (!xlt)
+ return -ENOMEM;
+
+ pages_iter = sg.length / desc_size;
+ orig_sg_length = sg.length;
+
+ if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
+ struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
+ size_t max_pages = ib_umem_odp_num_pages(odp) - idx;
+
+ pages_to_map = min_t(size_t, pages_to_map, max_pages);
+ }
+
+ mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
+ mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr, page_shift);
+ mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);
+
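+	/*
+	 * Populate and post the XLT in chunks of at most pages_iter
+	 * entries; the last chunk also carries the enable/translation
+	 * updates added by mlx5r_umr_final_update_xlt().
+	 */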
+ for (pages_mapped = 0;
+ pages_mapped < pages_to_map && !err;
+ pages_mapped += pages_iter, idx += pages_iter) {
+ npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
+ size_to_map = npages * desc_size;
+ dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+ mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
+ dma_sync_single_for_device(ddev, sg.addr, sg.length,
+ DMA_TO_DEVICE);
+ sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
+
+ if (pages_mapped + pages_iter >= pages_to_map)
+ mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
+ mlx5r_umr_update_offset(&wqe.ctrl_seg, idx * desc_size);
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
+ }
+ sg.length = orig_sg_length;
+ mlx5r_umr_unmap_free_xlt(dev, xlt, &sg);
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/umr.h b/drivers/infiniband/hw/mlx5/umr.h
new file mode 100644
index 000000000..c9d002138
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef _MLX5_IB_UMR_H
+#define _MLX5_IB_UMR_H
+
+#include "mlx5_ib.h"
+
+
+#define MLX5_MAX_UMR_SHIFT 16
+#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
+
+#define MLX5_IB_UMR_OCTOWORD 16
+#define MLX5_IB_UMR_XLT_ALIGNMENT 64
+
+int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev);
+void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev);
+
+static inline bool mlx5r_umr_can_load_pas(struct mlx5_ib_dev *dev,
+ size_t length)
+{
+ /*
+ * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
+ * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
+ * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a mkey
+ * can never be enabled without this capability. Simplify this weird
+ * quirky hardware by just saying it can't use PAS lists with UMR at
+ * all.
+ */
+ if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+ return false;
+
+ /*
+ * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
+ * used.
+ */
+ if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+ length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
+ return false;
+ return true;
+}
+
+/*
+ * true if an existing MR can be reconfigured to new access_flags using UMR.
+ * Older HW cannot use UMR to update certain elements of the MKC. See
+ * get_umr_update_access_mask() and umr_check_mkey_mask()
+ */
+static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
+ unsigned int current_access_flags,
+ unsigned int target_access_flags)
+{
+ unsigned int diffs = current_access_flags ^ target_access_flags;
+
+ if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
+ MLX5_CAP_GEN(dev->mdev, atomic) &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ return false;
+
+ if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+ return false;
+
+ if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ return false;
+
+ return true;
+}
+
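+/*
+ * Convert a byte count to the number of 16-byte octowords, rounded up to
+ * the 64-byte XLT alignment required by the device.
+ */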
+static inline u64 mlx5r_umr_get_xlt_octo(u64 bytes)
+{
+ return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
+ MLX5_IB_UMR_OCTOWORD;
+}
+
+struct mlx5r_umr_context {
+ struct ib_cqe cqe;
+ enum ib_wc_status status;
+ struct completion done;
+};
+
+struct mlx5r_umr_wqe {
+ struct mlx5_wqe_umr_ctrl_seg ctrl_seg;
+ struct mlx5_mkey_seg mkey_seg;
+ struct mlx5_wqe_data_seg data_seg;
+};
+
+int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
+int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+ int access_flags);
+int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
+int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
+ int page_shift, int flags);
+
+#endif /* _MLX5_IB_UMR_H */
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
new file mode 100644
index 000000000..855f3f4fe
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -0,0 +1,1284 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/gfp.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/driver.h>
+#include "wr.h"
+#include "umr.h"
+
+static const u32 mlx5_ib_opcode[] = {
+ [IB_WR_SEND] = MLX5_OPCODE_SEND,
+ [IB_WR_LSO] = MLX5_OPCODE_LSO,
+ [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
+ [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
+ [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
+ [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
+ [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
+ [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
+ [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
+ [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
+ [IB_WR_REG_MR] = MLX5_OPCODE_UMR,
+ [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
+ [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
+ [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
+};
+
+int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
+{
+ struct mlx5_ib_cq *cq;
+ unsigned int cur;
+
+ cur = wq->head - wq->tail;
+ if (likely(cur + nreq < wq->max_post))
+ return 0;
+
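+	/*
+	 * Possible overflow: re-read the producer/consumer indices under
+	 * the CQ lock to serialize against completions advancing wq->tail.
+	 */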
+ cq = to_mcq(ib_cq);
+ spin_lock(&cq->lock);
+ cur = wq->head - wq->tail;
+ spin_unlock(&cq->lock);
+
+ return cur + nreq >= wq->max_post;
+}
+
+static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
+ u64 remote_addr, u32 rkey)
+{
+ rseg->raddr = cpu_to_be64(remote_addr);
+ rseg->rkey = cpu_to_be32(rkey);
+ rseg->reserved = 0;
+}
+
+static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
+ void **seg, int *size, void **cur_edge)
+{
+ struct mlx5_wqe_eth_seg *eseg = *seg;
+
+ memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
+
+ if (wr->send_flags & IB_SEND_IP_CSUM)
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+
+ if (wr->opcode == IB_WR_LSO) {
+ struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
+ size_t left, copysz;
+ void *pdata = ud_wr->header;
+ size_t stride;
+
+ left = ud_wr->hlen;
+ eseg->mss = cpu_to_be16(ud_wr->mss);
+ eseg->inline_hdr.sz = cpu_to_be16(left);
+
+		/* mlx5r_memcpy_send_wqe should get a 16B-aligned address.
+		 * Hence, we first copy up to the current edge and then, if
+		 * needed, continue to mlx5r_memcpy_send_wqe.
+		 */
+ copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
+ left);
+ memcpy(eseg->inline_hdr.start, pdata, copysz);
+ stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
+ sizeof(eseg->inline_hdr.start) + copysz, 16);
+ *size += stride / 16;
+ *seg += stride;
+
+ if (copysz < left) {
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ left -= copysz;
+ pdata += copysz;
+ mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size,
+ pdata, left);
+ }
+
+ return;
+ }
+
+ *seg += sizeof(struct mlx5_wqe_eth_seg);
+ *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
+}
+
+static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
+ const struct ib_send_wr *wr)
+{
+ memcpy(&dseg->av, &to_mah(ud_wr(wr)->ah)->av, sizeof(struct mlx5_av));
+ dseg->av.dqp_dct =
+ cpu_to_be32(ud_wr(wr)->remote_qpn | MLX5_EXTENDED_UD_AV);
+ dseg->av.key.qkey.qkey = cpu_to_be32(ud_wr(wr)->remote_qkey);
+}
+
+static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
+{
+ dseg->byte_count = cpu_to_be32(sg->length);
+ dseg->lkey = cpu_to_be32(sg->lkey);
+ dseg->addr = cpu_to_be64(sg->addr);
+}
+
+static __be64 frwr_mkey_mask(bool atomic)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE;
+
+ if (atomic)
+ result |= MLX5_MKEY_MASK_A;
+
+ return cpu_to_be64(result);
+}
+
+static __be64 sig_mkey_mask(void)
+{
+ u64 result;
+
+ result = MLX5_MKEY_MASK_LEN |
+ MLX5_MKEY_MASK_PAGE_SIZE |
+ MLX5_MKEY_MASK_START_ADDR |
+ MLX5_MKEY_MASK_EN_SIGERR |
+ MLX5_MKEY_MASK_EN_RINVAL |
+ MLX5_MKEY_MASK_KEY |
+ MLX5_MKEY_MASK_LR |
+ MLX5_MKEY_MASK_LW |
+ MLX5_MKEY_MASK_RR |
+ MLX5_MKEY_MASK_RW |
+ MLX5_MKEY_MASK_SMALL_FENCE |
+ MLX5_MKEY_MASK_FREE |
+ MLX5_MKEY_MASK_BSF_EN;
+
+ return cpu_to_be64(result);
+}
+
+static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
+ struct mlx5_ib_mr *mr, u8 flags, bool atomic)
+{
+ int size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
+
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = flags;
+ umr->xlt_octowords = cpu_to_be16(mlx5r_umr_get_xlt_octo(size));
+ umr->mkey_mask = frwr_mkey_mask(atomic);
+}
+
+static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
+{
+ memset(umr, 0, sizeof(*umr));
+ umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+ umr->flags = MLX5_UMR_INLINE;
+}
+
+static u8 get_umr_flags(int acc)
+{
+ return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
+ (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
+ (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
+ (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
+ MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
+}
+
+static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
+ struct mlx5_ib_mr *mr,
+ u32 key, int access)
+{
+ int ndescs = ALIGN(mr->mmkey.ndescs + mr->meta_ndescs, 8) >> 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ if (mr->access_mode == MLX5_MKC_ACCESS_MODE_MTT)
+ seg->log2_page_size = ilog2(mr->ibmr.page_size);
+ else if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
+ /* KLMs take twice the size of MTTs */
+ ndescs *= 2;
+
+ seg->flags = get_umr_flags(access) | mr->access_mode;
+ seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+ seg->start_addr = cpu_to_be64(mr->ibmr.iova);
+ seg->len = cpu_to_be64(mr->ibmr.length);
+ seg->xlt_oct_size = cpu_to_be32(ndescs);
+}
+
+static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
+{
+ memset(seg, 0, sizeof(*seg));
+ seg->status = MLX5_MKEY_STATUS_FREE;
+}
+
+static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
+ struct mlx5_ib_mr *mr,
+ struct mlx5_ib_pd *pd)
+{
+ int bcount = mr->desc_size * (mr->mmkey.ndescs + mr->meta_ndescs);
+
+ dseg->addr = cpu_to_be64(mr->desc_map);
+ dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
+ dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
+}
+
+static __be32 send_ieth(const struct ib_send_wr *wr)
+{
+ switch (wr->opcode) {
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ return wr->ex.imm_data;
+
+ case IB_WR_SEND_WITH_INV:
+ return cpu_to_be32(wr->ex.invalidate_rkey);
+
+ default:
+ return 0;
+ }
+}
+
+static u8 calc_sig(void *wqe, int size)
+{
+ u8 *p = wqe;
+ u8 res = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ res ^= p[i];
+
+ return ~res;
+}
+
+static u8 wq_sig(void *wqe)
+{
+ return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
+}
+
+static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **wqe, int *wqe_sz, void **cur_edge)
+{
+ struct mlx5_wqe_inline_seg *seg;
+ size_t offset;
+ int inl = 0;
+ int i;
+
+ seg = *wqe;
+ *wqe += sizeof(*seg);
+ offset = sizeof(*seg);
+
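+	/*
+	 * Copy the SG list inline into the WQE, crossing SQ fragment edges
+	 * as needed; the total must not exceed the QP's max_inline_data.
+	 */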
+ for (i = 0; i < wr->num_sge; i++) {
+ size_t len = wr->sg_list[i].length;
+ void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
+
+ inl += len;
+
+ if (unlikely(inl > qp->max_inline_data))
+ return -ENOMEM;
+
+ while (likely(len)) {
+ size_t leftlen;
+ size_t copysz;
+
+ handle_post_send_edge(&qp->sq, wqe,
+ *wqe_sz + (offset >> 4),
+ cur_edge);
+
+ leftlen = *cur_edge - *wqe;
+ copysz = min_t(size_t, leftlen, len);
+
+ memcpy(*wqe, addr, copysz);
+ len -= copysz;
+ addr += copysz;
+ *wqe += copysz;
+ offset += copysz;
+ }
+ }
+
+ seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
+
+ *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
+
+ return 0;
+}
+
+static u16 prot_field_size(enum ib_signature_type type)
+{
+ switch (type) {
+ case IB_SIG_TYPE_T10_DIF:
+ return MLX5_DIF_SIZE;
+ default:
+ return 0;
+ }
+}
+
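+/* Map a protection-information interval (block size) to the HW selector. */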
+static u8 bs_selector(int block_size)
+{
+ switch (block_size) {
+ case 512: return 0x1;
+ case 520: return 0x2;
+ case 4096: return 0x3;
+ case 4160: return 0x4;
+ case 1073741824: return 0x5;
+ default: return 0;
+ }
+}
+
+static void mlx5_fill_inl_bsf(struct ib_sig_domain *domain,
+ struct mlx5_bsf_inl *inl)
+{
+ /* Valid inline section and allow BSF refresh */
+ inl->vld_refresh = cpu_to_be16(MLX5_BSF_INL_VALID |
+ MLX5_BSF_REFRESH_DIF);
+ inl->dif_apptag = cpu_to_be16(domain->sig.dif.app_tag);
+ inl->dif_reftag = cpu_to_be32(domain->sig.dif.ref_tag);
+ /* repeating block */
+ inl->rp_inv_seed = MLX5_BSF_REPEAT_BLOCK;
+ inl->sig_type = domain->sig.dif.bg_type == IB_T10DIF_CRC ?
+ MLX5_DIF_CRC : MLX5_DIF_IPCS;
+
+ if (domain->sig.dif.ref_remap)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_INC_REFTAG;
+
+ if (domain->sig.dif.app_escape) {
+ if (domain->sig.dif.ref_escape)
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPREF_ESCAPE;
+ else
+ inl->dif_inc_ref_guard_check |= MLX5_BSF_APPTAG_ESCAPE;
+ }
+
+ inl->dif_app_bitmask_check =
+ cpu_to_be16(domain->sig.dif.apptag_check_mask);
+}
+
+static int mlx5_set_bsf(struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_bsf *bsf, u32 data_size)
+{
+ struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
+ struct mlx5_bsf_basic *basic = &bsf->basic;
+ struct ib_sig_domain *mem = &sig_attrs->mem;
+ struct ib_sig_domain *wire = &sig_attrs->wire;
+
+ memset(bsf, 0, sizeof(*bsf));
+
+ /* Basic + Extended + Inline */
+ basic->bsf_size_sbs = 1 << 7;
+ /* Input domain check byte mask */
+ basic->check_byte_mask = sig_attrs->check_mask;
+ basic->raw_data_size = cpu_to_be32(data_size);
+
+ /* Memory domain */
+ switch (sig_attrs->mem.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
+ basic->m_bfs_psv = cpu_to_be32(msig->psv_memory.psv_idx);
+ mlx5_fill_inl_bsf(mem, &bsf->m_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Wire domain */
+ switch (sig_attrs->wire.sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
+ mem->sig_type == wire->sig_type) {
+ /* Same block structure */
+ basic->bsf_size_sbs |= 1 << 4;
+ if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
+ basic->wire.copy_byte_mask |= MLX5_CPY_GRD_MASK;
+ if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_APP_MASK;
+ if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
+ basic->wire.copy_byte_mask |= MLX5_CPY_REF_MASK;
+ } else
+ basic->wire.bs_selector =
+ bs_selector(wire->sig.dif.pi_interval);
+
+ basic->w_bfs_psv = cpu_to_be32(msig->psv_wire.psv_idx);
+ mlx5_fill_inl_bsf(wire, &bsf->w_inl);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+static int set_sig_data_segment(const struct ib_send_wr *send_wr,
+ struct ib_mr *sig_mr,
+ struct ib_sig_attrs *sig_attrs,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ struct mlx5_bsf *bsf;
+ u32 data_len;
+ u32 data_key;
+ u64 data_va;
+ u32 prot_len = 0;
+ u32 prot_key = 0;
+ u64 prot_va = 0;
+ bool prot = false;
+ int ret;
+ int wqe_size;
+ struct mlx5_ib_mr *mr = to_mmr(sig_mr);
+ struct mlx5_ib_mr *pi_mr = mr->pi_mr;
+
+ data_len = pi_mr->data_length;
+ data_key = pi_mr->ibmr.lkey;
+ data_va = pi_mr->data_iova;
+ if (pi_mr->meta_ndescs) {
+ prot_len = pi_mr->meta_length;
+ prot_key = pi_mr->ibmr.lkey;
+ prot_va = pi_mr->pi_iova;
+ prot = true;
+ }
+
+ if (!prot || (data_key == prot_key && data_va == prot_va &&
+ data_len == prot_len)) {
+ /**
+ * Source domain doesn't contain signature information
+ * or data and protection are interleaved in memory.
+		 * So we need to construct:
+ * ------------------
+ * | data_klm |
+ * ------------------
+ * | BSF |
+ * ------------------
+ **/
+ struct mlx5_klm *data_klm = *seg;
+
+ data_klm->bcount = cpu_to_be32(data_len);
+ data_klm->key = cpu_to_be32(data_key);
+ data_klm->va = cpu_to_be64(data_va);
+ wqe_size = ALIGN(sizeof(*data_klm), 64);
+ } else {
+ /**
+ * Source domain contains signature information
+		 * So we need to construct a strided block format:
+ * ---------------------------
+ * | stride_block_ctrl |
+ * ---------------------------
+ * | data_klm |
+ * ---------------------------
+ * | prot_klm |
+ * ---------------------------
+ * | BSF |
+ * ---------------------------
+ **/
+ struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
+ struct mlx5_stride_block_entry *data_sentry;
+ struct mlx5_stride_block_entry *prot_sentry;
+ u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
+ int prot_size;
+
+ sblock_ctrl = *seg;
+ data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
+ prot_sentry = (void *)data_sentry + sizeof(*data_sentry);
+
+ prot_size = prot_field_size(sig_attrs->mem.sig_type);
+ if (!prot_size) {
+ pr_err("Bad block size given: %u\n", block_size);
+ return -EINVAL;
+ }
+ sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
+ prot_size);
+ sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
+ sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
+ sblock_ctrl->num_entries = cpu_to_be16(2);
+
+ data_sentry->bcount = cpu_to_be16(block_size);
+ data_sentry->key = cpu_to_be32(data_key);
+ data_sentry->va = cpu_to_be64(data_va);
+ data_sentry->stride = cpu_to_be16(block_size);
+
+ prot_sentry->bcount = cpu_to_be16(prot_size);
+ prot_sentry->key = cpu_to_be32(prot_key);
+ prot_sentry->va = cpu_to_be64(prot_va);
+ prot_sentry->stride = cpu_to_be16(prot_size);
+
+ wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
+ sizeof(*prot_sentry), 64);
+ }
+
+ *seg += wqe_size;
+ *size += wqe_size / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ bsf = *seg;
+ ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
+ if (ret)
+ return -EINVAL;
+
+ *seg += sizeof(*bsf);
+ *size += sizeof(*bsf) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ return 0;
+}
+
+static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
+ struct ib_mr *sig_mr, int access_flags,
+ u32 size, u32 length, u32 pdn)
+{
+ u32 sig_key = sig_mr->rkey;
+ u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
+
+ memset(seg, 0, sizeof(*seg));
+
+ seg->flags = get_umr_flags(access_flags) | MLX5_MKC_ACCESS_MODE_KLMS;
+ seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
+ seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
+ MLX5_MKEY_BSF_EN | pdn);
+ seg->len = cpu_to_be64(length);
+ seg->xlt_oct_size = cpu_to_be32(mlx5r_umr_get_xlt_octo(size));
+ seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
+}
+
+static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
+ u32 size)
+{
+ memset(umr, 0, sizeof(*umr));
+
+ umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
+ umr->xlt_octowords = cpu_to_be16(mlx5r_umr_get_xlt_octo(size));
+ umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
+ umr->mkey_mask = sig_mkey_mask();
+}
+
+static int set_pi_umr_wr(const struct ib_send_wr *send_wr,
+ struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ const struct ib_reg_wr *wr = reg_wr(send_wr);
+ struct mlx5_ib_mr *sig_mr = to_mmr(wr->mr);
+ struct mlx5_ib_mr *pi_mr = sig_mr->pi_mr;
+ struct ib_sig_attrs *sig_attrs = sig_mr->ibmr.sig_attrs;
+ u32 pdn = to_mpd(qp->ibqp.pd)->pdn;
+ u32 xlt_size;
+ int region_len, ret;
+
+ if (unlikely(send_wr->num_sge != 0) ||
+ unlikely(wr->access & IB_ACCESS_REMOTE_ATOMIC) ||
+ unlikely(!sig_mr->sig) || unlikely(!qp->ibqp.integrity_en) ||
+ unlikely(!sig_mr->sig->sig_status_checked))
+ return -EINVAL;
+
+ /* length of the protected region, data + protection */
+ region_len = pi_mr->ibmr.length;
+
+ /**
+ * KLM octoword size - if protection was provided
+ * then we use strided block format (3 octowords),
+ * else we use single KLM (1 octoword)
+ **/
+ if (sig_attrs->mem.sig_type != IB_SIG_TYPE_NONE)
+ xlt_size = 0x30;
+ else
+ xlt_size = sizeof(struct mlx5_klm);
+
+ set_sig_umr_segment(*seg, xlt_size);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_sig_mkey_segment(*seg, wr->mr, wr->access, xlt_size, region_len,
+ pdn);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ ret = set_sig_data_segment(send_wr, wr->mr, sig_attrs, qp, seg, size,
+ cur_edge);
+ if (ret)
+ return ret;
+
+ sig_mr->sig->sig_status_checked = false;
+ return 0;
+}
+
+static int set_psv_wr(struct ib_sig_domain *domain,
+ u32 psv_idx, void **seg, int *size)
+{
+ struct mlx5_seg_set_psv *psv_seg = *seg;
+
+ memset(psv_seg, 0, sizeof(*psv_seg));
+ psv_seg->psv_num = cpu_to_be32(psv_idx);
+ switch (domain->sig_type) {
+ case IB_SIG_TYPE_NONE:
+ break;
+ case IB_SIG_TYPE_T10_DIF:
+ psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
+ domain->sig.dif.app_tag);
+ psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);
+ break;
+ default:
+ pr_err("Bad signature type (%d) is given.\n",
+ domain->sig_type);
+ return -EINVAL;
+ }
+
+ *seg += sizeof(*psv_seg);
+ *size += sizeof(*psv_seg) / 16;
+
+ return 0;
+}
+
+static int set_reg_wr(struct mlx5_ib_qp *qp,
+ const struct ib_reg_wr *wr,
+ void **seg, int *size, void **cur_edge,
+ bool check_not_free)
+{
+ struct mlx5_ib_mr *mr = to_mmr(wr->mr);
+ struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
+ struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
+ int mr_list_size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
+ bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
+ bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
+ u8 flags = 0;
+
+ /* Matches access in mlx5_set_umr_free_mkey().
+ * Relaxed Ordering is set implicitly in mlx5_set_umr_free_mkey() and
+ * kernel ULPs are not aware of it, so we don't set it here.
+ */
+ if (!mlx5r_umr_can_reconfig(dev, 0, wr->access)) {
+ mlx5_ib_warn(
+ to_mdev(qp->ibqp.device),
+ "Fast update for MR access flags is not possible\n");
+ return -EINVAL;
+ }
+
+ if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
+ mlx5_ib_warn(to_mdev(qp->ibqp.device),
+ "Invalid IB_SEND_INLINE send flag\n");
+ return -EINVAL;
+ }
+
+ if (check_not_free)
+ flags |= MLX5_UMR_CHECK_NOT_FREE;
+ if (umr_inline)
+ flags |= MLX5_UMR_INLINE;
+
+ set_reg_umr_seg(*seg, mr, flags, atomic);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ if (umr_inline) {
+ mlx5r_memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
+ mr_list_size);
+ *size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
+ } else {
+ set_reg_data_seg(*seg, mr, pd);
+ *seg += sizeof(struct mlx5_wqe_data_seg);
+ *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
+ }
+ return 0;
+}
+
+static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
+ void **cur_edge)
+{
+ set_linv_umr_seg(*seg);
+ *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
+ *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ set_linv_mkey_seg(*seg);
+ *seg += sizeof(struct mlx5_mkey_seg);
+ *size += sizeof(struct mlx5_mkey_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
+static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
+{
+ __be32 *p = NULL;
+ int i, j;
+
+ pr_debug("dump WQE index %u:\n", idx);
+ for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
+ if ((i & 0xf) == 0) {
+ p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx);
+ pr_debug("WQBB at %p:\n", (void *)p);
+ j = 0;
+ idx = (idx + 1) & (qp->sq.wqe_cnt - 1);
+ }
+ pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
+ be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
+ be32_to_cpu(p[j + 3]));
+ }
+}
+
+int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx,
+ int *size, void **cur_edge, int nreq, __be32 general_id,
+ bool send_signaled, bool solicited)
+{
+ if (unlikely(mlx5r_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
+ return -ENOMEM;
+
+ *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
+ *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
+ *ctrl = *seg;
+ *(uint32_t *)(*seg + 8) = 0;
+ (*ctrl)->general_id = general_id;
+ (*ctrl)->fm_ce_se = qp->sq_signal_bits |
+ (send_signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0) |
+ (solicited ? MLX5_WQE_CTRL_SOLICITED : 0);
+
+ *seg += sizeof(**ctrl);
+ *size = sizeof(**ctrl) / 16;
+ *cur_edge = qp->sq.cur_edge;
+
+ return 0;
+}
+
+static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl,
+ const struct ib_send_wr *wr, unsigned int *idx, int *size,
+ void **cur_edge, int nreq)
+{
+ return mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,
+ send_ieth(wr), wr->send_flags & IB_SEND_SIGNALED,
+ wr->send_flags & IB_SEND_SOLICITED);
+}
+
+void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
+ void *seg, u8 size, void *cur_edge, unsigned int idx,
+ u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode)
+{
+ u8 opmod = 0;
+
+ ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
+ mlx5_opcode | ((u32)opmod << 24));
+ ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
+ ctrl->fm_ce_se |= fence;
+ if (unlikely(qp->flags_en & MLX5_QP_FLAG_SIGNATURE))
+ ctrl->signature = wq_sig(ctrl);
+
+ qp->sq.wrid[idx] = wr_id;
+ qp->sq.w_list[idx].opcode = mlx5_opcode;
+ qp->sq.wqe_head[idx] = qp->sq.head + nreq;
+ qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+ qp->sq.w_list[idx].next = qp->sq.cur_post;
+
+ /* We save the edge which was possibly updated during the WQE
+ * construction, into SQ's cache.
+ */
+ seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
+ qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
+ get_sq_edge(&qp->sq, qp->sq.cur_post &
+ (qp->sq.wqe_cnt - 1)) :
+ cur_edge;
+}
+
+static void handle_rdma_op(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ set_raddr_seg(*seg, rdma_wr(wr)->remote_addr, rdma_wr(wr)->rkey);
+ *seg += sizeof(struct mlx5_wqe_raddr_seg);
+ *size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
+}
+
+static void handle_local_inv(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
+ (*ctrl)->imm = cpu_to_be32(wr->ex.invalidate_rkey);
+ set_linv_wr(qp, seg, size, cur_edge);
+}
+
+static int handle_reg_mr(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int idx)
+{
+ qp->sq.wr_data[idx] = IB_WR_REG_MR;
+ (*ctrl)->imm = cpu_to_be32(reg_wr(wr)->key);
+ return set_reg_wr(qp, reg_wr(wr), seg, size, cur_edge, true);
+}
+
+static int handle_psv(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq,
+ struct ib_sig_domain *domain, u32 psv_index,
+ u8 next_fence)
+{
+ int err;
+
+ /*
+ * SET_PSV WQEs are not signaled and solicited on error.
+ */
+ err = mlx5r_begin_wqe(qp, seg, ctrl, idx, size, cur_edge, nreq,
+ send_ieth(wr), false, true);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ err = set_psv_wr(domain, psv_index, seg, size);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
+ nreq, next_fence, MLX5_OPCODE_SET_PSV);
+
+out:
+ return err;
+}
+
+static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg,
+ int *size, void **cur_edge,
+ unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence)
+{
+ struct mlx5_ib_mr *mr;
+ struct mlx5_ib_mr *pi_mr;
+ struct mlx5_ib_mr pa_pi_mr;
+ struct ib_sig_attrs *sig_attrs;
+ struct ib_reg_wr reg_pi_wr;
+ int err;
+
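+	/*
+	 * An IB_WR_REG_MR_INTEGRITY request expands into several WQEs: an
+	 * optional UMR registering the data+protection MR, a UMR for the
+	 * signature MR, and one SET_PSV per signature domain (memory, wire).
+	 */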
+ qp->sq.wr_data[*idx] = IB_WR_REG_MR_INTEGRITY;
+
+ mr = to_mmr(reg_wr(wr)->mr);
+ pi_mr = mr->pi_mr;
+
+ if (pi_mr) {
+ memset(&reg_pi_wr, 0,
+ sizeof(struct ib_reg_wr));
+
+ reg_pi_wr.mr = &pi_mr->ibmr;
+ reg_pi_wr.access = reg_wr(wr)->access;
+ reg_pi_wr.key = pi_mr->ibmr.rkey;
+
+ (*ctrl)->imm = cpu_to_be32(reg_pi_wr.key);
+ /* UMR for data + prot registration */
+ err = set_reg_wr(qp, &reg_pi_wr, seg, size, cur_edge, false);
+ if (unlikely(err))
+ goto out;
+
+ mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx,
+ wr->wr_id, nreq, fence, MLX5_OPCODE_UMR);
+
+ err = begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ } else {
+ memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
+ /* No UMR, use local_dma_lkey */
+ pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
+ pa_pi_mr.mmkey.ndescs = mr->mmkey.ndescs;
+ pa_pi_mr.data_length = mr->data_length;
+ pa_pi_mr.data_iova = mr->data_iova;
+ if (mr->meta_ndescs) {
+ pa_pi_mr.meta_ndescs = mr->meta_ndescs;
+ pa_pi_mr.meta_length = mr->meta_length;
+ pa_pi_mr.pi_iova = mr->pi_iova;
+ }
+
+ pa_pi_mr.ibmr.length = mr->ibmr.length;
+ mr->pi_mr = &pa_pi_mr;
+ }
+ (*ctrl)->imm = cpu_to_be32(mr->ibmr.rkey);
+ /* UMR for sig MR */
+ err = set_pi_umr_wr(wr, qp, seg, size, cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ goto out;
+ }
+ mlx5r_finish_wqe(qp, *ctrl, *seg, *size, *cur_edge, *idx, wr->wr_id,
+ nreq, fence, MLX5_OPCODE_UMR);
+
+ sig_attrs = mr->ibmr.sig_attrs;
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->mem, mr->sig->psv_memory.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ err = handle_psv(dev, qp, wr, ctrl, seg, size, cur_edge, idx, nreq,
+ &sig_attrs->wire, mr->sig->psv_wire.psv_idx,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+
+ qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+
+out:
+ return err;
+}
+
+static int handle_qpt_rc(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr,
+ struct mlx5_wqe_ctrl_seg **ctrl, void **seg, int *size,
+ void **cur_edge, unsigned int *idx, int nreq, u8 fence,
+ u8 next_fence, int *num_sge)
+{
+ int err = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
+ mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+ err = -EOPNOTSUPP;
+ goto out;
+
+ case IB_WR_LOCAL_INV:
+ handle_local_inv(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR:
+ err = handle_reg_mr(qp, wr, ctrl, seg, size, cur_edge, *idx);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ case IB_WR_REG_MR_INTEGRITY:
+ err = handle_reg_mr_integrity(dev, qp, wr, ctrl, seg, size,
+ cur_edge, idx, nreq, fence,
+ next_fence);
+ if (unlikely(err))
+ goto out;
+ *num_sge = 0;
+ break;
+
+ default:
+ break;
+ }
+
+out:
+ return err;
+}
+
+static void handle_qpt_uc(const struct ib_send_wr *wr, void **seg, int *size)
+{
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ handle_rdma_op(wr, seg, size);
+ break;
+ default:
+ break;
+ }
+}
+
+static void handle_qpt_hw_gsi(struct mlx5_ib_qp *qp,
+ const struct ib_send_wr *wr, void **seg,
+ int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+}
+
+static void handle_qpt_ud(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
+ void **seg, int *size, void **cur_edge)
+{
+ set_datagram_seg(*seg, wr);
+ *seg += sizeof(struct mlx5_wqe_datagram_seg);
+ *size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+
+ /* handle qp that supports ud offload */
+ if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+ struct mlx5_wqe_eth_pad *pad;
+
+ pad = *seg;
+ memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
+ *seg += sizeof(struct mlx5_wqe_eth_pad);
+ *size += sizeof(struct mlx5_wqe_eth_pad) / 16;
+ set_eth_seg(wr, qp, seg, size, cur_edge);
+ handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
+ }
+}
+
+void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
+ struct mlx5_wqe_ctrl_seg *ctrl)
+{
+ struct mlx5_bf *bf = &qp->bf;
+
+ qp->sq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * updating doorbell record and ringing the doorbell
+ */
+ wmb();
+
+ qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
+
+ /* Make sure doorbell record is visible to the HCA before
+ * we hit doorbell.
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
+ /* Make sure doorbells don't leak out of SQ spinlock
+ * and reach the HCA out of order.
+ */
+ bf->offset ^= bf->buf_size;
+}
+
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain)
+{
+ struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_xrc_seg *xrc;
+ void *cur_edge;
+ int size;
+ unsigned long flags;
+ unsigned int idx;
+ int err = 0;
+ int num_sge;
+ void *seg;
+ int nreq;
+ int i;
+ u8 next_fence = 0;
+ u8 fence;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (qp->type == IB_QPT_GSI)
+ return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->sq.lock, flags);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ num_sge = wr->num_sge;
+ if (unlikely(num_sge > qp->sq.max_gs)) {
+ mlx5_ib_warn(dev, "\n");
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
+ nreq);
+ if (err) {
+ mlx5_ib_warn(dev, "\n");
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
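+		/*
+		 * MR registration WQEs take the device's required UMR fence
+		 * and force a small initiator fence on the following WQE so
+		 * that subsequent work does not overtake the registration.
+		 */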
+ if (wr->opcode == IB_WR_REG_MR ||
+ wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ fence = dev->umr_fence;
+ next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+ } else {
+ if (wr->send_flags & IB_SEND_FENCE) {
+ if (qp->next_fence)
+ fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+ else
+ fence = MLX5_FENCE_MODE_FENCE;
+ } else {
+ fence = qp->next_fence;
+ }
+ }
+
+ switch (qp->type) {
+ case IB_QPT_XRC_INI:
+ xrc = seg;
+ seg += sizeof(*xrc);
+ size += sizeof(*xrc) / 16;
+ fallthrough;
+ case IB_QPT_RC:
+ err = handle_qpt_rc(dev, qp, wr, &ctrl, &seg, &size,
+ &cur_edge, &idx, nreq, fence,
+ next_fence, &num_sge);
+ if (unlikely(err)) {
+ *bad_wr = wr;
+ goto out;
+ } else if (wr->opcode == IB_WR_REG_MR_INTEGRITY) {
+ goto skip_psv;
+ }
+ break;
+
+ case IB_QPT_UC:
+ handle_qpt_uc(wr, &seg, &size);
+ break;
+ case IB_QPT_SMI:
+ if (unlikely(!dev->port_caps[qp->port - 1].has_smi)) {
+ mlx5_ib_warn(dev, "Send SMP MADs is not allowed\n");
+ err = -EPERM;
+ *bad_wr = wr;
+ goto out;
+ }
+ fallthrough;
+ case MLX5_IB_QPT_HW_GSI:
+ handle_qpt_hw_gsi(qp, wr, &seg, &size, &cur_edge);
+ break;
+ case IB_QPT_UD:
+ handle_qpt_ud(qp, wr, &seg, &size, &cur_edge);
+ break;
+
+ default:
+ break;
+ }
+
+ if (wr->send_flags & IB_SEND_INLINE && num_sge) {
+ err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
+ if (unlikely(err)) {
+ mlx5_ib_warn(dev, "\n");
+ *bad_wr = wr;
+ goto out;
+ }
+ } else {
+ for (i = 0; i < num_sge; i++) {
+ handle_post_send_edge(&qp->sq, &seg, size,
+ &cur_edge);
+ if (unlikely(!wr->sg_list[i].length))
+ continue;
+
+ set_data_ptr_seg(
+ (struct mlx5_wqe_data_seg *)seg,
+ wr->sg_list + i);
+ size += sizeof(struct mlx5_wqe_data_seg) / 16;
+ seg += sizeof(struct mlx5_wqe_data_seg);
+ }
+ }
+
+ qp->next_fence = next_fence;
+ mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id,
+ nreq, fence, mlx5_ib_opcode[wr->opcode]);
+skip_psv:
+ if (0)
+ dump_wqe(qp, idx, size);
+ }
+
+out:
+ if (likely(nreq))
+ mlx5r_ring_db(qp, nreq, ctrl);
+
+ spin_unlock_irqrestore(&qp->sq.lock, flags);
+
+ return err;
+}
+
+static void set_sig_seg(struct mlx5_rwqe_sig *sig, int max_gs)
+{
+ sig->signature = calc_sig(sig, (max_gs + 1) << 2);
+}
+
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain)
+{
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ struct mlx5_wqe_data_seg *scat;
+ struct mlx5_rwqe_sig *sig;
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ struct mlx5_core_dev *mdev = dev->mdev;
+ unsigned long flags;
+ int err = 0;
+ int nreq;
+ int ind;
+ int i;
+
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
+ if (qp->type == IB_QPT_GSI)
+ return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
+
+ spin_lock_irqsave(&qp->rq.lock, flags);
+
+ ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
+
+ for (nreq = 0; wr; nreq++, wr = wr->next) {
+ if (mlx5r_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
+ err = -ENOMEM;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ if (unlikely(wr->num_sge > qp->rq.max_gs)) {
+ err = -EINVAL;
+ *bad_wr = wr;
+ goto out;
+ }
+
+ scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE)
+ scat++;
+
+ for (i = 0; i < wr->num_sge; i++)
+ set_data_ptr_seg(scat + i, wr->sg_list + i);
+
+ if (i < qp->rq.max_gs) {
+ scat[i].byte_count = 0;
+ scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].addr = 0;
+ }
+
+ if (qp->flags_en & MLX5_QP_FLAG_SIGNATURE) {
+ sig = (struct mlx5_rwqe_sig *)scat;
+ set_sig_seg(sig, qp->rq.max_gs);
+ }
+
+ qp->rq.wrid[ind] = wr->wr_id;
+
+ ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
+ }
+
+out:
+ if (likely(nreq)) {
+ qp->rq.head += nreq;
+
+ /* Make sure that descriptors are written before
+ * doorbell record.
+ */
+ wmb();
+
+ *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
+ }
+
+ spin_unlock_irqrestore(&qp->rq.lock, flags);
+
+ return err;
+}
diff --git a/drivers/infiniband/hw/mlx5/wr.h b/drivers/infiniband/hw/mlx5/wr.h
new file mode 100644
index 000000000..2dc894380
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/wr.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+
+#ifndef _MLX5_IB_WR_H
+#define _MLX5_IB_WR_H
+
+#include "mlx5_ib.h"
+
+enum {
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD = 64,
+};
+
+struct mlx5_wqe_eth_pad {
+ u8 rsvd0[16];
+};
+
+
+/* get_sq_edge - Get the next nearby edge.
+ *
+ * An 'edge' is defined as the first following address after the end
+ * of the fragment or the SQ. Accordingly, WQE construction, which
+ * repeatedly advances the pointer while writing the next data, simply
+ * has to check whether it has reached an edge.
+ *
+ * @sq - SQ buffer.
+ * @idx - Stride index in the SQ buffer.
+ *
+ * Return:
+ * The new edge.
+ */
+static inline void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
+{
+ void *fragment_end;
+
+ fragment_end = mlx5_frag_buf_get_wqe
+ (&sq->fbc,
+ mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
+
+ return fragment_end + MLX5_SEND_WQE_BB;
+}
+
+/* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
+ * next nearby edge and get new address translation for current WQE position.
+ * @sq: SQ buffer.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @cur_edge: Updated current edge.
+ */
+static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
+ u32 wqe_sz, void **cur_edge)
+{
+ u32 idx;
+
+ if (likely(*seg != *cur_edge))
+ return;
+
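+	/*
+	 * cur_post counts 64-byte WQEBBs while wqe_sz is in 16-byte units,
+	 * hence the >> 2 when computing the next stride index.
+	 */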
+ idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
+ *cur_edge = get_sq_edge(sq, idx);
+
+ *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
+}
+
+/* mlx5r_memcpy_send_wqe - copy data from src to WQE and update the relevant
+ * WQ's pointers. At the end @seg is aligned to 16B regardless of the
+ * copied size.
+ * @sq: SQ buffer.
+ * @cur_edge: Updated current edge.
+ * @seg: Current WQE position (16B aligned).
+ * @wqe_sz: Total current WQE size [16B].
+ * @src: Pointer to copy from.
+ * @n: Number of bytes to copy.
+ */
+static inline void mlx5r_memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
+ void **seg, u32 *wqe_sz,
+ const void *src, size_t n)
+{
+ while (likely(n)) {
+ size_t leftlen = *cur_edge - *seg;
+ size_t copysz = min_t(size_t, leftlen, n);
+ size_t stride;
+
+ memcpy(*seg, src, copysz);
+
+ n -= copysz;
+ src += copysz;
+ stride = !n ? ALIGN(copysz, 16) : copysz;
+ *seg += stride;
+ *wqe_sz += stride >> 4;
+ handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
+ }
+}
+
+int mlx5r_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq);
+int mlx5r_begin_wqe(struct mlx5_ib_qp *qp, void **seg,
+ struct mlx5_wqe_ctrl_seg **ctrl, unsigned int *idx,
+ int *size, void **cur_edge, int nreq, __be32 general_id,
+ bool send_signaled, bool solicited);
+void mlx5r_finish_wqe(struct mlx5_ib_qp *qp, struct mlx5_wqe_ctrl_seg *ctrl,
+ void *seg, u8 size, void *cur_edge, unsigned int idx,
+ u64 wr_id, int nreq, u8 fence, u32 mlx5_opcode);
+void mlx5r_ring_db(struct mlx5_ib_qp *qp, unsigned int nreq,
+ struct mlx5_wqe_ctrl_seg *ctrl);
+int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr, bool drain);
+int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr, bool drain);
+
+static inline int mlx5_ib_post_send_nodrain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_send_drain(struct ib_qp *ibqp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ return mlx5_ib_post_send(ibqp, wr, bad_wr, true);
+}
+
+static inline int mlx5_ib_post_recv_nodrain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, false);
+}
+
+static inline int mlx5_ib_post_recv_drain(struct ib_qp *ibqp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ return mlx5_ib_post_recv(ibqp, wr, bad_wr, true);
+}
+#endif /* _MLX5_IB_WR_H */