author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit     01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree       b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/infiniband/hw/bnxt_re/ib_verbs.c
parent     Adding upstream version 6.7.12. (diff)
Adding upstream version 6.8.9. (upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/ib_verbs.c')
-rw-r--r--   drivers/infiniband/hw/bnxt_re/ib_verbs.c | 271
1 file changed, 229 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index cc466dfd79..ce9c5bae83 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -50,6 +50,7 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>
+#include <linux/hashtable.h>
#include "bnxt_ulp.h"
@@ -400,6 +401,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
struct bnxt_re_fence_data *fence = &pd->fence;
struct ib_mr *ib_mr = &fence->mr->ib_mr;
struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+ struct bnxt_re_dev *rdev = pd->rdev;
+
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
memset(wqe, 0, sizeof(*wqe));
wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
@@ -454,6 +459,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = fence->mr;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
if (fence->mw) {
bnxt_re_dealloc_mw(fence->mw);
fence->mw = NULL;
@@ -485,6 +493,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
struct ib_mw *mw;
int rc;
+ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
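Note: the three fence paths above now bail out early on Gen P5/P7 silicon, which no longer needs the bind-MW fence setup. The renamed predicate is defined outside this file; a minimal sketch, assuming it simply widens the existing Gen-P5 check to also cover P7:

/* Sketch only: the _p5_p7 variant is assumed to OR the existing Gen-P5
 * test with a corresponding Gen-P7 test defined alongside it. */
static inline bool bnxt_qplib_is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
{
	return bnxt_qplib_is_chip_gen_p5(cctx) ||
	       bnxt_qplib_is_chip_gen_p7(cctx);
}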
@@ -567,6 +578,7 @@ bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
case BNXT_RE_MMAP_WC_DB:
case BNXT_RE_MMAP_DBR_BAR:
case BNXT_RE_MMAP_DBR_PAGE:
+ case BNXT_RE_MMAP_TOGGLE_PAGE:
ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
&entry->rdma_entry, PAGE_SIZE);
break;
@@ -1023,7 +1035,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
/* Consider mapping PSN search memory only for RC QPs. */
if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
- psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+ psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
sizeof(struct sq_psn_search_ext) :
sizeof(struct sq_psn_search);
psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
@@ -1184,7 +1196,8 @@ fail:
}
static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
- struct ib_qp_init_attr *init_attr)
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_re_ucontext *uctx)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
@@ -1213,7 +1226,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty.
*/
- entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
+ entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
rq->q_full_delta = 0;
rq->sg_info.pgsize = PAGE_SIZE;
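Note: bnxt_re_init_depth() replaces the bare roundup_pow_of_two() throughout this patch. It is defined in ib_verbs.h rather than here; a minimal sketch, assuming it honours the BNXT_RE_UCNTX_CMASK_POW2_DISABLED flag negotiated in bnxt_re_alloc_ucontext() later in this patch:

/* Sketch only: keep the caller-supplied depth when the user context opted
 * out of power-of-two rings, otherwise round up as before. Kernel-owned
 * queues (no udata) pass uctx == NULL and keep the old behaviour. */
static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
{
	return uctx && (uctx->cmask & BNXT_RE_UCNTX_CMASK_POW2_DISABLED) ?
		ent : roundup_pow_of_two(ent);
}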
@@ -1233,7 +1246,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
qplqp->rq.max_sge = dev_attr->max_qp_sges;
if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
qplqp->rq.max_sge = dev_attr->max_qp_sges;
@@ -1243,7 +1256,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata)
+ struct bnxt_re_ucontext *uctx)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
@@ -1272,7 +1285,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
/* Allocate 128 + 1 more than what's provided */
diff = (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) ?
0 : BNXT_QPLIB_RESERVED_QP_WRS;
- entries = roundup_pow_of_two(entries + diff + 1);
+ entries = bnxt_re_init_depth(entries + diff + 1, uctx);
sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
sq->q_full_delta = diff + 1;
/*
@@ -1288,7 +1301,8 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
}
static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
- struct ib_qp_init_attr *init_attr)
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_re_ucontext *uctx)
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_qp *qplqp;
@@ -1299,8 +1313,8 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
- entries = roundup_pow_of_two(init_attr->cap.max_send_wr + 1);
+ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
qplqp->sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
@@ -1326,7 +1340,7 @@ static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
goto out;
}
- if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
+ if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
init_attr->qp_type == IB_QPT_GSI)
qptype = CMDQ_CREATE_QP_TYPE_GSI;
out:
@@ -1338,6 +1352,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
struct ib_udata *udata)
{
struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx;
struct bnxt_qplib_qp *qplqp;
struct bnxt_re_dev *rdev;
struct bnxt_re_cq *cq;
@@ -1347,6 +1362,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
qplqp = &qp->qplib_qp;
dev_attr = &rdev->dev_attr;
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
/* Setup misc params */
ether_addr_copy(qplqp->smac, rdev->netdev->dev_addr);
qplqp->pd = &pd->qplib_pd;
@@ -1388,18 +1404,18 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
}
/* Setup RQ/SRQ */
- rc = bnxt_re_init_rq_attr(qp, init_attr);
+ rc = bnxt_re_init_rq_attr(qp, init_attr, uctx);
if (rc)
goto out;
if (init_attr->qp_type == IB_QPT_GSI)
bnxt_re_adjust_gsi_rq_attr(qp);
/* Setup SQ */
- rc = bnxt_re_init_sq_attr(qp, init_attr, udata);
+ rc = bnxt_re_init_sq_attr(qp, init_attr, uctx);
if (rc)
goto out;
if (init_attr->qp_type == IB_QPT_GSI)
- bnxt_re_adjust_gsi_sq_attr(qp, init_attr);
+ bnxt_re_adjust_gsi_sq_attr(qp, init_attr, uctx);
if (udata) /* This will update DPI and qp_handle */
rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
@@ -1523,7 +1539,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
goto fail;
if (qp_init_attr->qp_type == IB_QPT_GSI &&
- !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
+ !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
if (rc == -ENODEV)
goto qp_destroy;
@@ -1715,6 +1731,7 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
{
struct bnxt_qplib_dev_attr *dev_attr;
struct bnxt_qplib_nq *nq = NULL;
+ struct bnxt_re_ucontext *uctx;
struct bnxt_re_dev *rdev;
struct bnxt_re_srq *srq;
struct bnxt_re_pd *pd;
@@ -1739,13 +1756,14 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
goto exit;
}
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
srq->rdev = rdev;
srq->qplib_srq.pd = &pd->qplib_pd;
srq->qplib_srq.dpi = &rdev->dpi_privileged;
/* Allocate 1 more than what's provided so posting max doesn't
* mean empty
*/
- entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
+ entries = bnxt_re_init_depth(srq_init_attr->attr.max_wr + 1, uctx);
if (entries > dev_attr->max_srq_wqes + 1)
entries = dev_attr->max_srq_wqes + 1;
srq->qplib_srq.max_wqe = entries;
@@ -2102,6 +2120,9 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
}
if (qp_attr_mask & IB_QP_CAP) {
+ struct bnxt_re_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
@@ -2118,7 +2139,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
"Create QP failed - max exceeded");
return -EINVAL;
}
- entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
+ entries = bnxt_re_init_depth(qp_attr->cap.max_send_wr, uctx);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
@@ -2131,7 +2152,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.sq.q_full_delta -= 1;
qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
if (qp->qplib_qp.rq.max_wqe) {
- entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
+ entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1);
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
@@ -2544,11 +2565,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
- /* Need unconditional fence for local invalidate
- * opcode to work as expected.
- */
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
if (wr->send_flags & IB_SEND_SOLICITED)
@@ -2571,12 +2587,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.levels = qplib_frpl->hwq.level;
wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
- /* Need unconditional fence for reg_mr
- * opcode to function as expected.
- */
-
- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
-
if (wr->wr.send_flags & IB_SEND_SIGNALED)
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
@@ -2707,6 +2717,18 @@ bad:
return rc;
}
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+ /* Need unconditional fence for non-wire memory opcode
+ * to work as expected.
+ */
+ if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr)
{
@@ -2786,8 +2808,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
rc = -EINVAL;
goto bad;
}
- if (!rc)
+ if (!rc) {
+ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
+ bnxt_re_legacy_set_uc_fence(&wqe);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ }
bad:
if (rc) {
ibdev_err(&qp->rdev->ibdev,
@@ -2899,14 +2924,20 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
/* Completion Queues */
int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
- struct bnxt_re_cq *cq;
+ struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq;
struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
rdev = cq->rdev;
nq = cq->qplib_cq.nq;
+ cctx = rdev->chip_ctx;
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ free_page((unsigned long)cq->uctx_cq_page);
+ hash_del(&cq->hash_entry);
+ }
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
ib_umem_release(cq->umem);
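Note: hash_del() here pairs with the hash_add() in bnxt_re_create_cq() below; both operate on a per-device table keyed by the CQ id, declared in bnxt_re.h outside this diff. A minimal, self-contained illustration of the <linux/hashtable.h> pattern being used; the table name and bucket count here are assumptions:

#include <linux/hashtable.h>
#include <linux/types.h>

#define DEMO_CQ_HASH_BITS 3			/* 8 buckets, assumed size */
static DEFINE_HASHTABLE(demo_cq_hash, DEMO_CQ_HASH_BITS);

struct demo_cq {
	u32 id;
	struct hlist_node hash_entry;
};

/* Insert keyed by CQ id; collisions are chained within the bucket. */
static void demo_cq_track(struct demo_cq *cq)
{
	hash_add(demo_cq_hash, &cq->hash_entry, cq->id);
}

/* Walk only the bucket that could hold this id, as bnxt_re_search_for_cq()
 * (added later in this patch) does for the real table. */
static struct demo_cq *demo_cq_lookup(u32 id)
{
	struct demo_cq *cq;

	hash_for_each_possible(demo_cq_hash, cq, hash_entry, id)
		if (cq->id == id)
			return cq;
	return NULL;
}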
@@ -2919,13 +2950,16 @@ int bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
{
+ struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibcq->device, ibdev);
+ struct bnxt_re_ucontext *uctx =
+ rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
- struct bnxt_re_cq *cq = container_of(ibcq, struct bnxt_re_cq, ib_cq);
- int rc, entries;
- int cqe = attr->cqe;
+ struct bnxt_qplib_chip_ctx *cctx;
struct bnxt_qplib_nq *nq = NULL;
unsigned int nq_alloc_cnt;
+ int cqe = attr->cqe;
+ int rc, entries;
u32 active_cqs;
if (attr->flags)
@@ -2938,9 +2972,10 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
cq->rdev = rdev;
+ cctx = rdev->chip_ctx;
cq->qplib_cq.cq_handle = (u64)(unsigned long)(&cq->qplib_cq);
- entries = roundup_pow_of_two(cqe + 1);
+ entries = bnxt_re_init_depth(cqe + 1, uctx);
if (entries > dev_attr->max_cq_wqes + 1)
entries = dev_attr->max_cq_wqes + 1;
@@ -2948,8 +2983,6 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
cq->qplib_cq.sg_info.pgshft = PAGE_SHIFT;
if (udata) {
struct bnxt_re_cq_req req;
- struct bnxt_re_ucontext *uctx = rdma_udata_to_drv_context(
- udata, struct bnxt_re_ucontext, ib_uctx);
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
rc = -EFAULT;
goto fail;
@@ -3001,22 +3034,34 @@ int bnxt_re_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
spin_lock_init(&cq->cq_lock);
if (udata) {
- struct bnxt_re_cq_resp resp;
+ struct bnxt_re_cq_resp resp = {};
+ if (cctx->modes.toggle_bits & BNXT_QPLIB_CQ_TOGGLE_BIT) {
+ hash_add(rdev->cq_hash, &cq->hash_entry, cq->qplib_cq.id);
+ /* Allocate a page */
+ cq->uctx_cq_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!cq->uctx_cq_page) {
+ rc = -ENOMEM;
+ goto c2fail;
+ }
+ resp.comp_mask |= BNXT_RE_CQ_TOGGLE_PAGE_SUPPORT;
+ }
resp.cqid = cq->qplib_cq.id;
resp.tail = cq->qplib_cq.hwq.cons;
resp.phase = cq->qplib_cq.period;
resp.rsvd = 0;
- rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to copy CQ udata");
bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
- goto c2fail;
+ goto free_mem;
}
}
return 0;
+free_mem:
+ free_page((unsigned long)cq->uctx_cq_page);
c2fail:
ib_umem_release(cq->umem);
fail:
@@ -3071,12 +3116,11 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
return -EINVAL;
}
- entries = roundup_pow_of_two(cqe + 1);
+ uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext, ib_uctx);
+ entries = bnxt_re_init_depth(cqe + 1, uctx);
if (entries > dev_attr->max_cq_wqes + 1)
entries = dev_attr->max_cq_wqes + 1;
- uctx = rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
- ib_uctx);
/* uverbs consumer */
if (ib_copy_from_udata(&req, udata, sizeof(req))) {
rc = -EFAULT;
@@ -4107,6 +4151,7 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
struct bnxt_re_user_mmap_entry *entry;
struct bnxt_re_uctx_resp resp = {};
+ struct bnxt_re_uctx_req ureq = {};
u32 chip_met_rev_num = 0;
int rc;
@@ -4156,6 +4201,16 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
if (rdev->pacing.dbr_pacing)
resp.comp_mask |= BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED;
+ if (udata->inlen >= sizeof(ureq)) {
+ rc = ib_copy_from_udata(&ureq, udata, min(udata->inlen, sizeof(ureq)));
+ if (rc)
+ goto cfail;
+ if (ureq.comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT) {
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
+ uctx->cmask |= BNXT_RE_UCNTX_CMASK_POW2_DISABLED;
+ }
+ }
+
rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (rc) {
ibdev_err(ibdev, "Failed to copy user context");
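Note: the request side of this negotiation is a tiny uapi struct; judging from the BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT handling above, it is presumably just a comp_mask word (the actual definition lives in include/uapi/rdma/bnxt_re-abi.h, not in this diff):

/* Assumed shape of the new user-context request, mirroring how the driver
 * reads it above: the library sets POW2_SUPPORT if it can cope with
 * non-power-of-two ring depths, and the kernel acks with POW2_DISABLED. */
struct bnxt_re_uctx_req {
	__aligned_u64 comp_mask;
};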
@@ -4193,6 +4248,19 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
}
}
+static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
+{
+ struct bnxt_re_cq *cq = NULL, *tmp_cq;
+
+ hash_for_each_possible(rdev->cq_hash, tmp_cq, hash_entry, cq_id) {
+ if (tmp_cq->qplib_cq.id == cq_id) {
+ cq = tmp_cq;
+ break;
+ }
+ }
+ return cq;
+}
+
/* Helper function to mmap the virtual memory from user app */
int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
{
@@ -4234,6 +4302,7 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
rdma_entry);
break;
case BNXT_RE_MMAP_DBR_PAGE:
+ case BNXT_RE_MMAP_TOGGLE_PAGE:
/* Driver doesn't expect write access for user space */
if (vma->vm_flags & VM_WRITE)
return -EFAULT;
@@ -4410,8 +4479,126 @@ DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_NOTIFY_DRV);
DECLARE_UVERBS_GLOBAL_METHODS(BNXT_RE_OBJECT_NOTIFY_DRV,
&UVERBS_METHOD(BNXT_RE_METHOD_NOTIFY_DRV));
+/* Toggle MEM */
+static int UVERBS_HANDLER(BNXT_RE_METHOD_GET_TOGGLE_MEM)(struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
+ enum bnxt_re_mmap_flag mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
+ enum bnxt_re_get_toggle_mem_type res_type;
+ struct bnxt_re_user_mmap_entry *entry;
+ struct bnxt_re_ucontext *uctx;
+ struct ib_ucontext *ib_uctx;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ u64 mem_offset;
+ u64 addr = 0;
+ u32 length;
+ u32 offset;
+ u32 cq_id;
+ int err;
+
+ ib_uctx = ib_uverbs_get_ucontext(attrs);
+ if (IS_ERR(ib_uctx))
+ return PTR_ERR(ib_uctx);
+
+ err = uverbs_get_const(&res_type, attrs, BNXT_RE_TOGGLE_MEM_TYPE);
+ if (err)
+ return err;
+
+ uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx);
+ rdev = uctx->rdev;
+
+ switch (res_type) {
+ case BNXT_RE_CQ_TOGGLE_MEM:
+ err = uverbs_copy_from(&cq_id, attrs, BNXT_RE_TOGGLE_MEM_RES_ID);
+ if (err)
+ return err;
+
+ cq = bnxt_re_search_for_cq(rdev, cq_id);
+ if (!cq)
+ return -EINVAL;
+
+ length = PAGE_SIZE;
+ addr = (u64)cq->uctx_cq_page;
+ mmap_flag = BNXT_RE_MMAP_TOGGLE_PAGE;
+ offset = 0;
+ break;
+ case BNXT_RE_SRQ_TOGGLE_MEM:
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mem_offset);
+ if (!entry)
+ return -ENOMEM;
+
+ uobj->object = entry;
+ uverbs_finalize_uobj_create(attrs, BNXT_RE_TOGGLE_MEM_HANDLE);
+ err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
+ &mem_offset, sizeof(mem_offset));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
+ &length, sizeof(length));
+ if (err)
+ return err;
+
+ err = uverbs_copy_to(attrs, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+ &offset, sizeof(length));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int get_toggle_mem_obj_cleanup(struct ib_uobject *uobject,
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct bnxt_re_user_mmap_entry *entry = uobject->object;
+
+ rdma_user_mmap_entry_remove(&entry->rdma_entry);
+ return 0;
+}
+
+DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM,
+ UVERBS_ATTR_IDR(BNXT_RE_TOGGLE_MEM_HANDLE,
+ BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(BNXT_RE_TOGGLE_MEM_TYPE,
+ enum bnxt_re_get_toggle_mem_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(BNXT_RE_TOGGLE_MEM_RES_ID,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_PAGE,
+ UVERBS_ATTR_TYPE(u64),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_OFFSET,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_OUT(BNXT_RE_TOGGLE_MEM_MMAP_LENGTH,
+ UVERBS_ATTR_TYPE(u32),
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM,
+ UVERBS_ATTR_IDR(BNXT_RE_RELEASE_TOGGLE_MEM_HANDLE,
+ BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+ UVERBS_ACCESS_DESTROY,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_GET_TOGGLE_MEM,
+ UVERBS_TYPE_ALLOC_IDR(get_toggle_mem_obj_cleanup),
+ &UVERBS_METHOD(BNXT_RE_METHOD_GET_TOGGLE_MEM),
+ &UVERBS_METHOD(BNXT_RE_METHOD_RELEASE_TOGGLE_MEM));
+
const struct uapi_definition bnxt_re_uapi_defs[] = {
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_NOTIFY_DRV),
+ UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_GET_TOGGLE_MEM),
{}
};
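Note: for completeness, a userspace provider would drive BNXT_RE_METHOD_GET_TOGGLE_MEM through libibverbs' ioctl command helpers and then mmap() the returned offset read-only. A rough sketch, assuming the generic fill_attr_*/execute_ioctl helpers; none of this provider code is part of the kernel patch above, and the helper usage and header paths are assumptions:

#include <stdint.h>
#include <sys/mman.h>
#include <infiniband/cmd_ioctl.h>
#include <rdma/bnxt_re-abi.h>

/* Sketch only: fetch the toggle page for one CQ and map it read-only. */
static void *get_cq_toggle_page(struct ibv_context *ctx, uint32_t cq_id)
{
	DECLARE_COMMAND_BUFFER(cmd, BNXT_RE_OBJECT_GET_TOGGLE_MEM,
			       BNXT_RE_METHOD_GET_TOGGLE_MEM, 6);
	uint64_t mmap_page;
	uint32_t length, page_offset;

	fill_attr_out_obj(cmd, BNXT_RE_TOGGLE_MEM_HANDLE);
	fill_attr_const_in(cmd, BNXT_RE_TOGGLE_MEM_TYPE, BNXT_RE_CQ_TOGGLE_MEM);
	fill_attr_in(cmd, BNXT_RE_TOGGLE_MEM_RES_ID, &cq_id, sizeof(cq_id));
	fill_attr_out_ptr(cmd, BNXT_RE_TOGGLE_MEM_MMAP_PAGE, &mmap_page);
	fill_attr_out_ptr(cmd, BNXT_RE_TOGGLE_MEM_MMAP_LENGTH, &length);
	fill_attr_out_ptr(cmd, BNXT_RE_TOGGLE_MEM_MMAP_OFFSET, &page_offset);

	if (execute_ioctl(ctx, cmd))
		return NULL;

	/* The kernel handler above rejects VM_WRITE, so map read-only. */
	return mmap(NULL, length, PROT_READ, MAP_SHARED,
		    ctx->cmd_fd, mmap_page);
}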