From e54def4ad8144ab15f826416e2e0f290ef1901b4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 19 Jun 2024 23:00:30 +0200
Subject: Adding upstream version 6.9.2.

Signed-off-by: Daniel Baumann
---
 drivers/infiniband/hw/mana/cq.c      | 54 ++++++++++++++++++++++++++++--------
 drivers/infiniband/hw/mana/main.c    | 43 ----------------------------
 drivers/infiniband/hw/mana/mana_ib.h | 14 ++--------
 drivers/infiniband/hw/mana/qp.c      | 26 ++++++++---------
 4 files changed, 58 insertions(+), 79 deletions(-)

(limited to 'drivers/infiniband/hw/mana')

diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 89fcc09ded..4a71e678d0 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -39,13 +39,37 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	}
 
 	cq->cqe = attr->cqe;
-	err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
-	if (err) {
-		ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
+	cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
+			       IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(cq->umem)) {
+		err = PTR_ERR(cq->umem);
+		ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
+			  err);
 		return err;
 	}
 
+	err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
+	if (err) {
+		ibdev_dbg(ibdev,
+			  "Failed to create dma region for create cq, %d\n",
+			  err);
+		goto err_release_umem;
+	}
+
+	ibdev_dbg(ibdev,
+		  "create_dma_region ret %d gdma_region 0x%llx\n",
+		  err, cq->gdma_region);
+
+	/*
+	 * The CQ ID is not known at this time. The ID is generated at create_qp
+	 */
+	cq->id = INVALID_QUEUE_ID;
+
 	return 0;
+
+err_release_umem:
+	ib_umem_release(cq->umem);
+	return err;
 }
 
 int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -54,16 +78,24 @@
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_dev *mdev;
 	struct gdma_context *gc;
+	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
 	gc = mdev_to_gc(mdev);
 
-	if (cq->queue.id != INVALID_QUEUE_ID) {
-		kfree(gc->cq_table[cq->queue.id]);
-		gc->cq_table[cq->queue.id] = NULL;
+	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
+	if (err) {
+		ibdev_dbg(ibdev,
+			  "Failed to destroy dma region, %d\n", err);
+		return err;
 	}
 
-	mana_ib_destroy_queue(mdev, &cq->queue);
+	if (cq->id != INVALID_QUEUE_ID) {
+		kfree(gc->cq_table[cq->id]);
+		gc->cq_table[cq->id] = NULL;
+	}
+
+	ib_umem_release(cq->umem);
 	return 0;
 }
 
@@ -81,10 +113,8 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct gdma_queue *gdma_cq;
 
-	if (cq->queue.id >= gc->max_num_cqs)
-		return -EINVAL;
-
 	/* Create CQ table entry */
-	WARN_ON(gc->cq_table[cq->queue.id]);
+	WARN_ON(gc->cq_table[cq->id]);
 	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
 	if (!gdma_cq)
 		return -ENOMEM;
@@ -92,7 +122,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	gdma_cq->cq.context = cq;
 	gdma_cq->type = GDMA_CQ;
 	gdma_cq->cq.callback = mana_ib_cq_handler;
-	gdma_cq->id = cq->queue.id;
-	gc->cq_table[cq->queue.id] = gdma_cq;
+	gdma_cq->id = cq->id;
+	gc->cq_table[cq->id] = gdma_cq;
 	return 0;
 }
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 4524c6b807..71e33feee6 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -237,49 +237,6 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
 }
 
-int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
-			 struct mana_ib_queue *queue)
-{
-	struct ib_umem *umem;
-	int err;
-
-	queue->umem = NULL;
-	queue->id = INVALID_QUEUE_ID;
-	queue->gdma_region = GDMA_INVALID_DMA_REGION;
-
-	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(umem)) {
-		err = PTR_ERR(umem);
-		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
-		return err;
-	}
-
-	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
-	if (err) {
-		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
-		goto free_umem;
-	}
-	queue->umem = umem;
-
-	ibdev_dbg(&mdev->ib_dev,
-		  "create_dma_region ret %d gdma_region 0x%llx\n",
-		  err, queue->gdma_region);
-
-	return 0;
-free_umem:
-	ib_umem_release(umem);
-	return err;
-}
-
-void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
-{
-	/* Ignore return code as there is not much we can do about it.
-	 * The error message is printed inside.
-	 */
-	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
-	ib_umem_release(queue->umem);
-}
-
 static int
 mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
 			    struct gdma_context *gc,
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6acb5c281c..f83390eebb 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -45,12 +45,6 @@ struct mana_ib_adapter_caps {
 	u32 max_inline_data_size;
 };
 
-struct mana_ib_queue {
-	struct ib_umem *umem;
-	u64 gdma_region;
-	u64 id;
-};
-
 struct mana_ib_dev {
 	struct ib_device ib_dev;
 	struct gdma_dev *gdma_dev;
@@ -88,8 +82,10 @@ struct mana_ib_mr {
 
 struct mana_ib_cq {
 	struct ib_cq ibcq;
-	struct mana_ib_queue queue;
+	struct ib_umem *umem;
 	int cqe;
+	u64 gdma_region;
+	u64 id;
 	u32 comp_vector;
 };
 
@@ -173,10 +169,6 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
 				  mana_handle_t gdma_region);
 
-int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
-			 struct mana_ib_queue *queue);
-void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
-
 struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index d7485ee6a6..6e7627745c 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -197,7 +197,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		wq_spec.gdma_region = wq->gdma_region;
 		wq_spec.queue_size = wq->wq_buf_size;
 
-		cq_spec.gdma_region = cq->queue.gdma_region;
+		cq_spec.gdma_region = cq->gdma_region;
 		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
 		cq_spec.modr_ctx_id = 0;
 		eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
@@ -213,16 +213,16 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 
 		/* The GDMA regions are now owned by the WQ object */
 		wq->gdma_region = GDMA_INVALID_DMA_REGION;
-		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+		cq->gdma_region = GDMA_INVALID_DMA_REGION;
 
 		wq->id = wq_spec.queue_index;
-		cq->queue.id = cq_spec.queue_index;
+		cq->id = cq_spec.queue_index;
 
 		ibdev_dbg(&mdev->ib_dev,
 			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
-			  ret, wq->rx_object, wq->id, cq->queue.id);
+			  ret, wq->rx_object, wq->id, cq->id);
 
-		resp.entries[i].cqid = cq->queue.id;
+		resp.entries[i].cqid = cq->id;
 		resp.entries[i].wqid = wq->id;
 
 		mana_ind_table[i] = wq->rx_object;
@@ -232,7 +232,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		if (ret)
 			goto fail;
 
-		gdma_cq_allocated[i] = gc->cq_table[cq->queue.id];
+		gdma_cq_allocated[i] = gc->cq_table[cq->id];
 	}
 
 	resp.num_entries = i;
@@ -264,7 +264,7 @@ fail:
 		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
 		cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 
-		gc->cq_table[cq->queue.id] = NULL;
+		gc->cq_table[cq->id] = NULL;
 		kfree(gdma_cq_allocated[i]);
 
 		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
@@ -374,7 +374,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	wq_spec.gdma_region = qp->sq_gdma_region;
 	wq_spec.queue_size = ucmd.sq_buf_size;
 
-	cq_spec.gdma_region = send_cq->queue.gdma_region;
+	cq_spec.gdma_region = send_cq->gdma_region;
 	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
 	cq_spec.modr_ctx_id = 0;
 	eq_vec = send_cq->comp_vector % gc->max_num_queues;
@@ -392,10 +392,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 	/* The GDMA regions are now owned by the WQ object */
 	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
-	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
+	send_cq->gdma_region = GDMA_INVALID_DMA_REGION;
 
 	qp->sq_id = wq_spec.queue_index;
-	send_cq->queue.id = cq_spec.queue_index;
+	send_cq->id = cq_spec.queue_index;
 
 	/* Create CQ table entry */
 	err = mana_ib_install_cq_cb(mdev, send_cq);
@@ -404,10 +404,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 	ibdev_dbg(&mdev->ib_dev,
 		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
-		  qp->tx_object, qp->sq_id, send_cq->queue.id);
+		  qp->tx_object, qp->sq_id, send_cq->id);
 
 	resp.sqid = qp->sq_id;
-	resp.cqid = send_cq->queue.id;
+	resp.cqid = send_cq->id;
 	resp.tx_vp_offset = pd->tx_vp_offset;
 
 	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -422,7 +422,7 @@
 
 err_release_gdma_cq:
 	kfree(gdma_cq);
-	gc->cq_table[send_cq->queue.id] = NULL;
+	gc->cq_table[send_cq->id] = NULL;
 
 err_destroy_wq_obj:
 	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);

--
cgit v1.2.3