Diffstat (limited to 'drivers/infiniband/hw/mana/cq.c')
-rw-r--r--  drivers/infiniband/hw/mana/cq.c  57
1 file changed, 53 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index d141cab8a1..4a71e678d0 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -12,13 +12,20 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_create_cq ucmd = {};
 	struct mana_ib_dev *mdev;
+	struct gdma_context *gc;
 	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+	gc = mdev_to_gc(mdev);
 
 	if (udata->inlen < sizeof(ucmd))
 		return -EINVAL;
 
+	if (attr->comp_vector > gc->max_num_queues)
+		return -EINVAL;
+
+	cq->comp_vector = attr->comp_vector;
+
 	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
 	if (err) {
 		ibdev_dbg(ibdev,
@@ -26,7 +33,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		return err;
 	}
 
-	if (attr->cqe > MAX_SEND_BUFFERS_PER_QUEUE) {
+	if (attr->cqe > mdev->adapter_caps.max_qp_wr) {
 		ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
 		return -EINVAL;
 	}
@@ -41,7 +48,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		return err;
 	}
 
-	err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
+	err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
 	if (err) {
 		ibdev_dbg(ibdev,
 			  "Failed to create dma region for create cq, %d\n",
@@ -50,12 +57,13 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	}
 
 	ibdev_dbg(ibdev,
-		  "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+		  "create_dma_region ret %d gdma_region 0x%llx\n",
 		  err, cq->gdma_region);
 
 	/*
 	 * The CQ ID is not known at this time. The ID is generated at create_qp
 	 */
+	cq->id = INVALID_QUEUE_ID;
 
 	return 0;
 
@@ -69,11 +77,52 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_dev *mdev;
+	struct gdma_context *gc;
+	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+	gc = mdev_to_gc(mdev);
+
+	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
+	if (err) {
+		ibdev_dbg(ibdev,
+			  "Failed to destroy dma region, %d\n", err);
+		return err;
+	}
+
+	if (cq->id != INVALID_QUEUE_ID) {
+		kfree(gc->cq_table[cq->id]);
+		gc->cq_table[cq->id] = NULL;
+	}
 
-	mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
 	ib_umem_release(cq->umem);
 
 	return 0;
 }
+
+static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
+{
+	struct mana_ib_cq *cq = ctx;
+
+	if (cq->ibcq.comp_handler)
+		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+	struct gdma_context *gc = mdev_to_gc(mdev);
+	struct gdma_queue *gdma_cq;
+
+	/* Create CQ table entry */
+	WARN_ON(gc->cq_table[cq->id]);
+	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+	if (!gdma_cq)
+		return -ENOMEM;
+
+	gdma_cq->cq.context = cq;
+	gdma_cq->type = GDMA_CQ;
+	gdma_cq->cq.callback = mana_ib_cq_handler;
+	gdma_cq->id = cq->id;
+	gc->cq_table[cq->id] = gdma_cq;
+	return 0;
+}
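
The comment in mana_ib_create_cq() notes that the CQ ID is only known at create_qp time, which is why cq->id is parked at INVALID_QUEUE_ID here and the destroy path checks for it before freeing the table entry. A hypothetical call site in the QP-creation path might look like the fragment below; resp.cqid and err_unwind are placeholders for illustration, not code from this driver.

	/*
	 * Hypothetical call site (illustration only): once create_qp learns
	 * the hardware CQ ID, it records it and installs the callback so
	 * completion events can reach the RDMA consumer.
	 */
	cq->id = resp.cqid;	/* placeholder: CQ ID returned by the hardware */
	err = mana_ib_install_cq_cb(mdev, cq);
	if (err) {
		ibdev_dbg(&mdev->ib_dev,
			  "Failed to install CQ callback, %d\n", err);
		goto err_unwind;	/* placeholder unwind label */
	}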
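The gc->cq_table entry installed above is what lets the GDMA interrupt path find the RDMA CQ when a completion event arrives. The sketch below paraphrases the completion-EQE dispatch in the MANA Ethernet driver (drivers/net/ethernet/microsoft/mana/gdma_main.c); treat it as an illustration of the mechanism rather than a verbatim excerpt, as details may differ across kernel versions.

	/*
	 * Illustrative sketch (not part of this diff): dispatching a
	 * completion EQE through gc->cq_table, paraphrased from
	 * mana_gd_process_eqe() in the MANA Ethernet driver.
	 */
	static void example_dispatch_completion(struct gdma_context *gc, u32 cq_id)
	{
		struct gdma_queue *cq;

		if (WARN_ON_ONCE(cq_id >= gc->max_num_cqs))
			return;

		cq = gc->cq_table[cq_id];
		if (WARN_ON_ONCE(!cq || cq->type != GDMA_CQ || cq->id != cq_id))
			return;

		/*
		 * For an RDMA CQ this is mana_ib_cq_handler(), which fires
		 * the consumer's ib_cq comp_handler.
		 */
		if (cq->cq.callback)
			cq->cq.callback(cq->cq.context, cq);
	}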