Diffstat (limited to 'drivers/infiniband/hw/mana/qp.c')
-rw-r--r--  drivers/infiniband/hw/mana/qp.c | 110 ++++++++++-----------------------
 1 file changed, 33 insertions(+), 77 deletions(-)
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index d7485ee6a6..ba13c5abf8 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -15,15 +15,13 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 	struct mana_port_context *mpc = netdev_priv(ndev);
 	struct mana_cfg_rx_steer_req_v2 *req;
 	struct mana_cfg_rx_steer_resp resp = {};
-	mana_handle_t *req_indir_tab;
 	struct gdma_context *gc;
 	u32 req_buf_size;
 	int i, err;
 
 	gc = mdev_to_gc(dev);
-	req_buf_size =
-		sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
+	req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_SIZE);
 	req = kzalloc(req_buf_size, GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
@@ -44,20 +42,20 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 
 	req->rss_enable = true;
 	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
-	req->indir_tab_offset = sizeof(*req);
+	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
+					 indir_tab);
 	req->update_indir_tab = true;
 	req->cqe_coalescing_enable = 1;
-	req_indir_tab = (mana_handle_t *)(req + 1);
 
 	/* The ind table passed to the hardware must have
 	 * MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb
 	 * ind_table to MANA_INDIRECT_TABLE_SIZE if required
 	 */
 	ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size);
 	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
-		req_indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
+		req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)];
 		ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i,
-			  req_indir_tab[i]);
+			  req->indir_tab[i]);
 	}
 
 	req->update_hashkey = true;
@@ -97,11 +95,9 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
 	struct mana_ib_dev *mdev =
 		container_of(pd->device, struct mana_ib_dev, ib_dev);
-	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
 	struct mana_ib_create_qp_rss_resp resp = {};
 	struct mana_ib_create_qp_rss ucmd = {};
-	struct gdma_queue **gdma_cq_allocated;
 	mana_handle_t *mana_ind_table;
 	struct mana_port_context *mpc;
 	unsigned int ind_tbl_size;
@@ -175,13 +171,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		goto fail;
 	}
 
-	gdma_cq_allocated = kcalloc(ind_tbl_size, sizeof(*gdma_cq_allocated),
-				    GFP_KERNEL);
-	if (!gdma_cq_allocated) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
 	qp->port = port;
 
 	for (i = 0; i < ind_tbl_size; i++) {
@@ -194,13 +183,13 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		ibcq = ibwq->cq;
 		cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 
-		wq_spec.gdma_region = wq->gdma_region;
+		wq_spec.gdma_region = wq->queue.gdma_region;
 		wq_spec.queue_size = wq->wq_buf_size;
 		cq_spec.gdma_region = cq->queue.gdma_region;
 		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
 		cq_spec.modr_ctx_id = 0;
-		eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
+		eq = &mpc->ac->eqs[cq->comp_vector];
 		cq_spec.attached_eq = eq->eq->id;
 
 		ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
@@ -212,18 +201,18 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		}
 
 		/* The GDMA regions are now owned by the WQ object */
-		wq->gdma_region = GDMA_INVALID_DMA_REGION;
+		wq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
-		wq->id = wq_spec.queue_index;
+		wq->queue.id = wq_spec.queue_index;
 		cq->queue.id = cq_spec.queue_index;
 
 		ibdev_dbg(&mdev->ib_dev,
-			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
-			  ret, wq->rx_object, wq->id, cq->queue.id);
+			  "rx_object 0x%llx wq id %llu cq id %llu\n",
+			  wq->rx_object, wq->queue.id, cq->queue.id);
 
 		resp.entries[i].cqid = cq->queue.id;
-		resp.entries[i].wqid = wq->id;
+		resp.entries[i].wqid = wq->queue.id;
 
 		mana_ind_table[i] = wq->rx_object;
 
@@ -231,8 +220,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		ret = mana_ib_install_cq_cb(mdev, cq);
 		if (ret)
 			goto fail;
-
-		gdma_cq_allocated[i] = gc->cq_table[cq->queue.id];
 	}
 
 	resp.num_entries = i;
@@ -252,7 +239,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		goto fail;
 	}
 
-	kfree(gdma_cq_allocated);
 	kfree(mana_ind_table);
 
 	return 0;
@@ -264,13 +250,10 @@ fail:
 		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
 		cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 
-		gc->cq_table[cq->queue.id] = NULL;
-		kfree(gdma_cq_allocated[i]);
-
+		mana_ib_remove_cq_cb(mdev, cq);
 		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
 	}
 
-	kfree(gdma_cq_allocated);
 	kfree(mana_ind_table);
 
 	return ret;
@@ -289,15 +272,12 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	struct mana_ib_ucontext *mana_ucontext =
 		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
 					  ibucontext);
-	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_ib_create_qp_resp resp = {};
 	struct mana_ib_create_qp ucmd = {};
-	struct gdma_queue *gdma_cq = NULL;
 	struct mana_obj_spec wq_spec = {};
 	struct mana_obj_spec cq_spec = {};
 	struct mana_port_context *mpc;
 	struct net_device *ndev;
-	struct ib_umem *umem;
 	struct mana_eq *eq;
 	int eq_vec;
 	u32 port;
@@ -346,55 +326,38 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	ibdev_dbg(&mdev->ib_dev, "ucmd sq_buf_addr 0x%llx port %u\n",
 		  ucmd.sq_buf_addr, ucmd.port);
 
-	umem = ib_umem_get(ibpd->device, ucmd.sq_buf_addr, ucmd.sq_buf_size,
-			   IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(umem)) {
-		err = PTR_ERR(umem);
-		ibdev_dbg(&mdev->ib_dev,
-			  "Failed to get umem for create qp-raw, err %d\n",
-			  err);
-		goto err_free_vport;
-	}
-	qp->sq_umem = umem;
-
-	err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
-						    &qp->sq_gdma_region);
+	err = mana_ib_create_queue(mdev, ucmd.sq_buf_addr, ucmd.sq_buf_size, &qp->raw_sq);
 	if (err) {
 		ibdev_dbg(&mdev->ib_dev,
-			  "Failed to create dma region for create qp-raw, %d\n",
-			  err);
-		goto err_release_umem;
+			  "Failed to create queue for create qp-raw, err %d\n", err);
+		goto err_free_vport;
 	}
 
-	ibdev_dbg(&mdev->ib_dev,
-		  "create_dma_region ret %d gdma_region 0x%llx\n",
-		  err, qp->sq_gdma_region);
-
 	/* Create a WQ on the same port handle used by the Ethernet */
-	wq_spec.gdma_region = qp->sq_gdma_region;
+	wq_spec.gdma_region = qp->raw_sq.gdma_region;
 	wq_spec.queue_size = ucmd.sq_buf_size;
 	cq_spec.gdma_region = send_cq->queue.gdma_region;
 	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
 	cq_spec.modr_ctx_id = 0;
-	eq_vec = send_cq->comp_vector % gc->max_num_queues;
+	eq_vec = send_cq->comp_vector;
 	eq = &mpc->ac->eqs[eq_vec];
 	cq_spec.attached_eq = eq->eq->id;
 
 	err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
-				 &cq_spec, &qp->tx_object);
+				 &cq_spec, &qp->qp_handle);
 	if (err) {
 		ibdev_dbg(&mdev->ib_dev,
 			  "Failed to create wq for create raw-qp, err %d\n",
 			  err);
-		goto err_destroy_dma_region;
+		goto err_destroy_queue;
 	}
 
 	/* The GDMA regions are now owned by the WQ object */
-	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
+	qp->raw_sq.gdma_region = GDMA_INVALID_DMA_REGION;
 	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
-	qp->sq_id = wq_spec.queue_index;
+	qp->raw_sq.id = wq_spec.queue_index;
 	send_cq->queue.id = cq_spec.queue_index;
 
 	/* Create CQ table entry */
@@ -403,10 +366,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 		goto err_destroy_wq_obj;
 
 	ibdev_dbg(&mdev->ib_dev,
-		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
-		  qp->tx_object, qp->sq_id, send_cq->queue.id);
+		  "qp->qp_handle 0x%llx sq id %llu cq id %llu\n",
+		  qp->qp_handle, qp->raw_sq.id, send_cq->queue.id);
 
-	resp.sqid = qp->sq_id;
+	resp.sqid = qp->raw_sq.id;
 	resp.cqid = send_cq->queue.id;
 	resp.tx_vp_offset = pd->tx_vp_offset;
 
@@ -415,23 +378,19 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 		ibdev_dbg(&mdev->ib_dev,
 			  "Failed copy udata for create qp-raw, %d\n",
 			  err);
-		goto err_release_gdma_cq;
+		goto err_remove_cq_cb;
 	}
 
 	return 0;
 
-err_release_gdma_cq:
-	kfree(gdma_cq);
-	gc->cq_table[send_cq->queue.id] = NULL;
+err_remove_cq_cb:
+	mana_ib_remove_cq_cb(mdev, send_cq);
 
 err_destroy_wq_obj:
-	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
 
-err_destroy_dma_region:
-	mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
-
-err_release_umem:
-	ib_umem_release(umem);
+err_destroy_queue:
+	mana_ib_destroy_queue(mdev, &qp->raw_sq);
 
 err_free_vport:
 	mana_ib_uncfg_vport(mdev, pd, port);
@@ -505,12 +464,9 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
 	mpc = netdev_priv(ndev);
 	pd = container_of(ibpd, struct mana_ib_pd, ibpd);
 
-	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
+	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->qp_handle);
 
-	if (qp->sq_umem) {
-		mana_ib_gd_destroy_dma_region(mdev, qp->sq_gdma_region);
-		ib_umem_release(qp->sq_umem);
-	}
+	mana_ib_destroy_queue(mdev, &qp->raw_sq);
 
 	mana_ib_uncfg_vport(mdev, pd, qp->port);
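
The first two hunks replace open-coded allocation arithmetic with the kernel's struct_size() helper and a flexible array member, so the table offset and the buffer size come from one overflow-safe expression instead of a cast past the end of the struct. Below is a minimal userspace sketch of that pattern; struct_size() here is a simplified, non-overflow-checked stand-in for the macro in <linux/overflow.h>, and the struct layout is illustrative rather than the real mana_cfg_rx_steer_req_v2.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t mana_handle_t;

struct steer_req {
	uint32_t num_indir_entries;
	uint32_t indir_tab_offset;
	mana_handle_t indir_tab[];	/* flexible array member */
};

/* simplified stand-in; the kernel macro also checks for overflow */
#define struct_size(p, member, n) \
	(sizeof(*(p)) + (n) * sizeof((p)->member[0]))

int main(void)
{
	struct steer_req *req;
	size_t n = 64;

	/* one allocation covers the header and the trailing table */
	req = calloc(1, struct_size(req, indir_tab, n));
	if (!req)
		return 1;

	/* the offset no longer needs a (mana_handle_t *)(req + 1) cast */
	req->indir_tab_offset = offsetof(struct steer_req, indir_tab);
	req->indir_tab[0] = 0xdeadbeef;

	printf("alloc %zu bytes, table at offset %u\n",
	       struct_size(req, indir_tab, n),
	       (unsigned)req->indir_tab_offset);
	free(req);
	return 0;
}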
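
The raw-QP hunks collapse the umem/DMA-region/queue-id triple into a struct mana_ib_queue managed by mana_ib_create_queue()/mana_ib_destroy_queue(). The helpers themselves are added elsewhere in this series; the sketch below is inferred from the open-coded sequence this diff deletes (ib_umem_get() followed by mana_ib_create_zero_offset_dma_region(), and the reverse on teardown), so the exact field names and error handling are assumptions, not the merged implementation.

/*
 * Hedged sketch, assuming the driver context (mana_ib.h) and a
 * struct mana_ib_queue { struct ib_umem *umem; u64 gdma_region;
 * u64 id; } as suggested by the accesses in this diff.
 */
int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
			 struct mana_ib_queue *queue)
{
	struct ib_umem *umem;
	int err;

	queue->umem = NULL;
	queue->id = INVALID_QUEUE_ID;
	queue->gdma_region = GDMA_INVALID_DMA_REGION;

	/* pin the user buffer, as the removed ib_umem_get() call did */
	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* map it into a zero-offset GDMA region, as before */
	err = mana_ib_create_zero_offset_dma_region(mdev, umem,
						    &queue->gdma_region);
	if (err) {
		ib_umem_release(umem);
		return err;
	}
	queue->umem = umem;

	return 0;
}

void mana_ib_destroy_queue(struct mana_ib_dev *mdev,
			   struct mana_ib_queue *queue)
{
	/*
	 * Mirrors the old "if (qp->sq_umem)" teardown; the region may
	 * already be GDMA_INVALID_DMA_REGION once a WQ object owns it.
	 */
	if (queue->umem) {
		mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
		ib_umem_release(queue->umem);
		queue->umem = NULL;
	}
}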
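
Similarly, the error paths now call mana_ib_remove_cq_cb() instead of poking gc->cq_table directly. A plausible shape for that helper, mirroring the open-coded cleanup this diff removes; the bounds checks are an assumption:

void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);

	if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
		return;

	/* free the table entry that mana_ib_install_cq_cb() set up */
	kfree(gc->cq_table[cq->queue.id]);
	gc->cq_table[cq->queue.id] = NULL;
}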