author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-03 05:08:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-03 05:08:50 +0000
commit     c237094850d33ea4ae93ebb2b04ba2e23adedeb6 (patch)
tree       e08643d51bc147dd580d1a5412657e15afbf8359 /drivers/infiniband/hw
parent     Adding debian version 6.8.11-1. (diff)
Merging upstream version 6.8.12.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c     3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_cq.c     24
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.h    12
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c   7
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c    1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c     15
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_srq.c     6
-rw-r--r--  drivers/infiniband/hw/mana/cq.c             54
-rw-r--r--  drivers/infiniband/hw/mana/main.c           43
-rw-r--r--  drivers/infiniband/hw/mana/mana_ib.h        14
-rw-r--r--  drivers/infiniband/hw/mana/qp.c             26
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c             8
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h         3
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c             35
14 files changed, 144 insertions, 107 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 439d0c7c5d..04258676d0 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1013,7 +1013,8 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	hwq_attr.stride = sizeof(struct sq_sge);
 	hwq_attr.depth = bnxt_qplib_get_depth(sq);
 	hwq_attr.aux_stride = psn_sz;
-	hwq_attr.aux_depth = bnxt_qplib_set_sq_size(sq, qp->wqe_mode);
+	hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
+				    : 0;
 	/* Update msn tbl size */
 	if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
 		hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 1b6d16af8c..2517c972c6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -151,7 +151,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 		return ret;
 	}
 
-	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
+	ret = xa_err(xa_store_irq(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
 	if (ret) {
 		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
 		goto err_put;
@@ -164,7 +164,7 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 	return 0;
 
 err_xa:
-	xa_erase(&cq_table->array, hr_cq->cqn);
+	xa_erase_irq(&cq_table->array, hr_cq->cqn);
 err_put:
 	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
 
@@ -183,7 +183,7 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
 			hr_cq->cqn);
 
-	xa_erase(&cq_table->array, hr_cq->cqn);
+	xa_erase_irq(&cq_table->array, hr_cq->cqn);
 
 	/* Waiting interrupt process procedure carried out */
 	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
@@ -477,13 +477,6 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 	struct ib_event event;
 	struct ib_cq *ibcq;
 
-	hr_cq = xa_load(&hr_dev->cq_table.array,
-			cqn & (hr_dev->caps.num_cqs - 1));
-	if (!hr_cq) {
-		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
-		return;
-	}
-
 	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
 	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
@@ -492,7 +485,16 @@ void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
 		return;
 	}
 
-	refcount_inc(&hr_cq->refcount);
+	xa_lock(&hr_dev->cq_table.array);
+	hr_cq = xa_load(&hr_dev->cq_table.array,
+			cqn & (hr_dev->caps.num_cqs - 1));
+	if (hr_cq)
+		refcount_inc(&hr_cq->refcount);
+	xa_unlock(&hr_dev->cq_table.array);
+	if (!hr_cq) {
+		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
+		return;
+	}
 
 	ibcq = &hr_cq->ib_cq;
 	if (ibcq->event_handler) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 7d23d3c51d..fea6d7d508 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -61,16 +61,16 @@ enum {
 	 (sizeof(struct scatterlist) + sizeof(void *)))
 
 #define check_whether_bt_num_3(type, hop_num) \
-	(type < HEM_TYPE_MTT && hop_num == 2)
+	((type) < HEM_TYPE_MTT && (hop_num) == 2)
 
 #define check_whether_bt_num_2(type, hop_num) \
-	((type < HEM_TYPE_MTT && hop_num == 1) || \
-	(type >= HEM_TYPE_MTT && hop_num == 2))
+	(((type) < HEM_TYPE_MTT && (hop_num) == 1) || \
+	((type) >= HEM_TYPE_MTT && (hop_num) == 2))
 
 #define check_whether_bt_num_1(type, hop_num) \
-	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
-	(type >= HEM_TYPE_MTT && hop_num == 1) || \
-	(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
+	(((type) < HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0) || \
+	((type) >= HEM_TYPE_MTT && (hop_num) == 1) || \
+	((type) >= HEM_TYPE_MTT && (hop_num) == HNS_ROCE_HOP_NUM_0))
 
 struct hns_roce_hem_chunk {
 	struct list_head list;
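The hns_roce_hem.h hunk above is the classic macro-hygiene fix: every use of a macro parameter is wrapped in parentheses so operator precedence cannot regroup a compound argument. A minimal illustration, not taken from the patch (the macro names and arguments below are invented):

#define CHECK_UNSAFE(type, hop_num)	(type < HEM_TYPE_MTT && hop_num == 2)
#define CHECK_SAFE(type, hop_num)	((type) < HEM_TYPE_MTT && (hop_num) == 2)

/* CHECK_UNSAFE(a ? b : c, n) expands to
 *	(a ? b : c < HEM_TYPE_MTT && n == 2)
 * which the compiler parses as a ? b : (c < HEM_TYPE_MTT && n == 2),
 * silently comparing the wrong value. CHECK_SAFE() keeps each argument
 * a single sub-expression regardless of what the caller passes.
 */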
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 34e58e09b1..f95ec4618f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2105,7 +2105,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 					 caps->gmv_bt_num *
 					 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz));
 
-		caps->gmv_entry_num = caps->gmv_bt_num * (PAGE_SIZE /
+		caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE /
							  caps->gmv_entry_sz);
 	} else {
 		u32 func_num = max_t(u32, 1, hr_dev->func_num);
@@ -3715,8 +3715,9 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
 		   wc->status == IB_WC_WR_FLUSH_ERR))
 		return;
 
-	ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status);
-	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe,
+	ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n",
+			      cqe_status);
+	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 4, cqe,
 		       cq->cqe_size, false);
 
 	wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b55fe6911f..a33d3cedbc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -37,6 +37,7 @@
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_cache.h>
+#include "hnae3.h"
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 #include "hns_roce_hem.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 91cd580480..0d42fd197c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -433,18 +433,18 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_mr *mr = to_hr_mr(ibmr);
 	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
-	int ret = 0;
+	int ret, sg_num = 0;
 
 	mr->npages = 0;
 	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
 				 sizeof(dma_addr_t), GFP_KERNEL);
 	if (!mr->page_list)
-		return ret;
+		return sg_num;
 
-	ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
-	if (ret < 1) {
+	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+	if (sg_num < 1) {
 		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
-			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, ret);
+			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
 		goto err_page_list;
 	}
 
@@ -455,17 +455,16 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
 	if (ret) {
 		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
-		ret = 0;
+		sg_num = 0;
 	} else {
 		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
-		ret = mr->npages;
 	}
 
 err_page_list:
 	kvfree(mr->page_list);
 	mr->page_list = NULL;
 
-	return ret;
+	return sg_num;
 }
 
 static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
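The hns_roce_mr.c change restores the map_mr_sg contract: the verbs layer expects the number of mapped scatterlist entries back (0 meaning failure), not a negative errno, so the internal mtr-map error code must be kept in a separate variable. A minimal sketch of that convention, assuming hypothetical example_set_page() and example_map_pages() helpers:

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	int sg_num;

	/* ib_sg_to_pages() returns how many SG entries it consumed */
	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				example_set_page);
	if (sg_num < 1)
		return 0;	/* failure: report a zero count, not an errno */

	if (example_map_pages(ibmr))	/* hypothetical HW mapping step */
		return 0;

	return sg_num;	/* success: the count of mapped SG entries */
}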
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 4abae94778..8f48c6723e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -123,7 +123,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 		return ret;
 	}
 
-	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+	ret = xa_err(xa_store_irq(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
 	if (ret) {
 		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
 		goto err_put;
@@ -136,7 +136,7 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 	return 0;
 
 err_xa:
-	xa_erase(&srq_table->xa, srq->srqn);
+	xa_erase_irq(&srq_table->xa, srq->srqn);
 err_put:
 	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
 
@@ -154,7 +154,7 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
 			ret, srq->srqn);
 
-	xa_erase(&srq_table->xa, srq->srqn);
+	xa_erase_irq(&srq_table->xa, srq->srqn);
 
 	if (refcount_dec_and_test(&srq->refcount))
 		complete(&srq->free);
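This srq.c hunk and the hns_roce_cq.c hunk earlier apply the same two-part fix. First, entries that an interrupt handler may look up are published and erased with the _irq XArray variants, so the table's spinlock is always taken IRQ-safely. Second, the event path takes its reference while still holding xa_lock, closing the window in which the object could be freed between xa_load() and refcount_inc(). A condensed sketch of the pattern, with an assumed obj/table naming:

	/* process context: publish the entry IRQ-safely */
	ret = xa_err(xa_store_irq(&table->xa, objn, obj, GFP_KERNEL));

	/* teardown: erase with the matching IRQ-safe variant */
	xa_erase_irq(&table->xa, objn);

	/* event (IRQ) context: pin the object under the same lock */
	xa_lock(&table->xa);
	obj = xa_load(&table->xa, objn);
	if (obj)
		refcount_inc(&obj->refcount);
	xa_unlock(&table->xa);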
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 4a71e678d0..89fcc09ded 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -39,37 +39,13 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	}
 
 	cq->cqe = attr->cqe;
-	cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE,
-			       IB_ACCESS_LOCAL_WRITE);
-	if (IS_ERR(cq->umem)) {
-		err = PTR_ERR(cq->umem);
-		ibdev_dbg(ibdev, "Failed to get umem for create cq, err %d\n",
-			  err);
-		return err;
-	}
-
-	err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
+	err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
 	if (err) {
-		ibdev_dbg(ibdev,
-			  "Failed to create dma region for create cq, %d\n",
-			  err);
-		goto err_release_umem;
+		ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
+		return err;
 	}
 
-	ibdev_dbg(ibdev,
-		  "create_dma_region ret %d gdma_region 0x%llx\n",
-		  err, cq->gdma_region);
-
-	/*
-	 * The CQ ID is not known at this time. The ID is generated at create_qp
-	 */
-	cq->id = INVALID_QUEUE_ID;
-
 	return 0;
-
-err_release_umem:
-	ib_umem_release(cq->umem);
-	return err;
 }
 
 int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
@@ -78,24 +54,16 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	struct ib_device *ibdev = ibcq->device;
 	struct mana_ib_dev *mdev;
 	struct gdma_context *gc;
-	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
 	gc = mdev_to_gc(mdev);
 
-	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
-	if (err) {
-		ibdev_dbg(ibdev,
-			  "Failed to destroy dma region, %d\n", err);
-		return err;
+	if (cq->queue.id != INVALID_QUEUE_ID) {
+		kfree(gc->cq_table[cq->queue.id]);
+		gc->cq_table[cq->queue.id] = NULL;
 	}
 
-	if (cq->id != INVALID_QUEUE_ID) {
-		kfree(gc->cq_table[cq->id]);
-		gc->cq_table[cq->id] = NULL;
-	}
-
-	ib_umem_release(cq->umem);
+	mana_ib_destroy_queue(mdev, &cq->queue);
 
 	return 0;
 }
@@ -113,8 +81,10 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct gdma_queue *gdma_cq;
 
+	if (cq->queue.id >= gc->max_num_cqs)
+		return -EINVAL;
 	/* Create CQ table entry */
-	WARN_ON(gc->cq_table[cq->id]);
+	WARN_ON(gc->cq_table[cq->queue.id]);
 	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
 	if (!gdma_cq)
 		return -ENOMEM;
@@ -122,7 +92,7 @@ int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	gdma_cq->cq.context = cq;
 	gdma_cq->type = GDMA_CQ;
 	gdma_cq->cq.callback = mana_ib_cq_handler;
-	gdma_cq->id = cq->id;
-	gc->cq_table[cq->id] = gdma_cq;
+	gdma_cq->id = cq->queue.id;
+	gc->cq_table[cq->queue.id] = gdma_cq;
 	return 0;
 }
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 71e33feee6..4524c6b807 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -237,6 +237,49 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 		ibdev_dbg(ibdev, "Failed to destroy doorbell page %d\n", ret);
 }
 
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+			 struct mana_ib_queue *queue)
+{
+	struct ib_umem *umem;
+	int err;
+
+	queue->umem = NULL;
+	queue->id = INVALID_QUEUE_ID;
+	queue->gdma_region = GDMA_INVALID_DMA_REGION;
+
+	umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
+	if (IS_ERR(umem)) {
+		err = PTR_ERR(umem);
+		ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
+		return err;
+	}
+
+	err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
+	if (err) {
+		ibdev_dbg(&mdev->ib_dev, "Failed to create dma region, %d\n", err);
+		goto free_umem;
+	}
+	queue->umem = umem;
+
+	ibdev_dbg(&mdev->ib_dev,
+		  "create_dma_region ret %d gdma_region 0x%llx\n",
+		  err, queue->gdma_region);
+
+	return 0;
+free_umem:
+	ib_umem_release(umem);
+	return err;
+}
+
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue)
+{
+	/* Ignore return code as there is not much we can do about it.
+	 * The error message is printed inside.
+	 */
+	mana_ib_gd_destroy_dma_region(mdev, queue->gdma_region);
+	ib_umem_release(queue->umem);
+}
+
 static int mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
 				       struct gdma_context *gc,
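The two helpers introduced above collapse the umem pin, the zero-offset DMA-region registration, and their error unwinding into a single pair of calls. A hedged caller sketch (the queue-size expression and the comments are assumed from the cq.c hunk above; queue_size is an invented variable):

	struct mana_ib_queue queue;
	int err;

	err = mana_ib_create_queue(mdev, ucmd.buf_addr, queue_size, &queue);
	if (err)
		return err;	/* helper already released the umem on failure */

	/* ... hand queue.gdma_region to the hardware object (which then
	 * owns it) and record the hardware-assigned id in queue.id ...
	 */

	mana_ib_destroy_queue(mdev, &queue);	/* releases region and umem */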
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index f83390eebb..6acb5c281c 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -45,6 +45,12 @@ struct mana_ib_adapter_caps {
 	u32 max_inline_data_size;
 };
 
+struct mana_ib_queue {
+	struct ib_umem *umem;
+	u64 gdma_region;
+	u64 id;
+};
+
 struct mana_ib_dev {
 	struct ib_device ib_dev;
 	struct gdma_dev *gdma_dev;
@@ -82,10 +88,8 @@ struct mana_ib_mr {
 
 struct mana_ib_cq {
 	struct ib_cq ibcq;
-	struct ib_umem *umem;
+	struct mana_ib_queue queue;
 	int cqe;
-	u64 gdma_region;
-	u64 id;
 	u32 comp_vector;
 };
 
@@ -169,6 +173,10 @@ int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
 				  mana_handle_t gdma_region);
 
+int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
+			 struct mana_ib_queue *queue);
+void mana_ib_destroy_queue(struct mana_ib_dev *mdev, struct mana_ib_queue *queue);
+
 struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
 				struct ib_wq_init_attr *init_attr,
 				struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 6e7627745c..d7485ee6a6 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -197,7 +197,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 		wq_spec.gdma_region = wq->gdma_region;
 		wq_spec.queue_size = wq->wq_buf_size;
 
-		cq_spec.gdma_region = cq->gdma_region;
+		cq_spec.gdma_region = cq->queue.gdma_region;
 		cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
 		cq_spec.modr_ctx_id = 0;
 		eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
@@ -213,16 +213,16 @@
 
 		/* The GDMA regions are now owned by the WQ object */
 		wq->gdma_region = GDMA_INVALID_DMA_REGION;
-		cq->gdma_region = GDMA_INVALID_DMA_REGION;
+		cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
 		wq->id = wq_spec.queue_index;
-		cq->id = cq_spec.queue_index;
+		cq->queue.id = cq_spec.queue_index;
 
 		ibdev_dbg(&mdev->ib_dev,
 			  "ret %d rx_object 0x%llx wq id %llu cq id %llu\n",
-			  ret, wq->rx_object, wq->id, cq->id);
+			  ret, wq->rx_object, wq->id, cq->queue.id);
 
-		resp.entries[i].cqid = cq->id;
+		resp.entries[i].cqid = cq->queue.id;
 		resp.entries[i].wqid = wq->id;
 
 		mana_ind_table[i] = wq->rx_object;
@@ -232,7 +232,7 @@
 		if (ret)
 			goto fail;
 
-		gdma_cq_allocated[i] = gc->cq_table[cq->id];
+		gdma_cq_allocated[i] = gc->cq_table[cq->queue.id];
 	}
 	resp.num_entries = i;
 
@@ -264,7 +264,7 @@ fail:
 		wq = container_of(ibwq, struct mana_ib_wq, ibwq);
 		cq = container_of(ibcq, struct mana_ib_cq, ibcq);
 
-		gc->cq_table[cq->id] = NULL;
+		gc->cq_table[cq->queue.id] = NULL;
 		kfree(gdma_cq_allocated[i]);
 
 		mana_destroy_wq_obj(mpc, GDMA_RQ, wq->rx_object);
@@ -374,7 +374,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	wq_spec.gdma_region = qp->sq_gdma_region;
 	wq_spec.queue_size = ucmd.sq_buf_size;
 
-	cq_spec.gdma_region = send_cq->gdma_region;
+	cq_spec.gdma_region = send_cq->queue.gdma_region;
 	cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
 	cq_spec.modr_ctx_id = 0;
 	eq_vec = send_cq->comp_vector % gc->max_num_queues;
@@ -392,10 +392,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 	/* The GDMA regions are now owned by the WQ object */
 	qp->sq_gdma_region = GDMA_INVALID_DMA_REGION;
-	send_cq->gdma_region = GDMA_INVALID_DMA_REGION;
+	send_cq->queue.gdma_region = GDMA_INVALID_DMA_REGION;
 
 	qp->sq_id = wq_spec.queue_index;
-	send_cq->id = cq_spec.queue_index;
+	send_cq->queue.id = cq_spec.queue_index;
 
 	/* Create CQ table entry */
 	err = mana_ib_install_cq_cb(mdev, send_cq);
@@ -404,10 +404,10 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 	ibdev_dbg(&mdev->ib_dev,
 		  "ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
-		  qp->tx_object, qp->sq_id, send_cq->id);
+		  qp->tx_object, qp->sq_id, send_cq->queue.id);
 
 	resp.sqid = qp->sq_id;
-	resp.cqid = send_cq->id;
+	resp.cqid = send_cq->queue.id;
 	resp.tx_vp_offset = pd->tx_vp_offset;
 
 	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -422,7 +422,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 err_release_gdma_cq:
 	kfree(gdma_cq);
-	gc->cq_table[send_cq->id] = NULL;
+	gc->cq_table[send_cq->queue.id] = NULL;
 
 err_destroy_wq_obj:
 	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 96ffbbaf0a..5a22be14d9 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 
+#include <linux/io.h>
 #include <rdma/ib_umem_odp.h>
 #include "mlx5_ib.h"
 #include <linux/jiffies.h>
@@ -108,7 +109,6 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
 	__be32 mmio_wqe[16] = {};
 	unsigned long flags;
 	unsigned int idx;
-	int i;
 
 	if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
 		return -EIO;
@@ -148,10 +148,8 @@ static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id,
 	 * we hit doorbell
 	 */
 	wmb();
-	for (i = 0; i < 8; i++)
-		mlx5_write64(&mmio_wqe[i * 2],
-			     bf->bfreg->map + bf->offset + i * 8);
-	io_stop_wc();
+	__iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe,
+			 sizeof(mmio_wqe) / 8);
 
 	bf->offset ^= bf->buf_size;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bbe79b86c7..79ebafecca 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -643,9 +643,10 @@ struct mlx5_ib_mkey {
 	unsigned int ndescs;
 	struct wait_queue_head wait;
 	refcount_t usecount;
-	/* User Mkey must hold either a rb_key or a cache_ent. */
+	/* Cacheable user Mkey must hold either a rb_key or a cache_ent. */
 	struct mlx5r_cache_rb_key rb_key;
 	struct mlx5_cache_ent *cache_ent;
+	u8 cacheable : 1;
 };
 
 #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
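In the mem.c hunk, eight explicit mlx5_write64() stores plus the trailing io_stop_wc() are replaced by a single __iowrite64_copy(), whose last argument counts 8-byte units rather than bytes. The arithmetic behind the new call:

	__be32 mmio_wqe[16] = {};	/* 16 * 4 = 64 bytes of WQE */

	__iowrite64_copy(bf->bfreg->map + bf->offset,	/* WC doorbell area */
			 mmio_wqe,
			 sizeof(mmio_wqe) / 8);		/* 64 / 8 = 8 qwords */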
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a8ee2ca1f4..ecc111ed5d 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1158,6 +1158,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 		if (IS_ERR(mr))
 			return mr;
 		mr->mmkey.rb_key = rb_key;
+		mr->mmkey.cacheable = true;
 		return mr;
 	}
 
@@ -1168,6 +1169,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 	mr->ibmr.pd = pd;
 	mr->umem = umem;
 	mr->page_shift = order_base_2(page_size);
+	mr->mmkey.cacheable = true;
 	set_mr_fields(dev, mr, umem->length, access_flags, iova);
 
 	return mr;
@@ -1570,7 +1572,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
 	unsigned int diffs = current_access_flags ^ target_access_flags;
 
 	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
-		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
+		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING |
+		      IB_ACCESS_REMOTE_ATOMIC))
 		return false;
 	return mlx5r_umr_can_reconfig(dev, current_access_flags,
 				      target_access_flags);
@@ -1835,6 +1838,23 @@ end:
 	return ret;
 }
 
+static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
+{
+	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
+	struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
+
+	if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr))
+		return 0;
+
+	if (ent) {
+		spin_lock_irq(&ent->mkeys_queue.lock);
+		ent->in_use--;
+		mr->mmkey.cache_ent = NULL;
+		spin_unlock_irq(&ent->mkeys_queue.lock);
+	}
+	return destroy_mkey(dev, mr);
+}
+
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 {
 	struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1880,16 +1900,9 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 	}
 
 	/* Stop DMA */
-	if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
-		if (mlx5r_umr_revoke_mr(mr) ||
-		    cache_ent_find_and_store(dev, mr))
-			mr->mmkey.cache_ent = NULL;
-
-	if (!mr->mmkey.cache_ent) {
-		rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
-		if (rc)
-			return rc;
-	}
+	rc = mlx5_revoke_mr(mr);
+	if (rc)
+		return rc;
 
 	if (mr->umem) {
 		bool is_odp = is_odp_mr(mr);
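Two notes on the mr.c changes. The can_use_umr_rereg_access() hunk widens the set of access bits that may differ while still permitting a UMR-based rereg; a compact restatement of that mask test, with an invented helper name:

static bool change_is_umr_reconfigurable(unsigned int cur, unsigned int tgt)
{
	const unsigned int umr_ok = IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_RELAXED_ORDERING |
				    IB_ACCESS_REMOTE_ATOMIC;

	/* cur ^ tgt is the set of access bits that changed; any changed
	 * bit outside umr_ok forces a full re-registration instead.
	 */
	return !((cur ^ tgt) & ~umr_ok);
}

Second, the new mlx5_revoke_mr() makes the dereg policy explicit: only mkeys flagged mmkey.cacheable are revoked via UMR and parked back in the MR cache, and any mkey whose revoke or cache insertion fails has its cache-entry accounting unwound before being destroyed in hardware.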