author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:39:57 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:39:57 +0000
commit | dc50eab76b709d68175a358d6e23a5a3890764d3 (patch)
tree | c754d0390db060af0213ff994f0ac310e4cfd6e9 /drivers/infiniband/hw
parent | Adding debian version 6.6.15-2. (diff)
download | linux-dc50eab76b709d68175a358d6e23a5a3890764d3.tar.xz linux-dc50eab76b709d68175a358d6e23a5a3890764d3.zip
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/infiniband/hw')
131 files changed, 949 insertions(+), 391 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index faa88d12ee..cc466dfd79 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -1809,7 +1809,7 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, switch (srq_attr_mask) { case IB_SRQ_MAX_WR: /* SRQ resize is not supported */ - break; + return -EINVAL; case IB_SRQ_LIMIT: /* Change the SRQ threshold */ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe) @@ -1824,13 +1824,12 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr, /* On success, update the shadow */ srq->srq_limit = srq_attr->srq_limit; /* No need to Build and send response back to udata */ - break; + return 0; default: ibdev_err(&rdev->ibdev, "Unsupported srq_attr_mask 0x%x", srq_attr_mask); return -EINVAL; } - return 0; } int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr) diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 039801d93e..a99c68247a 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -970,6 +970,9 @@ static int bnxt_re_handle_unaffi_async_event(struct creq_func_event static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct bnxt_re_qp *qp) { + struct bnxt_re_srq *srq = container_of(qp->qplib_qp.srq, struct bnxt_re_srq, + qplib_srq); + struct creq_qp_error_notification *err_event; struct ib_event event = {}; unsigned int flags; @@ -980,14 +983,147 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, bnxt_re_unlock_cqs(qp, flags); } - if (qp->qplib_qp.srq) { - event.device = &qp->rdev->ibdev; - event.element.qp = &qp->ib_qp; - event.event = IB_EVENT_QP_LAST_WQE_REACHED; + event.device = &qp->rdev->ibdev; + event.element.qp = &qp->ib_qp; + event.event = IB_EVENT_QP_FATAL; + + err_event = (struct creq_qp_error_notification *)qp_event; + + switch (err_event->req_err_state_reason) { + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR: + case 
CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR: + event.event = IB_EVENT_QP_REQ_ERR; + break; + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + + default: + break; } - if (event.device && qp->ib_qp.event_handler) + switch (err_event->res_err_state_reason) { + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR: + event.event = IB_EVENT_QP_ACCESS_ERR; + break; + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR: + event.event = IB_EVENT_QP_REQ_ERR; + break; + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR: + event.event = IB_EVENT_QP_FATAL; + break; + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR: + case CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR: + if (srq) + event.event = IB_EVENT_SRQ_ERR; + break; + default: + break; + } + + if (err_event->res_err_state_reason || err_event->req_err_state_reason) { + ibdev_dbg(&qp->rdev->ibdev, + "%s %s qp_id: %d cons (%d %d) req (%d %d) res (%d %d)\n", + __func__, rdma_is_kernel_res(&qp->ib_qp.res) ? 
"kernel" : "user", + qp->qplib_qp.id, + err_event->sq_cons_idx, + err_event->rq_cons_idx, + err_event->req_slow_path_state, + err_event->req_err_state_reason, + err_event->res_slow_path_state, + err_event->res_err_state_reason); + } else { + if (srq) + event.event = IB_EVENT_QP_LAST_WQE_REACHED; + } + + if (event.event == IB_EVENT_SRQ_ERR && srq->ib_srq.event_handler) { + (*srq->ib_srq.event_handler)(&event, + srq->ib_srq.srq_context); + } else if (event.device && qp->ib_qp.event_handler) { qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context); + } + + return 0; +} + +static int bnxt_re_handle_cq_async_error(void *event, struct bnxt_re_cq *cq) +{ + struct creq_cq_error_notification *cqerr; + struct ib_event ibevent = {}; + + cqerr = event; + switch (cqerr->cq_err_reason) { + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR: + case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR: + ibevent.event = IB_EVENT_CQ_ERR; + break; + default: + break; + } + + if (ibevent.event == IB_EVENT_CQ_ERR && cq->ib_cq.event_handler) { + ibevent.element.cq = &cq->ib_cq; + ibevent.device = &cq->rdev->ibdev; + + ibdev_dbg(&cq->rdev->ibdev, + "%s err reason %d\n", __func__, cqerr->cq_err_reason); + cq->ib_cq.event_handler(&ibevent, cq->ib_cq.cq_context); + } return 0; } @@ -995,6 +1131,10 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async, void *obj) { + struct bnxt_qplib_qp *lib_qp; + struct bnxt_qplib_cq *lib_cq; + struct bnxt_re_qp *qp; + struct bnxt_re_cq *cq; int rc = 0; u8 event; @@ -1002,11 +1142,19 @@ static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async, return rc; /* QP was already dead, still return success */ event = affi_async->event; - if (event == CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION) { - struct bnxt_qplib_qp *lib_qp = obj; - struct bnxt_re_qp *qp = container_of(lib_qp, struct bnxt_re_qp, - qplib_qp); + switch (event) { + case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: + lib_qp = obj; + qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp); rc = bnxt_re_handle_qp_async_event(affi_async, qp); + break; + case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION: + lib_cq = obj; + cq = container_of(lib_cq, struct bnxt_re_cq, qplib_cq); + rc = bnxt_re_handle_cq_async_error(affi_async, cq); + break; + default: + rc = -EINVAL; } return rc; } @@ -1040,13 +1188,10 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, ib_event.device = &srq->rdev->ibdev; ib_event.element.srq = &srq->ib_srq; - if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT) - ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED; - else - ib_event.event = IB_EVENT_SRQ_ERR; if (srq->ib_srq.event_handler) { - /* Lock event_handler? 
*/ + if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT) + ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED; (*srq->ib_srq.event_handler)(&ib_event, srq->ib_srq.srq_context); } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index abbabea7f5..2a62239187 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -748,7 +748,8 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req), sizeof(resp), 0); rc = bnxt_qplib_rcfw_send_message(rcfw, &msg); - srq->threshold = le16_to_cpu(sb->srq_limit); + if (!rc) + srq->threshold = le16_to_cpu(sb->srq_limit); dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_tlv.h b/drivers/infiniband/hw/bnxt_re/qplib_tlv.h index 402c220734..ae96a75d7f 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_tlv.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_tlv.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ #ifndef __QPLIB_TLV_H__ #define __QPLIB_TLV_H__ diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 4a10303e03..84b5acd7f7 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -2919,6 +2919,35 @@ struct creq_qp_error_notification { u8 status; u8 req_slow_path_state; u8 req_err_state_reason; + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_NO_ERROR 0X0UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR 0X1UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT 0X2UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT 0X3UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1 0X4UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2 0X5UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3 0X6UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4 0X7UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR 0X8UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR 0X9UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH 0XAUL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP 0XBUL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND 0XCUL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG 0XDUL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE 0XEUL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR 0XFUL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR 0X10UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR 0X11UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR 0X12UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR 0X13UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR 0X14UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR 0X15UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR 0X16UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR 0X17UL + 
#define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR 0X18UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR 0X19UL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR 0X1AUL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR 0X1BUL + #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR 0X1CUL __le32 xid; u8 v; #define CREQ_QP_ERROR_NOTIFICATION_V 0x1UL @@ -2928,6 +2957,35 @@ struct creq_qp_error_notification { CREQ_QP_ERROR_NOTIFICATION_EVENT_QP_ERROR_NOTIFICATION u8 res_slow_path_state; u8 res_err_state_reason; + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_NO_ERROR 0x0UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX 0x1UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH 0x2UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE 0x3UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR 0x4UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT 0x5UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY 0x6UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR 0x7UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION 0x8UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR 0x9UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY 0xaUL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR 0xbUL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION 0xcUL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR 0xdUL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW 0xeUL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE 0xfUL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC 0x10UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE 0x11UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR 0x12UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR 0x13UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR 0x14UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY 0x15UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR 0x16UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR 0x17UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR 0x18UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR 0x19UL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR 0x1bUL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR 0x1cUL + #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND 0x1dUL __le16 sq_cons_idx; __le16 rq_cons_idx; }; diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index bbc957c578..7ead8746b7 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. 
*/ diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h index 00854f2178..ffdd0d571c 100644 --- a/drivers/infiniband/hw/hfi1/affinity.h +++ b/drivers/infiniband/hw/hfi1/affinity.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2020 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h index df295f47b3..c8d92dc13d 100644 --- a/drivers/infiniband/hw/hfi1/aspm.h +++ b/drivers/infiniband/hw/hfi1/aspm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015-2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 0814291a04..78f27f7b42 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. * Copyright(c) 2021 Cornelis Networks. @@ -5334,7 +5334,7 @@ static const char * const cce_misc_names[] = { static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source) { if (source < ARRAY_SIZE(cce_misc_names)) - strncpy(buf, cce_misc_names[source], bsize); + strscpy_pad(buf, cce_misc_names[source], bsize); else snprintf(buf, bsize, "Reserved%u", source + IS_GENERAL_ERR_START); @@ -5374,7 +5374,7 @@ static const char * const various_names[] = { static char *is_various_name(char *buf, size_t bsize, unsigned int source) { if (source < ARRAY_SIZE(various_names)) - strncpy(buf, various_names[source], bsize); + strscpy_pad(buf, various_names[source], bsize); else snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START); return buf; @@ -13185,15 +13185,16 @@ static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits, { u64 reg; u16 idx = src / BITS_PER_REGISTER; + unsigned long flags; - spin_lock(&dd->irq_src_lock); + spin_lock_irqsave(&dd->irq_src_lock, flags); reg = read_csr(dd, CCE_INT_MASK + (8 * idx)); if (set) reg |= bits; else reg &= ~bits; write_csr(dd, CCE_INT_MASK + (8 * idx), reg); - spin_unlock(&dd->irq_src_lock); + spin_unlock_irqrestore(&dd->irq_src_lock, flags); } /** diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index b2d53713da..d861aa8fc6 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2020 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h index 95a8d530d5..d79e25d20f 100644 --- a/drivers/infiniband/hw/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h index 166ad6b828..8abc902b96 100644 --- a/drivers/infiniband/hw/hfi1/common.h +++ b/drivers/infiniband/hw/hfi1/common.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2020 Intel Corporation. 
*/ diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c index 80ba1e53c0..a1e01b4472 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.c +++ b/drivers/infiniband/hw/hfi1/debugfs.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015-2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h index 29a5a8de2c..54d952a401 100644 --- a/drivers/infiniband/hw/hfi1/debugfs.h +++ b/drivers/infiniband/hw/hfi1/debugfs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016, 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c index b0a00b7aae..4250d077b0 100644 --- a/drivers/infiniband/hw/hfi1/device.c +++ b/drivers/infiniband/hw/hfi1/device.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/device.h b/drivers/infiniband/hw/hfi1/device.h index c371b5612b..a91bea426b 100644 --- a/drivers/infiniband/hw/hfi1/device.h +++ b/drivers/infiniband/hw/hfi1/device.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index f4492fa407..37a6794885 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015-2020 Intel Corporation. * Copyright(c) 2021 Cornelis Networks. diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c index 2b5d264f41..9ed05e1002 100644 --- a/drivers/infiniband/hw/hfi1/efivar.c +++ b/drivers/infiniband/hw/hfi1/efivar.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/efivar.h b/drivers/infiniband/hw/hfi1/efivar.h index 5ebc2f07bb..882240929a 100644 --- a/drivers/infiniband/hw/hfi1/efivar.h +++ b/drivers/infiniband/hw/hfi1/efivar.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c index fbe9581074..f93a160d8d 100644 --- a/drivers/infiniband/hw/hfi1/eprom.c +++ b/drivers/infiniband/hw/hfi1/eprom.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h index 772c516366..51648d1afc 100644 --- a/drivers/infiniband/hw/hfi1/eprom.h +++ b/drivers/infiniband/hw/hfi1/eprom.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016 Intel Corporation. 
*/ diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.c b/drivers/infiniband/hw/hfi1/exp_rcv.c index b86f697c79..879a66edbd 100644 --- a/drivers/infiniband/hw/hfi1/exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/exp_rcv.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.h b/drivers/infiniband/hw/hfi1/exp_rcv.h index 41f7fe5d18..141413d9fb 100644 --- a/drivers/infiniband/hw/hfi1/exp_rcv.h +++ b/drivers/infiniband/hw/hfi1/exp_rcv.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/fault.c b/drivers/infiniband/hw/hfi1/fault.c index 3af77a0840..35d2382ee6 100644 --- a/drivers/infiniband/hw/hfi1/fault.c +++ b/drivers/infiniband/hw/hfi1/fault.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/fault.h b/drivers/infiniband/hw/hfi1/fault.h index 7fe7f47219..51adafe240 100644 --- a/drivers/infiniband/hw/hfi1/fault.h +++ b/drivers/infiniband/hw/hfi1/fault.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index a5ab22cedd..503abec709 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2020 Cornelis Networks, Inc. * Copyright(c) 2015-2020 Intel Corporation. @@ -267,7 +267,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from) if (!HFI1_CAP_IS_KSET(SDMA)) return -EINVAL; - if (!from->user_backed) + if (!user_backed_iter(from)) return -EINVAL; idx = srcu_read_lock(&fd->pq_srcu); pq = srcu_dereference(fd->pq, &fd->pq_srcu); diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c index 0c0cef5b1e..3c228aeaaf 100644 --- a/drivers/infiniband/hw/hfi1/firmware.c +++ b/drivers/infiniband/hw/hfi1/firmware.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 38772e52d7..4b3f1cb125 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2020-2023 Cornelis Networks, Inc. * Copyright(c) 2015-2020 Intel Corporation. diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 6de37c5d7d..cbac4a442d 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. * Copyright(c) 2021 Cornelis Networks. 
@@ -1027,7 +1027,6 @@ static void shutdown_device(struct hfi1_devdata *dd) msix_clean_up_interrupts(dd); for (pidx = 0; pidx < dd->num_pports; ++pidx) { - ppd = dd->pport + pidx; for (i = 0; i < dd->num_rcv_contexts; i++) { rcd = hfi1_rcd_get_by_index(dd, i); hfi1_rcvctrl(dd, HFI1_RCVCTRL_TAILUPD_DIS | diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c index 70376e6dba..3737f632d6 100644 --- a/drivers/infiniband/hw/hfi1/intr.c +++ b/drivers/infiniband/hw/hfi1/intr.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h index 4df0700cba..49805a24bb 100644 --- a/drivers/infiniband/hw/hfi1/iowait.h +++ b/drivers/infiniband/hw/hfi1/iowait.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c index e7d8313302..8b9cc55db5 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c @@ -217,7 +217,7 @@ static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx, ret = sdma_txadd_page(dd, txreq, skb_frag_page(frag), - frag->bv_offset, + skb_frag_off(frag), skb_frag_size(frag), NULL, NULL, NULL); if (unlikely(ret)) diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c index e5e783c458..a9883295f4 100644 --- a/drivers/infiniband/hw/hfi1/mad.c +++ b/drivers/infiniband/hw/hfi1/mad.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015-2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h index 1d45a008fa..b6e3141253 100644 --- a/drivers/infiniband/hw/hfi1/mad.h +++ b/drivers/infiniband/hw/hfi1/mad.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c index 7a51f7d73b..d4a6acad0e 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.c +++ b/drivers/infiniband/hw/hfi1/mmu_rb.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2020 Cornelis Networks, Inc. * Copyright(c) 2016 - 2017 Intel Corporation. diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h index 751dc3fe1e..8e5d05454d 100644 --- a/drivers/infiniband/hw/hfi1/mmu_rb.h +++ b/drivers/infiniband/hw/hfi1/mmu_rb.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2020 Cornelis Networks, Inc. * Copyright(c) 2016 Intel Corporation. diff --git a/drivers/infiniband/hw/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h index 31570b0cfd..49f2da677b 100644 --- a/drivers/infiniband/hw/hfi1/opa_compat.h +++ b/drivers/infiniband/hw/hfi1/opa_compat.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016 Intel Corporation. 
*/ diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index c132a9c073..119ec2f138 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2019 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index dfea53e0fd..5a91cbda4a 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015-2018 Intel Corporation. */ @@ -2086,7 +2086,7 @@ int init_credit_return(struct hfi1_devdata *dd) "Unable to allocate credit return DMA range for NUMA %d\n", i); ret = -ENOMEM; - goto done; + goto free_cr_base; } } set_dev_node(&dd->pcidev->dev, dd->node); @@ -2094,6 +2094,10 @@ int init_credit_return(struct hfi1_devdata *dd) ret = 0; done: return ret; + +free_cr_base: + free_credit_return(dd); + goto done; } void free_credit_return(struct hfi1_devdata *dd) diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h index ea714008f2..d07cc6ea7c 100644 --- a/drivers/infiniband/hw/hfi1/pio.h +++ b/drivers/infiniband/hw/hfi1/pio.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015-2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c index 7690f996d5..80fee812a9 100644 --- a/drivers/infiniband/hw/hfi1/pio_copy.c +++ b/drivers/infiniband/hw/hfi1/pio_copy.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c index 54cbd8f1a6..7bd0e9b6cb 100644 --- a/drivers/infiniband/hw/hfi1/platform.c +++ b/drivers/infiniband/hw/hfi1/platform.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h index 1d51dca1bc..0631f9bf3a 100644 --- a/drivers/infiniband/hw/hfi1/platform.h +++ b/drivers/infiniband/hw/hfi1/platform.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c index 6193d48b2c..f3d8c0c193 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h index cdf87bc6ad..870ff1a6e5 100644 --- a/drivers/infiniband/hw/hfi1/qp.h +++ b/drivers/infiniband/hw/hfi1/qp.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2018 Intel Corporation. 
*/ diff --git a/drivers/infiniband/hw/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c index 19d7887a4f..52cce1c8b7 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.c +++ b/drivers/infiniband/hw/hfi1/qsfp.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h index 8f14111eaa..df1389bad8 100644 --- a/drivers/infiniband/hw/hfi1/qsfp.h +++ b/drivers/infiniband/hw/hfi1/qsfp.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index acd2b273ea..b36242c9d4 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index b0151b7293..aafa4e03b1 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 26c6216275..b67d23b1f2 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2018 Intel Corporation. */ @@ -3158,7 +3158,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) { int rval = 0; - if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) { + if ((unlikely(tx->num_desc == tx->desc_limit))) { rval = _extend_sdma_tx_descs(dd, tx); if (rval) { __sdma_txclean(dd, tx); diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 7fdebab202..d77246b484 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h index 85ae7293c2..5782166d98 100644 --- a/drivers/infiniband/hw/hfi1/sdma_txreq.h +++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 3b3407dc7c..d62ba5fdd8 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015-2017 Intel Corporation. 
*/ diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 8302469582..10290ebf76 100644 --- a/drivers/infiniband/hw/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h index 31e027c5a0..bb3cc006ba 100644 --- a/drivers/infiniband/hw/hfi1/trace.h +++ b/drivers/infiniband/hw/hfi1/trace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/trace_ctxts.h b/drivers/infiniband/hw/hfi1/trace_ctxts.h index 1858eaf33b..76c41bd790 100644 --- a/drivers/infiniband/hw/hfi1/trace_ctxts.h +++ b/drivers/infiniband/hw/hfi1/trace_ctxts.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2020 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/trace_dbg.h b/drivers/infiniband/hw/hfi1/trace_dbg.h index 489395bfb5..75599d5168 100644 --- a/drivers/infiniband/hw/hfi1/trace_dbg.h +++ b/drivers/infiniband/hw/hfi1/trace_dbg.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h index b33f8f575f..b21356abc9 100644 --- a/drivers/infiniband/hw/hfi1/trace_ibhdrs.h +++ b/drivers/infiniband/hw/hfi1/trace_ibhdrs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/trace_misc.h b/drivers/infiniband/hw/hfi1/trace_misc.h index 742675fa75..8dc46b6891 100644 --- a/drivers/infiniband/hw/hfi1/trace_misc.h +++ b/drivers/infiniband/hw/hfi1/trace_misc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/trace_mmu.h b/drivers/infiniband/hw/hfi1/trace_mmu.h index 82cc12aa3f..5a9dfd85e7 100644 --- a/drivers/infiniband/hw/hfi1/trace_mmu.h +++ b/drivers/infiniband/hw/hfi1/trace_mmu.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/trace_rc.h b/drivers/infiniband/hw/hfi1/trace_rc.h index 7c3a1c7753..fa254f9b9c 100644 --- a/drivers/infiniband/hw/hfi1/trace_rc.h +++ b/drivers/infiniband/hw/hfi1/trace_rc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015, 2016, 2017 Intel Corporation. 
*/ diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h index 0da22f9bc7..e6904aa80c 100644 --- a/drivers/infiniband/hw/hfi1/trace_rx.h +++ b/drivers/infiniband/hw/hfi1/trace_rx.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h index ed1b9e1e4b..c79856d4fd 100644 --- a/drivers/infiniband/hw/hfi1/trace_tx.h +++ b/drivers/infiniband/hw/hfi1/trace_tx.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index 4e9d6aa393..33d2c2a218 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index b64b9d7e08..89d1bae8f8 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2019 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index 96058baf36..6419872f95 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2020 Cornelis Networks, Inc. * Copyright(c) 2015-2018 Intel Corporation. diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h index f8ee997d00..055726f7c1 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2020 - Cornelis Networks, Inc. * Copyright(c) 2015 - 2017 Intel Corporation. @@ -36,7 +36,7 @@ struct tid_rb_node { dma_addr_t dma_addr; bool freed; unsigned int npages; - struct page *pages[]; + struct page *pages[] __counted_by(npages); }; static inline int num_user_pages(unsigned long addr, diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c index 36aaedc651..c77913a792 100644 --- a/drivers/infiniband/hw/hfi1/user_pages.c +++ b/drivers/infiniband/hw/hfi1/user_pages.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015-2017 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 29ae7beb9b..6a4aa59c0c 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2020 - 2023 Cornelis Networks, Inc. * Copyright(c) 2015 - 2018 Intel Corporation. 
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h index 742ec1470c..8735524e3a 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.h +++ b/drivers/infiniband/hw/hfi1/user_sdma.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2023 - Cornelis Networks, Inc. * Copyright(c) 2015 - 2018 Intel Corporation. diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index fbdcfecb17..33af2196ef 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h index 7f30f32b34..070e4f0bab 100644 --- a/drivers/infiniband/hw/hfi1/verbs.h +++ b/drivers/infiniband/hw/hfi1/verbs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2015 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c index cfecc81a27..822f0d05ba 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.c +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2016 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h index 2a7e0ae892..56353c7676 100644 --- a/drivers/infiniband/hw/hfi1/verbs_txreq.h +++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2016 - 2018 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/vnic.h b/drivers/infiniband/hw/hfi1/vnic.h index 34f03e7770..bbafeb5fc0 100644 --- a/drivers/infiniband/hw/hfi1/vnic.h +++ b/drivers/infiniband/hw/hfi1/vnic.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright(c) 2017 - 2020 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c index 3650fababf..16a4c297a8 100644 --- a/drivers/infiniband/hw/hfi1/vnic_main.c +++ b/drivers/infiniband/hw/hfi1/vnic_main.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2017 - 2020 Intel Corporation. */ diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c index cc6324d2d1..6caf01ba0b 100644 --- a/drivers/infiniband/hw/hfi1/vnic_sdma.c +++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright(c) 2017 - 2018 Intel Corporation. 
*/ diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 7f0d0288be..1627f3b0ef 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -146,6 +146,7 @@ enum { HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14), HNS_ROCE_CAP_FLAG_STASH = BIT(17), HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19), + HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB = BIT(22), }; #define HNS_ROCE_DB_TYPE_COUNT 2 @@ -453,6 +454,8 @@ struct hns_roce_srq { spinlock_t lock; struct mutex mutex; void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event); + struct hns_roce_db rdb; + u32 cap_flags; }; struct hns_roce_uar_table { @@ -908,6 +911,7 @@ struct hns_roce_hw { int (*query_cqc)(struct hns_roce_dev *hr_dev, u32 cqn, void *buffer); int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer); int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer); + int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer); int (*query_hw_counter)(struct hns_roce_dev *hr_dev, u64 *stats, u32 port, int *hw_counters); const struct ib_device_ops *hns_roce_dev_ops; @@ -1239,6 +1243,8 @@ int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp); int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp); int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr); int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr); +int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq); +int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq); struct hns_user_mmap_entry * hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, size_t length, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 3c62a0042d..aa9527ac2f 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -941,20 +941,23 @@ static void fill_wqe_idx(struct hns_roce_srq *srq, unsigned int wqe_idx) idx_que->head++; } -static void update_srq_db(struct hns_roce_v2_db *db, struct hns_roce_srq *srq) +static void update_srq_db(struct hns_roce_srq *srq) { - hr_reg_write(db, DB_TAG, srq->srqn); - hr_reg_write(db, DB_CMD, HNS_ROCE_V2_SRQ_DB); - hr_reg_write(db, DB_PI, srq->idx_que.head); + struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); + struct hns_roce_v2_db db; + + hr_reg_write(&db, DB_TAG, srq->srqn); + hr_reg_write(&db, DB_CMD, HNS_ROCE_V2_SRQ_DB); + hr_reg_write(&db, DB_PI, srq->idx_que.head); + + hns_roce_write64(hr_dev, (__le32 *)&db, srq->db_reg); } static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { - struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); struct hns_roce_srq *srq = to_hr_srq(ibsrq); - struct hns_roce_v2_db srq_db; unsigned long flags; int ret = 0; u32 max_sge; @@ -985,9 +988,11 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, } if (likely(nreq)) { - update_srq_db(&srq_db, srq); - - hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg); + if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) + *srq->rdb.db_record = srq->idx_que.head & + V2_DB_PRODUCER_IDX_M; + else + update_srq_db(srq); } spin_unlock_irqrestore(&srq->lock, flags); @@ -5287,6 +5292,30 @@ out: return ret; } +static int hns_roce_v2_query_srqc(struct hns_roce_dev *hr_dev, u32 srqn, + void *buffer) +{ + struct hns_roce_srq_context *context; + struct 
hns_roce_cmd_mailbox *mailbox; + int ret; + + mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + context = mailbox->buf; + ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SRQC, + srqn); + if (ret) + goto out; + + memcpy(buffer, context, sizeof(*context)); + +out: + hns_roce_free_cmd_mailbox(hr_dev, mailbox); + return ret; +} + static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev, struct hns_roce_v2_qp_context *context) { @@ -5621,6 +5650,14 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf) hr_reg_write(ctx, SRQC_WQE_BUF_PG_SZ, to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift)); + if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) { + hr_reg_enable(ctx, SRQC_DB_RECORD_EN); + hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_L, + lower_32_bits(srq->rdb.dma) >> 1); + hr_reg_write(ctx, SRQC_DB_RECORD_ADDR_H, + upper_32_bits(srq->rdb.dma)); + } + return hns_roce_v2_write_srqc_index_queue(srq, ctx); } @@ -6647,6 +6684,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = { .query_cqc = hns_roce_v2_query_cqc, .query_qpc = hns_roce_v2_query_qpc, .query_mpt = hns_roce_v2_query_mpt, + .query_srqc = hns_roce_v2_query_srqc, .query_hw_counter = hns_roce_hw_v2_query_counter, .hns_roce_dev_ops = &hns_roce_v2_dev_ops, .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops, diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 4a9cd4d21b..a4a10a4e1a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -675,6 +675,8 @@ static const struct ib_device_ops hns_roce_dev_restrack_ops = { .fill_res_qp_entry_raw = hns_roce_fill_res_qp_entry_raw, .fill_res_mr_entry = hns_roce_fill_res_mr_entry, .fill_res_mr_entry_raw = hns_roce_fill_res_mr_entry_raw, + .fill_res_srq_entry = hns_roce_fill_res_srq_entry, + .fill_res_srq_entry_raw = hns_roce_fill_res_srq_entry_raw, }; static int hns_roce_register_device(struct hns_roce_dev *hr_dev) diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c index 081a01de30..f7f3c4cc74 100644 --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c @@ -160,3 +160,52 @@ int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr) return ret; } + +int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq) +{ + struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq); + struct nlattr *table_attr; + + table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); + if (!table_attr) + return -EMSGSIZE; + + if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn)) + goto err; + + if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt)) + goto err; + + if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs)) + goto err; + + if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn)) + goto err; + + nla_nest_end(msg, table_attr); + + return 0; + +err: + nla_nest_cancel(msg, table_attr); + return -EMSGSIZE; +} + +int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device); + struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq); + struct hns_roce_srq_context context; + int ret; + + if (!hr_dev->hw->query_srqc) + return -EINVAL; + + ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context); + if (ret) + return ret; + + ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context); + + return ret; 
+} diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 8dae98f827..4e2d1c8e16 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -5,6 +5,7 @@ #include <linux/pci.h> #include <rdma/ib_umem.h> +#include <rdma/uverbs_ioctl.h> #include "hns_roce_device.h" #include "hns_roce_cmd.h" #include "hns_roce_hem.h" @@ -387,6 +388,79 @@ static void free_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) free_srq_idx(hr_dev, srq); } +static int get_srq_ucmd(struct hns_roce_srq *srq, struct ib_udata *udata, + struct hns_roce_ib_create_srq *ucmd) +{ + struct ib_device *ibdev = srq->ibsrq.device; + int ret; + + ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd))); + if (ret) { + ibdev_err(ibdev, "failed to copy SRQ udata, ret = %d.\n", ret); + return ret; + } + + return 0; +} + +static void free_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, + struct ib_udata *udata) +{ + struct hns_roce_ucontext *uctx; + + if (!(srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)) + return; + + srq->cap_flags &= ~HNS_ROCE_SRQ_CAP_RECORD_DB; + if (udata) { + uctx = rdma_udata_to_drv_context(udata, + struct hns_roce_ucontext, + ibucontext); + hns_roce_db_unmap_user(uctx, &srq->rdb); + } else { + hns_roce_free_db(hr_dev, &srq->rdb); + } +} + +static int alloc_srq_db(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, + struct ib_udata *udata, + struct hns_roce_ib_create_srq_resp *resp) +{ + struct hns_roce_ib_create_srq ucmd = {}; + struct hns_roce_ucontext *uctx; + int ret; + + if (udata) { + ret = get_srq_ucmd(srq, udata, &ucmd); + if (ret) + return ret; + + if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) && + (ucmd.req_cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB)) { + uctx = rdma_udata_to_drv_context(udata, + struct hns_roce_ucontext, ibucontext); + ret = hns_roce_db_map_user(uctx, ucmd.db_addr, + &srq->rdb); + if (ret) + return ret; + + srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB; + } + } else { + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB) { + ret = hns_roce_alloc_db(hr_dev, &srq->rdb, 1); + if (ret) + return ret; + + *srq->rdb.db_record = 0; + srq->cap_flags |= HNS_ROCE_RSP_SRQ_CAP_RECORD_DB; + } + srq->db_reg = hr_dev->reg_base + SRQ_DB_REG; + } + + return 0; +} + int hns_roce_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *init_attr, struct ib_udata *udata) @@ -407,15 +481,20 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, if (ret) return ret; - ret = alloc_srqn(hr_dev, srq); + ret = alloc_srq_db(hr_dev, srq, udata, &resp); if (ret) goto err_srq_buf; + ret = alloc_srqn(hr_dev, srq); + if (ret) + goto err_srq_db; + ret = alloc_srqc(hr_dev, srq); if (ret) goto err_srqn; if (udata) { + resp.cap_flags = srq->cap_flags; resp.srqn = srq->srqn; if (ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)))) { @@ -424,7 +503,6 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, } } - srq->db_reg = hr_dev->reg_base + SRQ_DB_REG; srq->event = hns_roce_ib_srq_event; refcount_set(&srq->refcount, 1); init_completion(&srq->free); @@ -435,6 +513,8 @@ err_srqc: free_srqc(hr_dev, srq); err_srqn: free_srqn(hr_dev, srq); +err_srq_db: + free_srq_db(hr_dev, srq, udata); err_srq_buf: free_srq_buf(hr_dev, srq); @@ -448,6 +528,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) free_srqc(hr_dev, srq); free_srqn(hr_dev, srq); + free_srq_db(hr_dev, srq, udata); free_srq_buf(hr_dev, srq); return 0; } diff --git 
a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c index 42d1e97710..1ee7a4e0d8 100644 --- a/drivers/infiniband/hw/irdma/cm.c +++ b/drivers/infiniband/hw/irdma/cm.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" #include "trace.h" diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h index 7feadb3e1e..48ee285cf7 100644 --- a/drivers/infiniband/hw/irdma/cm.h +++ b/drivers/infiniband/hw/irdma/cm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2021 Intel Corporation */ #ifndef IRDMA_CM_H #define IRDMA_CM_H diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c index 8a6200e55c..6aed6169c0 100644 --- a/drivers/infiniband/hw/irdma/ctrl.c +++ b/drivers/infiniband/hw/irdma/ctrl.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include <linux/etherdevice.h> diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h index d06e45d2c2..2cb4b96db7 100644 --- a/drivers/infiniband/hw/irdma/defs.h +++ b/drivers/infiniband/hw/irdma/defs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2021 Intel Corporation */ #ifndef IRDMA_DEFS_H #define IRDMA_DEFS_H @@ -346,6 +346,7 @@ enum irdma_cqp_op_type { #define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b #define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c #define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e +#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f #define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520 #define IRDMA_AE_RESET_SENT 0x0601 #define IRDMA_AE_TERMINATE_SENT 0x0602 diff --git a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c index 49307ce8c4..ac58088a8e 100644 --- a/drivers/infiniband/hw/irdma/hmc.c +++ b/drivers/infiniband/hw/irdma/hmc.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "hmc.h" diff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h index f5c5dacc70..415f9e23bb 100644 --- a/drivers/infiniband/hw/irdma/hmc.h +++ b/drivers/infiniband/hw/irdma/hmc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2020 Intel Corporation */ #ifndef IRDMA_HMC_H #define IRDMA_HMC_H diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index 564c9188e1..ad50b77282 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" @@ -387,6 +387,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf) case IRDMA_AE_LLP_TOO_MANY_RETRIES: case IRDMA_AE_LCE_QP_CATASTROPHIC: case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC: + case IRDMA_AE_LLP_TOO_MANY_RNRS: case IRDMA_AE_LCE_CQ_CATASTROPHIC: case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: default: @@ -570,6 +571,13 @@ static void 
irdma_destroy_irq(struct irdma_pci_f *rf, dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx); irq_update_affinity_hint(msix_vec->irq, NULL); free_irq(msix_vec->irq, dev_id); + if (rf == dev_id) { + tasklet_kill(&rf->dpc_tasklet); + } else { + struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id; + + tasklet_kill(&iwceq->dpc_tasklet); + } } /** diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c index 638d127fb3..ce61a27cb1 100644 --- a/drivers/infiniband/hw/irdma/i40iw_hw.c +++ b/drivers/infiniband/hw/irdma/i40iw_hw.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "type.h" diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h index 10afc165f5..e1db84d8a6 100644 --- a/drivers/infiniband/hw/irdma/i40iw_hw.h +++ b/drivers/infiniband/hw/irdma/i40iw_hw.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2021 Intel Corporation */ #ifndef I40IW_HW_H #define I40IW_HW_H diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c index 4053ead324..cc50a70703 100644 --- a/drivers/infiniband/hw/irdma/i40iw_if.c +++ b/drivers/infiniband/hw/irdma/i40iw_if.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" #include "i40iw_hw.h" @@ -186,7 +186,7 @@ static int i40iw_probe(struct auxiliary_device *aux_dev, const struct auxiliary_ aux_dev); struct i40e_info *cdev_info = i40e_adev->ldev; - strncpy(i40iw_client.name, "irdma", I40E_CLIENT_STR_LENGTH); + strscpy_pad(i40iw_client.name, "irdma", I40E_CLIENT_STR_LENGTH); i40e_client_device_register(cdev_info, &i40iw_client); return 0; diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c index 10ccf4bc3f..941d3edffa 100644 --- a/drivers/infiniband/hw/irdma/icrdma_hw.c +++ b/drivers/infiniband/hw/irdma/icrdma_hw.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2017 - 2021 Intel Corporation */ #include "osdep.h" #include "type.h" diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h index 54035a08cc..697b9572b5 100644 --- a/drivers/infiniband/hw/irdma/icrdma_hw.h +++ b/drivers/infiniband/hw/irdma/icrdma_hw.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2017 - 2021 Intel Corporation */ #ifndef ICRDMA_HW_H #define ICRDMA_HW_H diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h index 3237fa64bc..20d2e7393e 100644 --- a/drivers/infiniband/hw/irdma/irdma.h +++ b/drivers/infiniband/hw/irdma/irdma.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2017 - 2021 Intel Corporation */ #ifndef IRDMA_H #define IRDMA_H diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c index be1030d1ad..3f13200ff7 100644 --- a/drivers/infiniband/hw/irdma/main.c +++ b/drivers/infiniband/hw/irdma/main.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB 
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" #include "../../../net/ethernet/intel/ice/ice.h" diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h index cbf0db72e1..b65bc2ea54 100644 --- a/drivers/infiniband/hw/irdma/main.h +++ b/drivers/infiniband/hw/irdma/main.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2021 Intel Corporation */ #ifndef IRDMA_MAIN_H #define IRDMA_MAIN_H diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h index fc1ba2a3e6..e1e3d3ae72 100644 --- a/drivers/infiniband/hw/irdma/osdep.h +++ b/drivers/infiniband/hw/irdma/osdep.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2021 Intel Corporation */ #ifndef IRDMA_OSDEP_H #define IRDMA_OSDEP_H diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c index c0bef11436..e7ce684075 100644 --- a/drivers/infiniband/hw/irdma/pble.c +++ b/drivers/infiniband/hw/irdma/pble.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "hmc.h" diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h index b31b7c5d66..160ad728e9 100644 --- a/drivers/infiniband/hw/irdma/pble.h +++ b/drivers/infiniband/hw/irdma/pble.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2019 Intel Corporation */ #ifndef IRDMA_PBLE_H #define IRDMA_PBLE_H diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h index 113096b603..d7c8ea948b 100644 --- a/drivers/infiniband/hw/irdma/protos.h +++ b/drivers/infiniband/hw/irdma/protos.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2016 - 2021 Intel Corporation */ #ifndef IRDMA_PROTOS_H #define IRDMA_PROTOS_H diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c index 562531712e..7e3f9bca2c 100644 --- a/drivers/infiniband/hw/irdma/puda.c +++ b/drivers/infiniband/hw/irdma/puda.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "hmc.h" diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h index 5f5124db6d..bc6d9514c9 100644 --- a/drivers/infiniband/hw/irdma/puda.h +++ b/drivers/infiniband/hw/irdma/puda.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2020 Intel Corporation */ #ifndef IRDMA_PUDA_H #define IRDMA_PUDA_H diff --git a/drivers/infiniband/hw/irdma/trace.c b/drivers/infiniband/hw/irdma/trace.c index b5133f4137..fc2f566977 100644 --- a/drivers/infiniband/hw/irdma/trace.c +++ b/drivers/infiniband/hw/irdma/trace.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2019 Intel Corporation */ #define CREATE_TRACE_POINTS #include "trace.h" diff --git 
a/drivers/infiniband/hw/irdma/trace.h b/drivers/infiniband/hw/irdma/trace.h index 702e4efb01..b8085a66b9 100644 --- a/drivers/infiniband/hw/irdma/trace.h +++ b/drivers/infiniband/hw/irdma/trace.h @@ -1,3 +1,3 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2019 Intel Corporation */ #include "trace_cm.h" diff --git a/drivers/infiniband/hw/irdma/trace_cm.h b/drivers/infiniband/hw/irdma/trace_cm.h index f633fb3433..0d1699b552 100644 --- a/drivers/infiniband/hw/irdma/trace_cm.h +++ b/drivers/infiniband/hw/irdma/trace_cm.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2019 - 2021 Intel Corporation */ #if !defined(__TRACE_CM_H) || defined(TRACE_HEADER_MULTI_READ) #define __TRACE_CM_H diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h index c84ec4dd85..59b34afa86 100644 --- a/drivers/infiniband/hw/irdma/type.h +++ b/drivers/infiniband/hw/irdma/type.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2021 Intel Corporation */ #ifndef IRDMA_TYPE_H #define IRDMA_TYPE_H diff --git a/drivers/infiniband/hw/irdma/uda.c b/drivers/infiniband/hw/irdma/uda.c index 284cec2a74..84051266d9 100644 --- a/drivers/infiniband/hw/irdma/uda.c +++ b/drivers/infiniband/hw/irdma/uda.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2016 - 2021 Intel Corporation */ #include <linux/etherdevice.h> diff --git a/drivers/infiniband/hw/irdma/uda.h b/drivers/infiniband/hw/irdma/uda.h index fe4820ff0c..27b8701cf2 100644 --- a/drivers/infiniband/hw/irdma/uda.h +++ b/drivers/infiniband/hw/irdma/uda.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2016 - 2021 Intel Corporation */ #ifndef IRDMA_UDA_H #define IRDMA_UDA_H diff --git a/drivers/infiniband/hw/irdma/uda_d.h b/drivers/infiniband/hw/irdma/uda_d.h index bfc81cac2c..5a9e6eabf0 100644 --- a/drivers/infiniband/hw/irdma/uda_d.h +++ b/drivers/infiniband/hw/irdma/uda_d.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2016 - 2021 Intel Corporation */ #ifndef IRDMA_UDA_D_H #define IRDMA_UDA_D_H diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c index d8285ca162..38c54e59cc 100644 --- a/drivers/infiniband/hw/irdma/uk.c +++ b/drivers/infiniband/hw/irdma/uk.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "osdep.h" #include "defs.h" diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h index 36feca57b2..380e4a47ae 100644 --- a/drivers/infiniband/hw/irdma/user.h +++ b/drivers/infiniband/hw/irdma/user.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2020 Intel Corporation */ #ifndef IRDMA_USER_H #define IRDMA_USER_H diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index 6cd5cb85da..916bfe2a91 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ 
b/drivers/infiniband/hw/irdma/utils.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 2f1bedd3a5..0b046c0617 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2015 - 2021 Intel Corporation */ #include "main.h" @@ -839,7 +839,9 @@ static int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr, if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline || init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags || - init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags) + init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags || + init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta || + init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta) return -EINVAL; if (rdma_protocol_roce(&iwdev->ibdev, 1)) { @@ -2184,9 +2186,8 @@ static int irdma_create_cq(struct ib_cq *ibcq, info.cq_base_pa = iwcq->kmem.pa; } - if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) - info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, - (u32)IRDMA_MAX_CQ_READ_THRESH); + info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2, + (u32)IRDMA_MAX_CQ_READ_THRESH); if (irdma_sc_cq_init(cq, &info)) { ibdev_dbg(&iwdev->ibdev, "VERBS: init cq fail\n"); @@ -2663,8 +2664,11 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev, cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; status = irdma_handle_cqp_op(iwdev->rf, cqp_request); irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); + if (status) + return status; - return status; + iwmr->is_hwreg = 1; + return 0; } /** @@ -2830,14 +2834,18 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr, ret = irdma_handle_cqp_op(iwdev->rf, cqp_request); irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); + if (!ret) + iwmr->is_hwreg = 1; + return ret; } -static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access) +static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access, + bool create_stag) { struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); struct irdma_pbl *iwpbl = &iwmr->iwpbl; - u32 stag; + u32 stag = 0; u8 lvl; int err; @@ -2856,15 +2864,18 @@ static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access) } } - stag = irdma_create_stag(iwdev); - if (!stag) { - err = -ENOMEM; - goto free_pble; + if (create_stag) { + stag = irdma_create_stag(iwdev); + if (!stag) { + err = -ENOMEM; + goto free_pble; + } + + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; } - iwmr->stag = stag; - iwmr->ibmr.rkey = stag; - iwmr->ibmr.lkey = stag; err = irdma_hwreg_mr(iwdev, iwmr, access); if (err) goto err_hwreg; @@ -2872,7 +2883,8 @@ static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access) return 0; err_hwreg: - irdma_free_stag(iwdev, stag); + if (stag) + irdma_free_stag(iwdev, stag); free_pble: if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) @@ -3052,7 +3064,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, goto error; break; case IRDMA_MEMREG_TYPE_MEM: - err = irdma_reg_user_mr_type_mem(iwmr, access); + err = irdma_reg_user_mr_type_mem(iwmr, access, true); if (err) goto error; @@ 
-3096,7 +3108,7 @@ static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start, goto err_release; } - err = irdma_reg_user_mr_type_mem(iwmr, access); + err = irdma_reg_user_mr_type_mem(iwmr, access, true); if (err) goto err_iwmr; @@ -3111,6 +3123,161 @@ err_release: return ERR_PTR(err); } +static int irdma_hwdereg_mr(struct ib_mr *ib_mr) +{ + struct irdma_device *iwdev = to_iwdev(ib_mr->device); + struct irdma_mr *iwmr = to_iwmr(ib_mr); + struct irdma_pd *iwpd = to_iwpd(ib_mr->pd); + struct irdma_dealloc_stag_info *info; + struct irdma_pbl *iwpbl = &iwmr->iwpbl; + struct irdma_cqp_request *cqp_request; + struct cqp_cmds_info *cqp_info; + int status; + + /* Skip HW MR de-register when it is already de-registered + * during an MR re-reregister and the re-registration fails + */ + if (!iwmr->is_hwreg) + return 0; + + cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.dealloc_stag.info; + memset(info, 0, sizeof(*info)); + info->pd_id = iwpd->sc_pd.pd_id; + info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S; + info->mr = true; + if (iwpbl->pbl_allocated) + info->dealloc_pbl = true; + + cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; + cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; + status = irdma_handle_cqp_op(iwdev->rf, cqp_request); + irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); + if (status) + return status; + + iwmr->is_hwreg = 0; + return 0; +} + +/* + * irdma_rereg_mr_trans - Re-register a user MR for a change translation. + * @iwmr: ptr of iwmr + * @start: virtual start address + * @len: length of mr + * @virt: virtual address + * + * Re-register a user memory region when a change translation is requested. + * Re-register a new region while reusing the stag from the original registration. 
+ */ +static int irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len, + u64 virt) +{ + struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); + struct irdma_pbl *iwpbl = &iwmr->iwpbl; + struct ib_pd *pd = iwmr->ibmr.pd; + struct ib_umem *region; + int err; + + region = ib_umem_get(pd->device, start, len, iwmr->access); + if (IS_ERR(region)) + return PTR_ERR(region); + + iwmr->region = region; + iwmr->ibmr.iova = virt; + iwmr->ibmr.pd = pd; + iwmr->page_size = ib_umem_find_best_pgsz(region, + iwdev->rf->sc_dev.hw_attrs.page_size_cap, + virt); + if (unlikely(!iwmr->page_size)) { + err = -EOPNOTSUPP; + goto err; + } + + iwmr->len = region->length; + iwpbl->user_base = virt; + iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size); + + err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false); + if (err) + goto err; + + return 0; + +err: + ib_umem_release(region); + return err; +} + +/* + * irdma_rereg_user_mr - Re-Register a user memory region(MR) + * @ibmr: ib mem to access iwarp mr pointer + * @flags: bit mask to indicate which of the attr's of MR modified + * @start: virtual start address + * @len: length of mr + * @virt: virtual address + * @new_access: bit mask of access flags + * @new_pd: ptr of pd + * @udata: user data + * + * Return: + * NULL - Success, existing MR updated + * ERR_PTR - error occurred + */ +static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags, + u64 start, u64 len, u64 virt, + int new_access, struct ib_pd *new_pd, + struct ib_udata *udata) +{ + struct irdma_device *iwdev = to_iwdev(ib_mr->device); + struct irdma_mr *iwmr = to_iwmr(ib_mr); + struct irdma_pbl *iwpbl = &iwmr->iwpbl; + int ret; + + if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size) + return ERR_PTR(-EINVAL); + + if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) + return ERR_PTR(-EOPNOTSUPP); + + ret = irdma_hwdereg_mr(ib_mr); + if (ret) + return ERR_PTR(ret); + + if (flags & IB_MR_REREG_ACCESS) + iwmr->access = new_access; + + if (flags & IB_MR_REREG_PD) { + iwmr->ibmr.pd = new_pd; + iwmr->ibmr.device = new_pd->device; + } + + if (flags & IB_MR_REREG_TRANS) { + if (iwpbl->pbl_allocated) { + irdma_free_pble(iwdev->rf->pble_rsrc, + &iwpbl->pble_alloc); + iwpbl->pbl_allocated = false; + } + if (iwmr->region) { + ib_umem_release(iwmr->region); + iwmr->region = NULL; + } + + ret = irdma_rereg_mr_trans(iwmr, start, len, virt); + } else + ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access); + if (ret) + return ERR_PTR(ret); + + return NULL; +} + /** * irdma_reg_phys_mr - register kernel physical memory * @pd: ibpd pointer @@ -3218,16 +3385,10 @@ static void irdma_del_memlist(struct irdma_mr *iwmr, */ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) { - struct ib_pd *ibpd = ib_mr->pd; - struct irdma_pd *iwpd = to_iwpd(ibpd); struct irdma_mr *iwmr = to_iwmr(ib_mr); struct irdma_device *iwdev = to_iwdev(ib_mr->device); - struct irdma_dealloc_stag_info *info; struct irdma_pbl *iwpbl = &iwmr->iwpbl; - struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; - struct irdma_cqp_request *cqp_request; - struct cqp_cmds_info *cqp_info; - int status; + int ret; if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) { if (iwmr->region) { @@ -3241,33 +3402,18 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) goto done; } - cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); - if (!cqp_request) - return -ENOMEM; - - cqp_info = &cqp_request->info; - info = &cqp_info->in.u.dealloc_stag.info; - memset(info, 0, 
sizeof(*info)); - info->pd_id = iwpd->sc_pd.pd_id; - info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S; - info->mr = true; - if (iwpbl->pbl_allocated) - info->dealloc_pbl = true; - - cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG; - cqp_info->post_sq = 1; - cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev; - cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; - status = irdma_handle_cqp_op(iwdev->rf, cqp_request); - irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); - if (status) - return status; + ret = irdma_hwdereg_mr(ib_mr); + if (ret) + return ret; irdma_free_stag(iwdev, iwmr->stag); done: if (iwpbl->pbl_allocated) - irdma_free_pble(iwdev->rf->pble_rsrc, palloc); - ib_umem_release(iwmr->region); + irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc); + + if (iwmr->region) + ib_umem_release(iwmr->region); + kfree(iwmr); return 0; @@ -4597,6 +4743,7 @@ static const struct ib_device_ops irdma_dev_ops = { .query_qp = irdma_query_qp, .reg_user_mr = irdma_reg_user_mr, .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf, + .rereg_user_mr = irdma_rereg_user_mr, .req_notify_cq = irdma_req_notify_cq, .resize_cq = irdma_resize_cq, INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd), diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h index 20297a14c9..cfa140b363 100644 --- a/drivers/infiniband/hw/irdma/verbs.h +++ b/drivers/infiniband/hw/irdma/verbs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2021 Intel Corporation */ #ifndef IRDMA_VERBS_H #define IRDMA_VERBS_H @@ -100,6 +100,8 @@ struct irdma_mr { struct ib_mw ibmw; }; struct ib_umem *region; + int access; + u8 is_hwreg; u16 type; u32 page_cnt; u64 page_size; diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c index 20bc8d0d7f..542bc0b1bb 100644 --- a/drivers/infiniband/hw/irdma/ws.c +++ b/drivers/infiniband/hw/irdma/ws.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2017 - 2021 Intel Corporation */ #include "osdep.h" #include "hmc.h" diff --git a/drivers/infiniband/hw/irdma/ws.h b/drivers/infiniband/hw/irdma/ws.h index d431e3327d..45490031a3 100644 --- a/drivers/infiniband/hw/irdma/ws.h +++ b/drivers/infiniband/hw/irdma/ws.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ /* Copyright (c) 2015 - 2020 Intel Corporation */ #ifndef IRDMA_WS_H #define IRDMA_WS_H diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c index f87531318f..a78a067e3c 100644 --- a/drivers/infiniband/hw/mlx5/cong.c +++ b/drivers/infiniband/hw/mlx5/cong.c @@ -458,6 +458,12 @@ void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num) dbg_cc_params->root = debugfs_create_dir("cc_params", mlx5_debugfs_get_dev_root(mdev)); for (i = 0; i < MLX5_IB_DBG_CC_MAX; i++) { + if ((i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID || + i == MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP)) + if (!MLX5_CAP_GEN(mdev, roce) || + !MLX5_CAP_ROCE(mdev, roce_cc_general)) + continue; + dbg_cc_params->params[i].offset = i; dbg_cc_params->params[i].dev = dev; dbg_cc_params->params[i].port_num = port_num; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index 8102ef113b..0c3c4e6481 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -619,6 +619,19 @@ int 
mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, } } + /* Check if extended speeds 2 (XDR/...) are supported */ + if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP && + props->port_cap_flags2 & IB_PORT_EXTENDED_SPEEDS2_SUP) { + ext_active_speed = (out_mad->data[56] >> 4) & 0x6; + + switch (ext_active_speed) { + case 2: + if (props->port_cap_flags2 & IB_PORT_LINK_SPEED_XDR_SUP) + props->active_speed = IB_SPEED_XDR; + break; + } + } + /* If reported active speed is QDR, check if is FDR-10 */ if (props->active_speed == 4) { if (dev->port_caps[port - 1].ext_port_cap & diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 102ead4971..650a15b6cf 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -444,7 +444,7 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed, *active_width = IB_WIDTH_2X; *active_speed = IB_SPEED_NDR; break; - case MLX5E_PROT_MASK(MLX5E_400GAUI_8): + case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8): *active_width = IB_WIDTH_8X; *active_speed = IB_SPEED_HDR; break; @@ -452,6 +452,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed, *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_NDR; break; + case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8): + *active_width = IB_WIDTH_8X; + *active_speed = IB_SPEED_NDR; + break; default: return -EINVAL; } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 16713baf0d..bbe79b86c7 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -753,10 +753,25 @@ struct umr_common { unsigned int state; }; +#define NUM_MKEYS_PER_PAGE \ + ((PAGE_SIZE - sizeof(struct list_head)) / sizeof(u32)) + +struct mlx5_mkeys_page { + u32 mkeys[NUM_MKEYS_PER_PAGE]; + struct list_head list; +}; +static_assert(sizeof(struct mlx5_mkeys_page) == PAGE_SIZE); + +struct mlx5_mkeys_queue { + struct list_head pages_list; + u32 num_pages; + unsigned long ci; + spinlock_t lock; /* sync list ops */ +}; + struct mlx5_cache_ent { - struct xarray mkeys; - unsigned long stored; - unsigned long reserved; + struct mlx5_mkeys_queue mkeys_queue; + u32 pending; char name[4]; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index e0629898c3..18e459b557 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -143,110 +143,47 @@ static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out) mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out); } -static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings, - void *to_store) +static int push_mkey_locked(struct mlx5_cache_ent *ent, u32 mkey) { - XA_STATE(xas, &ent->mkeys, 0); - void *curr; + unsigned long tmp = ent->mkeys_queue.ci % NUM_MKEYS_PER_PAGE; + struct mlx5_mkeys_page *page; - if (limit_pendings && - (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) - return -EAGAIN; - - while (1) { - /* - * This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version - * doesn't transparently unlock. Instead we set the xas index to - * the current value of reserved every iteration. 
- */ - xas_set(&xas, ent->reserved); - curr = xas_load(&xas); - if (!curr) { - if (to_store && ent->stored == ent->reserved) - xas_store(&xas, to_store); - else - xas_store(&xas, XA_ZERO_ENTRY); - if (xas_valid(&xas)) { - ent->reserved++; - if (to_store) { - if (ent->stored != ent->reserved) - __xa_store(&ent->mkeys, - ent->stored, - to_store, - GFP_KERNEL); - ent->stored++; - queue_adjust_cache_locked(ent); - WRITE_ONCE(ent->dev->cache.last_add, - jiffies); - } - } - } - xa_unlock_irq(&ent->mkeys); - - /* - * Notice xas_nomem() must always be called as it cleans - * up any cached allocation. - */ - if (!xas_nomem(&xas, GFP_KERNEL)) - break; - xa_lock_irq(&ent->mkeys); + lockdep_assert_held(&ent->mkeys_queue.lock); + if (ent->mkeys_queue.ci >= + ent->mkeys_queue.num_pages * NUM_MKEYS_PER_PAGE) { + page = kzalloc(sizeof(*page), GFP_ATOMIC); + if (!page) + return -ENOMEM; + ent->mkeys_queue.num_pages++; + list_add_tail(&page->list, &ent->mkeys_queue.pages_list); + } else { + page = list_last_entry(&ent->mkeys_queue.pages_list, + struct mlx5_mkeys_page, list); } - xa_lock_irq(&ent->mkeys); - if (xas_error(&xas)) - return xas_error(&xas); - if (WARN_ON(curr)) - return -EINVAL; - return 0; -} - -static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings, - void *to_store) -{ - int ret; - - xa_lock_irq(&ent->mkeys); - ret = push_mkey_locked(ent, limit_pendings, to_store); - xa_unlock_irq(&ent->mkeys); - return ret; -} - -static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent) -{ - void *old; - - ent->reserved--; - old = __xa_erase(&ent->mkeys, ent->reserved); - WARN_ON(old); -} - -static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey) -{ - void *old; - old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0); - WARN_ON(old); - ent->stored++; + page->mkeys[tmp] = mkey; + ent->mkeys_queue.ci++; + return 0; } -static u32 pop_stored_mkey(struct mlx5_cache_ent *ent) +static int pop_mkey_locked(struct mlx5_cache_ent *ent) { - void *old, *xa_mkey; - - ent->stored--; - ent->reserved--; + unsigned long tmp = (ent->mkeys_queue.ci - 1) % NUM_MKEYS_PER_PAGE; + struct mlx5_mkeys_page *last_page; + u32 mkey; - if (ent->stored == ent->reserved) { - xa_mkey = __xa_erase(&ent->mkeys, ent->stored); - WARN_ON(!xa_mkey); - return (u32)xa_to_value(xa_mkey); + lockdep_assert_held(&ent->mkeys_queue.lock); + last_page = list_last_entry(&ent->mkeys_queue.pages_list, + struct mlx5_mkeys_page, list); + mkey = last_page->mkeys[tmp]; + last_page->mkeys[tmp] = 0; + ent->mkeys_queue.ci--; + if (ent->mkeys_queue.num_pages > 1 && !tmp) { + list_del(&last_page->list); + ent->mkeys_queue.num_pages--; + kfree(last_page); } - - xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY, - GFP_KERNEL); - WARN_ON(!xa_mkey || xa_is_err(xa_mkey)); - old = __xa_erase(&ent->mkeys, ent->reserved); - WARN_ON(old); - return (u32)xa_to_value(xa_mkey); + return mkey; } static void create_mkey_callback(int status, struct mlx5_async_work *context) @@ -260,10 +197,10 @@ static void create_mkey_callback(int status, struct mlx5_async_work *context) if (status) { create_mkey_warn(dev, status, mkey_out->out); kfree(mkey_out); - xa_lock_irqsave(&ent->mkeys, flags); - undo_push_reserve_mkey(ent); + spin_lock_irqsave(&ent->mkeys_queue.lock, flags); + ent->pending--; WRITE_ONCE(dev->fill_delay, 1); - xa_unlock_irqrestore(&ent->mkeys, flags); + spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags); mod_timer(&dev->delay_timer, jiffies + HZ); return; } @@ -272,11 +209,12 @@ static void create_mkey_callback(int 
status, struct mlx5_async_work *context) MLX5_GET(create_mkey_out, mkey_out->out, mkey_index)); WRITE_ONCE(dev->cache.last_add, jiffies); - xa_lock_irqsave(&ent->mkeys, flags); - push_to_reserved(ent, mkey_out->mkey); + spin_lock_irqsave(&ent->mkeys_queue.lock, flags); + push_mkey_locked(ent, mkey_out->mkey); /* If we are doing fill_to_high_water then keep going. */ queue_adjust_cache_locked(ent); - xa_unlock_irqrestore(&ent->mkeys, flags); + ent->pending--; + spin_unlock_irqrestore(&ent->mkeys_queue.lock, flags); kfree(mkey_out); } @@ -333,24 +271,28 @@ static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) set_cache_mkc(ent, mkc); async_create->ent = ent; - err = push_mkey(ent, true, NULL); - if (err) + spin_lock_irq(&ent->mkeys_queue.lock); + if (ent->pending >= MAX_PENDING_REG_MR) { + err = -EAGAIN; goto free_async_create; + } + ent->pending++; + spin_unlock_irq(&ent->mkeys_queue.lock); err = mlx5_ib_create_mkey_cb(async_create); if (err) { mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); - goto err_undo_reserve; + goto err_create_mkey; } } return 0; -err_undo_reserve: - xa_lock_irq(&ent->mkeys); - undo_push_reserve_mkey(ent); - xa_unlock_irq(&ent->mkeys); +err_create_mkey: + spin_lock_irq(&ent->mkeys_queue.lock); + ent->pending--; free_async_create: + spin_unlock_irq(&ent->mkeys_queue.lock); kfree(async_create); return err; } @@ -383,36 +325,36 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) { u32 mkey; - lockdep_assert_held(&ent->mkeys.xa_lock); - if (!ent->stored) + lockdep_assert_held(&ent->mkeys_queue.lock); + if (!ent->mkeys_queue.ci) return; - mkey = pop_stored_mkey(ent); - xa_unlock_irq(&ent->mkeys); + mkey = pop_mkey_locked(ent); + spin_unlock_irq(&ent->mkeys_queue.lock); mlx5_core_destroy_mkey(ent->dev->mdev, mkey); - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); } static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, bool limit_fill) - __acquires(&ent->mkeys) __releases(&ent->mkeys) + __acquires(&ent->mkeys_queue.lock) __releases(&ent->mkeys_queue.lock) { int err; - lockdep_assert_held(&ent->mkeys.xa_lock); + lockdep_assert_held(&ent->mkeys_queue.lock); while (true) { if (limit_fill) target = ent->limit * 2; - if (target == ent->reserved) + if (target == ent->pending + ent->mkeys_queue.ci) return 0; - if (target > ent->reserved) { - u32 todo = target - ent->reserved; + if (target > ent->pending + ent->mkeys_queue.ci) { + u32 todo = target - (ent->pending + ent->mkeys_queue.ci); - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); err = add_keys(ent, todo); if (err == -EAGAIN) usleep_range(3000, 5000); - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); if (err) { if (err != -EAGAIN) return err; @@ -440,7 +382,7 @@ static ssize_t size_write(struct file *filp, const char __user *buf, * cannot free MRs that are in use. Compute the target value for stored * mkeys. 
*/ - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); if (target < ent->in_use) { err = -EINVAL; goto err_unlock; @@ -453,12 +395,12 @@ static ssize_t size_write(struct file *filp, const char __user *buf, err = resize_available_mrs(ent, target, false); if (err) goto err_unlock; - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); return count; err_unlock: - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); return err; } @@ -469,7 +411,8 @@ static ssize_t size_read(struct file *filp, char __user *buf, size_t count, char lbuf[20]; int err; - err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use); + err = snprintf(lbuf, sizeof(lbuf), "%ld\n", + ent->mkeys_queue.ci + ent->in_use); if (err < 0) return err; @@ -498,10 +441,10 @@ static ssize_t limit_write(struct file *filp, const char __user *buf, * Upon set we immediately fill the cache to high water mark implied by * the limit. */ - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); ent->limit = var; err = resize_available_mrs(ent, 0, true); - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); if (err) return err; return count; @@ -537,9 +480,9 @@ static bool someone_adding(struct mlx5_mkey_cache *cache) mutex_lock(&cache->rb_lock); for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) { ent = rb_entry(node, struct mlx5_cache_ent, node); - xa_lock_irq(&ent->mkeys); - ret = ent->stored < ent->limit; - xa_unlock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); + ret = ent->mkeys_queue.ci < ent->limit; + spin_unlock_irq(&ent->mkeys_queue.lock); if (ret) { mutex_unlock(&cache->rb_lock); return true; @@ -556,26 +499,26 @@ static bool someone_adding(struct mlx5_mkey_cache *cache) */ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) { - lockdep_assert_held(&ent->mkeys.xa_lock); + lockdep_assert_held(&ent->mkeys_queue.lock); if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp) return; - if (ent->stored < ent->limit) { + if (ent->mkeys_queue.ci < ent->limit) { ent->fill_to_high_water = true; mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); } else if (ent->fill_to_high_water && - ent->reserved < 2 * ent->limit) { + ent->mkeys_queue.ci + ent->pending < 2 * ent->limit) { /* * Once we start populating due to hitting a low water mark * continue until we pass the high water mark. 
*/ mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); - } else if (ent->stored == 2 * ent->limit) { + } else if (ent->mkeys_queue.ci == 2 * ent->limit) { ent->fill_to_high_water = false; - } else if (ent->stored > 2 * ent->limit) { + } else if (ent->mkeys_queue.ci > 2 * ent->limit) { /* Queue deletion of excess entries */ ent->fill_to_high_water = false; - if (ent->stored != ent->reserved) + if (ent->pending) queue_delayed_work(ent->dev->cache.wq, &ent->dwork, msecs_to_jiffies(1000)); else @@ -589,15 +532,16 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) struct mlx5_mkey_cache *cache = &dev->cache; int err; - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); if (ent->disabled) goto out; - if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit && + if (ent->fill_to_high_water && + ent->mkeys_queue.ci + ent->pending < 2 * ent->limit && !READ_ONCE(dev->fill_delay)) { - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); err = add_keys(ent, 1); - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); if (ent->disabled) goto out; if (err) { @@ -615,7 +559,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) msecs_to_jiffies(1000)); } } - } else if (ent->stored > 2 * ent->limit) { + } else if (ent->mkeys_queue.ci > 2 * ent->limit) { bool need_delay; /* @@ -630,11 +574,11 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) * the garbage collection work to try to run in next cycle, in * order to free CPU resources to other tasks. */ - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); need_delay = need_resched() || someone_adding(cache) || !time_after(jiffies, READ_ONCE(cache->last_add) + 300 * HZ); - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); if (ent->disabled) goto out; if (need_delay) { @@ -645,7 +589,7 @@ static void __cache_work_func(struct mlx5_cache_ent *ent) queue_adjust_cache_locked(ent); } out: - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); } static void delayed_cache_work_func(struct work_struct *work) @@ -753,25 +697,25 @@ static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, if (!mr) return ERR_PTR(-ENOMEM); - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); ent->in_use++; - if (!ent->stored) { + if (!ent->mkeys_queue.ci) { queue_adjust_cache_locked(ent); ent->miss++; - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); err = create_cache_mkey(ent, &mr->mmkey.key); if (err) { - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); ent->in_use--; - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); kfree(mr); return ERR_PTR(err); } } else { - mr->mmkey.key = pop_stored_mkey(ent); + mr->mmkey.key = pop_mkey_locked(ent); queue_adjust_cache_locked(ent); - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); } mr->mmkey.cache_ent = ent; mr->mmkey.type = MLX5_MKEY_MR; @@ -825,14 +769,14 @@ static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) u32 mkey; cancel_delayed_work(&ent->dwork); - xa_lock_irq(&ent->mkeys); - while (ent->stored) { - mkey = pop_stored_mkey(ent); - xa_unlock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); + while (ent->mkeys_queue.ci) { + mkey = pop_mkey_locked(ent); + spin_unlock_irq(&ent->mkeys_queue.lock); mlx5_core_destroy_mkey(dev->mdev, mkey); - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); } - xa_unlock_irq(&ent->mkeys); + 
spin_unlock_irq(&ent->mkeys_queue.lock); } static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) @@ -860,7 +804,7 @@ static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev, dir = debugfs_create_dir(ent->name, dev->cache.fs_root); debugfs_create_file("size", 0600, dir, ent, &size_fops); debugfs_create_file("limit", 0600, dir, ent, &limit_fops); - debugfs_create_ulong("cur", 0400, dir, &ent->stored); + debugfs_create_ulong("cur", 0400, dir, &ent->mkeys_queue.ci); debugfs_create_u32("miss", 0600, dir, &ent->miss); } @@ -882,6 +826,31 @@ static void delay_time_func(struct timer_list *t) WRITE_ONCE(dev->fill_delay, 0); } +static int mlx5r_mkeys_init(struct mlx5_cache_ent *ent) +{ + struct mlx5_mkeys_page *page; + + page = kzalloc(sizeof(*page), GFP_KERNEL); + if (!page) + return -ENOMEM; + INIT_LIST_HEAD(&ent->mkeys_queue.pages_list); + spin_lock_init(&ent->mkeys_queue.lock); + list_add_tail(&page->list, &ent->mkeys_queue.pages_list); + ent->mkeys_queue.num_pages++; + return 0; +} + +static void mlx5r_mkeys_uninit(struct mlx5_cache_ent *ent) +{ + struct mlx5_mkeys_page *page; + + WARN_ON(ent->mkeys_queue.ci || ent->mkeys_queue.num_pages > 1); + page = list_last_entry(&ent->mkeys_queue.pages_list, + struct mlx5_mkeys_page, list); + list_del(&page->list); + kfree(page); +} + struct mlx5_cache_ent * mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, struct mlx5r_cache_rb_key rb_key, @@ -895,7 +864,9 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, if (!ent) return ERR_PTR(-ENOMEM); - xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ); + ret = mlx5r_mkeys_init(ent); + if (ret) + goto mkeys_err; ent->rb_key = rb_key; ent->dev = dev; ent->is_tmp = !persistent_entry; @@ -903,10 +874,8 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); ret = mlx5_cache_ent_insert(&dev->cache, ent); - if (ret) { - kfree(ent); - return ERR_PTR(ret); - } + if (ret) + goto ent_insert_err; if (persistent_entry) { if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) @@ -929,6 +898,11 @@ mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev, } return ent; +ent_insert_err: + mlx5r_mkeys_uninit(ent); +mkeys_err: + kfree(ent); + return ERR_PTR(ret); } static void remove_ent_work_func(struct work_struct *work) @@ -946,13 +920,13 @@ static void remove_ent_work_func(struct work_struct *work) cur = rb_prev(cur); mutex_unlock(&cache->rb_lock); - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); if (!ent->is_tmp) { - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); mutex_lock(&cache->rb_lock); continue; } - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); clean_keys(ent->dev, ent); mutex_lock(&cache->rb_lock); @@ -1002,9 +976,9 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev) mutex_unlock(&cache->rb_lock); for (node = rb_first(root); node; node = rb_next(node)) { ent = rb_entry(node, struct mlx5_cache_ent, node); - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); queue_adjust_cache_locked(ent); - xa_unlock_irq(&ent->mkeys); + spin_unlock_irq(&ent->mkeys_queue.lock); } return 0; @@ -1029,9 +1003,9 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) cancel_delayed_work(&dev->cache.remove_ent_dwork); for (node = rb_first(root); node; node = rb_next(node)) { ent = rb_entry(node, struct mlx5_cache_ent, node); - xa_lock_irq(&ent->mkeys); + spin_lock_irq(&ent->mkeys_queue.lock); ent->disabled = true; - xa_unlock_irq(&ent->mkeys); + 
spin_unlock_irq(&ent->mkeys_queue.lock); cancel_delayed_work(&ent->dwork); } mutex_unlock(&dev->cache.rb_lock); @@ -1053,6 +1027,7 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev) node = rb_next(node); clean_keys(dev, ent); rb_erase(&ent->node, root); + mlx5r_mkeys_uninit(ent); kfree(ent); } mutex_unlock(&dev->cache.rb_lock); @@ -1825,7 +1800,7 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev, int ret; if (mr->mmkey.cache_ent) { - xa_lock_irq(&mr->mmkey.cache_ent->mkeys); + spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); mr->mmkey.cache_ent->in_use--; goto end; } @@ -1839,7 +1814,7 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev, return -EOPNOTSUPP; } mr->mmkey.cache_ent = ent; - xa_lock_irq(&mr->mmkey.cache_ent->mkeys); + spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); mutex_unlock(&cache->rb_lock); goto end; } @@ -1851,12 +1826,11 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev, return PTR_ERR(ent); mr->mmkey.cache_ent = ent; - xa_lock_irq(&mr->mmkey.cache_ent->mkeys); + spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); end: - ret = push_mkey_locked(mr->mmkey.cache_ent, false, - xa_mk_value(mr->mmkey.key)); - xa_unlock_irq(&mr->mmkey.cache_ent->mkeys); + ret = push_mkey_locked(mr->mmkey.cache_ent, mr->mmkey.key); + spin_unlock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); return ret; } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 2340baaba8..8115ab1071 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -3436,7 +3436,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) if (rate == IB_RATE_PORT_CURRENT) return 0; - if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_600_GBPS) + if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS) return -EINVAL; stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support); diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c index 234bf30db7..e76142f6fa 100644 --- a/drivers/infiniband/hw/mlx5/umr.c +++ b/drivers/infiniband/hw/mlx5/umr.c @@ -332,8 +332,8 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey, WARN_ON_ONCE(1); mlx5_ib_warn(dev, - "reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n", - umr_context.status); + "reg umr failed (%u). 
Trying to recover and resubmit the flushed WQEs, mkey = %u\n", + umr_context.status, mkey); mutex_lock(&umrc->lock); err = mlx5r_umr_recover(dev); mutex_unlock(&umrc->lock); diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h index f9a2e65e2f..61d5bbba29 100644 --- a/drivers/infiniband/hw/mthca/mthca_memfree.h +++ b/drivers/infiniband/hw/mthca/mthca_memfree.h @@ -68,7 +68,7 @@ struct mthca_icm_table { int lowmem; int coherent; struct mutex mutex; - struct mthca_icm *icm[]; + struct mthca_icm *icm[] __counted_by(num_icm); }; struct mthca_icm_iter { diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 7887a6786e..f118ce0a9a 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1879,8 +1879,17 @@ static int qedr_create_user_qp(struct qedr_dev *dev, /* RQ - read access only (0) */ rc = qedr_init_user_queue(udata, dev, &qp->urq, ureq.rq_addr, ureq.rq_len, true, 0, alloc_and_init); - if (rc) + if (rc) { + ib_umem_release(qp->usq.umem); + qp->usq.umem = NULL; + if (rdma_protocol_roce(&dev->ibdev, 1)) { + qedr_free_pbl(dev, &qp->usq.pbl_info, + qp->usq.pbl_tbl); + } else { + kfree(qp->usq.pbl_tbl); + } return rc; + } } memset(&in_params, 0, sizeof(in_params)); diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 152952127f..29e4c59aa2 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -2244,7 +2244,7 @@ static ssize_t qib_write_iter(struct kiocb *iocb, struct iov_iter *from) struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp); struct qib_user_sdma_queue *pq = fp->pq; - if (!from->user_backed || !from->nr_segs || !pq) + if (!user_backed_iter(from) || !from->nr_segs || !pq) return -EINVAL; return qib_user_sdma_writev(rcd, pq, iter_iov(from), from->nr_segs); diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index ed7d4b02f4..455e966eef 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -64,8 +64,8 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry, inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; inode->i_blocks = 0; - inode->i_atime = inode_set_ctime_current(inode); - inode->i_mtime = inode->i_atime; + simple_inode_init_ts(inode); + inode->i_private = data; if (S_ISDIR(mode)) { inode->i_op = &simple_dir_inode_operations; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 9d2dd135b7..f93906d8fc 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6127,7 +6127,7 @@ static int setup_txselect(const char *str, const struct kernel_param *kp) TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ); return -EINVAL; } - strncpy(txselect_list, str, ARRAY_SIZE(txselect_list) - 1); + strscpy(txselect_list, str, sizeof(txselect_list)); xa_for_each(&qib_dev_table, index, dd) if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322) diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h index 5a9acf9415..70d51d919d 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.h +++ b/drivers/infiniband/hw/usnic/usnic_uiom.h @@ -77,7 +77,7 @@ struct usnic_uiom_reg { struct usnic_uiom_chunk { struct list_head list; int nents; - struct scatterlist page_list[]; + struct scatterlist page_list[] __counted_by(nents); }; struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device 
*dev); diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 343288b027..a5e8818517 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c @@ -1021,10 +1021,8 @@ err_free_intrs: pvrdma_free_irq(dev); pci_free_irq_vectors(pdev); err_free_cq_ring: - if (dev->netdev) { - dev_put(dev->netdev); - dev->netdev = NULL; - } + dev_put(dev->netdev); + dev->netdev = NULL; pvrdma_page_dir_cleanup(dev, &dev->cq_pdir); err_free_async_ring: pvrdma_page_dir_cleanup(dev, &dev->async_pdir); @@ -1064,10 +1062,8 @@ static void pvrdma_pci_remove(struct pci_dev *pdev) flush_workqueue(event_wq); - if (dev->netdev) { - dev_put(dev->netdev); - dev->netdev = NULL; - } + dev_put(dev->netdev); + dev->netdev = NULL; /* Unregister ib device */ ib_unregister_device(&dev->ib_dev); |