From 34996e42f82bfd60bc2c191e5cae3c6ab233ec6c Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 7 Aug 2024 15:11:27 +0200
Subject: Merging upstream version 6.9.7.

Signed-off-by: Daniel Baumann
---
 drivers/crypto/hisilicon/qm.c | 189 ++++++++++++++++++++++++++++--------------
 1 file changed, 125 insertions(+), 64 deletions(-)

(limited to 'drivers/crypto/hisilicon/qm.c')

diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 4b20b94e63..13e413533f 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -236,6 +236,12 @@
 
 #define QM_DEV_ALG_MAX_LEN		256
 
+/* abnormal status value for stopping queue */
+#define QM_STOP_QUEUE_FAIL		1
+#define QM_DUMP_SQC_FAIL		3
+#define QM_DUMP_CQC_FAIL		4
+#define QM_FINISH_WAIT			5
+
 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
 	(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
 	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
@@ -312,6 +318,7 @@ static const struct hisi_qm_cap_info qm_cap_info_comm[] = {
 	{QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
 	{QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
 	{QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
+	{QM_SUPPORT_STOP_FUNC, 0x3100, 0, BIT(10), 0x0, 0x0, 0x1},
 	{QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
 	{QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
 };
@@ -1674,6 +1681,11 @@ unlock:
 	return ret;
 }
 
+static int qm_drain_qm(struct hisi_qm *qm)
+{
+	return hisi_qm_mb(qm, QM_MB_CMD_FLUSH_QM, 0, 0, 0);
+}
+
 static int qm_stop_qp(struct hisi_qp *qp)
 {
 	return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
 }
@@ -2031,43 +2043,25 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
 	}
 }
 
-/**
- * qm_drain_qp() - Drain a qp.
- * @qp: The qp we want to drain.
- *
- * Determine whether the queue is cleared by judging the tail pointers of
- * sq and cq.
- */
-static int qm_drain_qp(struct hisi_qp *qp)
+static int qm_wait_qp_empty(struct hisi_qm *qm, u32 *state, u32 qp_id)
 {
-	struct hisi_qm *qm = qp->qm;
 	struct device *dev = &qm->pdev->dev;
 	struct qm_sqc sqc;
 	struct qm_cqc cqc;
 	int ret, i = 0;
 
-	/* No need to judge if master OOO is blocked. */
-	if (qm_check_dev_error(qm))
-		return 0;
-
-	/* Kunpeng930 supports drain qp by device */
-	if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
-		ret = qm_stop_qp(qp);
-		if (ret)
-			dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
-		return ret;
-	}
-
 	while (++i) {
-		ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp->qp_id, 1);
+		ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
 		if (ret) {
 			dev_err_ratelimited(dev, "Failed to dump sqc!\n");
+			*state = QM_DUMP_SQC_FAIL;
 			return ret;
 		}
 
-		ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp->qp_id, 1);
+		ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
 		if (ret) {
 			dev_err_ratelimited(dev, "Failed to dump cqc!\n");
+			*state = QM_DUMP_CQC_FAIL;
 			return ret;
 		}
@@ -2076,8 +2070,9 @@ static int qm_drain_qp(struct hisi_qp *qp)
 			break;
 
 		if (i == MAX_WAIT_COUNTS) {
-			dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
-			return -EBUSY;
+			dev_err(dev, "Fail to empty queue %u!\n", qp_id);
+			*state = QM_STOP_QUEUE_FAIL;
+			return -ETIMEDOUT;
 		}
 
 		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
@@ -2086,9 +2081,53 @@ static int qm_drain_qp(struct hisi_qp *qp)
 	return 0;
 }
 
-static int qm_stop_qp_nolock(struct hisi_qp *qp)
+/**
+ * qm_drain_qp() - Drain a qp.
+ * @qp: The qp we want to drain.
+ *
+ * If the device does not support stopping queue by sending mailbox,
+ * determine whether the queue is cleared by judging the tail pointers of
+ * sq and cq.
+ */
+static int qm_drain_qp(struct hisi_qp *qp)
+{
+	struct hisi_qm *qm = qp->qm;
+	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+	u32 state = 0;
+	int ret;
+
+	/* No need to judge if master OOO is blocked. */
+	if (qm_check_dev_error(pf_qm))
+		return 0;
+
+	/* HW V3 supports drain qp by device */
+	if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) {
+		ret = qm_stop_qp(qp);
+		if (ret) {
+			dev_err(&qm->pdev->dev, "Failed to stop qp!\n");
+			state = QM_STOP_QUEUE_FAIL;
+			goto set_dev_state;
+		}
+		return ret;
+	}
+
+	ret = qm_wait_qp_empty(qm, &state, qp->qp_id);
+	if (ret)
+		goto set_dev_state;
+
+	return 0;
+
+set_dev_state:
+	if (qm->debug.dev_dfx.dev_timeout)
+		qm->debug.dev_dfx.dev_state = state;
+
+	return ret;
+}
+
+static void qm_stop_qp_nolock(struct hisi_qp *qp)
 {
-	struct device *dev = &qp->qm->pdev->dev;
+	struct hisi_qm *qm = qp->qm;
+	struct device *dev = &qm->pdev->dev;
 	int ret;
 
 	/*
@@ -2099,39 +2138,36 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
 	 */
 	if (atomic_read(&qp->qp_status.flags) != QP_START) {
 		qp->is_resetting = false;
-		return 0;
+		return;
 	}
 
 	atomic_set(&qp->qp_status.flags, QP_STOP);
 
-	ret = qm_drain_qp(qp);
-	if (ret)
-		dev_err(dev, "Failed to drain out data for stopping!\n");
+	/* V3 supports direct stop function when FLR prepare */
+	if (qm->ver < QM_HW_V3 || qm->status.stop_reason == QM_NORMAL) {
+		ret = qm_drain_qp(qp);
+		if (ret)
+			dev_err(dev, "Failed to drain out data for stopping qp(%u)!\n", qp->qp_id);
+	}
 
-	flush_workqueue(qp->qm->wq);
+	flush_workqueue(qm->wq);
 	if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
 		qp_stop_fail_cb(qp);
 
 	dev_dbg(dev, "stop queue %u!", qp->qp_id);
-
-	return 0;
 }
 
 /**
  * hisi_qm_stop_qp() - Stop a qp in qm.
  * @qp: The qp we want to stop.
  *
- * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
+ * This function is reverse of hisi_qm_start_qp.
  */
-int hisi_qm_stop_qp(struct hisi_qp *qp)
+void hisi_qm_stop_qp(struct hisi_qp *qp)
 {
-	int ret;
-
 	down_write(&qp->qm->qps_lock);
-	ret = qm_stop_qp_nolock(qp);
+	qm_stop_qp_nolock(qp);
 	up_write(&qp->qm->qps_lock);
-
-	return ret;
 }
 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
 
@@ -2309,7 +2345,31 @@ static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
 
 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
 {
-	hisi_qm_stop_qp(q->priv);
+	struct hisi_qp *qp = q->priv;
+	struct hisi_qm *qm = qp->qm;
+	struct qm_dev_dfx *dev_dfx = &qm->debug.dev_dfx;
+	u32 i = 0;
+
+	hisi_qm_stop_qp(qp);
+
+	if (!dev_dfx->dev_timeout || !dev_dfx->dev_state)
+		return;
+
+	/*
+	 * After the queue fails to be stopped,
+	 * wait for a period of time before releasing the queue.
+	 */
+	while (++i) {
+		msleep(WAIT_PERIOD);
+
+		/* Since dev_timeout maybe modified, check i >= dev_timeout */
+		if (i >= dev_dfx->dev_timeout) {
+			dev_err(&qm->pdev->dev, "Stop q %u timeout, state %u\n",
+				qp->qp_id, dev_dfx->dev_state);
+			dev_dfx->dev_state = QM_FINISH_WAIT;
+			break;
+		}
+	}
 }
 
 static int hisi_qm_is_q_updated(struct uacce_queue *q)
@@ -2833,12 +2893,9 @@ void hisi_qm_uninit(struct hisi_qm *qm)
 	hisi_qm_set_state(qm, QM_NOT_READY);
 	up_write(&qm->qps_lock);
 
+	qm_remove_uacce(qm);
 	qm_irqs_unregister(qm);
 	hisi_qm_pci_uninit(qm);
-	if (qm->use_sva) {
-		uacce_remove(qm->uacce);
-		qm->uacce = NULL;
-	}
 }
 EXPORT_SYMBOL_GPL(hisi_qm_uninit);
 
@@ -3054,25 +3111,18 @@ static int qm_restart(struct hisi_qm *qm)
 }
 
 /* Stop started qps in reset flow */
-static int qm_stop_started_qp(struct hisi_qm *qm)
+static void qm_stop_started_qp(struct hisi_qm *qm)
 {
-	struct device *dev = &qm->pdev->dev;
 	struct hisi_qp *qp;
-	int i, ret;
+	int i;
 
 	for (i = 0; i < qm->qp_num; i++) {
 		qp = &qm->qp_array[i];
-		if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
+		if (atomic_read(&qp->qp_status.flags) == QP_START) {
 			qp->is_resetting = true;
-			ret = qm_stop_qp_nolock(qp);
-			if (ret < 0) {
-				dev_err(dev, "Failed to stop qp%d!\n", i);
-				return ret;
-			}
+			qm_stop_qp_nolock(qp);
 		}
 	}
-
-	return 0;
 }
 
 /**
@@ -3112,21 +3162,31 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
 
 	down_write(&qm->qps_lock);
 
-	qm->status.stop_reason = r;
 	if (atomic_read(&qm->status.flags) == QM_STOP)
 		goto err_unlock;
 
 	/* Stop all the request sending at first. */
 	atomic_set(&qm->status.flags, QM_STOP);
+	qm->status.stop_reason = r;
 
-	if (qm->status.stop_reason == QM_SOFT_RESET ||
-	    qm->status.stop_reason == QM_DOWN) {
+	if (qm->status.stop_reason != QM_NORMAL) {
 		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
-		ret = qm_stop_started_qp(qm);
-		if (ret < 0) {
-			dev_err(dev, "Failed to stop started qp!\n");
-			goto err_unlock;
+		/*
+		 * When performing soft reset, the hardware will no longer
+		 * do tasks, and the tasks in the device will be flushed
+		 * out directly since the master ooo is closed.
+		 */
+		if (test_bit(QM_SUPPORT_STOP_FUNC, &qm->caps) &&
+		    r != QM_SOFT_RESET) {
+			ret = qm_drain_qm(qm);
+			if (ret) {
+				dev_err(dev, "failed to drain qm!\n");
+				goto err_unlock;
+			}
 		}
+
+		qm_stop_started_qp(qm);
+
 		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
 	}
 
@@ -3141,6 +3201,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
 	}
 
 	qm_clear_queues(qm);
+	qm->status.stop_reason = QM_NORMAL;
 
 err_unlock:
 	up_write(&qm->qps_lock);
--
cgit v1.2.3
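
Editor's note - a minimal caller-side sketch, not part of the patch above:
hisi_qm_stop_qp() now returns void, so callers no longer branch on a stop
failure; abnormal stop states are instead recorded in qm->debug.dev_dfx and
handled in the uacce stop path. The helper below is hypothetical and only
illustrates the new calling convention; hisi_qm_stop_qp() and
hisi_qm_release_qp() are the QM layer's existing exported API.

	/* Hypothetical teardown helper for a driver built on the QM layer. */
	static void example_release_qp(struct hisi_qp *qp)
	{
		/*
		 * After this change, stopping a qp cannot fail from the
		 * caller's point of view: drain/timeout problems are logged
		 * and recorded in dev_dfx by the QM core itself.
		 */
		hisi_qm_stop_qp(qp);
		hisi_qm_release_qp(qp);
	}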