Diffstat:
 drivers/crypto/hisilicon/debugfs.c          |  75
 drivers/crypto/hisilicon/hpre/hpre_crypto.c |  25
 drivers/crypto/hisilicon/hpre/hpre_main.c   |  14
 drivers/crypto/hisilicon/qm.c               | 383
 drivers/crypto/hisilicon/qm_common.h        |   5
 drivers/crypto/hisilicon/sec/sec_drv.c      |   6
 drivers/crypto/hisilicon/sec2/sec_crypto.c  |  29
 drivers/crypto/hisilicon/sec2/sec_main.c    |  24
 drivers/crypto/hisilicon/trng/trng.c        |   6
 drivers/crypto/hisilicon/zip/zip_crypto.c   | 290
 drivers/crypto/hisilicon/zip/zip_main.c     |  18
 11 files changed, 331 insertions(+), 544 deletions(-)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c index 2cc1591949..7e8186fe05 100644 --- a/drivers/crypto/hisilicon/debugfs.c +++ b/drivers/crypto/hisilicon/debugfs.c @@ -137,8 +137,8 @@ static void dump_show(struct hisi_qm *qm, void *info, static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; - struct qm_sqc *sqc, *sqc_curr; - dma_addr_t sqc_dma; + struct qm_sqc *sqc_curr; + struct qm_sqc sqc; u32 qp_id; int ret; @@ -151,35 +151,29 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) return -EINVAL; } - sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma); - if (IS_ERR(sqc)) - return PTR_ERR(sqc); + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1); + if (!ret) { + dump_show(qm, &sqc, sizeof(struct qm_sqc), name); - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1); - if (ret) { - down_read(&qm->qps_lock); - if (qm->sqc) { - sqc_curr = qm->sqc + qp_id; + return 0; + } - dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC"); - } - up_read(&qm->qps_lock); + down_read(&qm->qps_lock); + if (qm->sqc) { + sqc_curr = qm->sqc + qp_id; - goto free_ctx; + dump_show(qm, sqc_curr, sizeof(*sqc_curr), "SOFT SQC"); } + up_read(&qm->qps_lock); - dump_show(qm, sqc, sizeof(*sqc), name); - -free_ctx: - hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); return 0; } static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; - struct qm_cqc *cqc, *cqc_curr; - dma_addr_t cqc_dma; + struct qm_cqc *cqc_curr; + struct qm_cqc cqc; u32 qp_id; int ret; @@ -192,34 +186,29 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) return -EINVAL; } - cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); - if (IS_ERR(cqc)) - return PTR_ERR(cqc); + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1); + if (!ret) { + dump_show(qm, &cqc, sizeof(struct qm_cqc), name); - ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1); - if (ret) { - down_read(&qm->qps_lock); - if (qm->cqc) { - cqc_curr = qm->cqc + qp_id; + return 0; + } - dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC"); - } - up_read(&qm->qps_lock); + down_read(&qm->qps_lock); + if (qm->cqc) { + cqc_curr = qm->cqc + qp_id; - goto free_ctx; + dump_show(qm, cqc_curr, sizeof(*cqc_curr), "SOFT CQC"); } + up_read(&qm->qps_lock); - dump_show(qm, cqc, sizeof(*cqc), name); - -free_ctx: - hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); return 0; } static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; - dma_addr_t xeqc_dma; + struct qm_aeqc aeqc; + struct qm_eqc eqc; size_t size; void *xeqc; int ret; @@ -233,23 +222,19 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name) if (!strcmp(name, "EQC")) { cmd = QM_MB_CMD_EQC; size = sizeof(struct qm_eqc); + xeqc = &eqc; } else { cmd = QM_MB_CMD_AEQC; size = sizeof(struct qm_aeqc); + xeqc = &aeqc; } - xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma); - if (IS_ERR(xeqc)) - return PTR_ERR(xeqc); - - ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1); + ret = qm_set_and_get_xqc(qm, cmd, xeqc, 0, 1); if (ret) - goto err_free_ctx; + return ret; dump_show(qm, xeqc, size, name); -err_free_ctx: - hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma); return ret; } diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c index 9a1c61be32..764532a6ca 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c +++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c @@ -57,6 +57,9 @@ struct 
hpre_ctx; #define HPRE_DRV_ECDH_MASK_CAP BIT(2) #define HPRE_DRV_X25519_MASK_CAP BIT(5) +static DEFINE_MUTEX(hpre_algs_lock); +static unsigned int hpre_available_devs; + typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); struct hpre_rsa_ctx { @@ -2202,11 +2205,17 @@ static void hpre_unregister_x25519(struct hisi_qm *qm) int hpre_algs_register(struct hisi_qm *qm) { - int ret; + int ret = 0; + + mutex_lock(&hpre_algs_lock); + if (hpre_available_devs) { + hpre_available_devs++; + goto unlock; + } ret = hpre_register_rsa(qm); if (ret) - return ret; + goto unlock; ret = hpre_register_dh(qm); if (ret) @@ -2220,6 +2229,9 @@ int hpre_algs_register(struct hisi_qm *qm) if (ret) goto unreg_ecdh; + hpre_available_devs++; + mutex_unlock(&hpre_algs_lock); + return ret; unreg_ecdh: @@ -2228,13 +2240,22 @@ unreg_dh: hpre_unregister_dh(qm); unreg_rsa: hpre_unregister_rsa(qm); +unlock: + mutex_unlock(&hpre_algs_lock); return ret; } void hpre_algs_unregister(struct hisi_qm *qm) { + mutex_lock(&hpre_algs_lock); + if (--hpre_available_devs) + goto unlock; + hpre_unregister_x25519(qm); hpre_unregister_ecdh(qm); hpre_unregister_dh(qm); hpre_unregister_rsa(qm); + +unlock: + mutex_unlock(&hpre_algs_lock); } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index b97ce0ee71..3255b2a070 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -107,6 +107,7 @@ #define HPRE_VIA_MSI_DSM 1 #define HPRE_SQE_MASK_OFFSET 8 #define HPRE_SQE_MASK_LEN 24 +#define HPRE_CTX_Q_NUM_DEF 1 #define HPRE_DFX_BASE 0x301000 #define HPRE_DFX_COMMON1 0x301400 @@ -1411,10 +1412,11 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) dev_warn(&pdev->dev, "init debugfs fail!\n"); - ret = hisi_qm_alg_register(qm, &hpre_devices); + hisi_qm_add_list(qm, &hpre_devices); + ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF); if (ret < 0) { pci_err(pdev, "fail to register algs to crypto!\n"); - goto err_with_qm_start; + goto err_qm_del_list; } if (qm->uacce) { @@ -1436,9 +1438,10 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_with_alg_register: - hisi_qm_alg_unregister(qm, &hpre_devices); + hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF); -err_with_qm_start: +err_qm_del_list: + hisi_qm_del_list(qm, &hpre_devices); hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); @@ -1458,7 +1461,8 @@ static void hpre_remove(struct pci_dev *pdev) hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &hpre_devices); - hisi_qm_alg_unregister(qm, &hpre_devices); + hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF); + hisi_qm_del_list(qm, &hpre_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev, true); diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index e889363ed9..40da95dbab 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -46,7 +46,7 @@ #define QM_QC_PASID_ENABLE_SHIFT 7 #define QM_SQ_TYPE_MASK GENMASK(3, 0) -#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1) +#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1) /* cqc shift */ #define QM_CQ_HOP_NUM_SHIFT 0 @@ -58,7 +58,7 @@ #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) #define QM_QC_CQE_SIZE 4 -#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1) +#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1) /* eqc shift */ #define 
QM_EQE_AEQE_SIZE (2UL << 12) @@ -69,6 +69,7 @@ #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1) #define QM_AEQE_TYPE_SHIFT 17 +#define QM_AEQE_TYPE_MASK 0xf #define QM_AEQE_CQN_MASK GENMASK(15, 0) #define QM_CQ_OVERFLOW 0 #define QM_EQ_OVERFLOW 1 @@ -253,19 +254,6 @@ #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \ ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) -#define INIT_QC_COMMON(qc, base, pasid) do { \ - (qc)->head = 0; \ - (qc)->tail = 0; \ - (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \ - (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \ - (qc)->dw3 = 0; \ - (qc)->w8 = 0; \ - (qc)->rsvd0 = 0; \ - (qc)->pasid = cpu_to_le16(pasid); \ - (qc)->w11 = 0; \ - (qc)->rsvd1 = 0; \ -} while (0) - enum vft_type { SQC_VFT = 0, CQC_VFT, @@ -701,6 +689,59 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, } EXPORT_SYMBOL_GPL(hisi_qm_mb); +/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */ +int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op) +{ + struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); + struct qm_mailbox mailbox; + dma_addr_t xqc_dma; + void *tmp_xqc; + size_t size; + int ret; + + switch (cmd) { + case QM_MB_CMD_SQC: + size = sizeof(struct qm_sqc); + tmp_xqc = qm->xqc_buf.sqc; + xqc_dma = qm->xqc_buf.sqc_dma; + break; + case QM_MB_CMD_CQC: + size = sizeof(struct qm_cqc); + tmp_xqc = qm->xqc_buf.cqc; + xqc_dma = qm->xqc_buf.cqc_dma; + break; + case QM_MB_CMD_EQC: + size = sizeof(struct qm_eqc); + tmp_xqc = qm->xqc_buf.eqc; + xqc_dma = qm->xqc_buf.eqc_dma; + break; + case QM_MB_CMD_AEQC: + size = sizeof(struct qm_aeqc); + tmp_xqc = qm->xqc_buf.aeqc; + xqc_dma = qm->xqc_buf.aeqc_dma; + break; + } + + /* Setting xqc will fail if master OOO is blocked. 
*/ + if (qm_check_dev_error(pf_qm)) { + dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stop!\n"); + return -EIO; + } + + mutex_lock(&qm->mailbox_lock); + if (!op) + memcpy(tmp_xqc, xqc, size); + + qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op); + ret = qm_mb_nolock(qm, &mailbox); + if (!ret && op) + memcpy(xqc, tmp_xqc, size); + + mutex_unlock(&qm->mailbox_lock); + + return ret; +} + static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) { u64 doorbell; @@ -1062,7 +1103,8 @@ static irqreturn_t qm_aeq_thread(int irq, void *data) atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { - type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; + type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) & + QM_AEQE_TYPE_MASK; qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK; switch (type) { @@ -1346,45 +1388,6 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) return 0; } -void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, - dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - void *ctx_addr; - - ctx_addr = kzalloc(ctx_size, GFP_KERNEL); - if (!ctx_addr) - return ERR_PTR(-ENOMEM); - - *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, *dma_addr)) { - dev_err(dev, "DMA mapping error!\n"); - kfree(ctx_addr); - return ERR_PTR(-ENOMEM); - } - - return ctx_addr; -} - -void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, - const void *ctx_addr, dma_addr_t *dma_addr) -{ - struct device *dev = &qm->pdev->dev; - - dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); - kfree(ctx_addr); -} - -static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); -} - -static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) -{ - return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); -} - static void qm_hw_error_init_v1(struct hisi_qm *qm) { writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); @@ -1977,84 +1980,51 @@ static void hisi_qm_release_qp(struct hisi_qp *qp) static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { struct hisi_qm *qm = qp->qm; - struct device *dev = &qm->pdev->dev; enum qm_hw_ver ver = qm->ver; - struct qm_sqc *sqc; - dma_addr_t sqc_dma; - int ret; - - sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL); - if (!sqc) - return -ENOMEM; + struct qm_sqc sqc = {0}; - INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); if (ver == QM_HW_V1) { - sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); - sqc->w8 = cpu_to_le16(qp->sq_depth - 1); + sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); + sqc.w8 = cpu_to_le16(qp->sq_depth - 1); } else { - sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); - sqc->w8 = 0; /* rand_qc */ + sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); + sqc.w8 = 0; /* rand_qc */ } - sqc->cq_num = cpu_to_le16(qp_id); - sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); + sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); + sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma)); + sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma)); + sqc.cq_num = cpu_to_le16(qp_id); + sqc.pasid = cpu_to_le16(pasid); if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) - sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE << - QM_QC_PASID_ENABLE_SHIFT); + sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE << + QM_QC_PASID_ENABLE_SHIFT); 
- sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc), - DMA_TO_DEVICE); - if (dma_mapping_error(dev, sqc_dma)) { - kfree(sqc); - return -ENOMEM; - } - - ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); - dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE); - kfree(sqc); - - return ret; + return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0); } static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { struct hisi_qm *qm = qp->qm; - struct device *dev = &qm->pdev->dev; enum qm_hw_ver ver = qm->ver; - struct qm_cqc *cqc; - dma_addr_t cqc_dma; - int ret; + struct qm_cqc cqc = {0}; - cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL); - if (!cqc) - return -ENOMEM; - - INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); if (ver == QM_HW_V1) { - cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, - QM_QC_CQE_SIZE)); - cqc->w8 = cpu_to_le16(qp->cq_depth - 1); + cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE)); + cqc.w8 = cpu_to_le16(qp->cq_depth - 1); } else { - cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); - cqc->w8 = 0; /* rand_qc */ + cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); + cqc.w8 = 0; /* rand_qc */ } - cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); + cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); + cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma)); + cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma)); + cqc.pasid = cpu_to_le16(pasid); if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) - cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE); - - cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc), - DMA_TO_DEVICE); - if (dma_mapping_error(dev, cqc_dma)) { - kfree(cqc); - return -ENOMEM; - } + cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE); - ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); - dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE); - kfree(cqc); - - return ret; + return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0); } static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) @@ -2144,14 +2114,11 @@ static void qp_stop_fail_cb(struct hisi_qp *qp) */ static int qm_drain_qp(struct hisi_qp *qp) { - size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc); struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; - struct qm_sqc *sqc; - struct qm_cqc *cqc; - dma_addr_t dma_addr; - int ret = 0, i = 0; - void *addr; + struct qm_sqc sqc; + struct qm_cqc cqc; + int ret, i = 0; /* No need to judge if master OOO is blocked. 
*/ if (qm_check_dev_error(qm)) @@ -2165,44 +2132,32 @@ static int qm_drain_qp(struct hisi_qp *qp) return ret; } - addr = hisi_qm_ctx_alloc(qm, size, &dma_addr); - if (IS_ERR(addr)) { - dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); - return -ENOMEM; - } - while (++i) { - ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp->qp_id, 1); if (ret) { dev_err_ratelimited(dev, "Failed to dump sqc!\n"); - break; + return ret; } - sqc = addr; - ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), - qp->qp_id); + ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp->qp_id, 1); if (ret) { dev_err_ratelimited(dev, "Failed to dump cqc!\n"); - break; + return ret; } - cqc = addr + sizeof(struct qm_sqc); - if ((sqc->tail == cqc->tail) && + if ((sqc.tail == cqc.tail) && (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) break; if (i == MAX_WAIT_COUNTS) { dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); - ret = -EBUSY; - break; + return -EBUSY; } usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); } - hisi_qm_ctx_free(qm, size, addr, &dma_addr); - - return ret; + return 0; } static int qm_stop_qp_nolock(struct hisi_qp *qp) @@ -2914,11 +2869,20 @@ static void hisi_qm_unint_work(struct hisi_qm *qm) destroy_workqueue(qm->wq); } +static void hisi_qm_free_rsv_buf(struct hisi_qm *qm) +{ + struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma; + struct device *dev = &qm->pdev->dev; + + dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma); +} + static void hisi_qm_memory_uninit(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; hisi_qp_memory_uninit(qm, qm->qp_num); + hisi_qm_free_rsv_buf(qm); if (qm->qdma.va) { hisi_qm_cache_wb(qm); dma_free_coherent(dev, qm->qdma.size, @@ -3040,62 +3004,26 @@ static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) static int qm_eq_ctx_cfg(struct hisi_qm *qm) { - struct device *dev = &qm->pdev->dev; - struct qm_eqc *eqc; - dma_addr_t eqc_dma; - int ret; - - eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); - if (!eqc) - return -ENOMEM; + struct qm_eqc eqc = {0}; - eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); - eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); + eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); + eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); if (qm->ver == QM_HW_V1) - eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); - eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); - - eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), - DMA_TO_DEVICE); - if (dma_mapping_error(dev, eqc_dma)) { - kfree(eqc); - return -ENOMEM; - } + eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); + eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); - ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); - dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); - kfree(eqc); - - return ret; + return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0); } static int qm_aeq_ctx_cfg(struct hisi_qm *qm) { - struct device *dev = &qm->pdev->dev; - struct qm_aeqc *aeqc; - dma_addr_t aeqc_dma; - int ret; - - aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL); - if (!aeqc) - return -ENOMEM; - - aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); - aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); - aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); + struct qm_aeqc aeqc = {0}; - aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), - DMA_TO_DEVICE); - if (dma_mapping_error(dev, aeqc_dma)) { - 
kfree(aeqc); - return -ENOMEM; - } + aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); + aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); + aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); - ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); - dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); - kfree(aeqc); - - return ret; + return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0); } static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) @@ -4885,63 +4813,48 @@ static void qm_cmd_process(struct work_struct *cmd_process) } /** - * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list. + * hisi_qm_alg_register() - Register alg to crypto. * @qm: The qm needs add. * @qm_list: The qm list. + * @guard: Guard of qp_num. * - * This function adds qm to qm list, and will register algorithm to - * crypto when the qm list is empty. + * Register algorithm to crypto when the function is satisfy guard. */ -int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) { struct device *dev = &qm->pdev->dev; - int flag = 0; - int ret = 0; - - mutex_lock(&qm_list->lock); - if (list_empty(&qm_list->list)) - flag = 1; - list_add_tail(&qm->list, &qm_list->list); - mutex_unlock(&qm_list->lock); if (qm->ver <= QM_HW_V2 && qm->use_sva) { dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n"); return 0; } - if (flag) { - ret = qm_list->register_to_crypto(qm); - if (ret) { - mutex_lock(&qm_list->lock); - list_del(&qm->list); - mutex_unlock(&qm_list->lock); - } + if (qm->qp_num < guard) { + dev_info(dev, "qp_num is less than task need.\n"); + return 0; } - return ret; + return qm_list->register_to_crypto(qm); } EXPORT_SYMBOL_GPL(hisi_qm_alg_register); /** - * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from - * qm list. + * hisi_qm_alg_unregister() - Unregister alg from crypto. * @qm: The qm needs delete. * @qm_list: The qm list. + * @guard: Guard of qp_num. * - * This function deletes qm from qm list, and will unregister algorithm - * from crypto when the qm list is empty. + * Unregister algorithm from crypto when the last function is satisfy guard. 
*/ -void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) +void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard) { - mutex_lock(&qm_list->lock); - list_del(&qm->list); - mutex_unlock(&qm_list->lock); - if (qm->ver <= QM_HW_V2 && qm->use_sva) return; - if (list_empty(&qm_list->list)) - qm_list->unregister_from_crypto(qm); + if (qm->qp_num < guard) + return; + + qm_list->unregister_from_crypto(qm); } EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); @@ -5364,6 +5277,36 @@ err_init_qp_mem: return ret; } +static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm) +{ + struct qm_rsv_buf *xqc_buf = &qm->xqc_buf; + struct qm_dma *xqc_dma = &xqc_buf->qcdma; + struct device *dev = &qm->pdev->dev; + size_t off = 0; + +#define QM_XQC_BUF_INIT(xqc_buf, type) do { \ + (xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \ + (xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \ + off += QMC_ALIGN(sizeof(struct qm_##type)); \ +} while (0) + + xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) + + QMC_ALIGN(sizeof(struct qm_aeqc)) + + QMC_ALIGN(sizeof(struct qm_sqc)) + + QMC_ALIGN(sizeof(struct qm_cqc)); + xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size, + &xqc_dma->dma, GFP_KERNEL); + if (!xqc_dma->va) + return -ENOMEM; + + QM_XQC_BUF_INIT(xqc_buf, eqc); + QM_XQC_BUF_INIT(xqc_buf, aeqc); + QM_XQC_BUF_INIT(xqc_buf, sqc); + QM_XQC_BUF_INIT(xqc_buf, cqc); + + return 0; +} + static int hisi_qm_memory_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; @@ -5405,13 +5348,19 @@ static int hisi_qm_memory_init(struct hisi_qm *qm) QM_INIT_BUF(qm, sqc, qm->qp_num); QM_INIT_BUF(qm, cqc, qm->qp_num); + ret = hisi_qm_alloc_rsv_buf(qm); + if (ret) + goto err_free_qdma; + ret = hisi_qp_alloc_memory(qm); if (ret) - goto err_alloc_qp_array; + goto err_free_reserve_buf; return 0; -err_alloc_qp_array: +err_free_reserve_buf: + hisi_qm_free_rsv_buf(qm); +err_free_qdma: dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); err_destroy_idr: idr_destroy(&qm->qp_idr); diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h index 8e36aa9c68..7b0b15c83e 100644 --- a/drivers/crypto/hisilicon/qm_common.h +++ b/drivers/crypto/hisilicon/qm_common.h @@ -76,10 +76,7 @@ static const char * const qm_s[] = { "init", "start", "close", "stop", }; -void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, - dma_addr_t *dma_addr); -void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, - const void *ctx_addr, dma_addr_t *dma_addr); +int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op); void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm); void hisi_qm_set_algqos_init(struct hisi_qm *qm); diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c index e1e08993de..afdddf87cc 100644 --- a/drivers/crypto/hisilicon/sec/sec_drv.c +++ b/drivers/crypto/hisilicon/sec/sec_drv.c @@ -1271,7 +1271,7 @@ queues_unconfig: return ret; } -static int sec_remove(struct platform_device *pdev) +static void sec_remove(struct platform_device *pdev) { struct sec_dev_info *info = platform_get_drvdata(pdev); int i; @@ -1287,8 +1287,6 @@ static int sec_remove(struct platform_device *pdev) } sec_base_exit(info); - - return 0; } static const __maybe_unused struct of_device_id sec_match[] = { @@ -1306,7 +1304,7 @@ MODULE_DEVICE_TABLE(acpi, sec_acpi_match); static struct platform_driver sec_driver = { .probe = sec_probe, - .remove = sec_remove, + .remove_new = sec_remove, .driver = { .name = 
"hisi_sec_platform_driver", .of_match_table = sec_match, diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index c3a630cb27..ba7f305d43 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -104,6 +104,9 @@ #define IV_CTR_INIT 0x1 #define IV_BYTE_OFFSET 0x8 +static DEFINE_MUTEX(sec_algs_lock); +static unsigned int sec_available_devs; + struct sec_skcipher { u64 alg_msk; struct skcipher_alg alg; @@ -1011,6 +1014,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, ret = sec_aead_mac_init(a_req); if (unlikely(ret)) { dev_err(dev, "fail to init mac data for ICV!\n"); + hisi_acc_sg_buf_unmap(dev, src, req->in); return ret; } } @@ -2549,15 +2553,29 @@ int sec_register_to_crypto(struct hisi_qm *qm) alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX, SEC_DRV_ALG_BITMAP_LOW_IDX); + mutex_lock(&sec_algs_lock); + if (sec_available_devs) { + sec_available_devs++; + goto unlock; + } ret = sec_register_skcipher(alg_mask); if (ret) - return ret; + goto unlock; ret = sec_register_aead(alg_mask); if (ret) - sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); + goto unreg_skcipher; + + sec_available_devs++; + mutex_unlock(&sec_algs_lock); + return 0; + +unreg_skcipher: + sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); +unlock: + mutex_unlock(&sec_algs_lock); return ret; } @@ -2568,6 +2586,13 @@ void sec_unregister_from_crypto(struct hisi_qm *qm) alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX, SEC_DRV_ALG_BITMAP_LOW_IDX); + mutex_lock(&sec_algs_lock); + if (--sec_available_devs) + goto unlock; + sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads)); sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); + +unlock: + mutex_unlock(&sec_algs_lock); } diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index bf02a6b2ee..878d94ab5d 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1238,15 +1238,11 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) pci_warn(pdev, "Failed to init debugfs!\n"); - if (qm->qp_num >= ctx_q_num) { - ret = hisi_qm_alg_register(qm, &sec_devices); - if (ret < 0) { - pr_err("Failed to register driver to crypto.\n"); - goto err_qm_stop; - } - } else { - pci_warn(qm->pdev, - "Failed to use kernel mode, qp not enough!\n"); + hisi_qm_add_list(qm, &sec_devices); + ret = hisi_qm_alg_register(qm, &sec_devices, ctx_q_num); + if (ret < 0) { + pr_err("Failed to register driver to crypto.\n"); + goto err_qm_del_list; } if (qm->uacce) { @@ -1268,9 +1264,9 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_alg_unregister: - if (qm->qp_num >= ctx_q_num) - hisi_qm_alg_unregister(qm, &sec_devices); -err_qm_stop: + hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num); +err_qm_del_list: + hisi_qm_del_list(qm, &sec_devices); sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: @@ -1287,8 +1283,8 @@ static void sec_remove(struct pci_dev *pdev) hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &sec_devices); - if (qm->qp_num >= ctx_q_num) - hisi_qm_alg_unregister(qm, &sec_devices); + hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num); + hisi_qm_del_list(qm, &sec_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev, true); diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c 
index 97e500db0a..451b167bcc 100644 --- a/drivers/crypto/hisilicon/trng/trng.c +++ b/drivers/crypto/hisilicon/trng/trng.c @@ -303,7 +303,7 @@ err_remove_from_list: return ret; } -static int hisi_trng_remove(struct platform_device *pdev) +static void hisi_trng_remove(struct platform_device *pdev) { struct hisi_trng *trng = platform_get_drvdata(pdev); @@ -314,8 +314,6 @@ static int hisi_trng_remove(struct platform_device *pdev) if (trng->ver != HISI_TRNG_VER_V1 && atomic_dec_return(&trng_active_devs) == 0) crypto_unregister_rng(&hisi_trng_alg); - - return 0; } static const struct acpi_device_id hisi_trng_acpi_match[] = { @@ -326,7 +324,7 @@ MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match); static struct platform_driver hisi_trng_driver = { .probe = hisi_trng_probe, - .remove = hisi_trng_remove, + .remove_new = hisi_trng_remove, .driver = { .name = "hisi-trng-v2", .acpi_match_table = ACPI_PTR(hisi_trng_acpi_match), diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c index 6608971d10..c650c741a1 100644 --- a/drivers/crypto/hisilicon/zip/zip_crypto.c +++ b/drivers/crypto/hisilicon/zip/zip_crypto.c @@ -16,36 +16,17 @@ #define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0) /* hisi_zip_sqe dw9 */ #define HZIP_REQ_TYPE_M GENMASK(7, 0) -#define HZIP_ALG_TYPE_ZLIB 0x02 -#define HZIP_ALG_TYPE_GZIP 0x03 +#define HZIP_ALG_TYPE_DEFLATE 0x01 #define HZIP_BUF_TYPE_M GENMASK(11, 8) -#define HZIP_PBUFFER 0x0 #define HZIP_SGL 0x1 -#define HZIP_ZLIB_HEAD_SIZE 2 -#define HZIP_GZIP_HEAD_SIZE 10 - -#define GZIP_HEAD_FHCRC_BIT BIT(1) -#define GZIP_HEAD_FEXTRA_BIT BIT(2) -#define GZIP_HEAD_FNAME_BIT BIT(3) -#define GZIP_HEAD_FCOMMENT_BIT BIT(4) - -#define GZIP_HEAD_FLG_SHIFT 3 -#define GZIP_HEAD_FEXTRA_SHIFT 10 -#define GZIP_HEAD_FEXTRA_XLEN 2UL -#define GZIP_HEAD_FHCRC_SIZE 2 - -#define HZIP_GZIP_HEAD_BUF 256 #define HZIP_ALG_PRIORITY 300 #define HZIP_SGL_SGE_NR 10 -#define HZIP_ALG_ZLIB GENMASK(1, 0) -#define HZIP_ALG_GZIP GENMASK(3, 2) +#define HZIP_ALG_DEFLATE GENMASK(5, 4) -static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c}; -static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = { - 0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03 -}; +static DEFINE_MUTEX(zip_algs_lock); +static unsigned int zip_available_devs; enum hisi_zip_alg_type { HZIP_ALG_TYPE_COMP = 0, @@ -59,21 +40,10 @@ enum { }; #define COMP_NAME_TO_TYPE(alg_name) \ - (!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB : \ - !strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0) \ - -#define TO_HEAD_SIZE(req_type) \ - (((req_type) == HZIP_ALG_TYPE_ZLIB) ? sizeof(zlib_head) : \ - ((req_type) == HZIP_ALG_TYPE_GZIP) ? sizeof(gzip_head) : 0) \ - -#define TO_HEAD(req_type) \ - (((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \ - ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : NULL) \ + (!strcmp((alg_name), "deflate") ? 
HZIP_ALG_TYPE_DEFLATE : 0) struct hisi_zip_req { struct acomp_req *req; - u32 sskip; - u32 dskip; struct hisi_acc_hw_sgl *hw_src; struct hisi_acc_hw_sgl *hw_dst; dma_addr_t dma_src; @@ -138,85 +108,8 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); -static u32 get_extra_field_size(const u8 *start) -{ - return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; -} - -static u32 get_name_field_size(const u8 *start) -{ - return strlen(start) + 1; -} - -static u32 get_comment_field_size(const u8 *start) -{ - return strlen(start) + 1; -} - -static u32 __get_gzip_head_size(const u8 *src) -{ - u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT); - u32 size = GZIP_HEAD_FEXTRA_SHIFT; - - if (head_flg & GZIP_HEAD_FEXTRA_BIT) - size += get_extra_field_size(src + size); - if (head_flg & GZIP_HEAD_FNAME_BIT) - size += get_name_field_size(src + size); - if (head_flg & GZIP_HEAD_FCOMMENT_BIT) - size += get_comment_field_size(src + size); - if (head_flg & GZIP_HEAD_FHCRC_BIT) - size += GZIP_HEAD_FHCRC_SIZE; - - return size; -} - -static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl) -{ - char buf[HZIP_GZIP_HEAD_BUF]; - - sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf)); - - return __get_gzip_head_size(buf); -} - -static int add_comp_head(struct scatterlist *dst, u8 req_type) -{ - int head_size = TO_HEAD_SIZE(req_type); - const u8 *head = TO_HEAD(req_type); - int ret; - - ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); - if (unlikely(ret != head_size)) { - pr_err("the head size of buffer is wrong (%d)!\n", ret); - return -ENOMEM; - } - - return head_size; -} - -static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) -{ - if (unlikely(!acomp_req->src || !acomp_req->slen)) - return -EINVAL; - - if (unlikely(req_type == HZIP_ALG_TYPE_GZIP && - acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)) - return -EINVAL; - - switch (req_type) { - case HZIP_ALG_TYPE_ZLIB: - return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB); - case HZIP_ALG_TYPE_GZIP: - return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP); - default: - pr_err("request type does not support!\n"); - return -EINVAL; - } -} - -static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, - struct hisi_zip_qp_ctx *qp_ctx, - size_t head_size, bool is_comp) +static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx, + struct acomp_req *req) { struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct hisi_zip_req *q = req_q->q; @@ -239,14 +132,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, req_cache->req_id = req_id; req_cache->req = req; - if (is_comp) { - req_cache->sskip = 0; - req_cache->dskip = head_size; - } else { - req_cache->sskip = head_size; - req_cache->dskip = 0; - } - return req_cache; } @@ -272,10 +157,8 @@ static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req { struct acomp_req *a_req = req->req; - sqe->input_data_length = a_req->slen - req->sskip; - sqe->dest_avail_out = a_req->dlen - req->dskip; - sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip); - sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip); + sqe->input_data_length = a_req->slen; + sqe->dest_avail_out = a_req->dlen; } static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type) @@ -296,12 +179,7 @@ static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type) sqe->dw9 = val; } -static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, 
struct hisi_zip_req *req) -{ - sqe->dw13 = req->req_id; -} - -static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) +static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) { sqe->dw26 = req->req_id; } @@ -330,8 +208,8 @@ static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe ops->fill_sqe_type(sqe, ops->sqe_type); } -static int hisi_zip_do_work(struct hisi_zip_req *req, - struct hisi_zip_qp_ctx *qp_ctx) +static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx, + struct hisi_zip_req *req) { struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; @@ -383,12 +261,7 @@ err_unmap_input: return ret; } -static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe) -{ - return sqe->dw13; -} - -static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe) +static u32 hisi_zip_get_tag(struct hisi_zip_sqe *sqe) { return sqe->dw26; } @@ -414,8 +287,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) u32 tag = ops->get_tag(sqe); struct hisi_zip_req *req = req_q->q + tag; struct acomp_req *acomp_req = req->req; - u32 status, dlen, head_size; int err = 0; + u32 status; atomic64_inc(&dfx->recv_cnt); status = ops->get_status(sqe); @@ -427,13 +300,10 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) err = -EIO; } - dlen = ops->get_dstlen(sqe); - hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src); hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst); - head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0; - acomp_req->dlen = dlen + head_size; + acomp_req->dlen = ops->get_dstlen(sqe); if (acomp_req->base.complete) acomp_request_complete(acomp_req, err); @@ -447,22 +317,13 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req) struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP]; struct device *dev = &qp_ctx->qp->qm->pdev->dev; struct hisi_zip_req *req; - int head_size; int ret; - /* let's output compression head now */ - head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); - if (unlikely(head_size < 0)) { - dev_err_ratelimited(dev, "failed to add comp head (%d)!\n", - head_size); - return head_size; - } - - req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); + req = hisi_zip_create_req(qp_ctx, acomp_req); if (IS_ERR(req)) return PTR_ERR(req); - ret = hisi_zip_do_work(req, qp_ctx); + ret = hisi_zip_do_work(qp_ctx, req); if (unlikely(ret != -EINPROGRESS)) { dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret); hisi_zip_remove_req(qp_ctx, req); @@ -477,20 +338,13 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req) struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP]; struct device *dev = &qp_ctx->qp->qm->pdev->dev; struct hisi_zip_req *req; - int head_size, ret; - - head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); - if (unlikely(head_size < 0)) { - dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n", - head_size); - return head_size; - } + int ret; - req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false); + req = hisi_zip_create_req(qp_ctx, acomp_req); if (IS_ERR(req)) return PTR_ERR(req); - ret = hisi_zip_do_work(req, qp_ctx); + ret = hisi_zip_do_work(qp_ctx, req); if (unlikely(ret != -EINPROGRESS)) { dev_info_ratelimited(dev, "failed to do decompress (%d)!\n", ret); @@ -527,28 +381,15 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx) hisi_qm_free_qps(&qp_ctx->qp, 1); } -static const struct hisi_zip_sqe_ops 
hisi_zip_ops_v1 = { - .sqe_type = 0, - .fill_addr = hisi_zip_fill_addr, - .fill_buf_size = hisi_zip_fill_buf_size, - .fill_buf_type = hisi_zip_fill_buf_type, - .fill_req_type = hisi_zip_fill_req_type, - .fill_tag = hisi_zip_fill_tag_v1, - .fill_sqe_type = hisi_zip_fill_sqe_type, - .get_tag = hisi_zip_get_tag_v1, - .get_status = hisi_zip_get_status, - .get_dstlen = hisi_zip_get_dstlen, -}; - -static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = { +static const struct hisi_zip_sqe_ops hisi_zip_ops = { .sqe_type = 0x3, .fill_addr = hisi_zip_fill_addr, .fill_buf_size = hisi_zip_fill_buf_size, .fill_buf_type = hisi_zip_fill_buf_type, .fill_req_type = hisi_zip_fill_req_type, - .fill_tag = hisi_zip_fill_tag_v2, + .fill_tag = hisi_zip_fill_tag, .fill_sqe_type = hisi_zip_fill_sqe_type, - .get_tag = hisi_zip_get_tag_v2, + .get_tag = hisi_zip_get_tag, .get_status = hisi_zip_get_status, .get_dstlen = hisi_zip_get_dstlen, }; @@ -584,10 +425,7 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int qp_ctx->zip_dev = hisi_zip; } - if (hisi_zip->qm.ver < QM_HW_V3) - hisi_zip_ctx->ops = &hisi_zip_ops_v1; - else - hisi_zip_ctx->ops = &hisi_zip_ops_v2; + hisi_zip_ctx->ops = &hisi_zip_ops; return 0; } @@ -745,95 +583,67 @@ static void hisi_zip_acomp_exit(struct crypto_acomp *tfm) hisi_zip_ctx_exit(ctx); } -static struct acomp_alg hisi_zip_acomp_zlib = { - .init = hisi_zip_acomp_init, - .exit = hisi_zip_acomp_exit, - .compress = hisi_zip_acompress, - .decompress = hisi_zip_adecompress, - .base = { - .cra_name = "zlib-deflate", - .cra_driver_name = "hisi-zlib-acomp", - .cra_module = THIS_MODULE, - .cra_priority = HZIP_ALG_PRIORITY, - .cra_ctxsize = sizeof(struct hisi_zip_ctx), - } -}; - -static int hisi_zip_register_zlib(struct hisi_qm *qm) -{ - int ret; - - if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB)) - return 0; - - ret = crypto_register_acomp(&hisi_zip_acomp_zlib); - if (ret) - dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret); - - return ret; -} - -static void hisi_zip_unregister_zlib(struct hisi_qm *qm) -{ - if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB)) - return; - - crypto_unregister_acomp(&hisi_zip_acomp_zlib); -} - -static struct acomp_alg hisi_zip_acomp_gzip = { +static struct acomp_alg hisi_zip_acomp_deflate = { .init = hisi_zip_acomp_init, .exit = hisi_zip_acomp_exit, .compress = hisi_zip_acompress, .decompress = hisi_zip_adecompress, .base = { - .cra_name = "gzip", - .cra_driver_name = "hisi-gzip-acomp", + .cra_name = "deflate", + .cra_driver_name = "hisi-deflate-acomp", .cra_module = THIS_MODULE, - .cra_priority = HZIP_ALG_PRIORITY, + .cra_priority = HZIP_ALG_PRIORITY, .cra_ctxsize = sizeof(struct hisi_zip_ctx), } }; -static int hisi_zip_register_gzip(struct hisi_qm *qm) +static int hisi_zip_register_deflate(struct hisi_qm *qm) { int ret; - if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP)) + if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE)) return 0; - ret = crypto_register_acomp(&hisi_zip_acomp_gzip); + ret = crypto_register_acomp(&hisi_zip_acomp_deflate); if (ret) - dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret); + dev_err(&qm->pdev->dev, "failed to register to deflate (%d)!\n", ret); return ret; } -static void hisi_zip_unregister_gzip(struct hisi_qm *qm) +static void hisi_zip_unregister_deflate(struct hisi_qm *qm) { - if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP)) + if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE)) return; - crypto_unregister_acomp(&hisi_zip_acomp_gzip); + crypto_unregister_acomp(&hisi_zip_acomp_deflate); } int 
hisi_zip_register_to_crypto(struct hisi_qm *qm) { int ret = 0; - ret = hisi_zip_register_zlib(qm); - if (ret) - return ret; + mutex_lock(&zip_algs_lock); + if (zip_available_devs++) + goto unlock; - ret = hisi_zip_register_gzip(qm); + ret = hisi_zip_register_deflate(qm); if (ret) - hisi_zip_unregister_zlib(qm); + zip_available_devs--; +unlock: + mutex_unlock(&zip_algs_lock); return ret; } void hisi_zip_unregister_from_crypto(struct hisi_qm *qm) { - hisi_zip_unregister_zlib(qm); - hisi_zip_unregister_gzip(qm); + mutex_lock(&zip_algs_lock); + if (--zip_available_devs) + goto unlock; + + hisi_zip_unregister_deflate(qm); + +unlock: + mutex_unlock(&zip_algs_lock); } diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index cd7ecb2180..403b074688 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -66,6 +66,7 @@ #define HZIP_SQE_SIZE 128 #define HZIP_PF_DEF_Q_NUM 64 #define HZIP_PF_DEF_Q_BASE 0 +#define HZIP_CTX_Q_NUM_DEF 2 #define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000 #define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0) @@ -238,8 +239,8 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = { {ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3}, {ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C}, {ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3}, - {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF}, - {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF}, + {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x0, 0x30}, + {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0x3F}, {ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5}, {ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5}, {ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, @@ -1310,10 +1311,11 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (ret) pci_err(pdev, "failed to init debugfs (%d)!\n", ret); - ret = hisi_qm_alg_register(qm, &zip_devices); + hisi_qm_add_list(qm, &zip_devices); + ret = hisi_qm_alg_register(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF); if (ret < 0) { pci_err(pdev, "failed to register driver to crypto!\n"); - goto err_qm_stop; + goto err_qm_del_list; } if (qm->uacce) { @@ -1335,9 +1337,10 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_qm_alg_unregister: - hisi_qm_alg_unregister(qm, &zip_devices); + hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF); -err_qm_stop: +err_qm_del_list: + hisi_qm_del_list(qm, &zip_devices); hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); @@ -1357,7 +1360,8 @@ static void hisi_zip_remove(struct pci_dev *pdev) hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &zip_devices); - hisi_qm_alg_unregister(qm, &zip_devices); + hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF); + hisi_qm_del_list(qm, &zip_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev, true); |
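
Note on the registration changes above: with this commit, hpre_algs_register(), sec_register_to_crypto() and hisi_zip_register_to_crypto() all guard algorithm registration with a per-driver mutex and a device counter, so the crypto algorithms are registered only once by the first qualifying device and unregistered only when the last device goes away. The sketch below is a minimal, hypothetical illustration of that scheme; the example_* identifiers are placeholders and are not part of the driver.

#include <linux/mutex.h>

int example_do_register(void);		/* placeholder, e.g. crypto_register_acomp() */
void example_do_unregister(void);	/* placeholder, e.g. crypto_unregister_acomp() */

static DEFINE_MUTEX(example_algs_lock);
static unsigned int example_available_devs;

int example_algs_register(void)
{
	int ret = 0;

	mutex_lock(&example_algs_lock);
	/* Later devices only bump the count; the algs are already registered. */
	if (example_available_devs) {
		example_available_devs++;
		goto unlock;
	}

	ret = example_do_register();
	if (!ret)
		example_available_devs++;
unlock:
	mutex_unlock(&example_algs_lock);
	return ret;
}

void example_algs_unregister(void)
{
	mutex_lock(&example_algs_lock);
	/* Only the last remaining device tears the algorithms down. */
	if (--example_available_devs)
		goto unlock;

	example_do_unregister();
unlock:
	mutex_unlock(&example_algs_lock);
}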