author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 18:50:12 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 18:50:12 +0000
commit    | 8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree      | 8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /drivers/crypto/marvell
parent    | Adding debian version 6.7.12-1. (diff)
download  | linux-8665bd53f2f2e27e5511d90428cb3f60e6d0ce15.tar.xz linux-8665bd53f2f2e27e5511d90428cb3f60e6d0ce15.zip
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/crypto/marvell')
22 files changed, 945 insertions, 332 deletions
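For context on the first hunk in drivers/crypto/marvell/cesa/cesa.c below: the clock name is built with snprintf() into a stack buffer, and widening that buffer from 7 to 16 bytes guarantees the "%u" format can never truncate for any 32-bit index ("cesaz" is 5 characters, a u32 needs at most 10 decimal digits, plus the NUL terminator). A minimal userspace sketch of that sizing check follows; it is illustrative only and not taken from the kernel tree:

```c
#include <stdio.h>

/* Illustrative sizing check (not kernel code): "cesaz" is 5 chars, a u32
 * printed with %u needs at most 10 digits, plus the NUL terminator = 16.
 * The old char res_name[7] only fits "cesaz" plus a single digit. */
int main(void)
{
	char res_name[16];
	unsigned int i = 4294967295u;	/* worst case for %u */
	int n = snprintf(res_name, sizeof(res_name), "cesaz%u", i);

	printf("needed %d bytes including NUL, buffer holds %zu\n",
	       n + 1, sizeof(res_name));
	return 0;
}
```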
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c index 5744df30c8..5fd31ba715 100644 --- a/drivers/crypto/marvell/cesa/cesa.c +++ b/drivers/crypto/marvell/cesa/cesa.c @@ -488,7 +488,7 @@ static int mv_cesa_probe(struct platform_device *pdev) for (i = 0; i < caps->nengines; i++) { struct mv_cesa_engine *engine = &cesa->engines[i]; - char res_name[7]; + char res_name[16]; engine->id = i; spin_lock_init(&engine->lock); @@ -509,7 +509,7 @@ static int mv_cesa_probe(struct platform_device *pdev) * Not all platforms can gate the CESA clocks: do not complain * if the clock does not exist. */ - snprintf(res_name, sizeof(res_name), "cesa%d", i); + snprintf(res_name, sizeof(res_name), "cesa%u", i); engine->clk = devm_clk_get(dev, res_name); if (IS_ERR(engine->clk)) { engine->clk = devm_clk_get(dev, NULL); @@ -517,7 +517,7 @@ static int mv_cesa_probe(struct platform_device *pdev) engine->clk = NULL; } - snprintf(res_name, sizeof(res_name), "cesaz%d", i); + snprintf(res_name, sizeof(res_name), "cesaz%u", i); engine->zclk = devm_clk_get(dev, res_name); if (IS_ERR(engine->zclk)) engine->zclk = NULL; diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 1c2c870e88..3c5d577d8f 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -473,12 +473,6 @@ static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm, return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB); } -static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm, - const u8 *key, u32 keylen) -{ - return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB); -} - static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, u32 keylen) { @@ -1352,23 +1346,6 @@ static struct skcipher_alg otx_cpt_skciphers[] = { { .encrypt = otx_cpt_skcipher_encrypt, .decrypt = otx_cpt_skcipher_decrypt, }, { - .base.cra_name = "cfb(aes)", - .base.cra_driver_name = "cpt_cfb_aes", - .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx), - .base.cra_alignmask = 7, - .base.cra_priority = 4001, - .base.cra_module = THIS_MODULE, - - .init = otx_cpt_enc_dec_init, - .ivsize = AES_BLOCK_SIZE, - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = otx_cpt_skcipher_cfb_aes_setkey, - .encrypt = otx_cpt_skcipher_encrypt, - .decrypt = otx_cpt_skcipher_decrypt, -}, { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cpt_cbc_des3_ede", .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c index 93d22b3289..79b4e74804 100644 --- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c +++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c @@ -14,12 +14,14 @@ static struct cpt_hw_ops otx2_hw_ops = { .send_cmd = otx2_cpt_send_cmd, .cpt_get_compcode = otx2_cpt_get_compcode, .cpt_get_uc_compcode = otx2_cpt_get_uc_compcode, + .cpt_sg_info_create = otx2_sg_info_create, }; static struct cpt_hw_ops cn10k_hw_ops = { .send_cmd = cn10k_cpt_send_cmd, .cpt_get_compcode = cn10k_cpt_get_compcode, .cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode, + .cpt_sg_info_create = otx2_sg_info_create, }; static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num, @@ -78,12 +80,9 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf) 
struct pci_dev *pdev = cptvf->pdev; resource_size_t offset, size; - if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) { - cptvf->lfs.ops = &otx2_hw_ops; + if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) return 0; - } - cptvf->lfs.ops = &cn10k_hw_ops; offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM); size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM); /* Map VF LMILINE region */ @@ -96,3 +95,82 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf) return 0; } EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT); + +void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev, + struct cn10k_cpt_errata_ctx *er_ctx) +{ + u64 cptr_dma; + + if (!is_dev_cn10ka_ax(pdev)) + return; + + cptr_dma = er_ctx->cptr_dma & ~(BIT_ULL(60)); + cn10k_cpt_ctx_flush(pdev, cptr_dma, true); + dma_unmap_single(&pdev->dev, cptr_dma, CN10K_CPT_HW_CTX_SIZE, + DMA_BIDIRECTIONAL); + kfree(er_ctx->hw_ctx); +} +EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, CRYPTO_DEV_OCTEONTX2_CPT); + +void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz) +{ + hctx->w0.aop_valid = 1; + hctx->w0.ctx_hdr_sz = 0; + hctx->w0.ctx_sz = ctx_sz; + hctx->w0.ctx_push_sz = 1; +} +EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, CRYPTO_DEV_OCTEONTX2_CPT); + +int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev, + struct cn10k_cpt_errata_ctx *er_ctx) +{ + union cn10k_cpt_hw_ctx *hctx; + u64 cptr_dma; + + er_ctx->cptr_dma = 0; + er_ctx->hw_ctx = NULL; + + if (!is_dev_cn10ka_ax(pdev)) + return 0; + + hctx = kmalloc(CN10K_CPT_HW_CTX_SIZE, GFP_KERNEL); + if (unlikely(!hctx)) + return -ENOMEM; + cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE, + DMA_BIDIRECTIONAL); + + cn10k_cpt_hw_ctx_set(hctx, 1); + er_ctx->hw_ctx = hctx; + er_ctx->cptr_dma = cptr_dma | BIT_ULL(60); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, CRYPTO_DEV_OCTEONTX2_CPT); + +void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval) +{ + struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev); + struct otx2_cptlfs_info *lfs = &cptvf->lfs; + u64 reg; + + reg = (uintptr_t)cptr >> 7; + if (inval) + reg = reg | BIT_ULL(46); + + otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot, + OTX2_CPT_LF_CTX_FLUSH, reg); + /* Make sure that the FLUSH operation is complete */ + wmb(); + otx2_cpt_read64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot, + OTX2_CPT_LF_CTX_ERR); +} +EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, CRYPTO_DEV_OCTEONTX2_CPT); + +void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf) +{ + if (test_bit(CN10K_LMTST, &cptvf->cap_flag)) + cptvf->lfs.ops = &cn10k_hw_ops; + else + cptvf->lfs.ops = &otx2_hw_ops; +} +EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, CRYPTO_DEV_OCTEONTX2_CPT); diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h index aaefc7e38e..92be3ecf57 100644 --- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h +++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h @@ -8,6 +8,26 @@ #include "otx2_cptpf.h" #include "otx2_cptvf.h" +#define CN10K_CPT_HW_CTX_SIZE 256 + +union cn10k_cpt_hw_ctx { + u64 u; + struct { + u64 reserved_0_47:48; + u64 ctx_push_sz:7; + u64 reserved_55:1; + u64 ctx_hdr_sz:2; + u64 aop_valid:1; + u64 reserved_59:1; + u64 ctx_sz:4; + } w0; +}; + +struct cn10k_cpt_errata_ctx { + union cn10k_cpt_hw_ctx *hw_ctx; + u64 cptr_dma; +}; + static inline u8 cn10k_cpt_get_compcode(union otx2_cpt_res_s *result) { return ((struct cn10k_cpt_res_s *)result)->compcode; @@ -30,5 +50,12 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result) int 
cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf); int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf); +void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval); +int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev, + struct cn10k_cpt_errata_ctx *er_ctx); +void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev, + struct cn10k_cpt_errata_ctx *er_ctx); +void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz); +void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf); #endif /* __CN10K_CPTLF_H */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h index 46b778bbbe..c5b7c57574 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h @@ -56,7 +56,11 @@ struct otx2_cpt_rx_inline_lf_cfg { u16 param2; u16 opcode; u32 credit; + u32 credit_th; + u16 bpid; u32 reserved; + u8 ctx_ilen_valid : 1; + u8 ctx_ilen : 7; }; /* @@ -102,7 +106,10 @@ union otx2_cpt_eng_caps { u64 kasumi:1; u64 des:1; u64 crc:1; - u64 reserved_14_63:50; + u64 mmul:1; + u64 reserved_15_33:19; + u64 pdcp_chain:1; + u64 reserved_35_63:29; }; }; @@ -145,6 +152,35 @@ static inline bool is_dev_otx2(struct pci_dev *pdev) return false; } +static inline bool is_dev_cn10ka(struct pci_dev *pdev) +{ + return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A; +} + +static inline bool is_dev_cn10ka_ax(struct pci_dev *pdev) +{ + if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && + ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 || + (pdev->revision & 0xff) == 0x51)) + return true; + + return false; +} + +static inline bool is_dev_cn10kb(struct pci_dev *pdev) +{ + return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_B; +} + +static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev) +{ + if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A && + (pdev->revision & 0xFF) == 0x54) + return true; + + return false; +} + static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev, unsigned long *cap_flag) { @@ -154,6 +190,21 @@ static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev, } } +static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev) +{ + if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev)) + return true; + + return false; +} + +static inline bool cpt_feature_sgv2(struct pci_dev *pdev) +{ + if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev)) + return true; + + return false; +} int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev); int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev); @@ -171,5 +222,6 @@ int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs); int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox); +int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot); #endif /* __OTX2_CPT_COMMON_H */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c index a2aba0b0d6..d2b8d26db9 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c @@ -24,10 +24,45 @@ static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id, static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { + ctx->val.vstr[0] = '\0'; + + return 0; +} + +static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id, + struct 
devlink_param_gset_ctx *ctx) +{ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl); struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf; + struct pci_dev *pdev = cptpf->pdev; + u64 reg_val = 0; + + otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, ®_val, + BLKADDR_CPT0); + ctx->val.vu8 = (reg_val >> 18) & 0x1; + + return 0; +} - otx2_cpt_print_uc_dbg_info(cptpf); +static int otx2_cpt_dl_t106_mode_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl); + struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf; + struct pci_dev *pdev = cptpf->pdev; + u64 reg_val = 0; + + if (cptpf->enabled_vfs != 0 || cptpf->eng_grps.is_grps_created) + return -EPERM; + + if (cpt_feature_sgv2(pdev)) { + otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, + ®_val, BLKADDR_CPT0); + reg_val &= ~(0x1ULL << 18); + reg_val |= ((u64)ctx->val.vu8 & 0x1) << 18; + return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, + CPT_AF_CTL, reg_val, BLKADDR_CPT0); + } return 0; } @@ -36,6 +71,7 @@ enum otx2_cpt_dl_param_id { OTX2_CPT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE, OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE, + OTX2_CPT_DEVLINK_PARAM_ID_T106_MODE, }; static const struct devlink_param otx2_cpt_dl_params[] = { @@ -49,6 +85,11 @@ static const struct devlink_param otx2_cpt_dl_params[] = { BIT(DEVLINK_PARAM_CMODE_RUNTIME), otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_delete, NULL), + DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_T106_MODE, + "t106_mode", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + otx2_cpt_dl_t106_mode_get, otx2_cpt_dl_t106_mode_set, + NULL), }; static int otx2_cpt_dl_info_firmware_version_put(struct devlink_info_req *req, @@ -120,7 +161,6 @@ int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf) devlink_free(dl); return ret; } - devlink_register(dl); return 0; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h index 6f947978e4..7e746a4def 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h @@ -13,6 +13,9 @@ #define CN10K_CPT_PCI_PF_DEVICE_ID 0xA0F2 #define CN10K_CPT_PCI_VF_DEVICE_ID 0xA0F3 +#define CPT_PCI_SUBSYS_DEVID_CN10K_A 0xB900 +#define CPT_PCI_SUBSYS_DEVID_CN10K_B 0xBD00 + /* Mailbox interrupts offset */ #define OTX2_CPT_PF_MBOX_INT 6 #define OTX2_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a)) @@ -99,6 +102,9 @@ #define OTX2_CPT_LF_Q_INST_PTR (0x110) #define OTX2_CPT_LF_Q_GRP_PTR (0x120) #define OTX2_CPT_LF_NQX(a) (0x400 | (a) << 3) +#define OTX2_CPT_LF_CTX_CTL (0x500) +#define OTX2_CPT_LF_CTX_FLUSH (0x510) +#define OTX2_CPT_LF_CTX_ERR (0x520) #define OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT 20 /* LMT LF registers */ #define OTX2_CPT_LMT_LFBASE BIT_ULL(OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT) @@ -467,7 +473,8 @@ union otx2_cptx_af_lf_ctrl { u64 cont_err:1; u64 reserved_11_15:5; u64 nixtx_en:1; - u64 reserved_17_47:31; + u64 ctx_ilen:3; + u64 reserved_17_47:28; u64 grp:8; u64 reserved_56_63:8; } s; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c index 273ee5352a..5be0103c1f 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c @@ -229,3 +229,29 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox) return otx2_mbox_check_rsp_msgs(mbox, 0); } EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, 
CRYPTO_DEV_OCTEONTX2_CPT); + +int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot) +{ + struct otx2_mbox *mbox = lfs->mbox; + struct pci_dev *pdev = lfs->pdev; + struct cpt_lf_rst_req *req; + int ret; + + req = (struct cpt_lf_rst_req *)otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), + sizeof(struct msg_rsp)); + if (!req) { + dev_err(&pdev->dev, "RVU MBOX failed to get message.\n"); + return -EFAULT; + } + + req->hdr.id = MBOX_MSG_CPT_LF_RESET; + req->hdr.sig = OTX2_MBOX_REQ_SIG; + req->hdr.pcifunc = 0; + req->slot = slot; + ret = otx2_cpt_send_mbox_msg(mbox, pdev); + if (ret) + return ret; + + return ret; +} +EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, CRYPTO_DEV_OCTEONTX2_CPT); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h index dbb1ee746f..e27e849b01 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h @@ -27,6 +27,13 @@ #define OTX2_CPT_MAX_REQ_SIZE 65535 +#define SG_COMPS_MAX 4 +#define SGV2_COMPS_MAX 3 + +#define SG_COMP_3 3 +#define SG_COMP_2 2 +#define SG_COMP_1 1 + union otx2_cpt_opcode { u16 flags; struct { @@ -40,6 +47,8 @@ struct otx2_cptvf_request { u32 param2; u16 dlen; union otx2_cpt_opcode opcode; + dma_addr_t cptr_dma; + void *cptr; }; /* @@ -143,6 +152,8 @@ struct otx2_cpt_inst_info { unsigned long time_in; u32 dlen; u32 dma_len; + u64 gthr_sz; + u64 sctr_sz; u8 extra_time; }; @@ -157,6 +168,16 @@ struct otx2_cpt_sglist_component { __be64 ptr3; }; +struct cn10kb_cpt_sglist_component { + u16 len0; + u16 len1; + u16 len2; + u16 valid_segs; + u64 ptr0; + u64 ptr1; + u64 ptr2; +}; + static inline void otx2_cpt_info_destroy(struct pci_dev *pdev, struct otx2_cpt_inst_info *info) { @@ -188,6 +209,283 @@ static inline void otx2_cpt_info_destroy(struct pci_dev *pdev, kfree(info); } +static inline int setup_sgio_components(struct pci_dev *pdev, + struct otx2_cpt_buf_ptr *list, + int buf_count, u8 *buffer) +{ + struct otx2_cpt_sglist_component *sg_ptr; + int components; + int i, j; + + if (unlikely(!list)) { + dev_err(&pdev->dev, "Input list pointer is NULL\n"); + return -EINVAL; + } + + for (i = 0; i < buf_count; i++) { + if (unlikely(!list[i].vptr)) + continue; + list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr, + list[i].size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) { + dev_err(&pdev->dev, "Dma mapping failed\n"); + goto sg_cleanup; + } + } + components = buf_count / SG_COMPS_MAX; + sg_ptr = (struct otx2_cpt_sglist_component *)buffer; + for (i = 0; i < components; i++) { + sg_ptr->len0 = cpu_to_be16(list[i * SG_COMPS_MAX + 0].size); + sg_ptr->len1 = cpu_to_be16(list[i * SG_COMPS_MAX + 1].size); + sg_ptr->len2 = cpu_to_be16(list[i * SG_COMPS_MAX + 2].size); + sg_ptr->len3 = cpu_to_be16(list[i * SG_COMPS_MAX + 3].size); + sg_ptr->ptr0 = cpu_to_be64(list[i * SG_COMPS_MAX + 0].dma_addr); + sg_ptr->ptr1 = cpu_to_be64(list[i * SG_COMPS_MAX + 1].dma_addr); + sg_ptr->ptr2 = cpu_to_be64(list[i * SG_COMPS_MAX + 2].dma_addr); + sg_ptr->ptr3 = cpu_to_be64(list[i * SG_COMPS_MAX + 3].dma_addr); + sg_ptr++; + } + components = buf_count % SG_COMPS_MAX; + + switch (components) { + case SG_COMP_3: + sg_ptr->len2 = cpu_to_be16(list[i * SG_COMPS_MAX + 2].size); + sg_ptr->ptr2 = cpu_to_be64(list[i * SG_COMPS_MAX + 2].dma_addr); + fallthrough; + case SG_COMP_2: + sg_ptr->len1 = cpu_to_be16(list[i * SG_COMPS_MAX + 1].size); + sg_ptr->ptr1 = cpu_to_be64(list[i * SG_COMPS_MAX + 1].dma_addr); + 
fallthrough; + case SG_COMP_1: + sg_ptr->len0 = cpu_to_be16(list[i * SG_COMPS_MAX + 0].size); + sg_ptr->ptr0 = cpu_to_be64(list[i * SG_COMPS_MAX + 0].dma_addr); + break; + default: + break; + } + return 0; + +sg_cleanup: + for (j = 0; j < i; j++) { + if (list[j].dma_addr) { + dma_unmap_single(&pdev->dev, list[j].dma_addr, + list[j].size, DMA_BIDIRECTIONAL); + } + + list[j].dma_addr = 0; + } + return -EIO; +} + +static inline int sgv2io_components_setup(struct pci_dev *pdev, + struct otx2_cpt_buf_ptr *list, + int buf_count, u8 *buffer) +{ + struct cn10kb_cpt_sglist_component *sg_ptr; + int components; + int i, j; + + if (unlikely(!list)) { + dev_err(&pdev->dev, "Input list pointer is NULL\n"); + return -EFAULT; + } + + for (i = 0; i < buf_count; i++) { + if (unlikely(!list[i].vptr)) + continue; + list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr, + list[i].size, + DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) { + dev_err(&pdev->dev, "Dma mapping failed\n"); + goto sg_cleanup; + } + } + components = buf_count / SGV2_COMPS_MAX; + sg_ptr = (struct cn10kb_cpt_sglist_component *)buffer; + for (i = 0; i < components; i++) { + sg_ptr->len0 = list[i * SGV2_COMPS_MAX + 0].size; + sg_ptr->len1 = list[i * SGV2_COMPS_MAX + 1].size; + sg_ptr->len2 = list[i * SGV2_COMPS_MAX + 2].size; + sg_ptr->ptr0 = list[i * SGV2_COMPS_MAX + 0].dma_addr; + sg_ptr->ptr1 = list[i * SGV2_COMPS_MAX + 1].dma_addr; + sg_ptr->ptr2 = list[i * SGV2_COMPS_MAX + 2].dma_addr; + sg_ptr->valid_segs = SGV2_COMPS_MAX; + sg_ptr++; + } + components = buf_count % SGV2_COMPS_MAX; + + sg_ptr->valid_segs = components; + switch (components) { + case SG_COMP_2: + sg_ptr->len1 = list[i * SGV2_COMPS_MAX + 1].size; + sg_ptr->ptr1 = list[i * SGV2_COMPS_MAX + 1].dma_addr; + fallthrough; + case SG_COMP_1: + sg_ptr->len0 = list[i * SGV2_COMPS_MAX + 0].size; + sg_ptr->ptr0 = list[i * SGV2_COMPS_MAX + 0].dma_addr; + break; + default: + break; + } + return 0; + +sg_cleanup: + for (j = 0; j < i; j++) { + if (list[j].dma_addr) { + dma_unmap_single(&pdev->dev, list[j].dma_addr, + list[j].size, DMA_BIDIRECTIONAL); + } + + list[j].dma_addr = 0; + } + return -EIO; +} + +static inline struct otx2_cpt_inst_info * +cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req, + gfp_t gfp) +{ + u32 dlen = 0, g_len, sg_len, info_len; + int align = OTX2_CPT_DMA_MINALIGN; + struct otx2_cpt_inst_info *info; + u16 g_sz_bytes, s_sz_bytes; + u32 total_mem_len; + int i; + + g_sz_bytes = ((req->in_cnt + 2) / 3) * + sizeof(struct cn10kb_cpt_sglist_component); + s_sz_bytes = ((req->out_cnt + 2) / 3) * + sizeof(struct cn10kb_cpt_sglist_component); + + g_len = ALIGN(g_sz_bytes, align); + sg_len = ALIGN(g_len + s_sz_bytes, align); + info_len = ALIGN(sizeof(*info), align); + total_mem_len = sg_len + info_len + sizeof(union otx2_cpt_res_s); + + info = kzalloc(total_mem_len, gfp); + if (unlikely(!info)) + return NULL; + + for (i = 0; i < req->in_cnt; i++) + dlen += req->in[i].size; + + info->dlen = dlen; + info->in_buffer = (u8 *)info + info_len; + info->gthr_sz = req->in_cnt; + info->sctr_sz = req->out_cnt; + + /* Setup gather (input) components */ + if (sgv2io_components_setup(pdev, req->in, req->in_cnt, + info->in_buffer)) { + dev_err(&pdev->dev, "Failed to setup gather list\n"); + goto destroy_info; + } + + if (sgv2io_components_setup(pdev, req->out, req->out_cnt, + &info->in_buffer[g_len])) { + dev_err(&pdev->dev, "Failed to setup scatter list\n"); + goto destroy_info; + } + + info->dma_len = total_mem_len - 
info_len; + info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer, + info->dma_len, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) { + dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n"); + goto destroy_info; + } + info->rptr_baddr = info->dptr_baddr + g_len; + /* + * Get buffer for union otx2_cpt_res_s response + * structure and its physical address + */ + info->completion_addr = info->in_buffer + sg_len; + info->comp_baddr = info->dptr_baddr + sg_len; + + return info; + +destroy_info: + otx2_cpt_info_destroy(pdev, info); + return NULL; +} + +/* SG list header size in bytes */ +#define SG_LIST_HDR_SIZE 8 +static inline struct otx2_cpt_inst_info * +otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req, + gfp_t gfp) +{ + int align = OTX2_CPT_DMA_MINALIGN; + struct otx2_cpt_inst_info *info; + u32 dlen, align_dlen, info_len; + u16 g_sz_bytes, s_sz_bytes; + u32 total_mem_len; + + if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT || + req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) { + dev_err(&pdev->dev, "Error too many sg components\n"); + return NULL; + } + + g_sz_bytes = ((req->in_cnt + 3) / 4) * + sizeof(struct otx2_cpt_sglist_component); + s_sz_bytes = ((req->out_cnt + 3) / 4) * + sizeof(struct otx2_cpt_sglist_component); + + dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; + align_dlen = ALIGN(dlen, align); + info_len = ALIGN(sizeof(*info), align); + total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s); + + info = kzalloc(total_mem_len, gfp); + if (unlikely(!info)) + return NULL; + + info->dlen = dlen; + info->in_buffer = (u8 *)info + info_len; + + ((u16 *)info->in_buffer)[0] = req->out_cnt; + ((u16 *)info->in_buffer)[1] = req->in_cnt; + ((u16 *)info->in_buffer)[2] = 0; + ((u16 *)info->in_buffer)[3] = 0; + cpu_to_be64s((u64 *)info->in_buffer); + + /* Setup gather (input) components */ + if (setup_sgio_components(pdev, req->in, req->in_cnt, + &info->in_buffer[8])) { + dev_err(&pdev->dev, "Failed to setup gather list\n"); + goto destroy_info; + } + + if (setup_sgio_components(pdev, req->out, req->out_cnt, + &info->in_buffer[8 + g_sz_bytes])) { + dev_err(&pdev->dev, "Failed to setup scatter list\n"); + goto destroy_info; + } + + info->dma_len = total_mem_len - info_len; + info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer, + info->dma_len, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) { + dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n"); + goto destroy_info; + } + /* + * Get buffer for union otx2_cpt_res_s response + * structure and its physical address + */ + info->completion_addr = info->in_buffer + align_dlen; + info->comp_baddr = info->dptr_baddr + align_dlen; + + return info; + +destroy_info: + otx2_cpt_info_destroy(pdev, info); + return NULL; +} + struct otx2_cptlf_wqe; int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req, int cpu_num); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c index e4bd3f030c..b52728e3c0 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c @@ -106,6 +106,32 @@ static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs, return ret; } +static int cptlf_set_ctx_ilen(struct otx2_cptlfs_info *lfs, int ctx_ilen) +{ + union otx2_cptx_af_lf_ctrl lf_ctrl; + struct otx2_cptlf_info *lf; + int slot, ret = 0; + + for (slot = 0; slot < lfs->lfs_num; slot++) { + lf = &lfs->lf[slot]; + + ret = 
otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev, + CPT_AF_LFX_CTL(lf->slot), + &lf_ctrl.u, lfs->blkaddr); + if (ret) + return ret; + + lf_ctrl.s.ctx_ilen = ctx_ilen; + + ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev, + CPT_AF_LFX_CTL(lf->slot), + lf_ctrl.u, lfs->blkaddr); + if (ret) + return ret; + } + return ret; +} + static void cptlf_hw_init(struct otx2_cptlfs_info *lfs) { /* Disable instruction queues */ @@ -151,26 +177,14 @@ static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable) irq_misc.u); } -static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs) +static void cptlf_set_done_intrs(struct otx2_cptlfs_info *lfs, u8 enable) { + u64 reg = enable ? OTX2_CPT_LF_DONE_INT_ENA_W1S : + OTX2_CPT_LF_DONE_INT_ENA_W1C; int slot; - /* Enable done interrupts */ for (slot = 0; slot < lfs->lfs_num; slot++) - otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, - OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1); - /* Enable Misc interrupts */ - cptlf_set_misc_intrs(lfs, true); -} - -static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs) -{ - int slot; - - for (slot = 0; slot < lfs->lfs_num; slot++) - otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, - OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1); - cptlf_set_misc_intrs(lfs, false); + otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg, 0x1); } static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf) @@ -257,24 +271,44 @@ static irqreturn_t cptlf_done_intr_handler(int irq, void *arg) return IRQ_HANDLED; } -void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs) +void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs) { - int i, offs, vector; + int i, irq_offs, vector; + irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC; for (i = 0; i < lfs->lfs_num; i++) { - for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) { - if (!lfs->lf[i].is_irq_reg[offs]) - continue; + if (!lfs->lf[i].is_irq_reg[irq_offs]) + continue; - vector = pci_irq_vector(lfs->pdev, - lfs->lf[i].msix_offset + offs); - free_irq(vector, &lfs->lf[i]); - lfs->lf[i].is_irq_reg[offs] = false; - } + vector = pci_irq_vector(lfs->pdev, + lfs->lf[i].msix_offset + irq_offs); + free_irq(vector, &lfs->lf[i]); + lfs->lf[i].is_irq_reg[irq_offs] = false; } - cptlf_disable_intrs(lfs); + + cptlf_set_misc_intrs(lfs, false); } -EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts, +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts, + CRYPTO_DEV_OCTEONTX2_CPT); + +void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs) +{ + int i, irq_offs, vector; + + irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE; + for (i = 0; i < lfs->lfs_num; i++) { + if (!lfs->lf[i].is_irq_reg[irq_offs]) + continue; + + vector = pci_irq_vector(lfs->pdev, + lfs->lf[i].msix_offset + irq_offs); + free_irq(vector, &lfs->lf[i]); + lfs->lf[i].is_irq_reg[irq_offs] = false; + } + + cptlf_set_done_intrs(lfs, false); +} +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts, CRYPTO_DEV_OCTEONTX2_CPT); static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs, @@ -296,34 +330,53 @@ static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs, return ret; } -int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs) +int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs) { + bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1); int irq_offs, ret, i; + irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC; for (i = 0; i < lfs->lfs_num; i++) { - irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC; - snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i); + 
snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPT%dLF Misc%d", + is_cpt1, i); ret = cptlf_do_register_interrrupts(lfs, i, irq_offs, cptlf_misc_intr_handler); if (ret) goto free_irq; + } + cptlf_set_misc_intrs(lfs, true); + return 0; - irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE; - snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d", - i); +free_irq: + otx2_cptlf_unregister_misc_interrupts(lfs); + return ret; +} +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts, + CRYPTO_DEV_OCTEONTX2_CPT); + +int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs) +{ + bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1); + int irq_offs, ret, i; + + irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE; + for (i = 0; i < lfs->lfs_num; i++) { + snprintf(lfs->lf[i].irq_name[irq_offs], 32, + "OTX2_CPT%dLF Done%d", is_cpt1, i); ret = cptlf_do_register_interrrupts(lfs, i, irq_offs, cptlf_done_intr_handler); if (ret) goto free_irq; } - cptlf_enable_intrs(lfs); + cptlf_set_done_intrs(lfs, true); return 0; free_irq: - otx2_cptlf_unregister_interrupts(lfs); + otx2_cptlf_unregister_done_interrupts(lfs); return ret; } -EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT); +EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts, + CRYPTO_DEV_OCTEONTX2_CPT); void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs) { @@ -416,6 +469,12 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri, if (ret) goto free_iq; + if (lfs->ctx_ilen_ovrd) { + ret = cptlf_set_ctx_ilen(lfs, lfs->ctx_ilen); + if (ret) + goto free_iq; + } + return 0; free_iq: diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h index 5302fe3d0e..bd8604be29 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h @@ -5,6 +5,7 @@ #define __OTX2_CPTLF_H #include <linux/soc/marvell/octeontx2/asm.h> +#include <linux/bitfield.h> #include <mbox.h> #include <rvu.h> #include "otx2_cpt_common.h" @@ -99,6 +100,9 @@ struct cpt_hw_ops { struct otx2_cptlf_info *lf); u8 (*cpt_get_compcode)(union otx2_cpt_res_s *result); u8 (*cpt_get_uc_compcode)(union otx2_cpt_res_s *result); + struct otx2_cpt_inst_info * + (*cpt_sg_info_create)(struct pci_dev *pdev, struct otx2_cpt_req_info *req, + gfp_t gfp); }; struct otx2_cptlfs_info { @@ -116,6 +120,9 @@ struct otx2_cptlfs_info { u8 kvf_limits; /* Kernel crypto limits */ atomic_t state; /* LF's state. 
started/reset */ int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */ + int global_slot; /* Global slot across the blocks */ + u8 ctx_ilen; + u8 ctx_ilen_ovrd; }; static inline void otx2_cpt_free_instruction_queues( @@ -203,48 +210,71 @@ static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs) otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]); } +#define INFLIGHT GENMASK_ULL(8, 0) +#define GRB_CNT GENMASK_ULL(39, 32) +#define GWB_CNT GENMASK_ULL(47, 40) +#define XQ_XOR GENMASK_ULL(63, 63) +#define DQPTR GENMASK_ULL(19, 0) +#define NQPTR GENMASK_ULL(51, 32) + static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf) { - union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 }; - union otx2_cptx_lf_inprog lf_inprog; + void __iomem *reg_base = lf->lfs->reg_base; + struct pci_dev *pdev = lf->lfs->pdev; u8 blkaddr = lf->lfs->blkaddr; - int timeout = 20; + int timeout = 1000000; + u64 inprog, inst_ptr; + u64 slot = lf->slot; + u64 qsize, pending; + int i = 0; /* Disable instructions enqueuing */ - otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot, - OTX2_CPT_LF_CTL, lf_ctl.u); + otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_CTL, 0x0); + + inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG); + inprog |= BIT_ULL(16); + otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG, inprog); - /* Wait for instruction queue to become empty */ + qsize = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_Q_SIZE) & 0x7FFF; + do { + inst_ptr = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_Q_INST_PTR); + pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) + + FIELD_GET(NQPTR, inst_ptr) - FIELD_GET(DQPTR, inst_ptr); + udelay(1); + timeout--; + } while ((pending != 0) && (timeout != 0)); + + if (timeout == 0) + dev_warn(&pdev->dev, "TIMEOUT: CPT poll on pending instructions\n"); + + timeout = 1000000; + /* Wait for CPT queue to become execution-quiescent */ do { - lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, - lf->slot, OTX2_CPT_LF_INPROG); - if (!lf_inprog.s.inflight) - break; - - usleep_range(10000, 20000); - if (timeout-- < 0) { - dev_err(&lf->lfs->pdev->dev, - "Error LF %d is still busy.\n", lf->slot); - break; + inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG); + + if ((FIELD_GET(INFLIGHT, inprog) == 0) && + (FIELD_GET(GRB_CNT, inprog) == 0)) { + i++; + } else { + i = 0; + timeout--; } + } while ((timeout != 0) && (i < 10)); - } while (1); - - /* - * Disable executions in the LF's queue, - * the queue should be empty at this point - */ - lf_inprog.s.eena = 0x0; - otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot, - OTX2_CPT_LF_INPROG, lf_inprog.u); + if (timeout == 0) + dev_warn(&pdev->dev, "TIMEOUT: CPT poll on inflight count\n"); + /* Wait for 2 us to flush all queue writes to memory */ + udelay(2); } static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs) { int slot; - for (slot = 0; slot < lfs->lfs_num; slot++) + for (slot = 0; slot < lfs->lfs_num; slot++) { otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]); + otx2_cpt_lf_reset_msg(lfs, lfs->global_slot + slot); + } } static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf, @@ -282,6 +312,19 @@ static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf, OTX2_CPT_LF_INPROG, lf_inprog.u); } +static inline void otx2_cptlf_set_ctx_flr_flush(struct otx2_cptlf_info *lf) +{ + u8 blkaddr = lf->lfs->blkaddr; + u64 val; + + val = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot, + OTX2_CPT_LF_CTX_CTL); + val 
|= BIT_ULL(0); + + otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot, + OTX2_CPT_LF_CTX_CTL, val); +} + static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf) { otx2_cptlf_set_iqueue_exec(lf, true); @@ -297,6 +340,10 @@ static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs) int slot; for (slot = 0; slot < lfs->lfs_num; slot++) { + /* Enable flush on FLR for Errata */ + if (is_dev_cn10kb(lfs->pdev)) + otx2_cptlf_set_ctx_flr_flush(&lfs->lf[slot]); + otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]); otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]); } @@ -382,8 +429,10 @@ static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs, int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri, int lfs_num); void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs); -int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs); -void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs); +int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs); +int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs); +void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs); +void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs); void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs); int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h index a209ec5af3..e5859a1e1c 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h @@ -71,4 +71,8 @@ void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work); irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg); void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work); +int otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf, + struct otx2_cptlfs_info *lfs, u8 egrp, int num_lfs); +void otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs); + #endif /* __OTX2_CPTPF_H */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c index e34223daa3..400e36d990 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c @@ -14,6 +14,8 @@ #define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver" #define CPT_UC_RID_CN9K_B0 1 +#define CPT_UC_RID_CN10K_A 4 +#define CPT_UC_RID_CN10K_B 5 static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf, int num_vfs) @@ -587,43 +589,22 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf) return 0; } -static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr) +static void cptpf_get_rid(struct pci_dev *pdev, struct otx2_cptpf_dev *cptpf) { - int timeout = 10, ret; - u64 reg = 0; + struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps; + u64 reg_val = 0x0; - ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_BLK_RST, 0x1, blkaddr); - if (ret) - return ret; - - do { - ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, - CPT_AF_BLK_RST, ®, blkaddr); - if (ret) - return ret; - - if (!((reg >> 63) & 0x1)) - break; - - usleep_range(10000, 20000); - if (timeout-- < 0) - return -EBUSY; - } while (1); - - return ret; -} - -static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf) -{ - int ret = 0; - - if (cptpf->has_cpt1) { - ret = cptx_device_reset(cptpf, BLKADDR_CPT1); - if (ret) - return ret; + if (is_dev_otx2(pdev)) { + eng_grps->rid 
= pdev->revision; + return; } - return cptx_device_reset(cptpf, BLKADDR_CPT0); + otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, ®_val, + BLKADDR_CPT0); + if ((cpt_feature_sgv2(pdev) && (reg_val & BIT_ULL(18))) || + is_dev_cn10ka_ax(pdev)) + eng_grps->rid = CPT_UC_RID_CN10K_A; + else if (cpt_feature_sgv2(pdev)) + eng_grps->rid = CPT_UC_RID_CN10K_B; } static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf) @@ -643,10 +624,6 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf) /* check if 'implemented' bit is set for block BLKADDR_CPT1 */ cptpf_check_block_implemented(cptpf); - /* Reset the CPT PF device */ - ret = cptpf_device_reset(cptpf); - if (ret) - return ret; /* Get number of SE, IE and AE engines */ ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev, @@ -701,6 +678,7 @@ static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs) if (ret) goto destroy_flr; + cptpf_get_rid(pdev, cptpf); /* Get CPT HW capabilities using LOAD_FVC operation. */ ret = otx2_cpt_discover_eng_capabilities(cptpf); if (ret) @@ -744,7 +722,7 @@ static int otx2_cptpf_probe(struct pci_dev *pdev, { struct device *dev = &pdev->dev; struct otx2_cptpf_dev *cptpf; - int err; + int err, num_vec; cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL); if (!cptpf) @@ -779,8 +757,13 @@ static int otx2_cptpf_probe(struct pci_dev *pdev, if (err) goto clear_drvdata; - err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT, - RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX); + num_vec = pci_msix_vec_count(cptpf->pdev); + if (num_vec <= 0) { + err = -EINVAL; + goto clear_drvdata; + } + + err = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSIX); if (err < 0) { dev_err(dev, "Request for %d msix vectors failed\n", RVU_PF_INT_VEC_CNT); @@ -797,6 +780,7 @@ static int otx2_cptpf_probe(struct pci_dev *pdev, goto destroy_afpf_mbox; cptpf->max_vfs = pci_sriov_get_totalvfs(pdev); + cptpf->kvf_limits = 1; err = cn10k_cptpf_lmtst_init(cptpf); if (err) @@ -844,6 +828,14 @@ static void otx2_cptpf_remove(struct pci_dev *pdev) cptpf_sriov_disable(pdev); otx2_cpt_unregister_dl(cptpf); + + /* Cleanup Inline CPT LF's if attached */ + if (cptpf->lfs.lfs_num) + otx2_inline_cptlf_cleanup(&cptpf->lfs); + + if (cptpf->cpt1_lfs.lfs_num) + otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs); + /* Delete sysfs entry created for kernel VF limits */ sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group); /* Cleanup engine groups */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c index 480b3720f1..ec1ac7e836 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c @@ -78,7 +78,7 @@ static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf, rsp->hdr.sig = OTX2_MBOX_RSP_SIG; rsp->hdr.pcifunc = req->pcifunc; rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION; - rsp->cpt_revision = cptpf->pdev->revision; + rsp->cpt_revision = cptpf->eng_grps.rid; memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps)); return 0; @@ -171,6 +171,8 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp, nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG; nix_req->hdr.sig = OTX2_MBOX_REQ_SIG; nix_req->enable = 1; + nix_req->credit_th = req->credit_th; + nix_req->bpid = req->bpid; if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS) nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1; else @@ -197,12 +199,53 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp, return 
send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0); } +int +otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf, + struct otx2_cptlfs_info *lfs, u8 egrp, int num_lfs) +{ + int ret; + + ret = otx2_cptlf_init(lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO, 1); + if (ret) { + dev_err(&cptpf->pdev->dev, + "LF configuration failed for RX inline ipsec.\n"); + return ret; + } + + /* Get msix offsets for attached LFs */ + ret = otx2_cpt_msix_offset_msg(lfs); + if (ret) + goto cleanup_lf; + + /* Register for CPT LF Misc interrupts */ + ret = otx2_cptlf_register_misc_interrupts(lfs); + if (ret) + goto free_irq; + + return 0; +free_irq: + otx2_cptlf_unregister_misc_interrupts(lfs); +cleanup_lf: + otx2_cptlf_shutdown(lfs); + return ret; +} + +void +otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs) +{ + /* Unregister misc interrupt */ + otx2_cptlf_unregister_misc_interrupts(lfs); + + /* Cleanup LFs */ + otx2_cptlf_shutdown(lfs); +} + static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, struct mbox_msghdr *req) { struct otx2_cpt_rx_inline_lf_cfg *cfg_req; + int num_lfs = 1, ret; u8 egrp; - int ret; cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req; if (cptpf->lfs.lfs_num) { @@ -223,11 +266,13 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base, &cptpf->afpf_mbox, BLKADDR_CPT0); - ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO, - 1); + cptpf->lfs.global_slot = 0; + cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid; + cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen; + + ret = otx2_inline_cptlf_setup(cptpf, &cptpf->lfs, egrp, num_lfs); if (ret) { - dev_err(&cptpf->pdev->dev, - "LF configuration failed for RX inline ipsec.\n"); + dev_err(&cptpf->pdev->dev, "Inline-Ipsec CPT0 LF setup failed.\n"); return ret; } @@ -236,11 +281,13 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev, cptpf->reg_base, &cptpf->afpf_mbox, BLKADDR_CPT1); - ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp, - OTX2_CPT_QUEUE_HI_PRIO, 1); + cptpf->cpt1_lfs.global_slot = num_lfs; + cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid; + cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen; + ret = otx2_inline_cptlf_setup(cptpf, &cptpf->cpt1_lfs, egrp, + num_lfs); if (ret) { - dev_err(&cptpf->pdev->dev, - "LF configuration failed for RX inline ipsec.\n"); + dev_err(&cptpf->pdev->dev, "Inline CPT1 LF setup failed.\n"); goto lf_cleanup; } cptpf->rsrc_req_blkaddr = 0; @@ -253,9 +300,9 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, return 0; lf1_cleanup: - otx2_cptlf_shutdown(&cptpf->cpt1_lfs); + otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs); lf_cleanup: - otx2_cptlf_shutdown(&cptpf->lfs); + otx2_inline_cptlf_cleanup(&cptpf->lfs); return ret; } @@ -410,6 +457,8 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf, struct otx2_cptlfs_info *lfs = &cptpf->lfs; struct device *dev = &cptpf->pdev->dev; struct cpt_rd_wr_reg_msg *rsp_rd_wr; + struct msix_offset_rsp *rsp_msix; + int i; if (msg->id >= MBOX_MSG_MAX) { dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id); @@ -428,6 +477,14 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf, cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; break; + case MBOX_MSG_MSIX_OFFSET: + rsp_msix = (struct msix_offset_rsp *) msg; + for (i = 0; i < rsp_msix->cptlfs; i++) + lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i]; + + 
for (i = 0; i < rsp_msix->cpt1_lfs; i++) + lfs->lf[i].msix_offset = rsp_msix->cpt1_lf_msixoff[i]; + break; case MBOX_MSG_CPT_RD_WR_REGISTER: rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg; if (msg->rc) { @@ -449,6 +506,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf, break; case MBOX_MSG_CPT_INLINE_IPSEC_CFG: case MBOX_MSG_NIX_INLINE_IPSEC_CFG: + case MBOX_MSG_CPT_LF_RESET: break; default: diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 1958b797a4..5c94846461 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -16,7 +16,11 @@ #define LOADFVC_MAJOR_OP 0x01 #define LOADFVC_MINOR_OP 0x08 -#define CTX_FLUSH_TIMER_CNT 0xFFFFFF +/* + * Interval to flush dirty data for next CTX entry. The interval is measured + * in increments of 10ns(interval time = CTX_FLUSH_TIMER_COUNT * 10ns). + */ +#define CTX_FLUSH_TIMER_CNT 0x2FAF0 struct fw_info_t { struct list_head ucodes; @@ -117,12 +121,10 @@ static char *get_ucode_type_str(int ucode_type) static int get_ucode_type(struct device *dev, struct otx2_cpt_ucode_hdr *ucode_hdr, - int *ucode_type) + int *ucode_type, u16 rid) { - struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev); char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ]; char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ]; - struct pci_dev *pdev = cptpf->pdev; int i, val = 0; u8 nn; @@ -130,7 +132,7 @@ static int get_ucode_type(struct device *dev, for (i = 0; i < strlen(tmp_ver_str); i++) tmp_ver_str[i] = tolower(tmp_ver_str[i]); - sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision); + sprintf(ver_str_prefix, "ocpt-%02d", rid); if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ)) return -EINVAL; @@ -359,7 +361,7 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp, } static int load_fw(struct device *dev, struct fw_info_t *fw_info, - char *filename) + char *filename, u16 rid) { struct otx2_cpt_ucode_hdr *ucode_hdr; struct otx2_cpt_uc_info_t *uc_info; @@ -375,7 +377,7 @@ static int load_fw(struct device *dev, struct fw_info_t *fw_info, goto free_uc_info; ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data; - ret = get_ucode_type(dev, ucode_hdr, &ucode_type); + ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid); if (ret) goto release_fw; @@ -389,6 +391,7 @@ static int load_fw(struct device *dev, struct fw_info_t *fw_info, set_ucode_filename(&uc_info->ucode, filename); memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ); + uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0; uc_info->ucode.ver_num = ucode_hdr->ver_num; uc_info->ucode.type = ucode_type; uc_info->ucode.size = ucode_size; @@ -448,7 +451,8 @@ static void print_uc_info(struct fw_info_t *fw_info) } } -static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info) +static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info, + u16 rid) { char filename[OTX2_CPT_NAME_LENGTH]; char eng_type[8] = {0}; @@ -462,9 +466,9 @@ static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info) eng_type[i] = tolower(eng_type[i]); snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out", - pdev->revision, eng_type); + rid, eng_type); /* Request firmware for each engine type */ - ret = load_fw(&pdev->dev, fw_info, filename); + ret = load_fw(&pdev->dev, fw_info, filename, rid); if (ret) goto release_fw; } @@ -1155,7 +1159,7 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev 
*cptpf, if (eng_grps->is_grps_created) goto unlock; - ret = cpt_ucode_load_fw(pdev, &fw_info); + ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid); if (ret) goto unlock; @@ -1230,14 +1234,16 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf, */ rnm_to_cpt_errata_fixup(&pdev->dev); + otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, ®_val, + BLKADDR_CPT0); /* * Configure engine group mask to allow context prefetching * for the groups and enable random number request, to enable * CPT to request random numbers from RNM. */ + reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16); otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, - OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16), - BLKADDR_CPT0); + reg_val, BLKADDR_CPT0); /* * Set interval to periodically flush dirty data for the next * CTX cache entry. Set the interval count to maximum supported @@ -1252,10 +1258,12 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf, * encounters a fault/poison, a rare case may result in * unpredictable data being delivered to a CPT engine. */ - otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, ®_val, - BLKADDR_CPT0); - otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, - reg_val | BIT_ULL(24), BLKADDR_CPT0); + if (cpt_is_errata_38550_exists(pdev)) { + otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, + ®_val, BLKADDR_CPT0); + otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, + reg_val | BIT_ULL(24), BLKADDR_CPT0); + } mutex_unlock(&eng_grps->lock); return 0; @@ -1412,7 +1420,7 @@ static int create_eng_caps_discovery_grps(struct pci_dev *pdev, int ret; mutex_lock(&eng_grps->lock); - ret = cpt_ucode_load_fw(pdev, &fw_info); + ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid); if (ret) { mutex_unlock(&eng_grps->lock); return ret; @@ -1686,13 +1694,14 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf, goto err_unlock; } INIT_LIST_HEAD(&fw_info.ucodes); - ret = load_fw(dev, &fw_info, ucode_filename[0]); + + ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid); if (ret) { dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]); goto err_unlock; } if (ucode_idx > 1) { - ret = load_fw(dev, &fw_info, ucode_filename[1]); + ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid); if (ret) { dev_err(dev, "Unable to load firmware %s\n", ucode_filename[1]); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h index e69320a54b..365fe8943b 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h @@ -73,7 +73,7 @@ struct otx2_cpt_ucode_hdr { }; struct otx2_cpt_ucode { - u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];/* + u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ + 1];/* * ucode version in readable * format */ @@ -150,6 +150,7 @@ struct otx2_cpt_eng_grps { int engs_num; /* total number of engines supported */ u8 eng_ref_cnt[OTX2_CPT_MAX_ENGINES];/* engines reference count */ bool is_grps_created; /* Is the engine groups are already created */ + u16 rid; }; struct otx2_cptpf_dev; int otx2_cpt_init_eng_grps(struct pci_dev *pdev, diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h index 994291e90d..11ab9af1df 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h @@ -22,6 +22,7 @@ struct otx2_cptvf_dev { int blkaddr; void *bbuf_base; unsigned long cap_flag; + u64 
eng_caps[OTX2_CPT_MAX_ENG_TYPES]; }; irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg); @@ -29,5 +30,6 @@ void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work); int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type); int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf); int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev); +int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf); #endif /* __OTX2_CPTVF_H */ diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c index e27ddd3c4e..1604fc58dc 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c @@ -17,6 +17,7 @@ #include "otx2_cptvf.h" #include "otx2_cptvf_algs.h" #include "otx2_cpt_reqmgr.h" +#include "cn10k_cpt.h" /* Size of salt in AES GCM mode */ #define AES_GCM_SALT_SIZE 4 @@ -384,6 +385,9 @@ static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc) req_info->is_trunc_hmac = false; req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev); + req_info->req.cptr = ctx->er_ctx.hw_ctx; + req_info->req.cptr_dma = ctx->er_ctx.cptr_dma; + /* * We perform an asynchronous send and once * the request is completed the driver would @@ -530,6 +534,8 @@ static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm) struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm); struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm); struct crypto_alg *alg = tfm->__crt_alg; + struct pci_dev *pdev; + int ret, cpu_num; memset(ctx, 0, sizeof(*ctx)); /* @@ -541,6 +547,15 @@ static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm) stfm, sizeof(struct otx2_cpt_req_ctx) + sizeof(struct skcipher_request)); + ret = get_se_device(&pdev, &cpu_num); + if (ret) + return ret; + + ctx->pdev = pdev; + ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx); + if (ret) + return ret; + return cpt_skcipher_fallback_init(ctx, alg); } @@ -552,6 +567,7 @@ static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm) crypto_free_skcipher(ctx->fbk_cipher); ctx->fbk_cipher = NULL; } + cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx); } static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx, @@ -576,6 +592,8 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type) struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm); struct crypto_tfm *tfm = crypto_aead_tfm(atfm); struct crypto_alg *alg = tfm->__crt_alg; + struct pci_dev *pdev; + int ret, cpu_num; ctx->cipher_type = cipher_type; ctx->mac_type = mac_type; @@ -632,6 +650,15 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type) } crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx)); + ret = get_se_device(&pdev, &cpu_num); + if (ret) + return ret; + + ctx->pdev = pdev; + ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx); + if (ret) + return ret; + return cpt_aead_fallback_init(ctx, alg); } @@ -694,6 +721,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm) crypto_free_aead(ctx->fbk_cipher); ctx->fbk_cipher = NULL; } + cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx); } static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm, @@ -1299,6 +1327,9 @@ static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc) req_info->is_enc = enc; req_info->is_trunc_hmac = false; + req_info->req.cptr = ctx->er_ctx.hw_ctx; + req_info->req.cptr_dma = ctx->er_ctx.cptr_dma; + switch (reg_type) { case OTX2_CPT_AEAD_ENC_DEC_REQ: 
status = create_aead_input_list(req, enc); diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h index f04184bd17..d29f84f01c 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h @@ -9,6 +9,7 @@ #include <crypto/skcipher.h> #include <crypto/aead.h> #include "otx2_cpt_common.h" +#include "cn10k_cpt.h" #define OTX2_CPT_MAX_ENC_KEY_SIZE 32 #define OTX2_CPT_MAX_HASH_KEY_SIZE 64 @@ -123,6 +124,8 @@ struct otx2_cpt_enc_ctx { u8 key_type; u8 enc_align_len; struct crypto_skcipher *fbk_cipher; + struct pci_dev *pdev; + struct cn10k_cpt_errata_ctx er_ctx; }; union otx2_cpt_offset_ctrl { @@ -161,6 +164,8 @@ struct otx2_cpt_aead_ctx { struct crypto_shash *hashalg; struct otx2_cpt_sdesc *sdesc; struct crypto_aead *fbk_cipher; + struct cn10k_cpt_errata_ctx er_ctx; + struct pci_dev *pdev; u8 *ipad; u8 *opad; u32 enc_key_len; diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c index 215a1b17b6..527d34cc25 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c @@ -246,7 +246,8 @@ static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs) /* Unregister crypto algorithms */ otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE); /* Unregister LFs interrupts */ - otx2_cptlf_unregister_interrupts(lfs); + otx2_cptlf_unregister_misc_interrupts(lfs); + otx2_cptlf_unregister_done_interrupts(lfs); /* Cleanup LFs software side */ lf_sw_cleanup(lfs); /* Free instruction queues */ @@ -280,8 +281,7 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf) if (ret) return ret; - lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits : - num_online_cpus(); + lfs_num = cptvf->lfs.kvf_limits; otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base, &cptvf->pfvf_mbox, cptvf->blkaddr); @@ -301,7 +301,11 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf) goto cleanup_lf; /* Register LFs interrupts */ - ret = otx2_cptlf_register_interrupts(lfs); + ret = otx2_cptlf_register_misc_interrupts(lfs); + if (ret) + goto cleanup_lf_sw; + + ret = otx2_cptlf_register_done_interrupts(lfs); if (ret) goto cleanup_lf_sw; @@ -322,7 +326,8 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf) disable_irqs: otx2_cptlf_free_irqs_affinity(lfs); unregister_intr: - otx2_cptlf_unregister_interrupts(lfs); + otx2_cptlf_unregister_misc_interrupts(lfs); + otx2_cptlf_unregister_done_interrupts(lfs); cleanup_lf_sw: lf_sw_cleanup(lfs); cleanup_lf: @@ -383,6 +388,17 @@ static int otx2_cptvf_probe(struct pci_dev *pdev, goto destroy_pfvf_mbox; cptvf->blkaddr = BLKADDR_CPT0; + + cptvf_hw_ops_get(cptvf); + + ret = otx2_cptvf_send_caps_msg(cptvf); + if (ret) { + dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n"); + goto unregister_interrupts; + } + if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35)) + cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create; + /* Initialize CPT LFs */ ret = cptvf_lf_init(cptvf); if (ret) diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c index 75c403f2b1..d9fa5f6e20 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c @@ -72,6 +72,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf, struct otx2_cptlfs_info *lfs = &cptvf->lfs; struct otx2_cpt_kvf_limits_rsp *rsp_limits; struct otx2_cpt_egrp_num_rsp 
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 75c403f2b1..d9fa5f6e20 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -72,6 +72,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
         struct otx2_cptlfs_info *lfs = &cptvf->lfs;
         struct otx2_cpt_kvf_limits_rsp *rsp_limits;
         struct otx2_cpt_egrp_num_rsp *rsp_grp;
+        struct otx2_cpt_caps_rsp *eng_caps;
         struct cpt_rd_wr_reg_msg *rsp_reg;
         struct msix_offset_rsp *rsp_msix;
         int i;
@@ -127,6 +128,13 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
                 rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
                 cptvf->lfs.kvf_limits = rsp_limits->kvf_limits;
                 break;
+        case MBOX_MSG_GET_CAPS:
+                eng_caps = (struct otx2_cpt_caps_rsp *)msg;
+                memcpy(cptvf->eng_caps, eng_caps->eng_caps,
+                       sizeof(cptvf->eng_caps));
+                break;
+        case MBOX_MSG_CPT_LF_RESET:
+                break;
         default:
                 dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
                         msg->id);
@@ -205,3 +213,23 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
 
         return otx2_cpt_send_mbox_msg(mbox, pdev);
 }
+
+int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf)
+{
+        struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+        struct pci_dev *pdev = cptvf->pdev;
+        struct mbox_msghdr *req;
+
+        req = (struct mbox_msghdr *)
+               otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+                                       sizeof(struct otx2_cpt_caps_rsp));
+        if (!req) {
+                dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+                return -EFAULT;
+        }
+        req->id = MBOX_MSG_GET_CAPS;
+        req->sig = OTX2_MBOX_REQ_SIG;
+        req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+
+        return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
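The mailbox hunks above add a GET_CAPS request on the VF side and a handler case that copies the per-engine-type capability words out of the response. The sketch below shows the same request-id dispatch and copy pattern in standalone C; the message ID value, struct layouts, and array sizes are assumptions for illustration, not the real PF/VF mailbox ABI.

/*
 * Illustrative sketch (userspace, not driver code): dispatch on a message id
 * and copy capability words out of a caps response, as the new
 * MBOX_MSG_GET_CAPS case above does. All names and sizes here are assumed.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { MSG_GET_CAPS = 0xbf };   /* stand-in for MBOX_MSG_GET_CAPS */

struct msghdr_sketch {
        uint16_t id;
};

struct caps_rsp_sketch {
        struct msghdr_sketch hdr;
        uint64_t eng_caps[3];   /* one word per engine type, e.g. SE/IE/AE */
};

struct vf_sketch {
        uint64_t eng_caps[3];
};

static void handle_msg(struct vf_sketch *vf, const struct msghdr_sketch *msg)
{
        const struct caps_rsp_sketch *rsp;

        switch (msg->id) {
        case MSG_GET_CAPS:
                rsp = (const struct caps_rsp_sketch *)msg;
                memcpy(vf->eng_caps, rsp->eng_caps, sizeof(vf->eng_caps));
                break;
        default:
                fprintf(stderr, "Unsupported msg %d received.\n", msg->id);
        }
}

int main(void)
{
        struct caps_rsp_sketch rsp = {
                .hdr = { .id = MSG_GET_CAPS },
                .eng_caps = { 1ULL << 35, 0, 0 },       /* SE engines advertise SGv2 */
        };
        struct vf_sketch vf = { 0 };

        handle_msg(&vf, &rsp.hdr);
        printf("SE caps: 0x%llx\n", (unsigned long long)vf.eng_caps[0]);
        return 0;
}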
failed\n"); - ret = -EIO; - goto sg_cleanup; - } - } - components = buf_count / 4; - sg_ptr = (struct otx2_cpt_sglist_component *)buffer; - for (i = 0; i < components; i++) { - sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size); - sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size); - sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size); - sg_ptr->len3 = cpu_to_be16(list[i * 4 + 3].size); - sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr); - sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr); - sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr); - sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr); - sg_ptr++; - } - components = buf_count % 4; - - switch (components) { - case 3: - sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size); - sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr); - fallthrough; - case 2: - sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size); - sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr); - fallthrough; - case 1: - sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size); - sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr); - break; - default: - break; - } - return ret; - -sg_cleanup: - for (j = 0; j < i; j++) { - if (list[j].dma_addr) { - dma_unmap_single(&pdev->dev, list[j].dma_addr, - list[j].size, DMA_BIDIRECTIONAL); - } - - list[j].dma_addr = 0; - } - return ret; -} - -static inline struct otx2_cpt_inst_info *info_create(struct pci_dev *pdev, - struct otx2_cpt_req_info *req, - gfp_t gfp) -{ - int align = OTX2_CPT_DMA_MINALIGN; - struct otx2_cpt_inst_info *info; - u32 dlen, align_dlen, info_len; - u16 g_sz_bytes, s_sz_bytes; - u32 total_mem_len; - - if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT || - req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) { - dev_err(&pdev->dev, "Error too many sg components\n"); - return NULL; - } - - g_sz_bytes = ((req->in_cnt + 3) / 4) * - sizeof(struct otx2_cpt_sglist_component); - s_sz_bytes = ((req->out_cnt + 3) / 4) * - sizeof(struct otx2_cpt_sglist_component); - - dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; - align_dlen = ALIGN(dlen, align); - info_len = ALIGN(sizeof(*info), align); - total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s); - - info = kzalloc(total_mem_len, gfp); - if (unlikely(!info)) - return NULL; - - info->dlen = dlen; - info->in_buffer = (u8 *)info + info_len; - - ((u16 *)info->in_buffer)[0] = req->out_cnt; - ((u16 *)info->in_buffer)[1] = req->in_cnt; - ((u16 *)info->in_buffer)[2] = 0; - ((u16 *)info->in_buffer)[3] = 0; - cpu_to_be64s((u64 *)info->in_buffer); - - /* Setup gather (input) components */ - if (setup_sgio_components(pdev, req->in, req->in_cnt, - &info->in_buffer[8])) { - dev_err(&pdev->dev, "Failed to setup gather list\n"); - goto destroy_info; - } - - if (setup_sgio_components(pdev, req->out, req->out_cnt, - &info->in_buffer[8 + g_sz_bytes])) { - dev_err(&pdev->dev, "Failed to setup scatter list\n"); - goto destroy_info; - } - - info->dma_len = total_mem_len - info_len; - info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer, - info->dma_len, DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) { - dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n"); - goto destroy_info; - } - /* - * Get buffer for union otx2_cpt_res_s response - * structure and its physical address - */ - info->completion_addr = info->in_buffer + align_dlen; - info->comp_baddr = info->dptr_baddr + align_dlen; - - return info; - -destroy_info: - otx2_cpt_info_destroy(pdev, info); - return NULL; -} - static int process_request(struct pci_dev *pdev, struct 
@@ -247,7 +101,7 @@ static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
         if (unlikely(!otx2_cptlf_started(lf->lfs)))
                 return -ENODEV;
 
-        info = info_create(pdev, req, gfp);
+        info = lf->lfs->ops->cpt_sg_info_create(pdev, req, gfp);
         if (unlikely(!info)) {
                 dev_err(&pdev->dev, "Setting up cpt inst info failed");
                 return -ENOMEM;
@@ -303,9 +157,9 @@ static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
         /* 64-bit swap for microcode data reads, not needed for addresses*/
         cpu_to_be64s(&iq_cmd.cmd.u);
-        iq_cmd.dptr = info->dptr_baddr;
-        iq_cmd.rptr = 0;
-        iq_cmd.cptr.u = 0;
+        iq_cmd.dptr = info->dptr_baddr | info->gthr_sz << 60;
+        iq_cmd.rptr = info->rptr_baddr | info->sctr_sz << 60;
+        iq_cmd.cptr.s.cptr = cpt_req->cptr_dma;
         iq_cmd.cptr.s.grp = ctrl->s.grp;
 
         /* Fill in the CPT_INST_S type command for HW interpretation */
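With the new SG handling, dptr and rptr carry both a DMA address and a size code: the gather or scatter size value is shifted into bits 63:60 of the 64-bit pointer. The short sketch below shows that encode/decode in isolation; the macro and field names are illustrative only and not taken from the driver.

/*
 * Illustrative sketch (userspace, not driver code): stash a 4-bit size code
 * in bits 63:60 of a 64-bit bus address, as the new iq_cmd.dptr/rptr
 * encoding above does with info->gthr_sz and info->sctr_sz.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_SHIFT  60
#define ADDR_MASK ((1ULL << SZ_SHIFT) - 1)

static uint64_t encode_ptr(uint64_t baddr, unsigned int sz_code)
{
        /* sz_code must fit in 4 bits; the address must not use bits 63:60 */
        return (baddr & ADDR_MASK) | ((uint64_t)(sz_code & 0xf) << SZ_SHIFT);
}

int main(void)
{
        uint64_t dptr = encode_ptr(0x1234567000ULL, 3);

        printf("encoded dptr : 0x%016llx\n", (unsigned long long)dptr);
        printf("address part : 0x%016llx\n",
               (unsigned long long)(dptr & ADDR_MASK));
        printf("size code    : %u\n", (unsigned)(dptr >> SZ_SHIFT));
        return 0;
}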