author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:12 +0000
commit    8665bd53f2f2e27e5511d90428cb3f60e6d0ce15 (patch)
tree      8d58900dc0ebd4a3011f92c128d2fe45bc7c4bf2 /drivers/crypto
parent    Adding debian version 6.7.12-1. (diff)
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/crypto')
-rw-r--r-- drivers/crypto/Kconfig | 1
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 4
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c | 5
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 25
-rw-r--r-- drivers/crypto/amcc/crypto4xx_alg.c | 14
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.c | 40
-rw-r--r-- drivers/crypto/amcc/crypto4xx_core.h | 4
-rw-r--r-- drivers/crypto/amlogic/amlogic-gxl-cipher.c | 4
-rw-r--r-- drivers/crypto/aspeed/Kconfig | 4
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace-crypto.c | 230
-rw-r--r-- drivers/crypto/atmel-aes.c | 214
-rw-r--r-- drivers/crypto/atmel-tdes.c | 205
-rw-r--r-- drivers/crypto/axis/artpec6_crypto.c | 12
-rw-r--r-- drivers/crypto/bcm/cipher.c | 57
-rw-r--r-- drivers/crypto/cavium/cpt/cptvf_algs.c | 24
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_skcipher.c | 19
-rw-r--r-- drivers/crypto/ccp/ccp-crypto-aes.c | 18
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 2
-rw-r--r-- drivers/crypto/ccree/cc_aead.c | 10
-rw-r--r-- drivers/crypto/ccree/cc_cipher.c | 45
-rw-r--r-- drivers/crypto/gemini/sl3516-ce-cipher.c | 4
-rw-r--r-- drivers/crypto/hifn_795x.c | 126
-rw-r--r-- drivers/crypto/hisilicon/debugfs.c | 54
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 166
-rw-r--r-- drivers/crypto/hisilicon/qm_common.h | 4
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 33
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.h | 2
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 2
-rw-r--r-- drivers/crypto/hisilicon/sgl.c | 18
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 54
-rw-r--r-- drivers/crypto/inside-secure/safexcel.c | 4
-rw-r--r-- drivers/crypto/inside-secure/safexcel.h | 4
-rw-r--r-- drivers/crypto/inside-secure/safexcel_cipher.c | 152
-rw-r--r-- drivers/crypto/intel/Kconfig | 1
-rw-r--r-- drivers/crypto/intel/Makefile | 1
-rw-r--r-- drivers/crypto/intel/iaa/Kconfig | 19
-rw-r--r-- drivers/crypto/intel/iaa/Makefile | 12
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto.h | 173
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c | 92
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_main.c | 2197
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_stats.c | 312
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_stats.h | 53
-rw-r--r-- drivers/crypto/intel/qat/Kconfig | 11
-rw-r--r-- drivers/crypto/intel/qat/Makefile | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_420xx/Makefile | 4
-rw-r--r-- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c | 536
-rw-r--r-- drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h | 55
-rw-r--r-- drivers/crypto/intel/qat/qat_420xx/adf_drv.c | 202
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 313
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 52
-rw-r--r-- drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 277
-rw-r--r-- drivers/crypto/intel/qat/qat_common/Makefile | 4
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 15
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_accel_engine.c | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_admin.c | 37
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_admin.h | 4
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_cfg_common.h | 1
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_dbgfs.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_fw_config.h | 18
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_config.c | 287
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_config.h | 11
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c | 241
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 87
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c | 153
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h | 158
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_init.c | 12
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_telemetry.c | 288
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_telemetry.h | 99
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c | 710
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h | 117
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h | 10
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 14
-rw-r--r-- drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h | 2
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_hal.c | 6
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 1
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.c | 6
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 23
-rw-r--r-- drivers/crypto/marvell/octeontx2/cn10k_cpt.c | 86
-rw-r--r-- drivers/crypto/marvell/octeontx2/cn10k_cpt.h | 27
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_common.h | 54
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c | 44
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h | 9
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c | 26
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h | 298
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 133
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptlf.h | 105
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf.h | 4
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c | 74
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c | 82
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 49
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h | 3
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf.h | 2
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c | 31
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h | 5
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c | 26
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c | 28
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c | 162
-rw-r--r-- drivers/crypto/n2_core.c | 36
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 4
-rw-r--r-- drivers/crypto/sahara.c | 589
-rw-r--r-- drivers/crypto/starfive/Kconfig | 2
-rw-r--r-- drivers/crypto/starfive/jh7110-aes.c | 77
-rw-r--r-- drivers/crypto/starfive/jh7110-cryp.c | 8
-rw-r--r-- drivers/crypto/starfive/jh7110-cryp.h | 12
-rw-r--r-- drivers/crypto/starfive/jh7110-rsa.c | 58
-rw-r--r-- drivers/crypto/stm32/stm32-cryp.c | 2
106 files changed, 7463 insertions(+), 2753 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 79c3bb9c99..0991f026cb 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -306,6 +306,7 @@ config CRYPTO_DEV_SAHARA
select CRYPTO_SKCIPHER
select CRYPTO_AES
select CRYPTO_ECB
+ select CRYPTO_ENGINE
help
This option enables support for the SAHARA HW crypto accelerator
found in some Freescale i.MX chips.
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index d2cf961901..de50c00ba2 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -431,8 +431,8 @@ int sun8i_ce_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
- sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
- crypto_skcipher_reqsize(op->fallback_tfm);
+ crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
memcpy(algt->fbname,
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
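Note: this hunk and several later ones (sun8i-ss, amlogic-gxl, gemini sl3516, artpec6) replace direct writes to tfm->reqsize with the crypto_skcipher_set_reqsize() helper. The following is a minimal sketch of the resulting init-hook pattern, not code from this diff; the struct, function, and algorithm names are placeholders chosen for illustration.

#include <linux/err.h>
#include <linux/types.h>
#include <crypto/internal/skcipher.h>

/* Placeholder per-request and per-transform contexts (illustrative only). */
struct my_cipher_req_ctx {
	u32 op_dir;
};

struct my_tfm_ctx {
	struct crypto_skcipher *fallback_tfm;
};

static int my_cipher_init(struct crypto_skcipher *sktfm)
{
	struct my_tfm_ctx *op = crypto_skcipher_ctx(sktfm);

	op->fallback_tfm = crypto_alloc_skcipher("cbc(aes)", 0,
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm))
		return PTR_ERR(op->fallback_tfm);

	/* Size the per-request context through the helper instead of
	 * assigning sktfm->reqsize directly, as these hunks now do. */
	crypto_skcipher_set_reqsize(sktfm, sizeof(struct my_cipher_req_ctx) +
					   crypto_skcipher_reqsize(op->fallback_tfm));
	return 0;
}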
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 7fa359725e..9b9605ce8e 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -405,9 +405,8 @@ int sun8i_ss_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
- sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) +
- crypto_skcipher_reqsize(op->fallback_tfm);
-
+ crypto_skcipher_set_reqsize(sktfm, sizeof(struct sun8i_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
memcpy(algt->fbname,
crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)),
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index d70b105dcf..753f67a36d 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -30,33 +30,16 @@ static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
unsigned int keylen)
{
struct crypto_shash *xtfm;
- struct shash_desc *sdesc;
- size_t len;
- int ret = 0;
+ int ret;
xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(xtfm))
return PTR_ERR(xtfm);
- len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
- sdesc = kmalloc(len, GFP_KERNEL);
- if (!sdesc) {
- ret = -ENOMEM;
- goto err_hashkey_sdesc;
- }
- sdesc->tfm = xtfm;
-
- ret = crypto_shash_init(sdesc);
- if (ret) {
- dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret);
- goto err_hashkey;
- }
- ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key);
+ ret = crypto_shash_tfm_digest(xtfm, key, keylen, tfmctx->key);
if (ret)
- dev_err(tfmctx->ss->dev, "shash finup error\n");
-err_hashkey:
- kfree(sdesc);
-err_hashkey_sdesc:
+ dev_err(tfmctx->ss->dev, "shash digest error ret=%d\n", ret);
+
crypto_free_shash(xtfm);
return ret;
}
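Note: the sun8i-ss hash hunk above drops the hand-rolled shash_desc allocation and init/finup sequence in favour of crypto_shash_tfm_digest(), which performs the whole digest in one call and manages the descriptor internally. A rough sketch of that one-shot usage follows; the function name and buffer layout are illustrative, not taken from this driver.

#include <linux/err.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>

static int digest_key_sha1(const u8 *key, unsigned int keylen,
			   u8 out[SHA1_DIGEST_SIZE])
{
	struct crypto_shash *xtfm;
	int ret;

	xtfm = crypto_alloc_shash("sha1", 0, 0);
	if (IS_ERR(xtfm))
		return PTR_ERR(xtfm);

	/* One-shot digest: no separate shash_desc allocation or init/finup. */
	ret = crypto_shash_tfm_digest(xtfm, key, keylen, out);

	crypto_free_shash(xtfm);
	return ret;
}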
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index ded7322427..e0af611a95 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -181,13 +181,6 @@ int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
-int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
- CRYPTO_FEEDBACK_MODE_128BIT_CFB);
-}
-
int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
@@ -195,13 +188,6 @@ int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
-int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen)
-{
- return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
- CRYPTO_FEEDBACK_MODE_64BIT_OFB);
-}
-
int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 8d53372245..6006703fb6 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1211,26 +1211,6 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
} },
{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
.base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "cfb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_cfb,
- .encrypt = crypto4xx_encrypt_iv_stream,
- .decrypt = crypto4xx_decrypt_iv_stream,
- .init = crypto4xx_sk_init,
- .exit = crypto4xx_sk_exit,
- } },
- { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
- .base = {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-ppc4xx",
.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
@@ -1289,26 +1269,6 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
.init = crypto4xx_sk_init,
.exit = crypto4xx_sk_exit,
} },
- { .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
- .base = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "ofb-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_IV_SIZE,
- .setkey = crypto4xx_setkey_aes_ofb,
- .encrypt = crypto4xx_encrypt_iv_stream,
- .decrypt = crypto4xx_decrypt_iv_stream,
- .init = crypto4xx_sk_init,
- .exit = crypto4xx_sk_exit,
- } },
/* AEAD */
{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index 56c10668c0..96355d463b 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -162,14 +162,10 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *dst_tmp);
int crypto4xx_setkey_aes_cbc(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_cfb(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ctr(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_setkey_aes_ecb(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
-int crypto4xx_setkey_aes_ofb(struct crypto_skcipher *cipher,
- const u8 *key, unsigned int keylen);
int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen);
int crypto4xx_encrypt_ctr(struct skcipher_request *req);
diff --git a/drivers/crypto/amlogic/amlogic-gxl-cipher.c b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
index 3308406612..29048da6f5 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-cipher.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-cipher.c
@@ -327,8 +327,8 @@ int meson_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
- sktfm->reqsize = sizeof(struct meson_cipher_req_ctx) +
- crypto_skcipher_reqsize(op->fallback_tfm);
+ crypto_skcipher_set_reqsize(sktfm, sizeof(struct meson_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
return 0;
}
diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
index db6c5b4cdc..e93f2f82b4 100644
--- a/drivers/crypto/aspeed/Kconfig
+++ b/drivers/crypto/aspeed/Kconfig
@@ -38,14 +38,12 @@ config CRYPTO_DEV_ASPEED_HACE_CRYPTO
select CRYPTO_DES
select CRYPTO_ECB
select CRYPTO_CBC
- select CRYPTO_CFB
- select CRYPTO_OFB
select CRYPTO_CTR
help
Select here to enable Aspeed Hash & Crypto Engine (HACE)
crypto driver.
Supports AES/DES symmetric-key encryption and decryption
- with ECB/CBC/CFB/OFB/CTR options.
+ with ECB/CBC/CTR options.
config CRYPTO_DEV_ASPEED_ACRY
bool "Enable Aspeed ACRY RSA Engine"
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
index f0eddb7854..a72dfebc53 100644
--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -473,30 +473,6 @@ static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req)
HACE_CMD_TRIPLE_DES);
}
-static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
- HACE_CMD_TRIPLE_DES);
-}
-
-static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
- HACE_CMD_TRIPLE_DES);
-}
-
-static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
- HACE_CMD_TRIPLE_DES);
-}
-
-static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
- HACE_CMD_TRIPLE_DES);
-}
-
static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req)
{
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
@@ -533,30 +509,6 @@ static int aspeed_des_ctr_encrypt(struct skcipher_request *req)
HACE_CMD_SINGLE_DES);
}
-static int aspeed_des_ofb_decrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB |
- HACE_CMD_SINGLE_DES);
-}
-
-static int aspeed_des_ofb_encrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB |
- HACE_CMD_SINGLE_DES);
-}
-
-static int aspeed_des_cfb_decrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB |
- HACE_CMD_SINGLE_DES);
-}
-
-static int aspeed_des_cfb_encrypt(struct skcipher_request *req)
-{
- return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB |
- HACE_CMD_SINGLE_DES);
-}
-
static int aspeed_des_cbc_decrypt(struct skcipher_request *req)
{
return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC |
@@ -659,26 +611,6 @@ static int aspeed_aes_ctr_encrypt(struct skcipher_request *req)
return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR);
}
-static int aspeed_aes_ofb_decrypt(struct skcipher_request *req)
-{
- return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB);
-}
-
-static int aspeed_aes_ofb_encrypt(struct skcipher_request *req)
-{
- return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB);
-}
-
-static int aspeed_aes_cfb_decrypt(struct skcipher_request *req)
-{
- return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB);
-}
-
-static int aspeed_aes_cfb_encrypt(struct skcipher_request *req)
-{
- return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB);
-}
-
static int aspeed_aes_cbc_decrypt(struct skcipher_request *req)
{
return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC);
@@ -792,60 +724,6 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
},
{
.alg.skcipher.base = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aspeed_aes_setkey,
- .encrypt = aspeed_aes_cfb_encrypt,
- .decrypt = aspeed_aes_cfb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "aspeed-cfb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = aspeed_aes_setkey,
- .encrypt = aspeed_aes_ofb_encrypt,
- .decrypt = aspeed_aes_ofb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "aspeed-ofb-aes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.setkey = aspeed_des_setkey,
@@ -899,60 +777,6 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
},
{
.alg.skcipher.base = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = aspeed_des_setkey,
- .encrypt = aspeed_des_cfb_encrypt,
- .decrypt = aspeed_des_cfb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "cfb(des)",
- .cra_driver_name = "aspeed-cfb-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = aspeed_des_setkey,
- .encrypt = aspeed_des_ofb_encrypt,
- .decrypt = aspeed_des_ofb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "ofb(des)",
- .cra_driver_name = "aspeed-ofb-des",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.setkey = aspeed_des_setkey,
@@ -1004,60 +828,6 @@ static struct aspeed_hace_alg aspeed_crypto_algs[] = {
.do_one_request = aspeed_crypto_do_request,
},
},
- {
- .alg.skcipher.base = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = aspeed_des_setkey,
- .encrypt = aspeed_tdes_cfb_encrypt,
- .decrypt = aspeed_tdes_cfb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "cfb(des3_ede)",
- .cra_driver_name = "aspeed-cfb-tdes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
- {
- .alg.skcipher.base = {
- .ivsize = DES_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = aspeed_des_setkey,
- .encrypt = aspeed_tdes_ofb_encrypt,
- .decrypt = aspeed_tdes_ofb_decrypt,
- .init = aspeed_crypto_cra_init,
- .exit = aspeed_crypto_cra_exit,
- .base = {
- .cra_name = "ofb(des3_ede)",
- .cra_driver_name = "aspeed-ofb-tdes",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = DES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct aspeed_cipher_ctx),
- .cra_alignmask = 0x0f,
- .cra_module = THIS_MODULE,
- }
- },
- .alg.skcipher.op = {
- .do_one_request = aspeed_crypto_do_request,
- },
- },
};
static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index d1d93e8978..8bd64fc37e 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -46,11 +46,6 @@
#define ATMEL_AES_BUFFER_ORDER 2
#define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER)
-#define CFB8_BLOCK_SIZE 1
-#define CFB16_BLOCK_SIZE 2
-#define CFB32_BLOCK_SIZE 4
-#define CFB64_BLOCK_SIZE 8
-
#define SIZE_IN_WORDS(x) ((x) >> 2)
/* AES flags */
@@ -60,12 +55,6 @@
#define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK)
#define AES_FLAGS_ECB AES_MR_OPMOD_ECB
#define AES_FLAGS_CBC AES_MR_OPMOD_CBC
-#define AES_FLAGS_OFB AES_MR_OPMOD_OFB
-#define AES_FLAGS_CFB128 (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b)
-#define AES_FLAGS_CFB64 (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b)
-#define AES_FLAGS_CFB32 (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b)
-#define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b)
-#define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b)
#define AES_FLAGS_CTR AES_MR_OPMOD_CTR
#define AES_FLAGS_GCM AES_MR_OPMOD_GCM
#define AES_FLAGS_XTS AES_MR_OPMOD_XTS
@@ -87,7 +76,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
- bool has_cfb64;
bool has_gcm;
bool has_xts;
bool has_authenc;
@@ -860,22 +848,6 @@ static int atmel_aes_dma_start(struct atmel_aes_dev *dd,
int err;
switch (dd->ctx->block_size) {
- case CFB8_BLOCK_SIZE:
- addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- maxburst = 1;
- break;
-
- case CFB16_BLOCK_SIZE:
- addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- maxburst = 1;
- break;
-
- case CFB32_BLOCK_SIZE:
- case CFB64_BLOCK_SIZE:
- addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- maxburst = 1;
- break;
-
case AES_BLOCK_SIZE:
addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
maxburst = dd->caps.max_burst_size;
@@ -1103,7 +1075,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
}
/*
- * ECB, CBC, CFB, OFB or CTR mode require the plaintext and ciphertext
+ * ECB, CBC or CTR mode require the plaintext and ciphertext
* to have a positve integer length.
*/
if (!req->cryptlen && opmode != AES_FLAGS_XTS)
@@ -1113,27 +1085,7 @@ static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode)
!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skcipher)))
return -EINVAL;
- switch (mode & AES_FLAGS_OPMODE_MASK) {
- case AES_FLAGS_CFB8:
- ctx->block_size = CFB8_BLOCK_SIZE;
- break;
-
- case AES_FLAGS_CFB16:
- ctx->block_size = CFB16_BLOCK_SIZE;
- break;
-
- case AES_FLAGS_CFB32:
- ctx->block_size = CFB32_BLOCK_SIZE;
- break;
-
- case AES_FLAGS_CFB64:
- ctx->block_size = CFB64_BLOCK_SIZE;
- break;
-
- default:
- ctx->block_size = AES_BLOCK_SIZE;
- break;
- }
+ ctx->block_size = AES_BLOCK_SIZE;
ctx->is_aead = false;
rctx = skcipher_request_ctx(req);
@@ -1188,66 +1140,6 @@ static int atmel_aes_cbc_decrypt(struct skcipher_request *req)
return atmel_aes_crypt(req, AES_FLAGS_CBC);
}
-static int atmel_aes_ofb_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_ofb_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_OFB);
-}
-
-static int atmel_aes_cfb_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB128);
-}
-
-static int atmel_aes_cfb64_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb64_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB64);
-}
-
-static int atmel_aes_cfb32_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb32_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB32);
-}
-
-static int atmel_aes_cfb16_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb16_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB16);
-}
-
-static int atmel_aes_cfb8_encrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT);
-}
-
-static int atmel_aes_cfb8_decrypt(struct skcipher_request *req)
-{
- return atmel_aes_crypt(req, AES_FLAGS_CFB8);
-}
-
static int atmel_aes_ctr_encrypt(struct skcipher_request *req)
{
return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT);
@@ -1319,76 +1211,6 @@ static struct skcipher_alg aes_algs[] = {
.ivsize = AES_BLOCK_SIZE,
},
{
- .base.cra_name = "ofb(aes)",
- .base.cra_driver_name = "atmel-ofb-aes",
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_ofb_encrypt,
- .decrypt = atmel_aes_ofb_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
- .base.cra_name = "cfb(aes)",
- .base.cra_driver_name = "atmel-cfb-aes",
- .base.cra_blocksize = 1,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb_encrypt,
- .decrypt = atmel_aes_cfb_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
- .base.cra_name = "cfb32(aes)",
- .base.cra_driver_name = "atmel-cfb32-aes",
- .base.cra_blocksize = CFB32_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb32_encrypt,
- .decrypt = atmel_aes_cfb32_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
- .base.cra_name = "cfb16(aes)",
- .base.cra_driver_name = "atmel-cfb16-aes",
- .base.cra_blocksize = CFB16_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb16_encrypt,
- .decrypt = atmel_aes_cfb16_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
- .base.cra_name = "cfb8(aes)",
- .base.cra_driver_name = "atmel-cfb8-aes",
- .base.cra_blocksize = CFB8_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb8_encrypt,
- .decrypt = atmel_aes_cfb8_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-},
-{
.base.cra_name = "ctr(aes)",
.base.cra_driver_name = "atmel-ctr-aes",
.base.cra_blocksize = 1,
@@ -1404,21 +1226,6 @@ static struct skcipher_alg aes_algs[] = {
},
};
-static struct skcipher_alg aes_cfb64_alg = {
- .base.cra_name = "cfb64(aes)",
- .base.cra_driver_name = "atmel-cfb64-aes",
- .base.cra_blocksize = CFB64_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct atmel_aes_ctx),
-
- .init = atmel_aes_init_tfm,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = atmel_aes_setkey,
- .encrypt = atmel_aes_cfb64_encrypt,
- .decrypt = atmel_aes_cfb64_decrypt,
- .ivsize = AES_BLOCK_SIZE,
-};
-
/* gcm aead functions */
@@ -2407,9 +2214,6 @@ static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd)
if (dd->caps.has_gcm)
crypto_unregister_aead(&aes_gcm_alg);
- if (dd->caps.has_cfb64)
- crypto_unregister_skcipher(&aes_cfb64_alg);
-
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_skcipher(&aes_algs[i]);
}
@@ -2434,14 +2238,6 @@ static int atmel_aes_register_algs(struct atmel_aes_dev *dd)
goto err_aes_algs;
}
- if (dd->caps.has_cfb64) {
- atmel_aes_crypto_alg_init(&aes_cfb64_alg.base);
-
- err = crypto_register_skcipher(&aes_cfb64_alg);
- if (err)
- goto err_aes_cfb64_alg;
- }
-
if (dd->caps.has_gcm) {
atmel_aes_crypto_alg_init(&aes_gcm_alg.base);
@@ -2482,8 +2278,6 @@ err_aes_authenc_alg:
err_aes_xts_alg:
crypto_unregister_aead(&aes_gcm_alg);
err_aes_gcm_alg:
- crypto_unregister_skcipher(&aes_cfb64_alg);
-err_aes_cfb64_alg:
i = ARRAY_SIZE(aes_algs);
err_aes_algs:
for (j = 0; j < i; j++)
@@ -2495,7 +2289,6 @@ err_aes_algs:
static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
- dd->caps.has_cfb64 = 0;
dd->caps.has_gcm = 0;
dd->caps.has_xts = 0;
dd->caps.has_authenc = 0;
@@ -2507,7 +2300,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x600:
case 0x500:
dd->caps.has_dualbuff = 1;
- dd->caps.has_cfb64 = 1;
dd->caps.has_gcm = 1;
dd->caps.has_xts = 1;
dd->caps.has_authenc = 1;
@@ -2515,13 +2307,11 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
break;
case 0x200:
dd->caps.has_dualbuff = 1;
- dd->caps.has_cfb64 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
case 0x130:
dd->caps.has_dualbuff = 1;
- dd->caps.has_cfb64 = 1;
dd->caps.max_burst_size = 4;
break;
case 0x120:
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 27b7000e25..dcc2380a58 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -45,11 +45,6 @@
#define TDES_FLAGS_OPMODE_MASK (TDES_MR_OPMOD_MASK | TDES_MR_CFBS_MASK)
#define TDES_FLAGS_ECB TDES_MR_OPMOD_ECB
#define TDES_FLAGS_CBC TDES_MR_OPMOD_CBC
-#define TDES_FLAGS_OFB TDES_MR_OPMOD_OFB
-#define TDES_FLAGS_CFB64 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_64b)
-#define TDES_FLAGS_CFB32 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_32b)
-#define TDES_FLAGS_CFB16 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_16b)
-#define TDES_FLAGS_CFB8 (TDES_MR_OPMOD_CFB | TDES_MR_CFBS_8b)
#define TDES_FLAGS_MODE_MASK (TDES_FLAGS_OPMODE_MASK | TDES_FLAGS_ENCRYPT)
@@ -60,13 +55,8 @@
#define ATMEL_TDES_QUEUE_LENGTH 50
-#define CFB8_BLOCK_SIZE 1
-#define CFB16_BLOCK_SIZE 2
-#define CFB32_BLOCK_SIZE 4
-
struct atmel_tdes_caps {
bool has_dma;
- u32 has_cfb_3keys;
};
struct atmel_tdes_dev;
@@ -376,7 +366,6 @@ static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
dma_addr_t dma_addr_in,
dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
int len32;
dd->dma_size = length;
@@ -386,19 +375,7 @@ static int atmel_tdes_crypt_pdc(struct atmel_tdes_dev *dd,
DMA_TO_DEVICE);
}
- switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
- case TDES_FLAGS_CFB8:
- len32 = DIV_ROUND_UP(length, sizeof(u8));
- break;
-
- case TDES_FLAGS_CFB16:
- len32 = DIV_ROUND_UP(length, sizeof(u16));
- break;
-
- default:
- len32 = DIV_ROUND_UP(length, sizeof(u32));
- break;
- }
+ len32 = DIV_ROUND_UP(length, sizeof(u32));
atmel_tdes_write(dd, TDES_PTCR, TDES_PTCR_TXTDIS|TDES_PTCR_RXTDIS);
atmel_tdes_write(dd, TDES_TPR, dma_addr_in);
@@ -419,7 +396,6 @@ static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
dma_addr_t dma_addr_in,
dma_addr_t dma_addr_out, int length)
{
- struct atmel_tdes_reqctx *rctx = skcipher_request_ctx(dd->req);
struct scatterlist sg[2];
struct dma_async_tx_descriptor *in_desc, *out_desc;
enum dma_slave_buswidth addr_width;
@@ -431,19 +407,7 @@ static int atmel_tdes_crypt_dma(struct atmel_tdes_dev *dd,
DMA_TO_DEVICE);
}
- switch (rctx->mode & TDES_FLAGS_OPMODE_MASK) {
- case TDES_FLAGS_CFB8:
- addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
- break;
-
- case TDES_FLAGS_CFB16:
- addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
- break;
-
- default:
- addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- break;
- }
+ addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
dd->dma_lch_in.dma_conf.dst_addr_width = addr_width;
dd->dma_lch_out.dma_conf.src_addr_width = addr_width;
@@ -680,39 +644,11 @@ static int atmel_tdes_crypt(struct skcipher_request *req, unsigned long mode)
if (!req->cryptlen)
return 0;
- switch (mode & TDES_FLAGS_OPMODE_MASK) {
- case TDES_FLAGS_CFB8:
- if (!IS_ALIGNED(req->cryptlen, CFB8_BLOCK_SIZE)) {
- dev_dbg(dev, "request size is not exact amount of CFB8 blocks\n");
- return -EINVAL;
- }
- ctx->block_size = CFB8_BLOCK_SIZE;
- break;
-
- case TDES_FLAGS_CFB16:
- if (!IS_ALIGNED(req->cryptlen, CFB16_BLOCK_SIZE)) {
- dev_dbg(dev, "request size is not exact amount of CFB16 blocks\n");
- return -EINVAL;
- }
- ctx->block_size = CFB16_BLOCK_SIZE;
- break;
-
- case TDES_FLAGS_CFB32:
- if (!IS_ALIGNED(req->cryptlen, CFB32_BLOCK_SIZE)) {
- dev_dbg(dev, "request size is not exact amount of CFB32 blocks\n");
- return -EINVAL;
- }
- ctx->block_size = CFB32_BLOCK_SIZE;
- break;
-
- default:
- if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
- dev_dbg(dev, "request size is not exact amount of DES blocks\n");
- return -EINVAL;
- }
- ctx->block_size = DES_BLOCK_SIZE;
- break;
+ if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) {
+ dev_dbg(dev, "request size is not exact amount of DES blocks\n");
+ return -EINVAL;
}
+ ctx->block_size = DES_BLOCK_SIZE;
rctx->mode = mode;
@@ -832,55 +768,6 @@ static int atmel_tdes_cbc_decrypt(struct skcipher_request *req)
{
return atmel_tdes_crypt(req, TDES_FLAGS_CBC);
}
-static int atmel_tdes_cfb_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB64 | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_cfb_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB64);
-}
-
-static int atmel_tdes_cfb8_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB8 | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_cfb8_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB8);
-}
-
-static int atmel_tdes_cfb16_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB16 | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_cfb16_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB16);
-}
-
-static int atmel_tdes_cfb32_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB32 | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_cfb32_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_CFB32);
-}
-
-static int atmel_tdes_ofb_encrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_OFB | TDES_FLAGS_ENCRYPT);
-}
-
-static int atmel_tdes_ofb_decrypt(struct skcipher_request *req)
-{
- return atmel_tdes_crypt(req, TDES_FLAGS_OFB);
-}
static int atmel_tdes_init_tfm(struct crypto_skcipher *tfm)
{
@@ -932,71 +819,6 @@ static struct skcipher_alg tdes_algs[] = {
.decrypt = atmel_tdes_cbc_decrypt,
},
{
- .base.cra_name = "cfb(des)",
- .base.cra_driver_name = "atmel-cfb-des",
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_alignmask = 0x7,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb_encrypt,
- .decrypt = atmel_tdes_cfb_decrypt,
-},
-{
- .base.cra_name = "cfb8(des)",
- .base.cra_driver_name = "atmel-cfb8-des",
- .base.cra_blocksize = CFB8_BLOCK_SIZE,
- .base.cra_alignmask = 0,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb8_encrypt,
- .decrypt = atmel_tdes_cfb8_decrypt,
-},
-{
- .base.cra_name = "cfb16(des)",
- .base.cra_driver_name = "atmel-cfb16-des",
- .base.cra_blocksize = CFB16_BLOCK_SIZE,
- .base.cra_alignmask = 0x1,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb16_encrypt,
- .decrypt = atmel_tdes_cfb16_decrypt,
-},
-{
- .base.cra_name = "cfb32(des)",
- .base.cra_driver_name = "atmel-cfb32-des",
- .base.cra_blocksize = CFB32_BLOCK_SIZE,
- .base.cra_alignmask = 0x3,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_cfb32_encrypt,
- .decrypt = atmel_tdes_cfb32_decrypt,
-},
-{
- .base.cra_name = "ofb(des)",
- .base.cra_driver_name = "atmel-ofb-des",
- .base.cra_blocksize = 1,
- .base.cra_alignmask = 0x7,
-
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- .setkey = atmel_des_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
-},
-{
.base.cra_name = "ecb(des3_ede)",
.base.cra_driver_name = "atmel-ecb-tdes",
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -1021,19 +843,6 @@ static struct skcipher_alg tdes_algs[] = {
.decrypt = atmel_tdes_cbc_decrypt,
.ivsize = DES_BLOCK_SIZE,
},
-{
- .base.cra_name = "ofb(des3_ede)",
- .base.cra_driver_name = "atmel-ofb-tdes",
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .base.cra_alignmask = 0x7,
-
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .setkey = atmel_tdes_setkey,
- .encrypt = atmel_tdes_ofb_encrypt,
- .decrypt = atmel_tdes_ofb_decrypt,
- .ivsize = DES_BLOCK_SIZE,
-},
};
static void atmel_tdes_queue_task(unsigned long data)
@@ -1121,14 +930,12 @@ static void atmel_tdes_get_cap(struct atmel_tdes_dev *dd)
{
dd->caps.has_dma = 0;
- dd->caps.has_cfb_3keys = 0;
/* keep only major version number */
switch (dd->hw_version & 0xf00) {
case 0x800:
case 0x700:
dd->caps.has_dma = 1;
- dd->caps.has_cfb_3keys = 1;
break;
case 0x600:
break;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index ef9fe13ffa..dbc1d483f2 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1535,7 +1535,8 @@ static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ crypto_skcipher_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
return 0;
@@ -1551,7 +1552,8 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ crypto_skcipher_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
return 0;
@@ -1561,7 +1563,8 @@ static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ crypto_skcipher_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
return 0;
@@ -1571,7 +1574,8 @@ static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
+ crypto_skcipher_set_reqsize(tfm,
+ sizeof(struct artpec6_crypto_request_context));
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
return 0;
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 10968ddb14..1a3ecd44cb 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -3517,25 +3517,6 @@ static struct iproc_alg_s driver_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .base.cra_name = "ofb(des)",
- .base.cra_driver_name = "ofb-des-iproc",
- .base.cra_blocksize = DES_BLOCK_SIZE,
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .ivsize = DES_BLOCK_SIZE,
- },
- .cipher_info = {
- .alg = CIPHER_ALG_DES,
- .mode = CIPHER_MODE_OFB,
- },
- .auth_info = {
- .alg = HASH_ALG_NONE,
- .mode = HASH_MODE_NONE,
- },
- },
- {
- .type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
.base.cra_name = "cbc(des)",
.base.cra_driver_name = "cbc-des-iproc",
.base.cra_blocksize = DES_BLOCK_SIZE,
@@ -3574,25 +3555,6 @@ static struct iproc_alg_s driver_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .base.cra_name = "ofb(des3_ede)",
- .base.cra_driver_name = "ofb-des3-iproc",
- .base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
- .min_keysize = DES3_EDE_KEY_SIZE,
- .max_keysize = DES3_EDE_KEY_SIZE,
- .ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .cipher_info = {
- .alg = CIPHER_ALG_3DES,
- .mode = CIPHER_MODE_OFB,
- },
- .auth_info = {
- .alg = HASH_ALG_NONE,
- .mode = HASH_MODE_NONE,
- },
- },
- {
- .type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cbc-des3-iproc",
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
@@ -3631,25 +3593,6 @@ static struct iproc_alg_s driver_algs[] = {
{
.type = CRYPTO_ALG_TYPE_SKCIPHER,
.alg.skcipher = {
- .base.cra_name = "ofb(aes)",
- .base.cra_driver_name = "ofb-aes-iproc",
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_info = {
- .alg = CIPHER_ALG_AES,
- .mode = CIPHER_MODE_OFB,
- },
- .auth_info = {
- .alg = HASH_ALG_NONE,
- .mode = HASH_MODE_NONE,
- },
- },
- {
- .type = CRYPTO_ALG_TYPE_SKCIPHER,
- .alg.skcipher = {
.base.cra_name = "cbc(aes)",
.base.cra_driver_name = "cbc-aes-iproc",
.base.cra_blocksize = AES_BLOCK_SIZE,
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index ee476c6c7f..219fe9be76 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -311,12 +311,6 @@ static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
return cvm_setkey(cipher, key, keylen, AES_ECB);
}
-static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
- u32 keylen)
-{
- return cvm_setkey(cipher, key, keylen, AES_CFB);
-}
-
static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
@@ -394,24 +388,6 @@ static struct skcipher_alg algs[] = { {
}, {
.base.cra_flags = CRYPTO_ALG_ASYNC |
CRYPTO_ALG_ALLOCATES_MEMORY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
- .base.cra_alignmask = 7,
- .base.cra_priority = 4001,
- .base.cra_name = "cfb(aes)",
- .base.cra_driver_name = "cavium-cfb-aes",
- .base.cra_module = THIS_MODULE,
-
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = cvm_cfb_aes_setkey,
- .encrypt = cvm_encrypt,
- .decrypt = cvm_decrypt,
- .init = cvm_enc_dec_init,
-}, {
- .base.cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY,
.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
.base.cra_alignmask = 7,
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 138261dcd0..6e5e667bab 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -421,25 +421,6 @@ static struct skcipher_alg nitrox_skciphers[] = { {
.exit = nitrox_skcipher_exit,
}, {
.base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "n5_cfb(aes)",
- .cra_priority = PRIO,
- .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct nitrox_crypto_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- },
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = nitrox_aes_setkey,
- .encrypt = nitrox_aes_encrypt,
- .decrypt = nitrox_aes_decrypt,
- .init = nitrox_skcipher_init,
- .exit = nitrox_skcipher_exit,
-}, {
- .base = {
.cra_name = "xts(aes)",
.cra_driver_name = "n5_xts(aes)",
.cra_priority = PRIO,
diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
index 918e223f21..d11daaf47f 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes.c
@@ -267,24 +267,6 @@ static struct ccp_aes_def aes_algs[] = {
.alg_defaults = &ccp_aes_defaults,
},
{
- .mode = CCP_AES_MODE_CFB,
- .version = CCP_VERSION(3, 0),
- .name = "cfb(aes)",
- .driver_name = "cfb-aes-ccp",
- .blocksize = 1,
- .ivsize = AES_BLOCK_SIZE,
- .alg_defaults = &ccp_aes_defaults,
- },
- {
- .mode = CCP_AES_MODE_OFB,
- .version = CCP_VERSION(3, 0),
- .name = "ofb(aes)",
- .driver_name = "ofb-aes-ccp",
- .blocksize = 1,
- .ivsize = AES_BLOCK_SIZE,
- .alg_defaults = &ccp_aes_defaults,
- },
- {
.mode = CCP_AES_MODE_CTR,
.version = CCP_VERSION(3, 0),
.name = "ctr(aes)",
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 53b217a621..b04bc1d3d6 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -912,7 +912,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
/*
* The length of the ID shouldn't be assumed by software since
* it may change in the future. The allocation size is limited
- * to 1 << (PAGE_SHIFT + MAX_ORDER) by the page allocator.
+ * to 1 << (PAGE_SHIFT + MAX_PAGE_ORDER) by the page allocator.
* If the allocation fails, simply return ENOMEM rather than
* warning in the kernel log.
*/
diff --git a/drivers/crypto/ccree/cc_aead.c b/drivers/crypto/ccree/cc_aead.c
index 109ffb375f..5ef39d6823 100644
--- a/drivers/crypto/ccree/cc_aead.c
+++ b/drivers/crypto/ccree/cc_aead.c
@@ -2569,9 +2569,13 @@ static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
alg = &tmpl->template_aead;
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- tmpl->driver_name);
+ if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CC_CRA_PRIO;
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 2cd44d7457..cd66a580e8 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -1080,24 +1080,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.sec_func = true,
},
{
- .name = "ofb(paes)",
- .driver_name = "ofb-paes-ccree",
- .blocksize = AES_BLOCK_SIZE,
- .template_skcipher = {
- .setkey = cc_cipher_sethkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = CC_HW_KEY_SIZE,
- .max_keysize = CC_HW_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_OFB,
- .flow_mode = S_DIN_to_AES,
- .min_hw_rev = CC_HW_REV_712,
- .std_body = CC_STD_NIST,
- .sec_func = true,
- },
- {
.name = "cts(cbc(paes))",
.driver_name = "cts-cbc-paes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1206,23 +1188,6 @@ static const struct cc_alg_template skcipher_algs[] = {
.std_body = CC_STD_NIST,
},
{
- .name = "ofb(aes)",
- .driver_name = "ofb-aes-ccree",
- .blocksize = 1,
- .template_skcipher = {
- .setkey = cc_cipher_setkey,
- .encrypt = cc_cipher_encrypt,
- .decrypt = cc_cipher_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- },
- .cipher_mode = DRV_CIPHER_OFB,
- .flow_mode = S_DIN_to_AES,
- .min_hw_rev = CC_HW_REV_630,
- .std_body = CC_STD_NIST,
- },
- {
.name = "cts(cbc(aes))",
.driver_name = "cts-cbc-aes-ccree",
.blocksize = AES_BLOCK_SIZE,
@@ -1427,9 +1392,13 @@ static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));
- snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
- snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- tmpl->driver_name);
+ if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+ if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ tmpl->driver_name) >= CRYPTO_MAX_ALG_NAME)
+ return ERR_PTR(-EINVAL);
+
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CC_CRA_PRIO;
alg->base.cra_blocksize = tmpl->blocksize;
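Note: both ccree hunks above turn the previously unchecked snprintf() copies of the template names into hard failures on truncation. snprintf() returns the length the full string would have required, so a result >= CRYPTO_MAX_ALG_NAME means cra_name or cra_driver_name would have been silently cut short. A small hypothetical helper illustrating the check (not part of the driver):

#include <linux/crypto.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Copy an algorithm name, refusing registration if it would be truncated. */
static int copy_alg_name(char dst[CRYPTO_MAX_ALG_NAME], const char *src)
{
	/* snprintf() reports the untruncated length; >= size means the
	 * destination buffer was too small. */
	if (snprintf(dst, CRYPTO_MAX_ALG_NAME, "%s", src) >= CRYPTO_MAX_ALG_NAME)
		return -EINVAL;

	return 0;
}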
diff --git a/drivers/crypto/gemini/sl3516-ce-cipher.c b/drivers/crypto/gemini/sl3516-ce-cipher.c
index 49dce9e0a8..583010b2d0 100644
--- a/drivers/crypto/gemini/sl3516-ce-cipher.c
+++ b/drivers/crypto/gemini/sl3516-ce-cipher.c
@@ -332,8 +332,8 @@ int sl3516_ce_cipher_init(struct crypto_tfm *tfm)
return PTR_ERR(op->fallback_tfm);
}
- sktfm->reqsize = sizeof(struct sl3516_ce_cipher_req_ctx) +
- crypto_skcipher_reqsize(op->fallback_tfm);
+ crypto_skcipher_set_reqsize(sktfm, sizeof(struct sl3516_ce_cipher_req_ctx) +
+ crypto_skcipher_reqsize(op->fallback_tfm));
dev_info(op->ce->dev, "Fallback for %s is %s\n",
crypto_tfm_alg_driver_name(&sktfm->base),
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 7bddc3c786..b4a4ec35bc 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2096,16 +2096,6 @@ static inline int hifn_encrypt_aes_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_aes_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_encrypt_aes_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
-}
/*
* AES decryption functions.
@@ -2120,16 +2110,6 @@ static inline int hifn_decrypt_aes_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_aes_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_decrypt_aes_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_AES_128, ACRYPTO_MODE_OFB);
-}
/*
* DES ecryption functions.
@@ -2144,16 +2124,6 @@ static inline int hifn_encrypt_des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_des_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_encrypt_des_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
-}
/*
* DES decryption functions.
@@ -2168,16 +2138,6 @@ static inline int hifn_decrypt_des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_des_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_DES, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_decrypt_des_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_DES, ACRYPTO_MODE_OFB);
-}
/*
* 3DES ecryption functions.
@@ -2192,16 +2152,6 @@ static inline int hifn_encrypt_3des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_encrypt_3des_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_encrypt_3des_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_ENCRYPT,
- ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
-}
/* 3DES decryption functions. */
static inline int hifn_decrypt_3des_ecb(struct skcipher_request *req)
@@ -2214,16 +2164,6 @@ static inline int hifn_decrypt_3des_cbc(struct skcipher_request *req)
return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CBC);
}
-static inline int hifn_decrypt_3des_cfb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_3DES, ACRYPTO_MODE_CFB);
-}
-static inline int hifn_decrypt_3des_ofb(struct skcipher_request *req)
-{
- return hifn_setup_crypto(req, ACRYPTO_OP_DECRYPT,
- ACRYPTO_TYPE_3DES, ACRYPTO_MODE_OFB);
-}
struct hifn_alg_template {
char name[CRYPTO_MAX_ALG_NAME];
@@ -2234,29 +2174,9 @@ struct hifn_alg_template {
static const struct hifn_alg_template hifn_alg_templates[] = {
/*
- * 3DES ECB, CBC, CFB and OFB modes.
+ * 3DES ECB and CBC modes.
*/
{
- .name = "cfb(des3_ede)", .drv_name = "cfb-3des", .bsize = 8,
- .skcipher = {
- .min_keysize = HIFN_3DES_KEY_LENGTH,
- .max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_des3_setkey,
- .encrypt = hifn_encrypt_3des_cfb,
- .decrypt = hifn_decrypt_3des_cfb,
- },
- },
- {
- .name = "ofb(des3_ede)", .drv_name = "ofb-3des", .bsize = 8,
- .skcipher = {
- .min_keysize = HIFN_3DES_KEY_LENGTH,
- .max_keysize = HIFN_3DES_KEY_LENGTH,
- .setkey = hifn_des3_setkey,
- .encrypt = hifn_encrypt_3des_ofb,
- .decrypt = hifn_decrypt_3des_ofb,
- },
- },
- {
.name = "cbc(des3_ede)", .drv_name = "cbc-3des", .bsize = 8,
.skcipher = {
.ivsize = HIFN_IV_LENGTH,
@@ -2279,29 +2199,9 @@ static const struct hifn_alg_template hifn_alg_templates[] = {
},
/*
- * DES ECB, CBC, CFB and OFB modes.
+ * DES ECB and CBC modes.
*/
{
- .name = "cfb(des)", .drv_name = "cfb-des", .bsize = 8,
- .skcipher = {
- .min_keysize = HIFN_DES_KEY_LENGTH,
- .max_keysize = HIFN_DES_KEY_LENGTH,
- .setkey = hifn_setkey,
- .encrypt = hifn_encrypt_des_cfb,
- .decrypt = hifn_decrypt_des_cfb,
- },
- },
- {
- .name = "ofb(des)", .drv_name = "ofb-des", .bsize = 8,
- .skcipher = {
- .min_keysize = HIFN_DES_KEY_LENGTH,
- .max_keysize = HIFN_DES_KEY_LENGTH,
- .setkey = hifn_setkey,
- .encrypt = hifn_encrypt_des_ofb,
- .decrypt = hifn_decrypt_des_ofb,
- },
- },
- {
.name = "cbc(des)", .drv_name = "cbc-des", .bsize = 8,
.skcipher = {
.ivsize = HIFN_IV_LENGTH,
@@ -2324,7 +2224,7 @@ static const struct hifn_alg_template hifn_alg_templates[] = {
},
/*
- * AES ECB, CBC, CFB and OFB modes.
+ * AES ECB and CBC modes.
*/
{
.name = "ecb(aes)", .drv_name = "ecb-aes", .bsize = 16,
@@ -2347,26 +2247,6 @@ static const struct hifn_alg_template hifn_alg_templates[] = {
.decrypt = hifn_decrypt_aes_cbc,
},
},
- {
- .name = "cfb(aes)", .drv_name = "cfb-aes", .bsize = 16,
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = hifn_setkey,
- .encrypt = hifn_encrypt_aes_cfb,
- .decrypt = hifn_decrypt_aes_cfb,
- },
- },
- {
- .name = "ofb(aes)", .drv_name = "ofb-aes", .bsize = 16,
- .skcipher = {
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = hifn_setkey,
- .encrypt = hifn_encrypt_aes_ofb,
- .decrypt = hifn_decrypt_aes_ofb,
- },
- },
};
static int hifn_init_tfm(struct crypto_skcipher *tfm)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 7e8186fe05..80ed4b2d20 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -31,6 +31,10 @@ static const char * const qm_debug_file_name[] = {
[CLEAR_ENABLE] = "clear_enable",
};
+static const char * const qm_s[] = {
+ "work", "stop",
+};
+
struct qm_dfx_item {
const char *name;
u32 offset;
@@ -53,34 +57,34 @@ static struct qm_dfx_item qm_dfx_files[] = {
#define CNT_CYC_REGS_NUM 10
static const struct debugfs_reg32 qm_dfx_regs[] = {
/* XXX_CNT are reading clear register */
- {"QM_ECC_1BIT_CNT ", 0x104000ull},
- {"QM_ECC_MBIT_CNT ", 0x104008ull},
- {"QM_DFX_MB_CNT ", 0x104018ull},
- {"QM_DFX_DB_CNT ", 0x104028ull},
- {"QM_DFX_SQE_CNT ", 0x104038ull},
- {"QM_DFX_CQE_CNT ", 0x104048ull},
- {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
- {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
- {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
- {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
- {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
- {"QM_ECC_1BIT_INF ", 0x104004ull},
- {"QM_ECC_MBIT_INF ", 0x10400cull},
- {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
- {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
- {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
- {"QM_DFX_FF_ST0 ", 0x1040c8ull},
- {"QM_DFX_FF_ST1 ", 0x1040ccull},
- {"QM_DFX_FF_ST2 ", 0x1040d0ull},
- {"QM_DFX_FF_ST3 ", 0x1040d4ull},
- {"QM_DFX_FF_ST4 ", 0x1040d8ull},
- {"QM_DFX_FF_ST5 ", 0x1040dcull},
- {"QM_DFX_FF_ST6 ", 0x1040e0ull},
- {"QM_IN_IDLE_ST ", 0x1040e4ull},
+ {"QM_ECC_1BIT_CNT ", 0x104000},
+ {"QM_ECC_MBIT_CNT ", 0x104008},
+ {"QM_DFX_MB_CNT ", 0x104018},
+ {"QM_DFX_DB_CNT ", 0x104028},
+ {"QM_DFX_SQE_CNT ", 0x104038},
+ {"QM_DFX_CQE_CNT ", 0x104048},
+ {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050},
+ {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058},
+ {"QM_DFX_ACC_FINISH_CNT ", 0x104060},
+ {"QM_DFX_CQE_ERR_CNT ", 0x1040b4},
+ {"QM_DFX_FUNS_ACTIVE_ST ", 0x200},
+ {"QM_ECC_1BIT_INF ", 0x104004},
+ {"QM_ECC_MBIT_INF ", 0x10400c},
+ {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0},
+ {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4},
+ {"QM_DFX_AXI_RDY_VLD ", 0x1040a8},
+ {"QM_DFX_FF_ST0 ", 0x1040c8},
+ {"QM_DFX_FF_ST1 ", 0x1040cc},
+ {"QM_DFX_FF_ST2 ", 0x1040d0},
+ {"QM_DFX_FF_ST3 ", 0x1040d4},
+ {"QM_DFX_FF_ST4 ", 0x1040d8},
+ {"QM_DFX_FF_ST5 ", 0x1040dc},
+ {"QM_DFX_FF_ST6 ", 0x1040e0},
+ {"QM_IN_IDLE_ST ", 0x1040e4},
};
static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
- {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
+ {"QM_DFX_FUNS_ACTIVE_ST ", 0x200},
};
/* define the QM's dfx regs region and region length */
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 40da95dbab..4b20b94e63 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -129,16 +129,21 @@
#define QM_FIFO_OVERFLOW_TYPE 0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
#define QM_FIFO_OVERFLOW_VF 0x3f
+#define QM_FIFO_OVERFLOW_QP_SHIFT 16
#define QM_ABNORMAL_INF01 0x100014
#define QM_DB_TIMEOUT_TYPE 0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT 6
#define QM_DB_TIMEOUT_VF 0x3f
+#define QM_DB_TIMEOUT_QP_SHIFT 16
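/*
 * Layout used by qm_log_hw_error() when decoding QM_ABNORMAL_INF00/01:
 * bits [5:0] hold the function (VF) number, bits [7:6] the queue type,
 * and bits [31:16] the queue pair id.
 */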
+#define QM_ABNORMAL_INF02 0x100018
+#define QM_AXI_POISON_ERR BIT(22)
#define QM_RAS_CE_ENABLE 0x1000ec
#define QM_RAS_FE_ENABLE 0x1000f0
#define QM_RAS_NFE_ENABLE 0x1000f4
#define QM_RAS_CE_THRESHOLD 0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
+#define QM_AXI_RRESP_ERR BIT(0)
#define QM_ECC_MBIT BIT(2)
#define QM_DB_TIMEOUT BIT(10)
#define QM_OF_FIFO_OF BIT(11)
@@ -402,7 +407,6 @@ static const struct hisi_qm_hw_error qm_hw_error[] = {
{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
{ .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
{ .int_msk = BIT(14), .msg = "qm_flr_timeout" },
- { /* sentinel */ }
};
static const char * const qm_db_timeout[] = {
@@ -413,10 +417,6 @@ static const char * const qm_fifo_overflow[] = {
"cq", "eq", "aeq",
};
-static const char * const qp_s[] = {
- "none", "init", "start", "stop", "close",
-};
-
struct qm_typical_qos_table {
u32 start;
u32 end;
@@ -444,85 +444,6 @@ static struct qm_typical_qos_table shaper_cbs_s[] = {
static void qm_irqs_unregister(struct hisi_qm *qm);
-static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
-{
- enum qm_state curr = atomic_read(&qm->status.flags);
- bool avail = false;
-
- switch (curr) {
- case QM_INIT:
- if (new == QM_START || new == QM_CLOSE)
- avail = true;
- break;
- case QM_START:
- if (new == QM_STOP)
- avail = true;
- break;
- case QM_STOP:
- if (new == QM_CLOSE || new == QM_START)
- avail = true;
- break;
- default:
- break;
- }
-
- dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
- qm_s[curr], qm_s[new]);
-
- if (!avail)
- dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
- qm_s[curr], qm_s[new]);
-
- return avail;
-}
-
-static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
- enum qp_state new)
-{
- enum qm_state qm_curr = atomic_read(&qm->status.flags);
- enum qp_state qp_curr = 0;
- bool avail = false;
-
- if (qp)
- qp_curr = atomic_read(&qp->qp_status.flags);
-
- switch (new) {
- case QP_INIT:
- if (qm_curr == QM_START || qm_curr == QM_INIT)
- avail = true;
- break;
- case QP_START:
- if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
- (qm_curr == QM_START && qp_curr == QP_STOP))
- avail = true;
- break;
- case QP_STOP:
- if ((qm_curr == QM_START && qp_curr == QP_START) ||
- (qp_curr == QP_INIT))
- avail = true;
- break;
- case QP_CLOSE:
- if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
- (qm_curr == QM_START && qp_curr == QP_STOP) ||
- (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
- (qm_curr == QM_STOP && qp_curr == QP_INIT))
- avail = true;
- break;
- default:
- break;
- }
-
- dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
- qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
-
- if (!avail)
- dev_warn(&qm->pdev->dev,
- "Can not change qp state from %s to %s in QM %s\n",
- qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
-
- return avail;
-}
-
static u32 qm_get_hw_error_status(struct hisi_qm *qm)
{
return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
@@ -676,9 +597,6 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
struct qm_mailbox mailbox;
int ret;
- dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
- queue, cmd, (unsigned long long)dma_addr);
-
qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
mutex_lock(&qm->mailbox_lock);
@@ -1456,7 +1374,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
{
const struct hisi_qm_hw_error *err;
struct device *dev = &qm->pdev->dev;
- u32 reg_val, type, vf_num;
+ u32 reg_val, type, vf_num, qp_id;
int i;
for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
@@ -1472,19 +1390,24 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
QM_DB_TIMEOUT_TYPE_SHIFT;
vf_num = reg_val & QM_DB_TIMEOUT_VF;
- dev_err(dev, "qm %s doorbell timeout in function %u\n",
- qm_db_timeout[type], vf_num);
+ qp_id = reg_val >> QM_DB_TIMEOUT_QP_SHIFT;
+ dev_err(dev, "qm %s doorbell timeout in function %u qp %u\n",
+ qm_db_timeout[type], vf_num, qp_id);
} else if (err->int_msk & QM_OF_FIFO_OF) {
reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
QM_FIFO_OVERFLOW_TYPE_SHIFT;
vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
-
+ qp_id = reg_val >> QM_FIFO_OVERFLOW_QP_SHIFT;
if (type < ARRAY_SIZE(qm_fifo_overflow))
- dev_err(dev, "qm %s fifo overflow in function %u\n",
- qm_fifo_overflow[type], vf_num);
+ dev_err(dev, "qm %s fifo overflow in function %u qp %u\n",
+ qm_fifo_overflow[type], vf_num, qp_id);
else
dev_err(dev, "unknown error type\n");
+ } else if (err->int_msk & QM_AXI_RRESP_ERR) {
+ reg_val = readl(qm->io_base + QM_ABNORMAL_INF02);
+ if (reg_val & QM_AXI_POISON_ERR)
+ dev_err(dev, "qm axi poison error happened\n");
}
}
}
@@ -1893,8 +1816,10 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
struct hisi_qp *qp;
int qp_id;
- if (!qm_qp_avail_state(qm, NULL, QP_INIT))
+ if (atomic_read(&qm->status.flags) == QM_STOP) {
+ dev_info_ratelimited(dev, "failed to create qp as qm is stop!\n");
return ERR_PTR(-EPERM);
+ }
if (qm->qp_in_used == qm->qp_num) {
dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
@@ -1921,7 +1846,6 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
qp->alg_type = alg_type;
qp->is_in_kernel = true;
qm->qp_in_used++;
- atomic_set(&qp->qp_status.flags, QP_INIT);
return qp;
}
@@ -1964,11 +1888,6 @@ static void hisi_qm_release_qp(struct hisi_qp *qp)
down_write(&qm->qps_lock);
- if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
- up_write(&qm->qps_lock);
- return;
- }
-
qm->qp_in_used--;
idr_remove(&qm->qp_idr, qp->qp_id);
@@ -2016,6 +1935,11 @@ static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
cqc.w8 = 0; /* rand_qc */
}
+ /*
+ * Enable request-finishing interrupts by default; interrupts will
+ * keep arriving until this is disabled.
+ */
cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
@@ -2048,8 +1972,10 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
u32 pasid = arg;
int ret;
- if (!qm_qp_avail_state(qm, qp, QP_START))
+ if (atomic_read(&qm->status.flags) == QM_STOP) {
+ dev_info_ratelimited(dev, "failed to start qp as qm is stop!\n");
return -EPERM;
+ }
ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
if (ret)
@@ -2171,21 +2097,17 @@ static int qm_stop_qp_nolock(struct hisi_qp *qp)
* is_resetting flag should be set negative so that this qp will not
* be restarted after reset.
*/
- if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
+ if (atomic_read(&qp->qp_status.flags) != QP_START) {
qp->is_resetting = false;
return 0;
}
- if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
- return -EPERM;
-
atomic_set(&qp->qp_status.flags, QP_STOP);
ret = qm_drain_qp(qp);
if (ret)
dev_err(dev, "Failed to drain out data for stopping!\n");
-
flush_workqueue(qp->qm->wq);
if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
qp_stop_fail_cb(qp);
@@ -2905,13 +2827,8 @@ void hisi_qm_uninit(struct hisi_qm *qm)
{
qm_cmd_uninit(qm);
hisi_qm_unint_work(qm);
- down_write(&qm->qps_lock);
-
- if (!qm_avail_state(qm, QM_CLOSE)) {
- up_write(&qm->qps_lock);
- return;
- }
+ down_write(&qm->qps_lock);
hisi_qm_memory_uninit(qm);
hisi_qm_set_state(qm, QM_NOT_READY);
up_write(&qm->qps_lock);
@@ -3085,11 +3002,6 @@ int hisi_qm_start(struct hisi_qm *qm)
down_write(&qm->qps_lock);
- if (!qm_avail_state(qm, QM_START)) {
- up_write(&qm->qps_lock);
- return -EPERM;
- }
-
dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
if (!qm->qp_num) {
@@ -3099,10 +3011,12 @@ int hisi_qm_start(struct hisi_qm *qm)
}
ret = __hisi_qm_start(qm);
- if (!ret)
- atomic_set(&qm->status.flags, QM_START);
+ if (ret)
+ goto err_unlock;
+ atomic_set(&qm->status.flags, QM_WORK);
hisi_qm_set_state(qm, QM_READY);
+
err_unlock:
up_write(&qm->qps_lock);
return ret;
@@ -3199,10 +3113,11 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
down_write(&qm->qps_lock);
qm->status.stop_reason = r;
- if (!qm_avail_state(qm, QM_STOP)) {
- ret = -EPERM;
+ if (atomic_read(&qm->status.flags) == QM_STOP)
goto err_unlock;
- }
+
+ /* Stop all the request sending at first. */
+ atomic_set(&qm->status.flags, QM_STOP);
if (qm->status.stop_reason == QM_SOFT_RESET ||
qm->status.stop_reason == QM_DOWN) {
@@ -3226,7 +3141,6 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
qm_clear_queues(qm);
- atomic_set(&qm->status.flags, QM_STOP);
err_unlock:
up_write(&qm->qps_lock);
@@ -4016,6 +3930,11 @@ static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
int pos;
int i;
+ /*
+ * Since function qm_set_vf_mse is called only after SRIOV is enabled,
+ * pci_find_ext_capability cannot return 0, pos does not need to be
+ * checked.
+ */
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
if (set)
@@ -5418,7 +5337,6 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_free_qm_memory;
qm_cmd_init(qm);
- atomic_set(&qm->status.flags, QM_INIT);
return 0;
diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
index 7b0b15c83e..0760bf55f1 100644
--- a/drivers/crypto/hisilicon/qm_common.h
+++ b/drivers/crypto/hisilicon/qm_common.h
@@ -72,10 +72,6 @@ struct qm_aeqc {
__le32 dw6;
};
-static const char * const qm_s[] = {
- "init", "start", "close", "stop",
-};
-
int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op);
void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
void hisi_qm_set_algqos_init(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index ba7f305d43..f028dcfd0e 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -850,6 +850,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
break;
default:
+ dev_err(dev, "sec c_alg err!\n");
return -EINVAL;
}
@@ -879,15 +880,11 @@ static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
-GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB)
-GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
-GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB)
-GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB)
GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
@@ -1176,7 +1173,8 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
return 0;
}
- if (crypto_authenc_extractkeys(&keys, key, keylen))
+ ret = crypto_authenc_extractkeys(&keys, key, keylen);
+ if (ret)
goto bad_key;
ret = sec_aead_aes_set_key(c_ctx, &keys);
@@ -1193,6 +1191,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) ||
(ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) {
+ ret = -EINVAL;
dev_err(dev, "MAC or AUTH key length error!\n");
goto bad_key;
}
@@ -1201,7 +1200,7 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
bad_key:
memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
- return -EINVAL;
+ return ret;
}
@@ -2032,8 +2031,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
ret = -EINVAL;
}
break;
- case SEC_CMODE_CFB:
- case SEC_CMODE_OFB:
case SEC_CMODE_CTR:
if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
dev_err(dev, "skcipher HW version error!\n");
@@ -2198,16 +2195,6 @@ static struct sec_skcipher sec_skciphers[] = {
SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
},
{
- .alg_msk = BIT(4),
- .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE,
- AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
- },
- {
- .alg_msk = BIT(5),
- .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE,
- AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
- },
- {
.alg_msk = BIT(12),
.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
@@ -2223,16 +2210,6 @@ static struct sec_skcipher sec_skciphers[] = {
SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
},
{
- .alg_msk = BIT(15),
- .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE,
- AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
- },
- {
- .alg_msk = BIT(16),
- .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE,
- AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
- },
- {
.alg_msk = BIT(23),
.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h
index d033f63b58..27a0ee5ad9 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.h
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h
@@ -37,8 +37,6 @@ enum sec_mac_len {
enum sec_cmode {
SEC_CMODE_ECB = 0x0,
SEC_CMODE_CBC = 0x1,
- SEC_CMODE_CFB = 0x2,
- SEC_CMODE_OFB = 0x3,
SEC_CMODE_CTR = 0x4,
SEC_CMODE_CCM = 0x5,
SEC_CMODE_GCM = 0x6,
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 878d94ab5d..7bb99381bb 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -153,7 +153,7 @@ static const struct hisi_qm_cap_info sec_basic_info[] = {
{SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4},
{SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4},
{SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF},
- {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF},
+ {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x18670CF},
{SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C},
{SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
{SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF},
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 3df7a256e9..0beca257c2 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -70,11 +70,11 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
HISI_ACC_SGL_ALIGN_SIZE);
/*
- * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_ORDER,
+ * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_PAGE_ORDER,
* block size may exceed 2^31 on ia64, so the max of block size is 2^31
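 * (for example, with 4 KiB pages and a typical MAX_PAGE_ORDER of 10,
 * this gives a block size of 1 << 22 bytes, i.e. 4 MiB)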
*/
- block_size = 1 << (PAGE_SHIFT + MAX_ORDER < 32 ?
- PAGE_SHIFT + MAX_ORDER : 31);
+ block_size = 1 << (PAGE_SHIFT + MAX_PAGE_ORDER < 32 ?
+ PAGE_SHIFT + MAX_PAGE_ORDER : 31);
sgl_num_per_block = block_size / sgl_size;
block_num = count / sgl_num_per_block;
remain_sgl = count % sgl_num_per_block;
@@ -121,10 +121,10 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
return pool;
err_free_mem:
- for (j = 0; j < i; j++) {
+ for (j = 0; j < i; j++)
dma_free_coherent(dev, block_size, block[j].sgl,
block[j].sgl_dma);
- }
+
kfree_sensitive(pool);
return ERR_PTR(-ENOMEM);
}
@@ -140,7 +140,7 @@ EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
struct mem_block *block;
- int i;
+ u32 i;
if (!dev || !pool)
return;
@@ -196,9 +196,10 @@ static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
+ u16 entry_sum = le16_to_cpu(hw_sgl->entry_sum_in_sgl);
int i;
- for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
+ for (i = 0; i < entry_sum; i++) {
hw_sge[i].page_ctrl = NULL;
hw_sge[i].buf = 0;
hw_sge[i].len = 0;
@@ -223,10 +224,11 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
u32 index, dma_addr_t *hw_sgl_dma)
{
struct hisi_acc_hw_sgl *curr_hw_sgl;
+ unsigned int i, sg_n_mapped;
dma_addr_t curr_sgl_dma = 0;
struct acc_hw_sge *curr_hw_sge;
struct scatterlist *sg;
- int i, sg_n, sg_n_mapped;
+ int sg_n;
if (!dev || !sgl || !pool || !hw_sgl_dma)
return ERR_PTR(-EINVAL);
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 403b074688..479ba8a1d6 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -292,28 +292,28 @@ static const u64 core_offsets[] = {
};
static const struct debugfs_reg32 hzip_dfx_regs[] = {
- {"HZIP_GET_BD_NUM ", 0x00ull},
- {"HZIP_GET_RIGHT_BD ", 0x04ull},
- {"HZIP_GET_ERROR_BD ", 0x08ull},
- {"HZIP_DONE_BD_NUM ", 0x0cull},
- {"HZIP_WORK_CYCLE ", 0x10ull},
- {"HZIP_IDLE_CYCLE ", 0x18ull},
- {"HZIP_MAX_DELAY ", 0x20ull},
- {"HZIP_MIN_DELAY ", 0x24ull},
- {"HZIP_AVG_DELAY ", 0x28ull},
- {"HZIP_MEM_VISIBLE_DATA ", 0x30ull},
- {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull},
- {"HZIP_CONSUMED_BYTE ", 0x38ull},
- {"HZIP_PRODUCED_BYTE ", 0x40ull},
- {"HZIP_COMP_INF ", 0x70ull},
- {"HZIP_PRE_OUT ", 0x78ull},
- {"HZIP_BD_RD ", 0x7cull},
- {"HZIP_BD_WR ", 0x80ull},
- {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull},
- {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull},
- {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull},
- {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull},
- {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull},
+ {"HZIP_GET_BD_NUM ", 0x00},
+ {"HZIP_GET_RIGHT_BD ", 0x04},
+ {"HZIP_GET_ERROR_BD ", 0x08},
+ {"HZIP_DONE_BD_NUM ", 0x0c},
+ {"HZIP_WORK_CYCLE ", 0x10},
+ {"HZIP_IDLE_CYCLE ", 0x18},
+ {"HZIP_MAX_DELAY ", 0x20},
+ {"HZIP_MIN_DELAY ", 0x24},
+ {"HZIP_AVG_DELAY ", 0x28},
+ {"HZIP_MEM_VISIBLE_DATA ", 0x30},
+ {"HZIP_MEM_VISIBLE_ADDR ", 0x34},
+ {"HZIP_CONSUMED_BYTE ", 0x38},
+ {"HZIP_PRODUCED_BYTE ", 0x40},
+ {"HZIP_COMP_INF ", 0x70},
+ {"HZIP_PRE_OUT ", 0x78},
+ {"HZIP_BD_RD ", 0x7c},
+ {"HZIP_BD_WR ", 0x80},
+ {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84},
+ {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88},
+ {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8c},
+ {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94},
+ {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9c},
};
static const struct debugfs_reg32 hzip_com_dfx_regs[] = {
@@ -325,11 +325,11 @@ static const struct debugfs_reg32 hzip_com_dfx_regs[] = {
};
static const struct debugfs_reg32 hzip_dump_dfx_regs[] = {
- {"HZIP_GET_BD_NUM ", 0x00ull},
- {"HZIP_GET_RIGHT_BD ", 0x04ull},
- {"HZIP_GET_ERROR_BD ", 0x08ull},
- {"HZIP_DONE_BD_NUM ", 0x0cull},
- {"HZIP_MAX_DELAY ", 0x20ull},
+ {"HZIP_GET_BD_NUM ", 0x00},
+ {"HZIP_GET_RIGHT_BD ", 0x04},
+ {"HZIP_GET_ERROR_BD ", 0x08},
+ {"HZIP_DONE_BD_NUM ", 0x0c},
+ {"HZIP_MAX_DELAY ", 0x20},
};
/* define the ZIP's dfx regs region and region length */
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 76da14af74..f5c1912aa5 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1191,8 +1191,6 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_cbc_des3_ede,
&safexcel_alg_ecb_aes,
&safexcel_alg_cbc_aes,
- &safexcel_alg_cfb_aes,
- &safexcel_alg_ofb_aes,
&safexcel_alg_ctr_aes,
&safexcel_alg_md5,
&safexcel_alg_sha1,
@@ -1231,8 +1229,6 @@ static struct safexcel_alg_template *safexcel_algs[] = {
&safexcel_alg_hmac_sm3,
&safexcel_alg_ecb_sm4,
&safexcel_alg_cbc_sm4,
- &safexcel_alg_ofb_sm4,
- &safexcel_alg_cfb_sm4,
&safexcel_alg_ctr_sm4,
&safexcel_alg_authenc_hmac_sha1_cbc_sm4,
&safexcel_alg_authenc_hmac_sm3_cbc_sm4,
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 47ef6c7cd0..d0059ce954 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -933,8 +933,6 @@ extern struct safexcel_alg_template safexcel_alg_ecb_des3_ede;
extern struct safexcel_alg_template safexcel_alg_cbc_des3_ede;
extern struct safexcel_alg_template safexcel_alg_ecb_aes;
extern struct safexcel_alg_template safexcel_alg_cbc_aes;
-extern struct safexcel_alg_template safexcel_alg_cfb_aes;
-extern struct safexcel_alg_template safexcel_alg_ofb_aes;
extern struct safexcel_alg_template safexcel_alg_ctr_aes;
extern struct safexcel_alg_template safexcel_alg_md5;
extern struct safexcel_alg_template safexcel_alg_sha1;
@@ -973,8 +971,6 @@ extern struct safexcel_alg_template safexcel_alg_sm3;
extern struct safexcel_alg_template safexcel_alg_hmac_sm3;
extern struct safexcel_alg_template safexcel_alg_ecb_sm4;
extern struct safexcel_alg_template safexcel_alg_cbc_sm4;
-extern struct safexcel_alg_template safexcel_alg_ofb_sm4;
-extern struct safexcel_alg_template safexcel_alg_cfb_sm4;
extern struct safexcel_alg_template safexcel_alg_ctr_sm4;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4;
extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4;
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index b83818634a..42677f7458 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1352,82 +1352,6 @@ struct safexcel_alg_template safexcel_alg_cbc_aes = {
},
};
-static int safexcel_skcipher_aes_cfb_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- safexcel_skcipher_cra_init(tfm);
- ctx->alg = SAFEXCEL_AES;
- ctx->blocksz = AES_BLOCK_SIZE;
- ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
- return 0;
-}
-
-struct safexcel_alg_template safexcel_alg_cfb_aes = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
- .alg.skcipher = {
- .setkey = safexcel_skcipher_aes_setkey,
- .encrypt = safexcel_encrypt,
- .decrypt = safexcel_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "safexcel-cfb-aes",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_aes_cfb_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
-};
-
-static int safexcel_skcipher_aes_ofb_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- safexcel_skcipher_cra_init(tfm);
- ctx->alg = SAFEXCEL_AES;
- ctx->blocksz = AES_BLOCK_SIZE;
- ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
- return 0;
-}
-
-struct safexcel_alg_template safexcel_alg_ofb_aes = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_AES_XFB,
- .alg.skcipher = {
- .setkey = safexcel_skcipher_aes_setkey,
- .encrypt = safexcel_encrypt,
- .decrypt = safexcel_decrypt,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .base = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "safexcel-ofb-aes",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_aes_ofb_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
-};
-
static int safexcel_skcipher_aesctr_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
@@ -3186,82 +3110,6 @@ struct safexcel_alg_template safexcel_alg_cbc_sm4 = {
},
};
-static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- safexcel_skcipher_cra_init(tfm);
- ctx->alg = SAFEXCEL_SM4;
- ctx->blocksz = SM4_BLOCK_SIZE;
- ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
- return 0;
-}
-
-struct safexcel_alg_template safexcel_alg_ofb_sm4 = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
- .alg.skcipher = {
- .setkey = safexcel_skcipher_sm4_setkey,
- .encrypt = safexcel_encrypt,
- .decrypt = safexcel_decrypt,
- .min_keysize = SM4_KEY_SIZE,
- .max_keysize = SM4_KEY_SIZE,
- .ivsize = SM4_BLOCK_SIZE,
- .base = {
- .cra_name = "ofb(sm4)",
- .cra_driver_name = "safexcel-ofb-sm4",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_sm4_ofb_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
-};
-
-static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
-{
- struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
-
- safexcel_skcipher_cra_init(tfm);
- ctx->alg = SAFEXCEL_SM4;
- ctx->blocksz = SM4_BLOCK_SIZE;
- ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
- return 0;
-}
-
-struct safexcel_alg_template safexcel_alg_cfb_sm4 = {
- .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
- .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
- .alg.skcipher = {
- .setkey = safexcel_skcipher_sm4_setkey,
- .encrypt = safexcel_encrypt,
- .decrypt = safexcel_decrypt,
- .min_keysize = SM4_KEY_SIZE,
- .max_keysize = SM4_KEY_SIZE,
- .ivsize = SM4_BLOCK_SIZE,
- .base = {
- .cra_name = "cfb(sm4)",
- .cra_driver_name = "safexcel-cfb-sm4",
- .cra_priority = SAFEXCEL_CRA_PRIORITY,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
- .cra_alignmask = 0,
- .cra_init = safexcel_skcipher_sm4_cfb_cra_init,
- .cra_exit = safexcel_skcipher_cra_exit,
- .cra_module = THIS_MODULE,
- },
- },
-};
-
static int safexcel_skcipher_sm4ctr_setkey(struct crypto_skcipher *ctfm,
const u8 *key, unsigned int len)
{
diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig
index 3d90c87d40..f38cd62a3f 100644
--- a/drivers/crypto/intel/Kconfig
+++ b/drivers/crypto/intel/Kconfig
@@ -3,3 +3,4 @@
source "drivers/crypto/intel/keembay/Kconfig"
source "drivers/crypto/intel/ixp4xx/Kconfig"
source "drivers/crypto/intel/qat/Kconfig"
+source "drivers/crypto/intel/iaa/Kconfig"
diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile
index b3d0352ae1..2f56f6d34c 100644
--- a/drivers/crypto/intel/Makefile
+++ b/drivers/crypto/intel/Makefile
@@ -3,3 +3,4 @@
obj-y += keembay/
obj-y += ixp4xx/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
+obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) += iaa/
diff --git a/drivers/crypto/intel/iaa/Kconfig b/drivers/crypto/intel/iaa/Kconfig
new file mode 100644
index 0000000000..d53f4b1d49
--- /dev/null
+++ b/drivers/crypto/intel/iaa/Kconfig
@@ -0,0 +1,19 @@
+config CRYPTO_DEV_IAA_CRYPTO
+ tristate "Support for Intel(R) IAA Compression Accelerator"
+ depends on CRYPTO_DEFLATE
+ depends on INTEL_IDXD
+ default n
+ help
+ This driver supports acceleration for compression and
+ decompression with the Intel Analytics Accelerator (IAA)
+ hardware using the cryptographic API. If you choose 'M'
+ here, the module will be called iaa_crypto.
+
+config CRYPTO_DEV_IAA_CRYPTO_STATS
+ bool "Enable Intel(R) IAA Compression Accelerator Statistics"
+ depends on CRYPTO_DEV_IAA_CRYPTO
+ default n
+ help
+ Enable statistics for the IAA compression accelerator.
+ These include per-device and per-workqueue statistics in
+ addition to global driver statistics.
diff --git a/drivers/crypto/intel/iaa/Makefile b/drivers/crypto/intel/iaa/Makefile
new file mode 100644
index 0000000000..b64b208d23
--- /dev/null
+++ b/drivers/crypto/intel/iaa/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for IAA crypto device drivers
+#
+
+ccflags-y += -I $(srctree)/drivers/dma/idxd -DDEFAULT_SYMBOL_NAMESPACE=IDXD
+
+obj-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO) := iaa_crypto.o
+
+iaa_crypto-y := iaa_crypto_main.o iaa_crypto_comp_fixed.o
+
+iaa_crypto-$(CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS) += iaa_crypto_stats.o
diff --git a/drivers/crypto/intel/iaa/iaa_crypto.h b/drivers/crypto/intel/iaa/iaa_crypto.h
new file mode 100644
index 0000000000..014420f7be
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#ifndef __IAA_CRYPTO_H__
+#define __IAA_CRYPTO_H__
+
+#include <linux/crypto.h>
+#include <linux/idxd.h>
+#include <uapi/linux/idxd.h>
+
+#define IDXD_SUBDRIVER_NAME "crypto"
+
+#define IAA_DECOMP_ENABLE BIT(0)
+#define IAA_DECOMP_FLUSH_OUTPUT BIT(1)
+#define IAA_DECOMP_CHECK_FOR_EOB BIT(2)
+#define IAA_DECOMP_STOP_ON_EOB BIT(3)
+#define IAA_DECOMP_SUPPRESS_OUTPUT BIT(9)
+
+#define IAA_COMP_FLUSH_OUTPUT BIT(1)
+#define IAA_COMP_APPEND_EOB BIT(2)
+
+#define IAA_COMPLETION_TIMEOUT 1000000
+
+#define IAA_ANALYTICS_ERROR 0x0a
+#define IAA_ERROR_DECOMP_BUF_OVERFLOW 0x0b
+#define IAA_ERROR_COMP_BUF_OVERFLOW 0x19
+#define IAA_ERROR_WATCHDOG_EXPIRED 0x24
+
+#define IAA_COMP_MODES_MAX 2
+
+#define FIXED_HDR 0x2
+#define FIXED_HDR_SIZE 3
+
+#define IAA_COMP_FLAGS (IAA_COMP_FLUSH_OUTPUT | \
+ IAA_COMP_APPEND_EOB)
+
+#define IAA_DECOMP_FLAGS (IAA_DECOMP_ENABLE | \
+ IAA_DECOMP_FLUSH_OUTPUT | \
+ IAA_DECOMP_CHECK_FOR_EOB | \
+ IAA_DECOMP_STOP_ON_EOB)
+
+/* Representation of IAA workqueue */
+struct iaa_wq {
+ struct list_head list;
+
+ struct idxd_wq *wq;
+ int ref;
+ bool remove;
+
+ struct iaa_device *iaa_device;
+
+ u64 comp_calls;
+ u64 comp_bytes;
+ u64 decomp_calls;
+ u64 decomp_bytes;
+};
+
+struct iaa_device_compression_mode {
+ const char *name;
+
+ struct aecs_comp_table_record *aecs_comp_table;
+ struct aecs_decomp_table_record *aecs_decomp_table;
+
+ dma_addr_t aecs_comp_table_dma_addr;
+ dma_addr_t aecs_decomp_table_dma_addr;
+};
+
+/* Representation of IAA device with wqs, populated by probe */
+struct iaa_device {
+ struct list_head list;
+ struct idxd_device *idxd;
+
+ struct iaa_device_compression_mode *compression_modes[IAA_COMP_MODES_MAX];
+
+ int n_wq;
+ struct list_head wqs;
+
+ u64 comp_calls;
+ u64 comp_bytes;
+ u64 decomp_calls;
+ u64 decomp_bytes;
+};
+
+struct wq_table_entry {
+ struct idxd_wq **wqs;
+ int max_wqs;
+ int n_wqs;
+ int cur_wq;
+};
+
+#define IAA_AECS_ALIGN 32
+
+/*
+ * Analytics Engine Configuration and State (AECS) contains parameters and
+ * internal state of the analytics engine.
+ */
+struct aecs_comp_table_record {
+ u32 crc;
+ u32 xor_checksum;
+ u32 reserved0[5];
+ u32 num_output_accum_bits;
+ u8 output_accum[256];
+ u32 ll_sym[286];
+ u32 reserved1;
+ u32 reserved2;
+ u32 d_sym[30];
+ u32 reserved_padding[2];
+} __packed;
+
+/* AECS for decompress */
+struct aecs_decomp_table_record {
+ u32 crc;
+ u32 xor_checksum;
+ u32 low_filter_param;
+ u32 high_filter_param;
+ u32 output_mod_idx;
+ u32 drop_init_decomp_out_bytes;
+ u32 reserved[36];
+ u32 output_accum_data[2];
+ u32 out_bits_valid;
+ u32 bit_off_indexing;
+ u32 input_accum_data[64];
+ u8 size_qw[32];
+ u32 decomp_state[1220];
+} __packed;
+
+int iaa_aecs_init_fixed(void);
+void iaa_aecs_cleanup_fixed(void);
+
+typedef int (*iaa_dev_comp_init_fn_t) (struct iaa_device_compression_mode *mode);
+typedef int (*iaa_dev_comp_free_fn_t) (struct iaa_device_compression_mode *mode);
+
+struct iaa_compression_mode {
+ const char *name;
+ u32 *ll_table;
+ int ll_table_size;
+ u32 *d_table;
+ int d_table_size;
+ u32 *header_table;
+ int header_table_size;
+ u16 gen_decomp_table_flags;
+ iaa_dev_comp_init_fn_t init;
+ iaa_dev_comp_free_fn_t free;
+};
+
+int add_iaa_compression_mode(const char *name,
+ const u32 *ll_table,
+ int ll_table_size,
+ const u32 *d_table,
+ int d_table_size,
+ const u8 *header_table,
+ int header_table_size,
+ u16 gen_decomp_table_flags,
+ iaa_dev_comp_init_fn_t init,
+ iaa_dev_comp_free_fn_t free);
+
+void remove_iaa_compression_mode(const char *name);
+
+enum iaa_mode {
+ IAA_MODE_FIXED,
+};
+
+struct iaa_compression_ctx {
+ enum iaa_mode mode;
+ bool verify_compress;
+ bool async_mode;
+ bool use_irq;
+};
+
+extern struct list_head iaa_devices;
+extern struct mutex iaa_devices_lock;
+
+#endif
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
new file mode 100644
index 0000000000..45cf5d74f0
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto_comp_fixed.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#include "idxd.h"
+#include "iaa_crypto.h"
+
+/*
+ * Fixed Huffman tables the IAA hardware requires to implement RFC-1951.
+ */
+static const u32 fixed_ll_sym[286] = {
+ 0x40030, 0x40031, 0x40032, 0x40033, 0x40034, 0x40035, 0x40036, 0x40037,
+ 0x40038, 0x40039, 0x4003A, 0x4003B, 0x4003C, 0x4003D, 0x4003E, 0x4003F,
+ 0x40040, 0x40041, 0x40042, 0x40043, 0x40044, 0x40045, 0x40046, 0x40047,
+ 0x40048, 0x40049, 0x4004A, 0x4004B, 0x4004C, 0x4004D, 0x4004E, 0x4004F,
+ 0x40050, 0x40051, 0x40052, 0x40053, 0x40054, 0x40055, 0x40056, 0x40057,
+ 0x40058, 0x40059, 0x4005A, 0x4005B, 0x4005C, 0x4005D, 0x4005E, 0x4005F,
+ 0x40060, 0x40061, 0x40062, 0x40063, 0x40064, 0x40065, 0x40066, 0x40067,
+ 0x40068, 0x40069, 0x4006A, 0x4006B, 0x4006C, 0x4006D, 0x4006E, 0x4006F,
+ 0x40070, 0x40071, 0x40072, 0x40073, 0x40074, 0x40075, 0x40076, 0x40077,
+ 0x40078, 0x40079, 0x4007A, 0x4007B, 0x4007C, 0x4007D, 0x4007E, 0x4007F,
+ 0x40080, 0x40081, 0x40082, 0x40083, 0x40084, 0x40085, 0x40086, 0x40087,
+ 0x40088, 0x40089, 0x4008A, 0x4008B, 0x4008C, 0x4008D, 0x4008E, 0x4008F,
+ 0x40090, 0x40091, 0x40092, 0x40093, 0x40094, 0x40095, 0x40096, 0x40097,
+ 0x40098, 0x40099, 0x4009A, 0x4009B, 0x4009C, 0x4009D, 0x4009E, 0x4009F,
+ 0x400A0, 0x400A1, 0x400A2, 0x400A3, 0x400A4, 0x400A5, 0x400A6, 0x400A7,
+ 0x400A8, 0x400A9, 0x400AA, 0x400AB, 0x400AC, 0x400AD, 0x400AE, 0x400AF,
+ 0x400B0, 0x400B1, 0x400B2, 0x400B3, 0x400B4, 0x400B5, 0x400B6, 0x400B7,
+ 0x400B8, 0x400B9, 0x400BA, 0x400BB, 0x400BC, 0x400BD, 0x400BE, 0x400BF,
+ 0x48190, 0x48191, 0x48192, 0x48193, 0x48194, 0x48195, 0x48196, 0x48197,
+ 0x48198, 0x48199, 0x4819A, 0x4819B, 0x4819C, 0x4819D, 0x4819E, 0x4819F,
+ 0x481A0, 0x481A1, 0x481A2, 0x481A3, 0x481A4, 0x481A5, 0x481A6, 0x481A7,
+ 0x481A8, 0x481A9, 0x481AA, 0x481AB, 0x481AC, 0x481AD, 0x481AE, 0x481AF,
+ 0x481B0, 0x481B1, 0x481B2, 0x481B3, 0x481B4, 0x481B5, 0x481B6, 0x481B7,
+ 0x481B8, 0x481B9, 0x481BA, 0x481BB, 0x481BC, 0x481BD, 0x481BE, 0x481BF,
+ 0x481C0, 0x481C1, 0x481C2, 0x481C3, 0x481C4, 0x481C5, 0x481C6, 0x481C7,
+ 0x481C8, 0x481C9, 0x481CA, 0x481CB, 0x481CC, 0x481CD, 0x481CE, 0x481CF,
+ 0x481D0, 0x481D1, 0x481D2, 0x481D3, 0x481D4, 0x481D5, 0x481D6, 0x481D7,
+ 0x481D8, 0x481D9, 0x481DA, 0x481DB, 0x481DC, 0x481DD, 0x481DE, 0x481DF,
+ 0x481E0, 0x481E1, 0x481E2, 0x481E3, 0x481E4, 0x481E5, 0x481E6, 0x481E7,
+ 0x481E8, 0x481E9, 0x481EA, 0x481EB, 0x481EC, 0x481ED, 0x481EE, 0x481EF,
+ 0x481F0, 0x481F1, 0x481F2, 0x481F3, 0x481F4, 0x481F5, 0x481F6, 0x481F7,
+ 0x481F8, 0x481F9, 0x481FA, 0x481FB, 0x481FC, 0x481FD, 0x481FE, 0x481FF,
+ 0x38000, 0x38001, 0x38002, 0x38003, 0x38004, 0x38005, 0x38006, 0x38007,
+ 0x38008, 0x38009, 0x3800A, 0x3800B, 0x3800C, 0x3800D, 0x3800E, 0x3800F,
+ 0x38010, 0x38011, 0x38012, 0x38013, 0x38014, 0x38015, 0x38016, 0x38017,
+ 0x400C0, 0x400C1, 0x400C2, 0x400C3, 0x400C4, 0x400C5
+};
+
+static const u32 fixed_d_sym[30] = {
+ 0x28000, 0x28001, 0x28002, 0x28003, 0x28004, 0x28005, 0x28006, 0x28007,
+ 0x28008, 0x28009, 0x2800A, 0x2800B, 0x2800C, 0x2800D, 0x2800E, 0x2800F,
+ 0x28010, 0x28011, 0x28012, 0x28013, 0x28014, 0x28015, 0x28016, 0x28017,
+ 0x28018, 0x28019, 0x2801A, 0x2801B, 0x2801C, 0x2801D
+};
+
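/*
 * Note: FIXED_HDR | bfinal written below forms the 3-bit DEFLATE block
 * header placed in the output accumulator: bit 0 is BFINAL and bits 1-2
 * are BTYPE = 01 (fixed Huffman codes), per RFC 1951, section 3.2.3.
 */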
+static int init_fixed_mode(struct iaa_device_compression_mode *mode)
+{
+ struct aecs_comp_table_record *comp_table = mode->aecs_comp_table;
+ u32 bfinal = 1;
+ u32 offset;
+
+ /* Configure aecs table using fixed Huffman table */
+ comp_table->crc = 0;
+ comp_table->xor_checksum = 0;
+ offset = comp_table->num_output_accum_bits / 8;
+ comp_table->output_accum[offset] = FIXED_HDR | bfinal;
+ comp_table->num_output_accum_bits = FIXED_HDR_SIZE;
+
+ return 0;
+}
+
+int iaa_aecs_init_fixed(void)
+{
+ int ret;
+
+ ret = add_iaa_compression_mode("fixed",
+ fixed_ll_sym,
+ sizeof(fixed_ll_sym),
+ fixed_d_sym,
+ sizeof(fixed_d_sym),
+ NULL, 0, 0,
+ init_fixed_mode, NULL);
+ if (!ret)
+ pr_debug("IAA fixed compression mode initialized\n");
+
+ return ret;
+}
+
+void iaa_aecs_cleanup_fixed(void)
+{
+ remove_iaa_compression_mode("fixed");
+}
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
new file mode 100644
index 0000000000..64a2e87a55
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -0,0 +1,2197 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <uapi/linux/idxd.h>
+#include <linux/highmem.h>
+#include <linux/sched/smt.h>
+#include <crypto/internal/acompress.h>
+
+#include "idxd.h"
+#include "iaa_crypto.h"
+#include "iaa_crypto_stats.h"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "idxd: " IDXD_SUBDRIVER_NAME ": " fmt
+
+#define IAA_ALG_PRIORITY 300
+
+/* number of iaa instances probed */
+static unsigned int nr_iaa;
+static unsigned int nr_cpus;
+static unsigned int nr_nodes;
+static unsigned int nr_cpus_per_node;
+
+/* Number of physical cpus sharing each iaa instance */
+static unsigned int cpus_per_iaa;
+
+static struct crypto_comp *deflate_generic_tfm;
+
+/* Per-cpu lookup table for balanced wqs */
+static struct wq_table_entry __percpu *wq_table;
+
+static struct idxd_wq *wq_table_next_wq(int cpu)
+{
+ struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
+
+ if (++entry->cur_wq >= entry->n_wqs)
+ entry->cur_wq = 0;
+
+ if (!entry->wqs[entry->cur_wq])
+ return NULL;
+
+ pr_debug("%s: returning wq at idx %d (iaa wq %d.%d) from cpu %d\n", __func__,
+ entry->cur_wq, entry->wqs[entry->cur_wq]->idxd->id,
+ entry->wqs[entry->cur_wq]->id, cpu);
+
+ return entry->wqs[entry->cur_wq];
+}
+
+static void wq_table_add(int cpu, struct idxd_wq *wq)
+{
+ struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
+
+ if (WARN_ON(entry->n_wqs == entry->max_wqs))
+ return;
+
+ entry->wqs[entry->n_wqs++] = wq;
+
+ pr_debug("%s: added iaa wq %d.%d to idx %d of cpu %d\n", __func__,
+ entry->wqs[entry->n_wqs - 1]->idxd->id,
+ entry->wqs[entry->n_wqs - 1]->id, entry->n_wqs - 1, cpu);
+}
+
+static void wq_table_free_entry(int cpu)
+{
+ struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
+
+ kfree(entry->wqs);
+ memset(entry, 0, sizeof(*entry));
+}
+
+static void wq_table_clear_entry(int cpu)
+{
+ struct wq_table_entry *entry = per_cpu_ptr(wq_table, cpu);
+
+ entry->n_wqs = 0;
+ entry->cur_wq = 0;
+ memset(entry->wqs, 0, entry->max_wqs * sizeof(struct idxd_wq *));
+}
+
+LIST_HEAD(iaa_devices);
+DEFINE_MUTEX(iaa_devices_lock);
+
+/* If enabled, IAA hw crypto algos are registered, unavailable otherwise */
+static bool iaa_crypto_enabled;
+static bool iaa_crypto_registered;
+
+/* Whether to verify the results of IAA compress operations */
+static bool iaa_verify_compress = true;
+
+static ssize_t verify_compress_show(struct device_driver *driver, char *buf)
+{
+ return sprintf(buf, "%d\n", iaa_verify_compress);
+}
+
+static ssize_t verify_compress_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ int ret = -EBUSY;
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (iaa_crypto_enabled)
+ goto out;
+
+ ret = kstrtobool(buf, &iaa_verify_compress);
+ if (ret)
+ goto out;
+
+ ret = count;
+out:
+ mutex_unlock(&iaa_devices_lock);
+
+ return ret;
+}
+static DRIVER_ATTR_RW(verify_compress);
+
+/*
+ * The iaa crypto driver supports three 'sync' methods determining how
+ * compressions and decompressions are performed:
+ *
+ * - sync: the compression or decompression completes before
+ * returning. This is the mode used by the async crypto
+ * interface when the sync mode is set to 'sync' and by
+ * the sync crypto interface regardless of setting.
+ *
+ * - async: the compression or decompression is submitted and returns
+ * immediately. Completion interrupts are not used so
+ * the caller is responsible for polling the descriptor
+ * for completion. This mode is applicable to only the
+ * async crypto interface and is ignored for anything
+ * else.
+ *
+ * - async_irq: the compression or decompression is submitted and
+ * returns immediately. Completion interrupts are
+ * enabled so the caller can wait for the completion and
+ * yield to other threads. When the compression or
+ * decompression completes, the completion is signaled
+ * and the caller awakened. This mode is applicable to
+ * only the async crypto interface and is ignored for
+ * anything else.
+ *
+ * These modes can be set using the iaa_crypto sync_mode driver
+ * attribute.
+ */
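/*
 * Usage sketch: both sync_mode and verify_compress can only be changed
 * while no IAA algorithms are enabled (the store callbacks return -EBUSY
 * otherwise). The sysfs path below is an assumption, based on the idxd
 * "crypto" subdriver name on the dsa bus; adjust it to the actual
 * location on a given system:
 *
 *	echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode
 *	cat /sys/bus/dsa/drivers/crypto/sync_mode
 *	echo 0 > /sys/bus/dsa/drivers/crypto/verify_compress
 */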
+
+/* Use async mode */
+static bool async_mode;
+/* Use interrupts */
+static bool use_irq;
+
+/**
+ * set_iaa_sync_mode - Set IAA sync mode
+ * @name: The name of the sync mode
+ *
+ * Make the IAA sync mode named @name the current sync mode used by
+ * compression/decompression.
+ */
+
+static int set_iaa_sync_mode(const char *name)
+{
+ int ret = 0;
+
+ if (sysfs_streq(name, "sync")) {
+ async_mode = false;
+ use_irq = false;
+ } else if (sysfs_streq(name, "async")) {
+ async_mode = true;
+ use_irq = false;
+ } else if (sysfs_streq(name, "async_irq")) {
+ async_mode = true;
+ use_irq = true;
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
+{
+ int ret = 0;
+
+ if (!async_mode && !use_irq)
+ ret = sprintf(buf, "%s\n", "sync");
+ else if (async_mode && !use_irq)
+ ret = sprintf(buf, "%s\n", "async");
+ else if (async_mode && use_irq)
+ ret = sprintf(buf, "%s\n", "async_irq");
+
+ return ret;
+}
+
+static ssize_t sync_mode_store(struct device_driver *driver,
+ const char *buf, size_t count)
+{
+ int ret = -EBUSY;
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (iaa_crypto_enabled)
+ goto out;
+
+ ret = set_iaa_sync_mode(buf);
+ if (ret == 0)
+ ret = count;
+out:
+ mutex_unlock(&iaa_devices_lock);
+
+ return ret;
+}
+static DRIVER_ATTR_RW(sync_mode);
+
+static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];
+
+static int find_empty_iaa_compression_mode(void)
+{
+ int i = -EINVAL;
+
+ for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
+ if (iaa_compression_modes[i])
+ continue;
+ break;
+ }
+
+ return i;
+}
+
+static struct iaa_compression_mode *find_iaa_compression_mode(const char *name, int *idx)
+{
+ struct iaa_compression_mode *mode;
+ int i;
+
+ for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
+ mode = iaa_compression_modes[i];
+ if (!mode)
+ continue;
+
+ if (!strcmp(mode->name, name)) {
+ *idx = i;
+ return iaa_compression_modes[i];
+ }
+ }
+
+ return NULL;
+}
+
+static void free_iaa_compression_mode(struct iaa_compression_mode *mode)
+{
+ kfree(mode->name);
+ kfree(mode->ll_table);
+ kfree(mode->d_table);
+ kfree(mode->header_table);
+
+ kfree(mode);
+}
+
+/*
+ * IAA Compression modes are defined by an ll_table, a d_table, and an
+ * optional header_table. These tables are typically generated and
+ * captured using statistics collected from running actual
+ * compress/decompress workloads.
+ *
+ * A module or other kernel code can add and remove compression modes
+ * with a given name using the exported @add_iaa_compression_mode()
+ * and @remove_iaa_compression_mode() functions.
+ *
+ * When a new compression mode is added, the tables are saved in a
+ * global compression mode list. When IAA devices are added, a
+ * per-IAA device dma mapping is created for each IAA device, for each
+ * compression mode. These are the tables used to do the actual
+ * compression/decompression and are unmapped if/when the devices are
+ * removed. Currently, compression modes must be added before any
+ * device is added, and removed after all devices have been removed.
+ */
+
+/**
+ * remove_iaa_compression_mode - Remove an IAA compression mode
+ * @name: The name the compression mode will be known as
+ *
+ * Remove the IAA compression mode named @name.
+ */
+void remove_iaa_compression_mode(const char *name)
+{
+ struct iaa_compression_mode *mode;
+ int idx;
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (!list_empty(&iaa_devices))
+ goto out;
+
+ mode = find_iaa_compression_mode(name, &idx);
+ if (mode) {
+ free_iaa_compression_mode(mode);
+ iaa_compression_modes[idx] = NULL;
+ }
+out:
+ mutex_unlock(&iaa_devices_lock);
+}
+EXPORT_SYMBOL_GPL(remove_iaa_compression_mode);
+
+/**
+ * add_iaa_compression_mode - Add an IAA compression mode
+ * @name: The name the compression mode will be known as
+ * @ll_table: The ll table
+ * @ll_table_size: The ll table size in bytes
+ * @d_table: The d table
+ * @d_table_size: The d table size in bytes
+ * @header_table: Optional header table
+ * @header_table_size: Optional header table size in bytes
+ * @gen_decomp_table_flags: Optional flags used to generate the decomp table
+ * @init: Optional callback function to init the compression mode data
+ * @free: Optional callback function to free the compression mode data
+ *
+ * Add a new IAA compression mode named @name.
+ *
+ * Returns 0 if successful, errcode otherwise.
+ */
+int add_iaa_compression_mode(const char *name,
+ const u32 *ll_table,
+ int ll_table_size,
+ const u32 *d_table,
+ int d_table_size,
+ const u8 *header_table,
+ int header_table_size,
+ u16 gen_decomp_table_flags,
+ iaa_dev_comp_init_fn_t init,
+ iaa_dev_comp_free_fn_t free)
+{
+ struct iaa_compression_mode *mode;
+ int idx, ret = -ENOMEM;
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (!list_empty(&iaa_devices)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode)
+ goto out;
+
+ mode->name = kstrdup(name, GFP_KERNEL);
+ if (!mode->name)
+ goto free;
+
+ if (ll_table) {
+ mode->ll_table = kzalloc(ll_table_size, GFP_KERNEL);
+ if (!mode->ll_table)
+ goto free;
+ memcpy(mode->ll_table, ll_table, ll_table_size);
+ mode->ll_table_size = ll_table_size;
+ }
+
+ if (d_table) {
+ mode->d_table = kzalloc(d_table_size, GFP_KERNEL);
+ if (!mode->d_table)
+ goto free;
+ memcpy(mode->d_table, d_table, d_table_size);
+ mode->d_table_size = d_table_size;
+ }
+
+ if (header_table) {
+ mode->header_table = kzalloc(header_table_size, GFP_KERNEL);
+ if (!mode->header_table)
+ goto free;
+ memcpy(mode->header_table, header_table, header_table_size);
+ mode->header_table_size = header_table_size;
+ }
+
+ mode->gen_decomp_table_flags = gen_decomp_table_flags;
+
+ mode->init = init;
+ mode->free = free;
+
+ idx = find_empty_iaa_compression_mode();
+ if (idx < 0)
+ goto free;
+
+ pr_debug("IAA compression mode %s added at idx %d\n",
+ mode->name, idx);
+
+ iaa_compression_modes[idx] = mode;
+
+ ret = 0;
+out:
+ mutex_unlock(&iaa_devices_lock);
+
+ return ret;
+free:
+ free_iaa_compression_mode(mode);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(add_iaa_compression_mode);
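/*
 * A minimal, hypothetical sketch of using this exported API from another
 * module (the table contents are placeholders, not real trained Huffman
 * tables; modes must be added before any IAA device is probed and
 * removed after all devices are gone, as noted above):
 *
 *	static u32 my_ll_table[286];
 *	static u32 my_d_table[30];
 *
 *	ret = add_iaa_compression_mode("my_mode", my_ll_table,
 *				       sizeof(my_ll_table), my_d_table,
 *				       sizeof(my_d_table), NULL, 0, 0,
 *				       NULL, NULL);
 *	...
 *	remove_iaa_compression_mode("my_mode");
 */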
+
+static struct iaa_device_compression_mode *
+get_iaa_device_compression_mode(struct iaa_device *iaa_device, int idx)
+{
+ return iaa_device->compression_modes[idx];
+}
+
+static void free_device_compression_mode(struct iaa_device *iaa_device,
+ struct iaa_device_compression_mode *device_mode)
+{
+ size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
+ struct device *dev = &iaa_device->idxd->pdev->dev;
+
+ kfree(device_mode->name);
+
+ if (device_mode->aecs_comp_table)
+ dma_free_coherent(dev, size, device_mode->aecs_comp_table,
+ device_mode->aecs_comp_table_dma_addr);
+ if (device_mode->aecs_decomp_table)
+ dma_free_coherent(dev, size, device_mode->aecs_decomp_table,
+ device_mode->aecs_decomp_table_dma_addr);
+
+ kfree(device_mode);
+}
+
+#define IDXD_OP_FLAG_AECS_RW_TGLS 0x400000
+#define IAX_AECS_DEFAULT_FLAG (IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC)
+#define IAX_AECS_COMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
+#define IAX_AECS_DECOMPRESS_FLAG (IAX_AECS_DEFAULT_FLAG | IDXD_OP_FLAG_RD_SRC2_AECS)
+#define IAX_AECS_GEN_FLAG (IAX_AECS_DEFAULT_FLAG | \
+ IDXD_OP_FLAG_WR_SRC2_AECS_COMP | \
+ IDXD_OP_FLAG_AECS_RW_TGLS)
+
+static int check_completion(struct device *dev,
+ struct iax_completion_record *comp,
+ bool compress,
+ bool only_once);
+
+static int decompress_header(struct iaa_device_compression_mode *device_mode,
+ struct iaa_compression_mode *mode,
+ struct idxd_wq *wq)
+{
+ dma_addr_t src_addr, src2_addr;
+ struct idxd_desc *idxd_desc;
+ struct iax_hw_desc *desc;
+ struct device *dev;
+ int ret = 0;
+
+ idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(idxd_desc))
+ return PTR_ERR(idxd_desc);
+
+ desc = idxd_desc->iax_hw;
+
+ dev = &wq->idxd->pdev->dev;
+
+ src_addr = dma_map_single(dev, (void *)mode->header_table,
+ mode->header_table_size, DMA_TO_DEVICE);
+ dev_dbg(dev, "%s: mode->name %s, src_addr %llx, dev %p, src %p, slen %d\n",
+ __func__, mode->name, src_addr, dev,
+ mode->header_table, mode->header_table_size);
+ if (unlikely(dma_mapping_error(dev, src_addr))) {
+ dev_dbg(dev, "dma_map_single err, exiting\n");
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ desc->flags = IAX_AECS_GEN_FLAG;
+ desc->opcode = IAX_OPCODE_DECOMPRESS;
+
+ desc->src1_addr = (u64)src_addr;
+ desc->src1_size = mode->header_table_size;
+
+ src2_addr = device_mode->aecs_decomp_table_dma_addr;
+ desc->src2_addr = (u64)src2_addr;
+ desc->src2_size = 1088;
+ dev_dbg(dev, "%s: mode->name %s, src2_addr %llx, dev %p, src2_size %d\n",
+ __func__, mode->name, desc->src2_addr, dev, desc->src2_size);
+ desc->max_dst_size = 0; // suppressed output
+
+ desc->decompr_flags = mode->gen_decomp_table_flags;
+
+ desc->priv = 0;
+
+ desc->completion_addr = idxd_desc->compl_dma;
+
+ ret = idxd_submit_desc(wq, idxd_desc);
+ if (ret) {
+ pr_err("%s: submit_desc failed ret=0x%x\n", __func__, ret);
+ goto out;
+ }
+
+ ret = check_completion(dev, idxd_desc->iax_completion, false, false);
+ if (ret)
+ dev_dbg(dev, "%s: mode->name %s check_completion failed ret=%d\n",
+ __func__, mode->name, ret);
+ else
+ dev_dbg(dev, "%s: mode->name %s succeeded\n", __func__,
+ mode->name);
+out:
+ dma_unmap_single(dev, src_addr, 1088, DMA_TO_DEVICE);
+
+ return ret;
+}
+
+static int init_device_compression_mode(struct iaa_device *iaa_device,
+ struct iaa_compression_mode *mode,
+ int idx, struct idxd_wq *wq)
+{
+ size_t size = sizeof(struct aecs_comp_table_record) + IAA_AECS_ALIGN;
+ struct device *dev = &iaa_device->idxd->pdev->dev;
+ struct iaa_device_compression_mode *device_mode;
+ int ret = -ENOMEM;
+
+ device_mode = kzalloc(sizeof(*device_mode), GFP_KERNEL);
+ if (!device_mode)
+ return -ENOMEM;
+
+ device_mode->name = kstrdup(mode->name, GFP_KERNEL);
+ if (!device_mode->name)
+ goto free;
+
+ device_mode->aecs_comp_table = dma_alloc_coherent(dev, size,
+ &device_mode->aecs_comp_table_dma_addr, GFP_KERNEL);
+ if (!device_mode->aecs_comp_table)
+ goto free;
+
+ device_mode->aecs_decomp_table = dma_alloc_coherent(dev, size,
+ &device_mode->aecs_decomp_table_dma_addr, GFP_KERNEL);
+ if (!device_mode->aecs_decomp_table)
+ goto free;
+
+ /* Add Huffman table to aecs */
+ memset(device_mode->aecs_comp_table, 0, sizeof(*device_mode->aecs_comp_table));
+ memcpy(device_mode->aecs_comp_table->ll_sym, mode->ll_table, mode->ll_table_size);
+ memcpy(device_mode->aecs_comp_table->d_sym, mode->d_table, mode->d_table_size);
+
+ if (mode->header_table) {
+ ret = decompress_header(device_mode, mode, wq);
+ if (ret) {
+ pr_debug("iaa header decompression failed: ret=%d\n", ret);
+ goto free;
+ }
+ }
+
+ if (mode->init) {
+ ret = mode->init(device_mode);
+ if (ret)
+ goto free;
+ }
+
+ /* mode index should match iaa_compression_modes idx */
+ iaa_device->compression_modes[idx] = device_mode;
+
+ pr_debug("IAA %s compression mode initialized for iaa device %d\n",
+ mode->name, iaa_device->idxd->id);
+
+ ret = 0;
+out:
+ return ret;
+free:
+ pr_debug("IAA %s compression mode initialization failed for iaa device %d\n",
+ mode->name, iaa_device->idxd->id);
+
+ free_device_compression_mode(iaa_device, device_mode);
+ goto out;
+}
+
+static int init_device_compression_modes(struct iaa_device *iaa_device,
+ struct idxd_wq *wq)
+{
+ struct iaa_compression_mode *mode;
+ int i, ret = 0;
+
+ for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
+ mode = iaa_compression_modes[i];
+ if (!mode)
+ continue;
+
+ ret = init_device_compression_mode(iaa_device, mode, i, wq);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static void remove_device_compression_modes(struct iaa_device *iaa_device)
+{
+ struct iaa_device_compression_mode *device_mode;
+ int i;
+
+ for (i = 0; i < IAA_COMP_MODES_MAX; i++) {
+ device_mode = iaa_device->compression_modes[i];
+ if (!device_mode)
+ continue;
+
+ free_device_compression_mode(iaa_device, device_mode);
+ iaa_device->compression_modes[i] = NULL;
+ if (iaa_compression_modes[i]->free)
+ iaa_compression_modes[i]->free(device_mode);
+ }
+}
+
+static struct iaa_device *iaa_device_alloc(void)
+{
+ struct iaa_device *iaa_device;
+
+ iaa_device = kzalloc(sizeof(*iaa_device), GFP_KERNEL);
+ if (!iaa_device)
+ return NULL;
+
+ INIT_LIST_HEAD(&iaa_device->wqs);
+
+ return iaa_device;
+}
+
+static bool iaa_has_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
+{
+ struct iaa_wq *iaa_wq;
+
+ list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
+ if (iaa_wq->wq == wq)
+ return true;
+ }
+
+ return false;
+}
+
+static struct iaa_device *add_iaa_device(struct idxd_device *idxd)
+{
+ struct iaa_device *iaa_device;
+
+ iaa_device = iaa_device_alloc();
+ if (!iaa_device)
+ return NULL;
+
+ iaa_device->idxd = idxd;
+
+ list_add_tail(&iaa_device->list, &iaa_devices);
+
+ nr_iaa++;
+
+ return iaa_device;
+}
+
+static int init_iaa_device(struct iaa_device *iaa_device, struct iaa_wq *iaa_wq)
+{
+ int ret = 0;
+
+ ret = init_device_compression_modes(iaa_device, iaa_wq->wq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static void del_iaa_device(struct iaa_device *iaa_device)
+{
+ list_del(&iaa_device->list);
+
+ nr_iaa--;
+}
+
+static int add_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq,
+ struct iaa_wq **new_wq)
+{
+ struct idxd_device *idxd = iaa_device->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iaa_wq *iaa_wq;
+
+ iaa_wq = kzalloc(sizeof(*iaa_wq), GFP_KERNEL);
+ if (!iaa_wq)
+ return -ENOMEM;
+
+ iaa_wq->wq = wq;
+ iaa_wq->iaa_device = iaa_device;
+ idxd_wq_set_private(wq, iaa_wq);
+
+ list_add_tail(&iaa_wq->list, &iaa_device->wqs);
+
+ iaa_device->n_wq++;
+
+ if (new_wq)
+ *new_wq = iaa_wq;
+
+ dev_dbg(dev, "added wq %d to iaa device %d, n_wq %d\n",
+ wq->id, iaa_device->idxd->id, iaa_device->n_wq);
+
+ return 0;
+}
+
+static void del_iaa_wq(struct iaa_device *iaa_device, struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = iaa_device->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct iaa_wq *iaa_wq;
+
+ list_for_each_entry(iaa_wq, &iaa_device->wqs, list) {
+ if (iaa_wq->wq == wq) {
+ list_del(&iaa_wq->list);
+ iaa_device->n_wq--;
+
+ dev_dbg(dev, "removed wq %d from iaa_device %d, n_wq %d, nr_iaa %d\n",
+ wq->id, iaa_device->idxd->id,
+ iaa_device->n_wq, nr_iaa);
+
+ if (iaa_device->n_wq == 0)
+ del_iaa_device(iaa_device);
+ break;
+ }
+ }
+}
+
+static void clear_wq_table(void)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ wq_table_clear_entry(cpu);
+
+ pr_debug("cleared wq table\n");
+}
+
+static void free_iaa_device(struct iaa_device *iaa_device)
+{
+ if (!iaa_device)
+ return;
+
+ remove_device_compression_modes(iaa_device);
+ kfree(iaa_device);
+}
+
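+/* Free the containing iaa_device once its last wq has been removed */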
+static void __free_iaa_wq(struct iaa_wq *iaa_wq)
+{
+ struct iaa_device *iaa_device;
+
+ if (!iaa_wq)
+ return;
+
+ iaa_device = iaa_wq->iaa_device;
+ if (iaa_device->n_wq == 0)
+ free_iaa_device(iaa_wq->iaa_device);
+}
+
+static void free_iaa_wq(struct iaa_wq *iaa_wq)
+{
+ struct idxd_wq *wq;
+
+ __free_iaa_wq(iaa_wq);
+
+ wq = iaa_wq->wq;
+
+ kfree(iaa_wq);
+ idxd_wq_set_private(wq, NULL);
+}
+
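+/*
+ * iaa_wq_get()/iaa_wq_put() track users of a wq while requests are in
+ * flight.  If the wq is being removed while it still has users, the
+ * remove flag is set and the final iaa_wq_put() frees the iaa_wq
+ * instead of the remove path.
+ */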
+static int iaa_wq_get(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct iaa_wq *iaa_wq;
+ int ret = 0;
+
+ spin_lock(&idxd->dev_lock);
+ iaa_wq = idxd_wq_get_private(wq);
+ if (iaa_wq && !iaa_wq->remove) {
+ iaa_wq->ref++;
+ idxd_wq_get(wq);
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock(&idxd->dev_lock);
+
+ return ret;
+}
+
+static int iaa_wq_put(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct iaa_wq *iaa_wq;
+ bool free = false;
+ int ret = 0;
+
+ spin_lock(&idxd->dev_lock);
+ iaa_wq = idxd_wq_get_private(wq);
+ if (iaa_wq) {
+ iaa_wq->ref--;
+ if (iaa_wq->ref == 0 && iaa_wq->remove) {
+ idxd_wq_set_private(wq, NULL);
+ free = true;
+ }
+ idxd_wq_put(wq);
+ } else {
+ ret = -ENODEV;
+ }
+ spin_unlock(&idxd->dev_lock);
+ if (free) {
+ __free_iaa_wq(iaa_wq);
+ kfree(iaa_wq);
+ }
+
+ return ret;
+}
+
+static void free_wq_table(void)
+{
+ int cpu;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ wq_table_free_entry(cpu);
+
+ free_percpu(wq_table);
+
+ pr_debug("freed wq table\n");
+}
+
+static int alloc_wq_table(int max_wqs)
+{
+ struct wq_table_entry *entry;
+ int cpu;
+
+ wq_table = alloc_percpu(struct wq_table_entry);
+ if (!wq_table)
+ return -ENOMEM;
+
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ entry = per_cpu_ptr(wq_table, cpu);
+ entry->wqs = kcalloc(max_wqs, sizeof(struct wq *), GFP_KERNEL);
+ if (!entry->wqs) {
+ free_wq_table();
+ return -ENOMEM;
+ }
+
+ entry->max_wqs = max_wqs;
+ }
+
+ pr_debug("initialized wq table\n");
+
+ return 0;
+}
+
+static int save_iaa_wq(struct idxd_wq *wq)
+{
+ struct iaa_device *iaa_device, *found = NULL;
+ struct idxd_device *idxd;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret = 0;
+
+ list_for_each_entry(iaa_device, &iaa_devices, list) {
+ if (iaa_device->idxd == wq->idxd) {
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+ /*
+ * Check to see that we don't already have this wq.
+ * Shouldn't happen but we don't control probing.
+ */
+ if (iaa_has_wq(iaa_device, wq)) {
+ dev_dbg(dev, "same wq probed multiple times for iaa_device %p\n",
+ iaa_device);
+ goto out;
+ }
+
+ found = iaa_device;
+
+ ret = add_iaa_wq(iaa_device, wq, NULL);
+ if (ret)
+ goto out;
+
+ break;
+ }
+ }
+
+ if (!found) {
+ struct iaa_device *new_device;
+ struct iaa_wq *new_wq;
+
+ new_device = add_iaa_device(wq->idxd);
+ if (!new_device) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = add_iaa_wq(new_device, wq, &new_wq);
+ if (ret) {
+ del_iaa_device(new_device);
+ free_iaa_device(new_device);
+ goto out;
+ }
+
+ ret = init_iaa_device(new_device, new_wq);
+ if (ret) {
+ del_iaa_wq(new_device, new_wq->wq);
+ del_iaa_device(new_device);
+ free_iaa_wq(new_wq);
+ goto out;
+ }
+ }
+
+ if (WARN_ON(nr_iaa == 0))
+ return -EINVAL;
+
+ cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
+out:
+ return ret;
+}
+
+static void remove_iaa_wq(struct idxd_wq *wq)
+{
+ struct iaa_device *iaa_device;
+
+ list_for_each_entry(iaa_device, &iaa_devices, list) {
+ if (iaa_has_wq(iaa_device, wq)) {
+ del_iaa_wq(iaa_device, wq);
+ break;
+ }
+ }
+
+ if (nr_iaa) {
+ cpus_per_iaa = (nr_nodes * nr_cpus_per_node) / nr_iaa;
+ if (!cpus_per_iaa)
+ cpus_per_iaa = 1;
+ } else
+ cpus_per_iaa = 1;
+}
+
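+/*
+ * Add all of the wqs of the iaa'th device on the iaa_devices list to
+ * the given cpu's wq table entry, falling back to the first device if
+ * that index can't be found.
+ */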
+static int wq_table_add_wqs(int iaa, int cpu)
+{
+ struct iaa_device *iaa_device, *found_device = NULL;
+ int ret = 0, cur_iaa = 0, n_wqs_added = 0;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+
+ list_for_each_entry(iaa_device, &iaa_devices, list) {
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ if (cur_iaa != iaa) {
+ cur_iaa++;
+ continue;
+ }
+
+ found_device = iaa_device;
+ dev_dbg(dev, "getting wq from iaa_device %d, cur_iaa %d\n",
+ found_device->idxd->id, cur_iaa);
+ break;
+ }
+
+ if (!found_device) {
+ found_device = list_first_entry_or_null(&iaa_devices,
+ struct iaa_device, list);
+ if (!found_device) {
+ pr_debug("couldn't find any iaa devices with wqs!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ cur_iaa = 0;
+
+ idxd = found_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+ dev_dbg(dev, "getting wq from only iaa_device %d, cur_iaa %d\n",
+ found_device->idxd->id, cur_iaa);
+ }
+
+ list_for_each_entry(iaa_wq, &found_device->wqs, list) {
+ wq_table_add(cpu, iaa_wq->wq);
+ pr_debug("rebalance: added wq for cpu=%d: iaa wq %d.%d\n",
+ cpu, iaa_wq->wq->idxd->id, iaa_wq->wq->id);
+ n_wqs_added++;
+ }
+
+ if (!n_wqs_added) {
+ pr_debug("couldn't find any iaa wqs!\n");
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+/*
+ * Rebalance the wq table so that given a cpu, it's easy to find the
+ * closest IAA instance. The idea is to try to choose the most
+ * appropriate IAA instance for a caller and spread available
+ * workqueues around to clients.
+ */
+static void rebalance_wq_table(void)
+{
+ const struct cpumask *node_cpus;
+ int node, cpu, iaa = -1;
+
+ if (nr_iaa == 0)
+ return;
+
+ pr_debug("rebalance: nr_nodes=%d, nr_cpus %d, nr_iaa %d, cpus_per_iaa %d\n",
+ nr_nodes, nr_cpus, nr_iaa, cpus_per_iaa);
+
+ clear_wq_table();
+
+ if (nr_iaa == 1) {
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ if (WARN_ON(wq_table_add_wqs(0, cpu))) {
+ pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
+ return;
+ }
+ }
+
+ return;
+ }
+
+ for_each_node_with_cpus(node) {
+ node_cpus = cpumask_of_node(node);
+
+ for (cpu = 0; cpu < nr_cpus_per_node; cpu++) {
+ int node_cpu = cpumask_nth(cpu, node_cpus);
+
+ if (WARN_ON(node_cpu >= nr_cpu_ids)) {
+ pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
+ return;
+ }
+
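+ /* Move on to the next IAA instance every cpus_per_iaa CPUs */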
+ if ((cpu % cpus_per_iaa) == 0)
+ iaa++;
+
+ if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
+ pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
+ return;
+ }
+ }
+ }
+}
+
+static inline int check_completion(struct device *dev,
+ struct iax_completion_record *comp,
+ bool compress,
+ bool only_once)
+{
+ char *op_str = compress ? "compress" : "decompress";
+ int ret = 0;
+
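+ /* Poll for the completion record; bail with -EAGAIN if only polling once */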
+ while (!comp->status) {
+ if (only_once)
+ return -EAGAIN;
+ cpu_relax();
+ }
+
+ if (comp->status != IAX_COMP_SUCCESS) {
+ if (comp->status == IAA_ERROR_WATCHDOG_EXPIRED) {
+ ret = -ETIMEDOUT;
+ dev_dbg(dev, "%s timed out, size=0x%x\n",
+ op_str, comp->output_size);
+ update_completion_timeout_errs();
+ goto out;
+ }
+
+ if (comp->status == IAA_ANALYTICS_ERROR &&
+ comp->error_code == IAA_ERROR_COMP_BUF_OVERFLOW && compress) {
+ ret = -E2BIG;
+ dev_dbg(dev, "compressed > uncompressed size,"
+ " not compressing, size=0x%x\n",
+ comp->output_size);
+ update_completion_comp_buf_overflow_errs();
+ goto out;
+ }
+
+ if (comp->status == IAA_ERROR_DECOMP_BUF_OVERFLOW) {
+ ret = -EOVERFLOW;
+ goto out;
+ }
+
+ ret = -EINVAL;
+ dev_dbg(dev, "iaa %s status=0x%x, error=0x%x, size=0x%x\n",
+ op_str, comp->status, comp->error_code, comp->output_size);
+ print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET, 8, 1, comp, 64, 0);
+ update_completion_einval_errs();
+
+ goto out;
+ }
+out:
+ return ret;
+}
+
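+/*
+ * Software fallback: decompress with the kernel's generic deflate
+ * implementation, used when the hardware reports an analytics error.
+ */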
+static int deflate_generic_decompress(struct acomp_req *req)
+{
+ void *src, *dst;
+ int ret;
+
+ src = kmap_local_page(sg_page(req->src)) + req->src->offset;
+ dst = kmap_local_page(sg_page(req->dst)) + req->dst->offset;
+
+ ret = crypto_comp_decompress(deflate_generic_tfm,
+ src, req->slen, dst, &req->dlen);
+
+ kunmap_local(src);
+ kunmap_local(dst);
+
+ update_total_sw_decomp_calls();
+
+ return ret;
+}
+
+static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
+ struct acomp_req *req,
+ dma_addr_t *src_addr, dma_addr_t *dst_addr);
+
+static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
+ struct idxd_wq *wq,
+ dma_addr_t src_addr, unsigned int slen,
+ dma_addr_t dst_addr, unsigned int *dlen,
+ u32 compression_crc);
+
+static void iaa_desc_complete(struct idxd_desc *idxd_desc,
+ enum idxd_complete_type comp_type,
+ bool free_desc, void *__ctx,
+ u32 *status)
+{
+ struct iaa_device_compression_mode *active_compression_mode;
+ struct iaa_compression_ctx *compression_ctx;
+ struct crypto_ctx *ctx = __ctx;
+ struct iaa_device *iaa_device;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret, err = 0;
+
+ compression_ctx = crypto_tfm_ctx(ctx->tfm);
+
+ iaa_wq = idxd_wq_get_private(idxd_desc->wq);
+ iaa_device = iaa_wq->iaa_device;
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ active_compression_mode = get_iaa_device_compression_mode(iaa_device,
+ compression_ctx->mode);
+ dev_dbg(dev, "%s: compression mode %s,"
+ " ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__,
+ active_compression_mode->name,
+ ctx->src_addr, ctx->dst_addr);
+
+ ret = check_completion(dev, idxd_desc->iax_completion,
+ ctx->compress, false);
+ if (ret) {
+ dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
+ if (!ctx->compress &&
+ idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
+ pr_warn("%s: falling back to deflate-generic decompress, "
+ "analytics error code %x\n", __func__,
+ idxd_desc->iax_completion->error_code);
+ ret = deflate_generic_decompress(ctx->req);
+ if (ret) {
+ dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
+ __func__, ret);
+ err = -EIO;
+ goto err;
+ }
+ } else {
+ err = -EIO;
+ goto err;
+ }
+ } else {
+ ctx->req->dlen = idxd_desc->iax_completion->output_size;
+ }
+
+ /* Update stats */
+ if (ctx->compress) {
+ update_total_comp_bytes_out(ctx->req->dlen);
+ update_wq_comp_bytes(iaa_wq->wq, ctx->req->dlen);
+ } else {
+ update_total_decomp_bytes_in(ctx->req->dlen);
+ update_wq_decomp_bytes(iaa_wq->wq, ctx->req->dlen);
+ }
+
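+ /* Verify compression by decompressing the output and comparing CRCs */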
+ if (ctx->compress && compression_ctx->verify_compress) {
+ dma_addr_t src_addr, dst_addr;
+ u32 compression_crc;
+
+ compression_crc = idxd_desc->iax_completion->crc;
+
+ ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
+ if (ret) {
+ dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
+ err = -EIO;
+ goto out;
+ }
+
+ ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
+ ctx->req->slen, dst_addr, &ctx->req->dlen,
+ compression_crc);
+ if (ret) {
+ dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
+ err = -EIO;
+ }
+
+ dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE);
+ dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE);
+
+ goto out;
+ }
+err:
+ dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE);
+out:
+ if (ret != 0)
+ dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
+
+ if (ctx->req->base.complete)
+ acomp_request_complete(ctx->req, err);
+
+ if (free_desc)
+ idxd_free_desc(idxd_desc->wq, idxd_desc);
+ iaa_wq_put(idxd_desc->wq);
+}
+
+static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
+ struct idxd_wq *wq,
+ dma_addr_t src_addr, unsigned int slen,
+ dma_addr_t dst_addr, unsigned int *dlen,
+ u32 *compression_crc,
+ bool disable_async)
+{
+ struct iaa_device_compression_mode *active_compression_mode;
+ struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct iaa_device *iaa_device;
+ struct idxd_desc *idxd_desc;
+ struct iax_hw_desc *desc;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret = 0;
+
+ iaa_wq = idxd_wq_get_private(wq);
+ iaa_device = iaa_wq->iaa_device;
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
+
+ idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(idxd_desc)) {
+ dev_dbg(dev, "idxd descriptor allocation failed\n");
+ dev_dbg(dev, "iaa compress failed: ret=%ld\n", PTR_ERR(idxd_desc));
+ return PTR_ERR(idxd_desc);
+ }
+ desc = idxd_desc->iax_hw;
+
+ desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR |
+ IDXD_OP_FLAG_RD_SRC2_AECS | IDXD_OP_FLAG_CC;
+ desc->opcode = IAX_OPCODE_COMPRESS;
+ desc->compr_flags = IAA_COMP_FLAGS;
+ desc->priv = 0;
+
+ desc->src1_addr = (u64)src_addr;
+ desc->src1_size = slen;
+ desc->dst_addr = (u64)dst_addr;
+ desc->max_dst_size = *dlen;
+ desc->src2_addr = active_compression_mode->aecs_comp_table_dma_addr;
+ desc->src2_size = sizeof(struct aecs_comp_table_record);
+ desc->completion_addr = idxd_desc->compl_dma;
+
+ if (ctx->use_irq && !disable_async) {
+ desc->flags |= IDXD_OP_FLAG_RCI;
+
+ idxd_desc->crypto.req = req;
+ idxd_desc->crypto.tfm = tfm;
+ idxd_desc->crypto.src_addr = src_addr;
+ idxd_desc->crypto.dst_addr = dst_addr;
+ idxd_desc->crypto.compress = true;
+
+ dev_dbg(dev, "%s use_async_irq: compression mode %s,"
+ " src_addr %llx, dst_addr %llx\n", __func__,
+ active_compression_mode->name,
+ src_addr, dst_addr);
+ } else if (ctx->async_mode && !disable_async)
+ req->base.data = idxd_desc;
+
+ dev_dbg(dev, "%s: compression mode %s,"
+ " desc->src1_addr %llx, desc->src1_size %d,"
+ " desc->dst_addr %llx, desc->max_dst_size %d,"
+ " desc->src2_addr %llx, desc->src2_size %d\n", __func__,
+ active_compression_mode->name,
+ desc->src1_addr, desc->src1_size, desc->dst_addr,
+ desc->max_dst_size, desc->src2_addr, desc->src2_size);
+
+ ret = idxd_submit_desc(wq, idxd_desc);
+ if (ret) {
+ dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
+ goto err;
+ }
+
+ /* Update stats */
+ update_total_comp_calls();
+ update_wq_comp_calls(wq);
+
+ if (ctx->async_mode && !disable_async) {
+ ret = -EINPROGRESS;
+ dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
+ goto out;
+ }
+
+ ret = check_completion(dev, idxd_desc->iax_completion, true, false);
+ if (ret) {
+ dev_dbg(dev, "check_completion failed ret=%d\n", ret);
+ goto err;
+ }
+
+ *dlen = idxd_desc->iax_completion->output_size;
+
+ /* Update stats */
+ update_total_comp_bytes_out(*dlen);
+ update_wq_comp_bytes(wq, *dlen);
+
+ *compression_crc = idxd_desc->iax_completion->crc;
+
+ if (!ctx->async_mode || disable_async)
+ idxd_free_desc(wq, idxd_desc);
+out:
+ return ret;
+err:
+ idxd_free_desc(wq, idxd_desc);
+ dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);
+
+ goto out;
+}
+
+static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
+ struct acomp_req *req,
+ dma_addr_t *src_addr, dma_addr_t *dst_addr)
+{
+ int ret = 0;
+ int nr_sgs;
+
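+ /*
+ * The verify pass reverses the roles of the buffers: the compressed
+ * output (req->dst) is read back by the device and the original
+ * input (req->src) serves as the (suppressed) destination, so remap
+ * both sg lists with the DMA directions swapped.
+ */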
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+
+ nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "verify: couldn't map src sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ goto out;
+ }
+ *src_addr = sg_dma_address(req->src);
+ dev_dbg(dev, "verify: dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
+ " req->slen %d, sg_dma_len(sg) %d\n", *src_addr, nr_sgs,
+ req->src, req->slen, sg_dma_len(req->src));
+
+ nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "verify: couldn't map dst sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
+ goto out;
+ }
+ *dst_addr = sg_dma_address(req->dst);
+ dev_dbg(dev, "verify: dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
+ " req->dlen %d, sg_dma_len(sg) %d\n", *dst_addr, nr_sgs,
+ req->dst, req->dlen, sg_dma_len(req->dst));
+out:
+ return ret;
+}
+
+static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
+ struct idxd_wq *wq,
+ dma_addr_t src_addr, unsigned int slen,
+ dma_addr_t dst_addr, unsigned int *dlen,
+ u32 compression_crc)
+{
+ struct iaa_device_compression_mode *active_compression_mode;
+ struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct iaa_device *iaa_device;
+ struct idxd_desc *idxd_desc;
+ struct iax_hw_desc *desc;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret = 0;
+
+ iaa_wq = idxd_wq_get_private(wq);
+ iaa_device = iaa_wq->iaa_device;
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
+
+ idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(idxd_desc)) {
+ dev_dbg(dev, "idxd descriptor allocation failed\n");
+ dev_dbg(dev, "iaa compress failed: ret=%ld\n",
+ PTR_ERR(idxd_desc));
+ return PTR_ERR(idxd_desc);
+ }
+ desc = idxd_desc->iax_hw;
+
+ /* Verify (optional) - decompress and check crc, suppress dest write */
+
+ desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
+ desc->opcode = IAX_OPCODE_DECOMPRESS;
+ desc->decompr_flags = IAA_DECOMP_FLAGS | IAA_DECOMP_SUPPRESS_OUTPUT;
+ desc->priv = 0;
+
+ desc->src1_addr = (u64)dst_addr;
+ desc->src1_size = *dlen;
+ desc->dst_addr = (u64)src_addr;
+ desc->max_dst_size = slen;
+ desc->completion_addr = idxd_desc->compl_dma;
+
+ dev_dbg(dev, "(verify) compression mode %s,"
+ " desc->src1_addr %llx, desc->src1_size %d,"
+ " desc->dst_addr %llx, desc->max_dst_size %d,"
+ " desc->src2_addr %llx, desc->src2_size %d\n",
+ active_compression_mode->name,
+ desc->src1_addr, desc->src1_size, desc->dst_addr,
+ desc->max_dst_size, desc->src2_addr, desc->src2_size);
+
+ ret = idxd_submit_desc(wq, idxd_desc);
+ if (ret) {
+ dev_dbg(dev, "submit_desc (verify) failed ret=%d\n", ret);
+ goto err;
+ }
+
+ ret = check_completion(dev, idxd_desc->iax_completion, false, false);
+ if (ret) {
+ dev_dbg(dev, "(verify) check_completion failed ret=%d\n", ret);
+ goto err;
+ }
+
+ if (compression_crc != idxd_desc->iax_completion->crc) {
+ ret = -EINVAL;
+ dev_dbg(dev, "(verify) iaa comp/decomp crc mismatch:"
+ " comp=0x%x, decomp=0x%x\n", compression_crc,
+ idxd_desc->iax_completion->crc);
+ print_hex_dump(KERN_INFO, "cmp-rec: ", DUMP_PREFIX_OFFSET,
+ 8, 1, idxd_desc->iax_completion, 64, 0);
+ goto err;
+ }
+
+ idxd_free_desc(wq, idxd_desc);
+out:
+ return ret;
+err:
+ idxd_free_desc(wq, idxd_desc);
+ dev_dbg(dev, "iaa compress failed: ret=%d\n", ret);
+
+ goto out;
+}
+
+static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
+ struct idxd_wq *wq,
+ dma_addr_t src_addr, unsigned int slen,
+ dma_addr_t dst_addr, unsigned int *dlen,
+ bool disable_async)
+{
+ struct iaa_device_compression_mode *active_compression_mode;
+ struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct iaa_device *iaa_device;
+ struct idxd_desc *idxd_desc;
+ struct iax_hw_desc *desc;
+ struct idxd_device *idxd;
+ struct iaa_wq *iaa_wq;
+ struct pci_dev *pdev;
+ struct device *dev;
+ int ret = 0;
+
+ iaa_wq = idxd_wq_get_private(wq);
+ iaa_device = iaa_wq->iaa_device;
+ idxd = iaa_device->idxd;
+ pdev = idxd->pdev;
+ dev = &pdev->dev;
+
+ active_compression_mode = get_iaa_device_compression_mode(iaa_device, ctx->mode);
+
+ idxd_desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
+ if (IS_ERR(idxd_desc)) {
+ dev_dbg(dev, "idxd descriptor allocation failed\n");
+ dev_dbg(dev, "iaa decompress failed: ret=%ld\n",
+ PTR_ERR(idxd_desc));
+ return PTR_ERR(idxd_desc);
+ }
+ desc = idxd_desc->iax_hw;
+
+ desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_CC;
+ desc->opcode = IAX_OPCODE_DECOMPRESS;
+ desc->max_dst_size = PAGE_SIZE;
+ desc->decompr_flags = IAA_DECOMP_FLAGS;
+ desc->priv = 0;
+
+ desc->src1_addr = (u64)src_addr;
+ desc->dst_addr = (u64)dst_addr;
+ desc->max_dst_size = *dlen;
+ desc->src1_size = slen;
+ desc->completion_addr = idxd_desc->compl_dma;
+
+ if (ctx->use_irq && !disable_async) {
+ desc->flags |= IDXD_OP_FLAG_RCI;
+
+ idxd_desc->crypto.req = req;
+ idxd_desc->crypto.tfm = tfm;
+ idxd_desc->crypto.src_addr = src_addr;
+ idxd_desc->crypto.dst_addr = dst_addr;
+ idxd_desc->crypto.compress = false;
+
+ dev_dbg(dev, "%s: use_async_irq compression mode %s,"
+ " src_addr %llx, dst_addr %llx\n", __func__,
+ active_compression_mode->name,
+ src_addr, dst_addr);
+ } else if (ctx->async_mode && !disable_async)
+ req->base.data = idxd_desc;
+
+ dev_dbg(dev, "%s: decompression mode %s,"
+ " desc->src1_addr %llx, desc->src1_size %d,"
+ " desc->dst_addr %llx, desc->max_dst_size %d,"
+ " desc->src2_addr %llx, desc->src2_size %d\n", __func__,
+ active_compression_mode->name,
+ desc->src1_addr, desc->src1_size, desc->dst_addr,
+ desc->max_dst_size, desc->src2_addr, desc->src2_size);
+
+ ret = idxd_submit_desc(wq, idxd_desc);
+ if (ret) {
+ dev_dbg(dev, "submit_desc failed ret=%d\n", ret);
+ goto err;
+ }
+
+ /* Update stats */
+ update_total_decomp_calls();
+ update_wq_decomp_calls(wq);
+
+ if (ctx->async_mode && !disable_async) {
+ ret = -EINPROGRESS;
+ dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
+ goto out;
+ }
+
+ ret = check_completion(dev, idxd_desc->iax_completion, false, false);
+ if (ret) {
+ dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
+ if (idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
+ pr_warn("%s: falling back to deflate-generic decompress, "
+ "analytics error code %x\n", __func__,
+ idxd_desc->iax_completion->error_code);
+ ret = deflate_generic_decompress(req);
+ if (ret) {
+ dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
+ __func__, ret);
+ goto err;
+ }
+ } else {
+ goto err;
+ }
+ } else {
+ req->dlen = idxd_desc->iax_completion->output_size;
+ }
+
+ *dlen = req->dlen;
+
+ if (!ctx->async_mode || disable_async)
+ idxd_free_desc(wq, idxd_desc);
+
+ /* Update stats */
+ update_total_decomp_bytes_in(slen);
+ update_wq_decomp_bytes(wq, slen);
+out:
+ return ret;
+err:
+ idxd_free_desc(wq, idxd_desc);
+ dev_dbg(dev, "iaa decompress failed: ret=%d\n", ret);
+
+ goto out;
+}
+
+static int iaa_comp_acompress(struct acomp_req *req)
+{
+ struct iaa_compression_ctx *compression_ctx;
+ struct crypto_tfm *tfm = req->base.tfm;
+ dma_addr_t src_addr, dst_addr;
+ bool disable_async = false;
+ int nr_sgs, cpu, ret = 0;
+ struct iaa_wq *iaa_wq;
+ u32 compression_crc;
+ struct idxd_wq *wq;
+ struct device *dev;
+ int order = -1;
+
+ compression_ctx = crypto_tfm_ctx(tfm);
+
+ if (!iaa_crypto_enabled) {
+ pr_debug("iaa_crypto disabled, not compressing\n");
+ return -ENODEV;
+ }
+
+ if (!req->src || !req->slen) {
+ pr_debug("invalid src, not compressing\n");
+ return -EINVAL;
+ }
+
+ cpu = get_cpu();
+ wq = wq_table_next_wq(cpu);
+ put_cpu();
+ if (!wq) {
+ pr_debug("no wq configured for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ ret = iaa_wq_get(wq);
+ if (ret) {
+ pr_debug("no wq available for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ iaa_wq = idxd_wq_get_private(wq);
+
+ if (!req->dst) {
+ gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
+
+ /* incompressible data will always be < 2 * slen */
+ req->dlen = 2 * req->slen;
+ order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
+ req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
+ if (!req->dst) {
+ ret = -ENOMEM;
+ order = -1;
+ goto out;
+ }
+ disable_async = true;
+ }
+
+ dev = &wq->idxd->pdev->dev;
+
+ nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "couldn't map src sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ goto out;
+ }
+ src_addr = sg_dma_address(req->src);
+ dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
+ " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
+ req->src, req->slen, sg_dma_len(req->src));
+
+ nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ goto err_map_dst;
+ }
+ dst_addr = sg_dma_address(req->dst);
+ dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
+ " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
+ req->dst, req->dlen, sg_dma_len(req->dst));
+
+ ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr,
+ &req->dlen, &compression_crc, disable_async);
+ if (ret == -EINPROGRESS)
+ return ret;
+
+ if (!ret && compression_ctx->verify_compress) {
+ ret = iaa_remap_for_verify(dev, iaa_wq, req, &src_addr, &dst_addr);
+ if (ret) {
+ dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
+ dst_addr, &req->dlen, compression_crc);
+ if (ret)
+ dev_dbg(dev, "asynchronous compress verification failed ret=%d\n", ret);
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_TO_DEVICE);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_FROM_DEVICE);
+
+ goto out;
+ }
+
+ if (ret)
+ dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+err_map_dst:
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+out:
+ iaa_wq_put(wq);
+
+ if (order >= 0)
+ sgl_free_order(req->dst, order);
+
+ return ret;
+}
+
+static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req)
+{
+ gfp_t flags = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC;
+ struct crypto_tfm *tfm = req->base.tfm;
+ dma_addr_t src_addr, dst_addr;
+ int nr_sgs, cpu, ret = 0;
+ struct iaa_wq *iaa_wq;
+ struct device *dev;
+ struct idxd_wq *wq;
+ int order = -1;
+
+ cpu = get_cpu();
+ wq = wq_table_next_wq(cpu);
+ put_cpu();
+ if (!wq) {
+ pr_debug("no wq configured for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ ret = iaa_wq_get(wq);
+ if (ret) {
+ pr_debug("no wq available for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ iaa_wq = idxd_wq_get_private(wq);
+
+ dev = &wq->idxd->pdev->dev;
+
+ nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "couldn't map src sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ goto out;
+ }
+ src_addr = sg_dma_address(req->src);
+ dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
+ " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
+ req->src, req->slen, sg_dma_len(req->src));
+
+ req->dlen = 4 * req->slen; /* start with ~avg comp ratio */
+alloc_dest:
+ order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE);
+ req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL);
+ if (!req->dst) {
+ ret = -ENOMEM;
+ order = -1;
+ goto out;
+ }
+
+ nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ goto err_map_dst;
+ }
+
+ dst_addr = sg_dma_address(req->dst);
+ dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
+ " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
+ req->dst, req->dlen, sg_dma_len(req->dst));
+ ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
+ dst_addr, &req->dlen, true);
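+ /* dst too small: unmap, double the size estimate and retry up to CRYPTO_ACOMP_DST_MAX */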
+ if (ret == -EOVERFLOW) {
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ req->dlen *= 2;
+ if (req->dlen > CRYPTO_ACOMP_DST_MAX)
+ goto err_map_dst;
+ goto alloc_dest;
+ }
+
+ if (ret != 0)
+ dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+err_map_dst:
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+out:
+ iaa_wq_put(wq);
+
+ if (order >= 0)
+ sgl_free_order(req->dst, order);
+
+ return ret;
+}
+
+static int iaa_comp_adecompress(struct acomp_req *req)
+{
+ struct crypto_tfm *tfm = req->base.tfm;
+ dma_addr_t src_addr, dst_addr;
+ int nr_sgs, cpu, ret = 0;
+ struct iaa_wq *iaa_wq;
+ struct device *dev;
+ struct idxd_wq *wq;
+
+ if (!iaa_crypto_enabled) {
+ pr_debug("iaa_crypto disabled, not decompressing\n");
+ return -ENODEV;
+ }
+
+ if (!req->src || !req->slen) {
+ pr_debug("invalid src, not decompressing\n");
+ return -EINVAL;
+ }
+
+ if (!req->dst)
+ return iaa_comp_adecompress_alloc_dest(req);
+
+ cpu = get_cpu();
+ wq = wq_table_next_wq(cpu);
+ put_cpu();
+ if (!wq) {
+ pr_debug("no wq configured for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ ret = iaa_wq_get(wq);
+ if (ret) {
+ pr_debug("no wq available for cpu=%d\n", cpu);
+ return -ENODEV;
+ }
+
+ iaa_wq = idxd_wq_get_private(wq);
+
+ dev = &wq->idxd->pdev->dev;
+
+ nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "couldn't map src sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ goto out;
+ }
+ src_addr = sg_dma_address(req->src);
+ dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p,"
+ " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs,
+ req->src, req->slen, sg_dma_len(req->src));
+
+ nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+ if (nr_sgs <= 0 || nr_sgs > 1) {
+ dev_dbg(dev, "couldn't map dst sg for iaa device %d,"
+ " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id,
+ iaa_wq->wq->id, ret);
+ ret = -EIO;
+ goto err_map_dst;
+ }
+ dst_addr = sg_dma_address(req->dst);
+ dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p,"
+ " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs,
+ req->dst, req->dlen, sg_dma_len(req->dst));
+
+ ret = iaa_decompress(tfm, req, wq, src_addr, req->slen,
+ dst_addr, &req->dlen, false);
+ if (ret == -EINPROGRESS)
+ return ret;
+
+ if (ret != 0)
+ dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret);
+
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+err_map_dst:
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+out:
+ iaa_wq_put(wq);
+
+ return ret;
+}
+
+static void compression_ctx_init(struct iaa_compression_ctx *ctx)
+{
+ ctx->verify_compress = iaa_verify_compress;
+ ctx->async_mode = async_mode;
+ ctx->use_irq = use_irq;
+}
+
+static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ compression_ctx_init(ctx);
+
+ ctx->mode = IAA_MODE_FIXED;
+
+ return 0;
+}
+
+static void dst_free(struct scatterlist *sgl)
+{
+ /*
+ * Called when the driver allocated req->dst itself (i.e. req->dst
+ * was NULL); the buffer is actually freed elsewhere using
+ * sgl_free_order(), so there is nothing to do here.
+ */
+}
+
+static struct acomp_alg iaa_acomp_fixed_deflate = {
+ .init = iaa_comp_init_fixed,
+ .compress = iaa_comp_acompress,
+ .decompress = iaa_comp_adecompress,
+ .dst_free = dst_free,
+ .base = {
+ .cra_name = "deflate",
+ .cra_driver_name = "deflate-iaa",
+ .cra_ctxsize = sizeof(struct iaa_compression_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_priority = IAA_ALG_PRIORITY,
+ }
+};
+
+static int iaa_register_compression_device(void)
+{
+ int ret;
+
+ ret = crypto_register_acomp(&iaa_acomp_fixed_deflate);
+ if (ret) {
+ pr_err("deflate algorithm acomp fixed registration failed (%d)\n", ret);
+ goto out;
+ }
+
+ iaa_crypto_registered = true;
+out:
+ return ret;
+}
+
+static int iaa_unregister_compression_device(void)
+{
+ if (iaa_crypto_registered)
+ crypto_unregister_acomp(&iaa_acomp_fixed_deflate);
+
+ return 0;
+}
+
+static int iaa_crypto_probe(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_driver_data *data = idxd->data;
+ struct device *dev = &idxd_dev->conf_dev;
+ bool first_wq = false;
+ int ret = 0;
+
+ if (idxd->state != IDXD_DEV_ENABLED)
+ return -ENXIO;
+
+ if (data->type != IDXD_TYPE_IAX)
+ return -ENODEV;
+
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd_wq_get_private(wq)) {
+ mutex_unlock(&wq->wq_lock);
+ return -EBUSY;
+ }
+
+ if (!idxd_wq_driver_name_match(wq, dev)) {
+ dev_dbg(dev, "wq %d.%d driver_name match failed: wq driver_name %s, dev driver name %s\n",
+ idxd->id, wq->id, wq->driver_name, dev->driver->name);
+ idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
+ ret = -ENODEV;
+ goto err;
+ }
+
+ wq->type = IDXD_WQT_KERNEL;
+
+ ret = idxd_drv_enable_wq(wq);
+ if (ret < 0) {
+ dev_dbg(dev, "enable wq %d.%d failed: %d\n",
+ idxd->id, wq->id, ret);
+ ret = -ENXIO;
+ goto err;
+ }
+
+ mutex_lock(&iaa_devices_lock);
+
+ if (list_empty(&iaa_devices)) {
+ ret = alloc_wq_table(wq->idxd->max_wqs);
+ if (ret)
+ goto err_alloc;
+ first_wq = true;
+ }
+
+ ret = save_iaa_wq(wq);
+ if (ret)
+ goto err_save;
+
+ rebalance_wq_table();
+
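+ /* Register the deflate-iaa acomp algorithm once the first wq comes up */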
+ if (first_wq) {
+ iaa_crypto_enabled = true;
+ ret = iaa_register_compression_device();
+ if (ret != 0) {
+ iaa_crypto_enabled = false;
+ dev_dbg(dev, "IAA compression device registration failed\n");
+ goto err_register;
+ }
+ try_module_get(THIS_MODULE);
+
+ pr_info("iaa_crypto now ENABLED\n");
+ }
+
+ mutex_unlock(&iaa_devices_lock);
+out:
+ mutex_unlock(&wq->wq_lock);
+
+ return ret;
+
+err_register:
+ remove_iaa_wq(wq);
+ free_iaa_wq(idxd_wq_get_private(wq));
+err_save:
+ if (first_wq)
+ free_wq_table();
+err_alloc:
+ mutex_unlock(&iaa_devices_lock);
+ idxd_drv_disable_wq(wq);
+err:
+ wq->type = IDXD_WQT_NONE;
+
+ goto out;
+}
+
+static void iaa_crypto_remove(struct idxd_dev *idxd_dev)
+{
+ struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
+ struct idxd_device *idxd = wq->idxd;
+ struct iaa_wq *iaa_wq;
+ bool free = false;
+
+ idxd_wq_quiesce(wq);
+
+ mutex_lock(&wq->wq_lock);
+ mutex_lock(&iaa_devices_lock);
+
+ remove_iaa_wq(wq);
+
+ spin_lock(&idxd->dev_lock);
+ iaa_wq = idxd_wq_get_private(wq);
+ if (!iaa_wq) {
+ spin_unlock(&idxd->dev_lock);
+ pr_err("%s: no iaa_wq available to remove\n", __func__);
+ goto out;
+ }
+
+ if (iaa_wq->ref) {
+ iaa_wq->remove = true;
+ } else {
+ wq = iaa_wq->wq;
+ idxd_wq_set_private(wq, NULL);
+ free = true;
+ }
+ spin_unlock(&idxd->dev_lock);
+ if (free) {
+ __free_iaa_wq(iaa_wq);
+ kfree(iaa_wq);
+ }
+
+ idxd_drv_disable_wq(wq);
+ rebalance_wq_table();
+
+ if (nr_iaa == 0) {
+ iaa_crypto_enabled = false;
+ free_wq_table();
+ module_put(THIS_MODULE);
+
+ pr_info("iaa_crypto now DISABLED\n");
+ }
+out:
+ mutex_unlock(&iaa_devices_lock);
+ mutex_unlock(&wq->wq_lock);
+}
+
+static enum idxd_dev_type dev_types[] = {
+ IDXD_DEV_WQ,
+ IDXD_DEV_NONE,
+};
+
+static struct idxd_device_driver iaa_crypto_driver = {
+ .probe = iaa_crypto_probe,
+ .remove = iaa_crypto_remove,
+ .name = IDXD_SUBDRIVER_NAME,
+ .type = dev_types,
+ .desc_complete = iaa_desc_complete,
+};
+
+static int __init iaa_crypto_init_module(void)
+{
+ int ret = 0;
+ int node;
+
+ nr_cpus = num_online_cpus();
+ for_each_node_with_cpus(node)
+ nr_nodes++;
+ if (!nr_nodes) {
+ pr_err("IAA couldn't find any nodes with cpus\n");
+ return -ENODEV;
+ }
+ nr_cpus_per_node = nr_cpus / nr_nodes;
+
+ if (crypto_has_comp("deflate-generic", 0, 0))
+ deflate_generic_tfm = crypto_alloc_comp("deflate-generic", 0, 0);
+
+ if (IS_ERR_OR_NULL(deflate_generic_tfm)) {
+ pr_err("IAA could not alloc %s tfm: errcode = %ld\n",
+ "deflate-generic", PTR_ERR(deflate_generic_tfm));
+ return -ENOMEM;
+ }
+
+ ret = iaa_aecs_init_fixed();
+ if (ret < 0) {
+ pr_debug("IAA fixed compression mode init failed\n");
+ goto err_aecs_init;
+ }
+
+ ret = idxd_driver_register(&iaa_crypto_driver);
+ if (ret) {
+ pr_debug("IAA wq sub-driver registration failed\n");
+ goto err_driver_reg;
+ }
+
+ ret = driver_create_file(&iaa_crypto_driver.drv,
+ &driver_attr_verify_compress);
+ if (ret) {
+ pr_debug("IAA verify_compress attr creation failed\n");
+ goto err_verify_attr_create;
+ }
+
+ ret = driver_create_file(&iaa_crypto_driver.drv,
+ &driver_attr_sync_mode);
+ if (ret) {
+ pr_debug("IAA sync mode attr creation failed\n");
+ goto err_sync_attr_create;
+ }
+
+ if (iaa_crypto_debugfs_init())
+ pr_warn("debugfs init failed, stats not available\n");
+
+ pr_debug("initialized\n");
+out:
+ return ret;
+
+err_sync_attr_create:
+ driver_remove_file(&iaa_crypto_driver.drv,
+ &driver_attr_verify_compress);
+err_verify_attr_create:
+ idxd_driver_unregister(&iaa_crypto_driver);
+err_driver_reg:
+ iaa_aecs_cleanup_fixed();
+err_aecs_init:
+ crypto_free_comp(deflate_generic_tfm);
+
+ goto out;
+}
+
+static void __exit iaa_crypto_cleanup_module(void)
+{
+ if (iaa_unregister_compression_device())
+ pr_debug("IAA compression device unregister failed\n");
+
+ iaa_crypto_debugfs_cleanup();
+ driver_remove_file(&iaa_crypto_driver.drv,
+ &driver_attr_sync_mode);
+ driver_remove_file(&iaa_crypto_driver.drv,
+ &driver_attr_verify_compress);
+ idxd_driver_unregister(&iaa_crypto_driver);
+ iaa_aecs_cleanup_fixed();
+ crypto_free_comp(deflate_generic_tfm);
+
+ pr_debug("cleaned up\n");
+}
+
+MODULE_IMPORT_NS(IDXD);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_IDXD_DEVICE(0);
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("IAA Compression Accelerator Crypto Driver");
+
+module_init(iaa_crypto_init_module);
+module_exit(iaa_crypto_cleanup_module);
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.c b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
new file mode 100644
index 0000000000..2e3b7b73af
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <uapi/linux/idxd.h>
+#include <linux/idxd.h>
+#include <linux/dmaengine.h>
+#include "../../dma/idxd/idxd.h"
+#include <linux/debugfs.h>
+#include <crypto/internal/acompress.h>
+#include "iaa_crypto.h"
+#include "iaa_crypto_stats.h"
+
+static u64 total_comp_calls;
+static u64 total_decomp_calls;
+static u64 total_sw_decomp_calls;
+static u64 max_comp_delay_ns;
+static u64 max_decomp_delay_ns;
+static u64 max_acomp_delay_ns;
+static u64 max_adecomp_delay_ns;
+static u64 total_comp_bytes_out;
+static u64 total_decomp_bytes_in;
+static u64 total_completion_einval_errors;
+static u64 total_completion_timeout_errors;
+static u64 total_completion_comp_buf_overflow_errors;
+
+static struct dentry *iaa_crypto_debugfs_root;
+
+void update_total_comp_calls(void)
+{
+ total_comp_calls++;
+}
+
+void update_total_comp_bytes_out(int n)
+{
+ total_comp_bytes_out += n;
+}
+
+void update_total_decomp_calls(void)
+{
+ total_decomp_calls++;
+}
+
+void update_total_sw_decomp_calls(void)
+{
+ total_sw_decomp_calls++;
+}
+
+void update_total_decomp_bytes_in(int n)
+{
+ total_decomp_bytes_in += n;
+}
+
+void update_completion_einval_errs(void)
+{
+ total_completion_einval_errors++;
+}
+
+void update_completion_timeout_errs(void)
+{
+ total_completion_timeout_errors++;
+}
+
+void update_completion_comp_buf_overflow_errs(void)
+{
+ total_completion_comp_buf_overflow_errors++;
+}
+
+void update_max_comp_delay_ns(u64 start_time_ns)
+{
+ u64 time_diff;
+
+ time_diff = ktime_get_ns() - start_time_ns;
+
+ if (time_diff > max_comp_delay_ns)
+ max_comp_delay_ns = time_diff;
+}
+
+void update_max_decomp_delay_ns(u64 start_time_ns)
+{
+ u64 time_diff;
+
+ time_diff = ktime_get_ns() - start_time_ns;
+
+ if (time_diff > max_decomp_delay_ns)
+ max_decomp_delay_ns = time_diff;
+}
+
+void update_max_acomp_delay_ns(u64 start_time_ns)
+{
+ u64 time_diff;
+
+ time_diff = ktime_get_ns() - start_time_ns;
+
+ if (time_diff > max_acomp_delay_ns)
+ max_acomp_delay_ns = time_diff;
+}
+
+void update_max_adecomp_delay_ns(u64 start_time_ns)
+{
+ u64 time_diff;
+
+ time_diff = ktime_get_ns() - start_time_ns;
+
+ if (time_diff > max_adecomp_delay_ns)
+ max_adecomp_delay_ns = time_diff;
+}
+
+void update_wq_comp_calls(struct idxd_wq *idxd_wq)
+{
+ struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
+
+ wq->comp_calls++;
+ wq->iaa_device->comp_calls++;
+}
+
+void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n)
+{
+ struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
+
+ wq->comp_bytes += n;
+ wq->iaa_device->comp_bytes += n;
+}
+
+void update_wq_decomp_calls(struct idxd_wq *idxd_wq)
+{
+ struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
+
+ wq->decomp_calls++;
+ wq->iaa_device->decomp_calls++;
+}
+
+void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n)
+{
+ struct iaa_wq *wq = idxd_wq_get_private(idxd_wq);
+
+ wq->decomp_bytes += n;
+ wq->iaa_device->decomp_bytes += n;
+}
+
+static void reset_iaa_crypto_stats(void)
+{
+ total_comp_calls = 0;
+ total_decomp_calls = 0;
+ total_sw_decomp_calls = 0;
+ max_comp_delay_ns = 0;
+ max_decomp_delay_ns = 0;
+ max_acomp_delay_ns = 0;
+ max_adecomp_delay_ns = 0;
+ total_comp_bytes_out = 0;
+ total_decomp_bytes_in = 0;
+ total_completion_einval_errors = 0;
+ total_completion_timeout_errors = 0;
+ total_completion_comp_buf_overflow_errors = 0;
+}
+
+static void reset_wq_stats(struct iaa_wq *wq)
+{
+ wq->comp_calls = 0;
+ wq->comp_bytes = 0;
+ wq->decomp_calls = 0;
+ wq->decomp_bytes = 0;
+}
+
+static void reset_device_stats(struct iaa_device *iaa_device)
+{
+ struct iaa_wq *iaa_wq;
+
+ iaa_device->comp_calls = 0;
+ iaa_device->comp_bytes = 0;
+ iaa_device->decomp_calls = 0;
+ iaa_device->decomp_bytes = 0;
+
+ list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
+ reset_wq_stats(iaa_wq);
+}
+
+static void wq_show(struct seq_file *m, struct iaa_wq *iaa_wq)
+{
+ seq_printf(m, " name: %s\n", iaa_wq->wq->name);
+ seq_printf(m, " comp_calls: %llu\n", iaa_wq->comp_calls);
+ seq_printf(m, " comp_bytes: %llu\n", iaa_wq->comp_bytes);
+ seq_printf(m, " decomp_calls: %llu\n", iaa_wq->decomp_calls);
+ seq_printf(m, " decomp_bytes: %llu\n\n", iaa_wq->decomp_bytes);
+}
+
+static void device_stats_show(struct seq_file *m, struct iaa_device *iaa_device)
+{
+ struct iaa_wq *iaa_wq;
+
+ seq_puts(m, "iaa device:\n");
+ seq_printf(m, " id: %d\n", iaa_device->idxd->id);
+ seq_printf(m, " n_wqs: %d\n", iaa_device->n_wq);
+ seq_printf(m, " comp_calls: %llu\n", iaa_device->comp_calls);
+ seq_printf(m, " comp_bytes: %llu\n", iaa_device->comp_bytes);
+ seq_printf(m, " decomp_calls: %llu\n", iaa_device->decomp_calls);
+ seq_printf(m, " decomp_bytes: %llu\n", iaa_device->decomp_bytes);
+ seq_puts(m, " wqs:\n");
+
+ list_for_each_entry(iaa_wq, &iaa_device->wqs, list)
+ wq_show(m, iaa_wq);
+}
+
+static void global_stats_show(struct seq_file *m)
+{
+ seq_puts(m, "global stats:\n");
+ seq_printf(m, " total_comp_calls: %llu\n", total_comp_calls);
+ seq_printf(m, " total_decomp_calls: %llu\n", total_decomp_calls);
+ seq_printf(m, " total_sw_decomp_calls: %llu\n", total_sw_decomp_calls);
+ seq_printf(m, " total_comp_bytes_out: %llu\n", total_comp_bytes_out);
+ seq_printf(m, " total_decomp_bytes_in: %llu\n", total_decomp_bytes_in);
+ seq_printf(m, " total_completion_einval_errors: %llu\n",
+ total_completion_einval_errors);
+ seq_printf(m, " total_completion_timeout_errors: %llu\n",
+ total_completion_timeout_errors);
+ seq_printf(m, " total_completion_comp_buf_overflow_errors: %llu\n\n",
+ total_completion_comp_buf_overflow_errors);
+}
+
+static int wq_stats_show(struct seq_file *m, void *v)
+{
+ struct iaa_device *iaa_device;
+
+ mutex_lock(&iaa_devices_lock);
+
+ global_stats_show(m);
+
+ list_for_each_entry(iaa_device, &iaa_devices, list)
+ device_stats_show(m, iaa_device);
+
+ mutex_unlock(&iaa_devices_lock);
+
+ return 0;
+}
+
+static int iaa_crypto_stats_reset(void *data, u64 value)
+{
+ struct iaa_device *iaa_device;
+
+ reset_iaa_crypto_stats();
+
+ mutex_lock(&iaa_devices_lock);
+
+ list_for_each_entry(iaa_device, &iaa_devices, list)
+ reset_device_stats(iaa_device);
+
+ mutex_unlock(&iaa_devices_lock);
+
+ return 0;
+}
+
+static int wq_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wq_stats_show, file);
+}
+
+static const struct file_operations wq_stats_fops = {
+ .open = wq_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+DEFINE_DEBUGFS_ATTRIBUTE(wq_stats_reset_fops, NULL, iaa_crypto_stats_reset, "%llu\n");
+
+int __init iaa_crypto_debugfs_init(void)
+{
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ iaa_crypto_debugfs_root = debugfs_create_dir("iaa_crypto", NULL);
+ if (!iaa_crypto_debugfs_root)
+ return -ENOMEM;
+
+ debugfs_create_u64("max_comp_delay_ns", 0644,
+ iaa_crypto_debugfs_root, &max_comp_delay_ns);
+ debugfs_create_u64("max_decomp_delay_ns", 0644,
+ iaa_crypto_debugfs_root, &max_decomp_delay_ns);
+ debugfs_create_u64("max_acomp_delay_ns", 0644,
+ iaa_crypto_debugfs_root, &max_comp_delay_ns);
+ debugfs_create_u64("max_adecomp_delay_ns", 0644,
+ iaa_crypto_debugfs_root, &max_decomp_delay_ns);
+ debugfs_create_u64("total_comp_calls", 0644,
+ iaa_crypto_debugfs_root, &total_comp_calls);
+ debugfs_create_u64("total_decomp_calls", 0644,
+ iaa_crypto_debugfs_root, &total_decomp_calls);
+ debugfs_create_u64("total_sw_decomp_calls", 0644,
+ iaa_crypto_debugfs_root, &total_sw_decomp_calls);
+ debugfs_create_u64("total_comp_bytes_out", 0644,
+ iaa_crypto_debugfs_root, &total_comp_bytes_out);
+ debugfs_create_u64("total_decomp_bytes_in", 0644,
+ iaa_crypto_debugfs_root, &total_decomp_bytes_in);
+ debugfs_create_file("wq_stats", 0644, iaa_crypto_debugfs_root, NULL,
+ &wq_stats_fops);
+ debugfs_create_file("stats_reset", 0644, iaa_crypto_debugfs_root, NULL,
+ &wq_stats_reset_fops);
+
+ return 0;
+}
+
+void __exit iaa_crypto_debugfs_cleanup(void)
+{
+ debugfs_remove_recursive(iaa_crypto_debugfs_root);
+}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_stats.h b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
new file mode 100644
index 0000000000..c10b87b86f
--- /dev/null
+++ b/drivers/crypto/intel/iaa/iaa_crypto_stats.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2021 Intel Corporation. All rights rsvd. */
+
+#ifndef __CRYPTO_DEV_IAA_CRYPTO_STATS_H__
+#define __CRYPTO_DEV_IAA_CRYPTO_STATS_H__
+
+#if defined(CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS)
+int iaa_crypto_debugfs_init(void);
+void iaa_crypto_debugfs_cleanup(void);
+
+void update_total_comp_calls(void);
+void update_total_comp_bytes_out(int n);
+void update_total_decomp_calls(void);
+void update_total_sw_decomp_calls(void);
+void update_total_decomp_bytes_in(int n);
+void update_max_comp_delay_ns(u64 start_time_ns);
+void update_max_decomp_delay_ns(u64 start_time_ns);
+void update_max_acomp_delay_ns(u64 start_time_ns);
+void update_max_adecomp_delay_ns(u64 start_time_ns);
+void update_completion_einval_errs(void);
+void update_completion_timeout_errs(void);
+void update_completion_comp_buf_overflow_errs(void);
+
+void update_wq_comp_calls(struct idxd_wq *idxd_wq);
+void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n);
+void update_wq_decomp_calls(struct idxd_wq *idxd_wq);
+void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n);
+
+#else
+static inline int iaa_crypto_debugfs_init(void) { return 0; }
+static inline void iaa_crypto_debugfs_cleanup(void) {}
+
+static inline void update_total_comp_calls(void) {}
+static inline void update_total_comp_bytes_out(int n) {}
+static inline void update_total_decomp_calls(void) {}
+static inline void update_total_sw_decomp_calls(void) {}
+static inline void update_total_decomp_bytes_in(int n) {}
+static inline void update_max_comp_delay_ns(u64 start_time_ns) {}
+static inline void update_max_decomp_delay_ns(u64 start_time_ns) {}
+static inline void update_max_acomp_delay_ns(u64 start_time_ns) {}
+static inline void update_max_adecomp_delay_ns(u64 start_time_ns) {}
+static inline void update_completion_einval_errs(void) {}
+static inline void update_completion_timeout_errs(void) {}
+static inline void update_completion_comp_buf_overflow_errs(void) {}
+
+static inline void update_wq_comp_calls(struct idxd_wq *idxd_wq) {}
+static inline void update_wq_comp_bytes(struct idxd_wq *idxd_wq, int n) {}
+static inline void update_wq_decomp_calls(struct idxd_wq *idxd_wq) {}
+static inline void update_wq_decomp_bytes(struct idxd_wq *idxd_wq, int n) {}
+
+#endif // CONFIG_CRYPTO_DEV_IAA_CRYPTO_STATS
+
+#endif
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 1220cc86f9..c120f6715a 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -59,6 +59,17 @@ config CRYPTO_DEV_QAT_4XXX
To compile this as a module, choose M here: the module
will be called qat_4xxx.
+config CRYPTO_DEV_QAT_420XX
+ tristate "Support for Intel(R) QAT_420XX"
+ depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
+ select CRYPTO_DEV_QAT
+ help
+ Support for Intel(R) QuickAssist Technology QAT_420xx
+ for accelerating crypto and compression workloads.
+
+ To compile this as a module, choose M here: the module
+ will be called qat_420xx.
+
config CRYPTO_DEV_QAT_DH895xCCVF
tristate "Support for Intel(R) DH895xCC Virtual Function"
depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST)
diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile
index 258c8a626c..235b69f4f3 100644
--- a/drivers/crypto/intel/qat/Makefile
+++ b/drivers/crypto/intel/qat/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/
obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/
+obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/
obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/
obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/
diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile
new file mode 100644
index 0000000000..a90fbe00b3
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o
+qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
new file mode 100644
index 0000000000..7909b51e97
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/iopoll.h>
+#include <adf_accel_devices.h>
+#include <adf_admin.h>
+#include <adf_cfg.h>
+#include <adf_cfg_services.h>
+#include <adf_clock.h>
+#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen4_config.h>
+#include <adf_gen4_dc.h>
+#include <adf_gen4_hw_data.h>
+#include <adf_gen4_pfvf.h>
+#include <adf_gen4_pm.h>
+#include <adf_gen4_ras.h>
+#include <adf_gen4_timer.h>
+#include <adf_gen4_tl.h>
+#include "adf_420xx_hw_data.h"
+#include "icp_qat_hw.h"
+
+#define ADF_AE_GROUP_0 GENMASK(3, 0)
+#define ADF_AE_GROUP_1 GENMASK(7, 4)
+#define ADF_AE_GROUP_2 GENMASK(11, 8)
+#define ADF_AE_GROUP_3 GENMASK(15, 12)
+#define ADF_AE_GROUP_4 BIT(16)
+
+#define ENA_THD_MASK_ASYM GENMASK(1, 0)
+#define ENA_THD_MASK_SYM GENMASK(3, 0)
+#define ENA_THD_MASK_DC GENMASK(1, 0)
+
+static const char * const adf_420xx_fw_objs[] = {
+ [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ,
+ [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ,
+ [ADF_FW_DC_OBJ] = ADF_420XX_DC_OBJ,
+ [ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ,
+};
+
+static const struct adf_fw_config adf_fw_cy_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_dc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_asym_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_asym_dc_config[] = {
+ {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_sym_dc_config[] = {
+ {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+static const struct adf_fw_config adf_fw_dcc_config[] = {
+ {ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
+ {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
+ {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
+};
+
+
+static struct adf_hw_device_class adf_420xx_class = {
+ .name = ADF_420XX_DEVICE_NAME,
+ .type = DEV_420XX,
+ .instances = 0,
+};
+
+static u32 get_ae_mask(struct adf_hw_device_data *self)
+{
+ u32 me_disable = self->fuses;
+
+ return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
+}
+
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
+{
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return ARRAY_SIZE(adf_fw_cy_config);
+ case SVC_DC:
+ return ARRAY_SIZE(adf_fw_dc_config);
+ case SVC_DCC:
+ return ARRAY_SIZE(adf_fw_dcc_config);
+ case SVC_SYM:
+ return ARRAY_SIZE(adf_fw_sym_config);
+ case SVC_ASYM:
+ return ARRAY_SIZE(adf_fw_asym_config);
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return ARRAY_SIZE(adf_fw_asym_dc_config);
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return ARRAY_SIZE(adf_fw_sym_dc_config);
+ default:
+ return 0;
+ }
+}
+
+static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
+{
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return adf_fw_cy_config;
+ case SVC_DC:
+ return adf_fw_dc_config;
+ case SVC_DCC:
+ return adf_fw_dcc_config;
+ case SVC_SYM:
+ return adf_fw_sym_config;
+ case SVC_ASYM:
+ return adf_fw_asym_config;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return adf_fw_asym_dc_config;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return adf_fw_sym_dc_config;
+ default:
+ return NULL;
+ }
+}
+
+static void update_ae_mask(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ const struct adf_fw_config *fw_config;
+ u32 config_ae_mask = 0;
+ u32 ae_mask, num_objs;
+ int i;
+
+ ae_mask = get_ae_mask(hw_data);
+
+ /* Modify the AE mask based on the firmware configuration loaded */
+ fw_config = get_fw_config(accel_dev);
+ num_objs = uof_get_num_objs(accel_dev);
+
+ config_ae_mask |= ADF_420XX_ADMIN_AE_MASK;
+ for (i = 0; i < num_objs; i++)
+ config_ae_mask |= fw_config[i].ae_mask;
+
+ hw_data->ae_mask = ae_mask & config_ae_mask;
+}
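update_ae_mask() above intersects the fuse-derived engine mask with the engines actually referenced by the selected firmware configuration, plus the admin engine. A standalone sketch of that combination, using made-up fuse and group values (illustrative only, not part of the patch):

/* Illustrative sketch: effective AE mask = fuse-enabled AND config-referenced. */
#include <stdio.h>

#define ACCELENGINES_MASK 0x1FFFFu  /* 17 engines */
#define ADMIN_AE_MASK     0x10000u  /* engine 16  */

int main(void)
{
	unsigned int fuses = 0x00F00;   /* pretend engines 8-11 are fused off */
	unsigned int ae_mask = ~fuses & ACCELENGINES_MASK;
	unsigned int fw_groups[] = { 0x0000F, 0x000F0, 0x0F000 }; /* groups a config uses */
	unsigned int config_ae_mask = ADMIN_AE_MASK;

	for (unsigned int i = 0; i < sizeof(fw_groups) / sizeof(fw_groups[0]); i++)
		config_ae_mask |= fw_groups[i];

	printf("fused-on engines:  0x%05x\n", ae_mask);
	printf("config engines:    0x%05x\n", config_ae_mask);
	printf("effective ae_mask: 0x%05x\n", ae_mask & config_ae_mask);
	return 0;
}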
+
+static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
+{
+ u32 capabilities_sym, capabilities_asym, capabilities_dc;
+ struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
+ u32 capabilities_dcc;
+ u32 fusectl1;
+
+ /* As a side effect, update ae_mask based on configuration */
+ update_ae_mask(accel_dev);
+
+ /* Read accelerator capabilities mask */
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);
+
+ capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CIPHER |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
+ ICP_ACCEL_CAPABILITIES_SHA3 |
+ ICP_ACCEL_CAPABILITIES_SHA3_EXT |
+ ICP_ACCEL_CAPABILITIES_HKDF |
+ ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
+ ICP_ACCEL_CAPABILITIES_SM3 |
+ ICP_ACCEL_CAPABILITIES_SM4 |
+ ICP_ACCEL_CAPABILITIES_AES_V2 |
+ ICP_ACCEL_CAPABILITIES_ZUC |
+ ICP_ACCEL_CAPABILITIES_ZUC_256 |
+ ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
+ ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
+
+ /* A set bit in fusectl1 means the feature is OFF in this SKU */
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+ }
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
+ capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
+
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_SM2 |
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+ }
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ switch (adf_get_service_enabled(accel_dev)) {
+ case SVC_CY:
+ case SVC_CY2:
+ return capabilities_sym | capabilities_asym;
+ case SVC_DC:
+ return capabilities_dc;
+ case SVC_DCC:
+ /*
+ * Sym capabilities are available for chaining operations,
+ * but sym crypto instances cannot be supported
+ */
+ capabilities_dcc = capabilities_dc | capabilities_sym;
+ capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+ return capabilities_dcc;
+ case SVC_SYM:
+ return capabilities_sym;
+ case SVC_ASYM:
+ return capabilities_asym;
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ return capabilities_asym | capabilities_dc;
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ return capabilities_sym | capabilities_dc;
+ default:
+ return 0;
+ }
+}
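The capability logic above follows one pattern throughout: start from the full capability set and clear bits whenever the corresponding slice is fused off. A small self-contained sketch of the same pattern, with hypothetical fuse and capability bit positions (not the real icp_qat_hw values):

/* Illustrative sketch: "set fuse bit means feature off" capability masking. */
#include <stdio.h>

#define CAP_SYM     (1u << 0)
#define CAP_CIPHER  (1u << 1)
#define CAP_AUTH    (1u << 2)
#define FUSE_CIPHER (1u << 0)   /* hypothetical: cipher slice fused off */
#define FUSE_AUTH   (1u << 1)   /* hypothetical: auth slice fused off   */

static unsigned int get_caps(unsigned int fusectl)
{
	unsigned int caps = CAP_SYM | CAP_CIPHER | CAP_AUTH;

	if (fusectl & FUSE_CIPHER)
		caps &= ~(CAP_SYM | CAP_CIPHER);
	if (fusectl & FUSE_AUTH)
		caps &= ~(CAP_AUTH | CAP_CIPHER);
	return caps;
}

int main(void)
{
	printf("no fuses blown: 0x%x\n", get_caps(0));
	printf("auth fused off: 0x%x\n", get_caps(FUSE_AUTH));
	return 0;
}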
+
+static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
+{
+ if (adf_gen4_init_thd2arb_map(accel_dev))
+ dev_warn(&GET_DEV(accel_dev),
+ "Generate of the thread to arbiter map failed");
+
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
+}
+
+static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
+{
+ rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
+ rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
+ rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET;
+ rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET;
+ rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET;
+
+ rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV;
+ rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL;
+ rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION;
+ rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
+ rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
+ rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC;
+ rl_data->scale_ref = ADF_420XX_RL_SLICE_REF;
+}
+
+static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
+{
+ switch (ae_mask) {
+ case ADF_AE_GROUP_0:
+ return RP_GROUP_0;
+ case ADF_AE_GROUP_1:
+ case ADF_AE_GROUP_3:
+ return RP_GROUP_1;
+ case ADF_AE_GROUP_2:
+ if (get_fw_config(accel_dev) == adf_fw_cy_config)
+ return RP_GROUP_0;
+ else
+ return RP_GROUP_1;
+ default:
+ dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized");
+ return -EINVAL;
+ }
+}
+
+static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ switch (fw_config[obj_num].obj) {
+ case ADF_FW_ASYM_OBJ:
+ return ENA_THD_MASK_ASYM;
+ case ADF_FW_SYM_OBJ:
+ return ENA_THD_MASK_SYM;
+ case ADF_FW_DC_OBJ:
+ return ENA_THD_MASK_DC;
+ default:
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+ }
+}
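get_ena_thd_mask() above picks a per-object thread-enable mask: two threads for asym and DC images, four for sym. An illustrative standalone version using the same mask values (not part of the patch):

/* Illustrative sketch: thread-enable mask per firmware object type. */
#include <stdio.h>

enum obj { OBJ_ASYM, OBJ_SYM, OBJ_DC };

static unsigned int ena_thd_mask(enum obj o)
{
	switch (o) {
	case OBJ_ASYM: return 0x3;  /* GENMASK(1, 0) */
	case OBJ_SYM:  return 0xF;  /* GENMASK(3, 0) */
	case OBJ_DC:   return 0x3;  /* GENMASK(1, 0) */
	}
	return 0;
}

int main(void)
{
	static const char * const names[] = { "asym", "sym", "dc" };

	for (int o = OBJ_ASYM; o <= OBJ_DC; o++)
		printf("%-4s: mask 0x%x, %d threads enabled\n",
		       names[o], ena_thd_mask(o),
		       __builtin_popcount(ena_thd_mask(o)));
	return 0;
}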
+
+static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
+{
+ enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
+ const struct adf_fw_config *fw_config;
+ u16 ring_to_svc_map;
+ int i, j;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return 0;
+
+ /* If dcc, all rings handle compression requests */
+ if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
+ for (i = 0; i < RP_GROUP_COUNT; i++)
+ rps[i] = COMP;
+ goto set_mask;
+ }
+
+ for (i = 0; i < RP_GROUP_COUNT; i++) {
+ switch (fw_config[i].ae_mask) {
+ case ADF_AE_GROUP_0:
+ j = RP_GROUP_0;
+ break;
+ case ADF_AE_GROUP_1:
+ j = RP_GROUP_1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (fw_config[i].obj) {
+ case ADF_FW_SYM_OBJ:
+ rps[j] = SYM;
+ break;
+ case ADF_FW_ASYM_OBJ:
+ rps[j] = ASYM;
+ break;
+ case ADF_FW_DC_OBJ:
+ rps[j] = COMP;
+ break;
+ default:
+ rps[j] = 0;
+ break;
+ }
+ }
+
+set_mask:
+ ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
+ rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
+ rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
+
+ return ring_to_svc_map;
+}
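get_ring_to_svc_map() above packs one service type per ring pair into a 16-bit map, replicating the two ring-pair groups across the four pairs. A sketch of that packing; the 3-bit-per-pair shifts and numeric service codes below are assumptions for illustration (the real constants live in adf_cfg_common.h):

/* Illustrative sketch: packing per-ring-pair service types into a u16 map. */
#include <stdio.h>
#include <stdint.h>

enum svc { UNUSED = 0, COMP = 2, SYM = 3, ASYM = 4 };  /* hypothetical codes */

#define RP0_SHIFT 0
#define RP1_SHIFT 3
#define RP2_SHIFT 6
#define RP3_SHIFT 9

int main(void)
{
	/* Two ring-pair groups, replicated across the four ring pairs. */
	enum svc group0 = ASYM, group1 = SYM;
	uint16_t map = group0 << RP0_SHIFT | group1 << RP1_SHIFT |
		       group0 << RP2_SHIFT | group1 << RP3_SHIFT;

	printf("ring_to_svc_map = 0x%04x\n", map);
	return 0;
}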
+
+static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
+ const char * const fw_objs[], int num_objs)
+{
+ const struct adf_fw_config *fw_config;
+ int id;
+
+ fw_config = get_fw_config(accel_dev);
+ if (fw_config)
+ id = fw_config[obj_num].obj;
+ else
+ id = -EINVAL;
+
+ if (id < 0 || id > num_objs)
+ return NULL;
+
+ return fw_objs[id];
+}
+
+static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ int num_fw_objs = ARRAY_SIZE(adf_420xx_fw_objs);
+
+ return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs);
+}
+
+static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return 0;
+
+ return fw_config[obj_num].ae_mask;
+}
+
+static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
+{
+ dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK;
+ dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK;
+ dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
+ dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
+ dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
+ dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
+}
+
+void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
+{
+ hw_data->dev_class = &adf_420xx_class;
+ hw_data->instance_id = adf_420xx_class.instances++;
+ hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
+ hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES;
+ hw_data->num_logical_accel = 1;
+ hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
+ hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
+ hw_data->alloc_irq = adf_isr_resource_alloc;
+ hw_data->free_irq = adf_isr_resource_free;
+ hw_data->enable_error_correction = adf_gen4_enable_error_correction;
+ hw_data->get_accel_mask = adf_gen4_get_accel_mask;
+ hw_data->get_ae_mask = get_ae_mask;
+ hw_data->get_num_accels = adf_gen4_get_num_accels;
+ hw_data->get_num_aes = adf_gen4_get_num_aes;
+ hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
+ hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
+ hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
+ hw_data->get_arb_info = adf_gen4_get_arb_info;
+ hw_data->get_admin_info = adf_gen4_get_admin_info;
+ hw_data->get_accel_cap = get_accel_cap;
+ hw_data->get_sku = adf_gen4_get_sku;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->send_admin_init = adf_send_admin_init;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->get_arb_mapping = adf_get_arbiter_mapping;
+ hw_data->enable_ints = adf_gen4_enable_ints;
+ hw_data->init_device = adf_gen4_init_device;
+ hw_data->reset_device = adf_reset_flr;
+ hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK;
+ hw_data->num_rps = ADF_GEN4_MAX_RPS;
+ hw_data->fw_name = ADF_420XX_FW;
+ hw_data->fw_mmp_name = ADF_420XX_MMP;
+ hw_data->uof_get_name = uof_get_name_420xx;
+ hw_data->uof_get_num_objs = uof_get_num_objs;
+ hw_data->uof_get_ae_mask = uof_get_ae_mask;
+ hw_data->get_rp_group = get_rp_group;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask;
+ hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
+ hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
+ hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
+ hw_data->disable_iov = adf_disable_sriov;
+ hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
+ hw_data->enable_pm = adf_gen4_enable_pm;
+ hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
+ hw_data->dev_config = adf_gen4_dev_config;
+ hw_data->start_timer = adf_gen4_timer_start;
+ hw_data->stop_timer = adf_gen4_timer_stop;
+ hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
+ hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->clock_frequency = ADF_420XX_AE_FREQ;
+
+ adf_gen4_set_err_mask(&hw_data->dev_err_mask);
+ adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
+ adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
+ adf_gen4_init_dc_ops(&hw_data->dc_ops);
+ adf_gen4_init_ras_ops(&hw_data->ras_ops);
+ adf_gen4_init_tl_data(&hw_data->tl_data);
+ adf_init_rl_data(&hw_data->rl_data);
+}
+
+void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data)
+{
+ hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h
new file mode 100644
index 0000000000..99abbfc148
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_420XX_HW_DATA_H_
+#define ADF_420XX_HW_DATA_H_
+
+#include <adf_accel_devices.h>
+
+#define ADF_420XX_MAX_ACCELENGINES 17
+
+#define ADF_420XX_ACCELENGINES_MASK 0x1FFFF
+#define ADF_420XX_ADMIN_AE_MASK 0x10000
+
+#define ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK (0xFF)
+#define ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK (0xFF00FF)
+#define ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK (0x10001)
+#define ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK (0xF0007)
+#define ADF_420XX_PARITYERRORMASK_PKE_MASK (0xFFF)
+#define ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK (0x3FF03FF)
+
+/*
+ * SSMFEATREN bit mask
+ * BIT(4) - enables parity detection on CPP
+ * BIT(12) - enables the logging of push/pull data errors
+ * in pperr register
+ * BIT(16) - BIT(27) - enable parity detection on SPPs
+ */
+#define ADF_420XX_SSMFEATREN_MASK \
+ (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+ BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27))
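A quick standalone check (not part of the patch) that the BIT() chain above is the compact form BIT(4) | BIT(12) | GENMASK(27, 16), matching the comment's description of the SPP parity range; GENMASK() is redefined locally so the example compiles on its own:

#include <assert.h>
#include <stdio.h>

#define BIT(n)        (1UL << (n))
#define GENMASK(h, l) (((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

int main(void)
{
	unsigned long chain = BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) |
			      BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23) |
			      BIT(24) | BIT(25) | BIT(26) | BIT(27);

	/* Same value, written as two fixed bits plus the bit 16..27 range. */
	assert(chain == (BIT(4) | BIT(12) | GENMASK(27, 16)));
	printf("SSMFEATREN mask: 0x%08lx\n", chain);
	return 0;
}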
+
+/* Firmware Binaries */
+#define ADF_420XX_FW "qat_420xx.bin"
+#define ADF_420XX_MMP "qat_420xx_mmp.bin"
+#define ADF_420XX_SYM_OBJ "qat_420xx_sym.bin"
+#define ADF_420XX_DC_OBJ "qat_420xx_dc.bin"
+#define ADF_420XX_ASYM_OBJ "qat_420xx_asym.bin"
+#define ADF_420XX_ADMIN_OBJ "qat_420xx_admin.bin"
+
+/* RL constants */
+#define ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV 100
+#define ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL 102
+#define ADF_420XX_RL_DCPR_CORRECTION 1
+#define ADF_420XX_RL_SCANS_PER_SEC 954
+#define ADF_420XX_RL_MAX_TP_ASYM 173750UL
+#define ADF_420XX_RL_MAX_TP_SYM 95000UL
+#define ADF_420XX_RL_MAX_TP_DC 40000UL
+#define ADF_420XX_RL_SLICE_REF 1000UL
+
+/* Clocks frequency */
+#define ADF_420XX_AE_FREQ (1000 * HZ_PER_MHZ)
+
+void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id);
+void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data);
+
+#endif
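The PCIe scale factor pair above (mul 102, div 100) implies roughly a 2% correction on PCIe-side throughput figures; how the rate-limiting code applies it is not shown in this patch, so the arithmetic below is illustrative only:

/* Illustrative sketch: applying a mul/div scale factor to a throughput value. */
#include <stdio.h>

int main(void)
{
	unsigned long long rate = 40000;            /* e.g. a DC throughput figure */
	unsigned long long scaled = rate * 102 / 100;

	printf("%llu -> %llu after PCIe scaling\n", rate, scaled);
	return 0;
}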
diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
new file mode 100644
index 0000000000..2a3598409e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <adf_accel_devices.h>
+#include <adf_gen4_hw_data.h>
+#include <adf_gen4_config.h>
+#include <adf_cfg.h>
+#include <adf_common_drv.h>
+#include <adf_dbgfs.h>
+
+#include "adf_420xx_hw_data.h"
+
+static const struct pci_device_id adf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+ if (accel_dev->hw_device) {
+ adf_clean_hw_data_420xx(accel_dev->hw_device);
+ accel_dev->hw_device = NULL;
+ }
+ adf_dbgfs_exit(accel_dev);
+ adf_cfg_dev_remove(accel_dev);
+ adf_devmgr_rm_dev(accel_dev, NULL);
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct adf_accel_dev *accel_dev;
+ struct adf_accel_pci *accel_pci_dev;
+ struct adf_hw_device_data *hw_data;
+ unsigned int i, bar_nr;
+ unsigned long bar_mask;
+ struct adf_bar *bar;
+ int ret;
+
+ if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+ /*
+ * If the accelerator is connected to a node with no memory
+ * there is no point in using the accelerator since the remote
+ * memory transaction will be very slow.
+ */
+ dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+ return -EINVAL;
+ }
+
+ accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL);
+ if (!accel_dev)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&accel_dev->crypto_list);
+ accel_pci_dev = &accel_dev->accel_pci_dev;
+ accel_pci_dev->pci_dev = pdev;
+
+ /*
+ * Add accel device to accel table
+ * This should be called before adf_cleanup_accel is called
+ */
+ if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+ return -EFAULT;
+ }
+
+ accel_dev->owner = THIS_MODULE;
+ /* Allocate and initialise device hardware meta-data structure */
+ hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
+ if (!hw_data) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->hw_device = hw_data;
+ adf_init_hw_data_420xx(accel_dev->hw_device, ent->device);
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
+
+ /* Get Accelerators and Accelerators Engines masks */
+ hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
+ hw_data->ae_mask = hw_data->get_ae_mask(hw_data);
+ accel_pci_dev->sku = hw_data->get_sku(hw_data);
+ /* If the device has no acceleration engines then ignore it */
+ if (!hw_data->accel_mask || !hw_data->ae_mask ||
+ (~hw_data->ae_mask & 0x01)) {
+ dev_err(&pdev->dev, "No acceleration units found.\n");
+ ret = -EFAULT;
+ goto out_err;
+ }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+ if (ret)
+ goto out_err;
+
+ /* Enable PCI device */
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't enable PCI device.\n");
+ goto out_err;
+ }
+
+ /* Set DMA identifier */
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "No usable DMA configuration.\n");
+ goto out_err;
+ }
+
+ ret = adf_gen4_cfg_dev_init(accel_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize configuration.\n");
+ goto out_err;
+ }
+
+ /* Get accelerator capabilities mask */
+ hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev);
+ if (!hw_data->accel_capabilities_mask) {
+ dev_err(&pdev->dev, "Failed to get capabilities mask.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ /* Find and map all the device's BARS */
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
+
+ ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to map pci regions.\n");
+ goto out_err;
+ }
+
+ i = 0;
+ for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) {
+ bar = &accel_pci_dev->pci_bars[i++];
+ bar->virt_addr = pcim_iomap_table(pdev)[bar_nr];
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_save_state(pdev)) {
+ dev_err(&pdev->dev, "Failed to save pci state.\n");
+ ret = -ENOMEM;
+ goto out_err;
+ }
+
+ accel_dev->ras_errors.enabled = true;
+ adf_dbgfs_init(accel_dev);
+
+ ret = adf_dev_up(accel_dev, true);
+ if (ret)
+ goto out_err_dev_stop;
+
+ ret = adf_sysfs_init(accel_dev);
+ if (ret)
+ goto out_err_dev_stop;
+
+ return ret;
+
+out_err_dev_stop:
+ adf_dev_down(accel_dev, false);
+out_err:
+ adf_cleanup_accel(accel_dev);
+ return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+ struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+ if (!accel_dev) {
+ pr_err("QAT: Driver removal failed\n");
+ return;
+ }
+ adf_dev_down(accel_dev, false);
+ adf_cleanup_accel(accel_dev);
+}
+
+static struct pci_driver adf_driver = {
+ .id_table = adf_pci_tbl,
+ .name = ADF_420XX_DEVICE_NAME,
+ .probe = adf_probe,
+ .remove = adf_remove,
+ .sriov_configure = adf_sriov_configure,
+ .err_handler = &adf_err_handler,
+};
+
+module_pci_driver(adf_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE(ADF_420XX_FW);
+MODULE_FIRMWARE(ADF_420XX_MMP);
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_SOFTDEP("pre: crypto-intel_qat");
+MODULE_IMPORT_NS(CRYPTO_QAT);
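The probe routine above relies on the usual goto-unwind error handling: each failure path jumps to a label that undoes only what has been set up so far. A reduced standalone sketch of the same idiom (not part of the patch):

/* Illustrative sketch: goto-based unwinding on partial setup failure. */
#include <stdio.h>
#include <stdlib.h>

static int fake_setup_step(int fail) { return fail ? -1 : 0; }

static int probe_like(int fail_at)
{
	char *a = NULL, *b = NULL;
	int ret;

	a = malloc(16);
	if (!a)
		return -1;

	ret = fake_setup_step(fail_at == 1);
	if (ret)
		goto out_free_a;        /* undo only what exists so far */

	b = malloc(16);
	if (!b) {
		ret = -1;
		goto out_free_a;
	}

	ret = fake_setup_step(fail_at == 2);
	if (ret)
		goto out_free_b;

	free(b);
	free(a);
	return 0;

out_free_b:
	free(b);
out_free_a:
	free(a);
	return ret;
}

int main(void)
{
	printf("success path: %d\n", probe_like(0));
	printf("fail at 1:    %d\n", probe_like(1));
	printf("fail at 2:    %d\n", probe_like(2));
	return 0;
}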
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index b64aaecdd9..e171cddf6f 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -7,12 +7,15 @@
#include <adf_cfg_services.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
+#include <adf_fw_config.h>
+#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
#include <adf_gen4_timer.h>
+#include <adf_gen4_tl.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"
@@ -20,12 +23,10 @@
#define ADF_AE_GROUP_1 GENMASK(7, 4)
#define ADF_AE_GROUP_2 BIT(8)
-enum adf_fw_objs {
- ADF_FW_SYM_OBJ,
- ADF_FW_ASYM_OBJ,
- ADF_FW_DC_OBJ,
- ADF_FW_ADMIN_OBJ,
-};
+#define ENA_THD_MASK_ASYM GENMASK(1, 0)
+#define ENA_THD_MASK_ASYM_401XX GENMASK(5, 0)
+#define ENA_THD_MASK_SYM GENMASK(6, 0)
+#define ENA_THD_MASK_DC GENMASK(1, 0)
static const char * const adf_4xxx_fw_objs[] = {
[ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ,
@@ -41,11 +42,6 @@ static const char * const adf_402xx_fw_objs[] = {
[ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
};
-struct adf_fw_config {
- u32 ae_mask;
- enum adf_fw_objs obj;
-};
-
static const struct adf_fw_config adf_fw_cy_config[] = {
{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
@@ -95,36 +91,12 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config))
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));
-/* Worker thread to service arbiter mappings */
-static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x5555555, 0x5555555, 0x5555555, 0x5555555,
- 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA,
- 0x0
-};
-
-static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
- 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF,
- 0x0
-};
-
-static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = {
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
- 0x0
-};
-
static struct adf_hw_device_class adf_4xxx_class = {
.name = ADF_4XXX_DEVICE_NAME,
.type = DEV_4XXX,
.instances = 0,
};
-static u32 get_accel_mask(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_ACCELERATORS_MASK;
-}
-
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
u32 me_disable = self->fuses;
@@ -132,55 +104,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
}
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_MAX_ACCELERATORS;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
- if (!self || !self->ae_mask)
- return 0;
-
- return hweight32(self->ae_mask);
-}
-
-static u32 get_misc_bar_id(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_PMISC_BAR;
-}
-
-static u32 get_etr_bar_id(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_ETR_BAR;
-}
-
-static u32 get_sram_bar_id(struct adf_hw_device_data *self)
-{
- return ADF_4XXX_SRAM_BAR;
-}
-
-/*
- * The vector routing table is used to select the MSI-X entry to use for each
- * interrupt source.
- * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
- * The final entry corresponds to VF2PF or error interrupts.
- * This vector table could be used to configure one MSI-X entry to be shared
- * between multiple interrupt sources.
- *
- * The default routing is set to have a one to one correspondence between the
- * interrupt source and the MSI-X entry used.
- */
-static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
-{
- void __iomem *csr;
- int i;
-
- csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
- for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
- ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
-}
-
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
@@ -189,7 +112,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
u32 fusectl1;
/* Read accelerator capabilities mask */
- pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);
capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
ICP_ACCEL_CAPABILITIES_CIPHER |
@@ -204,27 +127,27 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_AES_V2;
/* A set bit in fusectl1 means the feature is OFF in this SKU */
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
}
@@ -234,7 +157,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_SM2 |
ICP_ACCEL_CAPABILITIES_ECEDMONT;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
@@ -245,7 +168,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
+ if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
@@ -281,43 +204,13 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
}
}
-static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
-{
- return DEV_SKU_1;
-}
-
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
- switch (adf_get_service_enabled(accel_dev)) {
- case SVC_DC:
- return thrd_to_arb_map_dc;
- case SVC_DCC:
- return thrd_to_arb_map_dcc;
- default:
- return default_thrd_to_arb_map;
- }
-}
+ if (adf_gen4_init_thd2arb_map(accel_dev))
+ dev_warn(&GET_DEV(accel_dev),
+ "Generate of the thread to arbiter map failed");
-static void get_arb_info(struct arb_info *arb_info)
-{
- arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG;
- arb_info->arb_offset = ADF_4XXX_ARB_OFFSET;
- arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
-}
-
-static void get_admin_info(struct admin_info *admin_csrs_info)
-{
- admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
- admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
- admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
-}
-
-static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
-{
- /*
- * 4XXX uses KPT counter for HB
- */
- return ADF_4XXX_KPT_COUNTER_FREQ;
+ return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}
static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
@@ -338,59 +231,7 @@ static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
}
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
- void __iomem *csr = misc_bar->virt_addr;
-
- /* Enable all in errsou3 except VFLR notification on host */
- ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
-}
-
-static void adf_enable_ints(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
-
- addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
- /* Enable bundle interrupts */
- ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
- ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);
-
- /* Enable misc interrupts */
- ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
-}
-
-static int adf_init_device(struct adf_accel_dev *accel_dev)
-{
- void __iomem *addr;
- u32 status;
- u32 csr;
- int ret;
-
- addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
-
- /* Temporarily mask PM interrupt */
- csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
- csr |= ADF_GEN4_PM_SOU;
- ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
-
- /* Set DRV_ACTIVE bit to power up the device */
- ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
-
- /* Poll status register to make sure the device is powered up */
- ret = read_poll_timeout(ADF_CSR_RD, status,
- status & ADF_GEN4_PM_INIT_STATE,
- ADF_GEN4_PM_POLL_DELAY_US,
- ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
- ADF_GEN4_PM_STATUS);
- if (ret)
- dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
-
- return ret;
-}
-
-static u32 uof_get_num_objs(void)
+static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
return ARRAY_SIZE(adf_fw_cy_config);
}
@@ -420,11 +261,64 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev
}
}
-enum adf_rp_groups {
- RP_GROUP_0 = 0,
- RP_GROUP_1,
- RP_GROUP_COUNT
-};
+static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
+{
+ switch (ae_mask) {
+ case ADF_AE_GROUP_0:
+ return RP_GROUP_0;
+ case ADF_AE_GROUP_1:
+ return RP_GROUP_1;
+ default:
+ dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized");
+ return -EINVAL;
+ }
+}
+
+static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ switch (fw_config[obj_num].obj) {
+ case ADF_FW_ASYM_OBJ:
+ return ENA_THD_MASK_ASYM;
+ case ADF_FW_SYM_OBJ:
+ return ENA_THD_MASK_SYM;
+ case ADF_FW_DC_OBJ:
+ return ENA_THD_MASK_DC;
+ default:
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+ }
+}
+
+static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num)
+{
+ const struct adf_fw_config *fw_config;
+
+ if (obj_num >= uof_get_num_objs(accel_dev))
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ fw_config = get_fw_config(accel_dev);
+ if (!fw_config)
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+
+ switch (fw_config[obj_num].obj) {
+ case ADF_FW_ASYM_OBJ:
+ return ENA_THD_MASK_ASYM_401XX;
+ case ADF_FW_SYM_OBJ:
+ return ENA_THD_MASK_SYM;
+ case ADF_FW_DC_OBJ:
+ return ENA_THD_MASK_DC;
+ default:
+ return ADF_GEN4_ENA_THD_MASK_ERROR;
+ }
+}
static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
{
@@ -538,54 +432,64 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
hw_data->dev_class = &adf_4xxx_class;
hw_data->instance_id = adf_4xxx_class.instances++;
- hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
- hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF;
- hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
- hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
+ hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
+ hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
+ hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
+ hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
hw_data->num_logical_accel = 1;
- hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
- hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
+ hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
- hw_data->enable_error_correction = adf_enable_error_correction;
- hw_data->get_accel_mask = get_accel_mask;
+ hw_data->enable_error_correction = adf_gen4_enable_error_correction;
+ hw_data->get_accel_mask = adf_gen4_get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
- hw_data->get_num_accels = get_num_accels;
- hw_data->get_num_aes = get_num_aes;
- hw_data->get_sram_bar_id = get_sram_bar_id;
- hw_data->get_etr_bar_id = get_etr_bar_id;
- hw_data->get_misc_bar_id = get_misc_bar_id;
- hw_data->get_arb_info = get_arb_info;
- hw_data->get_admin_info = get_admin_info;
+ hw_data->get_num_accels = adf_gen4_get_num_accels;
+ hw_data->get_num_aes = adf_gen4_get_num_aes;
+ hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
+ hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
+ hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
+ hw_data->get_arb_info = adf_gen4_get_arb_info;
+ hw_data->get_admin_info = adf_gen4_get_admin_info;
hw_data->get_accel_cap = get_accel_cap;
- hw_data->get_sku = get_sku;
+ hw_data->get_sku = adf_gen4_get_sku;
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->send_admin_init = adf_send_admin_init;
hw_data->init_arb = adf_init_arb;
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
- hw_data->enable_ints = adf_enable_ints;
- hw_data->init_device = adf_init_device;
+ hw_data->enable_ints = adf_gen4_enable_ints;
+ hw_data->init_device = adf_gen4_init_device;
hw_data->reset_device = adf_reset_flr;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
+ hw_data->num_rps = ADF_GEN4_MAX_RPS;
switch (dev_id) {
case ADF_402XX_PCI_DEVICE_ID:
hw_data->fw_name = ADF_402XX_FW;
hw_data->fw_mmp_name = ADF_402XX_MMP;
hw_data->uof_get_name = uof_get_name_402xx;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask;
+ break;
+ case ADF_401XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->uof_get_name = uof_get_name_4xxx;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx;
break;
-
default:
hw_data->fw_name = ADF_4XXX_FW;
hw_data->fw_mmp_name = ADF_4XXX_MMP;
hw_data->uof_get_name = uof_get_name_4xxx;
+ hw_data->get_ena_thd_mask = get_ena_thd_mask;
+ break;
}
hw_data->uof_get_num_objs = uof_get_num_objs;
hw_data->uof_get_ae_mask = uof_get_ae_mask;
- hw_data->set_msix_rttable = set_msix_default_rttable;
+ hw_data->get_rp_group = get_rp_group;
+ hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
hw_data->disable_iov = adf_disable_sriov;
@@ -595,7 +499,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->dev_config = adf_gen4_dev_config;
hw_data->start_timer = adf_gen4_timer_start;
hw_data->stop_timer = adf_gen4_timer_stop;
- hw_data->get_hb_clock = get_heartbeat_clock;
+ hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
@@ -604,6 +508,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen4_init_dc_ops(&hw_data->dc_ops);
adf_gen4_init_ras_ops(&hw_data->ras_ops);
+ adf_gen4_init_tl_data(&hw_data->tl_data);
adf_init_rl_data(&hw_data->rl_data);
}
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index 33423295e9..76388363ea 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -6,25 +6,8 @@
#include <linux/units.h>
#include <adf_accel_devices.h>
-/* PCIe configuration space */
-#define ADF_4XXX_SRAM_BAR 0
-#define ADF_4XXX_PMISC_BAR 1
-#define ADF_4XXX_ETR_BAR 2
-#define ADF_4XXX_RX_RINGS_OFFSET 1
-#define ADF_4XXX_TX_RINGS_MASK 0x1
-#define ADF_4XXX_MAX_ACCELERATORS 1
#define ADF_4XXX_MAX_ACCELENGINES 9
-#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
-/* Physical function fuses */
-#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8)
-#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC)
-#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0)
-#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4)
-#define ADF_4XXX_FUSECTL4_OFFSET (0x2D8)
-#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC)
-
-#define ADF_4XXX_ACCELERATORS_MASK (0x1)
#define ADF_4XXX_ACCELENGINES_MASK (0x1FF)
#define ADF_4XXX_ADMIN_AE_MASK (0x100)
@@ -45,28 +28,6 @@
(BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \
BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23))
-#define ADF_4XXX_ETR_MAX_BANKS 64
-
-/* MSIX interrupt */
-#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040)
-#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044)
-#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084)
-#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04))
-
-/* Bank and ring configuration */
-#define ADF_4XXX_NUM_RINGS_PER_BANK 2
-#define ADF_4XXX_NUM_BANKS_PER_VF 4
-
-/* Arbiter configuration */
-#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
-#define ADF_4XXX_ARB_OFFSET (0x0)
-#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400)
-
-/* Admin Interface Reg Offset */
-#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574)
-#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
-#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
-
/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
@@ -93,22 +54,9 @@
#define ADF_4XXX_RL_SLICE_REF 1000UL
/* Clocks frequency */
-#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
#define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ)
-/* qat_4xxx fuse bits are different from old GENs, redefine them */
-enum icp_qat_4xxx_slice_mask {
- ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0),
- ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1),
- ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2),
- ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
- ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
- ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
- ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7),
-};
-
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id);
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data);
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 8f483d1197..9762f2bf77 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -8,13 +8,10 @@
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dbgfs.h>
-#include <adf_heartbeat.h>
+#include <adf_gen4_config.h>
+#include <adf_gen4_hw_data.h>
#include "adf_4xxx_hw_data.h"
-#include "adf_cfg_services.h"
-#include "qat_compression.h"
-#include "qat_crypto.h"
-#include "adf_transport_access_macros.h"
static const struct pci_device_id adf_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), },
@@ -35,270 +32,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
adf_devmgr_rm_dev(accel_dev, NULL);
}
-static int adf_cfg_dev_init(struct adf_accel_dev *accel_dev)
-{
- const char *config;
- int ret;
-
- config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
-
- ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
- if (ret)
- return ret;
-
- /* Default configuration is crypto only for even devices
- * and compression for odd devices
- */
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, config,
- ADF_STR);
- if (ret)
- return ret;
-
- adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
-
- return 0;
-}
-
-static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
-{
- char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
- int banks = GET_MAX_BANKS(accel_dev);
- int cpus = num_online_cpus();
- unsigned long bank, val;
- int instances;
- int ret;
- int i;
-
- if (adf_hw_dev_has_crypto(accel_dev))
- instances = min(cpus, banks / 2);
- else
- instances = 0;
-
- for (i = 0; i < instances; i++) {
- val = i;
- bank = i * 2;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &bank, ADF_DEC);
- if (ret)
- goto err;
-
- bank += 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &bank, ADF_DEC);
- if (ret)
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
- i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
- val = 128;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 512;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 1;
- snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = ADF_COALESCING_DEF_TIME;
- snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
- ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
- key, &val, ADF_DEC);
- if (ret)
- goto err;
- }
-
- val = i;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
- return ret;
-}
-
-static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
-{
- char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
- int banks = GET_MAX_BANKS(accel_dev);
- int cpus = num_online_cpus();
- unsigned long val;
- int instances;
- int ret;
- int i;
-
- if (adf_hw_dev_has_compression(accel_dev))
- instances = min(cpus, banks);
- else
- instances = 0;
-
- for (i = 0; i < instances; i++) {
- val = i;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 512;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 1;
- snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
- key, &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = ADF_COALESCING_DEF_TIME;
- snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
- ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
- key, &val, ADF_DEC);
- if (ret)
- goto err;
- }
-
- val = i;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- val = 0;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
- if (ret)
- goto err;
-
- return 0;
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
- return ret;
-}
-
-static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
-{
- unsigned long val;
- int ret;
-
- val = 0;
- ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
- &val, ADF_DEC);
- if (ret)
- return ret;
-
- return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
- &val, ADF_DEC);
-}
-
-int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
-{
- char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
- int ret;
-
- ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
- if (ret)
- goto err;
-
- ret = adf_cfg_section_add(accel_dev, "Accelerator0");
- if (ret)
- goto err;
-
- ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
- ADF_SERVICES_ENABLED, services);
- if (ret)
- goto err;
-
- ret = sysfs_match_string(adf_cfg_services, services);
- if (ret < 0)
- goto err;
-
- switch (ret) {
- case SVC_CY:
- case SVC_CY2:
- ret = adf_crypto_dev_config(accel_dev);
- break;
- case SVC_DC:
- case SVC_DCC:
- ret = adf_comp_dev_config(accel_dev);
- break;
- default:
- ret = adf_no_dev_config(accel_dev);
- break;
- }
-
- if (ret)
- goto err;
-
- set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
-
- return ret;
-
-err:
- dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
- return ret;
-}
-
static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct adf_accel_dev *accel_dev;
@@ -348,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device);
pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
- pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses);
+ pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses);
/* Get Accelerators and Accelerators Engines masks */
hw_data->accel_mask = hw_data->get_accel_mask(hw_data);
@@ -381,7 +114,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- ret = adf_cfg_dev_init(accel_dev);
+ ret = adf_gen4_cfg_dev_init(accel_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize configuration.\n");
goto out_err;
@@ -396,7 +129,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Find and map all the device's BARS */
- bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK;
+ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK;
ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev));
if (ret) {
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 779a8aa0b8..6908727bff 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \
adf_sysfs_ras_counters.o \
adf_gen2_hw_data.o \
adf_gen2_config.o \
+ adf_gen4_config.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
@@ -40,9 +41,12 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
adf_fw_counters.o \
adf_cnv_dbgfs.o \
adf_gen4_pm_debugfs.o \
+ adf_gen4_tl.o \
adf_heartbeat.o \
adf_heartbeat_dbgfs.o \
adf_pm_dbgfs.o \
+ adf_telemetry.o \
+ adf_tl_debugfs.o \
adf_dbgfs.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 9d5fdd529a..a16c7e6edc 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -6,11 +6,14 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
+#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
+#include "icp_qat_hw.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
@@ -19,12 +22,15 @@
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
+#define ADF_420XX_DEVICE_NAME "420xx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
#define ADF_402XX_PCI_DEVICE_ID 0x4944
#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
+#define ADF_420XX_PCI_DEVICE_ID 0x4946
+#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
@@ -241,8 +247,10 @@ struct adf_hw_device_data {
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
- u32 (*uof_get_num_objs)(void);
+ u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
+ int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
+ u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
int (*dev_config)(struct adf_accel_dev *accel_dev);
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
@@ -250,6 +258,7 @@ struct adf_hw_device_data {
struct adf_ras_ops ras_ops;
struct adf_dev_err_mask dev_err_mask;
struct adf_rl_hw_data rl_data;
+ struct adf_tl_hw_data tl_data;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
@@ -264,6 +273,7 @@ struct adf_hw_device_data {
u32 admin_ae_mask;
u16 tx_rings_mask;
u16 ring_to_svc_map;
+ u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER];
u8 tx_rx_gap;
u8 num_banks;
u16 num_banks_per_vf;
@@ -272,6 +282,7 @@ struct adf_hw_device_data {
u8 num_logical_accel;
u8 num_engines;
u32 num_hb_ctrs;
+ u8 num_rps;
};
/* CSR write macro */
@@ -304,6 +315,7 @@ struct adf_hw_device_data {
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
+#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
struct adf_admin_comms;
@@ -352,6 +364,7 @@ struct adf_accel_dev {
struct adf_cfg_device_data *cfg;
struct adf_fw_loader_data *fw_loader;
struct adf_admin_comms *admin;
+ struct adf_telemetry *telemetry;
struct adf_dc_data *dc_data;
struct adf_pm power_management;
struct list_head crypto_list;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
index 6be064dc64..4b5d0350fc 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
@@ -19,7 +19,7 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,
int i;
loader = loader_data->fw_loader;
- num_objs = hw_device->uof_get_num_objs();
+ num_objs = hw_device->uof_get_num_objs(accel_dev);
for (i = 0; i < num_objs; i++) {
obj_name = hw_device->uof_get_name(accel_dev, i);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 54b673ec23..acad526eb7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -498,6 +498,43 @@ int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt,
return ret;
}
+int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev,
+ dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes,
+ struct icp_qat_fw_init_admin_slice_cnt *slice_count)
+{
+ u32 ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask;
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_TL_START;
+ req.init_cfg_ptr = tl_dma_addr;
+ req.init_cfg_sz = layout_sz;
+
+ if (rp_indexes)
+ memcpy(&req.rp_indexes, rp_indexes, sizeof(req.rp_indexes));
+
+ ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ if (ret)
+ return ret;
+
+ memcpy(slice_count, &resp.slices, sizeof(*slice_count));
+
+ return 0;
+}
+
+int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ u32 ae_mask = hw_data->admin_ae_mask;
+
+ req.cmd_id = ICP_QAT_FW_TL_STOP;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
+
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h
index 55cbcbc66c..647c8e1967 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h
@@ -23,5 +23,9 @@ int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id,
int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size);
int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err);
+int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev,
+ dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes,
+ struct icp_qat_fw_init_admin_slice_cnt *slice_count);
+int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev);
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
index 6e5de1dab9..89df3888d7 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h
@@ -47,6 +47,7 @@ enum adf_device_type {
DEV_C3XXX,
DEV_C3XXXVF,
DEV_4XXX,
+ DEV_420XX,
};
struct adf_dev_status_info {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
index 477efcc81a..c42f5c25aa 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -10,6 +10,7 @@
#include "adf_fw_counters.h"
#include "adf_heartbeat_dbgfs.h"
#include "adf_pm_dbgfs.h"
+#include "adf_tl_debugfs.h"
/**
* adf_dbgfs_init() - add persistent debugfs entries
@@ -66,6 +67,7 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
adf_heartbeat_dbgfs_add(accel_dev);
adf_pm_dbgfs_add(accel_dev);
adf_cnv_dbgfs_add(accel_dev);
+ adf_tl_dbgfs_add(accel_dev);
}
}
@@ -79,6 +81,7 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
return;
if (!accel_dev->is_vf) {
+ adf_tl_dbgfs_rm(accel_dev);
adf_cnv_dbgfs_rm(accel_dev);
adf_pm_dbgfs_rm(accel_dev);
adf_heartbeat_dbgfs_rm(accel_dev);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
new file mode 100644
index 0000000000..4f86696800
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_FW_CONFIG_H_
+#define ADF_FW_CONFIG_H_
+
+enum adf_fw_objs {
+ ADF_FW_SYM_OBJ,
+ ADF_FW_ASYM_OBJ,
+ ADF_FW_DC_OBJ,
+ ADF_FW_ADMIN_OBJ,
+};
+
+struct adf_fw_config {
+ u32 ae_mask;
+ enum adf_fw_objs obj;
+};
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
new file mode 100644
index 0000000000..fe1f3d727d
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_cfg_services.h"
+#include "adf_cfg_strings.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_config.h"
+#include "adf_heartbeat.h"
+#include "adf_transport_access_macros.h"
+#include "qat_compression.h"
+#include "qat_crypto.h"
+
+static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long bank, val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_crypto(accel_dev))
+ instances = min(cpus, banks / 2);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ bank = i * 2;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &bank, ADF_DEC);
+ if (ret)
+ goto err;
+
+ bank += 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &bank, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+ i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+ val = 128;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n");
+ return ret;
+}
+
+static int adf_comp_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+ int banks = GET_MAX_BANKS(accel_dev);
+ int cpus = num_online_cpus();
+ unsigned long val;
+ int instances;
+ int ret;
+ int i;
+
+ if (adf_hw_dev_has_compression(accel_dev))
+ instances = min(cpus, banks);
+ else
+ instances = 0;
+
+ for (i = 0; i < instances; i++) {
+ val = i;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 512;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 1;
+ snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = ADF_COALESCING_DEF_TIME;
+ snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+ ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+ key, &val, ADF_DEC);
+ if (ret)
+ goto err;
+ }
+
+ val = i;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n");
+ return ret;
+}
+
+static int adf_no_dev_config(struct adf_accel_dev *accel_dev)
+{
+ unsigned long val;
+ int ret;
+
+ val = 0;
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC,
+ &val, ADF_DEC);
+ if (ret)
+ return ret;
+
+ return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY,
+ &val, ADF_DEC);
+}
+
+/**
+ * adf_gen4_dev_config() - create dev config required to create instances
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function creates device configuration required to create instances
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev)
+{
+ char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
+ int ret;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_section_add(accel_dev, "Accelerator0");
+ if (ret)
+ goto err;
+
+ ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, services);
+ if (ret)
+ goto err;
+
+ ret = sysfs_match_string(adf_cfg_services, services);
+ if (ret < 0)
+ goto err;
+
+ switch (ret) {
+ case SVC_CY:
+ case SVC_CY2:
+ ret = adf_crypto_dev_config(accel_dev);
+ break;
+ case SVC_DC:
+ case SVC_DCC:
+ ret = adf_comp_dev_config(accel_dev);
+ break;
+ default:
+ ret = adf_no_dev_config(accel_dev);
+ break;
+ }
+
+ if (ret)
+ goto err;
+
+ set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+
+ return ret;
+
+err:
+ dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n");
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_dev_config);
+
+int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev)
+{
+ const char *config;
+ int ret;
+
+ config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY;
+
+ ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
+ if (ret)
+ return ret;
+
+ /* Default configuration is crypto only for even devices
+ * and compression for odd devices
+ */
+ ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
+ ADF_SERVICES_ENABLED, config,
+ ADF_STR);
+ if (ret)
+ return ret;
+
+ adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_cfg_dev_init);
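A minimal standalone sketch (editorial, not part of the patch) of the even/odd default selection performed by adf_gen4_cfg_dev_init() above; the real ADF_CFG_CY and ADF_CFG_DC strings are defined elsewhere, so the values below are placeholders.

#include <stdio.h>

/* Placeholder strings; the driver uses ADF_CFG_CY / ADF_CFG_DC. */
#define CFG_CY	"cy"
#define CFG_DC	"dc"

int main(void)
{
	for (unsigned int accel_id = 0; accel_id < 4; accel_id++) {
		/* Even devices default to crypto, odd devices to compression. */
		const char *config = accel_id % 2 ? CFG_DC : CFG_CY;

		printf("accel_id %u -> %s\n", accel_id, config);
	}
	return 0;
}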
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
new file mode 100644
index 0000000000..bb87655f69
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_GEN4_CONFIG_H_
+#define ADF_GEN4_CONFIG_H_
+
+#include "adf_accel_devices.h"
+
+int adf_gen4_dev_config(struct adf_accel_dev *accel_dev);
+int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
index 3148a62938..f752653ccb 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c
@@ -2,8 +2,10 @@
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
#include "adf_accel_devices.h"
+#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_gen4_hw_data.h"
+#include "adf_gen4_pm.h"
static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size)
{
@@ -102,6 +104,131 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops)
}
EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops);
+u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_ACCELERATORS_MASK;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask);
+
+u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_MAX_ACCELERATORS;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels);
+
+u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self)
+{
+ if (!self || !self->ae_mask)
+ return 0;
+
+ return hweight32(self->ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes);
+
+u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_PMISC_BAR;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id);
+
+u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_ETR_BAR;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id);
+
+u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self)
+{
+ return ADF_GEN4_SRAM_BAR;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id);
+
+enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self)
+{
+ return DEV_SKU_1;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_sku);
+
+void adf_gen4_get_arb_info(struct arb_info *arb_info)
+{
+ arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG;
+ arb_info->arb_offset = ADF_GEN4_ARB_OFFSET;
+ arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info);
+
+void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info)
+{
+ admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET;
+ admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET;
+ admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info);
+
+u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self)
+{
+ /*
+ * GEN4 uses KPT counter for HB
+ */
+ return ADF_GEN4_KPT_COUNTER_FREQ;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock);
+
+void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR];
+ void __iomem *csr = misc_bar->virt_addr;
+
+ /* Enable all in errsou3 except VFLR notification on host */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction);
+
+void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+
+ addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
+
+ /* Enable bundle interrupts */
+ ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0);
+ ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0);
+
+ /* Enable misc interrupts */
+ ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_enable_ints);
+
+int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+ u32 status;
+ u32 csr;
+ int ret;
+
+ addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
+ csr |= ADF_GEN4_PM_SOU;
+ ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_GEN4_PM_INIT_STATE,
+ ADF_GEN4_PM_POLL_DELAY_US,
+ ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_GEN4_PM_STATUS);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_device);
+
static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
u32 *lower)
{
@@ -135,6 +262,28 @@ void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);
+/*
+ * The vector routing table is used to select the MSI-X entry to use for each
+ * interrupt source.
+ * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts.
+ * The final entry corresponds to VF2PF or error interrupts.
+ * This vector table could be used to configure one MSI-X entry to be shared
+ * between multiple interrupt sources.
+ *
+ * The default routing is set to have a one to one correspondence between the
+ * interrupt source and the MSI-X entry used.
+ */
+void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *csr;
+ int i;
+
+ csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
+ for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
+ ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable);
+
int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
return 0;
@@ -192,3 +341,95 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);
+
+static const u32 thrd_to_arb_map_dcc[] = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0
+};
+
+static const u16 rp_group_to_arb_mask[] = {
+ [RP_GROUP_0] = 0x5,
+ [RP_GROUP_1] = 0xA,
+};
+
+static bool is_single_service(int service_id)
+{
+ switch (service_id) {
+ case SVC_DC:
+ case SVC_SYM:
+ case SVC_ASYM:
+ return true;
+ case SVC_CY:
+ case SVC_CY2:
+ case SVC_DCC:
+ case SVC_ASYM_DC:
+ case SVC_DC_ASYM:
+ case SVC_SYM_DC:
+ case SVC_DC_SYM:
+ default:
+ return false;
+ }
+}
+
+int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u32 *thd2arb_map = hw_data->thd_to_arb_map;
+ unsigned int ae_cnt, worker_obj_cnt, i, j;
+ unsigned long ae_mask, thds_mask;
+ int srv_id, rp_group;
+ u32 thd2arb_map_base;
+ u16 arb_mask;
+
+ if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask ||
+ !hw_data->get_num_aes || !hw_data->uof_get_num_objs ||
+ !hw_data->uof_get_ae_mask)
+ return -EFAULT;
+
+ srv_id = adf_get_service_enabled(accel_dev);
+ if (srv_id < 0)
+ return srv_id;
+
+ ae_cnt = hw_data->get_num_aes(hw_data);
+ worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
+ ADF_GEN4_ADMIN_ACCELENGINES;
+
+ if (srv_id == SVC_DCC) {
+ if (ae_cnt > ICP_QAT_HW_AE_DELIMITER)
+ return -EINVAL;
+
+ memcpy(thd2arb_map, thrd_to_arb_map_dcc,
+ array_size(sizeof(*thd2arb_map), ae_cnt));
+ return 0;
+ }
+
+ for (i = 0; i < worker_obj_cnt; i++) {
+ ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
+ rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
+ thds_mask = hw_data->get_ena_thd_mask(accel_dev, i);
+ thd2arb_map_base = 0;
+
+ if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
+ return -EINVAL;
+
+ if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR)
+ return -EINVAL;
+
+ if (is_single_service(srv_id))
+ arb_mask = rp_group_to_arb_mask[RP_GROUP_0] |
+ rp_group_to_arb_mask[RP_GROUP_1];
+ else
+ arb_mask = rp_group_to_arb_mask[rp_group];
+
+ for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE)
+ thd2arb_map_base |= arb_mask << (j * 4);
+
+ for_each_set_bit(j, &ae_mask, ae_cnt)
+ thd2arb_map[j] = thd2arb_map_base;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);
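To make the per-AE loop in adf_gen4_init_thd2arb_map() above concrete, here is a small standalone sketch (editorial, not part of the patch) of how a 4-bit arbiter mask is replicated into one nibble per enabled thread of a 32-bit map word; NUM_THREADS_PER_AE below is an illustrative value, the driver uses ADF_NUM_THREADS_PER_AE.

#include <stdint.h>
#include <stdio.h>

#define NUM_THREADS_PER_AE 8	/* illustrative value */

/* Place arb_mask at bits [4*j+3:4*j] for every thread j set in thds_mask,
 * mirroring the for_each_set_bit() shift in adf_gen4_init_thd2arb_map(). */
static uint32_t build_thd2arb_word(uint16_t arb_mask, unsigned long thds_mask)
{
	uint32_t word = 0;

	for (unsigned int j = 0; j < NUM_THREADS_PER_AE; j++)
		if (thds_mask & (1UL << j))
			word |= (uint32_t)arb_mask << (j * 4);

	return word;
}

int main(void)
{
	/* rp_group_to_arb_mask[] values from the patch: group 0 -> 0x5, group 1 -> 0xA. */
	printf("0x%08x\n", build_thd2arb_word(0x5, 0x3f));	/* threads 0-5: 0x00555555 */
	printf("0x%08x\n", build_thd2arb_word(0xA, 0x03));	/* threads 0-1: 0x000000aa */
	return 0;
}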
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 1813fe1d5a..7d8a774cad 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -3,9 +3,57 @@
#ifndef ADF_GEN4_HW_CSR_DATA_H_
#define ADF_GEN4_HW_CSR_DATA_H_
+#include <linux/units.h>
+
#include "adf_accel_devices.h"
#include "adf_cfg_common.h"
+/* PCIe configuration space */
+#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4))
+#define ADF_GEN4_SRAM_BAR 0
+#define ADF_GEN4_PMISC_BAR 1
+#define ADF_GEN4_ETR_BAR 2
+
+/* Clocks frequency */
+#define ADF_GEN4_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
+
+/* Physical function fuses */
+#define ADF_GEN4_FUSECTL0_OFFSET 0x2C8
+#define ADF_GEN4_FUSECTL1_OFFSET 0x2CC
+#define ADF_GEN4_FUSECTL2_OFFSET 0x2D0
+#define ADF_GEN4_FUSECTL3_OFFSET 0x2D4
+#define ADF_GEN4_FUSECTL4_OFFSET 0x2D8
+#define ADF_GEN4_FUSECTL5_OFFSET 0x2DC
+
+/* Accelerators */
+#define ADF_GEN4_ACCELERATORS_MASK 0x1
+#define ADF_GEN4_MAX_ACCELERATORS 1
+#define ADF_GEN4_ADMIN_ACCELENGINES 1
+
+/* MSIX interrupt */
+#define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040
+#define ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET 0x41A044
+#define ADF_GEN4_SMIAPF_MASK_OFFSET 0x41A084
+#define ADF_GEN4_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04))
+
+/* Bank and ring configuration */
+#define ADF_GEN4_MAX_RPS 64
+#define ADF_GEN4_NUM_RINGS_PER_BANK 2
+#define ADF_GEN4_NUM_BANKS_PER_VF 4
+#define ADF_GEN4_ETR_MAX_BANKS 64
+#define ADF_GEN4_RX_RINGS_OFFSET 1
+#define ADF_GEN4_TX_RINGS_MASK 0x1
+
+/* Arbiter configuration */
+#define ADF_GEN4_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0))
+#define ADF_GEN4_ARB_OFFSET 0x0
+#define ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET 0x400
+
+/* Admin Interface Reg Offset */
+#define ADF_GEN4_ADMINMSGUR_OFFSET 0x500574
+#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578
+#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970
+
/* Transport access */
#define ADF_BANK_INT_SRC_SEL_MASK 0x44UL
#define ADF_RING_CSR_RING_CONFIG 0x1000
@@ -146,7 +194,46 @@ do { \
#define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800
#define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804
+/* Arbiter threads mask with error value */
+#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0)
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+
+enum icp_qat_gen4_slice_mask {
+ ICP_ACCEL_GEN4_MASK_CIPHER_SLICE = BIT(0),
+ ICP_ACCEL_GEN4_MASK_AUTH_SLICE = BIT(1),
+ ICP_ACCEL_GEN4_MASK_PKE_SLICE = BIT(2),
+ ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE = BIT(3),
+ ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4),
+ ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5),
+ ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7),
+ ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE = BIT(8),
+ ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9),
+};
+
+enum adf_gen4_rp_groups {
+ RP_GROUP_0,
+ RP_GROUP_1,
+ RP_GROUP_COUNT
+};
+
+void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev);
+void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev);
+u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self);
+void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info);
+void adf_gen4_get_arb_info(struct arb_info *arb_info);
+u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self);
+u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self);
+u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self);
+u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self);
+u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self);
+enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self);
+u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self);
+int adf_gen4_init_device(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
+void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev);
+void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
+int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev);
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
new file mode 100644
index 0000000000..7fc7a77f6a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Intel Corporation. */
+#include <linux/export.h>
+#include <linux/kernel.h>
+
+#include "adf_gen4_tl.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+
+#define ADF_GEN4_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen4)
+
+#define ADF_GEN4_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen4)
+
+#define ADF_GEN4_TL_SL_UTIL_COUNTER(_name) \
+ ADF_TL_COUNTER("util_" #_name, \
+ ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen4))
+
+#define ADF_GEN4_TL_SL_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("exec_" #_name, \
+ ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen4))
+
+/* Device level counters. */
+static const struct adf_tl_dbg_counter dev_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_pci_trans_cnt)),
+ /* Max read latency[ns]. */
+ ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_max)),
+ /* Read latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)),
+ /* Max get to put latency[ns]. */
+ ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_max)),
+ /* Get to put latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_out)),
+ /* Page request latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)),
+ /* Page translation latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc),
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)),
+ /* Maximum uTLB used. */
+ ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_max_tlb_used)),
+};
+
+/* Slice utilization counters. */
+static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(cpr),
+ /* Translator slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(xlt),
+ /* Decompression slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(dcpr),
+ /* PKE utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(pke),
+ /* Wireless Authentication slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(wat),
+ /* Wireless Cipher slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(wcp),
+ /* UCS slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(ucs),
+ /* Cipher slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(cph),
+ /* Authentication slice utilization. */
+ ADF_GEN4_TL_SL_UTIL_COUNTER(ath),
+};
+
+/* Slice execution counters. */
+static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(cpr),
+ /* Translator slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(xlt),
+ /* Decompression slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(dcpr),
+ /* PKE execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(pke),
+ /* Wireless Authentication slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(wat),
+ /* Wireless Cipher slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(wcp),
+ /* UCS slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(ucs),
+ /* Cipher slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(cph),
+ /* Authentication slice execution count. */
+ ADF_GEN4_TL_SL_EXEC_COUNTER(ath),
+};
+
+/* Ring pair counters. */
+static const struct adf_tl_dbg_counter rp_counters[] = {
+ /* PCIe partial transactions. */
+ ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_pci_trans_cnt)),
+ /* Get to put latency average[ns]. */
+ ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_gp_lat_acc),
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_ae_put_cnt)),
+ /* PCIe write bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_in)),
+ /* PCIe read bandwidth[Mbps]. */
+ ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_out)),
+ /* Message descriptor DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)),
+ /* Message descriptor DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)),
+ /* Payload DevTLB hit rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)),
+ /* Payload DevTLB miss rate. */
+ ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
+ ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+};
+
+void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ tl_data->layout_sz = ADF_GEN4_TL_LAYOUT_SZ;
+ tl_data->slice_reg_sz = ADF_GEN4_TL_SLICE_REG_SZ;
+ tl_data->rp_reg_sz = ADF_GEN4_TL_RP_REG_SZ;
+ tl_data->num_hbuff = ADF_GEN4_TL_NUM_HIST_BUFFS;
+ tl_data->max_rp = ADF_GEN4_TL_MAX_RP_NUM;
+ tl_data->msg_cnt_off = ADF_GEN4_TL_MSG_CNT_OFF;
+ tl_data->cpp_ns_per_cycle = ADF_GEN4_CPP_NS_PER_CYCLE;
+ tl_data->bw_units_to_bytes = ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES;
+
+ tl_data->dev_counters = dev_counters;
+ tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
+ tl_data->sl_util_counters = sl_util_counters;
+ tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->rp_counters = rp_counters;
+ tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h
new file mode 100644
index 0000000000..32df4163be
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Intel Corporation. */
+#ifndef ADF_GEN4_TL_H
+#define ADF_GEN4_TL_H
+
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+struct adf_tl_hw_data;
+
+/* Computation constants. */
+#define ADF_GEN4_CPP_NS_PER_CYCLE 2
+#define ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES 64
+
+/* Maximum aggregation time. Value in milliseconds. */
+#define ADF_GEN4_TL_MAX_AGGR_TIME_MS 4000
+/* Num of buffers to store historic values. */
+#define ADF_GEN4_TL_NUM_HIST_BUFFS \
+ (ADF_GEN4_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS)
+
+/* Max number of HW resources of one type. */
+#define ADF_GEN4_TL_MAX_SLICES_PER_TYPE 24
+
+/* Max number of simultaneously monitored ring pairs. */
+#define ADF_GEN4_TL_MAX_RP_NUM 4
+
+/**
+ * struct adf_gen4_tl_slice_data_regs - HW slice data as populated by FW.
+ * @reg_tm_slice_exec_cnt: Slice execution count.
+ * @reg_tm_slice_util: Slice utilization.
+ */
+struct adf_gen4_tl_slice_data_regs {
+ __u32 reg_tm_slice_exec_cnt;
+ __u32 reg_tm_slice_util;
+};
+
+#define ADF_GEN4_TL_SLICE_REG_SZ sizeof(struct adf_gen4_tl_slice_data_regs)
+
+/**
+ * struct adf_gen4_tl_device_data_regs - This structure stores device telemetry
+ * counter values as populated periodically by the device.
+ * @reg_tl_rd_lat_acc: read latency accumulator
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator
+ * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator
+ * @reg_tl_re_acc: accumulated ring empty time
+ * @reg_tl_pci_trans_cnt: PCIe partial transactions
+ * @reg_tl_rd_lat_max: maximum logged read latency
+ * @reg_tl_rd_cmpl_cnt: read requests completed count
+ * @reg_tl_gp_lat_max: maximum logged get to put latency
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_page_req_cnt: DevTLB page requests count
+ * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count
+ * @reg_tl_at_max_tlb_used: maximum uTLB used
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved: reserved
+ * @ath_slices: array of Authentication slices utilization registers
+ * @cph_slices: array of Cipher slices utilization registers
+ * @cpr_slices: array of Compression slices utilization registers
+ * @xlt_slices: array of Translator slices utilization registers
+ * @dcpr_slices: array of Decompression slices utilization registers
+ * @pke_slices: array of PKE slices utilization registers
+ * @ucs_slices: array of UCS slices utilization registers
+ * @wat_slices: array of Wireless Authentication slices utilization registers
+ * @wcp_slices: array of Wireless Cipher slices utilization registers
+ */
+struct adf_gen4_tl_device_data_regs {
+ __u64 reg_tl_rd_lat_acc;
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reg_tl_at_page_req_lat_acc;
+ __u64 reg_tl_at_trans_lat_acc;
+ __u64 reg_tl_re_acc;
+ __u32 reg_tl_pci_trans_cnt;
+ __u32 reg_tl_rd_lat_max;
+ __u32 reg_tl_rd_cmpl_cnt;
+ __u32 reg_tl_gp_lat_max;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_page_req_cnt;
+ __u32 reg_tl_at_trans_lat_cnt;
+ __u32 reg_tl_at_max_tlb_used;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved;
+ struct adf_gen4_tl_slice_data_regs ath_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs cph_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs cpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs xlt_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs dcpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs pke_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs ucs_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs wat_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+ struct adf_gen4_tl_slice_data_regs wcp_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE];
+};
+
+/**
+ * struct adf_gen4_tl_ring_pair_data_regs - This structure stores Ring Pair
+ * telemetry counter values as populated periodically by the device.
+ * @reg_tl_gp_lat_acc: get-put latency accumulator
+ * @reserved: reserved
+ * @reg_tl_pci_trans_cnt: PCIe partial transactions
+ * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings
+ * @reg_tl_bw_in: PCIe write bandwidth
+ * @reg_tl_bw_out: PCIe read bandwidth
+ * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate
+ * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate
+ * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate
+ * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate
+ * @reg_tl_re_cnt: ring empty time samples count
+ * @reserved1: reserved
+ */
+struct adf_gen4_tl_ring_pair_data_regs {
+ __u64 reg_tl_gp_lat_acc;
+ __u64 reserved;
+ __u32 reg_tl_pci_trans_cnt;
+ __u32 reg_tl_ae_put_cnt;
+ __u32 reg_tl_bw_in;
+ __u32 reg_tl_bw_out;
+ __u32 reg_tl_at_glob_devtlb_hit;
+ __u32 reg_tl_at_glob_devtlb_miss;
+ __u32 reg_tl_at_payld_devtlb_hit;
+ __u32 reg_tl_at_payld_devtlb_miss;
+ __u32 reg_tl_re_cnt;
+ __u32 reserved1;
+};
+
+#define ADF_GEN4_TL_RP_REG_SZ sizeof(struct adf_gen4_tl_ring_pair_data_regs)
+
+/**
+ * struct adf_gen4_tl_layout - This structure represents the entire telemetry
+ * counters data: Device + 4 Ring Pairs, as populated periodically by the
+ * device.
+ * @tl_device_data_regs: structure of device telemetry registers
+ * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers
+ * @reg_tl_msg_cnt: telemetry messages counter
+ * @reserved: reserved
+ */
+struct adf_gen4_tl_layout {
+ struct adf_gen4_tl_device_data_regs tl_device_data_regs;
+ struct adf_gen4_tl_ring_pair_data_regs
+ tl_ring_pairs_data_regs[ADF_GEN4_TL_MAX_RP_NUM];
+ __u32 reg_tl_msg_cnt;
+ __u32 reserved;
+};
+
+#define ADF_GEN4_TL_LAYOUT_SZ sizeof(struct adf_gen4_tl_layout)
+#define ADF_GEN4_TL_MSG_CNT_OFF offsetof(struct adf_gen4_tl_layout, reg_tl_msg_cnt)
+
+#ifdef CONFIG_DEBUG_FS
+void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data);
+#else
+static inline void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_GEN4_TL_H */
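Editorial illustration (not part of the patch): the telemetry layout above is one flat DMA block, and a counter's position inside it is simply its byte offset, exactly as ADF_GEN4_TL_MSG_CNT_OFF is derived with offsetof(); tl_work_handler() later divides such an offset by sizeof(u32) to index the register array. The stand-in struct below uses placeholder field sizes, not the real register counts.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct adf_gen4_tl_layout. */
struct tl_layout_demo {
	uint8_t device_regs[128];	/* placeholder size */
	uint8_t rp_regs[4][48];		/* placeholder size */
	uint32_t reg_tl_msg_cnt;
	uint32_t reserved;
};

int main(void)
{
	size_t msg_cnt_off = offsetof(struct tl_layout_demo, reg_tl_msg_cnt);

	/* Byte offset of the counter and its index in a u32 view of the block. */
	printf("msg_cnt offset: %zu, u32 index: %zu\n",
	       msg_cnt_off, msg_cnt_off / sizeof(uint32_t));
	return 0;
}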
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 81c39f3d07..f43ae91115 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -11,6 +11,7 @@
#include "adf_heartbeat.h"
#include "adf_rl.h"
#include "adf_sysfs_ras_counters.h"
+#include "adf_telemetry.h"
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
@@ -142,6 +143,10 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
if (ret && ret != -EOPNOTSUPP)
return ret;
+ ret = adf_tl_init(accel_dev);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
@@ -220,6 +225,10 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
if (ret && ret != -EOPNOTSUPP)
return ret;
+ ret = adf_tl_start(accel_dev);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
list_for_each_entry(service, &service_table, list) {
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
dev_err(&GET_DEV(accel_dev),
@@ -279,6 +288,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
!test_bit(ADF_STATUS_STARTING, &accel_dev->status))
return;
+ adf_tl_stop(accel_dev);
adf_rl_stop(accel_dev);
adf_dbgfs_rm(accel_dev);
adf_sysfs_stop_ras(accel_dev);
@@ -374,6 +384,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
adf_heartbeat_shutdown(accel_dev);
+ adf_tl_shutdown(accel_dev);
+
hw_data->disable_iov(accel_dev);
if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
new file mode 100644
index 0000000000..2ff714d11b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Intel Corporation. */
+#define dev_fmt(fmt) "Telemetry: " fmt
+
+#include <asm/errno.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+
+#include "adf_admin.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_telemetry.h"
+
+#define TL_IS_ZERO(input) ((input) == 0)
+
+static bool is_tl_supported(struct adf_accel_dev *accel_dev)
+{
+ u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;
+
+ return fw_caps & TL_CAPABILITY_BIT;
+}
+
+static int validate_tl_data(struct adf_tl_hw_data *tl_data)
+{
+ if (!tl_data->dev_counters ||
+ TL_IS_ZERO(tl_data->num_dev_counters) ||
+ !tl_data->sl_util_counters ||
+ !tl_data->sl_exec_counters ||
+ !tl_data->rp_counters ||
+ TL_IS_ZERO(tl_data->num_rp_counters))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct device *dev = &GET_DEV(accel_dev);
+ size_t regs_sz = tl_data->layout_sz;
+ struct adf_telemetry *telemetry;
+ int node = dev_to_node(dev);
+ void *tl_data_regs;
+ unsigned int i;
+
+ telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node);
+ if (!telemetry)
+ return -ENOMEM;
+
+ telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp,
+ sizeof(*telemetry->rp_num_indexes),
+ GFP_KERNEL);
+ if (!telemetry->rp_num_indexes)
+ goto err_free_tl;
+
+ telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff,
+ sizeof(*telemetry->regs_hist_buff),
+ GFP_KERNEL);
+ if (!telemetry->regs_hist_buff)
+ goto err_free_rp_indexes;
+
+ telemetry->regs_data = dma_alloc_coherent(dev, regs_sz,
+ &telemetry->regs_data_p,
+ GFP_KERNEL);
+ if (!telemetry->regs_data)
+ goto err_free_regs_hist_buff;
+
+ for (i = 0; i < tl_data->num_hbuff; i++) {
+ tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node);
+ if (!tl_data_regs)
+ goto err_free_dma;
+
+ telemetry->regs_hist_buff[i] = tl_data_regs;
+ }
+
+ accel_dev->telemetry = telemetry;
+
+ return 0;
+
+err_free_dma:
+ dma_free_coherent(dev, regs_sz, telemetry->regs_data,
+ telemetry->regs_data_p);
+
+ while (i--)
+ kfree(telemetry->regs_hist_buff[i]);
+
+err_free_regs_hist_buff:
+ kfree(telemetry->regs_hist_buff);
+err_free_rp_indexes:
+ kfree(telemetry->rp_num_indexes);
+err_free_tl:
+ kfree(telemetry);
+
+ return -ENOMEM;
+}
+
+static void adf_tl_free_mem(struct adf_accel_dev *accel_dev)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ size_t regs_sz = tl_data->layout_sz;
+ unsigned int i;
+
+ for (i = 0; i < tl_data->num_hbuff; i++)
+ kfree(telemetry->regs_hist_buff[i]);
+
+ dma_free_coherent(dev, regs_sz, telemetry->regs_data,
+ telemetry->regs_data_p);
+
+ kfree(telemetry->regs_hist_buff);
+ kfree(telemetry->rp_num_indexes);
+ kfree(telemetry);
+ accel_dev->telemetry = NULL;
+}
+
+static unsigned long get_next_timeout(void)
+{
+ return msecs_to_jiffies(ADF_TL_TIMER_INT_MS);
+}
+
+static void snapshot_regs(struct adf_telemetry *telemetry, size_t size)
+{
+ void *dst = telemetry->regs_hist_buff[telemetry->hb_num];
+ void *src = telemetry->regs_data;
+
+ memcpy(dst, src, size);
+}
+
+static void tl_work_handler(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct adf_telemetry *telemetry;
+ struct adf_tl_hw_data *tl_data;
+ u32 msg_cnt, old_msg_cnt;
+ size_t layout_sz;
+ u32 *regs_data;
+ size_t id;
+
+ delayed_work = to_delayed_work(work);
+ telemetry = container_of(delayed_work, struct adf_telemetry, work_ctx);
+ tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ regs_data = telemetry->regs_data;
+
+ id = tl_data->msg_cnt_off / sizeof(*regs_data);
+ layout_sz = tl_data->layout_sz;
+
+ if (!atomic_read(&telemetry->state)) {
+ cancel_delayed_work_sync(&telemetry->work_ctx);
+ return;
+ }
+
+ msg_cnt = regs_data[id];
+ old_msg_cnt = msg_cnt;
+ if (msg_cnt == telemetry->msg_cnt)
+ goto out;
+
+ mutex_lock(&telemetry->regs_hist_lock);
+
+ snapshot_regs(telemetry, layout_sz);
+
+ /* Re-snapshot if the device updated the data while we were copying it */
+ msg_cnt = regs_data[id];
+ if (old_msg_cnt != msg_cnt)
+ snapshot_regs(telemetry, layout_sz);
+
+ telemetry->msg_cnt = msg_cnt;
+ telemetry->hb_num++;
+ telemetry->hb_num %= telemetry->hbuffs;
+
+ mutex_unlock(&telemetry->regs_hist_lock);
+
+out:
+ adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
+}
+
+int adf_tl_halt(struct adf_accel_dev *accel_dev)
+{
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ int ret;
+
+ cancel_delayed_work_sync(&telemetry->work_ctx);
+ atomic_set(&telemetry->state, 0);
+
+ ret = adf_send_admin_tl_stop(accel_dev);
+ if (ret)
+ dev_err(dev, "failed to stop telemetry\n");
+
+ return ret;
+}
+
+int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ size_t layout_sz = tl_data->layout_sz;
+ int ret;
+
+ ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p,
+ layout_sz, telemetry->rp_num_indexes,
+ &telemetry->slice_cnt);
+ if (ret) {
+ dev_err(dev, "failed to start telemetry\n");
+ return ret;
+ }
+
+ telemetry->hbuffs = state;
+ atomic_set(&telemetry->state, state);
+
+ adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout());
+
+ return 0;
+}
+
+int adf_tl_init(struct adf_accel_dev *accel_dev)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
+ struct device *dev = &GET_DEV(accel_dev);
+ struct adf_telemetry *telemetry;
+ unsigned int i;
+ int ret;
+
+ ret = validate_tl_data(tl_data);
+ if (ret)
+ return ret;
+
+ ret = adf_tl_alloc_mem(accel_dev);
+ if (ret) {
+ dev_err(dev, "failed to initialize: %d\n", ret);
+ return ret;
+ }
+
+ telemetry = accel_dev->telemetry;
+ telemetry->accel_dev = accel_dev;
+
+ mutex_init(&telemetry->wr_lock);
+ mutex_init(&telemetry->regs_hist_lock);
+ INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler);
+
+ for (i = 0; i < max_rp; i++)
+ telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED;
+
+ return 0;
+}
+
+int adf_tl_start(struct adf_accel_dev *accel_dev)
+{
+ struct device *dev = &GET_DEV(accel_dev);
+
+ if (!accel_dev->telemetry)
+ return -EOPNOTSUPP;
+
+ if (!is_tl_supported(accel_dev)) {
+ dev_info(dev, "feature not supported by FW\n");
+ adf_tl_free_mem(accel_dev);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void adf_tl_stop(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->telemetry)
+ return;
+
+ if (atomic_read(&accel_dev->telemetry->state))
+ adf_tl_halt(accel_dev);
+}
+
+void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->telemetry)
+ return;
+
+ adf_tl_free_mem(accel_dev);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
new file mode 100644
index 0000000000..9be81cd3b8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Intel Corporation. */
+#ifndef ADF_TELEMETRY_H
+#define ADF_TELEMETRY_H
+
+#include <linux/bits.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "icp_qat_fw_init_admin.h"
+
+struct adf_accel_dev;
+struct adf_tl_dbg_counter;
+struct dentry;
+
+#define ADF_TL_SL_CNT_COUNT \
+ (sizeof(struct icp_qat_fw_init_admin_slice_cnt) / sizeof(__u8))
+
+#define TL_CAPABILITY_BIT BIT(1)
+/* Interval within device writes data to DMA region. Value in milliseconds. */
+#define ADF_TL_DATA_WR_INTERVAL_MS 1000
+/* Interval within timer interrupt should be handled. Value in milliseconds. */
+#define ADF_TL_TIMER_INT_MS (ADF_TL_DATA_WR_INTERVAL_MS / 2)
+
+#define ADF_TL_RP_REGS_DISABLED (0xff)
+
+struct adf_tl_hw_data {
+ size_t layout_sz;
+ size_t slice_reg_sz;
+ size_t rp_reg_sz;
+ size_t msg_cnt_off;
+ const struct adf_tl_dbg_counter *dev_counters;
+ const struct adf_tl_dbg_counter *sl_util_counters;
+ const struct adf_tl_dbg_counter *sl_exec_counters;
+ const struct adf_tl_dbg_counter *rp_counters;
+ u8 num_hbuff;
+ u8 cpp_ns_per_cycle;
+ u8 bw_units_to_bytes;
+ u8 num_dev_counters;
+ u8 num_rp_counters;
+ u8 max_rp;
+};
+
+struct adf_telemetry {
+ struct adf_accel_dev *accel_dev;
+ atomic_t state;
+ u32 hbuffs;
+ int hb_num;
+ u32 msg_cnt;
+ dma_addr_t regs_data_p; /* bus address for DMA mapping */
+ void *regs_data; /* virtual address for DMA mapping */
+ /**
+ * @regs_hist_buff: array of pointers to copies of the last @hbuffs
+ * values of @regs_data
+ */
+ void **regs_hist_buff;
+ struct dentry *dbg_dir;
+ u8 *rp_num_indexes;
+ /**
+ * @regs_hist_lock: protects from race conditions between write and read
+ * to the copies referenced by @regs_hist_buff
+ */
+ struct mutex regs_hist_lock;
+ /**
+ * @wr_lock: protects from concurrent writes to debugfs telemetry files
+ */
+ struct mutex wr_lock;
+ struct delayed_work work_ctx;
+ struct icp_qat_fw_init_admin_slice_cnt slice_cnt;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int adf_tl_init(struct adf_accel_dev *accel_dev);
+int adf_tl_start(struct adf_accel_dev *accel_dev);
+void adf_tl_stop(struct adf_accel_dev *accel_dev);
+void adf_tl_shutdown(struct adf_accel_dev *accel_dev);
+int adf_tl_run(struct adf_accel_dev *accel_dev, int state);
+int adf_tl_halt(struct adf_accel_dev *accel_dev);
+#else
+static inline int adf_tl_init(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline int adf_tl_start(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
+static inline void adf_tl_stop(struct adf_accel_dev *accel_dev)
+{
+}
+
+static inline void adf_tl_shutdown(struct adf_accel_dev *accel_dev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+#endif /* ADF_TELEMETRY_H */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
new file mode 100644
index 0000000000..c8241f5a0a
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Intel Corporation. */
+#define dev_fmt(fmt) "Telemetry debugfs: " fmt
+
+#include <linux/atomic.h>
+#include <linux/debugfs.h>
+#include <linux/dev_printk.h>
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/mutex.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_strings.h"
+#include "adf_telemetry.h"
+#include "adf_tl_debugfs.h"
+
+#define TL_VALUE_MIN_PADDING 20
+#define TL_KEY_MIN_PADDING 23
+#define TL_RP_SRV_UNKNOWN "Unknown"
+
+static int tl_collect_values_u32(struct adf_telemetry *telemetry,
+ size_t counter_offset, u64 *arr)
+{
+ unsigned int samples, hb_idx, i;
+ u32 *regs_hist_buff;
+ u32 counter_val;
+
+ samples = min(telemetry->msg_cnt, telemetry->hbuffs);
+ hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;
+
+ mutex_lock(&telemetry->regs_hist_lock);
+
+ for (i = 0; i < samples; i++) {
+ regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
+ counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
+ arr[i] = counter_val;
+ hb_idx++;
+ }
+
+ mutex_unlock(&telemetry->regs_hist_lock);
+
+ return samples;
+}
+
+static int tl_collect_values_u64(struct adf_telemetry *telemetry,
+ size_t counter_offset, u64 *arr)
+{
+ unsigned int samples, hb_idx, i;
+ u64 *regs_hist_buff;
+ u64 counter_val;
+
+ samples = min(telemetry->msg_cnt, telemetry->hbuffs);
+ hb_idx = telemetry->hb_num + telemetry->hbuffs - samples;
+
+ mutex_lock(&telemetry->regs_hist_lock);
+
+ for (i = 0; i < samples; i++) {
+ regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs];
+ counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)];
+ arr[i] = counter_val;
+ hb_idx++;
+ }
+
+ mutex_unlock(&telemetry->regs_hist_lock);
+
+ return samples;
+}
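/*
 * Editorial sketch, not part of this patch: how the two collectors above walk
 * the history ring oldest-first.  samples = min(msg_cnt, hbuffs) and the walk
 * starts 'samples' slots behind hb_num (the next slot to be written).  With
 * hbuffs = 4, hb_num = 1 and at least four messages received, the visited
 * indexes are 1, 2, 3, 0 - oldest snapshot first, newest last.
 */
static inline unsigned int tl_demo_ring_index(unsigned int hb_num,
					      unsigned int hbuffs,
					      unsigned int samples,
					      unsigned int i)
{
	return (hb_num + hbuffs - samples + i) % hbuffs;
}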
+
+/**
+ * avg_array() - Return average of values within an array.
+ * @array: Array of values.
+ * @len: Number of elements.
+ *
+ * This algorithm computes average of an array without running into overflow.
+ *
+ * Return: average of values.
+ */
+#define avg_array(array, len) ( \
+{ \
+ typeof(&(array)[0]) _array = (array); \
+ __unqual_scalar_typeof(_array[0]) _x = 0; \
+ __unqual_scalar_typeof(_array[0]) _y = 0; \
+ __unqual_scalar_typeof(_array[0]) _a, _b; \
+ typeof(len) _len = (len); \
+ size_t _i; \
+ \
+ for (_i = 0; _i < _len; _i++) { \
+ _a = _array[_i]; \
+ _b = do_div(_a, _len); \
+ _x += _a; \
+ if (_y >= _len - _b) { \
+ _x++; \
+ _y -= _len - _b; \
+ } else { \
+ _y += _b; \
+ } \
+ } \
+ do_div(_y, _len); \
+ (_x + _y); \
+})
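/*
 * Standalone illustration (editorial, not part of this patch) of the
 * carry-based averaging used by avg_array() above: accumulate per-element
 * quotients and fold the remainders back in, so the running sum never
 * overflows the element type even when every value is close to UINT64_MAX.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t avg_no_overflow(const uint64_t *array, size_t len)
{
	uint64_t x = 0, y = 0;	/* quotient sum and remainder accumulator */

	for (size_t i = 0; i < len; i++) {
		uint64_t q = array[i] / len;
		uint64_t r = array[i] % len;

		x += q;
		if (y >= len - r) {	/* remainders amount to another 'len' */
			x++;
			y -= len - r;
		} else {
			y += r;
		}
	}
	return x + y / len;	/* y < len here, so this adds nothing */
}

int main(void)
{
	const uint64_t vals[] = { UINT64_MAX, UINT64_MAX - 2, UINT64_MAX - 4 };

	/* Prints 18446744073709551613, i.e. UINT64_MAX - 2. */
	printf("%llu\n", (unsigned long long)avg_no_overflow(vals, 3));
	return 0;
}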
+
+/* Calculation function for simple counter. */
+static int tl_calc_count(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u64 *hist_vals;
+ int sample_cnt;
+ int ret = 0;
+
+ hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
+ GFP_KERNEL);
+ if (!hist_vals)
+ return -ENOMEM;
+
+ memset(vals, 0, sizeof(*vals));
+ sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
+ if (!sample_cnt)
+ goto out_free_hist_vals;
+
+ vals->curr = hist_vals[sample_cnt - 1];
+ vals->min = min_array(hist_vals, sample_cnt);
+ vals->max = max_array(hist_vals, sample_cnt);
+ vals->avg = avg_array(hist_vals, sample_cnt);
+
+out_free_hist_vals:
+ kfree(hist_vals);
+ return ret;
+}
+
+/* Convert CPP bus cycles to ns. */
+static int tl_cycles_to_ns(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
+ int ret;
+
+ ret = tl_calc_count(telemetry, ctr, vals);
+ if (ret)
+ return ret;
+
+ vals->curr *= cpp_ns_per_cycle;
+ vals->min *= cpp_ns_per_cycle;
+ vals->max *= cpp_ns_per_cycle;
+ vals->avg *= cpp_ns_per_cycle;
+
+ return 0;
+}
+
+/*
+ * Compute the cumulative average latency by dividing the accumulated value
+ * by the sample count. Returned value is in ns.
+ */
+static int tl_lat_acc_avg(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle;
+ u8 num_hbuff = tl_data->num_hbuff;
+ int sample_cnt, i;
+ u64 *hist_vals;
+ u64 *hist_cnt;
+ int ret = 0;
+
+ hist_vals = kmalloc_array(num_hbuff, sizeof(*hist_vals), GFP_KERNEL);
+ if (!hist_vals)
+ return -ENOMEM;
+
+ hist_cnt = kmalloc_array(num_hbuff, sizeof(*hist_cnt), GFP_KERNEL);
+ if (!hist_cnt) {
+ ret = -ENOMEM;
+ goto out_free_hist_vals;
+ }
+
+ memset(vals, 0, sizeof(*vals));
+ sample_cnt = tl_collect_values_u64(telemetry, ctr->offset1, hist_vals);
+ if (!sample_cnt)
+ goto out_free_hist_cnt;
+
+ tl_collect_values_u32(telemetry, ctr->offset2, hist_cnt);
+
+ for (i = 0; i < sample_cnt; i++) {
+ /* Avoid division by 0 if count is 0. */
+ if (hist_cnt[i])
+ hist_vals[i] = div_u64(hist_vals[i] * cpp_ns_per_cycle,
+ hist_cnt[i]);
+ else
+ hist_vals[i] = 0;
+ }
+
+ vals->curr = hist_vals[sample_cnt - 1];
+ vals->min = min_array(hist_vals, sample_cnt);
+ vals->max = max_array(hist_vals, sample_cnt);
+ vals->avg = avg_array(hist_vals, sample_cnt);
+
+out_free_hist_cnt:
+ kfree(hist_cnt);
+out_free_hist_vals:
+ kfree(hist_vals);
+ return ret;
+}
+
+/* Convert HW raw bandwidth units to Mbps. */
+static int tl_bw_hw_units_to_mbps(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev);
+ u16 bw_hw_2_bits = tl_data->bw_units_to_bytes * BITS_PER_BYTE;
+ u64 *hist_vals;
+ int sample_cnt;
+ int ret = 0;
+
+ hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals),
+ GFP_KERNEL);
+ if (!hist_vals)
+ return -ENOMEM;
+
+ memset(vals, 0, sizeof(*vals));
+ sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals);
+ if (!sample_cnt)
+ goto out_free_hist_vals;
+
+ vals->curr = div_u64(hist_vals[sample_cnt - 1] * bw_hw_2_bits, MEGA);
+ vals->min = div_u64(min_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
+ vals->max = div_u64(max_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
+ vals->avg = div_u64(avg_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA);
+
+out_free_hist_vals:
+ kfree(hist_vals);
+ return ret;
+}
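/*
 * Worked example (editorial note, not part of this patch): with
 * ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES = 64, bw_hw_2_bits is 64 * 8 = 512 bits
 * per HW unit, so a raw counter value of 31250 units converts to
 * 31250 * 512 / MEGA = 16 Mbps.
 */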
+
+static void tl_seq_printf_counter(struct adf_telemetry *telemetry,
+ struct seq_file *s, const char *name,
+ struct adf_tl_dbg_aggr_values *vals)
+{
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, name);
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->curr);
+ if (atomic_read(&telemetry->state) > 1) {
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->min);
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->max);
+ seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->avg);
+ }
+ seq_puts(s, "\n");
+}
+
+static int tl_calc_and_print_counter(struct adf_telemetry *telemetry,
+ struct seq_file *s,
+ const struct adf_tl_dbg_counter *ctr,
+ const char *name)
+{
+ const char *counter_name = name ? name : ctr->name;
+ enum adf_tl_counter_type type = ctr->type;
+ struct adf_tl_dbg_aggr_values vals;
+ int ret;
+
+ switch (type) {
+ case ADF_TL_SIMPLE_COUNT:
+ ret = tl_calc_count(telemetry, ctr, &vals);
+ break;
+ case ADF_TL_COUNTER_NS:
+ ret = tl_cycles_to_ns(telemetry, ctr, &vals);
+ break;
+ case ADF_TL_COUNTER_NS_AVG:
+ ret = tl_lat_acc_avg(telemetry, ctr, &vals);
+ break;
+ case ADF_TL_COUNTER_MBPS:
+ ret = tl_bw_hw_units_to_mbps(telemetry, ctr, &vals);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ tl_seq_printf_counter(telemetry, s, counter_name, &vals);
+
+ return 0;
+}
+
+static int tl_print_sl_counter(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct seq_file *s, u8 cnt_id)
+{
+ size_t sl_regs_sz = GET_TL_DATA(telemetry->accel_dev).slice_reg_sz;
+ struct adf_tl_dbg_counter slice_ctr;
+ size_t offset_inc = cnt_id * sl_regs_sz;
+ char cnt_name[MAX_COUNT_NAME_SIZE];
+
+ snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", ctr->name, cnt_id);
+ slice_ctr = *ctr;
+ slice_ctr.offset1 += offset_inc;
+
+ return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
+}
+
+static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
+ struct seq_file *s, u8 cnt_type, u8 cnt_id)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter *sl_tl_util_counters;
+ const struct adf_tl_dbg_counter *sl_tl_exec_counters;
+ const struct adf_tl_dbg_counter *ctr;
+ int ret;
+
+ sl_tl_util_counters = tl_data->sl_util_counters;
+ sl_tl_exec_counters = tl_data->sl_exec_counters;
+
+ ctr = &sl_tl_util_counters[cnt_type];
+
+ ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice utilization counter type\n");
+ return ret;
+ }
+
+ ctr = &sl_tl_exec_counters[cnt_type];
+
+ ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice execution counter type\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
+{
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
+ seq_printf(s, "%*u\n", TL_VALUE_MIN_PADDING, msg_cnt);
+}
+
+static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
+ struct seq_file *s)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter *dev_tl_counters;
+ u8 num_dev_counters = tl_data->num_dev_counters;
+ u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
+ const struct adf_tl_dbg_counter *ctr;
+ unsigned int i;
+ int ret;
+ u8 j;
+
+ if (!atomic_read(&telemetry->state)) {
+ dev_info(&GET_DEV(accel_dev), "not enabled\n");
+ return -EPERM;
+ }
+
+ dev_tl_counters = tl_data->dev_counters;
+
+ tl_print_msg_cnt(s, telemetry->msg_cnt);
+
+ /* Print device level telemetry. */
+ for (i = 0; i < num_dev_counters; i++) {
+ ctr = &dev_tl_counters[i];
+ ret = tl_calc_and_print_counter(telemetry, s, ctr, NULL);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid counter type\n");
+ return ret;
+ }
+ }
+
+ /* Print per slice telemetry. */
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ for (j = 0; j < sl_cnt[i]; j++) {
+ ret = tl_calc_and_print_sl_counters(accel_dev, s, i, j);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int tl_dev_data_show(struct seq_file *s, void *unused)
+{
+ struct adf_accel_dev *accel_dev = s->private;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ return tl_print_dev_data(accel_dev, s);
+}
+DEFINE_SHOW_ATTRIBUTE(tl_dev_data);
+
+static int tl_control_show(struct seq_file *s, void *unused)
+{
+ struct adf_accel_dev *accel_dev = s->private;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ seq_printf(s, "%d\n", atomic_read(&accel_dev->telemetry->state));
+
+ return 0;
+}
+
+static ssize_t tl_control_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq_f = file->private_data;
+ struct adf_accel_dev *accel_dev;
+ struct adf_telemetry *telemetry;
+ struct adf_tl_hw_data *tl_data;
+ struct device *dev;
+ u32 input;
+ int ret;
+
+ accel_dev = seq_f->private;
+ if (!accel_dev)
+ return -EINVAL;
+
+ tl_data = &GET_TL_DATA(accel_dev);
+ telemetry = accel_dev->telemetry;
+ dev = &GET_DEV(accel_dev);
+
+ mutex_lock(&telemetry->wr_lock);
+
+ ret = kstrtou32_from_user(userbuf, count, 10, &input);
+ if (ret)
+ goto unlock_and_exit;
+
+ if (input > tl_data->num_hbuff) {
+ dev_info(dev, "invalid control input\n");
+ ret = -EINVAL;
+ goto unlock_and_exit;
+ }
+
+ /* If input is 0, just stop telemetry. */
+ if (!input) {
+ ret = adf_tl_halt(accel_dev);
+ if (!ret)
+ ret = count;
+
+ goto unlock_and_exit;
+ }
+
+ /* If TL is already enabled, stop it. */
+ if (atomic_read(&telemetry->state)) {
+ dev_info(dev, "already enabled, restarting.\n");
+ ret = adf_tl_halt(accel_dev);
+ if (ret)
+ goto unlock_and_exit;
+ }
+
+ ret = adf_tl_run(accel_dev, input);
+ if (ret)
+ goto unlock_and_exit;
+
+ ret = count;
+
+unlock_and_exit:
+ mutex_unlock(&telemetry->wr_lock);
+ return ret;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(tl_control);
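/*
 * Usage sketch (editorial, not part of this patch): writing a value in the
 * range 1..num_hbuff to the control file starts telemetry with that many
 * history buffers (values greater than 1 also make the data files print
 * min/max/avg columns); writing 0 stops it.  The debugfs path below is an
 * assumption - the file location is created elsewhere in the driver.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int tl_control_set(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);	/* e.g. ".../qat_.../telemetry/control" (assumed) */
	ssize_t n;

	if (fd < 0)
		return -1;

	n = write(fd, val, strlen(val));	/* "4" -> run with 4 buffers, "0" -> stop */
	close(fd);

	return n < 0 ? -1 : 0;
}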
+
+static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num)
+{
+ char alpha;
+ u8 index;
+ int ret;
+
+ ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha);
+ if (ret != 1)
+ return -EINVAL;
+
+ index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha);
+ *rp_id = index;
+
+ return 0;
+}
+
+static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev,
+ unsigned int new_rp_num,
+ unsigned int rp_regs_index)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct device *dev = &GET_DEV(accel_dev);
+ unsigned int i;
+ u8 curr_state;
+ int ret;
+
+ if (new_rp_num >= hw_data->num_rps) {
+ dev_info(dev, "invalid Ring Pair number selected\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hw_data->tl_data.max_rp; i++) {
+ if (telemetry->rp_num_indexes[i] == new_rp_num) {
+ dev_info(dev, "RP nr: %d is already selected in slot rp_%c_data\n",
+ new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(i));
+ return 0;
+ }
+ }
+
+ dev_dbg(dev, "selecting RP nr %u into slot rp_%c_data\n",
+ new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));
+
+ curr_state = atomic_read(&telemetry->state);
+
+ if (curr_state) {
+ ret = adf_tl_halt(accel_dev);
+ if (ret)
+ return ret;
+
+ telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;
+
+ ret = adf_tl_run(accel_dev, curr_state);
+ if (ret)
+ return ret;
+ } else {
+ telemetry->rp_num_indexes[rp_regs_index] = new_rp_num;
+ }
+
+ return 0;
+}
+
+static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s,
+ u8 rp_idx)
+{
+ u32 banks_per_vf = GET_HW_DATA(accel_dev)->num_banks_per_vf;
+ enum adf_cfg_service_type svc;
+
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_SERVICE_TYPE);
+
+ svc = GET_SRV_TYPE(accel_dev, rp_idx % banks_per_vf);
+ switch (svc) {
+ case COMP:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DC);
+ break;
+ case SYM:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_SYM);
+ break;
+ case ASYM:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM);
+ break;
+ default:
+ seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN);
+ break;
+ }
+}
+
+static int tl_print_rp_data(struct adf_accel_dev *accel_dev, struct seq_file *s,
+ u8 rp_regs_index)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter *rp_tl_counters;
+ u8 num_rp_counters = tl_data->num_rp_counters;
+ size_t rp_regs_sz = tl_data->rp_reg_sz;
+ struct adf_tl_dbg_counter ctr;
+ unsigned int i;
+ u8 rp_idx;
+ int ret;
+
+ if (!atomic_read(&telemetry->state)) {
+ dev_info(&GET_DEV(accel_dev), "not enabled\n");
+ return -EPERM;
+ }
+
+ rp_tl_counters = tl_data->rp_counters;
+ rp_idx = telemetry->rp_num_indexes[rp_regs_index];
+
+ if (rp_idx == ADF_TL_RP_REGS_DISABLED) {
+ dev_info(&GET_DEV(accel_dev), "no RP number selected in rp_%c_data\n",
+ ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index));
+ return -EPERM;
+ }
+
+ tl_print_msg_cnt(s, telemetry->msg_cnt);
+ seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_NUM_INDEX);
+ seq_printf(s, "%*d\n", TL_VALUE_MIN_PADDING, rp_idx);
+ tl_print_rp_srv(accel_dev, s, rp_idx);
+
+ for (i = 0; i < num_rp_counters; i++) {
+ ctr = rp_tl_counters[i];
+ ctr.offset1 += rp_regs_sz * rp_regs_index;
+ ctr.offset2 += rp_regs_sz * rp_regs_index;
+ ret = tl_calc_and_print_counter(telemetry, s, &ctr, NULL);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "invalid RP counter type\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int tl_rp_data_show(struct seq_file *s, void *unused)
+{
+ struct adf_accel_dev *accel_dev = s->private;
+ u8 rp_regs_index;
+ u8 max_rp;
+ int ret;
+
+ if (!accel_dev)
+ return -EINVAL;
+
+ max_rp = GET_TL_DATA(accel_dev).max_rp;
+ ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
+ return ret;
+ }
+
+ return tl_print_rp_data(accel_dev, s, rp_regs_index);
+}
+
+static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *seq_f = file->private_data;
+ struct adf_accel_dev *accel_dev;
+ struct adf_telemetry *telemetry;
+ unsigned int new_rp_num;
+ u8 rp_regs_index;
+ u8 max_rp;
+ int ret;
+
+ accel_dev = seq_f->private;
+ if (!accel_dev)
+ return -EINVAL;
+
+ telemetry = accel_dev->telemetry;
+ max_rp = GET_TL_DATA(accel_dev).max_rp;
+
+ mutex_lock(&telemetry->wr_lock);
+
+ ret = get_rp_index_from_file(file, &rp_regs_index, max_rp);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n");
+ goto unlock_and_exit;
+ }
+
+ ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num);
+ if (ret)
+ goto unlock_and_exit;
+
+ ret = adf_tl_dbg_change_rp_index(accel_dev, new_rp_num, rp_regs_index);
+ if (ret)
+ goto unlock_and_exit;
+
+ ret = count;
+
+unlock_and_exit:
+ mutex_unlock(&telemetry->wr_lock);
+ return ret;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(tl_rp_data);
+
+void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct dentry *parent = accel_dev->debugfs_dir;
+ u8 max_rp = GET_TL_DATA(accel_dev).max_rp;
+ char name[ADF_TL_RP_REGS_FNAME_SIZE];
+ struct dentry *dir;
+ unsigned int i;
+
+ if (!telemetry)
+ return;
+
+ dir = debugfs_create_dir("telemetry", parent);
+ accel_dev->telemetry->dbg_dir = dir;
+ debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops);
+ debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops);
+
+ for (i = 0; i < max_rp; i++) {
+ snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME,
+ ADF_TL_DBG_RP_ALPHA_INDEX(i));
+ debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops);
+ }
+}
+
+void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ struct dentry *dbg_dir;
+
+ if (!telemetry)
+ return;
+
+ dbg_dir = telemetry->dbg_dir;
+
+ debugfs_remove_recursive(dbg_dir);
+
+ if (atomic_read(&telemetry->state))
+ adf_tl_halt(accel_dev);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
new file mode 100644
index 0000000000..11cc9eae19
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2023 Intel Corporation. */
+#ifndef ADF_TL_DEBUGFS_H
+#define ADF_TL_DEBUGFS_H
+
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+#define MAX_COUNT_NAME_SIZE 32
+#define SNAPSHOT_CNT_MSG "sample_cnt"
+#define RP_NUM_INDEX "rp_num"
+#define PCI_TRANS_CNT_NAME "pci_trans_cnt"
+#define MAX_RD_LAT_NAME "max_rd_lat"
+#define RD_LAT_ACC_NAME "rd_lat_acc_avg"
+#define MAX_LAT_NAME "max_gp_lat"
+#define LAT_ACC_NAME "gp_lat_acc_avg"
+#define BW_IN_NAME "bw_in"
+#define BW_OUT_NAME "bw_out"
+#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg"
+#define AT_TRANS_LAT_NAME "at_trans_lat_avg"
+#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used"
+#define AT_GLOB_DTLB_HIT_NAME "at_glob_devtlb_hit"
+#define AT_GLOB_DTLB_MISS_NAME "at_glob_devtlb_miss"
+#define AT_PAYLD_DTLB_HIT_NAME "tl_at_payld_devtlb_hit"
+#define AT_PAYLD_DTLB_MISS_NAME "tl_at_payld_devtlb_miss"
+#define RP_SERVICE_TYPE "service_type"
+
+#define ADF_TL_DBG_RP_ALPHA_INDEX(index) ((index) + 'A')
+#define ADF_TL_DBG_RP_INDEX_ALPHA(alpha) ((alpha) - 'A')
+
+#define ADF_TL_RP_REGS_FNAME "rp_%c_data"
+#define ADF_TL_RP_REGS_FNAME_SIZE 16
+
+#define ADF_TL_DATA_REG_OFF(reg, qat_gen) \
+ offsetof(struct adf_##qat_gen##_tl_layout, reg)
+
+#define ADF_TL_DEV_REG_OFF(reg, qat_gen) \
+ (ADF_TL_DATA_REG_OFF(tl_device_data_regs, qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_device_data_regs, reg))
+
+#define ADF_TL_SLICE_REG_OFF(slice, reg, qat_gen) \
+ (ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg))
+
+#define ADF_TL_RP_REG_OFF(reg, qat_gen) \
+ (ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg))
+
+/**
+ * enum adf_tl_counter_type - telemetry counter types
+ * @ADF_TL_COUNTER_UNSUPPORTED: unsupported counter
+ * @ADF_TL_SIMPLE_COUNT: simple counter
+ * @ADF_TL_COUNTER_NS: latency counter, value in ns
+ * @ADF_TL_COUNTER_NS_AVG: accumulated average latency counter, value in ns
+ * @ADF_TL_COUNTER_MBPS: bandwidth, value in MBps
+ */
+enum adf_tl_counter_type {
+ ADF_TL_COUNTER_UNSUPPORTED,
+ ADF_TL_SIMPLE_COUNT,
+ ADF_TL_COUNTER_NS,
+ ADF_TL_COUNTER_NS_AVG,
+ ADF_TL_COUNTER_MBPS,
+};
+
+/**
+ * struct adf_tl_dbg_counter - telemetry counter definition
+ * @name: name of the counter as printed in the report
+ * @type: type of the counter
+ * @offset1: offset of 1st register
+ * @offset2: offset of 2nd optional register
+ */
+struct adf_tl_dbg_counter {
+ const char *name;
+ enum adf_tl_counter_type type;
+ size_t offset1;
+ size_t offset2;
+};
+
+#define ADF_TL_COUNTER(_name, _type, _offset) \
+{ .name = _name, \
+ .type = _type, \
+ .offset1 = _offset \
+}
+
+#define ADF_TL_COUNTER_LATENCY(_name, _type, _offset1, _offset2) \
+{ .name = _name, \
+ .type = _type, \
+ .offset1 = _offset1, \
+ .offset2 = _offset2 \
+}
+
+/* Telemetry counter aggregated values. */
+struct adf_tl_dbg_aggr_values {
+ u64 curr;
+ u64 min;
+ u64 max;
+ u64 avg;
+};
+
+/**
+ * adf_tl_dbgfs_add() - Add telemetry's debug fs entries.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Creates telemetry's debug fs folder and attributes in QAT debug fs root.
+ */
+void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev);
+
+/**
+ * adf_tl_dbgfs_rm() - Remove telemetry's debug fs entries.
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Removes telemetry's debug fs folder and attributes from QAT debug fs root.
+ */
+void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_TL_DEBUGFS_H */
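The ADF_TL_COUNTER*() initializers and the *_REG_OFF() helpers above are how per-generation counter tables are expected to be declared: each entry names a printable counter, its type, and one or two offsets into that generation's telemetry layout (latency counters carry an accumulator offset plus a sample-count offset). A hedged sketch with a made-up layout, assuming adf_tl_debugfs.h is included; struct my_gen_tl_* and its fields are hypothetical placeholders, not the real 4xxx register map:

#include <linux/stddef.h>
#include "adf_tl_debugfs.h"

/* Hypothetical per-generation layout, for illustration only. */
struct my_gen_tl_device_data_regs {
	__u64 reg_tl_pci_trans_cnt;
	__u64 reg_tl_lat_acc;
	__u64 reg_tl_lat_cnt;
};

struct my_gen_tl_layout {
	struct my_gen_tl_device_data_regs tl_device_data_regs;
};

#define MY_DEV_REG_OFF(reg) \
	(offsetof(struct my_gen_tl_layout, tl_device_data_regs) + \
	 offsetof(struct my_gen_tl_device_data_regs, reg))

static const struct adf_tl_dbg_counter my_dev_counters[] = {
	ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
		       MY_DEV_REG_OFF(reg_tl_pci_trans_cnt)),
	ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
			       MY_DEV_REG_OFF(reg_tl_lat_acc),
			       MY_DEV_REG_OFF(reg_tl_lat_cnt)),
};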
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index cd418b51d9..63cf18e2a4 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -29,6 +29,8 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_RL_ADD = 134,
ICP_QAT_FW_RL_UPDATE = 135,
ICP_QAT_FW_RL_REMOVE = 136,
+ ICP_QAT_FW_TL_START = 137,
+ ICP_QAT_FW_TL_STOP = 138,
};
enum icp_qat_fw_init_admin_resp_status {
@@ -36,6 +38,13 @@ enum icp_qat_fw_init_admin_resp_status {
ICP_QAT_FW_INIT_RESP_STATUS_FAIL
};
+struct icp_qat_fw_init_admin_tl_rp_indexes {
+ __u8 rp_num_index_0;
+ __u8 rp_num_index_1;
+ __u8 rp_num_index_2;
+ __u8 rp_num_index_3;
+};
+
struct icp_qat_fw_init_admin_slice_cnt {
__u8 cpr_cnt;
__u8 xlt_cnt;
@@ -87,6 +96,7 @@ struct icp_qat_fw_init_admin_req {
__u8 rp_count;
};
__u32 idle_filter;
+ struct icp_qat_fw_init_admin_tl_rp_indexes rp_indexes;
};
__u32 resrvd4;
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index eb2ef225bc..b8f1c4ffb8 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -18,7 +18,12 @@ enum icp_qat_hw_ae_id {
ICP_QAT_HW_AE_9 = 9,
ICP_QAT_HW_AE_10 = 10,
ICP_QAT_HW_AE_11 = 11,
- ICP_QAT_HW_AE_DELIMITER = 12
+ ICP_QAT_HW_AE_12 = 12,
+ ICP_QAT_HW_AE_13 = 13,
+ ICP_QAT_HW_AE_14 = 14,
+ ICP_QAT_HW_AE_15 = 15,
+ ICP_QAT_HW_AE_16 = 16,
+ ICP_QAT_HW_AE_DELIMITER = 17
};
enum icp_qat_hw_qat_id {
@@ -95,7 +100,7 @@ enum icp_qat_capabilities_mask {
/* Bits 10-11 are currently reserved */
ICP_ACCEL_CAPABILITIES_HKDF = BIT(12),
ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13),
- /* Bit 14 is currently reserved */
+ ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN = BIT(14),
ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15),
ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16),
ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17),
@@ -107,7 +112,10 @@ enum icp_qat_capabilities_mask {
ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23),
ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24),
ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25),
- ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26)
+ ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26),
+ /* Bits 27-28 are currently reserved */
+ ICP_ACCEL_CAPABILITIES_ZUC_256 = BIT(29),
+ ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT = BIT(30),
};
#define QAT_AUTH_MODE_BITPOS 4
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
index 69482abdb8..e28241bdd0 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h
@@ -7,7 +7,7 @@
#define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000
#define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000
#define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000
-#define ICP_QAT_UCLO_MAX_AE 12
+#define ICP_QAT_UCLO_MAX_AE 17
#define ICP_QAT_UCLO_MAX_CTX 8
#define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
#define ICP_QAT_UCLO_MAX_USTORE 0x4000
diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c
index cbb946a800..317cafa9d1 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
@@ -697,12 +697,16 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
case ADF_402XX_PCI_DEVICE_ID:
+ case ADF_420XX_PCI_DEVICE_ID:
handle->chip_info->mmp_sram_size = 0;
handle->chip_info->nn = false;
handle->chip_info->lm2lm3 = true;
handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
- handle->chip_info->icp_rst_mask = 0x100015;
+ if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID)
+ handle->chip_info->icp_rst_mask = 0x100155;
+ else
+ handle->chip_info->icp_rst_mask = 0x100015;
handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
handle->chip_info->wakeup_event_val = 0x80000000;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index e27ea7e28c..ad2c64af74 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -733,6 +733,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
case ADF_402XX_PCI_DEVICE_ID:
+ case ADF_420XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5744df30c8..5fd31ba715 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -488,7 +488,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
for (i = 0; i < caps->nengines; i++) {
struct mv_cesa_engine *engine = &cesa->engines[i];
- char res_name[7];
+ char res_name[16];
engine->id = i;
spin_lock_init(&engine->lock);
@@ -509,7 +509,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
* Not all platforms can gate the CESA clocks: do not complain
* if the clock does not exist.
*/
- snprintf(res_name, sizeof(res_name), "cesa%d", i);
+ snprintf(res_name, sizeof(res_name), "cesa%u", i);
engine->clk = devm_clk_get(dev, res_name);
if (IS_ERR(engine->clk)) {
engine->clk = devm_clk_get(dev, NULL);
@@ -517,7 +517,7 @@ static int mv_cesa_probe(struct platform_device *pdev)
engine->clk = NULL;
}
- snprintf(res_name, sizeof(res_name), "cesaz%d", i);
+ snprintf(res_name, sizeof(res_name), "cesaz%u", i);
engine->zclk = devm_clk_get(dev, res_name);
if (IS_ERR(engine->zclk))
engine->zclk = NULL;
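The two hunks above widen res_name and switch the format to %u; the likely motivation (an assumption, not stated in the patch) is that the compiler has to size the buffer for the worst-case index, and "cesaz" plus a full 10-digit unsigned value plus the terminating NUL is exactly 16 bytes. A small stand-alone sketch of that arithmetic:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char res_name[16];
	unsigned int i = 4294967295u;	/* worst-case 10-digit index */

	/* "cesaz" (5) + 10 digits + NUL = 16 bytes exactly. */
	int n = snprintf(res_name, sizeof(res_name), "cesaz%u", i);

	printf("needed %d, buffer %zu, truncated: %s\n",
	       n, sizeof(res_name), n >= (int)sizeof(res_name) ? "yes" : "no");
	return 0;
}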
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 1c2c870e88..3c5d577d8f 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -473,12 +473,6 @@ static int otx_cpt_skcipher_ecb_aes_setkey(struct crypto_skcipher *tfm,
return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_ECB);
}
-static int otx_cpt_skcipher_cfb_aes_setkey(struct crypto_skcipher *tfm,
- const u8 *key, u32 keylen)
-{
- return cpt_aes_setkey(tfm, key, keylen, OTX_CPT_AES_CFB);
-}
-
static int otx_cpt_skcipher_cbc_des3_setkey(struct crypto_skcipher *tfm,
const u8 *key, u32 keylen)
{
@@ -1352,23 +1346,6 @@ static struct skcipher_alg otx_cpt_skciphers[] = { {
.encrypt = otx_cpt_skcipher_encrypt,
.decrypt = otx_cpt_skcipher_decrypt,
}, {
- .base.cra_name = "cfb(aes)",
- .base.cra_driver_name = "cpt_cfb_aes",
- .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct otx_cpt_enc_ctx),
- .base.cra_alignmask = 7,
- .base.cra_priority = 4001,
- .base.cra_module = THIS_MODULE,
-
- .init = otx_cpt_enc_dec_init,
- .ivsize = AES_BLOCK_SIZE,
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = otx_cpt_skcipher_cfb_aes_setkey,
- .encrypt = otx_cpt_skcipher_encrypt,
- .decrypt = otx_cpt_skcipher_decrypt,
-}, {
.base.cra_name = "cbc(des3_ede)",
.base.cra_driver_name = "cpt_cbc_des3_ede",
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 93d22b3289..79b4e74804 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -14,12 +14,14 @@ static struct cpt_hw_ops otx2_hw_ops = {
.send_cmd = otx2_cpt_send_cmd,
.cpt_get_compcode = otx2_cpt_get_compcode,
.cpt_get_uc_compcode = otx2_cpt_get_uc_compcode,
+ .cpt_sg_info_create = otx2_sg_info_create,
};
static struct cpt_hw_ops cn10k_hw_ops = {
.send_cmd = cn10k_cpt_send_cmd,
.cpt_get_compcode = cn10k_cpt_get_compcode,
.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
+ .cpt_sg_info_create = otx2_sg_info_create,
};
static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
@@ -78,12 +80,9 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
struct pci_dev *pdev = cptvf->pdev;
resource_size_t offset, size;
- if (!test_bit(CN10K_LMTST, &cptvf->cap_flag)) {
- cptvf->lfs.ops = &otx2_hw_ops;
+ if (!test_bit(CN10K_LMTST, &cptvf->cap_flag))
return 0;
- }
- cptvf->lfs.ops = &cn10k_hw_ops;
offset = pci_resource_start(pdev, PCI_MBOX_BAR_NUM);
size = pci_resource_len(pdev, PCI_MBOX_BAR_NUM);
/* Map VF LMILINE region */
@@ -96,3 +95,82 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
return 0;
}
EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
+
+void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
+ struct cn10k_cpt_errata_ctx *er_ctx)
+{
+ u64 cptr_dma;
+
+ if (!is_dev_cn10ka_ax(pdev))
+ return;
+
+ cptr_dma = er_ctx->cptr_dma & ~(BIT_ULL(60));
+ cn10k_cpt_ctx_flush(pdev, cptr_dma, true);
+ dma_unmap_single(&pdev->dev, cptr_dma, CN10K_CPT_HW_CTX_SIZE,
+ DMA_BIDIRECTIONAL);
+ kfree(er_ctx->hw_ctx);
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_clear, CRYPTO_DEV_OCTEONTX2_CPT);
+
+void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz)
+{
+ hctx->w0.aop_valid = 1;
+ hctx->w0.ctx_hdr_sz = 0;
+ hctx->w0.ctx_sz = ctx_sz;
+ hctx->w0.ctx_push_sz = 1;
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_set, CRYPTO_DEV_OCTEONTX2_CPT);
+
+int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
+ struct cn10k_cpt_errata_ctx *er_ctx)
+{
+ union cn10k_cpt_hw_ctx *hctx;
+ u64 cptr_dma;
+
+ er_ctx->cptr_dma = 0;
+ er_ctx->hw_ctx = NULL;
+
+ if (!is_dev_cn10ka_ax(pdev))
+ return 0;
+
+ hctx = kmalloc(CN10K_CPT_HW_CTX_SIZE, GFP_KERNEL);
+ if (unlikely(!hctx))
+ return -ENOMEM;
+ cptr_dma = dma_map_single(&pdev->dev, hctx, CN10K_CPT_HW_CTX_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ cn10k_cpt_hw_ctx_set(hctx, 1);
+ er_ctx->hw_ctx = hctx;
+ er_ctx->cptr_dma = cptr_dma | BIT_ULL(60);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_hw_ctx_init, CRYPTO_DEV_OCTEONTX2_CPT);
+
+void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval)
+{
+ struct otx2_cptvf_dev *cptvf = pci_get_drvdata(pdev);
+ struct otx2_cptlfs_info *lfs = &cptvf->lfs;
+ u64 reg;
+
+ reg = (uintptr_t)cptr >> 7;
+ if (inval)
+ reg = reg | BIT_ULL(46);
+
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
+ OTX2_CPT_LF_CTX_FLUSH, reg);
+ /* Make sure that the FLUSH operation is complete */
+ wmb();
+ otx2_cpt_read64(lfs->reg_base, lfs->blkaddr, lfs->lf[0].slot,
+ OTX2_CPT_LF_CTX_ERR);
+}
+EXPORT_SYMBOL_NS_GPL(cn10k_cpt_ctx_flush, CRYPTO_DEV_OCTEONTX2_CPT);
+
+void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf)
+{
+ if (test_bit(CN10K_LMTST, &cptvf->cap_flag))
+ cptvf->lfs.ops = &cn10k_hw_ops;
+ else
+ cptvf->lfs.ops = &otx2_hw_ops;
+}
+EXPORT_SYMBOL_NS_GPL(cptvf_hw_ops_get, CRYPTO_DEV_OCTEONTX2_CPT);
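The new errata-context helpers above are intended to be used as an init/clear pair around request submission on affected CN10KA A-step parts (on other parts they are no-ops that leave er_ctx empty). A minimal sketch of such a caller; the function name and surrounding flow are illustrative, not taken from the driver:

static int example_er_ctx_use(struct pci_dev *pdev,
			      struct cn10k_cpt_errata_ctx *er_ctx)
{
	int ret;

	/* Allocates and DMA-maps a 256-byte HW context (CN10KA A-step only). */
	ret = cn10k_cpt_hw_ctx_init(pdev, er_ctx);
	if (ret)
		return ret;

	/* ... issue CPT requests that carry er_ctx->cptr_dma ... */

	/* Flush/invalidate the context and release the mapping and memory. */
	cn10k_cpt_hw_ctx_clear(pdev, er_ctx);
	return 0;
}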
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
index aaefc7e38e..92be3ecf57 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -8,6 +8,26 @@
#include "otx2_cptpf.h"
#include "otx2_cptvf.h"
+#define CN10K_CPT_HW_CTX_SIZE 256
+
+union cn10k_cpt_hw_ctx {
+ u64 u;
+ struct {
+ u64 reserved_0_47:48;
+ u64 ctx_push_sz:7;
+ u64 reserved_55:1;
+ u64 ctx_hdr_sz:2;
+ u64 aop_valid:1;
+ u64 reserved_59:1;
+ u64 ctx_sz:4;
+ } w0;
+};
+
+struct cn10k_cpt_errata_ctx {
+ union cn10k_cpt_hw_ctx *hw_ctx;
+ u64 cptr_dma;
+};
+
static inline u8 cn10k_cpt_get_compcode(union otx2_cpt_res_s *result)
{
return ((struct cn10k_cpt_res_s *)result)->compcode;
@@ -30,5 +50,12 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
+void cn10k_cpt_ctx_flush(struct pci_dev *pdev, u64 cptr, bool inval);
+int cn10k_cpt_hw_ctx_init(struct pci_dev *pdev,
+ struct cn10k_cpt_errata_ctx *er_ctx);
+void cn10k_cpt_hw_ctx_clear(struct pci_dev *pdev,
+ struct cn10k_cpt_errata_ctx *er_ctx);
+void cn10k_cpt_hw_ctx_set(union cn10k_cpt_hw_ctx *hctx, u16 ctx_sz);
+void cptvf_hw_ops_get(struct otx2_cptvf_dev *cptvf);
#endif /* __CN10K_CPTLF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 46b778bbbe..c5b7c57574 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -56,7 +56,11 @@ struct otx2_cpt_rx_inline_lf_cfg {
u16 param2;
u16 opcode;
u32 credit;
+ u32 credit_th;
+ u16 bpid;
u32 reserved;
+ u8 ctx_ilen_valid : 1;
+ u8 ctx_ilen : 7;
};
/*
@@ -102,7 +106,10 @@ union otx2_cpt_eng_caps {
u64 kasumi:1;
u64 des:1;
u64 crc:1;
- u64 reserved_14_63:50;
+ u64 mmul:1;
+ u64 reserved_15_33:19;
+ u64 pdcp_chain:1;
+ u64 reserved_35_63:29;
};
};
@@ -145,6 +152,35 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
return false;
}
+static inline bool is_dev_cn10ka(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A;
+}
+
+static inline bool is_dev_cn10ka_ax(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ ((pdev->revision & 0xFF) == 4 || (pdev->revision & 0xFF) == 0x50 ||
+ (pdev->revision & 0xff) == 0x51))
+ return true;
+
+ return false;
+}
+
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+ return pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_B;
+}
+
+static inline bool is_dev_cn10ka_b0(struct pci_dev *pdev)
+{
+ if (pdev->subsystem_device == CPT_PCI_SUBSYS_DEVID_CN10K_A &&
+ (pdev->revision & 0xFF) == 0x54)
+ return true;
+
+ return false;
+}
+
static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
unsigned long *cap_flag)
{
@@ -154,6 +190,21 @@ static inline void otx2_cpt_set_hw_caps(struct pci_dev *pdev,
}
}
+static inline bool cpt_is_errata_38550_exists(struct pci_dev *pdev)
+{
+ if (is_dev_otx2(pdev) || is_dev_cn10ka_ax(pdev))
+ return true;
+
+ return false;
+}
+
+static inline bool cpt_feature_sgv2(struct pci_dev *pdev)
+{
+ if (!is_dev_otx2(pdev) && !is_dev_cn10ka_ax(pdev))
+ return true;
+
+ return false;
+}
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
@@ -171,5 +222,6 @@ int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox);
+int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot);
#endif /* __OTX2_CPT_COMMON_H */
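cpt_feature_sgv2() above distinguishes parts that use the newer three-pointer SGv2 scatter-gather format (added to otx2_cpt_reqmgr.h later in this diff) from those that keep the legacy four-pointer format. A hedged sketch of how a caller could select between the two info constructors; the actual wiring in the driver goes through the cpt_sg_info_create op rather than an inline check, so treat this as illustrative:

static struct otx2_cpt_inst_info *
example_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
		       gfp_t gfp)
{
	/* SGv2-capable parts build cn10kb-style lists, others the legacy kind. */
	if (cpt_feature_sgv2(pdev))
		return cn10k_sgv2_info_create(pdev, req, gfp);

	return otx2_sg_info_create(pdev, req, gfp);
}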
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index a2aba0b0d6..d2b8d26db9 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -24,10 +24,45 @@ static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
+ ctx->val.vstr[0] = '\0';
+
+ return 0;
+}
+
+static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+ struct pci_dev *pdev = cptpf->pdev;
+ u64 reg_val = 0;
+
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
+ BLKADDR_CPT0);
+ ctx->val.vu8 = (reg_val >> 18) & 0x1;
+
+ return 0;
+}
- otx2_cpt_print_uc_dbg_info(cptpf);
+static int otx2_cpt_dl_t106_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
+ struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
+ struct pci_dev *pdev = cptpf->pdev;
+ u64 reg_val = 0;
+
+ if (cptpf->enabled_vfs != 0 || cptpf->eng_grps.is_grps_created)
+ return -EPERM;
+
+ if (cpt_feature_sgv2(pdev)) {
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
+ &reg_val, BLKADDR_CPT0);
+ reg_val &= ~(0x1ULL << 18);
+ reg_val |= ((u64)ctx->val.vu8 & 0x1) << 18;
+ return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev,
+ CPT_AF_CTL, reg_val, BLKADDR_CPT0);
+ }
return 0;
}
@@ -36,6 +71,7 @@ enum otx2_cpt_dl_param_id {
OTX2_CPT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
OTX2_CPT_DEVLINK_PARAM_ID_EGRP_CREATE,
OTX2_CPT_DEVLINK_PARAM_ID_EGRP_DELETE,
+ OTX2_CPT_DEVLINK_PARAM_ID_T106_MODE,
};
static const struct devlink_param otx2_cpt_dl_params[] = {
@@ -49,6 +85,11 @@ static const struct devlink_param otx2_cpt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
otx2_cpt_dl_uc_info, otx2_cpt_dl_egrp_delete,
NULL),
+ DEVLINK_PARAM_DRIVER(OTX2_CPT_DEVLINK_PARAM_ID_T106_MODE,
+ "t106_mode", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_cpt_dl_t106_mode_get, otx2_cpt_dl_t106_mode_set,
+ NULL),
};
static int otx2_cpt_dl_info_firmware_version_put(struct devlink_info_req *req,
@@ -120,7 +161,6 @@ int otx2_cpt_register_dl(struct otx2_cptpf_dev *cptpf)
devlink_free(dl);
return ret;
}
-
devlink_register(dl);
return 0;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
index 6f947978e4..7e746a4def 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_hw_types.h
@@ -13,6 +13,9 @@
#define CN10K_CPT_PCI_PF_DEVICE_ID 0xA0F2
#define CN10K_CPT_PCI_VF_DEVICE_ID 0xA0F3
+#define CPT_PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define CPT_PCI_SUBSYS_DEVID_CN10K_B 0xBD00
+
/* Mailbox interrupts offset */
#define OTX2_CPT_PF_MBOX_INT 6
#define OTX2_CPT_PF_INT_VEC_E_MBOXX(x, a) ((x) + (a))
@@ -99,6 +102,9 @@
#define OTX2_CPT_LF_Q_INST_PTR (0x110)
#define OTX2_CPT_LF_Q_GRP_PTR (0x120)
#define OTX2_CPT_LF_NQX(a) (0x400 | (a) << 3)
+#define OTX2_CPT_LF_CTX_CTL (0x500)
+#define OTX2_CPT_LF_CTX_FLUSH (0x510)
+#define OTX2_CPT_LF_CTX_ERR (0x520)
#define OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT 20
/* LMT LF registers */
#define OTX2_CPT_LMT_LFBASE BIT_ULL(OTX2_CPT_RVU_FUNC_BLKADDR_SHIFT)
@@ -467,7 +473,8 @@ union otx2_cptx_af_lf_ctrl {
u64 cont_err:1;
u64 reserved_11_15:5;
u64 nixtx_en:1;
- u64 reserved_17_47:31;
+ u64 ctx_ilen:3;
+ u64 reserved_17_47:28;
u64 grp:8;
u64 reserved_56_63:8;
} s;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index 273ee5352a..5be0103c1f 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -229,3 +229,29 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
return otx2_mbox_check_rsp_msgs(mbox, 0);
}
EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
+
+int otx2_cpt_lf_reset_msg(struct otx2_cptlfs_info *lfs, int slot)
+{
+ struct otx2_mbox *mbox = lfs->mbox;
+ struct pci_dev *pdev = lfs->pdev;
+ struct cpt_lf_rst_req *req;
+ int ret;
+
+ req = (struct cpt_lf_rst_req *)otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+
+ req->hdr.id = MBOX_MSG_CPT_LF_RESET;
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.pcifunc = 0;
+ req->slot = slot;
+ ret = otx2_cpt_send_mbox_msg(mbox, pdev);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_lf_reset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
index dbb1ee746f..e27e849b01 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_reqmgr.h
@@ -27,6 +27,13 @@
#define OTX2_CPT_MAX_REQ_SIZE 65535
+#define SG_COMPS_MAX 4
+#define SGV2_COMPS_MAX 3
+
+#define SG_COMP_3 3
+#define SG_COMP_2 2
+#define SG_COMP_1 1
+
union otx2_cpt_opcode {
u16 flags;
struct {
@@ -40,6 +47,8 @@ struct otx2_cptvf_request {
u32 param2;
u16 dlen;
union otx2_cpt_opcode opcode;
+ dma_addr_t cptr_dma;
+ void *cptr;
};
/*
@@ -143,6 +152,8 @@ struct otx2_cpt_inst_info {
unsigned long time_in;
u32 dlen;
u32 dma_len;
+ u64 gthr_sz;
+ u64 sctr_sz;
u8 extra_time;
};
@@ -157,6 +168,16 @@ struct otx2_cpt_sglist_component {
__be64 ptr3;
};
+struct cn10kb_cpt_sglist_component {
+ u16 len0;
+ u16 len1;
+ u16 len2;
+ u16 valid_segs;
+ u64 ptr0;
+ u64 ptr1;
+ u64 ptr2;
+};
+
static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
struct otx2_cpt_inst_info *info)
{
@@ -188,6 +209,283 @@ static inline void otx2_cpt_info_destroy(struct pci_dev *pdev,
kfree(info);
}
+static inline int setup_sgio_components(struct pci_dev *pdev,
+ struct otx2_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct otx2_cpt_sglist_component *sg_ptr;
+ int components;
+ int i, j;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (unlikely(!list[i].vptr))
+ continue;
+ list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ goto sg_cleanup;
+ }
+ }
+ components = buf_count / SG_COMPS_MAX;
+ sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->len0 = cpu_to_be16(list[i * SG_COMPS_MAX + 0].size);
+ sg_ptr->len1 = cpu_to_be16(list[i * SG_COMPS_MAX + 1].size);
+ sg_ptr->len2 = cpu_to_be16(list[i * SG_COMPS_MAX + 2].size);
+ sg_ptr->len3 = cpu_to_be16(list[i * SG_COMPS_MAX + 3].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * SG_COMPS_MAX + 0].dma_addr);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * SG_COMPS_MAX + 1].dma_addr);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * SG_COMPS_MAX + 2].dma_addr);
+ sg_ptr->ptr3 = cpu_to_be64(list[i * SG_COMPS_MAX + 3].dma_addr);
+ sg_ptr++;
+ }
+ components = buf_count % SG_COMPS_MAX;
+
+ switch (components) {
+ case SG_COMP_3:
+ sg_ptr->len2 = cpu_to_be16(list[i * SG_COMPS_MAX + 2].size);
+ sg_ptr->ptr2 = cpu_to_be64(list[i * SG_COMPS_MAX + 2].dma_addr);
+ fallthrough;
+ case SG_COMP_2:
+ sg_ptr->len1 = cpu_to_be16(list[i * SG_COMPS_MAX + 1].size);
+ sg_ptr->ptr1 = cpu_to_be64(list[i * SG_COMPS_MAX + 1].dma_addr);
+ fallthrough;
+ case SG_COMP_1:
+ sg_ptr->len0 = cpu_to_be16(list[i * SG_COMPS_MAX + 0].size);
+ sg_ptr->ptr0 = cpu_to_be64(list[i * SG_COMPS_MAX + 0].dma_addr);
+ break;
+ default:
+ break;
+ }
+ return 0;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[j].dma_addr,
+ list[j].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return -EIO;
+}
+
+static inline int sgv2io_components_setup(struct pci_dev *pdev,
+ struct otx2_cpt_buf_ptr *list,
+ int buf_count, u8 *buffer)
+{
+ struct cn10kb_cpt_sglist_component *sg_ptr;
+ int components;
+ int i, j;
+
+ if (unlikely(!list)) {
+ dev_err(&pdev->dev, "Input list pointer is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ if (unlikely(!list[i].vptr))
+ continue;
+ list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
+ list[i].size,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
+ dev_err(&pdev->dev, "Dma mapping failed\n");
+ goto sg_cleanup;
+ }
+ }
+ components = buf_count / SGV2_COMPS_MAX;
+ sg_ptr = (struct cn10kb_cpt_sglist_component *)buffer;
+ for (i = 0; i < components; i++) {
+ sg_ptr->len0 = list[i * SGV2_COMPS_MAX + 0].size;
+ sg_ptr->len1 = list[i * SGV2_COMPS_MAX + 1].size;
+ sg_ptr->len2 = list[i * SGV2_COMPS_MAX + 2].size;
+ sg_ptr->ptr0 = list[i * SGV2_COMPS_MAX + 0].dma_addr;
+ sg_ptr->ptr1 = list[i * SGV2_COMPS_MAX + 1].dma_addr;
+ sg_ptr->ptr2 = list[i * SGV2_COMPS_MAX + 2].dma_addr;
+ sg_ptr->valid_segs = SGV2_COMPS_MAX;
+ sg_ptr++;
+ }
+ components = buf_count % SGV2_COMPS_MAX;
+
+ sg_ptr->valid_segs = components;
+ switch (components) {
+ case SG_COMP_2:
+ sg_ptr->len1 = list[i * SGV2_COMPS_MAX + 1].size;
+ sg_ptr->ptr1 = list[i * SGV2_COMPS_MAX + 1].dma_addr;
+ fallthrough;
+ case SG_COMP_1:
+ sg_ptr->len0 = list[i * SGV2_COMPS_MAX + 0].size;
+ sg_ptr->ptr0 = list[i * SGV2_COMPS_MAX + 0].dma_addr;
+ break;
+ default:
+ break;
+ }
+ return 0;
+
+sg_cleanup:
+ for (j = 0; j < i; j++) {
+ if (list[j].dma_addr) {
+ dma_unmap_single(&pdev->dev, list[j].dma_addr,
+ list[j].size, DMA_BIDIRECTIONAL);
+ }
+
+ list[j].dma_addr = 0;
+ }
+ return -EIO;
+}
+
+static inline struct otx2_cpt_inst_info *
+cn10k_sgv2_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ gfp_t gfp)
+{
+ u32 dlen = 0, g_len, sg_len, info_len;
+ int align = OTX2_CPT_DMA_MINALIGN;
+ struct otx2_cpt_inst_info *info;
+ u16 g_sz_bytes, s_sz_bytes;
+ u32 total_mem_len;
+ int i;
+
+ g_sz_bytes = ((req->in_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+ s_sz_bytes = ((req->out_cnt + 2) / 3) *
+ sizeof(struct cn10kb_cpt_sglist_component);
+
+ g_len = ALIGN(g_sz_bytes, align);
+ sg_len = ALIGN(g_len + s_sz_bytes, align);
+ info_len = ALIGN(sizeof(*info), align);
+ total_mem_len = sg_len + info_len + sizeof(union otx2_cpt_res_s);
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info))
+ return NULL;
+
+ for (i = 0; i < req->in_cnt; i++)
+ dlen += req->in[i].size;
+
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+ info->gthr_sz = req->in_cnt;
+ info->sctr_sz = req->out_cnt;
+
+ /* Setup gather (input) components */
+ if (sgv2io_components_setup(pdev, req->in, req->in_cnt,
+ info->in_buffer)) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ goto destroy_info;
+ }
+
+ if (sgv2io_components_setup(pdev, req->out, req->out_cnt,
+ &info->in_buffer[g_len])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ goto destroy_info;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ goto destroy_info;
+ }
+ info->rptr_baddr = info->dptr_baddr + g_len;
+ /*
+ * Get buffer for union otx2_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = info->in_buffer + sg_len;
+ info->comp_baddr = info->dptr_baddr + sg_len;
+
+ return info;
+
+destroy_info:
+ otx2_cpt_info_destroy(pdev, info);
+ return NULL;
+}
+
+/* SG list header size in bytes */
+#define SG_LIST_HDR_SIZE 8
+static inline struct otx2_cpt_inst_info *
+otx2_sg_info_create(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ gfp_t gfp)
+{
+ int align = OTX2_CPT_DMA_MINALIGN;
+ struct otx2_cpt_inst_info *info;
+ u32 dlen, align_dlen, info_len;
+ u16 g_sz_bytes, s_sz_bytes;
+ u32 total_mem_len;
+
+ if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
+ req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
+ dev_err(&pdev->dev, "Error too many sg components\n");
+ return NULL;
+ }
+
+ g_sz_bytes = ((req->in_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+ s_sz_bytes = ((req->out_cnt + 3) / 4) *
+ sizeof(struct otx2_cpt_sglist_component);
+
+ dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
+ align_dlen = ALIGN(dlen, align);
+ info_len = ALIGN(sizeof(*info), align);
+ total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
+
+ info = kzalloc(total_mem_len, gfp);
+ if (unlikely(!info))
+ return NULL;
+
+ info->dlen = dlen;
+ info->in_buffer = (u8 *)info + info_len;
+
+ ((u16 *)info->in_buffer)[0] = req->out_cnt;
+ ((u16 *)info->in_buffer)[1] = req->in_cnt;
+ ((u16 *)info->in_buffer)[2] = 0;
+ ((u16 *)info->in_buffer)[3] = 0;
+ cpu_to_be64s((u64 *)info->in_buffer);
+
+ /* Setup gather (input) components */
+ if (setup_sgio_components(pdev, req->in, req->in_cnt,
+ &info->in_buffer[8])) {
+ dev_err(&pdev->dev, "Failed to setup gather list\n");
+ goto destroy_info;
+ }
+
+ if (setup_sgio_components(pdev, req->out, req->out_cnt,
+ &info->in_buffer[8 + g_sz_bytes])) {
+ dev_err(&pdev->dev, "Failed to setup scatter list\n");
+ goto destroy_info;
+ }
+
+ info->dma_len = total_mem_len - info_len;
+ info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
+ info->dma_len, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
+ dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
+ goto destroy_info;
+ }
+ /*
+ * Get buffer for union otx2_cpt_res_s response
+ * structure and its physical address
+ */
+ info->completion_addr = info->in_buffer + align_dlen;
+ info->comp_baddr = info->dptr_baddr + align_dlen;
+
+ return info;
+
+destroy_info:
+ otx2_cpt_info_destroy(pdev, info);
+ return NULL;
+}
+
struct otx2_cptlf_wqe;
int otx2_cpt_do_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
int cpu_num);
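As a worked example of the sizing arithmetic in the two constructors above: a request with 5 input and 2 output buffers needs ceil(5/4) + ceil(2/4) = 3 legacy components of 40 bytes each (plus the 8-byte list header), while SGv2 needs ceil(5/3) + ceil(2/3) = 3 components of 32 bytes each. A stand-alone sketch that mirrors just the component-count expressions (the *_like_sg structs stand in for the real ones to keep it self-contained):

#include <stdio.h>

struct otx2_like_sg { unsigned short len[4]; unsigned long long ptr[4]; };        /* 40 bytes */
struct sgv2_like_sg { unsigned short len[3], valid; unsigned long long ptr[3]; }; /* 32 bytes */

int main(void)
{
	unsigned int in_cnt = 5, out_cnt = 2;

	printf("legacy gather+scatter: %zu bytes (+8 header)\n",
	       ((in_cnt + 3) / 4 + (out_cnt + 3) / 4) * sizeof(struct otx2_like_sg));
	printf("sgv2 gather+scatter:   %zu bytes\n",
	       ((in_cnt + 2) / 3 + (out_cnt + 2) / 3) * sizeof(struct sgv2_like_sg));
	return 0;
}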
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index e4bd3f030c..b52728e3c0 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -106,6 +106,32 @@ static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
return ret;
}
+static int cptlf_set_ctx_ilen(struct otx2_cptlfs_info *lfs, int ctx_ilen)
+{
+ union otx2_cptx_af_lf_ctrl lf_ctrl;
+ struct otx2_cptlf_info *lf;
+ int slot, ret = 0;
+
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
+ lf = &lfs->lf[slot];
+
+ ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ &lf_ctrl.u, lfs->blkaddr);
+ if (ret)
+ return ret;
+
+ lf_ctrl.s.ctx_ilen = ctx_ilen;
+
+ ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
+ CPT_AF_LFX_CTL(lf->slot),
+ lf_ctrl.u, lfs->blkaddr);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
/* Disable instruction queues */
@@ -151,26 +177,14 @@ static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
irq_misc.u);
}
-static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
+static void cptlf_set_done_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
{
+ u64 reg = enable ? OTX2_CPT_LF_DONE_INT_ENA_W1S :
+ OTX2_CPT_LF_DONE_INT_ENA_W1C;
int slot;
- /* Enable done interrupts */
for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
- OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
- /* Enable Misc interrupts */
- cptlf_set_misc_intrs(lfs, true);
-}
-
-static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
-{
- int slot;
-
- for (slot = 0; slot < lfs->lfs_num; slot++)
- otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot,
- OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
- cptlf_set_misc_intrs(lfs, false);
+ otx2_cpt_write64(lfs->reg_base, lfs->blkaddr, slot, reg, 0x1);
}
static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
@@ -257,24 +271,44 @@ static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
+void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs)
{
- int i, offs, vector;
+ int i, irq_offs, vector;
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
for (i = 0; i < lfs->lfs_num; i++) {
- for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
- if (!lfs->lf[i].is_irq_reg[offs])
- continue;
+ if (!lfs->lf[i].is_irq_reg[irq_offs])
+ continue;
- vector = pci_irq_vector(lfs->pdev,
- lfs->lf[i].msix_offset + offs);
- free_irq(vector, &lfs->lf[i]);
- lfs->lf[i].is_irq_reg[offs] = false;
- }
+ vector = pci_irq_vector(lfs->pdev,
+ lfs->lf[i].msix_offset + irq_offs);
+ free_irq(vector, &lfs->lf[i]);
+ lfs->lf[i].is_irq_reg[irq_offs] = false;
}
- cptlf_disable_intrs(lfs);
+
+ cptlf_set_misc_intrs(lfs, false);
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts,
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_misc_interrupts,
+ CRYPTO_DEV_OCTEONTX2_CPT);
+
+void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ int i, irq_offs, vector;
+
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
+ for (i = 0; i < lfs->lfs_num; i++) {
+ if (!lfs->lf[i].is_irq_reg[irq_offs])
+ continue;
+
+ vector = pci_irq_vector(lfs->pdev,
+ lfs->lf[i].msix_offset + irq_offs);
+ free_irq(vector, &lfs->lf[i]);
+ lfs->lf[i].is_irq_reg[irq_offs] = false;
+ }
+
+ cptlf_set_done_intrs(lfs, false);
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_done_interrupts,
CRYPTO_DEV_OCTEONTX2_CPT);
static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
@@ -296,34 +330,53 @@ static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
return ret;
}
-int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
+int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs)
{
+ bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1);
int irq_offs, ret, i;
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
for (i = 0; i < lfs->lfs_num; i++) {
- irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
- snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPT%dLF Misc%d",
+ is_cpt1, i);
ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
cptlf_misc_intr_handler);
if (ret)
goto free_irq;
+ }
+ cptlf_set_misc_intrs(lfs, true);
+ return 0;
- irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
- snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
- i);
+free_irq:
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_misc_interrupts,
+ CRYPTO_DEV_OCTEONTX2_CPT);
+
+int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs)
+{
+ bool is_cpt1 = (lfs->blkaddr == BLKADDR_CPT1);
+ int irq_offs, ret, i;
+
+ irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
+ for (i = 0; i < lfs->lfs_num; i++) {
+ snprintf(lfs->lf[i].irq_name[irq_offs], 32,
+ "OTX2_CPT%dLF Done%d", is_cpt1, i);
ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
cptlf_done_intr_handler);
if (ret)
goto free_irq;
}
- cptlf_enable_intrs(lfs);
+ cptlf_set_done_intrs(lfs, true);
return 0;
free_irq:
- otx2_cptlf_unregister_interrupts(lfs);
+ otx2_cptlf_unregister_done_interrupts(lfs);
return ret;
}
-EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT);
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_done_interrupts,
+ CRYPTO_DEV_OCTEONTX2_CPT);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -416,6 +469,12 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
if (ret)
goto free_iq;
+ if (lfs->ctx_ilen_ovrd) {
+ ret = cptlf_set_ctx_ilen(lfs, lfs->ctx_ilen);
+ if (ret)
+ goto free_iq;
+ }
+
return 0;
free_iq:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
index 5302fe3d0e..bd8604be29 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.h
@@ -5,6 +5,7 @@
#define __OTX2_CPTLF_H
#include <linux/soc/marvell/octeontx2/asm.h>
+#include <linux/bitfield.h>
#include <mbox.h>
#include <rvu.h>
#include "otx2_cpt_common.h"
@@ -99,6 +100,9 @@ struct cpt_hw_ops {
struct otx2_cptlf_info *lf);
u8 (*cpt_get_compcode)(union otx2_cpt_res_s *result);
u8 (*cpt_get_uc_compcode)(union otx2_cpt_res_s *result);
+ struct otx2_cpt_inst_info *
+ (*cpt_sg_info_create)(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
+ gfp_t gfp);
};
struct otx2_cptlfs_info {
@@ -116,6 +120,9 @@ struct otx2_cptlfs_info {
u8 kvf_limits; /* Kernel crypto limits */
atomic_t state; /* LF's state. started/reset */
int blkaddr; /* CPT blkaddr: BLKADDR_CPT0/BLKADDR_CPT1 */
+ int global_slot; /* Global slot across the blocks */
+ u8 ctx_ilen;
+ u8 ctx_ilen_ovrd;
};
static inline void otx2_cpt_free_instruction_queues(
@@ -203,48 +210,71 @@ static inline void otx2_cptlf_set_iqueues_size(struct otx2_cptlfs_info *lfs)
otx2_cptlf_do_set_iqueue_size(&lfs->lf[slot]);
}
+#define INFLIGHT GENMASK_ULL(8, 0)
+#define GRB_CNT GENMASK_ULL(39, 32)
+#define GWB_CNT GENMASK_ULL(47, 40)
+#define XQ_XOR GENMASK_ULL(63, 63)
+#define DQPTR GENMASK_ULL(19, 0)
+#define NQPTR GENMASK_ULL(51, 32)
+
static inline void otx2_cptlf_do_disable_iqueue(struct otx2_cptlf_info *lf)
{
- union otx2_cptx_lf_ctl lf_ctl = { .u = 0x0 };
- union otx2_cptx_lf_inprog lf_inprog;
+ void __iomem *reg_base = lf->lfs->reg_base;
+ struct pci_dev *pdev = lf->lfs->pdev;
u8 blkaddr = lf->lfs->blkaddr;
- int timeout = 20;
+ int timeout = 1000000;
+ u64 inprog, inst_ptr;
+ u64 slot = lf->slot;
+ u64 qsize, pending;
+ int i = 0;
/* Disable instructions enqueuing */
- otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
- OTX2_CPT_LF_CTL, lf_ctl.u);
+ otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_CTL, 0x0);
+
+ inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG);
+ inprog |= BIT_ULL(16);
+ otx2_cpt_write64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG, inprog);
- /* Wait for instruction queue to become empty */
+ qsize = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_Q_SIZE) & 0x7FFF;
+ do {
+ inst_ptr = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_Q_INST_PTR);
+ pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
+ FIELD_GET(NQPTR, inst_ptr) - FIELD_GET(DQPTR, inst_ptr);
+ udelay(1);
+ timeout--;
+ } while ((pending != 0) && (timeout != 0));
+
+ if (timeout == 0)
+ dev_warn(&pdev->dev, "TIMEOUT: CPT poll on pending instructions\n");
+
+ timeout = 1000000;
+ /* Wait for CPT queue to become execution-quiescent */
do {
- lf_inprog.u = otx2_cpt_read64(lf->lfs->reg_base, blkaddr,
- lf->slot, OTX2_CPT_LF_INPROG);
- if (!lf_inprog.s.inflight)
- break;
-
- usleep_range(10000, 20000);
- if (timeout-- < 0) {
- dev_err(&lf->lfs->pdev->dev,
- "Error LF %d is still busy.\n", lf->slot);
- break;
+ inprog = otx2_cpt_read64(reg_base, blkaddr, slot, OTX2_CPT_LF_INPROG);
+
+ if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
+ (FIELD_GET(GRB_CNT, inprog) == 0)) {
+ i++;
+ } else {
+ i = 0;
+ timeout--;
}
+ } while ((timeout != 0) && (i < 10));
- } while (1);
-
- /*
- * Disable executions in the LF's queue,
- * the queue should be empty at this point
- */
- lf_inprog.s.eena = 0x0;
- otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
- OTX2_CPT_LF_INPROG, lf_inprog.u);
+ if (timeout == 0)
+ dev_warn(&pdev->dev, "TIMEOUT: CPT poll on inflight count\n");
+ /* Wait for 2 us to flush all queue writes to memory */
+ udelay(2);
}
static inline void otx2_cptlf_disable_iqueues(struct otx2_cptlfs_info *lfs)
{
int slot;
- for (slot = 0; slot < lfs->lfs_num; slot++)
+ for (slot = 0; slot < lfs->lfs_num; slot++) {
otx2_cptlf_do_disable_iqueue(&lfs->lf[slot]);
+ otx2_cpt_lf_reset_msg(lfs, lfs->global_slot + slot);
+ }
}
static inline void otx2_cptlf_set_iqueue_enq(struct otx2_cptlf_info *lf,
@@ -282,6 +312,19 @@ static inline void otx2_cptlf_set_iqueue_exec(struct otx2_cptlf_info *lf,
OTX2_CPT_LF_INPROG, lf_inprog.u);
}
+static inline void otx2_cptlf_set_ctx_flr_flush(struct otx2_cptlf_info *lf)
+{
+ u8 blkaddr = lf->lfs->blkaddr;
+ u64 val;
+
+ val = otx2_cpt_read64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_CTX_CTL);
+ val |= BIT_ULL(0);
+
+ otx2_cpt_write64(lf->lfs->reg_base, blkaddr, lf->slot,
+ OTX2_CPT_LF_CTX_CTL, val);
+}
+
static inline void otx2_cptlf_enable_iqueue_exec(struct otx2_cptlf_info *lf)
{
otx2_cptlf_set_iqueue_exec(lf, true);
@@ -297,6 +340,10 @@ static inline void otx2_cptlf_enable_iqueues(struct otx2_cptlfs_info *lfs)
int slot;
for (slot = 0; slot < lfs->lfs_num; slot++) {
+ /* Enable flush on FLR for Errata */
+ if (is_dev_cn10kb(lfs->pdev))
+ otx2_cptlf_set_ctx_flr_flush(&lfs->lf[slot]);
+
otx2_cptlf_enable_iqueue_exec(&lfs->lf[slot]);
otx2_cptlf_enable_iqueue_enq(&lfs->lf[slot]);
}
@@ -382,8 +429,10 @@ static inline void otx2_cptlf_set_dev_info(struct otx2_cptlfs_info *lfs,
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_msk, int pri,
int lfs_num);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs);
-int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs);
-void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_register_misc_interrupts(struct otx2_cptlfs_info *lfs);
+int otx2_cptlf_register_done_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_unregister_misc_interrupts(struct otx2_cptlfs_info *lfs);
+void otx2_cptlf_unregister_done_interrupts(struct otx2_cptlfs_info *lfs);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs);
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs);
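The rewritten drain loop in otx2_cptlf_do_disable_iqueue() above computes the number of outstanding instructions from OTX2_CPT_LF_Q_INST_PTR: NQPTR and DQPTR are the enqueue and dequeue indexes, XQ_XOR flags that the enqueue side has wrapped, and the queue length is qsize * 40 instructions (Q_SIZE is programmed in 40-instruction units), so the pending count is the pointer difference plus one full queue length on wrap. A stand-alone sketch of just that arithmetic, duplicating the field masks so it compiles on its own:

#include <linux/bitfield.h>
#include <linux/types.h>

#define EX_DQPTR	GENMASK_ULL(19, 0)
#define EX_NQPTR	GENMASK_ULL(51, 32)
#define EX_XQ_XOR	GENMASK_ULL(63, 63)

static u64 example_pending_instructions(u64 inst_ptr, u64 qsize)
{
	/* Pointer difference, plus qsize * 40 slots when NQPTR has wrapped. */
	return FIELD_GET(EX_XQ_XOR, inst_ptr) * qsize * 40 +
	       FIELD_GET(EX_NQPTR, inst_ptr) - FIELD_GET(EX_DQPTR, inst_ptr);
}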
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
index a209ec5af3..e5859a1e1c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf.h
@@ -71,4 +71,8 @@ void otx2_cptpf_afpf_mbox_up_handler(struct work_struct *work);
irqreturn_t otx2_cptpf_vfpf_mbox_intr(int irq, void *arg);
void otx2_cptpf_vfpf_mbox_handler(struct work_struct *work);
+int otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptlfs_info *lfs, u8 egrp, int num_lfs);
+void otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs);
+
#endif /* __OTX2_CPTPF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index e34223daa3..400e36d990 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -14,6 +14,8 @@
#define OTX2_CPT_DRV_STRING "Marvell RVU CPT Physical Function Driver"
#define CPT_UC_RID_CN9K_B0 1
+#define CPT_UC_RID_CN10K_A 4
+#define CPT_UC_RID_CN10K_B 5
static void cptpf_enable_vfpf_mbox_intr(struct otx2_cptpf_dev *cptpf,
int num_vfs)
@@ -587,43 +589,22 @@ static int cpt_is_pf_usable(struct otx2_cptpf_dev *cptpf)
return 0;
}
-static int cptx_device_reset(struct otx2_cptpf_dev *cptpf, int blkaddr)
+static void cptpf_get_rid(struct pci_dev *pdev, struct otx2_cptpf_dev *cptpf)
{
- int timeout = 10, ret;
- u64 reg = 0;
+ struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
+ u64 reg_val = 0x0;
- ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, 0x1, blkaddr);
- if (ret)
- return ret;
-
- do {
- ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
- CPT_AF_BLK_RST, &reg, blkaddr);
- if (ret)
- return ret;
-
- if (!((reg >> 63) & 0x1))
- break;
-
- usleep_range(10000, 20000);
- if (timeout-- < 0)
- return -EBUSY;
- } while (1);
-
- return ret;
-}
-
-static int cptpf_device_reset(struct otx2_cptpf_dev *cptpf)
-{
- int ret = 0;
-
- if (cptpf->has_cpt1) {
- ret = cptx_device_reset(cptpf, BLKADDR_CPT1);
- if (ret)
- return ret;
+ if (is_dev_otx2(pdev)) {
+ eng_grps->rid = pdev->revision;
+ return;
}
- return cptx_device_reset(cptpf, BLKADDR_CPT0);
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
+ BLKADDR_CPT0);
+ if ((cpt_feature_sgv2(pdev) && (reg_val & BIT_ULL(18))) ||
+ is_dev_cn10ka_ax(pdev))
+ eng_grps->rid = CPT_UC_RID_CN10K_A;
+ else if (cpt_feature_sgv2(pdev))
+ eng_grps->rid = CPT_UC_RID_CN10K_B;
}
static void cptpf_check_block_implemented(struct otx2_cptpf_dev *cptpf)
@@ -643,10 +624,6 @@ static int cptpf_device_init(struct otx2_cptpf_dev *cptpf)
/* check if 'implemented' bit is set for block BLKADDR_CPT1 */
cptpf_check_block_implemented(cptpf);
- /* Reset the CPT PF device */
- ret = cptpf_device_reset(cptpf);
- if (ret)
- return ret;
/* Get number of SE, IE and AE engines */
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
@@ -701,6 +678,7 @@ static int cptpf_sriov_enable(struct pci_dev *pdev, int num_vfs)
if (ret)
goto destroy_flr;
+ cptpf_get_rid(pdev, cptpf);
/* Get CPT HW capabilities using LOAD_FVC operation. */
ret = otx2_cpt_discover_eng_capabilities(cptpf);
if (ret)
@@ -744,7 +722,7 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
{
struct device *dev = &pdev->dev;
struct otx2_cptpf_dev *cptpf;
- int err;
+ int err, num_vec;
cptpf = devm_kzalloc(dev, sizeof(*cptpf), GFP_KERNEL);
if (!cptpf)
@@ -779,8 +757,13 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
if (err)
goto clear_drvdata;
- err = pci_alloc_irq_vectors(pdev, RVU_PF_INT_VEC_CNT,
- RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
+ num_vec = pci_msix_vec_count(cptpf->pdev);
+ if (num_vec <= 0) {
+ err = -EINVAL;
+ goto clear_drvdata;
+ }
+
+ err = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSIX);
if (err < 0) {
dev_err(dev, "Request for %d msix vectors failed\n",
RVU_PF_INT_VEC_CNT);
@@ -797,6 +780,7 @@ static int otx2_cptpf_probe(struct pci_dev *pdev,
goto destroy_afpf_mbox;
cptpf->max_vfs = pci_sriov_get_totalvfs(pdev);
+ cptpf->kvf_limits = 1;
err = cn10k_cptpf_lmtst_init(cptpf);
if (err)
@@ -844,6 +828,14 @@ static void otx2_cptpf_remove(struct pci_dev *pdev)
cptpf_sriov_disable(pdev);
otx2_cpt_unregister_dl(cptpf);
+
+ /* Cleanup Inline CPT LF's if attached */
+ if (cptpf->lfs.lfs_num)
+ otx2_inline_cptlf_cleanup(&cptpf->lfs);
+
+ if (cptpf->cpt1_lfs.lfs_num)
+ otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);
+
/* Delete sysfs entry created for kernel VF limits */
sysfs_remove_group(&pdev->dev.kobj, &cptpf_sysfs_group);
/* Cleanup engine groups */
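cptpf_get_rid() above picks which microcode revision the engine groups will load: legacy OTX2 parts keep using pdev->revision, while SGv2-capable parts select the CN10K-A firmware when CPT_AF_CTL bit 18 is set (the same bit exposed as the devlink "t106_mode" parameter earlier in this diff) or when running on a CN10KA A-step, and the CN10K-B firmware otherwise. A condensed sketch of that selection; the real function stores into eng_grps->rid and leaves it untouched when neither branch applies, which is folded into the final return here for brevity:

static u8 example_pick_rid(struct pci_dev *pdev, u64 reg_val)
{
	/* reg_val is assumed to have been read from CPT_AF_CTL already. */
	if (is_dev_otx2(pdev))
		return pdev->revision;

	if ((cpt_feature_sgv2(pdev) && (reg_val & BIT_ULL(18))) ||
	    is_dev_cn10ka_ax(pdev))
		return CPT_UC_RID_CN10K_A;	/* T106-compatible firmware */

	return CPT_UC_RID_CN10K_B;
}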
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
index 480b3720f1..ec1ac7e836 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_mbox.c
@@ -78,7 +78,7 @@ static int handle_msg_get_caps(struct otx2_cptpf_dev *cptpf,
rsp->hdr.sig = OTX2_MBOX_RSP_SIG;
rsp->hdr.pcifunc = req->pcifunc;
rsp->cpt_pf_drv_version = OTX2_CPT_PF_DRV_VERSION;
- rsp->cpt_revision = cptpf->pdev->revision;
+ rsp->cpt_revision = cptpf->eng_grps.rid;
memcpy(&rsp->eng_caps, &cptpf->eng_caps, sizeof(rsp->eng_caps));
return 0;
@@ -171,6 +171,8 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
nix_req->hdr.id = MBOX_MSG_NIX_INLINE_IPSEC_CFG;
nix_req->hdr.sig = OTX2_MBOX_REQ_SIG;
nix_req->enable = 1;
+ nix_req->credit_th = req->credit_th;
+ nix_req->bpid = req->bpid;
if (!req->credit || req->credit > OTX2_CPT_INST_QLEN_MSGS)
nix_req->cpt_credit = OTX2_CPT_INST_QLEN_MSGS - 1;
else
@@ -197,12 +199,53 @@ static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf, u8 egrp,
return send_inline_ipsec_inbound_msg(cptpf, req->sso_pf_func, 0);
}
+int
+otx2_inline_cptlf_setup(struct otx2_cptpf_dev *cptpf,
+ struct otx2_cptlfs_info *lfs, u8 egrp, int num_lfs)
+{
+ int ret;
+
+ ret = otx2_cptlf_init(lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO, 1);
+ if (ret) {
+ dev_err(&cptpf->pdev->dev,
+ "LF configuration failed for RX inline ipsec.\n");
+ return ret;
+ }
+
+ /* Get msix offsets for attached LFs */
+ ret = otx2_cpt_msix_offset_msg(lfs);
+ if (ret)
+ goto cleanup_lf;
+
+ /* Register for CPT LF Misc interrupts */
+ ret = otx2_cptlf_register_misc_interrupts(lfs);
+ if (ret)
+ goto free_irq;
+
+ return 0;
+free_irq:
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+cleanup_lf:
+ otx2_cptlf_shutdown(lfs);
+ return ret;
+}
+
+void
+otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs)
+{
+ /* Unregister misc interrupt */
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+
+ /* Cleanup LFs */
+ otx2_cptlf_shutdown(lfs);
+}
+
static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
struct mbox_msghdr *req)
{
struct otx2_cpt_rx_inline_lf_cfg *cfg_req;
+ int num_lfs = 1, ret;
u8 egrp;
- int ret;
cfg_req = (struct otx2_cpt_rx_inline_lf_cfg *)req;
if (cptpf->lfs.lfs_num) {
@@ -223,11 +266,13 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
otx2_cptlf_set_dev_info(&cptpf->lfs, cptpf->pdev, cptpf->reg_base,
&cptpf->afpf_mbox, BLKADDR_CPT0);
- ret = otx2_cptlf_init(&cptpf->lfs, 1 << egrp, OTX2_CPT_QUEUE_HI_PRIO,
- 1);
+ cptpf->lfs.global_slot = 0;
+ cptpf->lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
+ cptpf->lfs.ctx_ilen = cfg_req->ctx_ilen;
+
+ ret = otx2_inline_cptlf_setup(cptpf, &cptpf->lfs, egrp, num_lfs);
if (ret) {
- dev_err(&cptpf->pdev->dev,
- "LF configuration failed for RX inline ipsec.\n");
+ dev_err(&cptpf->pdev->dev, "Inline-Ipsec CPT0 LF setup failed.\n");
return ret;
}
@@ -236,11 +281,13 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
otx2_cptlf_set_dev_info(&cptpf->cpt1_lfs, cptpf->pdev,
cptpf->reg_base, &cptpf->afpf_mbox,
BLKADDR_CPT1);
- ret = otx2_cptlf_init(&cptpf->cpt1_lfs, 1 << egrp,
- OTX2_CPT_QUEUE_HI_PRIO, 1);
+ cptpf->cpt1_lfs.global_slot = num_lfs;
+ cptpf->cpt1_lfs.ctx_ilen_ovrd = cfg_req->ctx_ilen_valid;
+ cptpf->cpt1_lfs.ctx_ilen = cfg_req->ctx_ilen;
+ ret = otx2_inline_cptlf_setup(cptpf, &cptpf->cpt1_lfs, egrp,
+ num_lfs);
if (ret) {
- dev_err(&cptpf->pdev->dev,
- "LF configuration failed for RX inline ipsec.\n");
+ dev_err(&cptpf->pdev->dev, "Inline CPT1 LF setup failed.\n");
goto lf_cleanup;
}
cptpf->rsrc_req_blkaddr = 0;
@@ -253,9 +300,9 @@ static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev *cptpf,
return 0;
lf1_cleanup:
- otx2_cptlf_shutdown(&cptpf->cpt1_lfs);
+ otx2_inline_cptlf_cleanup(&cptpf->cpt1_lfs);
lf_cleanup:
- otx2_cptlf_shutdown(&cptpf->lfs);
+ otx2_inline_cptlf_cleanup(&cptpf->lfs);
return ret;
}
@@ -410,6 +457,8 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
struct otx2_cptlfs_info *lfs = &cptpf->lfs;
struct device *dev = &cptpf->pdev->dev;
struct cpt_rd_wr_reg_msg *rsp_rd_wr;
+ struct msix_offset_rsp *rsp_msix;
+ int i;
if (msg->id >= MBOX_MSG_MAX) {
dev_err(dev, "MBOX msg with unknown ID %d\n", msg->id);
@@ -428,6 +477,14 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
cptpf->pf_id = (msg->pcifunc >> RVU_PFVF_PF_SHIFT) &
RVU_PFVF_PF_MASK;
break;
+ case MBOX_MSG_MSIX_OFFSET:
+ rsp_msix = (struct msix_offset_rsp *) msg;
+ for (i = 0; i < rsp_msix->cptlfs; i++)
+ lfs->lf[i].msix_offset = rsp_msix->cptlf_msixoff[i];
+
+ for (i = 0; i < rsp_msix->cpt1_lfs; i++)
+ lfs->lf[i].msix_offset = rsp_msix->cpt1_lf_msixoff[i];
+ break;
case MBOX_MSG_CPT_RD_WR_REGISTER:
rsp_rd_wr = (struct cpt_rd_wr_reg_msg *)msg;
if (msg->rc) {
@@ -449,6 +506,7 @@ static void process_afpf_mbox_msg(struct otx2_cptpf_dev *cptpf,
break;
case MBOX_MSG_CPT_INLINE_IPSEC_CFG:
case MBOX_MSG_NIX_INLINE_IPSEC_CFG:
+ case MBOX_MSG_CPT_LF_RESET:
break;
default:
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 1958b797a4..5c94846461 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -16,7 +16,11 @@
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08
-#define CTX_FLUSH_TIMER_CNT 0xFFFFFF
+/*
+ * Interval to flush dirty data for the next CTX entry. The interval is
+ * measured in increments of 10 ns (interval time = CTX_FLUSH_TIMER_CNT * 10 ns).
+ */
+#define CTX_FLUSH_TIMER_CNT 0x2FAF0
struct fw_info_t {
struct list_head ucodes;
@@ -117,12 +121,10 @@ static char *get_ucode_type_str(int ucode_type)
static int get_ucode_type(struct device *dev,
struct otx2_cpt_ucode_hdr *ucode_hdr,
- int *ucode_type)
+ int *ucode_type, u16 rid)
{
- struct otx2_cptpf_dev *cptpf = dev_get_drvdata(dev);
char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
- struct pci_dev *pdev = cptpf->pdev;
int i, val = 0;
u8 nn;
@@ -130,7 +132,7 @@ static int get_ucode_type(struct device *dev,
for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
- sprintf(ver_str_prefix, "ocpt-%02d", pdev->revision);
+ sprintf(ver_str_prefix, "ocpt-%02d", rid);
if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
return -EINVAL;
@@ -359,7 +361,7 @@ static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
}
static int load_fw(struct device *dev, struct fw_info_t *fw_info,
- char *filename)
+ char *filename, u16 rid)
{
struct otx2_cpt_ucode_hdr *ucode_hdr;
struct otx2_cpt_uc_info_t *uc_info;
@@ -375,7 +377,7 @@ static int load_fw(struct device *dev, struct fw_info_t *fw_info,
goto free_uc_info;
ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
- ret = get_ucode_type(dev, ucode_hdr, &ucode_type);
+ ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid);
if (ret)
goto release_fw;
@@ -389,6 +391,7 @@ static int load_fw(struct device *dev, struct fw_info_t *fw_info,
set_ucode_filename(&uc_info->ucode, filename);
memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
OTX2_CPT_UCODE_VER_STR_SZ);
+ uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0;
uc_info->ucode.ver_num = ucode_hdr->ver_num;
uc_info->ucode.type = ucode_type;
uc_info->ucode.size = ucode_size;
@@ -448,7 +451,8 @@ static void print_uc_info(struct fw_info_t *fw_info)
}
}
-static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
+static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
+ u16 rid)
{
char filename[OTX2_CPT_NAME_LENGTH];
char eng_type[8] = {0};
@@ -462,9 +466,9 @@ static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info)
eng_type[i] = tolower(eng_type[i]);
snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
- pdev->revision, eng_type);
+ rid, eng_type);
/* Request firmware for each engine type */
- ret = load_fw(&pdev->dev, fw_info, filename);
+ ret = load_fw(&pdev->dev, fw_info, filename, rid);
if (ret)
goto release_fw;
}
@@ -1155,7 +1159,7 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
if (eng_grps->is_grps_created)
goto unlock;
- ret = cpt_ucode_load_fw(pdev, &fw_info);
+ ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
if (ret)
goto unlock;
@@ -1230,14 +1234,16 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
*/
rnm_to_cpt_errata_fixup(&pdev->dev);
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
+ BLKADDR_CPT0);
/*
* Configure engine group mask to allow context prefetching
* for the groups and enable random number request, to enable
* CPT to request random numbers from RNM.
*/
+ reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
- OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16),
- BLKADDR_CPT0);
+ reg_val, BLKADDR_CPT0);
/*
* Set interval to periodically flush dirty data for the next
* CTX cache entry. Set the interval count to maximum supported
@@ -1252,10 +1258,12 @@ int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
* encounters a fault/poison, a rare case may result in
* unpredictable data being delivered to a CPT engine.
*/
- otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG, &reg_val,
- BLKADDR_CPT0);
- otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
- reg_val | BIT_ULL(24), BLKADDR_CPT0);
+ if (cpt_is_errata_38550_exists(pdev)) {
+ otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
+ &reg_val, BLKADDR_CPT0);
+ otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
+ reg_val | BIT_ULL(24), BLKADDR_CPT0);
+ }
mutex_unlock(&eng_grps->lock);
return 0;
@@ -1412,7 +1420,7 @@ static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
int ret;
mutex_lock(&eng_grps->lock);
- ret = cpt_ucode_load_fw(pdev, &fw_info);
+ ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
if (ret) {
mutex_unlock(&eng_grps->lock);
return ret;
@@ -1686,13 +1694,14 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
goto err_unlock;
}
INIT_LIST_HEAD(&fw_info.ucodes);
- ret = load_fw(dev, &fw_info, ucode_filename[0]);
+
+ ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
if (ret) {
dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
goto err_unlock;
}
if (ucode_idx > 1) {
- ret = load_fw(dev, &fw_info, ucode_filename[1]);
+ ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
if (ret) {
dev_err(dev, "Unable to load firmware %s\n",
ucode_filename[1]);
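For the CTX_FLUSH_TIMER_CNT change earlier in this file, the comment gives the unit as 10 ns per tick, so the two values work out as follows (a back-of-the-envelope check, not part of the patch itself):

old: 0xFFFFFF * 10 ns = 16,777,215 * 10 ns ≈ 167.8 ms between flushes
new: 0x2FAF0  * 10 ns =    195,312 * 10 ns ≈   1.95 ms between flushes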
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
index e69320a54b..365fe8943b 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.h
@@ -73,7 +73,7 @@ struct otx2_cpt_ucode_hdr {
};
struct otx2_cpt_ucode {
- u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ];/*
+ u8 ver_str[OTX2_CPT_UCODE_VER_STR_SZ + 1];/*
* ucode version in readable
* format
*/
@@ -150,6 +150,7 @@ struct otx2_cpt_eng_grps {
int engs_num; /* total number of engines supported */
u8 eng_ref_cnt[OTX2_CPT_MAX_ENGINES];/* engines reference count */
bool is_grps_created; /* Is the engine groups are already created */
+ u16 rid;
};
struct otx2_cptpf_dev;
int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
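The ver_str change above grows the buffer by one byte so the version string copied from the firmware header can always be NUL-terminated before it is handed to string helpers such as strnstr() in get_ucode_type(). A minimal sketch of the idiom; VER_STR_SZ and fw_hdr_ver_str are hypothetical stand-ins, not the driver's real names:

#include <string.h>

#define VER_STR_SZ	44			/* hypothetical stand-in value */

char ver_str[VER_STR_SZ + 1];			/* +1 for the terminator */

memcpy(ver_str, fw_hdr_ver_str, VER_STR_SZ);	/* source may lack a NUL */
ver_str[VER_STR_SZ] = '\0';			/* now safe for str*() helpers */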
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
index 994291e90d..11ab9af1df 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf.h
@@ -22,6 +22,7 @@ struct otx2_cptvf_dev {
int blkaddr;
void *bbuf_base;
unsigned long cap_flag;
+ u64 eng_caps[OTX2_CPT_MAX_ENG_TYPES];
};
irqreturn_t otx2_cptvf_pfvf_mbox_intr(int irq, void *arg);
@@ -29,5 +30,6 @@ void otx2_cptvf_pfvf_mbox_handler(struct work_struct *work);
int otx2_cptvf_send_eng_grp_num_msg(struct otx2_cptvf_dev *cptvf, int eng_type);
int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf);
int otx2_cpt_mbox_bbuf_init(struct otx2_cptvf_dev *cptvf, struct pci_dev *pdev);
+int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf);
#endif /* __OTX2_CPTVF_H */
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index e27ddd3c4e..1604fc58dc 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -17,6 +17,7 @@
#include "otx2_cptvf.h"
#include "otx2_cptvf_algs.h"
#include "otx2_cpt_reqmgr.h"
+#include "cn10k_cpt.h"
/* Size of salt in AES GCM mode */
#define AES_GCM_SALT_SIZE 4
@@ -384,6 +385,9 @@ static inline int cpt_enc_dec(struct skcipher_request *req, u32 enc)
req_info->is_trunc_hmac = false;
req_info->ctrl.s.grp = otx2_cpt_get_kcrypto_eng_grp_num(pdev);
+ req_info->req.cptr = ctx->er_ctx.hw_ctx;
+ req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
+
/*
* We perform an asynchronous send and once
* the request is completed the driver would
@@ -530,6 +534,8 @@ static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
struct otx2_cpt_enc_ctx *ctx = crypto_skcipher_ctx(stfm);
struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
struct crypto_alg *alg = tfm->__crt_alg;
+ struct pci_dev *pdev;
+ int ret, cpu_num;
memset(ctx, 0, sizeof(*ctx));
/*
@@ -541,6 +547,15 @@ static int otx2_cpt_enc_dec_init(struct crypto_skcipher *stfm)
stfm, sizeof(struct otx2_cpt_req_ctx) +
sizeof(struct skcipher_request));
+ ret = get_se_device(&pdev, &cpu_num);
+ if (ret)
+ return ret;
+
+ ctx->pdev = pdev;
+ ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
+ if (ret)
+ return ret;
+
return cpt_skcipher_fallback_init(ctx, alg);
}
@@ -552,6 +567,7 @@ static void otx2_cpt_skcipher_exit(struct crypto_skcipher *tfm)
crypto_free_skcipher(ctx->fbk_cipher);
ctx->fbk_cipher = NULL;
}
+ cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
}
static int cpt_aead_fallback_init(struct otx2_cpt_aead_ctx *ctx,
@@ -576,6 +592,8 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
struct otx2_cpt_aead_ctx *ctx = crypto_aead_ctx_dma(atfm);
struct crypto_tfm *tfm = crypto_aead_tfm(atfm);
struct crypto_alg *alg = tfm->__crt_alg;
+ struct pci_dev *pdev;
+ int ret, cpu_num;
ctx->cipher_type = cipher_type;
ctx->mac_type = mac_type;
@@ -632,6 +650,15 @@ static int cpt_aead_init(struct crypto_aead *atfm, u8 cipher_type, u8 mac_type)
}
crypto_aead_set_reqsize_dma(atfm, sizeof(struct otx2_cpt_req_ctx));
+ ret = get_se_device(&pdev, &cpu_num);
+ if (ret)
+ return ret;
+
+ ctx->pdev = pdev;
+ ret = cn10k_cpt_hw_ctx_init(pdev, &ctx->er_ctx);
+ if (ret)
+ return ret;
+
return cpt_aead_fallback_init(ctx, alg);
}
@@ -694,6 +721,7 @@ static void otx2_cpt_aead_exit(struct crypto_aead *tfm)
crypto_free_aead(ctx->fbk_cipher);
ctx->fbk_cipher = NULL;
}
+ cn10k_cpt_hw_ctx_clear(ctx->pdev, &ctx->er_ctx);
}
static int otx2_cpt_aead_gcm_set_authsize(struct crypto_aead *tfm,
@@ -1299,6 +1327,9 @@ static int cpt_aead_enc_dec(struct aead_request *req, u8 reg_type, u8 enc)
req_info->is_enc = enc;
req_info->is_trunc_hmac = false;
+ req_info->req.cptr = ctx->er_ctx.hw_ctx;
+ req_info->req.cptr_dma = ctx->er_ctx.cptr_dma;
+
switch (reg_type) {
case OTX2_CPT_AEAD_ENC_DEC_REQ:
status = create_aead_input_list(req, enc);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
index f04184bd17..d29f84f01c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.h
@@ -9,6 +9,7 @@
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include "otx2_cpt_common.h"
+#include "cn10k_cpt.h"
#define OTX2_CPT_MAX_ENC_KEY_SIZE 32
#define OTX2_CPT_MAX_HASH_KEY_SIZE 64
@@ -123,6 +124,8 @@ struct otx2_cpt_enc_ctx {
u8 key_type;
u8 enc_align_len;
struct crypto_skcipher *fbk_cipher;
+ struct pci_dev *pdev;
+ struct cn10k_cpt_errata_ctx er_ctx;
};
union otx2_cpt_offset_ctrl {
@@ -161,6 +164,8 @@ struct otx2_cpt_aead_ctx {
struct crypto_shash *hashalg;
struct otx2_cpt_sdesc *sdesc;
struct crypto_aead *fbk_cipher;
+ struct cn10k_cpt_errata_ctx er_ctx;
+ struct pci_dev *pdev;
u8 *ipad;
u8 *opad;
u32 enc_key_len;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 215a1b17b6..527d34cc25 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -246,7 +246,8 @@ static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
/* Unregister crypto algorithms */
otx2_cpt_crypto_exit(lfs->pdev, THIS_MODULE);
/* Unregister LFs interrupts */
- otx2_cptlf_unregister_interrupts(lfs);
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+ otx2_cptlf_unregister_done_interrupts(lfs);
/* Cleanup LFs software side */
lf_sw_cleanup(lfs);
/* Free instruction queues */
@@ -280,8 +281,7 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
if (ret)
return ret;
- lfs_num = cptvf->lfs.kvf_limits ? cptvf->lfs.kvf_limits :
- num_online_cpus();
+ lfs_num = cptvf->lfs.kvf_limits;
otx2_cptlf_set_dev_info(lfs, cptvf->pdev, cptvf->reg_base,
&cptvf->pfvf_mbox, cptvf->blkaddr);
@@ -301,7 +301,11 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
goto cleanup_lf;
/* Register LFs interrupts */
- ret = otx2_cptlf_register_interrupts(lfs);
+ ret = otx2_cptlf_register_misc_interrupts(lfs);
+ if (ret)
+ goto cleanup_lf_sw;
+
+ ret = otx2_cptlf_register_done_interrupts(lfs);
if (ret)
goto cleanup_lf_sw;
@@ -322,7 +326,8 @@ static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
disable_irqs:
otx2_cptlf_free_irqs_affinity(lfs);
unregister_intr:
- otx2_cptlf_unregister_interrupts(lfs);
+ otx2_cptlf_unregister_misc_interrupts(lfs);
+ otx2_cptlf_unregister_done_interrupts(lfs);
cleanup_lf_sw:
lf_sw_cleanup(lfs);
cleanup_lf:
@@ -383,6 +388,17 @@ static int otx2_cptvf_probe(struct pci_dev *pdev,
goto destroy_pfvf_mbox;
cptvf->blkaddr = BLKADDR_CPT0;
+
+ cptvf_hw_ops_get(cptvf);
+
+ ret = otx2_cptvf_send_caps_msg(cptvf);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't get CPT engine capabilities.\n");
+ goto unregister_interrupts;
+ }
+ if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))
+ cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;
+
/* Initialize CPT LFs */
ret = cptvf_lf_init(cptvf);
if (ret)
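The VF probe above gates the SGv2 descriptor path on bit 35 of a 64-bit capability word returned over the mailbox; BIT_ULL() keeps the mask 64-bit regardless of the width of unsigned long on the build. The gating itself reduces to a one-time ops override at probe, sketched here with the names from the hunk above:

/* eng_caps[] entries are 64-bit capability words filled in by the PF */
if (cptvf->eng_caps[OTX2_CPT_SE_TYPES] & BIT_ULL(35))	/* SGv2 capable */
	cptvf->lfs.ops->cpt_sg_info_create = cn10k_sgv2_info_create;
/* otherwise the default (legacy) SG info builder stays in place */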
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
index 75c403f2b1..d9fa5f6e20 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_mbox.c
@@ -72,6 +72,7 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
struct otx2_cptlfs_info *lfs = &cptvf->lfs;
struct otx2_cpt_kvf_limits_rsp *rsp_limits;
struct otx2_cpt_egrp_num_rsp *rsp_grp;
+ struct otx2_cpt_caps_rsp *eng_caps;
struct cpt_rd_wr_reg_msg *rsp_reg;
struct msix_offset_rsp *rsp_msix;
int i;
@@ -127,6 +128,13 @@ static void process_pfvf_mbox_mbox_msg(struct otx2_cptvf_dev *cptvf,
rsp_limits = (struct otx2_cpt_kvf_limits_rsp *) msg;
cptvf->lfs.kvf_limits = rsp_limits->kvf_limits;
break;
+ case MBOX_MSG_GET_CAPS:
+ eng_caps = (struct otx2_cpt_caps_rsp *)msg;
+ memcpy(cptvf->eng_caps, eng_caps->eng_caps,
+ sizeof(cptvf->eng_caps));
+ break;
+ case MBOX_MSG_CPT_LF_RESET:
+ break;
default:
dev_err(&cptvf->pdev->dev, "Unsupported msg %d received.\n",
msg->id);
@@ -205,3 +213,23 @@ int otx2_cptvf_send_kvf_limits_msg(struct otx2_cptvf_dev *cptvf)
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+
+int otx2_cptvf_send_caps_msg(struct otx2_cptvf_dev *cptvf)
+{
+ struct otx2_mbox *mbox = &cptvf->pfvf_mbox;
+ struct pci_dev *pdev = cptvf->pdev;
+ struct mbox_msghdr *req;
+
+ req = (struct mbox_msghdr *)
+ otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
+ sizeof(struct otx2_cpt_caps_rsp));
+ if (!req) {
+ dev_err(&pdev->dev, "RVU MBOX failed to get message.\n");
+ return -EFAULT;
+ }
+ req->id = MBOX_MSG_GET_CAPS;
+ req->sig = OTX2_MBOX_REQ_SIG;
+ req->pcifunc = OTX2_CPT_RVU_PFFUNC(cptvf->vf_id, 0);
+
+ return otx2_cpt_send_mbox_msg(mbox, pdev);
+}
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
index 811ded72ce..5387c68f3c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c
@@ -4,9 +4,6 @@
#include "otx2_cptvf.h"
#include "otx2_cpt_common.h"
-/* SG list header size in bytes */
-#define SG_LIST_HDR_SIZE 8
-
/* Default timeout when waiting for free pending entry in us */
#define CPT_PENTRY_TIMEOUT 1000
#define CPT_PENTRY_STEP 50
@@ -26,9 +23,9 @@ static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
pr_debug("Gather list size %d\n", req->in_cnt);
for (i = 0; i < req->in_cnt; i++) {
- pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%llx\n", i,
req->in[i].size, req->in[i].vptr,
- (void *) req->in[i].dma_addr);
+ req->in[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n",
req->in[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
@@ -36,9 +33,9 @@ static void otx2_cpt_dump_sg_list(struct pci_dev *pdev,
}
pr_debug("Scatter list size %d\n", req->out_cnt);
for (i = 0; i < req->out_cnt; i++) {
- pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%p\n", i,
+ pr_debug("Buffer %d size %d, vptr 0x%p, dmaptr 0x%llx\n", i,
req->out[i].size, req->out[i].vptr,
- (void *) req->out[i].dma_addr);
+ req->out[i].dma_addr);
pr_debug("Buffer hexdump (%d bytes)\n", req->out[i].size);
print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1,
req->out[i].vptr, req->out[i].size, false);
@@ -84,149 +81,6 @@ static inline void free_pentry(struct otx2_cpt_pending_entry *pentry)
pentry->busy = false;
}
-static inline int setup_sgio_components(struct pci_dev *pdev,
- struct otx2_cpt_buf_ptr *list,
- int buf_count, u8 *buffer)
-{
- struct otx2_cpt_sglist_component *sg_ptr = NULL;
- int ret = 0, i, j;
- int components;
-
- if (unlikely(!list)) {
- dev_err(&pdev->dev, "Input list pointer is NULL\n");
- return -EFAULT;
- }
-
- for (i = 0; i < buf_count; i++) {
- if (unlikely(!list[i].vptr))
- continue;
- list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr,
- list[i].size,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) {
- dev_err(&pdev->dev, "Dma mapping failed\n");
- ret = -EIO;
- goto sg_cleanup;
- }
- }
- components = buf_count / 4;
- sg_ptr = (struct otx2_cpt_sglist_component *)buffer;
- for (i = 0; i < components; i++) {
- sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
- sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
- sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
- sg_ptr->len3 = cpu_to_be16(list[i * 4 + 3].size);
- sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
- sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
- sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
- sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr);
- sg_ptr++;
- }
- components = buf_count % 4;
-
- switch (components) {
- case 3:
- sg_ptr->len2 = cpu_to_be16(list[i * 4 + 2].size);
- sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr);
- fallthrough;
- case 2:
- sg_ptr->len1 = cpu_to_be16(list[i * 4 + 1].size);
- sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr);
- fallthrough;
- case 1:
- sg_ptr->len0 = cpu_to_be16(list[i * 4 + 0].size);
- sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr);
- break;
- default:
- break;
- }
- return ret;
-
-sg_cleanup:
- for (j = 0; j < i; j++) {
- if (list[j].dma_addr) {
- dma_unmap_single(&pdev->dev, list[j].dma_addr,
- list[j].size, DMA_BIDIRECTIONAL);
- }
-
- list[j].dma_addr = 0;
- }
- return ret;
-}
-
-static inline struct otx2_cpt_inst_info *info_create(struct pci_dev *pdev,
- struct otx2_cpt_req_info *req,
- gfp_t gfp)
-{
- int align = OTX2_CPT_DMA_MINALIGN;
- struct otx2_cpt_inst_info *info;
- u32 dlen, align_dlen, info_len;
- u16 g_sz_bytes, s_sz_bytes;
- u32 total_mem_len;
-
- if (unlikely(req->in_cnt > OTX2_CPT_MAX_SG_IN_CNT ||
- req->out_cnt > OTX2_CPT_MAX_SG_OUT_CNT)) {
- dev_err(&pdev->dev, "Error too many sg components\n");
- return NULL;
- }
-
- g_sz_bytes = ((req->in_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
- s_sz_bytes = ((req->out_cnt + 3) / 4) *
- sizeof(struct otx2_cpt_sglist_component);
-
- dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE;
- align_dlen = ALIGN(dlen, align);
- info_len = ALIGN(sizeof(*info), align);
- total_mem_len = align_dlen + info_len + sizeof(union otx2_cpt_res_s);
-
- info = kzalloc(total_mem_len, gfp);
- if (unlikely(!info))
- return NULL;
-
- info->dlen = dlen;
- info->in_buffer = (u8 *)info + info_len;
-
- ((u16 *)info->in_buffer)[0] = req->out_cnt;
- ((u16 *)info->in_buffer)[1] = req->in_cnt;
- ((u16 *)info->in_buffer)[2] = 0;
- ((u16 *)info->in_buffer)[3] = 0;
- cpu_to_be64s((u64 *)info->in_buffer);
-
- /* Setup gather (input) components */
- if (setup_sgio_components(pdev, req->in, req->in_cnt,
- &info->in_buffer[8])) {
- dev_err(&pdev->dev, "Failed to setup gather list\n");
- goto destroy_info;
- }
-
- if (setup_sgio_components(pdev, req->out, req->out_cnt,
- &info->in_buffer[8 + g_sz_bytes])) {
- dev_err(&pdev->dev, "Failed to setup scatter list\n");
- goto destroy_info;
- }
-
- info->dma_len = total_mem_len - info_len;
- info->dptr_baddr = dma_map_single(&pdev->dev, info->in_buffer,
- info->dma_len, DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(&pdev->dev, info->dptr_baddr))) {
- dev_err(&pdev->dev, "DMA Mapping failed for cpt req\n");
- goto destroy_info;
- }
- /*
- * Get buffer for union otx2_cpt_res_s response
- * structure and its physical address
- */
- info->completion_addr = info->in_buffer + align_dlen;
- info->comp_baddr = info->dptr_baddr + align_dlen;
-
- return info;
-
-destroy_info:
- otx2_cpt_info_destroy(pdev, info);
- return NULL;
-}
-
static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
struct otx2_cpt_pending_queue *pqueue,
struct otx2_cptlf_info *lf)
@@ -247,7 +101,7 @@ static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
if (unlikely(!otx2_cptlf_started(lf->lfs)))
return -ENODEV;
- info = info_create(pdev, req, gfp);
+ info = lf->lfs->ops->cpt_sg_info_create(pdev, req, gfp);
if (unlikely(!info)) {
dev_err(&pdev->dev, "Setting up cpt inst info failed");
return -ENOMEM;
@@ -303,9 +157,9 @@ static int process_request(struct pci_dev *pdev, struct otx2_cpt_req_info *req,
/* 64-bit swap for microcode data reads, not needed for addresses*/
cpu_to_be64s(&iq_cmd.cmd.u);
- iq_cmd.dptr = info->dptr_baddr;
- iq_cmd.rptr = 0;
- iq_cmd.cptr.u = 0;
+ iq_cmd.dptr = info->dptr_baddr | info->gthr_sz << 60;
+ iq_cmd.rptr = info->rptr_baddr | info->sctr_sz << 60;
+ iq_cmd.cptr.s.cptr = cpt_req->cptr_dma;
iq_cmd.cptr.s.grp = ctrl->s.grp;
/* Fill in the CPT_INST_S type command for HW interpretation */
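In the request path above, the gather and scatter descriptor sizes now ride in the top bits (63:60) of DPTR and RPTR, with the DMA address in the low bits. A small sketch of that packing, assuming the size code fits in four bits; the helper name and masks are illustrative, not taken from the driver:

#include <linux/bits.h>
#include <linux/types.h>

#define CPT_PTR_ADDR_MASK	GENMASK_ULL(59, 0)
#define CPT_PTR_SZ_SHIFT	60

static inline u64 cpt_pack_ptr(u64 dma_addr, u8 sz_code)
{
	/* DMA address in bits 59:0, SG size code in bits 63:60 */
	return (dma_addr & CPT_PTR_ADDR_MASK) |
	       ((u64)sz_code << CPT_PTR_SZ_SHIFT);
}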
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index caea98622c..7a3083debc 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1121,19 +1121,6 @@ static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
.decrypt = n2_decrypt_chaining,
},
},
- { .name = "cfb(des)",
- .drv_name = "cfb-des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_DES |
- ENC_TYPE_CHAINING_CFB),
- .skcipher = {
- .min_keysize = DES_KEY_SIZE,
- .max_keysize = DES_KEY_SIZE,
- .setkey = n2_des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
/* 3DES: ECB CBC and CFB are supported */
{ .name = "ecb(des3_ede)",
@@ -1163,19 +1150,7 @@ static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
.decrypt = n2_decrypt_chaining,
},
},
- { .name = "cfb(des3_ede)",
- .drv_name = "cfb-3des",
- .block_size = DES_BLOCK_SIZE,
- .enc_type = (ENC_TYPE_ALG_3DES |
- ENC_TYPE_CHAINING_CFB),
- .skcipher = {
- .min_keysize = 3 * DES_KEY_SIZE,
- .max_keysize = 3 * DES_KEY_SIZE,
- .setkey = n2_3des_setkey,
- .encrypt = n2_encrypt_chaining,
- .decrypt = n2_decrypt_chaining,
- },
- },
+
/* AES: ECB CBC and CTR are supported */
{ .name = "ecb(aes)",
.drv_name = "ecb-aes",
@@ -1382,8 +1357,12 @@ static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
ahash->setkey = n2_hmac_async_setkey;
base = &ahash->halg.base;
- snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
- snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
+ if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
+ p->child_alg) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_p;
+ if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
+ p->child_alg) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_p;
base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
base->cra_init = n2_hmac_cra_init;
@@ -1394,6 +1373,7 @@ static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
if (err) {
pr_err("%s alg registration failed\n", base->cra_name);
list_del(&p->derived.entry);
+out_free_p:
kfree(p);
} else {
pr_info("%s alg registered\n", base->cra_name);
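The n2 hunk above turns silent truncation of generated algorithm names into a registration failure. The check relies on snprintf() returning the length the output would have had, so a result greater than or equal to the buffer size means the name was cut short. A minimal, generic sketch of the idiom:

#include <stdio.h>

/* Returns 0 on success, -1 if the formatted name would not fit in buf. */
static int build_alg_name(char *buf, size_t len, const char *child)
{
	if (snprintf(buf, len, "hmac(%s)", child) >= (int)len)
		return -1;	/* truncated: refuse to use a bogus name */
	return 0;
}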
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index da95747d97..9393e10671 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -445,8 +445,8 @@ static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
return PTR_ERR(ctx->fallback_tfm);
}
- tfm->reqsize = sizeof(struct rk_cipher_rctx) +
- crypto_skcipher_reqsize(ctx->fallback_tfm);
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct rk_cipher_rctx) +
+ crypto_skcipher_reqsize(ctx->fallback_tfm));
return 0;
}
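The rk3288 fix above swaps a direct write of tfm->reqsize for the crypto_skcipher_set_reqsize() accessor; the request size must still cover the driver's own request context plus whatever the fallback transform needs. A hedged sketch of that init pattern — the example_* struct names are hypothetical, the crypto API calls are real:

#include <crypto/internal/skcipher.h>

static int example_cipher_tfm_init(struct crypto_skcipher *tfm)
{
	struct example_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));

	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0,
						  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm))
		return PTR_ERR(ctx->fallback_tfm);

	/* our per-request context plus room for the fallback's own request */
	crypto_skcipher_set_reqsize(tfm, sizeof(struct example_cipher_rctx) +
					 crypto_skcipher_reqsize(ctx->fallback_tfm));
	return 0;
}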
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index fabe4f381f..3423b5cde1 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -15,6 +15,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/engine.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -24,105 +25,101 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
-#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-#define SHA_BUFFER_LEN PAGE_SIZE
-#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
-
-#define SAHARA_NAME "sahara"
-#define SAHARA_VERSION_3 3
-#define SAHARA_VERSION_4 4
-#define SAHARA_TIMEOUT_MS 1000
-#define SAHARA_MAX_HW_DESC 2
-#define SAHARA_MAX_HW_LINK 20
-
-#define FLAGS_MODE_MASK 0x000f
-#define FLAGS_ENCRYPT BIT(0)
-#define FLAGS_CBC BIT(1)
-
-#define SAHARA_HDR_BASE 0x00800000
-#define SAHARA_HDR_SKHA_ALG_AES 0
-#define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
-#define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
-#define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
-#define SAHARA_HDR_FORM_DATA (5 << 16)
-#define SAHARA_HDR_FORM_KEY (8 << 16)
-#define SAHARA_HDR_LLO (1 << 24)
-#define SAHARA_HDR_CHA_SKHA (1 << 28)
-#define SAHARA_HDR_CHA_MDHA (2 << 28)
-#define SAHARA_HDR_PARITY_BIT (1 << 31)
-
-#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
-#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
-#define SAHARA_HDR_MDHA_HASH 0xA0850000
-#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
-#define SAHARA_HDR_MDHA_ALG_SHA1 0
-#define SAHARA_HDR_MDHA_ALG_MD5 1
-#define SAHARA_HDR_MDHA_ALG_SHA256 2
-#define SAHARA_HDR_MDHA_ALG_SHA224 3
-#define SAHARA_HDR_MDHA_PDATA (1 << 2)
-#define SAHARA_HDR_MDHA_HMAC (1 << 3)
-#define SAHARA_HDR_MDHA_INIT (1 << 5)
-#define SAHARA_HDR_MDHA_IPAD (1 << 6)
-#define SAHARA_HDR_MDHA_OPAD (1 << 7)
-#define SAHARA_HDR_MDHA_SWAP (1 << 8)
-#define SAHARA_HDR_MDHA_MAC_FULL (1 << 9)
-#define SAHARA_HDR_MDHA_SSL (1 << 10)
-
-/* SAHARA can only process one request at a time */
-#define SAHARA_QUEUE_LENGTH 1
-
-#define SAHARA_REG_VERSION 0x00
-#define SAHARA_REG_DAR 0x04
-#define SAHARA_REG_CONTROL 0x08
-#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
-#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
-#define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
-#define SAHARA_CONTROL_ENABLE_INT (1 << 4)
-#define SAHARA_REG_CMD 0x0C
-#define SAHARA_CMD_RESET (1 << 0)
-#define SAHARA_CMD_CLEAR_INT (1 << 8)
-#define SAHARA_CMD_CLEAR_ERR (1 << 9)
-#define SAHARA_CMD_SINGLE_STEP (1 << 10)
-#define SAHARA_CMD_MODE_BATCH (1 << 16)
-#define SAHARA_CMD_MODE_DEBUG (1 << 18)
-#define SAHARA_REG_STATUS 0x10
-#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
-#define SAHARA_STATE_IDLE 0
-#define SAHARA_STATE_BUSY 1
-#define SAHARA_STATE_ERR 2
-#define SAHARA_STATE_FAULT 3
-#define SAHARA_STATE_COMPLETE 4
-#define SAHARA_STATE_COMP_FLAG (1 << 2)
-#define SAHARA_STATUS_DAR_FULL (1 << 3)
-#define SAHARA_STATUS_ERROR (1 << 4)
-#define SAHARA_STATUS_SECURE (1 << 5)
-#define SAHARA_STATUS_FAIL (1 << 6)
-#define SAHARA_STATUS_INIT (1 << 7)
-#define SAHARA_STATUS_RNG_RESEED (1 << 8)
-#define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
-#define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
-#define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
-#define SAHARA_STATUS_MODE_BATCH (1 << 16)
-#define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
-#define SAHARA_STATUS_MODE_DEBUG (1 << 18)
-#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
-#define SAHARA_REG_ERRSTATUS 0x14
-#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
-#define SAHARA_ERRSOURCE_CHA 14
-#define SAHARA_ERRSOURCE_DMA 15
-#define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
-#define SAHARA_ERRSTATUS_GET_DMASZ(x)(((x) >> 9) & 0x3)
-#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
-#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
-#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
-#define SAHARA_REG_FADDR 0x18
-#define SAHARA_REG_CDAR 0x1C
-#define SAHARA_REG_IDAR 0x20
+#define SHA_BUFFER_LEN PAGE_SIZE
+#define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+#define SAHARA_NAME "sahara"
+#define SAHARA_VERSION_3 3
+#define SAHARA_VERSION_4 4
+#define SAHARA_TIMEOUT_MS 1000
+#define SAHARA_MAX_HW_DESC 2
+#define SAHARA_MAX_HW_LINK 20
+
+#define FLAGS_MODE_MASK 0x000f
+#define FLAGS_ENCRYPT BIT(0)
+#define FLAGS_CBC BIT(1)
+
+#define SAHARA_HDR_BASE 0x00800000
+#define SAHARA_HDR_SKHA_ALG_AES 0
+#define SAHARA_HDR_SKHA_MODE_ECB 0
+#define SAHARA_HDR_SKHA_OP_ENC BIT(2)
+#define SAHARA_HDR_SKHA_MODE_CBC BIT(3)
+#define SAHARA_HDR_FORM_DATA (5 << 16)
+#define SAHARA_HDR_FORM_KEY BIT(19)
+#define SAHARA_HDR_LLO BIT(24)
+#define SAHARA_HDR_CHA_SKHA BIT(28)
+#define SAHARA_HDR_CHA_MDHA BIT(29)
+#define SAHARA_HDR_PARITY_BIT BIT(31)
+
+#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
+#define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
+#define SAHARA_HDR_MDHA_HASH 0xA0850000
+#define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
+#define SAHARA_HDR_MDHA_ALG_SHA1 0
+#define SAHARA_HDR_MDHA_ALG_MD5 1
+#define SAHARA_HDR_MDHA_ALG_SHA256 2
+#define SAHARA_HDR_MDHA_ALG_SHA224 3
+#define SAHARA_HDR_MDHA_PDATA BIT(2)
+#define SAHARA_HDR_MDHA_HMAC BIT(3)
+#define SAHARA_HDR_MDHA_INIT BIT(5)
+#define SAHARA_HDR_MDHA_IPAD BIT(6)
+#define SAHARA_HDR_MDHA_OPAD BIT(7)
+#define SAHARA_HDR_MDHA_SWAP BIT(8)
+#define SAHARA_HDR_MDHA_MAC_FULL BIT(9)
+#define SAHARA_HDR_MDHA_SSL BIT(10)
+
+#define SAHARA_REG_VERSION 0x00
+#define SAHARA_REG_DAR 0x04
+#define SAHARA_REG_CONTROL 0x08
+#define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
+#define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
+#define SAHARA_CONTROL_RNG_AUTORSD BIT(7)
+#define SAHARA_CONTROL_ENABLE_INT BIT(4)
+#define SAHARA_REG_CMD 0x0C
+#define SAHARA_CMD_RESET BIT(0)
+#define SAHARA_CMD_CLEAR_INT BIT(8)
+#define SAHARA_CMD_CLEAR_ERR BIT(9)
+#define SAHARA_CMD_SINGLE_STEP BIT(10)
+#define SAHARA_CMD_MODE_BATCH BIT(16)
+#define SAHARA_CMD_MODE_DEBUG BIT(18)
+#define SAHARA_REG_STATUS 0x10
+#define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
+#define SAHARA_STATE_IDLE 0
+#define SAHARA_STATE_BUSY 1
+#define SAHARA_STATE_ERR 2
+#define SAHARA_STATE_FAULT 3
+#define SAHARA_STATE_COMPLETE 4
+#define SAHARA_STATE_COMP_FLAG BIT(2)
+#define SAHARA_STATUS_DAR_FULL BIT(3)
+#define SAHARA_STATUS_ERROR BIT(4)
+#define SAHARA_STATUS_SECURE BIT(5)
+#define SAHARA_STATUS_FAIL BIT(6)
+#define SAHARA_STATUS_INIT BIT(7)
+#define SAHARA_STATUS_RNG_RESEED BIT(8)
+#define SAHARA_STATUS_ACTIVE_RNG BIT(9)
+#define SAHARA_STATUS_ACTIVE_MDHA BIT(10)
+#define SAHARA_STATUS_ACTIVE_SKHA BIT(11)
+#define SAHARA_STATUS_MODE_BATCH BIT(16)
+#define SAHARA_STATUS_MODE_DEDICATED BIT(17)
+#define SAHARA_STATUS_MODE_DEBUG BIT(18)
+#define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
+#define SAHARA_REG_ERRSTATUS 0x14
+#define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
+#define SAHARA_ERRSOURCE_CHA 14
+#define SAHARA_ERRSOURCE_DMA 15
+#define SAHARA_ERRSTATUS_DMA_DIR BIT(8)
+#define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
+#define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
+#define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
+#define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
+#define SAHARA_REG_FADDR 0x18
+#define SAHARA_REG_CDAR 0x1C
+#define SAHARA_REG_IDAR 0x20
struct sahara_hw_desc {
u32 hdr;
@@ -168,7 +165,6 @@ struct sahara_aes_reqctx {
* @total: total number of bytes for transfer
* @last: is this the last block
* @first: is this the first block
- * @active: inside a transfer
*/
struct sahara_sha_reqctx {
u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
@@ -184,7 +180,6 @@ struct sahara_sha_reqctx {
size_t total;
unsigned int last;
unsigned int first;
- unsigned int active;
};
struct sahara_dev {
@@ -193,12 +188,9 @@ struct sahara_dev {
void __iomem *regs_base;
struct clk *clk_ipg;
struct clk *clk_ahb;
- spinlock_t queue_spinlock;
- struct task_struct *kthread;
struct completion dma_completion;
struct sahara_ctx *ctx;
- struct crypto_queue queue;
unsigned long flags;
struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
@@ -222,7 +214,7 @@ struct sahara_dev {
struct scatterlist *out_sg;
int nb_out_sg;
- u32 error;
+ struct crypto_engine *engine;
};
static struct sahara_dev *dev_ptr;
@@ -675,7 +667,6 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
struct sahara_ctx *ctx = crypto_skcipher_ctx(
crypto_skcipher_reqtfm(req));
struct sahara_dev *dev = dev_ptr;
- int err = 0;
if (!req->cryptlen)
return 0;
@@ -686,21 +677,12 @@ static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
- if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
- dev_err(dev->device,
- "request size is not exact amount of AES blocks\n");
+ if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
return -EINVAL;
- }
rctx->mode = mode;
- spin_lock_bh(&dev->queue_spinlock);
- err = crypto_enqueue_request(&dev->queue, &req->base);
- spin_unlock_bh(&dev->queue_spinlock);
-
- wake_up_process(dev->kthread);
-
- return err;
+ return crypto_transfer_skcipher_request_to_engine(dev->engine, req);
}
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
@@ -1001,45 +983,26 @@ static int sahara_sha_process(struct ahash_request *req)
return 0;
}
-static int sahara_queue_manage(void *data)
+static int sahara_do_one_request(struct crypto_engine *engine, void *areq)
{
- struct sahara_dev *dev = data;
- struct crypto_async_request *async_req;
- struct crypto_async_request *backlog;
- int ret = 0;
-
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
-
- spin_lock_bh(&dev->queue_spinlock);
- backlog = crypto_get_backlog(&dev->queue);
- async_req = crypto_dequeue_request(&dev->queue);
- spin_unlock_bh(&dev->queue_spinlock);
-
- if (backlog)
- crypto_request_complete(backlog, -EINPROGRESS);
-
- if (async_req) {
- if (crypto_tfm_alg_type(async_req->tfm) ==
- CRYPTO_ALG_TYPE_AHASH) {
- struct ahash_request *req =
- ahash_request_cast(async_req);
-
- ret = sahara_sha_process(req);
- } else {
- struct skcipher_request *req =
- skcipher_request_cast(async_req);
-
- ret = sahara_aes_process(req);
- }
+ struct crypto_async_request *async_req = areq;
+ int err;
- crypto_request_complete(async_req, ret);
+ if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_AHASH) {
+ struct ahash_request *req = ahash_request_cast(async_req);
- continue;
- }
+ err = sahara_sha_process(req);
+ local_bh_disable();
+ crypto_finalize_hash_request(engine, req, err);
+ local_bh_enable();
+ } else {
+ struct skcipher_request *req = skcipher_request_cast(async_req);
- schedule();
- } while (!kthread_should_stop());
+ err = sahara_aes_process(skcipher_request_cast(async_req));
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, req, err);
+ local_bh_enable();
+ }
return 0;
}
@@ -1048,25 +1011,13 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
struct sahara_dev *dev = dev_ptr;
- int ret;
if (!req->nbytes && !last)
return 0;
rctx->last = last;
- if (!rctx->active) {
- rctx->active = 1;
- rctx->first = 1;
- }
-
- spin_lock_bh(&dev->queue_spinlock);
- ret = crypto_enqueue_request(&dev->queue, &req->base);
- spin_unlock_bh(&dev->queue_spinlock);
-
- wake_up_process(dev->kthread);
-
- return ret;
+ return crypto_transfer_hash_request_to_engine(dev->engine, req);
}
static int sahara_sha_init(struct ahash_request *req)
@@ -1090,7 +1041,7 @@ static int sahara_sha_init(struct ahash_request *req)
}
rctx->context_size = rctx->digest_size + 4;
- rctx->active = 0;
+ rctx->first = 1;
return 0;
}
@@ -1144,94 +1095,114 @@ static int sahara_sha_cra_init(struct crypto_tfm *tfm)
return 0;
}
-static struct skcipher_alg aes_algs[] = {
+static struct skcipher_engine_alg aes_algs[] = {
{
- .base.cra_name = "ecb(aes)",
- .base.cra_driver_name = "sahara-ecb-aes",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct sahara_ctx),
- .base.cra_alignmask = 0x0,
- .base.cra_module = THIS_MODULE,
-
- .init = sahara_aes_init_tfm,
- .exit = sahara_aes_exit_tfm,
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_ecb_encrypt,
- .decrypt = sahara_aes_ecb_decrypt,
+ .base = {
+ .base.cra_name = "ecb(aes)",
+ .base.cra_driver_name = "sahara-ecb-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_ecb_encrypt,
+ .decrypt = sahara_aes_ecb_decrypt,
+ },
+ .op = {
+ .do_one_request = sahara_do_one_request,
+ },
}, {
- .base.cra_name = "cbc(aes)",
- .base.cra_driver_name = "sahara-cbc-aes",
- .base.cra_priority = 300,
- .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
- .base.cra_blocksize = AES_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct sahara_ctx),
- .base.cra_alignmask = 0x0,
- .base.cra_module = THIS_MODULE,
-
- .init = sahara_aes_init_tfm,
- .exit = sahara_aes_exit_tfm,
- .min_keysize = AES_MIN_KEY_SIZE ,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = sahara_aes_setkey,
- .encrypt = sahara_aes_cbc_encrypt,
- .decrypt = sahara_aes_cbc_decrypt,
+ .base = {
+ .base.cra_name = "cbc(aes)",
+ .base.cra_driver_name = "sahara-cbc-aes",
+ .base.cra_priority = 300,
+ .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = AES_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct sahara_ctx),
+ .base.cra_alignmask = 0x0,
+ .base.cra_module = THIS_MODULE,
+
+ .init = sahara_aes_init_tfm,
+ .exit = sahara_aes_exit_tfm,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = sahara_aes_setkey,
+ .encrypt = sahara_aes_cbc_encrypt,
+ .decrypt = sahara_aes_cbc_decrypt,
+ },
+ .op = {
+ .do_one_request = sahara_do_one_request,
+ },
}
};
-static struct ahash_alg sha_v3_algs[] = {
+static struct ahash_engine_alg sha_v3_algs[] = {
{
- .init = sahara_sha_init,
- .update = sahara_sha_update,
- .final = sahara_sha_final,
- .finup = sahara_sha_finup,
- .digest = sahara_sha_digest,
- .export = sahara_sha_export,
- .import = sahara_sha_import,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct sahara_sha_reqctx),
- .halg.base = {
- .cra_name = "sha1",
- .cra_driver_name = "sahara-sha1",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_sha_cra_init,
- }
+ .base = {
+ .init = sahara_sha_init,
+ .update = sahara_sha_update,
+ .final = sahara_sha_final,
+ .finup = sahara_sha_finup,
+ .digest = sahara_sha_digest,
+ .export = sahara_sha_export,
+ .import = sahara_sha_import,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sahara-sha1",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_sha_cra_init,
+ }
+ },
+ .op = {
+ .do_one_request = sahara_do_one_request,
+ },
},
};
-static struct ahash_alg sha_v4_algs[] = {
+static struct ahash_engine_alg sha_v4_algs[] = {
{
- .init = sahara_sha_init,
- .update = sahara_sha_update,
- .final = sahara_sha_final,
- .finup = sahara_sha_finup,
- .digest = sahara_sha_digest,
- .export = sahara_sha_export,
- .import = sahara_sha_import,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct sahara_sha_reqctx),
- .halg.base = {
- .cra_name = "sha256",
- .cra_driver_name = "sahara-sha256",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct sahara_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = sahara_sha_cra_init,
- }
+ .base = {
+ .init = sahara_sha_init,
+ .update = sahara_sha_update,
+ .final = sahara_sha_final,
+ .finup = sahara_sha_finup,
+ .digest = sahara_sha_digest,
+ .export = sahara_sha_export,
+ .import = sahara_sha_import,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.statesize = sizeof(struct sahara_sha_reqctx),
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sahara-sha256",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct sahara_ctx),
+ .cra_alignmask = 0,
+ .cra_module = THIS_MODULE,
+ .cra_init = sahara_sha_cra_init,
+ }
+ },
+ .op = {
+ .do_one_request = sahara_do_one_request,
+ },
},
};
@@ -1246,14 +1217,11 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
sahara_decode_status(dev, stat);
- if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
+ if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY)
return IRQ_NONE;
- } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
- dev->error = 0;
- } else {
+
+ if (SAHARA_STATUS_GET_STATE(stat) != SAHARA_STATE_COMPLETE)
sahara_decode_error(dev, err);
- dev->error = -EINVAL;
- }
complete(&dev->dma_completion);
@@ -1264,57 +1232,42 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
static int sahara_register_algs(struct sahara_dev *dev)
{
int err;
- unsigned int i, j, k, l;
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
- err = crypto_register_skcipher(&aes_algs[i]);
- if (err)
- goto err_aes_algs;
- }
+ err = crypto_engine_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+ if (err)
+ return err;
- for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
- err = crypto_register_ahash(&sha_v3_algs[k]);
+ err = crypto_engine_register_ahashes(sha_v3_algs,
+ ARRAY_SIZE(sha_v3_algs));
+ if (err)
+ goto err_aes_algs;
+
+ if (dev->version > SAHARA_VERSION_3) {
+ err = crypto_engine_register_ahashes(sha_v4_algs,
+ ARRAY_SIZE(sha_v4_algs));
if (err)
goto err_sha_v3_algs;
}
- if (dev->version > SAHARA_VERSION_3)
- for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
- err = crypto_register_ahash(&sha_v4_algs[l]);
- if (err)
- goto err_sha_v4_algs;
- }
-
return 0;
-err_sha_v4_algs:
- for (j = 0; j < l; j++)
- crypto_unregister_ahash(&sha_v4_algs[j]);
-
err_sha_v3_algs:
- for (j = 0; j < k; j++)
- crypto_unregister_ahash(&sha_v3_algs[j]);
+ crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
err_aes_algs:
- for (j = 0; j < i; j++)
- crypto_unregister_skcipher(&aes_algs[j]);
+ crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
return err;
}
static void sahara_unregister_algs(struct sahara_dev *dev)
{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
- crypto_unregister_skcipher(&aes_algs[i]);
-
- for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
- crypto_unregister_ahash(&sha_v3_algs[i]);
+ crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
+ crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
if (dev->version > SAHARA_VERSION_3)
- for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
- crypto_unregister_ahash(&sha_v4_algs[i]);
+ crypto_engine_unregister_ahashes(sha_v4_algs,
+ ARRAY_SIZE(sha_v4_algs));
}
static const struct of_device_id sahara_dt_ids[] = {
@@ -1351,32 +1304,27 @@ static int sahara_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
0, dev_name(&pdev->dev), dev);
- if (err) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err,
+ "failed to request irq\n");
/* clocks */
- dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(dev->clk_ipg)) {
- dev_err(&pdev->dev, "Could not get ipg clock\n");
- return PTR_ERR(dev->clk_ipg);
- }
+ dev->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg");
+ if (IS_ERR(dev->clk_ipg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ipg),
+ "Could not get ipg clock\n");
- dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(dev->clk_ahb)) {
- dev_err(&pdev->dev, "Could not get ahb clock\n");
- return PTR_ERR(dev->clk_ahb);
- }
+ dev->clk_ahb = devm_clk_get_enabled(&pdev->dev, "ahb");
+ if (IS_ERR(dev->clk_ahb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ahb),
+ "Could not get ahb clock\n");
/* Allocate HW descriptors */
dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
&dev->hw_phys_desc[0], GFP_KERNEL);
- if (!dev->hw_desc[0]) {
- dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
+ if (!dev->hw_desc[0])
return -ENOMEM;
- }
dev->hw_desc[1] = dev->hw_desc[0] + 1;
dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
sizeof(struct sahara_hw_desc);
@@ -1384,10 +1332,8 @@ static int sahara_probe(struct platform_device *pdev)
/* Allocate space for iv and key */
dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
&dev->key_phys_base, GFP_KERNEL);
- if (!dev->key_base) {
- dev_err(&pdev->dev, "Could not allocate memory for key\n");
+ if (!dev->key_base)
return -ENOMEM;
- }
dev->iv_base = dev->key_base + AES_KEYSIZE_128;
dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
@@ -1395,45 +1341,36 @@ static int sahara_probe(struct platform_device *pdev)
dev->context_base = dmam_alloc_coherent(&pdev->dev,
SHA256_DIGEST_SIZE + 4,
&dev->context_phys_base, GFP_KERNEL);
- if (!dev->context_base) {
- dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
+ if (!dev->context_base)
return -ENOMEM;
- }
/* Allocate space for HW links */
dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
&dev->hw_phys_link[0], GFP_KERNEL);
- if (!dev->hw_link[0]) {
- dev_err(&pdev->dev, "Could not allocate hw links\n");
+ if (!dev->hw_link[0])
return -ENOMEM;
- }
for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
sizeof(struct sahara_hw_link);
dev->hw_link[i] = dev->hw_link[i - 1] + 1;
}
- crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
-
- spin_lock_init(&dev->queue_spinlock);
-
dev_ptr = dev;
- dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
- if (IS_ERR(dev->kthread)) {
- return PTR_ERR(dev->kthread);
+ dev->engine = crypto_engine_alloc_init(&pdev->dev, true);
+ if (!dev->engine)
+ return -ENOMEM;
+
+ err = crypto_engine_start(dev->engine);
+ if (err) {
+ crypto_engine_exit(dev->engine);
+ return dev_err_probe(&pdev->dev, err,
+ "Could not start crypto engine\n");
}
init_completion(&dev->dma_completion);
- err = clk_prepare_enable(dev->clk_ipg);
- if (err)
- return err;
- err = clk_prepare_enable(dev->clk_ahb);
- if (err)
- goto clk_ipg_disable;
-
version = sahara_read(dev, SAHARA_REG_VERSION);
if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
if (version != SAHARA_VERSION_3)
@@ -1445,8 +1382,8 @@ static int sahara_probe(struct platform_device *pdev)
version = (version >> 8) & 0xff;
}
if (err == -ENODEV) {
- dev_err(&pdev->dev, "SAHARA version %d not supported\n",
- version);
+ dev_err_probe(&pdev->dev, err,
+ "SAHARA version %d not supported\n", version);
goto err_algs;
}
@@ -1469,11 +1406,7 @@ static int sahara_probe(struct platform_device *pdev)
return 0;
err_algs:
- kthread_stop(dev->kthread);
- dev_ptr = NULL;
- clk_disable_unprepare(dev->clk_ahb);
-clk_ipg_disable:
- clk_disable_unprepare(dev->clk_ipg);
+ crypto_engine_exit(dev->engine);
return err;
}
@@ -1482,14 +1415,8 @@ static void sahara_remove(struct platform_device *pdev)
{
struct sahara_dev *dev = platform_get_drvdata(pdev);
- kthread_stop(dev->kthread);
-
+ crypto_engine_exit(dev->engine);
sahara_unregister_algs(dev);
-
- clk_disable_unprepare(dev->clk_ipg);
- clk_disable_unprepare(dev->clk_ahb);
-
- dev_ptr = NULL;
}
static struct platform_driver sahara_driver = {
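The sahara rework above drops the driver's private kthread and crypto_queue in favour of the generic crypto engine: probe allocates and starts an engine, the encrypt/decrypt/hash entry points hand requests over with crypto_transfer_*_request_to_engine(), and the engine's do_one_request callback runs the hardware and finalizes the request. A condensed sketch of the callback side only; example_hw_process is a hypothetical stand-in for the driver's processing routine:

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

static int example_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	int err;

	err = example_hw_process(req);	/* drive the hardware, wait for DMA */

	/* hand the result back to the engine outside of hard-IRQ context */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();

	return 0;
}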
diff --git a/drivers/crypto/starfive/Kconfig b/drivers/crypto/starfive/Kconfig
index 2cb192502c..cb59357b58 100644
--- a/drivers/crypto/starfive/Kconfig
+++ b/drivers/crypto/starfive/Kconfig
@@ -4,7 +4,7 @@
config CRYPTO_DEV_JH7110
tristate "StarFive JH7110 cryptographic engine driver"
- depends on SOC_STARFIVE || AMBA_PL08X || COMPILE_TEST
+ depends on (SOC_STARFIVE && AMBA_PL08X) || COMPILE_TEST
depends on HAS_DMA
select CRYPTO_ENGINE
select CRYPTO_HMAC
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index 9378e6682f..1ac15cc4ef 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -262,12 +262,7 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
rctx->csr.aes.mode = hw_mode;
rctx->csr.aes.cmode = !is_encrypt(cryp);
rctx->csr.aes.ie = 1;
-
- if (hw_mode == STARFIVE_AES_MODE_CFB ||
- hw_mode == STARFIVE_AES_MODE_OFB)
- rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_128;
- else
- rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
+ rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1;
if (cryp->side_chan) {
rctx->csr.aes.delay_aes = 1;
@@ -294,8 +289,6 @@ static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx)
starfive_aes_ccm_init(ctx);
starfive_aes_aead_hw_start(ctx, hw_mode);
break;
- case STARFIVE_AES_MODE_OFB:
- case STARFIVE_AES_MODE_CFB:
case STARFIVE_AES_MODE_CBC:
case STARFIVE_AES_MODE_CTR:
starfive_aes_write_iv(ctx, (void *)cryp->req.sreq->iv);
@@ -500,7 +493,7 @@ static int starfive_aes_prepare_req(struct skcipher_request *req,
scatterwalk_start(&cryp->out_walk, rctx->out_sg);
if (cryp->assoclen) {
- rctx->adata = kzalloc(ALIGN(cryp->assoclen, AES_BLOCK_SIZE), GFP_KERNEL);
+ rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
if (!rctx->adata)
return dev_err_probe(cryp->dev, -ENOMEM,
"Failed to alloc memory for adata");
@@ -569,7 +562,7 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
struct starfive_cryp_ctx *ctx =
crypto_aead_ctx(crypto_aead_reqtfm(req));
struct starfive_cryp_dev *cryp = ctx->cryp;
- struct starfive_cryp_request_ctx *rctx = ctx->rctx;
+ struct starfive_cryp_request_ctx *rctx;
u32 block[AES_BLOCK_32];
u32 stat;
int err;
@@ -579,6 +572,8 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
if (err)
return err;
+ rctx = ctx->rctx;
+
if (!cryp->assoclen)
goto write_text;
@@ -783,26 +778,6 @@ static int starfive_aes_cbc_decrypt(struct skcipher_request *req)
return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC);
}
-static int starfive_aes_cfb_encrypt(struct skcipher_request *req)
-{
- return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB | FLG_ENCRYPT);
-}
-
-static int starfive_aes_cfb_decrypt(struct skcipher_request *req)
-{
- return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB);
-}
-
-static int starfive_aes_ofb_encrypt(struct skcipher_request *req)
-{
- return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB | FLG_ENCRYPT);
-}
-
-static int starfive_aes_ofb_decrypt(struct skcipher_request *req)
-{
- return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB);
-}
-
static int starfive_aes_ctr_encrypt(struct skcipher_request *req)
{
return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR | FLG_ENCRYPT);
@@ -908,48 +883,6 @@ static struct skcipher_engine_alg skcipher_algs[] = {
.op = {
.do_one_request = starfive_aes_do_one_req,
},
-}, {
- .base.init = starfive_aes_init_tfm,
- .base.setkey = starfive_aes_setkey,
- .base.encrypt = starfive_aes_cfb_encrypt,
- .base.decrypt = starfive_aes_cfb_decrypt,
- .base.min_keysize = AES_MIN_KEY_SIZE,
- .base.max_keysize = AES_MAX_KEY_SIZE,
- .base.ivsize = AES_BLOCK_SIZE,
- .base.base = {
- .cra_name = "cfb(aes)",
- .cra_driver_name = "starfive-cfb-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- },
- .op = {
- .do_one_request = starfive_aes_do_one_req,
- },
-}, {
- .base.init = starfive_aes_init_tfm,
- .base.setkey = starfive_aes_setkey,
- .base.encrypt = starfive_aes_ofb_encrypt,
- .base.decrypt = starfive_aes_ofb_decrypt,
- .base.min_keysize = AES_MIN_KEY_SIZE,
- .base.max_keysize = AES_MAX_KEY_SIZE,
- .base.ivsize = AES_BLOCK_SIZE,
- .base.base = {
- .cra_name = "ofb(aes)",
- .cra_driver_name = "starfive-ofb-aes",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 0xf,
- .cra_module = THIS_MODULE,
- },
- .op = {
- .do_one_request = starfive_aes_do_one_req,
- },
},
};
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index 4f5b681820..425fddf3a8 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -109,12 +109,6 @@ static irqreturn_t starfive_cryp_irq(int irq, void *priv)
tasklet_schedule(&cryp->hash_done);
}
- if (status & STARFIVE_IE_FLAG_PKA_DONE) {
- mask |= STARFIVE_IE_MASK_PKA_DONE;
- writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET);
- complete(&cryp->pka_done);
- }
-
return IRQ_HANDLED;
}
@@ -159,8 +153,6 @@ static int starfive_cryp_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst),
"Error getting hardware reset line\n");
- init_completion(&cryp->pka_done);
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
diff --git a/drivers/crypto/starfive/jh7110-cryp.h b/drivers/crypto/starfive/jh7110-cryp.h
index fe011d5047..6cdf6db5d9 100644
--- a/drivers/crypto/starfive/jh7110-cryp.h
+++ b/drivers/crypto/starfive/jh7110-cryp.h
@@ -50,8 +50,6 @@ union starfive_aes_csr {
u32 ccm_start :1;
#define STARFIVE_AES_MODE_ECB 0x0
#define STARFIVE_AES_MODE_CBC 0x1
-#define STARFIVE_AES_MODE_CFB 0x2
-#define STARFIVE_AES_MODE_OFB 0x3
#define STARFIVE_AES_MODE_CTR 0x4
#define STARFIVE_AES_MODE_CCM 0x5
#define STARFIVE_AES_MODE_GCM 0x6
@@ -125,6 +123,15 @@ union starfive_pka_cacr {
};
};
+union starfive_pka_casr {
+ u32 v;
+ struct {
+#define STARFIVE_PKA_DONE BIT(0)
+ u32 done :1;
+ u32 rsvd_0 :31;
+ };
+};
+
struct starfive_rsa_key {
u8 *n;
u8 *e;
@@ -183,7 +190,6 @@ struct starfive_cryp_dev {
struct crypto_engine *engine;
struct tasklet_struct aes_done;
struct tasklet_struct hash_done;
- struct completion pka_done;
size_t assoclen;
size_t total_in;
size_t total_out;
diff --git a/drivers/crypto/starfive/jh7110-rsa.c b/drivers/crypto/starfive/jh7110-rsa.c
index f31bbd825f..cf8bda7f08 100644
--- a/drivers/crypto/starfive/jh7110-rsa.c
+++ b/drivers/crypto/starfive/jh7110-rsa.c
@@ -6,13 +6,7 @@
*/
#include <linux/crypto.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-direct.h>
-#include <linux/interrupt.h>
#include <linux/iopoll.h>
-#include <linux/io.h>
-#include <linux/mod_devicetable.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
#include <crypto/internal/akcipher.h>
@@ -28,13 +22,13 @@
#define STARFIVE_PKA_CAER_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x108)
#define STARFIVE_PKA_CANR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x208)
-// R^2 mod N and N0'
+/* R ^ 2 mod N and N0' */
#define CRYPTO_CMD_PRE 0x0
-// A * R mod N ==> A
+/* A * R mod N ==> A */
#define CRYPTO_CMD_ARN 0x5
-// A * E * R mod N ==> A
+/* A * E * R mod N ==> A */
#define CRYPTO_CMD_AERN 0x6
-// A * A * R mod N ==> A
+/* A * A * R mod N ==> A */
#define CRYPTO_CMD_AARN 0x7
#define STARFIVE_RSA_MAX_KEYSZ 256
@@ -43,31 +37,17 @@
static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx)
{
struct starfive_cryp_dev *cryp = ctx->cryp;
+ u32 status;
- return wait_for_completion_timeout(&cryp->pka_done,
- usecs_to_jiffies(100000));
-}
-
-static inline void starfive_pka_irq_mask_clear(struct starfive_cryp_ctx *ctx)
-{
- struct starfive_cryp_dev *cryp = ctx->cryp;
- u32 stat;
-
- stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET);
- stat &= ~STARFIVE_IE_MASK_PKA_DONE;
- writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET);
-
- reinit_completion(&cryp->pka_done);
+ return readl_relaxed_poll_timeout(cryp->base + STARFIVE_PKA_CASR_OFFSET, status,
+ status & STARFIVE_PKA_DONE, 10, 100000);
}
static void starfive_rsa_free_key(struct starfive_rsa_key *key)
{
- if (key->d)
- kfree_sensitive(key->d);
- if (key->e)
- kfree_sensitive(key->e);
- if (key->n)
- kfree_sensitive(key->n);
+ kfree_sensitive(key->d);
+ kfree_sensitive(key->e);
+ kfree_sensitive(key->n);
memset(key, 0, sizeof(*key));
}
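
Editor's note on the starfive_rsa_free_key() simplification above: kfree_sensitive(), like kfree(), is a no-op when handed a NULL pointer, so the removed per-field "if (ptr)" guards were redundant. A minimal sketch of the same pattern, using illustrative names (example_free_secret and buf are not part of the driver):

	#include <linux/slab.h>

	/* kfree_sensitive() zeroes the allocation before freeing it and,
	 * like kfree(), safely ignores NULL, so no guard is needed. */
	static void example_free_secret(u8 **buf)
	{
		kfree_sensitive(*buf);
		*buf = NULL;
	}
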
@@ -114,10 +94,9 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
rctx->csr.pka.not_r2 = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
return -ETIMEDOUT;
for (loop = 0; loop <= opsize; loop++)
@@ -136,10 +115,9 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
rctx->csr.pka.start = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
return -ETIMEDOUT;
} else {
rctx->csr.pka.v = 0;
@@ -151,10 +129,9 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
rctx->csr.pka.pre_expf = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
return -ETIMEDOUT;
for (loop = 0; loop <= count; loop++)
@@ -172,10 +149,9 @@ static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx,
rctx->csr.pka.start = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
return -ETIMEDOUT;
}
@@ -226,11 +202,10 @@ static int starfive_rsa_cpu_start(struct starfive_cryp_ctx *ctx, u32 *result,
rctx->csr.pka.start = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
ret = -ETIMEDOUT;
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
goto rsa_err;
if (mlen) {
@@ -242,10 +217,9 @@ static int starfive_rsa_cpu_start(struct starfive_cryp_ctx *ctx, u32 *result,
rctx->csr.pka.start = 1;
rctx->csr.pka.ie = 1;
- starfive_pka_irq_mask_clear(ctx);
writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET);
- if (!starfive_pka_wait_done(ctx))
+ if (starfive_pka_wait_done(ctx))
goto rsa_err;
}
}
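
Editor's note on the jh7110-rsa.c hunks above: the IRQ-driven completion is replaced by busy-wait polling of the PKA status register through readl_relaxed_poll_timeout(), which returns 0 once the condition becomes true and -ETIMEDOUT otherwise. That is also why the call sites flip from "if (!starfive_pka_wait_done(ctx))" to "if (starfive_pka_wait_done(ctx))": wait_for_completion_timeout() reported success with a non-zero remaining-time value, whereas the poll helper reports success with 0. A minimal, self-contained sketch of the polling pattern follows; example_wait_done, the 0x10 register offset and the BIT(0) done flag are illustrative placeholders, not values from the JH7110 register map:

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/iopoll.h>

	/* Poll a "done" bit every 10 us and give up after 100 ms. */
	static int example_wait_done(void __iomem *base)
	{
		u32 status;

		return readl_relaxed_poll_timeout(base + 0x10, status,
						  status & BIT(0), 10, 100000);
	}
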
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index c3cbc26733..11ad4ffdce 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -838,7 +838,7 @@ static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq);
static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm)
{
- tfm->reqsize = sizeof(struct stm32_cryp_reqctx);
+ crypto_aead_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx));
return 0;
}
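
Editor's note on the stm32-cryp hunk above: the direct write to tfm->reqsize is replaced by the crypto_aead_set_reqsize() accessor from <crypto/internal/aead.h>, the conventional way for an AEAD .init hook to declare how much per-request context the crypto core should allocate. A minimal sketch under that assumption, with illustrative names (example_reqctx and example_aead_init are not from the driver):

	#include <crypto/internal/aead.h>

	struct example_reqctx {
		u8 iv[16];	/* whatever per-request state the driver tracks */
	};

	static int example_aead_init(struct crypto_aead *tfm)
	{
		/* Tell the AEAD core the per-request context size. */
		crypto_aead_set_reqsize(tfm, sizeof(struct example_reqctx));
		return 0;
	}
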