Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Kconfig | 1
-rw-r--r--  drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c | 7
-rw-r--r--  drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 11
-rw-r--r--  drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c | 11
-rw-r--r--  drivers/crypto/amcc/crypto4xx_core.c | 6
-rw-r--r--  drivers/crypto/amlogic/amlogic-gxl-core.c | 5
-rw-r--r--  drivers/crypto/aspeed/aspeed-acry.c | 6
-rw-r--r--  drivers/crypto/aspeed/aspeed-hace.c | 16
-rw-r--r--  drivers/crypto/atmel-aes.c | 6
-rw-r--r--  drivers/crypto/atmel-sha.c | 8
-rw-r--r--  drivers/crypto/atmel-tdes.c | 6
-rw-r--r--  drivers/crypto/axis/artpec6_crypto.c | 8
-rw-r--r--  drivers/crypto/bcm/cipher.c | 5
-rw-r--r--  drivers/crypto/caam/caamalg_qi2.c | 7
-rw-r--r--  drivers/crypto/caam/caamhash.c | 7
-rw-r--r--  drivers/crypto/caam/jr.c | 22
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_hal.c | 2
-rw-r--r--  drivers/crypto/ccp/dbc.c | 72
-rw-r--r--  drivers/crypto/ccp/dbc.h | 29
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 122
-rw-r--r--  drivers/crypto/ccp/psp-dev.h | 55
-rw-r--r--  drivers/crypto/ccp/sev-dev.c | 24
-rw-r--r--  drivers/crypto/ccp/sp-dev.h | 4
-rw-r--r--  drivers/crypto/ccp/sp-pci.c | 22
-rw-r--r--  drivers/crypto/ccp/sp-platform.c | 6
-rw-r--r--  drivers/crypto/ccp/tee-dev.c | 48
-rw-r--r--  drivers/crypto/ccp/tee-dev.h | 15
-rw-r--r--  drivers/crypto/ccree/cc_driver.c | 6
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 9
-rw-r--r--  drivers/crypto/exynos-rng.c | 6
-rw-r--r--  drivers/crypto/gemini/sl3516-ce-core.c | 6
-rw-r--r--  drivers/crypto/hifn_795x.c | 11
-rw-r--r--  drivers/crypto/hisilicon/debugfs.c | 75
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_crypto.c | 25
-rw-r--r--  drivers/crypto/hisilicon/hpre/hpre_main.c | 14
-rw-r--r--  drivers/crypto/hisilicon/qm.c | 383
-rw-r--r--  drivers/crypto/hisilicon/qm_common.h | 5
-rw-r--r--  drivers/crypto/hisilicon/sec/sec_drv.c | 6
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_crypto.c | 29
-rw-r--r--  drivers/crypto/hisilicon/sec2/sec_main.c | 24
-rw-r--r--  drivers/crypto/hisilicon/trng/trng.c | 6
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_crypto.c | 290
-rw-r--r--  drivers/crypto/hisilicon/zip/zip_main.c | 18
-rw-r--r--  drivers/crypto/img-hash.c | 6
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 6
-rw-r--r--  drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c | 6
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-aes-core.c | 6
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-ecc.c | 6
-rw-r--r--  drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c | 15
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c | 36
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h | 30
-rw-r--r--  drivers/crypto/intel/qat/qat_4xxx/adf_drv.c | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_c62x/adf_drv.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/Makefile | 10
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_accel_devices.h | 59
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.c | 119
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_admin.h | 27
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_services.c | 20
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cfg_services.h | 14
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_clock.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c | 300
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_common_drv.h | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_dbgfs.c | 6
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_fw_counters.c | 3
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h | 7
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c | 27
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h | 50
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c | 266
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c | 1566
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h | 825
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_init.c | 43
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_isr.c | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c | 48
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h | 12
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl.c | 1168
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl.h | 177
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl_admin.c | 97
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_rl_admin.h | 18
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs.c | 88
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c | 117
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h | 27
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c | 459
-rw-r--r--  drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h | 11
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h | 81
-rw-r--r--  drivers/crypto/intel/qat/qat_common/icp_qat_hw.h | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_comp_algs.c | 129
-rw-r--r--  drivers/crypto/intel/qat/qat_common/qat_uclo.c | 2
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c | 1
-rw-r--r--  drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c | 1
-rw-r--r--  drivers/crypto/marvell/cesa/cesa.c | 6
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptlf.c | 6
-rw-r--r--  drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c | 3
-rw-r--r--  drivers/crypto/mxs-dcp.c | 8
-rw-r--r--  drivers/crypto/n2_core.c | 12
-rw-r--r--  drivers/crypto/omap-aes.c | 6
-rw-r--r--  drivers/crypto/omap-des.c | 6
-rw-r--r--  drivers/crypto/omap-sham.c | 22
-rw-r--r--  drivers/crypto/qce/core.c | 5
-rw-r--r--  drivers/crypto/qcom-rng.c | 71
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto.c | 5
-rw-r--r--  drivers/crypto/rockchip/rk3288_crypto_ahash.c | 3
-rw-r--r--  drivers/crypto/s5p-sss.c | 12
-rw-r--r--  drivers/crypto/sa2ul.c | 6
-rw-r--r--  drivers/crypto/sahara.c | 6
-rw-r--r--  drivers/crypto/starfive/jh7110-cryp.c | 2
-rw-r--r--  drivers/crypto/starfive/jh7110-hash.c | 13
-rw-r--r--  drivers/crypto/stm32/stm32-crc32.c | 19
-rw-r--r--  drivers/crypto/stm32/stm32-cryp.c | 16
-rw-r--r--  drivers/crypto/stm32/stm32-hash.c | 20
-rw-r--r--  drivers/crypto/talitos.c | 23
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_akcipher_algs.c | 5
-rw-r--r--  drivers/crypto/vmx/aesp8-ppc.pl | 141
-rw-r--r--  drivers/crypto/xilinx/zynqmp-aes-gcm.c | 6
-rw-r--r--  drivers/crypto/xilinx/zynqmp-sha.c | 7
124 files changed, 6626 insertions(+), 1198 deletions(-)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index c761952f0d..79c3bb9c99 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -601,6 +601,7 @@ config CRYPTO_DEV_QCE_SW_MAX_LEN
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on HW_RANDOM
select CRYPTO_RNG
help
This driver provides support for the Random Number
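The new HW_RANDOM dependency pairs with the qcom-rng.c rework counted in the diffstat above (71 lines): the driver now also feeds entropy through the kernel's hw_random framework rather than only the crypto RNG API. A minimal sketch of that registration pattern, with illustrative names rather than the driver's actual code:

#include <linux/hw_random.h>

static int example_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	/* read up to max bytes from the PRNG hardware into data */
	return 0;	/* number of bytes actually produced */
}

static struct hwrng example_hwrng = {
	.name = "example-qcom-rng",
	.read = example_rng_read,
};

/* in probe(): devm_hwrng_register(dev, &example_hwrng); */

Without HW_RANDOM enabled, hwrng_register() and friends do not exist, hence the hard Kconfig dependency.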
diff --git a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
index 3bcfcfc370..890664bd5f 100644
--- a/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
+++ b/drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
@@ -49,7 +49,6 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-sun4i-ss",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
@@ -76,7 +75,6 @@ static struct sun4i_ss_alg_template ss_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sha1-sun4i-ss",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sun4i_req_ctx),
.cra_module = THIS_MODULE,
@@ -509,7 +507,7 @@ error_pm:
return err;
}
-static int sun4i_ss_remove(struct platform_device *pdev)
+static void sun4i_ss_remove(struct platform_device *pdev)
{
int i;
struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev);
@@ -529,7 +527,6 @@ static int sun4i_ss_remove(struct platform_device *pdev)
}
sun4i_ss_pm_exit(ss);
- return 0;
}
static const struct of_device_id a20ss_crypto_of_match_table[] = {
@@ -545,7 +542,7 @@ MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table);
static struct platform_driver sun4i_ss_driver = {
.probe = sun4i_ss_probe,
- .remove = sun4i_ss_remove,
+ .remove_new = sun4i_ss_remove,
.driver = {
.name = "sun4i-ss",
.pm = &sun4i_ss_pm_ops,
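This is the first of many identical conversions in this series: platform-driver remove callbacks move from returning int to returning void, wired up through the transitional .remove_new member. The driver core never acted on a non-zero return from remove anyway, so the int prototype only invited dead "return 0;" lines. The before/after shape, with hypothetical foo_* names:

/* before: the return value suggests errors can be handled; they cannot */
static int foo_remove(struct platform_device *pdev)
{
	foo_teardown(platform_get_drvdata(pdev));
	return 0;
}

/* after: void makes "remove cannot fail" explicit */
static void foo_remove(struct platform_device *pdev)
{
	foo_teardown(platform_get_drvdata(pdev));
}

static struct platform_driver foo_driver = {
	.probe      = foo_probe,
	.remove_new = foo_remove,	/* void-returning variant */
};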
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index d4ccd52542..0408b2d5d5 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -414,7 +414,6 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-sun8i-ce",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -448,7 +447,6 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sha1-sun8i-ce",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -481,7 +479,6 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_name = "sha224",
.cra_driver_name = "sha224-sun8i-ce",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -514,7 +511,6 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sha256-sun8i-ce",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -547,7 +543,6 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_name = "sha384",
.cra_driver_name = "sha384-sun8i-ce",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -580,7 +575,6 @@ static struct sun8i_ce_alg_template ce_algs[] = {
.cra_name = "sha512",
.cra_driver_name = "sha512-sun8i-ce",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -1071,7 +1065,7 @@ error_pm:
return err;
}
-static int sun8i_ce_remove(struct platform_device *pdev)
+static void sun8i_ce_remove(struct platform_device *pdev)
{
struct sun8i_ce_dev *ce = platform_get_drvdata(pdev);
@@ -1088,7 +1082,6 @@ static int sun8i_ce_remove(struct platform_device *pdev)
sun8i_ce_free_chanlist(ce, MAXFLOW - 1);
sun8i_ce_pm_exit(ce);
- return 0;
}
static const struct of_device_id sun8i_ce_crypto_of_match_table[] = {
@@ -1110,7 +1103,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table);
static struct platform_driver sun8i_ce_driver = {
.probe = sun8i_ce_probe,
- .remove = sun8i_ce_remove,
+ .remove_new = sun8i_ce_remove,
.driver = {
.name = "sun8i-ce",
.pm = &sun8i_ce_pm_ops,
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index 4a9587285c..0dbc022014 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -322,7 +322,6 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_name = "md5",
.cra_driver_name = "md5-sun8i-ss",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -355,7 +354,6 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "sha1-sun8i-ss",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -388,7 +386,6 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_name = "sha224",
.cra_driver_name = "sha224-sun8i-ss",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -421,7 +418,6 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "sha256-sun8i-ss",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -455,7 +451,6 @@ static struct sun8i_ss_alg_template ss_algs[] = {
.cra_name = "hmac(sha1)",
.cra_driver_name = "hmac-sha1-sun8i-ss",
.cra_priority = 300,
- .cra_alignmask = 3,
.cra_flags = CRYPTO_ALG_TYPE_AHASH |
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK,
@@ -908,7 +903,7 @@ error_pm:
return err;
}
-static int sun8i_ss_remove(struct platform_device *pdev)
+static void sun8i_ss_remove(struct platform_device *pdev)
{
struct sun8i_ss_dev *ss = platform_get_drvdata(pdev);
@@ -921,8 +916,6 @@ static int sun8i_ss_remove(struct platform_device *pdev)
sun8i_ss_free_flows(ss, MAXFLOW - 1);
sun8i_ss_pm_exit(ss);
-
- return 0;
}
static const struct of_device_id sun8i_ss_crypto_of_match_table[] = {
@@ -936,7 +929,7 @@ MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table);
static struct platform_driver sun8i_ss_driver = {
.probe = sun8i_ss_probe,
- .remove = sun8i_ss_remove,
+ .remove_new = sun8i_ss_remove,
.driver = {
.name = "sun8i-ss",
.pm = &sun8i_ss_pm_ops,
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index d553f3f1ef..8d53372245 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1507,7 +1507,7 @@ err_alloc_dev:
return rc;
}
-static int crypto4xx_remove(struct platform_device *ofdev)
+static void crypto4xx_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
@@ -1523,8 +1523,6 @@ static int crypto4xx_remove(struct platform_device *ofdev)
mutex_destroy(&core_dev->rng_lock);
/* Free all allocated memory */
crypto4xx_stop_all(core_dev);
-
- return 0;
}
static const struct of_device_id crypto4xx_match[] = {
@@ -1539,7 +1537,7 @@ static struct platform_driver crypto4xx_driver = {
.of_match_table = crypto4xx_match,
},
.probe = crypto4xx_probe,
- .remove = crypto4xx_remove,
+ .remove_new = crypto4xx_remove,
};
module_platform_driver(crypto4xx_driver);
diff --git a/drivers/crypto/amlogic/amlogic-gxl-core.c b/drivers/crypto/amlogic/amlogic-gxl-core.c
index da6dfe0f9a..f54ab0d0b1 100644
--- a/drivers/crypto/amlogic/amlogic-gxl-core.c
+++ b/drivers/crypto/amlogic/amlogic-gxl-core.c
@@ -299,7 +299,7 @@ error_flow:
return err;
}
-static int meson_crypto_remove(struct platform_device *pdev)
+static void meson_crypto_remove(struct platform_device *pdev)
{
struct meson_dev *mc = platform_get_drvdata(pdev);
@@ -312,7 +312,6 @@ static int meson_crypto_remove(struct platform_device *pdev)
meson_free_chanlist(mc, MAXFLOW - 1);
clk_disable_unprepare(mc->busclk);
- return 0;
}
static const struct of_device_id meson_crypto_of_match_table[] = {
@@ -323,7 +322,7 @@ MODULE_DEVICE_TABLE(of, meson_crypto_of_match_table);
static struct platform_driver meson_crypto_driver = {
.probe = meson_crypto_probe,
- .remove = meson_crypto_remove,
+ .remove_new = meson_crypto_remove,
.driver = {
.name = "gxl-crypto",
.of_match_table = meson_crypto_of_match_table,
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index 247c568aa8..b4613bd4ad 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -794,7 +794,7 @@ clk_exit:
return rc;
}
-static int aspeed_acry_remove(struct platform_device *pdev)
+static void aspeed_acry_remove(struct platform_device *pdev)
{
struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev);
@@ -802,15 +802,13 @@ static int aspeed_acry_remove(struct platform_device *pdev)
crypto_engine_exit(acry_dev->crypt_engine_rsa);
tasklet_kill(&acry_dev->done_task);
clk_disable_unprepare(acry_dev->clk);
-
- return 0;
}
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
static struct platform_driver aspeed_acry_driver = {
.probe = aspeed_acry_probe,
- .remove = aspeed_acry_remove,
+ .remove_new = aspeed_acry_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_acry_of_matches,
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
index 8f7aab82e1..062f2a66dd 100644
--- a/drivers/crypto/aspeed/aspeed-hace.c
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -12,11 +12,9 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define HACE_DBG(d, fmt, ...) \
@@ -101,7 +99,6 @@ static const struct of_device_id aspeed_hace_of_matches[] = {
static int aspeed_hace_probe(struct platform_device *pdev)
{
struct aspeed_engine_crypto *crypto_engine;
- const struct of_device_id *hace_dev_id;
struct aspeed_engine_hash *hash_engine;
struct aspeed_hace_dev *hace_dev;
int rc;
@@ -111,14 +108,13 @@ static int aspeed_hace_probe(struct platform_device *pdev)
if (!hace_dev)
return -ENOMEM;
- hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev);
- if (!hace_dev_id) {
+ hace_dev->version = (uintptr_t)device_get_match_data(&pdev->dev);
+ if (!hace_dev->version) {
dev_err(&pdev->dev, "Failed to match hace dev id\n");
return -EINVAL;
}
hace_dev->dev = &pdev->dev;
- hace_dev->version = (unsigned long)hace_dev_id->data;
hash_engine = &hace_dev->hash_engine;
crypto_engine = &hace_dev->crypto_engine;
@@ -249,7 +245,7 @@ clk_exit:
return rc;
}
-static int aspeed_hace_remove(struct platform_device *pdev)
+static void aspeed_hace_remove(struct platform_device *pdev)
{
struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev);
struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
@@ -264,15 +260,13 @@ static int aspeed_hace_remove(struct platform_device *pdev)
tasklet_kill(&crypto_engine->done_task);
clk_disable_unprepare(hace_dev->clk);
-
- return 0;
}
MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);
static struct platform_driver aspeed_hace_driver = {
.probe = aspeed_hace_probe,
- .remove = aspeed_hace_remove,
+ .remove_new = aspeed_hace_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = aspeed_hace_of_matches,
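Beyond the remove_new conversion, aspeed-hace also switches from an explicit of_match_device() lookup to device_get_match_data(), which returns the matched entry's .data pointer directly and works for any firmware interface, not just OF. A condensed view of the two patterns (error handling elided):

/* old: fetch the whole match entry, then extract .data */
const struct of_device_id *id =
	of_match_device(aspeed_hace_of_matches, &pdev->dev);
version = (unsigned long)id->data;

/* new: one call, firmware-agnostic */
version = (uintptr_t)device_get_match_data(&pdev->dev);

Note the subtle behavior change visible in the hunk: match data of 0 now reads as "no match", which works here because the supported HACE variants use non-zero version codes.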
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 55b5f577b0..d1d93e8978 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -2648,7 +2648,7 @@ err_tasklet_kill:
return err;
}
-static int atmel_aes_remove(struct platform_device *pdev)
+static void atmel_aes_remove(struct platform_device *pdev)
{
struct atmel_aes_dev *aes_dd;
@@ -2667,13 +2667,11 @@ static int atmel_aes_remove(struct platform_device *pdev)
atmel_aes_buff_cleanup(aes_dd);
clk_unprepare(aes_dd->iclk);
-
- return 0;
}
static struct platform_driver atmel_aes_driver = {
.probe = atmel_aes_probe,
- .remove = atmel_aes_remove,
+ .remove_new = atmel_aes_remove,
.driver = {
.name = "atmel_aes",
.of_match_table = atmel_aes_dt_ids,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 3622120add..f4cd6158a4 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1300,7 +1300,6 @@ static struct ahash_alg sha_384_512_algs[] = {
.halg.base.cra_name = "sha384",
.halg.base.cra_driver_name = "atmel-sha384",
.halg.base.cra_blocksize = SHA384_BLOCK_SIZE,
- .halg.base.cra_alignmask = 0x3,
.halg.digestsize = SHA384_DIGEST_SIZE,
},
@@ -1308,7 +1307,6 @@ static struct ahash_alg sha_384_512_algs[] = {
.halg.base.cra_name = "sha512",
.halg.base.cra_driver_name = "atmel-sha512",
.halg.base.cra_blocksize = SHA512_BLOCK_SIZE,
- .halg.base.cra_alignmask = 0x3,
.halg.digestsize = SHA512_DIGEST_SIZE,
},
@@ -2680,7 +2678,7 @@ err_tasklet_kill:
return err;
}
-static int atmel_sha_remove(struct platform_device *pdev)
+static void atmel_sha_remove(struct platform_device *pdev)
{
struct atmel_sha_dev *sha_dd = platform_get_drvdata(pdev);
@@ -2697,13 +2695,11 @@ static int atmel_sha_remove(struct platform_device *pdev)
atmel_sha_dma_cleanup(sha_dd);
clk_unprepare(sha_dd->iclk);
-
- return 0;
}
static struct platform_driver atmel_sha_driver = {
.probe = atmel_sha_probe,
- .remove = atmel_sha_remove,
+ .remove_new = atmel_sha_remove,
.driver = {
.name = "atmel_sha",
.of_match_table = atmel_sha_dt_ids,
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 099b32a10d..27b7000e25 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1246,7 +1246,7 @@ err_tasklet_kill:
return err;
}
-static int atmel_tdes_remove(struct platform_device *pdev)
+static void atmel_tdes_remove(struct platform_device *pdev)
{
struct atmel_tdes_dev *tdes_dd = platform_get_drvdata(pdev);
@@ -1263,13 +1263,11 @@ static int atmel_tdes_remove(struct platform_device *pdev)
atmel_tdes_dma_cleanup(tdes_dd);
atmel_tdes_buff_cleanup(tdes_dd);
-
- return 0;
}
static struct platform_driver atmel_tdes_driver = {
.probe = atmel_tdes_probe,
- .remove = atmel_tdes_remove,
+ .remove_new = atmel_tdes_remove,
.driver = {
.name = "atmel_tdes",
.of_match_table = atmel_tdes_dt_ids,
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 8493a45e1b..ef9fe13ffa 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -2635,7 +2635,6 @@ static struct ahash_alg hash_algos[] = {
CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_init = artpec6_crypto_ahash_init,
.cra_exit = artpec6_crypto_ahash_exit,
@@ -2659,7 +2658,6 @@ static struct ahash_alg hash_algos[] = {
CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_init = artpec6_crypto_ahash_init,
.cra_exit = artpec6_crypto_ahash_exit,
@@ -2684,7 +2682,6 @@ static struct ahash_alg hash_algos[] = {
CRYPTO_ALG_ALLOCATES_MEMORY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
.cra_exit = artpec6_crypto_ahash_exit,
@@ -2957,7 +2954,7 @@ free_cache:
return err;
}
-static int artpec6_crypto_remove(struct platform_device *pdev)
+static void artpec6_crypto_remove(struct platform_device *pdev)
{
struct artpec6_crypto *ac = platform_get_drvdata(pdev);
int irq = platform_get_irq(pdev, 0);
@@ -2977,12 +2974,11 @@ static int artpec6_crypto_remove(struct platform_device *pdev)
#ifdef CONFIG_DEBUG_FS
artpec6_crypto_free_debugfs();
#endif
- return 0;
}
static struct platform_driver artpec6_crypto_driver = {
.probe = artpec6_crypto_probe,
- .remove = artpec6_crypto_remove,
+ .remove_new = artpec6_crypto_remove,
.driver = {
.name = "artpec6-crypto",
.of_match_table = artpec6_crypto_of_match,
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 689be70d69..10968ddb14 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -4713,7 +4713,7 @@ failure:
return err;
}
-static int bcm_spu_remove(struct platform_device *pdev)
+static void bcm_spu_remove(struct platform_device *pdev)
{
int i;
struct device *dev = &pdev->dev;
@@ -4751,7 +4751,6 @@ static int bcm_spu_remove(struct platform_device *pdev)
}
spu_free_debugfs();
spu_mb_release(pdev);
- return 0;
}
/* ===== Kernel Module API ===== */
@@ -4762,7 +4761,7 @@ static struct platform_driver bcm_spu_pdriver = {
.of_match_table = of_match_ptr(bcm_spu_dt_ids),
},
.probe = bcm_spu_probe,
- .remove = bcm_spu_remove,
+ .remove_new = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index a148ff1f08..a4f6884416 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -4545,6 +4545,7 @@ struct caam_hash_alg {
struct list_head entry;
struct device *dev;
int alg_type;
+ bool is_hmac;
struct ahash_alg ahash_alg;
};
@@ -4571,7 +4572,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->dev = caam_hash->dev;
- if (alg->setkey) {
+ if (caam_hash->is_hmac) {
ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
ARRAY_SIZE(ctx->key),
DMA_TO_DEVICE,
@@ -4611,7 +4612,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
* For keyed hash algorithms shared descriptors
* will be created later in setkey() callback
*/
- return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
+ return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -4646,12 +4647,14 @@ static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
+ t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
t_alg->ahash_alg.setkey = NULL;
+ t_alg->is_hmac = false;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 290c8500c2..fdd724228c 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1753,6 +1753,7 @@ static struct caam_hash_template driver_hash[] = {
struct caam_hash_alg {
struct list_head entry;
int alg_type;
+ bool is_hmac;
struct ahash_engine_alg ahash_alg;
};
@@ -1804,7 +1805,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
} else {
if (priv->era >= 6) {
ctx->dir = DMA_BIDIRECTIONAL;
- ctx->key_dir = alg->setkey ? DMA_TO_DEVICE : DMA_NONE;
+ ctx->key_dir = caam_hash->is_hmac ? DMA_TO_DEVICE : DMA_NONE;
} else {
ctx->dir = DMA_TO_DEVICE;
ctx->key_dir = DMA_NONE;
@@ -1862,7 +1863,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
* For keyed hash algorithms shared descriptors
* will be created later in setkey() callback
*/
- return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
+ return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);
}
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
@@ -1915,12 +1916,14 @@ caam_hash_alloc(struct caam_hash_template *template,
template->hmac_name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->hmac_driver_name);
+ t_alg->is_hmac = true;
} else {
snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
template->name);
snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
template->driver_name);
halg->setkey = NULL;
+ t_alg->is_hmac = false;
}
alg->cra_module = THIS_MODULE;
alg->cra_init = caam_hash_cra_init;
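Both caamhash.c and caamalg_qi2.c stop using "alg->setkey != NULL" as the keyed-hash test and record the intent in an explicit is_hmac flag, presumably because the ahash core can now install a default ->setkey for unkeyed algorithms, making the pointer test unreliable. The pattern in miniature:

struct caam_hash_alg {
	int alg_type;
	bool is_hmac;		/* set once at registration time */
	struct ahash_engine_alg ahash_alg;
};

/* registration: */
t_alg->is_hmac = keyed;		/* true only for hmac(...) templates */

/* cra_init: keyed descriptors are built later, in setkey() */
return caam_hash->is_hmac ? 0 : ahash_set_sh_desc(ahash);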
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index b1f1b393b9..26eba7de3f 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -180,7 +180,7 @@ static int caam_jr_shutdown(struct device *dev)
return ret;
}
-static int caam_jr_remove(struct platform_device *pdev)
+static void caam_jr_remove(struct platform_device *pdev)
{
int ret;
struct device *jrdev;
@@ -193,11 +193,14 @@ static int caam_jr_remove(struct platform_device *pdev)
caam_rng_exit(jrdev->parent);
/*
- * Return EBUSY if job ring already allocated.
+ * If a job ring is still allocated there is trouble ahead. Once
+ * caam_jr_remove() returned, jrpriv will be freed and the registers
+ * will get unmapped. So any user of such a job ring will probably
+ * crash.
*/
if (atomic_read(&jrpriv->tfm_count)) {
- dev_err(jrdev, "Device is busy\n");
- return -EBUSY;
+ dev_alert(jrdev, "Device is busy; consumers might start to crash\n");
+ return;
}
/* Unregister JR-based RNG & crypto algorithms */
@@ -212,13 +215,6 @@ static int caam_jr_remove(struct platform_device *pdev)
ret = caam_jr_shutdown(jrdev);
if (ret)
dev_err(jrdev, "Failed to shut down job ring\n");
-
- return ret;
-}
-
-static void caam_jr_platform_shutdown(struct platform_device *pdev)
-{
- caam_jr_remove(pdev);
}
/* Main per-ring interrupt handler */
@@ -823,8 +819,8 @@ static struct platform_driver caam_jr_driver = {
.pm = pm_ptr(&caam_jr_pm_ops),
},
.probe = caam_jr_probe,
- .remove = caam_jr_remove,
- .shutdown = caam_jr_platform_shutdown,
+ .remove_new = caam_jr_remove,
+ .shutdown = caam_jr_remove,
};
static int __init jr_driver_init(void)
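A side benefit of the void remove callback: its prototype now matches .shutdown exactly, so the caam_jr_platform_shutdown() trampoline is deleted and one function serves both roles. Also note the honesty fix above: returning -EBUSY from remove never actually blocked unbinding (the core ignored the value), so the driver now warns loudly and proceeds instead of pretending to refuse. Schematically:

static void caam_jr_remove(struct platform_device *pdev);	/* as above */

static struct platform_driver caam_jr_driver = {
	.probe      = caam_jr_probe,
	.remove_new = caam_jr_remove,
	.shutdown   = caam_jr_remove,	/* same signature, no wrapper */
};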
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index 13b137410b..1b5abdb6cc 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -647,7 +647,7 @@ void nitrox_get_hwinfo(struct nitrox_device *ndev)
ndev->hw.revision_id);
/* copy partname */
- strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
+ strscpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
}
void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
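The nitrox change is a one-liner with a real semantic fix: strncpy() leaves the destination unterminated when the source fills the buffer, while strscpy() always NUL-terminates and reports truncation. A generic sketch of the difference:

char partname[16];

/* strncpy: if name is >= 16 chars, partname has no terminating NUL */
strncpy(partname, name, sizeof(partname));

/* strscpy: always terminated; returns -E2BIG on truncation */
if (strscpy(partname, name, sizeof(partname)) < 0)
	pr_debug("partname truncated\n");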
diff --git a/drivers/crypto/ccp/dbc.c b/drivers/crypto/ccp/dbc.c
index 6f33149ef8..d373caab52 100644
--- a/drivers/crypto/ccp/dbc.c
+++ b/drivers/crypto/ccp/dbc.c
@@ -9,6 +9,7 @@
#include "dbc.h"
+#define DBC_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
struct error_map {
u32 psp;
int ret;
@@ -37,22 +38,37 @@ static struct error_map error_codes[] = {
{0x0, 0x0},
};
-static int send_dbc_cmd(struct psp_dbc_device *dbc_dev,
- enum psp_platform_access_msg msg)
+static inline int send_dbc_cmd_thru_ext(struct psp_dbc_device *dbc_dev, int msg)
+{
+ dbc_dev->mbox->ext_req.header.sub_cmd_id = msg;
+
+ return psp_extended_mailbox_cmd(dbc_dev->psp,
+ DBC_DEFAULT_TIMEOUT,
+ (struct psp_ext_request *)dbc_dev->mbox);
+}
+
+static inline int send_dbc_cmd_thru_pa(struct psp_dbc_device *dbc_dev, int msg)
+{
+ return psp_send_platform_access_msg(msg,
+ (struct psp_request *)dbc_dev->mbox);
+}
+
+static int send_dbc_cmd(struct psp_dbc_device *dbc_dev, int msg)
{
int ret;
- dbc_dev->mbox->req.header.status = 0;
- ret = psp_send_platform_access_msg(msg, (struct psp_request *)dbc_dev->mbox);
+ *dbc_dev->result = 0;
+ ret = dbc_dev->use_ext ? send_dbc_cmd_thru_ext(dbc_dev, msg) :
+ send_dbc_cmd_thru_pa(dbc_dev, msg);
if (ret == -EIO) {
int i;
dev_dbg(dbc_dev->dev,
"msg 0x%x failed with PSP error: 0x%x\n",
- msg, dbc_dev->mbox->req.header.status);
+ msg, *dbc_dev->result);
for (i = 0; error_codes[i].psp; i++) {
- if (dbc_dev->mbox->req.header.status == error_codes[i].psp)
+ if (*dbc_dev->result == error_codes[i].psp)
return error_codes[i].ret;
}
}
@@ -64,7 +80,7 @@ static int send_dbc_nonce(struct psp_dbc_device *dbc_dev)
{
int ret;
- dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_nonce);
+ *dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_nonce);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE);
if (ret == -EAGAIN) {
dev_dbg(dbc_dev->dev, "retrying get nonce\n");
@@ -76,9 +92,9 @@ static int send_dbc_nonce(struct psp_dbc_device *dbc_dev)
static int send_dbc_parameter(struct psp_dbc_device *dbc_dev)
{
- dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_param);
+ struct dbc_user_param *user_param = (struct dbc_user_param *)dbc_dev->payload;
- switch (dbc_dev->mbox->dbc_param.user.msg_index) {
+ switch (user_param->msg_index) {
case PARAM_SET_FMAX_CAP:
case PARAM_SET_PWR_CAP:
case PARAM_SET_GFX_MODE:
@@ -125,8 +141,7 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case DBCIOCNONCE:
- if (copy_from_user(&dbc_dev->mbox->dbc_nonce.user, argp,
- sizeof(struct dbc_user_nonce))) {
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_nonce))) {
ret = -EFAULT;
goto unlock;
}
@@ -135,43 +150,39 @@ static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
goto unlock;
- if (copy_to_user(argp, &dbc_dev->mbox->dbc_nonce.user,
- sizeof(struct dbc_user_nonce))) {
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_nonce))) {
ret = -EFAULT;
goto unlock;
}
break;
case DBCIOCUID:
- dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_set_uid);
- if (copy_from_user(&dbc_dev->mbox->dbc_set_uid.user, argp,
- sizeof(struct dbc_user_setuid))) {
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_setuid))) {
ret = -EFAULT;
goto unlock;
}
+ *dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_setuid);
ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID);
if (ret)
goto unlock;
- if (copy_to_user(argp, &dbc_dev->mbox->dbc_set_uid.user,
- sizeof(struct dbc_user_setuid))) {
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_setuid))) {
ret = -EFAULT;
goto unlock;
}
break;
case DBCIOCPARAM:
- if (copy_from_user(&dbc_dev->mbox->dbc_param.user, argp,
- sizeof(struct dbc_user_param))) {
+ if (copy_from_user(dbc_dev->payload, argp, sizeof(struct dbc_user_param))) {
ret = -EFAULT;
goto unlock;
}
+ *dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_param);
ret = send_dbc_parameter(dbc_dev);
if (ret)
goto unlock;
- if (copy_to_user(argp, &dbc_dev->mbox->dbc_param.user,
- sizeof(struct dbc_user_param))) {
+ if (copy_to_user(argp, dbc_dev->payload, sizeof(struct dbc_user_param))) {
ret = -EFAULT;
goto unlock;
}
@@ -197,9 +208,6 @@ int dbc_dev_init(struct psp_device *psp)
struct psp_dbc_device *dbc_dev;
int ret;
- if (!PSP_FEATURE(psp, DBC))
- return 0;
-
dbc_dev = devm_kzalloc(dev, sizeof(*dbc_dev), GFP_KERNEL);
if (!dbc_dev)
return -ENOMEM;
@@ -213,6 +221,20 @@ int dbc_dev_init(struct psp_device *psp)
psp->dbc_data = dbc_dev;
dbc_dev->dev = dev;
+ dbc_dev->psp = psp;
+
+ if (PSP_CAPABILITY(psp, DBC_THRU_EXT)) {
+ dbc_dev->use_ext = true;
+ dbc_dev->payload_size = &dbc_dev->mbox->ext_req.header.payload_size;
+ dbc_dev->result = &dbc_dev->mbox->ext_req.header.status;
+ dbc_dev->payload = &dbc_dev->mbox->ext_req.buf;
+ dbc_dev->header_size = sizeof(struct psp_ext_req_buffer_hdr);
+ } else {
+ dbc_dev->payload_size = &dbc_dev->mbox->pa_req.header.payload_size;
+ dbc_dev->result = &dbc_dev->mbox->pa_req.header.status;
+ dbc_dev->payload = &dbc_dev->mbox->pa_req.buf;
+ dbc_dev->header_size = sizeof(struct psp_req_buffer_hdr);
+ }
ret = send_dbc_nonce(dbc_dev);
if (ret == -EACCES) {
diff --git a/drivers/crypto/ccp/dbc.h b/drivers/crypto/ccp/dbc.h
index e963099ca3..e0fecbe92e 100644
--- a/drivers/crypto/ccp/dbc.h
+++ b/drivers/crypto/ccp/dbc.h
@@ -20,34 +20,25 @@
struct psp_dbc_device {
struct device *dev;
+ struct psp_device *psp;
union dbc_buffer *mbox;
struct mutex ioctl_mutex;
struct miscdevice char_dev;
-};
-
-struct dbc_nonce {
- struct psp_req_buffer_hdr header;
- struct dbc_user_nonce user;
-} __packed;
-struct dbc_set_uid {
- struct psp_req_buffer_hdr header;
- struct dbc_user_setuid user;
-} __packed;
-
-struct dbc_param {
- struct psp_req_buffer_hdr header;
- struct dbc_user_param user;
-} __packed;
+ /* used to abstract communication path */
+ bool use_ext;
+ u32 header_size;
+ u32 *payload_size;
+ u32 *result;
+ void *payload;
+};
union dbc_buffer {
- struct psp_request req;
- struct dbc_nonce dbc_nonce;
- struct dbc_set_uid dbc_set_uid;
- struct dbc_param dbc_param;
+ struct psp_request pa_req;
+ struct psp_ext_request ext_req;
};
void dbc_dev_destroy(struct psp_device *psp);
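The reworked dbc.h collapses the three per-command wrapper structs into a single union over the two transport request types. dbc.c then decides once, at init time, which mailbox the firmware speaks and caches pointers into the matching header, so the ioctl paths stay transport-agnostic. The caching idea in miniature (field names as introduced by this patch):

if (use_extended_mailbox) {
	dbc_dev->payload_size = &dbc_dev->mbox->ext_req.header.payload_size;
	dbc_dev->result       = &dbc_dev->mbox->ext_req.header.status;
	dbc_dev->payload      = &dbc_dev->mbox->ext_req.buf;
} else {
	dbc_dev->payload_size = &dbc_dev->mbox->pa_req.header.payload_size;
	dbc_dev->result       = &dbc_dev->mbox->pa_req.header.status;
	dbc_dev->payload      = &dbc_dev->mbox->pa_req.buf;
}

/* every command path then works through the indirection: */
*dbc_dev->payload_size = dbc_dev->header_size + sizeof(struct dbc_user_nonce);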
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index d42d7bc623..124a2e0c89 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -9,6 +9,9 @@
#include <linux/kernel.h>
#include <linux/irqreturn.h>
+#include <linux/mutex.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
#include "sp-dev.h"
#include "psp-dev.h"
@@ -19,6 +22,86 @@
struct psp_device *psp_master;
+#define PSP_C2PMSG_17_CMDRESP_CMD GENMASK(19, 16)
+
+static int psp_mailbox_poll(const void __iomem *cmdresp_reg, unsigned int *cmdresp,
+ unsigned int timeout_msecs)
+{
+ while (true) {
+ *cmdresp = ioread32(cmdresp_reg);
+ if (FIELD_GET(PSP_CMDRESP_RESP, *cmdresp))
+ return 0;
+
+ if (!timeout_msecs--)
+ break;
+
+ usleep_range(1000, 1100);
+ }
+
+ return -ETIMEDOUT;
+}
+
+int psp_mailbox_command(struct psp_device *psp, enum psp_cmd cmd, void *cmdbuff,
+ unsigned int timeout_msecs, unsigned int *cmdresp)
+{
+ void __iomem *cmdresp_reg, *cmdbuff_lo_reg, *cmdbuff_hi_reg;
+ int ret;
+
+ if (!psp || !psp->vdata || !psp->vdata->cmdresp_reg ||
+ !psp->vdata->cmdbuff_addr_lo_reg || !psp->vdata->cmdbuff_addr_hi_reg)
+ return -ENODEV;
+
+ cmdresp_reg = psp->io_regs + psp->vdata->cmdresp_reg;
+ cmdbuff_lo_reg = psp->io_regs + psp->vdata->cmdbuff_addr_lo_reg;
+ cmdbuff_hi_reg = psp->io_regs + psp->vdata->cmdbuff_addr_hi_reg;
+
+ mutex_lock(&psp->mailbox_mutex);
+
+ /* Ensure mailbox is ready for a command */
+ ret = -EBUSY;
+ if (psp_mailbox_poll(cmdresp_reg, cmdresp, 0))
+ goto unlock;
+
+ if (cmdbuff) {
+ iowrite32(lower_32_bits(__psp_pa(cmdbuff)), cmdbuff_lo_reg);
+ iowrite32(upper_32_bits(__psp_pa(cmdbuff)), cmdbuff_hi_reg);
+ }
+
+ *cmdresp = FIELD_PREP(PSP_C2PMSG_17_CMDRESP_CMD, cmd);
+ iowrite32(*cmdresp, cmdresp_reg);
+
+ ret = psp_mailbox_poll(cmdresp_reg, cmdresp, timeout_msecs);
+
+unlock:
+ mutex_unlock(&psp->mailbox_mutex);
+
+ return ret;
+}
+
+int psp_extended_mailbox_cmd(struct psp_device *psp, unsigned int timeout_msecs,
+ struct psp_ext_request *req)
+{
+ unsigned int reg;
+ int ret;
+
+ print_hex_dump_debug("->psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ ret = psp_mailbox_command(psp, PSP_CMD_TEE_EXTENDED_CMD, (void *)req,
+ timeout_msecs, &reg);
+ if (ret) {
+ return ret;
+ } else if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
+ req->header.status = FIELD_GET(PSP_CMDRESP_STS, reg);
+ return -EIO;
+ }
+
+ print_hex_dump_debug("<-psp ", DUMP_PREFIX_OFFSET, 16, 2, req,
+ req->header.payload_size, false);
+
+ return 0;
+}
+
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
struct device *dev = sp->dev;
@@ -74,7 +157,7 @@ static unsigned int psp_get_capability(struct psp_device *psp)
psp->capability = val;
/* Detect if TSME and SME are both enabled */
- if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING &&
+ if (PSP_CAPABILITY(psp, PSP_SECURITY_REPORTING) &&
psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) &&
cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n");
@@ -85,7 +168,7 @@ static unsigned int psp_get_capability(struct psp_device *psp)
static int psp_check_sev_support(struct psp_device *psp)
{
/* Check if device supports SEV feature */
- if (!(psp->capability & PSP_CAPABILITY_SEV)) {
+ if (!PSP_CAPABILITY(psp, SEV)) {
dev_dbg(psp->dev, "psp does not support SEV\n");
return -ENODEV;
}
@@ -96,7 +179,7 @@ static int psp_check_sev_support(struct psp_device *psp)
static int psp_check_tee_support(struct psp_device *psp)
{
/* Check if device supports TEE feature */
- if (!(psp->capability & PSP_CAPABILITY_TEE)) {
+ if (!PSP_CAPABILITY(psp, TEE)) {
dev_dbg(psp->dev, "psp does not support TEE\n");
return -ENODEV;
}
@@ -104,23 +187,6 @@ static int psp_check_tee_support(struct psp_device *psp)
return 0;
}
-static void psp_init_platform_access(struct psp_device *psp)
-{
- int ret;
-
- ret = platform_access_dev_init(psp);
- if (ret) {
- dev_warn(psp->dev, "platform access init failed: %d\n", ret);
- return;
- }
-
- /* dbc must come after platform access as it tests the feature */
- ret = dbc_dev_init(psp);
- if (ret)
- dev_warn(psp->dev, "failed to init dynamic boost control: %d\n",
- ret);
-}
-
static int psp_init(struct psp_device *psp)
{
int ret;
@@ -137,8 +203,19 @@ static int psp_init(struct psp_device *psp)
return ret;
}
- if (psp->vdata->platform_access)
- psp_init_platform_access(psp);
+ if (psp->vdata->platform_access) {
+ ret = platform_access_dev_init(psp);
+ if (ret)
+ return ret;
+ }
+
+ /* dbc must come after platform access as it tests the feature */
+ if (PSP_FEATURE(psp, DBC) ||
+ PSP_CAPABILITY(psp, DBC_THRU_EXT)) {
+ ret = dbc_dev_init(psp);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -164,6 +241,7 @@ int psp_dev_init(struct sp_device *sp)
}
psp->io_regs = sp->io_map;
+ mutex_init(&psp->mailbox_mutex);
ret = psp_get_capability(psp);
if (ret)
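psp-dev.c gains a generic, mutex-serialized mailbox: poll for the RESP bit until a millisecond budget runs out, then write the command (and optionally a buffer address) and poll again. A quirk worth calling out: a timeout of 0 degenerates into a single non-blocking read, which psp_mailbox_command() exploits as its "is the mailbox idle?" probe. Illustrative use of the internal helper:

unsigned int resp;

/* one-shot readiness check: no sleep, fails fast with -ETIMEDOUT */
if (psp_mailbox_poll(cmdresp_reg, &resp, 0))
	return -EBUSY;

/* bounded wait: ~1 ms sleep per iteration, up to 1000 ms here */
if (psp_mailbox_poll(cmdresp_reg, &resp, 1000) == -ETIMEDOUT)
	dev_err(dev, "PSP did not respond\n");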
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index 8a4de69399..ae582ba637 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -14,6 +14,9 @@
#include <linux/list.h>
#include <linux/bits.h>
#include <linux/interrupt.h>
+#include <linux/mutex.h>
+#include <linux/psp.h>
+#include <linux/psp-platform-access.h>
#include "sp-dev.h"
@@ -33,6 +36,7 @@ struct psp_device {
struct sp_device *sp;
void __iomem *io_regs;
+ struct mutex mailbox_mutex;
psp_irq_handler_t sev_irq_handler;
void *sev_irq_data;
@@ -53,6 +57,7 @@ struct psp_device *psp_get_master_device(void);
#define PSP_CAPABILITY_SEV BIT(0)
#define PSP_CAPABILITY_TEE BIT(1)
+#define PSP_CAPABILITY_DBC_THRU_EXT BIT(2)
#define PSP_CAPABILITY_PSP_SECURITY_REPORTING BIT(7)
#define PSP_CAPABILITY_PSP_SECURITY_OFFSET 8
@@ -71,4 +76,54 @@ struct psp_device *psp_get_master_device(void);
#define PSP_SECURITY_HSP_TPM_AVAILABLE BIT(10)
#define PSP_SECURITY_ROM_ARMOR_ENFORCED BIT(11)
+/**
+ * enum psp_cmd - PSP mailbox commands
+ * @PSP_CMD_TEE_RING_INIT: Initialize TEE ring buffer
+ * @PSP_CMD_TEE_RING_DESTROY: Destroy TEE ring buffer
+ * @PSP_CMD_TEE_EXTENDED_CMD: Extended command
+ * @PSP_CMD_MAX: Maximum command id
+ */
+enum psp_cmd {
+ PSP_CMD_TEE_RING_INIT = 1,
+ PSP_CMD_TEE_RING_DESTROY = 2,
+ PSP_CMD_TEE_EXTENDED_CMD = 14,
+ PSP_CMD_MAX = 15,
+};
+
+int psp_mailbox_command(struct psp_device *psp, enum psp_cmd cmd, void *cmdbuff,
+ unsigned int timeout_msecs, unsigned int *cmdresp);
+
+/**
+ * struct psp_ext_req_buffer_hdr - Structure of the extended command header
+ * @payload_size: total payload size
+ * @sub_cmd_id: extended command ID
+ * @status: status of command execution (out)
+ */
+struct psp_ext_req_buffer_hdr {
+ u32 payload_size;
+ u32 sub_cmd_id;
+ u32 status;
+} __packed;
+
+struct psp_ext_request {
+ struct psp_ext_req_buffer_hdr header;
+ void *buf;
+} __packed;
+
+/**
+ * enum psp_sub_cmd - PSP mailbox sub commands
+ * @PSP_SUB_CMD_DBC_GET_NONCE: Get nonce from DBC
+ * @PSP_SUB_CMD_DBC_SET_UID: Set UID for DBC
+ * @PSP_SUB_CMD_DBC_GET_PARAMETER: Get parameter from DBC
+ * @PSP_SUB_CMD_DBC_SET_PARAMETER: Set parameter for DBC
+ */
+enum psp_sub_cmd {
+ PSP_SUB_CMD_DBC_GET_NONCE = PSP_DYNAMIC_BOOST_GET_NONCE,
+ PSP_SUB_CMD_DBC_SET_UID = PSP_DYNAMIC_BOOST_SET_UID,
+ PSP_SUB_CMD_DBC_GET_PARAMETER = PSP_DYNAMIC_BOOST_GET_PARAMETER,
+ PSP_SUB_CMD_DBC_SET_PARAMETER = PSP_DYNAMIC_BOOST_SET_PARAMETER,
+};
+
+int psp_extended_mailbox_cmd(struct psp_device *psp, unsigned int timeout_msecs,
+ struct psp_ext_request *req);
#endif /* __PSP_DEV_H */
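A hedged sketch of how a caller is expected to drive the new extended mailbox, modeled on the dbc.c usage elsewhere in this series (the payload is written into the request buffer starting at the buf member, and payload_size covers header plus data):

struct psp_ext_request *req = /* page-sized, physically contiguous buffer */;

req->header.sub_cmd_id   = PSP_SUB_CMD_DBC_GET_NONCE;
req->header.payload_size = sizeof(struct psp_ext_req_buffer_hdr) + data_len;
memcpy(&req->buf, data, data_len);

ret = psp_extended_mailbox_cmd(psp, 10 * MSEC_PER_SEC, req);
if (ret == -EIO)	/* firmware rejected it: status carries the PSP code */
	dev_dbg(dev, "ext cmd failed: %#x\n", req->header.status);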
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index f97166fba9..53b217a621 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -309,6 +309,7 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
struct psp_device *psp = psp_master;
struct sev_device *sev;
+ unsigned int cmdbuff_hi, cmdbuff_lo;
unsigned int phys_lsb, phys_msb;
unsigned int reg, ret = 0;
int buf_len;
@@ -371,6 +372,19 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n",
cmd, FIELD_GET(PSP_CMDRESP_STS, reg));
+
+ /*
+ * PSP firmware may report additional error information in the
+ * command buffer registers on error. Print contents of command
+ * buffer registers if they changed.
+ */
+ cmdbuff_hi = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg);
+ cmdbuff_lo = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg);
+ if (cmdbuff_hi != phys_msb || cmdbuff_lo != phys_lsb) {
+ dev_dbg(sev->dev, "Additional error information reported in cmdbuff:");
+ dev_dbg(sev->dev, " cmdbuff hi: %#010x\n", cmdbuff_hi);
+ dev_dbg(sev->dev, " cmdbuff lo: %#010x\n", cmdbuff_lo);
+ }
ret = -EIO;
} else {
ret = sev_write_init_ex_file_if_required(cmd);
@@ -520,10 +534,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init);
static int __sev_platform_shutdown_locked(int *error)
{
- struct sev_device *sev = psp_master->sev_data;
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
int ret;
- if (!sev || sev->state == SEV_STATE_UNINIT)
+ if (!psp || !psp->sev_data)
+ return 0;
+
+ sev = psp->sev_data;
+
+ if (sev->state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 2329ad524b..03d5b9e040 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -30,6 +30,7 @@
#define PLATFORM_FEATURE_DBC 0x1
+#define PSP_CAPABILITY(psp, cap) (psp->capability & PSP_CAPABILITY_##cap)
#define PSP_FEATURE(psp, feat) (psp->vdata && psp->vdata->platform_features & PLATFORM_FEATURE_##feat)
/* Structure to hold CCP device data */
@@ -71,6 +72,9 @@ struct psp_vdata {
const struct sev_vdata *sev;
const struct tee_vdata *tee;
const struct platform_access_vdata *platform_access;
+ const unsigned int cmdresp_reg;
+ const unsigned int cmdbuff_addr_lo_reg;
+ const unsigned int cmdbuff_addr_hi_reg;
const unsigned int feature_reg;
const unsigned int inten_reg;
const unsigned int intsts_reg;
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index b6ab56abeb..300dda1418 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -84,7 +84,7 @@ static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *a
struct sp_device *sp = dev_get_drvdata(dev);
struct psp_device *psp = sp->psp_data;
- if (psp && (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING))
+ if (psp && PSP_CAPABILITY(psp, PSP_SECURITY_REPORTING))
return 0444;
return 0;
@@ -135,7 +135,7 @@ static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *a
val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg);
if (attr == &dev_attr_tee_version.attr &&
- psp->capability & PSP_CAPABILITY_TEE &&
+ PSP_CAPABILITY(psp, TEE) &&
psp->vdata->tee->info_reg)
val = ioread32(psp->io_regs + psp->vdata->tee->info_reg);
@@ -418,18 +418,12 @@ static const struct sev_vdata sevv2 = {
};
static const struct tee_vdata teev1 = {
- .cmdresp_reg = 0x10544, /* C2PMSG_17 */
- .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */
- .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
.ring_wptr_reg = 0x10550, /* C2PMSG_20 */
.ring_rptr_reg = 0x10554, /* C2PMSG_21 */
.info_reg = 0x109e8, /* C2PMSG_58 */
};
static const struct tee_vdata teev2 = {
- .cmdresp_reg = 0x10944, /* C2PMSG_17 */
- .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
- .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
.ring_wptr_reg = 0x10950, /* C2PMSG_20 */
.ring_rptr_reg = 0x10954, /* C2PMSG_21 */
};
@@ -466,6 +460,9 @@ static const struct psp_vdata pspv2 = {
static const struct psp_vdata pspv3 = {
.tee = &teev1,
.platform_access = &pa_v1,
+ .cmdresp_reg = 0x10544, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
.bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
@@ -476,6 +473,9 @@ static const struct psp_vdata pspv3 = {
static const struct psp_vdata pspv4 = {
.sev = &sevv2,
.tee = &teev1,
+ .cmdresp_reg = 0x10544, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
.bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10690, /* P2CMSG_INTEN */
@@ -485,6 +485,9 @@ static const struct psp_vdata pspv4 = {
static const struct psp_vdata pspv5 = {
.tee = &teev2,
.platform_access = &pa_v2,
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
@@ -493,6 +496,9 @@ static const struct psp_vdata pspv5 = {
static const struct psp_vdata pspv6 = {
.sev = &sevv2,
.tee = &teev2,
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
.feature_reg = 0x109fc, /* C2PMSG_63 */
.inten_reg = 0x10510, /* P2CMSG_INTEN */
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 7d79a8744f..4733012377 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -180,7 +180,7 @@ e_err:
return ret;
}
-static int sp_platform_remove(struct platform_device *pdev)
+static void sp_platform_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sp_device *sp = dev_get_drvdata(dev);
@@ -188,8 +188,6 @@ static int sp_platform_remove(struct platform_device *pdev)
sp_destroy(sp);
dev_notice(dev, "disabled\n");
-
- return 0;
}
#ifdef CONFIG_PM
@@ -222,7 +220,7 @@ static struct platform_driver sp_platform_driver = {
#endif
},
.probe = sp_platform_probe,
- .remove = sp_platform_remove,
+ .remove_new = sp_platform_remove,
#ifdef CONFIG_PM
.suspend = sp_platform_suspend,
.resume = sp_platform_resume,
diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
index 5560bf8329..5e1d807246 100644
--- a/drivers/crypto/ccp/tee-dev.c
+++ b/drivers/crypto/ccp/tee-dev.c
@@ -62,26 +62,6 @@ static void tee_free_ring(struct psp_tee_device *tee)
mutex_destroy(&rb_mgr->mutex);
}
-static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout,
- unsigned int *reg)
-{
- /* ~10ms sleep per loop => nloop = timeout * 100 */
- int nloop = timeout * 100;
-
- while (--nloop) {
- *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg);
- if (FIELD_GET(PSP_CMDRESP_RESP, *reg))
- return 0;
-
- usleep_range(10000, 10100);
- }
-
- dev_err(tee->dev, "tee: command timed out, disabling PSP\n");
- psp_dead = true;
-
- return -ETIMEDOUT;
-}
-
static
struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee)
{
@@ -110,7 +90,6 @@ static int tee_init_ring(struct psp_tee_device *tee)
{
int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd);
struct tee_init_ring_cmd *cmd;
- phys_addr_t cmd_buffer;
unsigned int reg;
int ret;
@@ -130,23 +109,15 @@ static int tee_init_ring(struct psp_tee_device *tee)
return -ENOMEM;
}
- cmd_buffer = __psp_pa((void *)cmd);
-
/* Send command buffer details to Trusted OS by writing to
* CPU-PSP message registers
*/
-
- iowrite32(lower_32_bits(cmd_buffer),
- tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg);
- iowrite32(upper_32_bits(cmd_buffer),
- tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg);
- iowrite32(TEE_RING_INIT_CMD,
- tee->io_regs + tee->vdata->cmdresp_reg);
-
- ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ ret = psp_mailbox_command(tee->psp, PSP_CMD_TEE_RING_INIT, cmd,
+ TEE_DEFAULT_CMD_TIMEOUT, &reg);
if (ret) {
- dev_err(tee->dev, "tee: ring init command timed out\n");
+ dev_err(tee->dev, "tee: ring init command timed out, disabling TEE support\n");
tee_free_ring(tee);
+ psp_dead = true;
goto free_buf;
}
@@ -174,12 +145,11 @@ static void tee_destroy_ring(struct psp_tee_device *tee)
if (psp_dead)
goto free_ring;
- iowrite32(TEE_RING_DESTROY_CMD,
- tee->io_regs + tee->vdata->cmdresp_reg);
-
- ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg);
+ ret = psp_mailbox_command(tee->psp, PSP_CMD_TEE_RING_DESTROY, NULL,
+ TEE_DEFAULT_CMD_TIMEOUT, &reg);
if (ret) {
- dev_err(tee->dev, "tee: ring destroy command timed out\n");
+ dev_err(tee->dev, "tee: ring destroy command timed out, disabling TEE support\n");
+ psp_dead = true;
} else if (FIELD_GET(PSP_CMDRESP_STS, reg)) {
dev_err(tee->dev, "tee: ring destroy command failed (%#010lx)\n",
FIELD_GET(PSP_CMDRESP_STS, reg));
@@ -370,7 +340,7 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
if (ret)
return ret;
- ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
+ ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_RING_TIMEOUT);
if (ret) {
resp->flag = CMD_RESPONSE_TIMEDOUT;
return ret;
diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
index 49d26158b7..ea9a2b7c05 100644
--- a/drivers/crypto/ccp/tee-dev.h
+++ b/drivers/crypto/ccp/tee-dev.h
@@ -17,22 +17,11 @@
#include <linux/device.h>
#include <linux/mutex.h>
-#define TEE_DEFAULT_TIMEOUT 10
+#define TEE_DEFAULT_CMD_TIMEOUT (10 * MSEC_PER_SEC)
+#define TEE_DEFAULT_RING_TIMEOUT 10
#define MAX_BUFFER_SIZE 988
/**
- * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
- * @TEE_RING_INIT_CMD: Initialize ring buffer
- * @TEE_RING_DESTROY_CMD: Destroy ring buffer
- * @TEE_RING_MAX_CMD: Maximum command id
- */
-enum tee_ring_cmd_id {
- TEE_RING_INIT_CMD = 0x00010000,
- TEE_RING_DESTROY_CMD = 0x00020000,
- TEE_RING_MAX_CMD = 0x000F0000,
-};
-
-/**
* struct tee_init_ring_cmd - Command to init TEE ring buffer
* @low_addr: bits [31:0] of the physical address of ring buffer
* @hi_addr: bits [63:32] of the physical address of ring buffer
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index 0f0694037d..9177b54bb0 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -623,7 +623,7 @@ static int ccree_probe(struct platform_device *plat_dev)
return 0;
}
-static int ccree_remove(struct platform_device *plat_dev)
+static void ccree_remove(struct platform_device *plat_dev)
{
struct device *dev = &plat_dev->dev;
@@ -632,8 +632,6 @@ static int ccree_remove(struct platform_device *plat_dev)
cleanup_cc_resources(plat_dev);
dev_info(dev, "ARM ccree device terminated\n");
-
- return 0;
}
static struct platform_driver ccree_driver = {
@@ -645,7 +643,7 @@ static struct platform_driver ccree_driver = {
#endif
},
.probe = ccree_probe,
- .remove = ccree_remove,
+ .remove_new = ccree_remove,
};
static int __init ccree_init(void)
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 16298ae4a0..177428480c 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -1920,6 +1920,9 @@ err:
return error;
}
+static int chcr_hmac_init(struct ahash_request *areq);
+static int chcr_sha_init(struct ahash_request *areq);
+
static int chcr_ahash_digest(struct ahash_request *req)
{
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
@@ -1938,7 +1941,11 @@ static int chcr_ahash_digest(struct ahash_request *req)
req_ctx->rxqidx = cpu % ctx->nrxq;
put_cpu();
- rtfm->init(req);
+ if (is_hmac(crypto_ahash_tfm(rtfm)))
+ chcr_hmac_init(req);
+ else
+ chcr_sha_init(req);
+
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
error = chcr_inc_wrcount(dev);
if (error)
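The chcr change replaces the indirect rtfm->init(req) with direct calls to the driver's own initializers, selected by is_hmac(), presumably because the ->init pointer is no longer reachable through the tfm in the same way after the ahash restructuring. Either way, the driver knows exactly which of its two init paths applies, so the forward declarations plus an explicit branch are both clearer and cheaper:

if (is_hmac(crypto_ahash_tfm(rtfm)))
	chcr_hmac_init(req);	/* keyed variant */
else
	chcr_sha_init(req);	/* plain hash variant */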
diff --git a/drivers/crypto/exynos-rng.c b/drivers/crypto/exynos-rng.c
index 5d60a4bcb5..0dd8baf16c 100644
--- a/drivers/crypto/exynos-rng.c
+++ b/drivers/crypto/exynos-rng.c
@@ -306,13 +306,11 @@ static int exynos_rng_probe(struct platform_device *pdev)
return ret;
}
-static int exynos_rng_remove(struct platform_device *pdev)
+static void exynos_rng_remove(struct platform_device *pdev)
{
crypto_unregister_rng(&exynos_rng_alg);
exynos_rng_dev = NULL;
-
- return 0;
}
static int __maybe_unused exynos_rng_suspend(struct device *dev)
@@ -391,7 +389,7 @@ static struct platform_driver exynos_rng_driver = {
.of_match_table = exynos_rng_dt_match,
},
.probe = exynos_rng_probe,
- .remove = exynos_rng_remove,
+ .remove_new = exynos_rng_remove,
};
module_platform_driver(exynos_rng_driver);
diff --git a/drivers/crypto/gemini/sl3516-ce-core.c b/drivers/crypto/gemini/sl3516-ce-core.c
index 0f43c6e39b..1d1a889599 100644
--- a/drivers/crypto/gemini/sl3516-ce-core.c
+++ b/drivers/crypto/gemini/sl3516-ce-core.c
@@ -505,7 +505,7 @@ error_pm:
return err;
}
-static int sl3516_ce_remove(struct platform_device *pdev)
+static void sl3516_ce_remove(struct platform_device *pdev)
{
struct sl3516_ce_dev *ce = platform_get_drvdata(pdev);
@@ -518,8 +518,6 @@ static int sl3516_ce_remove(struct platform_device *pdev)
#ifdef CONFIG_CRYPTO_DEV_SL3516_DEBUG
debugfs_remove_recursive(ce->dbgfs_dir);
#endif
-
- return 0;
}
static const struct of_device_id sl3516_ce_crypto_of_match_table[] = {
@@ -530,7 +528,7 @@ MODULE_DEVICE_TABLE(of, sl3516_ce_crypto_of_match_table);
static struct platform_driver sl3516_ce_driver = {
.probe = sl3516_ce_probe,
- .remove = sl3516_ce_remove,
+ .remove_new = sl3516_ce_remove,
.driver = {
.name = "sl3516-crypto",
.pm = &sl3516_ce_pm_ops,
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 8e4a49b7ab..7bddc3c786 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2393,9 +2393,13 @@ static int hifn_alg_alloc(struct hifn_device *dev, const struct hifn_alg_templat
alg->alg = t->skcipher;
alg->alg.init = hifn_init_tfm;
- snprintf(alg->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", t->name);
- snprintf(alg->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-%s",
- t->drv_name, dev->name);
+ err = -EINVAL;
+ if (snprintf(alg->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+ "%s", t->name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_alg;
+ if (snprintf(alg->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "%s-%s", t->drv_name, dev->name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_alg;
alg->alg.base.cra_priority = 300;
alg->alg.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
@@ -2411,6 +2415,7 @@ static int hifn_alg_alloc(struct hifn_device *dev, const struct hifn_alg_templat
err = crypto_register_skcipher(&alg->alg);
if (err) {
list_del(&alg->entry);
+out_free_alg:
kfree(alg);
}
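
The hifn hunk turns silent name truncation into a hard error. snprintf() returns the length the formatted string would have had, so a return value >= the buffer size means the name was cut short. A self-contained sketch of the check (NAME_MAX_LEN and the helper are illustrative, not from the patch):

#include <linux/errno.h>
#include <linux/kernel.h>

#define NAME_MAX_LEN 64	/* assumed buffer size */

static int build_drv_name(char *buf, const char *drv, const char *dev)
{
	/* >= because the return value excludes the terminating NUL. */
	if (snprintf(buf, NAME_MAX_LEN, "%s-%s", drv, dev) >= NAME_MAX_LEN)
		return -EINVAL;	/* name would have been truncated */

	return 0;
}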
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 2cc1591949..7e8186fe05 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -137,8 +137,8 @@ static void dump_show(struct hisi_qm *qm, void *info,
static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
{
struct device *dev = &qm->pdev->dev;
- struct qm_sqc *sqc, *sqc_curr;
- dma_addr_t sqc_dma;
+ struct qm_sqc *sqc_curr;
+ struct qm_sqc sqc;
u32 qp_id;
int ret;
@@ -151,35 +151,29 @@ static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name)
return -EINVAL;
}
- sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
- if (IS_ERR(sqc))
- return PTR_ERR(sqc);
+ ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
+ if (!ret) {
+ dump_show(qm, &sqc, sizeof(struct qm_sqc), name);
- ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
- if (ret) {
- down_read(&qm->qps_lock);
- if (qm->sqc) {
- sqc_curr = qm->sqc + qp_id;
+ return 0;
+ }
- dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC");
- }
- up_read(&qm->qps_lock);
+ down_read(&qm->qps_lock);
+ if (qm->sqc) {
+ sqc_curr = qm->sqc + qp_id;
- goto free_ctx;
+ dump_show(qm, sqc_curr, sizeof(*sqc_curr), "SOFT SQC");
}
+ up_read(&qm->qps_lock);
- dump_show(qm, sqc, sizeof(*sqc), name);
-
-free_ctx:
- hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
return 0;
}
static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
{
struct device *dev = &qm->pdev->dev;
- struct qm_cqc *cqc, *cqc_curr;
- dma_addr_t cqc_dma;
+ struct qm_cqc *cqc_curr;
+ struct qm_cqc cqc;
u32 qp_id;
int ret;
@@ -192,34 +186,29 @@ static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name)
return -EINVAL;
}
- cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
- if (IS_ERR(cqc))
- return PTR_ERR(cqc);
+ ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
+ if (!ret) {
+ dump_show(qm, &cqc, sizeof(struct qm_cqc), name);
- ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1);
- if (ret) {
- down_read(&qm->qps_lock);
- if (qm->cqc) {
- cqc_curr = qm->cqc + qp_id;
+ return 0;
+ }
- dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC");
- }
- up_read(&qm->qps_lock);
+ down_read(&qm->qps_lock);
+ if (qm->cqc) {
+ cqc_curr = qm->cqc + qp_id;
- goto free_ctx;
+ dump_show(qm, cqc_curr, sizeof(*cqc_curr), "SOFT CQC");
}
+ up_read(&qm->qps_lock);
- dump_show(qm, cqc, sizeof(*cqc), name);
-
-free_ctx:
- hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
return 0;
}
static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
{
struct device *dev = &qm->pdev->dev;
- dma_addr_t xeqc_dma;
+ struct qm_aeqc aeqc;
+ struct qm_eqc eqc;
size_t size;
void *xeqc;
int ret;
@@ -233,23 +222,19 @@ static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name)
if (!strcmp(name, "EQC")) {
cmd = QM_MB_CMD_EQC;
size = sizeof(struct qm_eqc);
+ xeqc = &eqc;
} else {
cmd = QM_MB_CMD_AEQC;
size = sizeof(struct qm_aeqc);
+ xeqc = &aeqc;
}
- xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma);
- if (IS_ERR(xeqc))
- return PTR_ERR(xeqc);
-
- ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
+ ret = qm_set_and_get_xqc(qm, cmd, xeqc, 0, 1);
if (ret)
- goto err_free_ctx;
+ return ret;
dump_show(qm, xeqc, size, name);
-err_free_ctx:
- hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma);
return ret;
}
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 9a1c61be32..764532a6ca 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -57,6 +57,9 @@ struct hpre_ctx;
#define HPRE_DRV_ECDH_MASK_CAP BIT(2)
#define HPRE_DRV_X25519_MASK_CAP BIT(5)
+static DEFINE_MUTEX(hpre_algs_lock);
+static unsigned int hpre_available_devs;
+
typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
struct hpre_rsa_ctx {
@@ -2202,11 +2205,17 @@ static void hpre_unregister_x25519(struct hisi_qm *qm)
int hpre_algs_register(struct hisi_qm *qm)
{
- int ret;
+ int ret = 0;
+
+ mutex_lock(&hpre_algs_lock);
+ if (hpre_available_devs) {
+ hpre_available_devs++;
+ goto unlock;
+ }
ret = hpre_register_rsa(qm);
if (ret)
- return ret;
+ goto unlock;
ret = hpre_register_dh(qm);
if (ret)
@@ -2220,6 +2229,9 @@ int hpre_algs_register(struct hisi_qm *qm)
if (ret)
goto unreg_ecdh;
+ hpre_available_devs++;
+ mutex_unlock(&hpre_algs_lock);
+
return ret;
unreg_ecdh:
@@ -2228,13 +2240,22 @@ unreg_dh:
hpre_unregister_dh(qm);
unreg_rsa:
hpre_unregister_rsa(qm);
+unlock:
+ mutex_unlock(&hpre_algs_lock);
return ret;
}
void hpre_algs_unregister(struct hisi_qm *qm)
{
+ mutex_lock(&hpre_algs_lock);
+ if (--hpre_available_devs)
+ goto unlock;
+
hpre_unregister_x25519(qm);
hpre_unregister_ecdh(qm);
hpre_unregister_dh(qm);
hpre_unregister_rsa(qm);
+
+unlock:
+ mutex_unlock(&hpre_algs_lock);
}
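
This hunk, and matching ones later in sec_crypto.c and zip_crypto.c, moves alg registration behind a mutex-protected device count so only the first probed device registers with the crypto API and only the last removed one unregisters. A generic sketch of the pattern (do_register/do_unregister are hypothetical stand-ins for the per-driver registration helpers):

#include <linux/mutex.h>

static DEFINE_MUTEX(algs_lock);
static unsigned int available_devs;

static int do_register(void)    { return 0; }	/* hypothetical */
static void do_unregister(void) { }		/* hypothetical */

int algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (!available_devs) {
		ret = do_register();	/* first device registers */
		if (ret)
			goto unlock;
	}
	available_devs++;
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}

void algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (!--available_devs)
		do_unregister();	/* last device unregisters */
	mutex_unlock(&algs_lock);
}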
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index b97ce0ee71..3255b2a070 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -107,6 +107,7 @@
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 24
+#define HPRE_CTX_Q_NUM_DEF 1
#define HPRE_DFX_BASE 0x301000
#define HPRE_DFX_COMMON1 0x301400
@@ -1411,10 +1412,11 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
dev_warn(&pdev->dev, "init debugfs fail!\n");
- ret = hisi_qm_alg_register(qm, &hpre_devices);
+ hisi_qm_add_list(qm, &hpre_devices);
+ ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
if (ret < 0) {
pci_err(pdev, "fail to register algs to crypto!\n");
- goto err_with_qm_start;
+ goto err_qm_del_list;
}
if (qm->uacce) {
@@ -1436,9 +1438,10 @@ static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_with_alg_register:
- hisi_qm_alg_unregister(qm, &hpre_devices);
+ hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
-err_with_qm_start:
+err_qm_del_list:
+ hisi_qm_del_list(qm, &hpre_devices);
hpre_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
@@ -1458,7 +1461,8 @@ static void hpre_remove(struct pci_dev *pdev)
hisi_qm_pm_uninit(qm);
hisi_qm_wait_task_finish(qm, &hpre_devices);
- hisi_qm_alg_unregister(qm, &hpre_devices);
+ hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
+ hisi_qm_del_list(qm, &hpre_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev, true);
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index e889363ed9..40da95dbab 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -46,7 +46,7 @@
#define QM_QC_PASID_ENABLE_SHIFT 7
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
-#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
+#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1)
/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT 0
@@ -58,7 +58,7 @@
#define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE 4
-#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
+#define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1)
/* eqc shift */
#define QM_EQE_AEQE_SIZE (2UL << 12)
@@ -69,6 +69,7 @@
#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17
+#define QM_AEQE_TYPE_MASK 0xf
#define QM_AEQE_CQN_MASK GENMASK(15, 0)
#define QM_CQ_OVERFLOW 0
#define QM_EQ_OVERFLOW 1
@@ -253,19 +254,6 @@
#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
-#define INIT_QC_COMMON(qc, base, pasid) do { \
- (qc)->head = 0; \
- (qc)->tail = 0; \
- (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
- (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
- (qc)->dw3 = 0; \
- (qc)->w8 = 0; \
- (qc)->rsvd0 = 0; \
- (qc)->pasid = cpu_to_le16(pasid); \
- (qc)->w11 = 0; \
- (qc)->rsvd1 = 0; \
-} while (0)
-
enum vft_type {
SQC_VFT = 0,
CQC_VFT,
@@ -701,6 +689,59 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
}
EXPORT_SYMBOL_GPL(hisi_qm_mb);
+/* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
+int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op)
+{
+ struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ struct qm_mailbox mailbox;
+ dma_addr_t xqc_dma;
+ void *tmp_xqc;
+ size_t size;
+ int ret;
+
+ switch (cmd) {
+ case QM_MB_CMD_SQC:
+ size = sizeof(struct qm_sqc);
+ tmp_xqc = qm->xqc_buf.sqc;
+ xqc_dma = qm->xqc_buf.sqc_dma;
+ break;
+ case QM_MB_CMD_CQC:
+ size = sizeof(struct qm_cqc);
+ tmp_xqc = qm->xqc_buf.cqc;
+ xqc_dma = qm->xqc_buf.cqc_dma;
+ break;
+ case QM_MB_CMD_EQC:
+ size = sizeof(struct qm_eqc);
+ tmp_xqc = qm->xqc_buf.eqc;
+ xqc_dma = qm->xqc_buf.eqc_dma;
+ break;
+ case QM_MB_CMD_AEQC:
+ size = sizeof(struct qm_aeqc);
+ tmp_xqc = qm->xqc_buf.aeqc;
+ xqc_dma = qm->xqc_buf.aeqc_dma;
+ break;
+ }
+
+ /* Setting xqc will fail if master OOO is blocked. */
+ if (qm_check_dev_error(pf_qm)) {
+ dev_err(&qm->pdev->dev, "failed to send mailbox since qm is stopped!\n");
+ return -EIO;
+ }
+
+ mutex_lock(&qm->mailbox_lock);
+ if (!op)
+ memcpy(tmp_xqc, xqc, size);
+
+ qm_mb_pre_init(&mailbox, cmd, xqc_dma, qp_id, op);
+ ret = qm_mb_nolock(qm, &mailbox);
+ if (!ret && op)
+ memcpy(xqc, tmp_xqc, size);
+
+ mutex_unlock(&qm->mailbox_lock);
+
+ return ret;
+}
+
static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
u64 doorbell;
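
qm_set_and_get_xqc() above replaces the per-call kzalloc + dma_map_single dance with a per-device reserved coherent buffer, copied to or from under mailbox_lock. A hypothetical caller sketch, assuming the driver-internal qm headers; note the context struct can now live on the caller's stack:

static int dump_one_sqc(struct hisi_qm *qm, u32 qp_id)
{
	struct qm_sqc sqc;
	int ret;

	/* op = 1: read the SQC back from hardware into &sqc. */
	ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
	if (ret)
		return ret;

	pr_info("sqc w11: 0x%x\n", le16_to_cpu(sqc.w11));
	return 0;
}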
@@ -1062,7 +1103,8 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
- type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
+ type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
+ QM_AEQE_TYPE_MASK;
qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
switch (type) {
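
The QM_AEQE_TYPE_MASK fix above matters because dw0 carries more than the type field. A standalone illustration of shift-then-mask extraction (values mirror the defines; the helper name is illustrative):

static inline u32 aeqe_type(u32 dw0)
{
	/* Shift the field down, then mask to its 4-bit width so stray
	 * high bits of dw0 cannot leak into the switch value.
	 */
	return (dw0 >> 17) & 0xf;	/* QM_AEQE_TYPE_SHIFT / _MASK */
}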
@@ -1346,45 +1388,6 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
return 0;
}
-void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
- dma_addr_t *dma_addr)
-{
- struct device *dev = &qm->pdev->dev;
- void *ctx_addr;
-
- ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
- if (!ctx_addr)
- return ERR_PTR(-ENOMEM);
-
- *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, *dma_addr)) {
- dev_err(dev, "DMA mapping error!\n");
- kfree(ctx_addr);
- return ERR_PTR(-ENOMEM);
- }
-
- return ctx_addr;
-}
-
-void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
- const void *ctx_addr, dma_addr_t *dma_addr)
-{
- struct device *dev = &qm->pdev->dev;
-
- dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
- kfree(ctx_addr);
-}
-
-static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
-{
- return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
-}
-
-static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
-{
- return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
-}
-
static void qm_hw_error_init_v1(struct hisi_qm *qm)
{
writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -1977,84 +1980,51 @@ static void hisi_qm_release_qp(struct hisi_qp *qp)
static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
struct hisi_qm *qm = qp->qm;
- struct device *dev = &qm->pdev->dev;
enum qm_hw_ver ver = qm->ver;
- struct qm_sqc *sqc;
- dma_addr_t sqc_dma;
- int ret;
-
- sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
- if (!sqc)
- return -ENOMEM;
+ struct qm_sqc sqc = {0};
- INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
if (ver == QM_HW_V1) {
- sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
- sqc->w8 = cpu_to_le16(qp->sq_depth - 1);
+ sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
+ sqc.w8 = cpu_to_le16(qp->sq_depth - 1);
} else {
- sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
- sqc->w8 = 0; /* rand_qc */
+ sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth));
+ sqc.w8 = 0; /* rand_qc */
}
- sqc->cq_num = cpu_to_le16(qp_id);
- sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
+ sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
+ sqc.base_l = cpu_to_le32(lower_32_bits(qp->sqe_dma));
+ sqc.base_h = cpu_to_le32(upper_32_bits(qp->sqe_dma));
+ sqc.cq_num = cpu_to_le16(qp_id);
+ sqc.pasid = cpu_to_le16(pasid);
if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
- sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
- QM_QC_PASID_ENABLE_SHIFT);
+ sqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
+ QM_QC_PASID_ENABLE_SHIFT);
- sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, sqc_dma)) {
- kfree(sqc);
- return -ENOMEM;
- }
-
- ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
- dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
- kfree(sqc);
-
- return ret;
+ return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);
}
static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
{
struct hisi_qm *qm = qp->qm;
- struct device *dev = &qm->pdev->dev;
enum qm_hw_ver ver = qm->ver;
- struct qm_cqc *cqc;
- dma_addr_t cqc_dma;
- int ret;
+ struct qm_cqc cqc = {0};
- cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
- if (!cqc)
- return -ENOMEM;
-
- INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
if (ver == QM_HW_V1) {
- cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
- QM_QC_CQE_SIZE));
- cqc->w8 = cpu_to_le16(qp->cq_depth - 1);
+ cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE));
+ cqc.w8 = cpu_to_le16(qp->cq_depth - 1);
} else {
- cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
- cqc->w8 = 0; /* rand_qc */
+ cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth));
+ cqc.w8 = 0; /* rand_qc */
}
- cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
+ cqc.dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
+ cqc.base_l = cpu_to_le32(lower_32_bits(qp->cqe_dma));
+ cqc.base_h = cpu_to_le32(upper_32_bits(qp->cqe_dma));
+ cqc.pasid = cpu_to_le16(pasid);
if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
- cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
-
- cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, cqc_dma)) {
- kfree(cqc);
- return -ENOMEM;
- }
+ cqc.w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
- ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
- dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
- kfree(cqc);
-
- return ret;
+ return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0);
}
static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
@@ -2144,14 +2114,11 @@ static void qp_stop_fail_cb(struct hisi_qp *qp)
*/
static int qm_drain_qp(struct hisi_qp *qp)
{
- size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
struct hisi_qm *qm = qp->qm;
struct device *dev = &qm->pdev->dev;
- struct qm_sqc *sqc;
- struct qm_cqc *cqc;
- dma_addr_t dma_addr;
- int ret = 0, i = 0;
- void *addr;
+ struct qm_sqc sqc;
+ struct qm_cqc cqc;
+ int ret, i = 0;
/* No need to judge if master OOO is blocked. */
if (qm_check_dev_error(qm))
@@ -2165,44 +2132,32 @@ static int qm_drain_qp(struct hisi_qp *qp)
return ret;
}
- addr = hisi_qm_ctx_alloc(qm, size, &dma_addr);
- if (IS_ERR(addr)) {
- dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
- return -ENOMEM;
- }
-
while (++i) {
- ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
+ ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp->qp_id, 1);
if (ret) {
dev_err_ratelimited(dev, "Failed to dump sqc!\n");
- break;
+ return ret;
}
- sqc = addr;
- ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
- qp->qp_id);
+ ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp->qp_id, 1);
if (ret) {
dev_err_ratelimited(dev, "Failed to dump cqc!\n");
- break;
+ return ret;
}
- cqc = addr + sizeof(struct qm_sqc);
- if ((sqc->tail == cqc->tail) &&
+ if ((sqc.tail == cqc.tail) &&
(QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
break;
if (i == MAX_WAIT_COUNTS) {
dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
- ret = -EBUSY;
- break;
+ return -EBUSY;
}
usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
}
- hisi_qm_ctx_free(qm, size, addr, &dma_addr);
-
- return ret;
+ return 0;
}
static int qm_stop_qp_nolock(struct hisi_qp *qp)
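
The reworked qm_drain_qp() above is a bounded-poll loop: read both contexts back, compare tails, sleep, and give up after a fixed number of rounds. The same loop restated as a sketch with an explicit for-loop (names reuse the document's macros and helper):

static int poll_until_drained(struct hisi_qm *qm, u32 qp_id)
{
	struct qm_sqc sqc;
	struct qm_cqc cqc;
	int ret, i;

	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
		ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
		if (!ret)
			ret = qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 1);
		if (ret)
			return ret;

		/* The queue is empty once both tail pointers agree. */
		if (sqc.tail == cqc.tail &&
		    QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))
			return 0;

		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
	}

	return -EBUSY;
}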
@@ -2914,11 +2869,20 @@ static void hisi_qm_unint_work(struct hisi_qm *qm)
destroy_workqueue(qm->wq);
}
+static void hisi_qm_free_rsv_buf(struct hisi_qm *qm)
+{
+ struct qm_dma *xqc_dma = &qm->xqc_buf.qcdma;
+ struct device *dev = &qm->pdev->dev;
+
+ dma_free_coherent(dev, xqc_dma->size, xqc_dma->va, xqc_dma->dma);
+}
+
static void hisi_qm_memory_uninit(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
hisi_qp_memory_uninit(qm, qm->qp_num);
+ hisi_qm_free_rsv_buf(qm);
if (qm->qdma.va) {
hisi_qm_cache_wb(qm);
dma_free_coherent(dev, qm->qdma.size,
@@ -3040,62 +3004,26 @@ static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
static int qm_eq_ctx_cfg(struct hisi_qm *qm)
{
- struct device *dev = &qm->pdev->dev;
- struct qm_eqc *eqc;
- dma_addr_t eqc_dma;
- int ret;
-
- eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
- if (!eqc)
- return -ENOMEM;
+ struct qm_eqc eqc = {0};
- eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
- eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
+ eqc.base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
+ eqc.base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
if (qm->ver == QM_HW_V1)
- eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
- eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
-
- eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, eqc_dma)) {
- kfree(eqc);
- return -ENOMEM;
- }
+ eqc.dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
+ eqc.dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
- ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
- dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
- kfree(eqc);
-
- return ret;
+ return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0);
}
static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
{
- struct device *dev = &qm->pdev->dev;
- struct qm_aeqc *aeqc;
- dma_addr_t aeqc_dma;
- int ret;
-
- aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
- if (!aeqc)
- return -ENOMEM;
-
- aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
- aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
- aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
+ struct qm_aeqc aeqc = {0};
- aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, aeqc_dma)) {
- kfree(aeqc);
- return -ENOMEM;
- }
+ aeqc.base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
+ aeqc.base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
+ aeqc.dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT));
- ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
- dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
- kfree(aeqc);
-
- return ret;
+ return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0);
}
static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
@@ -4885,63 +4813,48 @@ static void qm_cmd_process(struct work_struct *cmd_process)
}
/**
- * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
+ * hisi_qm_alg_register() - Register alg to crypto.
* @qm: The qm needs add.
* @qm_list: The qm list.
+ * @guard: Minimum number of queue pairs required for registration.
*
- * This function adds qm to qm list, and will register algorithm to
- * crypto when the qm list is empty.
+ * Register algorithms to crypto only when the function satisfies the guard.
*/
-int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
{
struct device *dev = &qm->pdev->dev;
- int flag = 0;
- int ret = 0;
-
- mutex_lock(&qm_list->lock);
- if (list_empty(&qm_list->list))
- flag = 1;
- list_add_tail(&qm->list, &qm_list->list);
- mutex_unlock(&qm_list->lock);
if (qm->ver <= QM_HW_V2 && qm->use_sva) {
dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n");
return 0;
}
- if (flag) {
- ret = qm_list->register_to_crypto(qm);
- if (ret) {
- mutex_lock(&qm_list->lock);
- list_del(&qm->list);
- mutex_unlock(&qm_list->lock);
- }
+ if (qm->qp_num < guard) {
+ dev_info(dev, "qp_num is less than the task requires.\n");
+ return 0;
}
- return ret;
+ return qm_list->register_to_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
/**
- * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
- * qm list.
+ * hisi_qm_alg_unregister() - Unregister alg from crypto.
* @qm: The qm needs delete.
* @qm_list: The qm list.
+ * @guard: Minimum number of queue pairs required for registration.
*
- * This function deletes qm from qm list, and will unregister algorithm
- * from crypto when the qm list is empty.
+ * Unregister algorithms from crypto when the last function satisfying the guard is removed.
*/
-void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard)
{
- mutex_lock(&qm_list->lock);
- list_del(&qm->list);
- mutex_unlock(&qm_list->lock);
-
if (qm->ver <= QM_HW_V2 && qm->use_sva)
return;
- if (list_empty(&qm_list->list))
- qm_list->unregister_from_crypto(qm);
+ if (qm->qp_num < guard)
+ return;
+
+ qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
@@ -5364,6 +5277,36 @@ err_init_qp_mem:
return ret;
}
+static int hisi_qm_alloc_rsv_buf(struct hisi_qm *qm)
+{
+ struct qm_rsv_buf *xqc_buf = &qm->xqc_buf;
+ struct qm_dma *xqc_dma = &xqc_buf->qcdma;
+ struct device *dev = &qm->pdev->dev;
+ size_t off = 0;
+
+#define QM_XQC_BUF_INIT(xqc_buf, type) do { \
+ (xqc_buf)->type = ((xqc_buf)->qcdma.va + (off)); \
+ (xqc_buf)->type##_dma = (xqc_buf)->qcdma.dma + (off); \
+ off += QMC_ALIGN(sizeof(struct qm_##type)); \
+} while (0)
+
+ xqc_dma->size = QMC_ALIGN(sizeof(struct qm_eqc)) +
+ QMC_ALIGN(sizeof(struct qm_aeqc)) +
+ QMC_ALIGN(sizeof(struct qm_sqc)) +
+ QMC_ALIGN(sizeof(struct qm_cqc));
+ xqc_dma->va = dma_alloc_coherent(dev, xqc_dma->size,
+ &xqc_dma->dma, GFP_KERNEL);
+ if (!xqc_dma->va)
+ return -ENOMEM;
+
+ QM_XQC_BUF_INIT(xqc_buf, eqc);
+ QM_XQC_BUF_INIT(xqc_buf, aeqc);
+ QM_XQC_BUF_INIT(xqc_buf, sqc);
+ QM_XQC_BUF_INIT(xqc_buf, cqc);
+
+ return 0;
+}
+
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
struct device *dev = &qm->pdev->dev;
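
hisi_qm_alloc_rsv_buf() above carves one coherent allocation into aligned per-command slices via QM_XQC_BUF_INIT. A self-contained sketch of that sub-allocation pattern with two hypothetical contexts (the rsv_buf type and the 32-byte alignment are assumptions for illustration):

#include <linux/dma-mapping.h>

#define SLICE_ALIGN(x)	ALIGN(x, 32)	/* stand-in for QMC_ALIGN() */

struct rsv_buf {
	void *va;
	dma_addr_t dma;
	size_t size;
	void *sqc;
	dma_addr_t sqc_dma;
	void *cqc;
	dma_addr_t cqc_dma;
};

static int alloc_rsv_buf(struct device *dev, struct rsv_buf *b,
			 size_t sqc_sz, size_t cqc_sz)
{
	size_t off = 0;

	b->size = SLICE_ALIGN(sqc_sz) + SLICE_ALIGN(cqc_sz);
	b->va = dma_alloc_coherent(dev, b->size, &b->dma, GFP_KERNEL);
	if (!b->va)
		return -ENOMEM;

	/* Each slice remembers a matching (virtual, DMA) address pair. */
	b->sqc = b->va + off;
	b->sqc_dma = b->dma + off;
	off += SLICE_ALIGN(sqc_sz);

	b->cqc = b->va + off;
	b->cqc_dma = b->dma + off;

	return 0;	/* freed later with one dma_free_coherent() call */
}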
@@ -5405,13 +5348,19 @@ static int hisi_qm_memory_init(struct hisi_qm *qm)
QM_INIT_BUF(qm, sqc, qm->qp_num);
QM_INIT_BUF(qm, cqc, qm->qp_num);
+ ret = hisi_qm_alloc_rsv_buf(qm);
+ if (ret)
+ goto err_free_qdma;
+
ret = hisi_qp_alloc_memory(qm);
if (ret)
- goto err_alloc_qp_array;
+ goto err_free_reserve_buf;
return 0;
-err_alloc_qp_array:
+err_free_reserve_buf:
+ hisi_qm_free_rsv_buf(qm);
+err_free_qdma:
dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_destroy_idr:
idr_destroy(&qm->qp_idr);
diff --git a/drivers/crypto/hisilicon/qm_common.h b/drivers/crypto/hisilicon/qm_common.h
index 8e36aa9c68..7b0b15c83e 100644
--- a/drivers/crypto/hisilicon/qm_common.h
+++ b/drivers/crypto/hisilicon/qm_common.h
@@ -76,10 +76,7 @@ static const char * const qm_s[] = {
"init", "start", "close", "stop",
};
-void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
- dma_addr_t *dma_addr);
-void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
- const void *ctx_addr, dma_addr_t *dma_addr);
+int qm_set_and_get_xqc(struct hisi_qm *qm, u8 cmd, void *xqc, u32 qp_id, bool op);
void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm);
void hisi_qm_set_algqos_init(struct hisi_qm *qm);
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index e1e08993de..afdddf87cc 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -1271,7 +1271,7 @@ queues_unconfig:
return ret;
}
-static int sec_remove(struct platform_device *pdev)
+static void sec_remove(struct platform_device *pdev)
{
struct sec_dev_info *info = platform_get_drvdata(pdev);
int i;
@@ -1287,8 +1287,6 @@ static int sec_remove(struct platform_device *pdev)
}
sec_base_exit(info);
-
- return 0;
}
static const __maybe_unused struct of_device_id sec_match[] = {
@@ -1306,7 +1304,7 @@ MODULE_DEVICE_TABLE(acpi, sec_acpi_match);
static struct platform_driver sec_driver = {
.probe = sec_probe,
- .remove = sec_remove,
+ .remove_new = sec_remove,
.driver = {
.name = "hisi_sec_platform_driver",
.of_match_table = sec_match,
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index c3a630cb27..ba7f305d43 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -104,6 +104,9 @@
#define IV_CTR_INIT 0x1
#define IV_BYTE_OFFSET 0x8
+static DEFINE_MUTEX(sec_algs_lock);
+static unsigned int sec_available_devs;
+
struct sec_skcipher {
u64 alg_msk;
struct skcipher_alg alg;
@@ -1011,6 +1014,7 @@ static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
ret = sec_aead_mac_init(a_req);
if (unlikely(ret)) {
dev_err(dev, "fail to init mac data for ICV!\n");
+ hisi_acc_sg_buf_unmap(dev, src, req->in);
return ret;
}
}
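
The one-line sec fix above closes a DMA-mapping leak: once sec_cipher_map() has mapped the source list, the late MAC-init failure must unmap it before returning. The rule as a generic sketch (struct my_req and the helpers are hypothetical):

struct my_req { int dummy; };

static int map_src(struct device *dev, struct my_req *r)    { return 0; }	/* hypothetical */
static void unmap_src(struct device *dev, struct my_req *r) { }		/* hypothetical */
static int init_mac(struct my_req *r)                       { return 0; }	/* hypothetical */

static int map_and_init(struct device *dev, struct my_req *r)
{
	int ret = map_src(dev, r);

	if (ret)
		return ret;

	ret = init_mac(r);
	if (ret)
		unmap_src(dev, r);	/* undo the earlier mapping */

	return ret;
}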
@@ -2549,15 +2553,29 @@ int sec_register_to_crypto(struct hisi_qm *qm)
alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
SEC_DRV_ALG_BITMAP_LOW_IDX);
+ mutex_lock(&sec_algs_lock);
+ if (sec_available_devs) {
+ sec_available_devs++;
+ goto unlock;
+ }
ret = sec_register_skcipher(alg_mask);
if (ret)
- return ret;
+ goto unlock;
ret = sec_register_aead(alg_mask);
if (ret)
- sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
+ goto unreg_skcipher;
+
+ sec_available_devs++;
+ mutex_unlock(&sec_algs_lock);
+ return 0;
+
+unreg_skcipher:
+ sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
+unlock:
+ mutex_unlock(&sec_algs_lock);
return ret;
}
@@ -2568,6 +2586,13 @@ void sec_unregister_from_crypto(struct hisi_qm *qm)
alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
SEC_DRV_ALG_BITMAP_LOW_IDX);
+ mutex_lock(&sec_algs_lock);
+ if (--sec_available_devs)
+ goto unlock;
+
sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
+
+unlock:
+ mutex_unlock(&sec_algs_lock);
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index bf02a6b2ee..878d94ab5d 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -1238,15 +1238,11 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
pci_warn(pdev, "Failed to init debugfs!\n");
- if (qm->qp_num >= ctx_q_num) {
- ret = hisi_qm_alg_register(qm, &sec_devices);
- if (ret < 0) {
- pr_err("Failed to register driver to crypto.\n");
- goto err_qm_stop;
- }
- } else {
- pci_warn(qm->pdev,
- "Failed to use kernel mode, qp not enough!\n");
+ hisi_qm_add_list(qm, &sec_devices);
+ ret = hisi_qm_alg_register(qm, &sec_devices, ctx_q_num);
+ if (ret < 0) {
+ pr_err("Failed to register driver to crypto.\n");
+ goto err_qm_del_list;
}
if (qm->uacce) {
@@ -1268,9 +1264,9 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_alg_unregister:
- if (qm->qp_num >= ctx_q_num)
- hisi_qm_alg_unregister(qm, &sec_devices);
-err_qm_stop:
+ hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num);
+err_qm_del_list:
+ hisi_qm_del_list(qm, &sec_devices);
sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
err_probe_uninit:
@@ -1287,8 +1283,8 @@ static void sec_remove(struct pci_dev *pdev)
hisi_qm_pm_uninit(qm);
hisi_qm_wait_task_finish(qm, &sec_devices);
- if (qm->qp_num >= ctx_q_num)
- hisi_qm_alg_unregister(qm, &sec_devices);
+ hisi_qm_alg_unregister(qm, &sec_devices, ctx_q_num);
+ hisi_qm_del_list(qm, &sec_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev, true);
diff --git a/drivers/crypto/hisilicon/trng/trng.c b/drivers/crypto/hisilicon/trng/trng.c
index 97e500db0a..451b167bcc 100644
--- a/drivers/crypto/hisilicon/trng/trng.c
+++ b/drivers/crypto/hisilicon/trng/trng.c
@@ -303,7 +303,7 @@ err_remove_from_list:
return ret;
}
-static int hisi_trng_remove(struct platform_device *pdev)
+static void hisi_trng_remove(struct platform_device *pdev)
{
struct hisi_trng *trng = platform_get_drvdata(pdev);
@@ -314,8 +314,6 @@ static int hisi_trng_remove(struct platform_device *pdev)
if (trng->ver != HISI_TRNG_VER_V1 &&
atomic_dec_return(&trng_active_devs) == 0)
crypto_unregister_rng(&hisi_trng_alg);
-
- return 0;
}
static const struct acpi_device_id hisi_trng_acpi_match[] = {
@@ -326,7 +324,7 @@ MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match);
static struct platform_driver hisi_trng_driver = {
.probe = hisi_trng_probe,
- .remove = hisi_trng_remove,
+ .remove_new = hisi_trng_remove,
.driver = {
.name = "hisi-trng-v2",
.acpi_match_table = ACPI_PTR(hisi_trng_acpi_match),
diff --git a/drivers/crypto/hisilicon/zip/zip_crypto.c b/drivers/crypto/hisilicon/zip/zip_crypto.c
index 6608971d10..c650c741a1 100644
--- a/drivers/crypto/hisilicon/zip/zip_crypto.c
+++ b/drivers/crypto/hisilicon/zip/zip_crypto.c
@@ -16,36 +16,17 @@
#define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M GENMASK(7, 0)
-#define HZIP_ALG_TYPE_ZLIB 0x02
-#define HZIP_ALG_TYPE_GZIP 0x03
+#define HZIP_ALG_TYPE_DEFLATE 0x01
#define HZIP_BUF_TYPE_M GENMASK(11, 8)
-#define HZIP_PBUFFER 0x0
#define HZIP_SGL 0x1
-#define HZIP_ZLIB_HEAD_SIZE 2
-#define HZIP_GZIP_HEAD_SIZE 10
-
-#define GZIP_HEAD_FHCRC_BIT BIT(1)
-#define GZIP_HEAD_FEXTRA_BIT BIT(2)
-#define GZIP_HEAD_FNAME_BIT BIT(3)
-#define GZIP_HEAD_FCOMMENT_BIT BIT(4)
-
-#define GZIP_HEAD_FLG_SHIFT 3
-#define GZIP_HEAD_FEXTRA_SHIFT 10
-#define GZIP_HEAD_FEXTRA_XLEN 2UL
-#define GZIP_HEAD_FHCRC_SIZE 2
-
-#define HZIP_GZIP_HEAD_BUF 256
#define HZIP_ALG_PRIORITY 300
#define HZIP_SGL_SGE_NR 10
-#define HZIP_ALG_ZLIB GENMASK(1, 0)
-#define HZIP_ALG_GZIP GENMASK(3, 2)
+#define HZIP_ALG_DEFLATE GENMASK(5, 4)
-static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
-static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
- 0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
-};
+static DEFINE_MUTEX(zip_algs_lock);
+static unsigned int zip_available_devs;
enum hisi_zip_alg_type {
HZIP_ALG_TYPE_COMP = 0,
@@ -59,21 +40,10 @@ enum {
};
#define COMP_NAME_TO_TYPE(alg_name) \
- (!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB : \
- !strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0) \
-
-#define TO_HEAD_SIZE(req_type) \
- (((req_type) == HZIP_ALG_TYPE_ZLIB) ? sizeof(zlib_head) : \
- ((req_type) == HZIP_ALG_TYPE_GZIP) ? sizeof(gzip_head) : 0) \
-
-#define TO_HEAD(req_type) \
- (((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \
- ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : NULL) \
+ (!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : 0)
struct hisi_zip_req {
struct acomp_req *req;
- u32 sskip;
- u32 dskip;
struct hisi_acc_hw_sgl *hw_src;
struct hisi_acc_hw_sgl *hw_dst;
dma_addr_t dma_src;
@@ -138,85 +108,8 @@ static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
-static u32 get_extra_field_size(const u8 *start)
-{
- return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
-}
-
-static u32 get_name_field_size(const u8 *start)
-{
- return strlen(start) + 1;
-}
-
-static u32 get_comment_field_size(const u8 *start)
-{
- return strlen(start) + 1;
-}
-
-static u32 __get_gzip_head_size(const u8 *src)
-{
- u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
- u32 size = GZIP_HEAD_FEXTRA_SHIFT;
-
- if (head_flg & GZIP_HEAD_FEXTRA_BIT)
- size += get_extra_field_size(src + size);
- if (head_flg & GZIP_HEAD_FNAME_BIT)
- size += get_name_field_size(src + size);
- if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
- size += get_comment_field_size(src + size);
- if (head_flg & GZIP_HEAD_FHCRC_BIT)
- size += GZIP_HEAD_FHCRC_SIZE;
-
- return size;
-}
-
-static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
-{
- char buf[HZIP_GZIP_HEAD_BUF];
-
- sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));
-
- return __get_gzip_head_size(buf);
-}
-
-static int add_comp_head(struct scatterlist *dst, u8 req_type)
-{
- int head_size = TO_HEAD_SIZE(req_type);
- const u8 *head = TO_HEAD(req_type);
- int ret;
-
- ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
- if (unlikely(ret != head_size)) {
- pr_err("the head size of buffer is wrong (%d)!\n", ret);
- return -ENOMEM;
- }
-
- return head_size;
-}
-
-static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
-{
- if (unlikely(!acomp_req->src || !acomp_req->slen))
- return -EINVAL;
-
- if (unlikely(req_type == HZIP_ALG_TYPE_GZIP &&
- acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
- return -EINVAL;
-
- switch (req_type) {
- case HZIP_ALG_TYPE_ZLIB:
- return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
- case HZIP_ALG_TYPE_GZIP:
- return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
- default:
- pr_err("request type does not support!\n");
- return -EINVAL;
- }
-}
-
-static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
- struct hisi_zip_qp_ctx *qp_ctx,
- size_t head_size, bool is_comp)
+static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
+ struct acomp_req *req)
{
struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
struct hisi_zip_req *q = req_q->q;
@@ -239,14 +132,6 @@ static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
req_cache->req_id = req_id;
req_cache->req = req;
- if (is_comp) {
- req_cache->sskip = 0;
- req_cache->dskip = head_size;
- } else {
- req_cache->sskip = head_size;
- req_cache->dskip = 0;
- }
-
return req_cache;
}
@@ -272,10 +157,8 @@ static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req
{
struct acomp_req *a_req = req->req;
- sqe->input_data_length = a_req->slen - req->sskip;
- sqe->dest_avail_out = a_req->dlen - req->dskip;
- sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip);
- sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip);
+ sqe->input_data_length = a_req->slen;
+ sqe->dest_avail_out = a_req->dlen;
}
static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
@@ -296,12 +179,7 @@ static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
sqe->dw9 = val;
}
-static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
-{
- sqe->dw13 = req->req_id;
-}
-
-static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
+static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
sqe->dw26 = req->req_id;
}
@@ -330,8 +208,8 @@ static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe
ops->fill_sqe_type(sqe, ops->sqe_type);
}
-static int hisi_zip_do_work(struct hisi_zip_req *req,
- struct hisi_zip_qp_ctx *qp_ctx)
+static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
+ struct hisi_zip_req *req)
{
struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
@@ -383,12 +261,7 @@ err_unmap_input:
return ret;
}
-static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe)
-{
- return sqe->dw13;
-}
-
-static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe)
+static u32 hisi_zip_get_tag(struct hisi_zip_sqe *sqe)
{
return sqe->dw26;
}
@@ -414,8 +287,8 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
u32 tag = ops->get_tag(sqe);
struct hisi_zip_req *req = req_q->q + tag;
struct acomp_req *acomp_req = req->req;
- u32 status, dlen, head_size;
int err = 0;
+ u32 status;
atomic64_inc(&dfx->recv_cnt);
status = ops->get_status(sqe);
@@ -427,13 +300,10 @@ static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
err = -EIO;
}
- dlen = ops->get_dstlen(sqe);
-
hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);
- head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
- acomp_req->dlen = dlen + head_size;
+ acomp_req->dlen = ops->get_dstlen(sqe);
if (acomp_req->base.complete)
acomp_request_complete(acomp_req, err);
@@ -447,22 +317,13 @@ static int hisi_zip_acompress(struct acomp_req *acomp_req)
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
- int head_size;
int ret;
- /* let's output compression head now */
- head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
- if (unlikely(head_size < 0)) {
- dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
- head_size);
- return head_size;
- }
-
- req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
+ req = hisi_zip_create_req(qp_ctx, acomp_req);
if (IS_ERR(req))
return PTR_ERR(req);
- ret = hisi_zip_do_work(req, qp_ctx);
+ ret = hisi_zip_do_work(qp_ctx, req);
if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
hisi_zip_remove_req(qp_ctx, req);
@@ -477,20 +338,13 @@ static int hisi_zip_adecompress(struct acomp_req *acomp_req)
struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
struct device *dev = &qp_ctx->qp->qm->pdev->dev;
struct hisi_zip_req *req;
- int head_size, ret;
-
- head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
- if (unlikely(head_size < 0)) {
- dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
- head_size);
- return head_size;
- }
+ int ret;
- req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
+ req = hisi_zip_create_req(qp_ctx, acomp_req);
if (IS_ERR(req))
return PTR_ERR(req);
- ret = hisi_zip_do_work(req, qp_ctx);
+ ret = hisi_zip_do_work(qp_ctx, req);
if (unlikely(ret != -EINPROGRESS)) {
dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
ret);
@@ -527,28 +381,15 @@ static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
hisi_qm_free_qps(&qp_ctx->qp, 1);
}
-static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
- .sqe_type = 0,
- .fill_addr = hisi_zip_fill_addr,
- .fill_buf_size = hisi_zip_fill_buf_size,
- .fill_buf_type = hisi_zip_fill_buf_type,
- .fill_req_type = hisi_zip_fill_req_type,
- .fill_tag = hisi_zip_fill_tag_v1,
- .fill_sqe_type = hisi_zip_fill_sqe_type,
- .get_tag = hisi_zip_get_tag_v1,
- .get_status = hisi_zip_get_status,
- .get_dstlen = hisi_zip_get_dstlen,
-};
-
-static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = {
+static const struct hisi_zip_sqe_ops hisi_zip_ops = {
.sqe_type = 0x3,
.fill_addr = hisi_zip_fill_addr,
.fill_buf_size = hisi_zip_fill_buf_size,
.fill_buf_type = hisi_zip_fill_buf_type,
.fill_req_type = hisi_zip_fill_req_type,
- .fill_tag = hisi_zip_fill_tag_v2,
+ .fill_tag = hisi_zip_fill_tag,
.fill_sqe_type = hisi_zip_fill_sqe_type,
- .get_tag = hisi_zip_get_tag_v2,
+ .get_tag = hisi_zip_get_tag,
.get_status = hisi_zip_get_status,
.get_dstlen = hisi_zip_get_dstlen,
};
@@ -584,10 +425,7 @@ static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int
qp_ctx->zip_dev = hisi_zip;
}
- if (hisi_zip->qm.ver < QM_HW_V3)
- hisi_zip_ctx->ops = &hisi_zip_ops_v1;
- else
- hisi_zip_ctx->ops = &hisi_zip_ops_v2;
+ hisi_zip_ctx->ops = &hisi_zip_ops;
return 0;
}
@@ -745,95 +583,67 @@ static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
hisi_zip_ctx_exit(ctx);
}
-static struct acomp_alg hisi_zip_acomp_zlib = {
- .init = hisi_zip_acomp_init,
- .exit = hisi_zip_acomp_exit,
- .compress = hisi_zip_acompress,
- .decompress = hisi_zip_adecompress,
- .base = {
- .cra_name = "zlib-deflate",
- .cra_driver_name = "hisi-zlib-acomp",
- .cra_module = THIS_MODULE,
- .cra_priority = HZIP_ALG_PRIORITY,
- .cra_ctxsize = sizeof(struct hisi_zip_ctx),
- }
-};
-
-static int hisi_zip_register_zlib(struct hisi_qm *qm)
-{
- int ret;
-
- if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
- return 0;
-
- ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret);
-
- return ret;
-}
-
-static void hisi_zip_unregister_zlib(struct hisi_qm *qm)
-{
- if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
- return;
-
- crypto_unregister_acomp(&hisi_zip_acomp_zlib);
-}
-
-static struct acomp_alg hisi_zip_acomp_gzip = {
+static struct acomp_alg hisi_zip_acomp_deflate = {
.init = hisi_zip_acomp_init,
.exit = hisi_zip_acomp_exit,
.compress = hisi_zip_acompress,
.decompress = hisi_zip_adecompress,
.base = {
- .cra_name = "gzip",
- .cra_driver_name = "hisi-gzip-acomp",
+ .cra_name = "deflate",
+ .cra_driver_name = "hisi-deflate-acomp",
.cra_module = THIS_MODULE,
- .cra_priority = HZIP_ALG_PRIORITY,
+ .cra_priority = HZIP_ALG_PRIORITY,
.cra_ctxsize = sizeof(struct hisi_zip_ctx),
}
};
-static int hisi_zip_register_gzip(struct hisi_qm *qm)
+static int hisi_zip_register_deflate(struct hisi_qm *qm)
{
int ret;
- if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
return 0;
- ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
+ ret = crypto_register_acomp(&hisi_zip_acomp_deflate);
if (ret)
- dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret);
+ dev_err(&qm->pdev->dev, "failed to register to deflate (%d)!\n", ret);
return ret;
}
-static void hisi_zip_unregister_gzip(struct hisi_qm *qm)
+static void hisi_zip_unregister_deflate(struct hisi_qm *qm)
{
- if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
+ if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
return;
- crypto_unregister_acomp(&hisi_zip_acomp_gzip);
+ crypto_unregister_acomp(&hisi_zip_acomp_deflate);
}
int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
int ret = 0;
- ret = hisi_zip_register_zlib(qm);
- if (ret)
- return ret;
+ mutex_lock(&zip_algs_lock);
+ if (zip_available_devs++)
+ goto unlock;
- ret = hisi_zip_register_gzip(qm);
+ ret = hisi_zip_register_deflate(qm);
if (ret)
- hisi_zip_unregister_zlib(qm);
+ zip_available_devs--;
+unlock:
+ mutex_unlock(&zip_algs_lock);
return ret;
}
void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
{
- hisi_zip_unregister_zlib(qm);
- hisi_zip_unregister_gzip(qm);
+ mutex_lock(&zip_algs_lock);
+ if (--zip_available_devs)
+ goto unlock;
+
+ hisi_zip_unregister_deflate(qm);
+
+unlock:
+ mutex_unlock(&zip_algs_lock);
}
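
With the zlib/gzip framing code gone, the zip driver now exposes a single raw-DEFLATE acomp algorithm and leaves any header handling to callers. A minimal sketch of registering such an algorithm (my_* names are hypothetical; the stubs only mark where the hardware paths go):

#include <crypto/internal/acompress.h>
#include <linux/module.h>

static int my_compress(struct acomp_req *req)   { return -EOPNOTSUPP; }	/* hw path */
static int my_decompress(struct acomp_req *req) { return -EOPNOTSUPP; }	/* hw path */

static struct acomp_alg my_deflate = {
	.compress = my_compress,
	.decompress = my_decompress,
	.base = {
		.cra_name = "deflate",	/* raw DEFLATE, no zlib/gzip framing */
		.cra_driver_name = "my-deflate-acomp",
		.cra_priority = 300,
		.cra_module = THIS_MODULE,
	},
};

/* paired calls: crypto_register_acomp(&my_deflate) on probe,
 * crypto_unregister_acomp(&my_deflate) on removal.
 */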
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index cd7ecb2180..403b074688 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -66,6 +66,7 @@
#define HZIP_SQE_SIZE 128
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
+#define HZIP_CTX_Q_NUM_DEF 2
#define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000
#define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0)
@@ -238,8 +239,8 @@ static struct hisi_qm_cap_info zip_basic_cap_info[] = {
{ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3},
{ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C},
{ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3},
- {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF},
- {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF},
+ {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x0, 0x30},
+ {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0x3F},
{ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
{ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5},
{ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A},
@@ -1310,10 +1311,11 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
pci_err(pdev, "failed to init debugfs (%d)!\n", ret);
- ret = hisi_qm_alg_register(qm, &zip_devices);
+ hisi_qm_add_list(qm, &zip_devices);
+ ret = hisi_qm_alg_register(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);
if (ret < 0) {
pci_err(pdev, "failed to register driver to crypto!\n");
- goto err_qm_stop;
+ goto err_qm_del_list;
}
if (qm->uacce) {
@@ -1335,9 +1337,10 @@ static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_qm_alg_unregister:
- hisi_qm_alg_unregister(qm, &zip_devices);
+ hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);
-err_qm_stop:
+err_qm_del_list:
+ hisi_qm_del_list(qm, &zip_devices);
hisi_zip_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);
@@ -1357,7 +1360,8 @@ static void hisi_zip_remove(struct pci_dev *pdev)
hisi_qm_pm_uninit(qm);
hisi_qm_wait_task_finish(qm, &zip_devices);
- hisi_qm_alg_unregister(qm, &zip_devices);
+ hisi_qm_alg_unregister(qm, &zip_devices, HZIP_CTX_Q_NUM_DEF);
+ hisi_qm_del_list(qm, &zip_devices);
if (qm->fun_type == QM_HW_PF && qm->vfs_num)
hisi_qm_sriov_disable(pdev, true);
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 4506369385..d269036bda 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -1043,7 +1043,7 @@ res_err:
return err;
}
-static int img_hash_remove(struct platform_device *pdev)
+static void img_hash_remove(struct platform_device *pdev)
{
struct img_hash_dev *hdev;
@@ -1061,8 +1061,6 @@ static int img_hash_remove(struct platform_device *pdev)
clk_disable_unprepare(hdev->hash_clk);
clk_disable_unprepare(hdev->sys_clk);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1101,7 +1099,7 @@ static const struct dev_pm_ops img_hash_pm_ops = {
static struct platform_driver img_hash_driver = {
.probe = img_hash_probe,
- .remove = img_hash_remove,
+ .remove_new = img_hash_remove,
.driver = {
.name = "img-hash-accelerator",
.pm = &img_hash_pm_ops,
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 9ff02b5abc..76da14af74 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1801,7 +1801,7 @@ err_core_clk:
return ret;
}
-static int safexcel_remove(struct platform_device *pdev)
+static void safexcel_remove(struct platform_device *pdev)
{
struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
int i;
@@ -1816,8 +1816,6 @@ static int safexcel_remove(struct platform_device *pdev)
irq_set_affinity_hint(priv->ring[i].irq, NULL);
destroy_workqueue(priv->ring[i].workqueue);
}
-
- return 0;
}
static const struct safexcel_priv_data eip97ies_mrvl_data = {
@@ -1874,7 +1872,7 @@ MODULE_DEVICE_TABLE(of, safexcel_of_match_table);
static struct platform_driver crypto_safexcel = {
.probe = safexcel_probe,
- .remove = safexcel_remove,
+ .remove_new = safexcel_remove,
.driver = {
.name = "crypto-safexcel",
.of_match_table = safexcel_of_match_table,
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
index 4a18095ae5..f8a77bff88 100644
--- a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -1563,7 +1563,7 @@ static int ixp_crypto_probe(struct platform_device *_pdev)
return 0;
}
-static int ixp_crypto_remove(struct platform_device *pdev)
+static void ixp_crypto_remove(struct platform_device *pdev)
{
int num = ARRAY_SIZE(ixp4xx_algos);
int i;
@@ -1578,8 +1578,6 @@ static int ixp_crypto_remove(struct platform_device *pdev)
crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
}
release_ixp_crypto(&pdev->dev);
-
- return 0;
}
static const struct of_device_id ixp4xx_crypto_of_match[] = {
{
@@ -1590,7 +1588,7 @@ static const struct of_device_id ixp4xx_crypto_of_match[] = {
static struct platform_driver ixp_crypto_driver = {
.probe = ixp_crypto_probe,
- .remove = ixp_crypto_remove,
+ .remove_new = ixp_crypto_remove,
.driver = {
.name = "ixp4xx_crypto",
.of_match_table = ixp4xx_crypto_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
index 1e2fd9a754..9b2d098e5e 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-aes-core.c
@@ -1562,7 +1562,7 @@ static const struct of_device_id kmb_ocs_aes_of_match[] = {
{}
};
-static int kmb_ocs_aes_remove(struct platform_device *pdev)
+static void kmb_ocs_aes_remove(struct platform_device *pdev)
{
struct ocs_aes_dev *aes_dev;
@@ -1575,8 +1575,6 @@ static int kmb_ocs_aes_remove(struct platform_device *pdev)
spin_unlock(&ocs_aes.lock);
crypto_engine_exit(aes_dev->engine);
-
- return 0;
}
static int kmb_ocs_aes_probe(struct platform_device *pdev)
@@ -1658,7 +1656,7 @@ list_del:
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_aes_driver = {
.probe = kmb_ocs_aes_probe,
- .remove = kmb_ocs_aes_remove,
+ .remove_new = kmb_ocs_aes_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_aes_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
index fb95deed90..5e24f2d8af 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-ecc.c
@@ -964,7 +964,7 @@ list_del:
return rc;
}
-static int kmb_ocs_ecc_remove(struct platform_device *pdev)
+static void kmb_ocs_ecc_remove(struct platform_device *pdev)
{
struct ocs_ecc_dev *ecc_dev;
@@ -978,8 +978,6 @@ static int kmb_ocs_ecc_remove(struct platform_device *pdev)
spin_unlock(&ocs_ecc.lock);
crypto_engine_exit(ecc_dev->engine);
-
- return 0;
}
/* Device tree driver match. */
@@ -993,7 +991,7 @@ static const struct of_device_id kmb_ocs_ecc_of_match[] = {
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_ecc_driver = {
.probe = kmb_ocs_ecc_probe,
- .remove = kmb_ocs_ecc_remove,
+ .remove_new = kmb_ocs_ecc_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_ecc_of_match,
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index daba8ca05d..c2dfca73fe 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -1151,24 +1151,17 @@ static const struct of_device_id kmb_ocs_hcu_of_match[] = {
{}
};
-static int kmb_ocs_hcu_remove(struct platform_device *pdev)
+static void kmb_ocs_hcu_remove(struct platform_device *pdev)
{
- struct ocs_hcu_dev *hcu_dev;
- int rc;
-
- hcu_dev = platform_get_drvdata(pdev);
- if (!hcu_dev)
- return -ENODEV;
+ struct ocs_hcu_dev *hcu_dev = platform_get_drvdata(pdev);
crypto_engine_unregister_ahashes(ocs_hcu_algs, ARRAY_SIZE(ocs_hcu_algs));
- rc = crypto_engine_exit(hcu_dev->engine);
+ crypto_engine_exit(hcu_dev->engine);
spin_lock_bh(&ocs_hcu.lock);
list_del(&hcu_dev->list);
spin_unlock_bh(&ocs_hcu.lock);
-
- return rc;
}
static int kmb_ocs_hcu_probe(struct platform_device *pdev)
@@ -1249,7 +1242,7 @@ list_del:
/* The OCS driver is a platform device. */
static struct platform_driver kmb_ocs_hcu_driver = {
.probe = kmb_ocs_hcu_probe,
- .remove = kmb_ocs_hcu_remove,
+ .remove_new = kmb_ocs_hcu_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = kmb_ocs_hcu_of_match,
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
index a5691ba0b7..0faedb5b2e 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -2,16 +2,18 @@
/* Copyright(c) 2020 - 2021 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
+#include <adf_admin.h>
#include <adf_cfg.h>
+#include <adf_cfg_services.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
+#include "adf_gen4_ras.h"
#include <adf_gen4_timer.h>
#include "adf_4xxx_hw_data.h"
-#include "adf_cfg_services.h"
#include "icp_qat_hw.h"
#define ADF_AE_GROUP_0 GENMASK(3, 0)
@@ -341,6 +343,24 @@ static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
return ADF_4XXX_KPT_COUNTER_FREQ;
}
+static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
+{
+ rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
+ rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
+ rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET;
+ rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET;
+ rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET;
+
+ rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV;
+ rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL;
+ rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION;
+ rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
+ rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
+ rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
+ rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC;
+ rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
+}
+
static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
@@ -519,6 +539,16 @@ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
return fw_config[obj_num].ae_mask;
}
+static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
+{
+ dev_err_mask->cppagentcmdpar_mask = ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK;
+ dev_err_mask->parerr_ath_cph_mask = ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK;
+ dev_err_mask->parerr_cpr_xlt_mask = ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK;
+ dev_err_mask->parerr_dcpr_ucs_mask = ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK;
+ dev_err_mask->parerr_pke_mask = ADF_4XXX_PARITYERRORMASK_PKE_MASK;
+ dev_err_mask->ssmfeatren_mask = ADF_4XXX_SSMFEATREN_MASK;
+}
+
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
hw_data->dev_class = &adf_4xxx_class;
@@ -582,10 +612,14 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
hw_data->stop_timer = adf_gen4_timer_stop;
hw_data->get_hb_clock = get_heartbeat_clock;
hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
+ hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
+ adf_gen4_set_err_mask(&hw_data->dev_err_mask);
adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
adf_gen4_init_dc_ops(&hw_data->dc_ops);
+ adf_gen4_init_ras_ops(&hw_data->ras_ops);
+ adf_init_rl_data(&hw_data->rl_data);
}
void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
index bb3d95a8fb..33423295e9 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -28,6 +28,23 @@
#define ADF_4XXX_ACCELENGINES_MASK (0x1FF)
#define ADF_4XXX_ADMIN_AE_MASK (0x100)
+#define ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1F
+#define ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK 0xF000F
+#define ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK 0x10001
+#define ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK 0x30007
+#define ADF_4XXX_PARITYERRORMASK_PKE_MASK 0x3F
+
+/*
+ * SSMFEATREN bit mask
+ * BIT(4) - enables parity detection on CPP
+ * BIT(12) - enables the logging of push/pull data errors
+ * in pperr register
+ * BIT(16) - BIT(23) - enable parity detection on SPPs
+ */
+#define ADF_4XXX_SSMFEATREN_MASK \
+ (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \
+ BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23))
+
#define ADF_4XXX_ETR_MAX_BANKS 64
/* MSIX interrupt */
@@ -65,8 +82,19 @@
#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
+/* RL constants */
+#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV 100
+#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL 102
+#define ADF_4XXX_RL_DCPR_CORRECTION 1
+#define ADF_4XXX_RL_SCANS_PER_SEC 954
+#define ADF_4XXX_RL_MAX_TP_ASYM 173750UL
+#define ADF_4XXX_RL_MAX_TP_SYM 95000UL
+#define ADF_4XXX_RL_MAX_TP_DC 45000UL
+#define ADF_4XXX_RL_SLICE_REF 1000UL
+
/* Clocks frequency */
-#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
+#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ)
+#define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ)
/* qat_4xxx fuse bits are different from old GENs, redefine them */
enum icp_qat_4xxx_slice_mask {
diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
index 90f5c1ca7b..8f483d1197 100644
--- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c
@@ -418,6 +418,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
+ accel_dev->ras_errors.enabled = true;
adf_dbgfs_init(accel_dev);
ret = adf_dev_up(accel_dev, true);
@@ -467,3 +468,4 @@ MODULE_FIRMWARE(ADF_4XXX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat");
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 9c00c441b6..a882e0ea22 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
+#include <adf_admin.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
index 468c910209..956a4c8560 100644
--- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c
@@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C3XXX_FW);
MODULE_FIRMWARE(ADF_C3XXX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
index d5a0ecca9d..a8de9cd09c 100644
--- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c
@@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
index 355a781693..48cf3eb7c7 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
+#include <adf_admin.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
index 0186921be9..ad0ca43849 100644
--- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c
@@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C62X_FW);
MODULE_FIRMWARE(ADF_C62X_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
index c9ae6c0d0d..53b8ddb633 100644
--- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c
@@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile
index 43622c7fca..779a8aa0b8 100644
--- a/drivers/crypto/intel/qat/qat_common/Makefile
+++ b/drivers/crypto/intel/qat/qat_common/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT
intel_qat-objs := adf_cfg.o \
adf_isr.o \
adf_ctl_drv.o \
+ adf_cfg_services.o \
adf_dev_mgr.o \
adf_init.o \
adf_accel_engine.o \
@@ -11,12 +13,14 @@ intel_qat-objs := adf_cfg.o \
adf_admin.o \
adf_hw_arbiter.o \
adf_sysfs.o \
+ adf_sysfs_ras_counters.o \
adf_gen2_hw_data.o \
adf_gen2_config.o \
adf_gen4_hw_data.o \
adf_gen4_pm.o \
adf_gen2_dc.o \
adf_gen4_dc.o \
+ adf_gen4_ras.o \
adf_gen4_timer.o \
adf_clock.o \
qat_crypto.o \
@@ -25,14 +29,20 @@ intel_qat-objs := adf_cfg.o \
qat_algs.o \
qat_asym_algs.o \
qat_algs_send.o \
+ adf_rl.o \
+ adf_rl_admin.o \
+ adf_sysfs_rl.o \
qat_uclo.o \
qat_hal.o \
qat_bl.o
intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \
adf_fw_counters.o \
+ adf_cnv_dbgfs.o \
+ adf_gen4_pm_debugfs.o \
adf_heartbeat.o \
adf_heartbeat_dbgfs.o \
+ adf_pm_dbgfs.o \
adf_dbgfs.o
intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \
diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
index 79d5a1535e..9d5fdd529a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h
@@ -7,7 +7,9 @@
#include <linux/list.h>
#include <linux/io.h>
#include <linux/ratelimit.h>
+#include <linux/types.h>
#include "adf_cfg_common.h"
+#include "adf_rl.h"
#include "adf_pfvf_msg.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
@@ -81,6 +83,19 @@ enum dev_sku_info {
DEV_SKU_UNKNOWN,
};
+enum ras_errors {
+ ADF_RAS_CORR,
+ ADF_RAS_UNCORR,
+ ADF_RAS_FATAL,
+ ADF_RAS_ERRORS,
+};
+
+struct adf_error_counters {
+ atomic_t counter[ADF_RAS_ERRORS];
+ bool sysfs_added;
+ bool enabled;
+};
+
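The ADF_RAS_ERR_CTR_INC() macro used later in adf_gen4_ras.c is declared in adf_sysfs_ras_counters.h, which this diff does not show; presumably it reduces to an atomic increment of one of these counters. A sketch under that assumption:

    /* Assumed expansion: bump the correctable-error counter atomically */
    atomic_inc(&accel_dev->ras_errors.counter[ADF_RAS_CORR]);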
static inline const char *get_sku_info(enum dev_sku_info info)
{
switch (info) {
@@ -152,6 +167,13 @@ struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;
+struct adf_ras_ops {
+ void (*enable_ras_errors)(struct adf_accel_dev *accel_dev);
+ void (*disable_ras_errors)(struct adf_accel_dev *accel_dev);
+ bool (*handle_interrupt)(struct adf_accel_dev *accel_dev,
+ bool *reset_required);
+};
+
struct adf_pfvf_ops {
int (*enable_comms)(struct adf_accel_dev *accel_dev);
u32 (*get_pf2vf_offset)(u32 i);
@@ -169,6 +191,16 @@ struct adf_dc_ops {
void (*build_deflate_ctx)(void *ctx);
};
+struct adf_dev_err_mask {
+ u32 cppagentcmdpar_mask;
+ u32 parerr_ath_cph_mask;
+ u32 parerr_cpr_xlt_mask;
+ u32 parerr_dcpr_ucs_mask;
+ u32 parerr_pke_mask;
+ u32 parerr_wat_wcp_mask;
+ u32 ssmfeatren_mask;
+};
+
struct adf_hw_device_data {
struct adf_hw_device_class *dev_class;
u32 (*get_accel_mask)(struct adf_hw_device_data *self);
@@ -215,12 +247,16 @@ struct adf_hw_device_data {
struct adf_pfvf_ops pfvf_ops;
struct adf_hw_csr_ops csr_ops;
struct adf_dc_ops dc_ops;
+ struct adf_ras_ops ras_ops;
+ struct adf_dev_err_mask dev_err_mask;
+ struct adf_rl_hw_data rl_data;
const char *fw_name;
const char *fw_mmp_name;
u32 fuses;
u32 straps;
u32 accel_capabilities_mask;
u32 extended_dc_capabilities;
+ u16 fw_capabilities;
u32 clock_frequency;
u32 instance_id;
u16 accel_mask;
@@ -263,6 +299,7 @@ struct adf_hw_device_data {
#define GET_SRV_TYPE(accel_dev, idx) \
(((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
& ADF_SRV_TYPE_MASK)
+#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
@@ -292,6 +329,23 @@ struct adf_dc_data {
dma_addr_t ovf_buff_p;
};
+struct adf_pm {
+ struct dentry *debugfs_pm_status;
+ bool present;
+ int idle_irq_counters;
+ int throttle_irq_counters;
+ int fw_irq_counters;
+ int host_ack_counter;
+ int host_nack_counter;
+ ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev,
+ char __user *buf, size_t count, loff_t *pos);
+};
+
+struct adf_sysfs {
+ int ring_num;
+ struct rw_semaphore lock; /* protects access to the fields in this struct */
+};
+
struct adf_accel_dev {
struct adf_etr_data *transport;
struct adf_hw_device_data *hw_device;
@@ -299,17 +353,21 @@ struct adf_accel_dev {
struct adf_fw_loader_data *fw_loader;
struct adf_admin_comms *admin;
struct adf_dc_data *dc_data;
+ struct adf_pm power_management;
struct list_head crypto_list;
struct list_head compression_list;
unsigned long status;
atomic_t ref_count;
struct dentry *debugfs_dir;
struct dentry *fw_cntr_dbgfile;
+ struct dentry *cnv_dbgfile;
struct list_head list;
struct module *owner;
struct adf_accel_pci accel_pci_dev;
struct adf_timer *timer;
struct adf_heartbeat *heartbeat;
+ struct adf_rl *rate_limiting;
+ struct adf_sysfs sysfs;
union {
struct {
/* protects VF2PF interrupts access */
@@ -327,6 +385,7 @@ struct adf_accel_dev {
u8 pf_compat_ver;
} vf;
};
+ struct adf_error_counters ras_errors;
struct mutex state_lock; /* protect state of the device */
bool is_vf;
u32 accel_id;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c
index 194d64d4b9..54b673ec23 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_admin.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c
@@ -7,6 +7,7 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
+#include "adf_admin.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_heartbeat.h"
@@ -309,6 +310,73 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev)
return !strcmp(services, "dcc");
}
+static int adf_get_fw_capabilities(struct adf_accel_dev *accel_dev, u16 *caps)
+{
+ u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ int ret;
+
+ if (!ae_mask)
+ return 0;
+
+ req.cmd_id = ICP_QAT_FW_CAPABILITIES_GET;
+ ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ if (ret)
+ return ret;
+
+ *caps = resp.fw_capabilities;
+
+ return 0;
+}
+
+int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev,
+ struct icp_qat_fw_init_admin_slice_cnt *slices)
+{
+ u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_RL_INIT;
+
+ ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ if (ret)
+ return ret;
+
+ memcpy(slices, &resp.slices, sizeof(*slices));
+
+ return 0;
+}
+
+int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev,
+ struct icp_qat_fw_init_admin_req *req)
+{
+ u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
+ struct icp_qat_fw_init_admin_resp resp = { };
+
+	/*
+	 * The req struct is filled by the rate limiting implementation.
+	 * Commands used:
+	 * ICP_QAT_FW_RL_ADD to create a new SLA
+	 * ICP_QAT_FW_RL_UPDATE to update an existing SLA
+	 */
+ return adf_send_admin(accel_dev, req, &resp, ae_mask);
+}
+
+int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id,
+ u8 node_type)
+{
+ u32 ae_mask = accel_dev->hw_device->admin_ae_mask;
+ struct icp_qat_fw_init_admin_resp resp = { };
+ struct icp_qat_fw_init_admin_req req = { };
+
+ req.cmd_id = ICP_QAT_FW_RL_REMOVE;
+ req.node_id = node_id;
+ req.node_type = node_type;
+
+ return adf_send_admin(accel_dev, &req, &resp, ae_mask);
+}
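A hypothetical caller sketch for the add/update path described in the comment above; the SLA fields themselves are filled by the rate limiting code (adf_rl.c), which this hunk does not show:

    struct icp_qat_fw_init_admin_req req = { };
    int ret;

    req.cmd_id = ICP_QAT_FW_RL_ADD;     /* or ICP_QAT_FW_RL_UPDATE */
    /* ... SLA parameters filled in by the rate limiting code ... */
    ret = adf_send_admin_rl_add_update(accel_dev, &req);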
+
/**
* adf_send_admin_init() - Function sends init message to FW
* @accel_dev: Pointer to acceleration device.
@@ -319,6 +387,7 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev)
*/
int adf_send_admin_init(struct adf_accel_dev *accel_dev)
{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
u32 dc_capabilities = 0;
int ret;
@@ -339,6 +408,8 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev)
}
accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;
+ adf_get_fw_capabilities(accel_dev, &hw_data->fw_capabilities);
+
return adf_init_ae(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_send_admin_init);
@@ -379,6 +450,54 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay)
return adf_send_admin(accel_dev, &req, &resp, ae_mask);
}
+int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr,
+ size_t buff_size)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp;
+ u32 ae_mask = hw_data->admin_ae_mask;
+ int ret;
+
+ /* Query pm info via init/admin cmd */
+ if (!accel_dev->admin) {
+ dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n");
+ return -EFAULT;
+ }
+
+ req.cmd_id = ICP_QAT_FW_PM_INFO;
+ req.init_cfg_sz = buff_size;
+ req.init_cfg_ptr = p_state_addr;
+
+ ret = adf_send_admin(accel_dev, &req, &resp, ae_mask);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to query power-management info\n");
+
+ return ret;
+}
+
+int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt,
+ u16 *latest_err)
+{
+ struct icp_qat_fw_init_admin_req req = { };
+ struct icp_qat_fw_init_admin_resp resp;
+ int ret;
+
+ req.cmd_id = ICP_QAT_FW_CNV_STATS_GET;
+
+ ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp);
+ if (ret)
+ return ret;
+ if (resp.status)
+ return -EPROTONOSUPPORT;
+
+ *err_cnt = resp.error_count;
+ *latest_err = resp.latest_error;
+
+ return ret;
+}
+
int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
{
struct adf_admin_comms *admin;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h
new file mode 100644
index 0000000000..55cbcbc66c
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_ADMIN
+#define ADF_ADMIN
+
+#include "icp_qat_fw_init_admin.h"
+
+struct adf_accel_dev;
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_send_admin_init(struct adf_accel_dev *accel_dev);
+int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps);
+int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
+int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
+int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks);
+int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev,
+ struct icp_qat_fw_init_admin_slice_cnt *slices);
+int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev,
+ struct icp_qat_fw_init_admin_req *req);
+int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id,
+ u8 node_type);
+int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
+int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size);
+int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
new file mode 100644
index 0000000000..8e13fe9389
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/export.h>
+#include "adf_cfg_services.h"
+#include "adf_cfg_strings.h"
+
+const char *const adf_cfg_services[] = {
+ [SVC_CY] = ADF_CFG_CY,
+ [SVC_CY2] = ADF_CFG_ASYM_SYM,
+ [SVC_DC] = ADF_CFG_DC,
+ [SVC_DCC] = ADF_CFG_DCC,
+ [SVC_SYM] = ADF_CFG_SYM,
+ [SVC_ASYM] = ADF_CFG_ASYM,
+ [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
+ [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
+ [SVC_DC_SYM] = ADF_CFG_DC_SYM,
+ [SVC_SYM_DC] = ADF_CFG_SYM_DC,
+};
+EXPORT_SYMBOL_GPL(adf_cfg_services);
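With the array now exported and SVC_COUNT added to the enum (see the header change below), a consumer can map a configured service string back to its enum value; a minimal sketch using the kernel's match_string():

    /* 'services' is an illustrative string read from the device config */
    int svc = match_string(adf_cfg_services, SVC_COUNT, services);

    if (svc < 0)
            return -EINVAL;         /* unknown service name */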
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
index b353d40c5c..f78fd697b4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h
@@ -16,19 +16,9 @@ enum adf_services {
SVC_ASYM_DC,
SVC_DC_SYM,
SVC_SYM_DC,
+ SVC_COUNT
};
-static const char *const adf_cfg_services[] = {
- [SVC_CY] = ADF_CFG_CY,
- [SVC_CY2] = ADF_CFG_ASYM_SYM,
- [SVC_DC] = ADF_CFG_DC,
- [SVC_DCC] = ADF_CFG_DCC,
- [SVC_SYM] = ADF_CFG_SYM,
- [SVC_ASYM] = ADF_CFG_ASYM,
- [SVC_DC_ASYM] = ADF_CFG_DC_ASYM,
- [SVC_ASYM_DC] = ADF_CFG_ASYM_DC,
- [SVC_DC_SYM] = ADF_CFG_DC_SYM,
- [SVC_SYM_DC] = ADF_CFG_SYM_DC,
-};
+extern const char *const adf_cfg_services[SVC_COUNT];
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c
index dc0778691e..01e0a389e4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_clock.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/units.h>
#include <asm/errno.h>
+#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_clock.h"
#include "adf_common_drv.h"
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
new file mode 100644
index 0000000000..07119c487d
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+
+#include "adf_accel_devices.h"
+#include "adf_admin.h"
+#include "adf_common_drv.h"
+#include "adf_cnv_dbgfs.h"
+#include "qat_compression.h"
+
+#define CNV_DEBUGFS_FILENAME "cnv_errors"
+#define CNV_MIN_PADDING 16
+
+#define CNV_ERR_INFO_MASK GENMASK(11, 0)
+#define CNV_ERR_TYPE_MASK GENMASK(15, 12)
+#define CNV_SLICE_ERR_MASK GENMASK(7, 0)
+#define CNV_SLICE_ERR_SIGN_BIT_INDEX 7
+#define CNV_DELTA_ERR_SIGN_BIT_INDEX 11
+
+enum cnv_error_type {
+ CNV_ERR_TYPE_NONE,
+ CNV_ERR_TYPE_CHECKSUM,
+ CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH,
+ CNV_ERR_TYPE_DECOMPRESSION,
+ CNV_ERR_TYPE_TRANSLATION,
+ CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH,
+ CNV_ERR_TYPE_UNKNOWN,
+ CNV_ERR_TYPES_COUNT
+};
+
+#define CNV_ERROR_TYPE_GET(latest_err) \
+ min_t(u16, u16_get_bits(latest_err, CNV_ERR_TYPE_MASK), CNV_ERR_TYPE_UNKNOWN)
+
+#define CNV_GET_DELTA_ERR_INFO(latest_error) \
+ sign_extend32(latest_error, CNV_DELTA_ERR_SIGN_BIT_INDEX)
+
+#define CNV_GET_SLICE_ERR_INFO(latest_error) \
+ sign_extend32(latest_error, CNV_SLICE_ERR_SIGN_BIT_INDEX)
+
+#define CNV_GET_DEFAULT_ERR_INFO(latest_error) \
+ u16_get_bits(latest_error, CNV_ERR_INFO_MASK)
+
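A worked decode of a raw latest_err word under these masks (the value is illustrative): for 0x3ffe, bits 15:12 give type 0x3 (CNV_ERR_TYPE_DECOMPRESSION), and the slice error info sign-extends from bit 7:

    u16 latest_err = 0x3ffe;                        /* example value */
    u8 type = CNV_ERROR_TYPE_GET(latest_err);       /* 0x3: decompression */
    s16 info = CNV_GET_SLICE_ERR_INFO(latest_err);  /* 0xfe -> -2 */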
+enum cnv_fields {
+ CNV_ERR_COUNT,
+ CNV_LATEST_ERR,
+ CNV_FIELDS_COUNT
+};
+
+static const char * const cnv_field_names[CNV_FIELDS_COUNT] = {
+ [CNV_ERR_COUNT] = "Total Errors",
+ [CNV_LATEST_ERR] = "Last Error",
+};
+
+static const char * const cnv_error_names[CNV_ERR_TYPES_COUNT] = {
+ [CNV_ERR_TYPE_NONE] = "No Error",
+ [CNV_ERR_TYPE_CHECKSUM] = "Checksum Error",
+ [CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH] = "Length Error-P",
+ [CNV_ERR_TYPE_DECOMPRESSION] = "Decomp Error",
+ [CNV_ERR_TYPE_TRANSLATION] = "Xlat Error",
+ [CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH] = "Length Error-C",
+ [CNV_ERR_TYPE_UNKNOWN] = "Unknown Error",
+};
+
+struct ae_cnv_errors {
+ u16 ae;
+ u16 err_cnt;
+ u16 latest_err;
+ bool is_comp_ae;
+};
+
+struct cnv_err_stats {
+ u16 ae_count;
+ struct ae_cnv_errors ae_cnv_errors[];
+};
+
+static s16 get_err_info(u8 error_type, u16 latest)
+{
+ switch (error_type) {
+ case CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH:
+ case CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH:
+ return CNV_GET_DELTA_ERR_INFO(latest);
+ case CNV_ERR_TYPE_DECOMPRESSION:
+ case CNV_ERR_TYPE_TRANSLATION:
+ return CNV_GET_SLICE_ERR_INFO(latest);
+ default:
+ return CNV_GET_DEFAULT_ERR_INFO(latest);
+ }
+}
+
+static void *qat_cnv_errors_seq_start(struct seq_file *sfile, loff_t *pos)
+{
+ struct cnv_err_stats *err_stats = sfile->private;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ if (*pos > err_stats->ae_count)
+ return NULL;
+
+ return &err_stats->ae_cnv_errors[*pos - 1];
+}
+
+static void *qat_cnv_errors_seq_next(struct seq_file *sfile, void *v,
+ loff_t *pos)
+{
+ struct cnv_err_stats *err_stats = sfile->private;
+
+ (*pos)++;
+
+ if (*pos > err_stats->ae_count)
+ return NULL;
+
+ return &err_stats->ae_cnv_errors[*pos - 1];
+}
+
+static void qat_cnv_errors_seq_stop(struct seq_file *sfile, void *v)
+{
+}
+
+static int qat_cnv_errors_seq_show(struct seq_file *sfile, void *v)
+{
+ struct ae_cnv_errors *ae_errors;
+ unsigned int i;
+ s16 err_info;
+ u8 err_type;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_puts(sfile, "AE ");
+ for (i = 0; i < CNV_FIELDS_COUNT; ++i)
+ seq_printf(sfile, " %*s", CNV_MIN_PADDING,
+ cnv_field_names[i]);
+ } else {
+ ae_errors = v;
+
+ if (!ae_errors->is_comp_ae)
+ return 0;
+
+ err_type = CNV_ERROR_TYPE_GET(ae_errors->latest_err);
+ err_info = get_err_info(err_type, ae_errors->latest_err);
+
+ seq_printf(sfile, "%d:", ae_errors->ae);
+ seq_printf(sfile, " %*d", CNV_MIN_PADDING, ae_errors->err_cnt);
+ seq_printf(sfile, "%*s [%d]", CNV_MIN_PADDING,
+ cnv_error_names[err_type], err_info);
+ }
+ seq_putc(sfile, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations qat_cnv_errors_sops = {
+ .start = qat_cnv_errors_seq_start,
+ .next = qat_cnv_errors_seq_next,
+ .stop = qat_cnv_errors_seq_stop,
+ .show = qat_cnv_errors_seq_show,
+};
+
+/**
+ * cnv_err_stats_alloc() - Get CNV stats for the provided device.
+ * @accel_dev: Pointer to a QAT acceleration device
+ *
+ * Allocates and populates a table of CNV error statistics for each non-admin AE
+ * available through the supplied acceleration device. The caller becomes the
+ * owner of this memory and is responsible for freeing it with kfree().
+ *
+ * Returns: a pointer to a dynamically allocated struct cnv_err_stats on success,
+ * or an error pointer (ERR_PTR) on failure.
+ */
+static struct cnv_err_stats *cnv_err_stats_alloc(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct cnv_err_stats *err_stats;
+ unsigned long ae_count;
+ unsigned long ae_mask;
+ size_t err_stats_size;
+ unsigned long ae;
+ unsigned int i;
+ u16 latest_err;
+ u16 err_cnt;
+ int ret;
+
+ if (!adf_dev_started(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "QAT Device not started\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* Ignore the admin AEs */
+ ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask;
+ ae_count = hweight_long(ae_mask);
+ if (unlikely(!ae_count))
+ return ERR_PTR(-EINVAL);
+
+ err_stats_size = struct_size(err_stats, ae_cnv_errors, ae_count);
+ err_stats = kmalloc(err_stats_size, GFP_KERNEL);
+ if (!err_stats)
+ return ERR_PTR(-ENOMEM);
+
+ err_stats->ae_count = ae_count;
+
+ i = 0;
+ for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
+ ret = adf_get_cnv_stats(accel_dev, ae, &err_cnt, &latest_err);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Failed to get CNV stats for ae %ld, [%d].\n",
+ ae, ret);
+ err_stats->ae_cnv_errors[i++].is_comp_ae = false;
+ continue;
+ }
+ err_stats->ae_cnv_errors[i].is_comp_ae = true;
+ err_stats->ae_cnv_errors[i].latest_err = latest_err;
+ err_stats->ae_cnv_errors[i].err_cnt = err_cnt;
+ err_stats->ae_cnv_errors[i].ae = ae;
+ i++;
+ }
+
+ return err_stats;
+}
+
+static int qat_cnv_errors_file_open(struct inode *inode, struct file *file)
+{
+ struct adf_accel_dev *accel_dev = inode->i_private;
+ struct seq_file *cnv_errors_seq_file;
+ struct cnv_err_stats *cnv_err_stats;
+ int ret;
+
+ cnv_err_stats = cnv_err_stats_alloc(accel_dev);
+ if (IS_ERR(cnv_err_stats))
+ return PTR_ERR(cnv_err_stats);
+
+ ret = seq_open(file, &qat_cnv_errors_sops);
+ if (unlikely(ret)) {
+ kfree(cnv_err_stats);
+ return ret;
+ }
+
+ cnv_errors_seq_file = file->private_data;
+ cnv_errors_seq_file->private = cnv_err_stats;
+ return ret;
+}
+
+static int qat_cnv_errors_file_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *cnv_errors_seq_file = file->private_data;
+
+ kfree(cnv_errors_seq_file->private);
+ cnv_errors_seq_file->private = NULL;
+
+ return seq_release(inode, file);
+}
+
+static const struct file_operations qat_cnv_fops = {
+ .owner = THIS_MODULE,
+ .open = qat_cnv_errors_file_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = qat_cnv_errors_file_release,
+};
+
+static ssize_t no_comp_file_read(struct file *f, char __user *buf, size_t count,
+ loff_t *pos)
+{
+	const char *file_msg = "No engine configured for comp\n";
+
+ return simple_read_from_buffer(buf, count, pos, file_msg,
+ strlen(file_msg));
+}
+
+static const struct file_operations qat_cnv_no_comp_fops = {
+ .owner = THIS_MODULE,
+ .read = no_comp_file_read,
+};
+
+void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ const struct file_operations *fops;
+ void *data;
+
+ if (adf_hw_dev_has_compression(accel_dev)) {
+ fops = &qat_cnv_fops;
+ data = accel_dev;
+ } else {
+ fops = &qat_cnv_no_comp_fops;
+ data = NULL;
+ }
+
+ accel_dev->cnv_dbgfile = debugfs_create_file(CNV_DEBUGFS_FILENAME, 0400,
+ accel_dev->debugfs_dir,
+ data, fops);
+}
+
+void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ debugfs_remove(accel_dev->cnv_dbgfile);
+ accel_dev->cnv_dbgfile = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h
new file mode 100644
index 0000000000..b02b0961c4
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_CNV_DBG_H
+#define ADF_CNV_DBG_H
+
+struct adf_accel_dev;
+
+void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev);
+
+#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
index 79ff798237..f06188033a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h
@@ -87,14 +87,6 @@ void adf_reset_flr(struct adf_accel_dev *accel_dev);
void adf_dev_restore(struct adf_accel_dev *accel_dev);
int adf_init_aer(void);
void adf_exit_aer(void);
-int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
-void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
-int adf_send_admin_init(struct adf_accel_dev *accel_dev);
-int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps);
-int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
-int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
-int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks);
-int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp);
int adf_init_arb(struct adf_accel_dev *accel_dev);
void adf_exit_arb(struct adf_accel_dev *accel_dev);
void adf_update_ring_arb(struct adf_etr_ring_data *ring);
@@ -246,4 +238,14 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev)
return pmisc->virt_addr;
}
+static inline void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *param;
+
+ param = &GET_BARS(accel_dev)[hw_data->get_sram_bar_id(hw_data)];
+
+ return param->virt_addr;
+}
+
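The new helper mirrors adf_get_pmisc_base() above; the ARAM window is used later in this series by the GEN4 RAS code, roughly like:

    void __iomem *aram_csr = adf_get_aram_base(accel_dev);

    /* e.g. clear the ARAM correctable-error CSR (see adf_gen4_ras.c) */
    ADF_CSR_WR(aram_csr, ADF_GEN4_REG_ARAMCERR, 0);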
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
index 04845f8d72..477efcc81a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c
@@ -5,9 +5,11 @@
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
+#include "adf_cnv_dbgfs.h"
#include "adf_dbgfs.h"
#include "adf_fw_counters.h"
#include "adf_heartbeat_dbgfs.h"
+#include "adf_pm_dbgfs.h"
/**
* adf_dbgfs_init() - add persistent debugfs entries
@@ -62,6 +64,8 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev)
if (!accel_dev->is_vf) {
adf_fw_counters_dbgfs_add(accel_dev);
adf_heartbeat_dbgfs_add(accel_dev);
+ adf_pm_dbgfs_add(accel_dev);
+ adf_cnv_dbgfs_add(accel_dev);
}
}
@@ -75,6 +79,8 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
return;
if (!accel_dev->is_vf) {
+ adf_cnv_dbgfs_rm(accel_dev);
+ adf_pm_dbgfs_rm(accel_dev);
adf_heartbeat_dbgfs_rm(accel_dev);
adf_fw_counters_dbgfs_rm(accel_dev);
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
index cb6e09ef5c..98fb7ccfed 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "adf_accel_devices.h"
+#include "adf_admin.h"
#include "adf_common_drv.h"
#include "adf_fw_counters.h"
@@ -34,7 +35,7 @@ struct adf_ae_counters {
struct adf_fw_counters {
u16 ae_count;
- struct adf_ae_counters ae_counters[];
+ struct adf_ae_counters ae_counters[] __counted_by(ae_count);
};
static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
index 02d7a019eb..1813fe1d5a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h
@@ -139,6 +139,13 @@ do { \
/* Number of heartbeat counter pairs */
#define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE
+/* Rate Limiting */
+#define ADF_GEN4_RL_R2L_OFFSET 0x508000
+#define ADF_GEN4_RL_L2C_OFFSET 0x509000
+#define ADF_GEN4_RL_C2S_OFFSET 0x508818
+#define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800
+#define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804
+
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev);
void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
index 34c6cd8e27..5dafd9a270 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c
@@ -2,7 +2,10 @@
/* Copyright(c) 2022 Intel Corporation */
#include <linux/bitfield.h>
#include <linux/iopoll.h>
+#include <linux/kernel.h>
+
#include "adf_accel_devices.h"
+#include "adf_admin.h"
#include "adf_common_drv.h"
#include "adf_gen4_pm.h"
#include "adf_cfg_strings.h"
@@ -10,11 +13,6 @@
#include "adf_gen4_hw_data.h"
#include "adf_cfg.h"
-enum qat_pm_host_msg {
- PM_NO_CHANGE = 0,
- PM_SET_MIN,
-};
-
struct adf_gen4_pm_data {
struct work_struct pm_irq_work;
struct adf_accel_dev *accel_dev;
@@ -25,6 +23,7 @@ static int send_host_msg(struct adf_accel_dev *accel_dev)
{
char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {};
void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct adf_pm *pm = &accel_dev->power_management;
bool pm_idle_support;
u32 msg;
int ret;
@@ -39,6 +38,11 @@ static int send_host_msg(struct adf_accel_dev *accel_dev)
if (ret)
pm_idle_support = true;
+ if (pm_idle_support)
+ pm->host_ack_counter++;
+ else
+ pm->host_nack_counter++;
+
/* Send HOST_MSG */
msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK,
pm_idle_support ? PM_SET_MIN : PM_NO_CHANGE);
@@ -59,17 +63,27 @@ static void pm_bh_handler(struct work_struct *work)
container_of(work, struct adf_gen4_pm_data, pm_irq_work);
struct adf_accel_dev *accel_dev = pm_data->accel_dev;
void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct adf_pm *pm = &accel_dev->power_management;
u32 pm_int_sts = pm_data->pm_int_sts;
u32 val;
/* PM Idle interrupt */
if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) {
+ pm->idle_irq_counters++;
/* Issue host message to FW */
if (send_host_msg(accel_dev))
dev_warn_ratelimited(&GET_DEV(accel_dev),
"Failed to send host msg to FW\n");
}
+ /* PM throttle interrupt */
+ if (pm_int_sts & ADF_GEN4_PM_THR_STS)
+ pm->throttle_irq_counters++;
+
+ /* PM fw interrupt */
+ if (pm_int_sts & ADF_GEN4_PM_FW_INT_STS)
+ pm->fw_irq_counters++;
+
/* Clear interrupt status */
ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts);
@@ -129,6 +143,9 @@ int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev)
if (ret)
return ret;
+ /* Initialize PM internal data */
+ adf_gen4_init_dev_pm_data(accel_dev);
+
/* Enable default PM interrupts: IDLE, THROTTLE */
val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
val |= ADF_GEN4_PM_INT_EN_DEFAULT;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
index c2768762cc..a49352b79a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h
@@ -3,7 +3,14 @@
#ifndef ADF_GEN4_PM_H
#define ADF_GEN4_PM_H
-#include "adf_accel_devices.h"
+#include <linux/bits.h>
+
+struct adf_accel_dev;
+
+enum qat_pm_host_msg {
+ PM_NO_CHANGE = 0,
+ PM_SET_MIN,
+};
/* Power management registers */
#define ADF_GEN4_PM_HOST_MSG (0x50A01C)
@@ -39,7 +46,48 @@
#define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7)
#define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1)
+/* PM CSRs fields masks */
+#define ADF_GEN4_PM_DOMAIN_POWER_GATED_MASK GENMASK(15, 0)
+#define ADF_GEN4_PM_SSM_PM_ENABLE_MASK GENMASK(15, 0)
+#define ADF_GEN4_PM_IDLE_FILTER_MASK GENMASK(5, 3)
+#define ADF_GEN4_PM_IDLE_ENABLE_MASK BIT(2)
+#define ADF_GEN4_PM_ENABLE_PM_MASK BIT(21)
+#define ADF_GEN4_PM_ENABLE_PM_IDLE_MASK BIT(22)
+#define ADF_GEN4_PM_ENABLE_DEEP_PM_IDLE_MASK BIT(23)
+#define ADF_GEN4_PM_CURRENT_WP_MASK GENMASK(19, 11)
+#define ADF_GEN4_PM_CPM_PM_STATE_MASK GENMASK(22, 20)
+#define ADF_GEN4_PM_PENDING_WP_MASK GENMASK(31, 23)
+#define ADF_GEN4_PM_THR_VALUE_MASK GENMASK(6, 4)
+#define ADF_GEN4_PM_MIN_PWR_ACK_MASK BIT(7)
+#define ADF_GEN4_PM_MIN_PWR_ACK_PENDING_MASK BIT(17)
+#define ADF_GEN4_PM_CPR_ACTIVE_COUNT_MASK BIT(0)
+#define ADF_GEN4_PM_CPR_MANAGED_COUNT_MASK BIT(0)
+#define ADF_GEN4_PM_XLT_ACTIVE_COUNT_MASK BIT(1)
+#define ADF_GEN4_PM_XLT_MANAGED_COUNT_MASK BIT(1)
+#define ADF_GEN4_PM_DCPR_ACTIVE_COUNT_MASK GENMASK(3, 2)
+#define ADF_GEN4_PM_DCPR_MANAGED_COUNT_MASK GENMASK(3, 2)
+#define ADF_GEN4_PM_PKE_ACTIVE_COUNT_MASK GENMASK(8, 4)
+#define ADF_GEN4_PM_PKE_MANAGED_COUNT_MASK GENMASK(8, 4)
+#define ADF_GEN4_PM_WAT_ACTIVE_COUNT_MASK GENMASK(13, 9)
+#define ADF_GEN4_PM_WAT_MANAGED_COUNT_MASK GENMASK(13, 9)
+#define ADF_GEN4_PM_WCP_ACTIVE_COUNT_MASK GENMASK(18, 14)
+#define ADF_GEN4_PM_WCP_MANAGED_COUNT_MASK GENMASK(18, 14)
+#define ADF_GEN4_PM_UCS_ACTIVE_COUNT_MASK GENMASK(20, 19)
+#define ADF_GEN4_PM_UCS_MANAGED_COUNT_MASK GENMASK(20, 19)
+#define ADF_GEN4_PM_CPH_ACTIVE_COUNT_MASK GENMASK(24, 21)
+#define ADF_GEN4_PM_CPH_MANAGED_COUNT_MASK GENMASK(24, 21)
+#define ADF_GEN4_PM_ATH_ACTIVE_COUNT_MASK GENMASK(28, 25)
+#define ADF_GEN4_PM_ATH_MANAGED_COUNT_MASK GENMASK(28, 25)
+
int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev);
bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev);
+#ifdef CONFIG_DEBUG_FS
+void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev);
+#else
+static inline void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
#endif
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
new file mode 100644
index 0000000000..ee0b5079de
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/string_helpers.h>
+#include <linux/stringify.h>
+
+#include "adf_accel_devices.h"
+#include "adf_admin.h"
+#include "adf_common_drv.h"
+#include "adf_gen4_pm.h"
+#include "icp_qat_fw_init_admin.h"
+
+/*
+ * This is needed because pm_scnprint_table() indexes the mask with a
+ * variable, so the mask is not a compile-time constant and the build-time
+ * asserts in FIELD_GET() and u32_get_bits() would not be satisfied.
+ */
+#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
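To make the constraint concrete, a short sketch (the variables are illustrative): FIELD_GET() build-asserts that its mask is a compile-time constant, while the local field_get() is a plain shift-and-mask that also accepts runtime values:

    u32 runtime_mask = table[i].field_mask;         /* not a constant */

    u32 ok = field_get(runtime_mask, reg);          /* fine at runtime */
    /* u32 bad = FIELD_GET(runtime_mask, reg);      fails the build check */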
+
+#define PM_INFO_MEMBER_OFF(member) \
+ (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32))
+
+#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \
+{ \
+ .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \
+ .key = __stringify(_field_), \
+ .field_mask = _mask_, \
+}
+
+#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0))
+
+#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \
+ PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN4_PM_##_field_##_MASK)
+
+#define PM_INFO_MAX_KEY_LEN 21
+
+struct pm_status_row {
+ int reg_offset;
+ u32 field_mask;
+ const char *key;
+};
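For reference, PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE) expands to roughly the following initializer, given the macros above:

    {
            .reg_offset = offsetof(struct icp_qat_fw_init_admin_pm_info,
                                   pm.status) / sizeof(u32),
            .key        = "CPM_PM_STATE",
            .field_mask = ADF_GEN4_PM_CPM_PM_STATE_MASK,
    }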
+
+static struct pm_status_row pm_fuse_rows[] = {
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE),
+ PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE),
+};
+
+static struct pm_status_row pm_info_rows[] = {
+ PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE),
+ PM_INFO_REGSET_ENTRY(pm.status, PENDING_WP),
+ PM_INFO_REGSET_ENTRY(pm.status, CURRENT_WP),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_ENABLE),
+ PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_FILTER),
+ PM_INFO_REGSET_ENTRY(pm.main, MIN_PWR_ACK),
+ PM_INFO_REGSET_ENTRY(pm.thread, MIN_PWR_ACK_PENDING),
+ PM_INFO_REGSET_ENTRY(pm.main, THR_VALUE),
+};
+
+static struct pm_status_row pm_ssm_rows[] = {
+ PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE),
+ PM_INFO_REGSET_ENTRY32(ssm.active_constraint, ACTIVE_CONSTRAINT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWER_GATED),
+ PM_INFO_REGSET_ENTRY(ssm.pm_active_status, ATH_ACTIVE_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPH_ACTIVE_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_active_status, PKE_ACTIVE_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPR_ACTIVE_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_active_status, DCPR_ACTIVE_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_active_status, UCS_ACTIVE_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_active_status, XLT_ACTIVE_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WAT_ACTIVE_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WCP_ACTIVE_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, ATH_MANAGED_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPH_MANAGED_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, PKE_MANAGED_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPR_MANAGED_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, DCPR_MANAGED_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, UCS_MANAGED_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, XLT_MANAGED_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WAT_MANAGED_COUNT),
+ PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WCP_MANAGED_COUNT),
+};
+
+static struct pm_status_row pm_log_rows[] = {
+ PM_INFO_REGSET_ENTRY32(event_counters.host_msg, HOST_MSG_EVENT_COUNT),
+ PM_INFO_REGSET_ENTRY32(event_counters.sys_pm, SYS_PM_EVENT_COUNT),
+ PM_INFO_REGSET_ENTRY32(event_counters.local_ssm, SSM_EVENT_COUNT),
+ PM_INFO_REGSET_ENTRY32(event_counters.timer, TIMER_EVENT_COUNT),
+ PM_INFO_REGSET_ENTRY32(event_counters.unknown, UNKNOWN_EVENT_COUNT),
+};
+
+static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = {
+ PM_INFO_REGSET_ENTRY32(event_log[0], EVENT0),
+ PM_INFO_REGSET_ENTRY32(event_log[1], EVENT1),
+ PM_INFO_REGSET_ENTRY32(event_log[2], EVENT2),
+ PM_INFO_REGSET_ENTRY32(event_log[3], EVENT3),
+ PM_INFO_REGSET_ENTRY32(event_log[4], EVENT4),
+ PM_INFO_REGSET_ENTRY32(event_log[5], EVENT5),
+ PM_INFO_REGSET_ENTRY32(event_log[6], EVENT6),
+ PM_INFO_REGSET_ENTRY32(event_log[7], EVENT7),
+};
+
+static struct pm_status_row pm_csrs_rows[] = {
+ PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT),
+ PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS),
+ PM_INFO_REGSET_ENTRY32(pm.main, CPM_PM_MASTER_FW),
+ PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ),
+};
+
+static int pm_scnprint_table(char *buff, struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size, int table_len,
+ bool lowercase)
+{
+ char key[PM_INFO_MAX_KEY_LEN];
+ int wr = 0;
+ int i;
+
+ for (i = 0; i < table_len; i++) {
+ if (lowercase)
+ string_lower(key, table[i].key);
+ else
+ string_upper(key, table[i].key);
+
+ wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key,
+ field_get(table[i].field_mask,
+ pm_info_regs[table[i].reg_offset]));
+ }
+
+ return wr;
+}
+
+static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size,
+ int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, false);
+}
+
+static int pm_scnprint_table_lower_keys(char *buff, struct pm_status_row *table,
+ u32 *pm_info_regs, size_t buff_size,
+ int table_len)
+{
+ return pm_scnprint_table(buff, table, pm_info_regs, buff_size,
+ table_len, true);
+}
+
+static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE);
+
+static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev,
+ char __user *buf, size_t count,
+ loff_t *pos)
+{
+ void __iomem *pmisc = adf_get_pmisc_base(accel_dev);
+ struct adf_pm *pm = &accel_dev->power_management;
+ struct icp_qat_fw_init_admin_pm_info *pm_info;
+ dma_addr_t p_state_addr;
+ u32 *pm_info_regs;
+ char *pm_kv;
+ int len = 0;
+ u32 val;
+ int ret;
+
+ pm_info = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_info)
+ return -ENOMEM;
+
+ pm_kv = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pm_kv) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(&GET_DEV(accel_dev), p_state_addr);
+ if (ret)
+ goto out_free;
+
+ /* Query PM info from QAT FW */
+ ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE);
+ dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (ret)
+ goto out_free;
+
+ pm_info_regs = (u32 *)pm_info;
+
+ /* Fusectl related */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- PM Fuse info ---------\n");
+ len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_fuse_rows));
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n",
+ pm_info->max_pwrreq);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n",
+ pm_info->min_pwrreq);
+
+ /* PM related */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "------------ PM Info ------------\n");
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n",
+ pm_info->pwr_state == PM_SET_MIN ? "min" : "max");
+ len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_info_rows));
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n");
+
+ /* SSM related */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- SSM_PM Info ----------\n");
+ len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_ssm_rows));
+
+ /* Log related */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "------------- PM Log -------------\n");
+ len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_log_rows));
+
+ len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_event_rows));
+
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n",
+ pm->idle_irq_counters);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "fw_irq_count: %#x\n",
+ pm->fw_irq_counters);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "throttle_irq_count: %#x\n", pm->throttle_irq_counters);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_ack_count: %#x\n",
+ pm->host_ack_counter);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_nack_count: %#x\n",
+ pm->host_nack_counter);
+
+ /* CSRs content */
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "----------- HW PM CSRs -----------\n");
+ len += pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows,
+ pm_info_regs, PAGE_SIZE - len,
+ ARRAY_SIZE(pm_csrs_rows));
+
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "CPM_PM_HOST_MSG: %#x\n", val);
+ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT);
+ len += scnprintf(&pm_kv[len], PAGE_SIZE - len,
+ "CPM_PM_INTERRUPT: %#x\n", val);
+ ret = simple_read_from_buffer(buf, count, pos, pm_kv, len);
+
+out_free:
+ kfree(pm_info);
+ kfree(pm_kv);
+ return ret;
+}
+
+void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev)
+{
+ accel_dev->power_management.print_pm_status = adf_gen4_print_pm_status;
+ accel_dev->power_management.present = true;
+}
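The print_pm_status callback set here is presumably invoked from the read handler in adf_pm_dbgfs.c (added in the Makefile but not shown in this section); a hypothetical wiring sketch:

    /* Hypothetical read handler; the real one lives in adf_pm_dbgfs.c */
    static ssize_t pm_status_read(struct file *f, char __user *buf,
                                  size_t count, loff_t *pos)
    {
            struct adf_accel_dev *accel_dev = f->private_data;
            struct adf_pm *pm = &accel_dev->power_management;

            if (!pm->present)
                    return -ENODEV;

            return pm->print_pm_status(accel_dev, buf, count, pos);
    }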
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
new file mode 100644
index 0000000000..048c246079
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c
@@ -0,0 +1,1566 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include "adf_common_drv.h"
+#include "adf_gen4_hw_data.h"
+#include "adf_gen4_ras.h"
+#include "adf_sysfs_ras_counters.h"
+
+#define BITS_PER_REG(_n_) (sizeof(_n_) * BITS_PER_BYTE)
+
+static void enable_errsou_reporting(void __iomem *csr)
+{
+ /* Enable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, 0);
+
+ /* Enable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, 0);
+
+ /*
+ * Enable uncorrectable error reporting in ERRSOU2
+ * but disable PM interrupt and CFC attention interrupt by default
+ */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2,
+ ADF_GEN4_ERRSOU2_PM_INT_BIT |
+ ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK);
+
+ /*
+ * Enable uncorrectable error reporting in ERRSOU3
+ * but disable RLT error interrupt and VFLR notify interrupt by default
+ */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3,
+ ADF_GEN4_ERRSOU3_RLTERROR_BIT |
+ ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT);
+}
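Note the polarity here: the ERRMSKx registers are mask registers, so writing 0 unmasks every source and a set bit suppresses one. Re-masking a single source at runtime would look like:

    /* Sketch: mask (disable) only the PM interrupt in ERRSOU2 */
    u32 val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2);

    val |= ADF_GEN4_ERRSOU2_PM_INT_BIT;
    ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val);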
+
+static void disable_errsou_reporting(void __iomem *csr)
+{
+ u32 val = 0;
+
+ /* Disable correctable error reporting in ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT);
+
+ /* Disable uncorrectable error reporting in ERRSOU1 */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, ADF_GEN4_ERRSOU1_BITMASK);
+
+ /* Disable uncorrectable error reporting in ERRSOU2 */
+ val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2);
+ val |= ADF_GEN4_ERRSOU2_DIS_BITMASK;
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val);
+
+ /* Disable uncorrectable error reporting in ERRSOU3 */
+ ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_ERRSOU3_BITMASK);
+}
+
+static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask;
+
+ /* Enable Acceleration Engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, ae_mask);
+
+ /* Enable Acceleration Engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, ae_mask);
+}
+
+static void disable_ae_error_reporting(void __iomem *csr)
+{
+ /* Disable Acceleration Engine correctable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, 0);
+
+ /* Disable Acceleration Engine uncorrectable error reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, 0);
+}
+
+static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+
+ /* Enable HI CPP Agents Command Parity Error Reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE,
+ err_mask->cppagentcmdpar_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL,
+ ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK);
+}
+
+static void disable_cpp_error_reporting(void __iomem *csr)
+{
+ /* Disable HI CPP Agents Command Parity Error Reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 0);
+
+ ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL,
+ ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK);
+}
+
+static void enable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg;
+
+ /* Enable RI Memory error reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0,
+ ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK |
+ ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK);
+
+ /* Enable IOSF Primary Command Parity error Reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, ADF_GEN4_RIMISCSTS_BIT);
+
+ /* Enable TI Internal Memory Parity Error reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 0);
+
+ /* Enable error handling in RI, TI CPP interface control registers */
+ ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, ADF_GEN4_RICPPINTCTL_BITMASK);
+
+ ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, ADF_GEN4_TICPPINTCTL_BITMASK);
+
+ /*
+ * Enable error detection and reporting in TIMISCSTS
+	 * preserving the values of bits 1, 2 and 30
+ */
+ reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL);
+ reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK;
+ reg |= ADF_GEN4_TIMISCCTL_BIT;
+ ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg);
+}
+
+static void disable_ti_ri_error_reporting(void __iomem *csr)
+{
+ u32 reg;
+
+ /* Disable RI Memory error reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 0);
+
+ /* Disable IOSF Primary Command Parity error Reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, 0);
+
+ /* Disable TI Internal Memory Parity Error reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK,
+ ADF_GEN4_TI_CI_PAR_STS_BITMASK);
+ ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK,
+ ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK);
+ ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK,
+ ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK);
+ ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK,
+ ADF_GEN4_TI_CD_PAR_STS_BITMASK);
+ ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK,
+ ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK);
+
+ /* Disable error handling in RI, TI CPP interface control registers */
+ ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, 0);
+
+ ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, 0);
+
+ /*
+ * Disable error detection and reporting in TIMISCSTS
+	 * preserving the values of bits 1, 2 and 30
+ */
+ reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL);
+ reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK;
+ ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg);
+}
+
+static void enable_rf_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+
+ /* Enable RF parity error in Shared RAM */
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, 0);
+
+ if (err_mask->parerr_wat_wcp_mask)
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, 0);
+}
+
+static void disable_rf_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+
+ /* Disable RF Parity Error reporting in Shared RAM */
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC,
+ ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH,
+ err_mask->parerr_ath_cph_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT,
+ err_mask->parerr_cpr_xlt_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS,
+ err_mask->parerr_dcpr_ucs_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE,
+ err_mask->parerr_pke_mask);
+
+ if (err_mask->parerr_wat_wcp_mask)
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP,
+ err_mask->parerr_wat_wcp_mask);
+}
+
+static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+ u32 val = 0;
+
+ /* Enable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, 0);
+
+ /* Enable shared memory error detection & correction */
+ val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN);
+ val |= err_mask->ssmfeatren_mask;
+ ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val);
+
+ /* Enable SER detection in SER_err_ssmsh register */
+ ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH,
+ ADF_GEN4_SER_EN_SSMSH_BITMASK);
+
+ /* Enable SSM soft parity error */
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, 0);
+
+ if (err_mask->parerr_wat_wcp_mask)
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, 0);
+
+ /* Enable slice hang interrupt reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, 0);
+
+ if (err_mask->parerr_wat_wcp_mask)
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, 0);
+}
+
+static void disable_ssm_error_reporting(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+ u32 val = 0;
+
+ /* Disable SSM interrupts */
+ ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM,
+ ADF_GEN4_INTMASKSSM_BITMASK);
+
+ /* Disable shared memory error detection & correction */
+ val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN);
+ val &= ADF_GEN4_SSMFEATREN_DIS_BITMASK;
+ ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val);
+
+ /* Disable SER detection in SER_err_ssmsh register */
+ ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, 0);
+
+ /* Disable SSM soft parity error */
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH,
+ err_mask->parerr_ath_cph_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT,
+ err_mask->parerr_cpr_xlt_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS,
+ err_mask->parerr_dcpr_ucs_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE,
+ err_mask->parerr_pke_mask);
+
+ if (err_mask->parerr_wat_wcp_mask)
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP,
+ err_mask->parerr_wat_wcp_mask);
+
+ /* Disable slice hang interrupt reporting */
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH,
+ err_mask->parerr_ath_cph_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT,
+ err_mask->parerr_cpr_xlt_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS,
+ err_mask->parerr_dcpr_ucs_mask);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE,
+ err_mask->parerr_pke_mask);
+
+ if (err_mask->parerr_wat_wcp_mask)
+ ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP,
+ err_mask->parerr_wat_wcp_mask);
+}
+
+static void enable_aram_error_reporting(void __iomem *csr)
+{
+ ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN,
+ ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK);
+
+ ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR,
+ ADF_GEN4_REG_ARAMCERR_EN_BITMASK);
+
+ ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR,
+ ADF_GEN4_REG_ARAMUERR_EN_BITMASK);
+
+ ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR,
+ ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK);
+}
+
+static void disable_aram_error_reporting(void __iomem *csr)
+{
+ ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, 0);
+ ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, 0);
+}
+
+static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *aram_csr = adf_get_aram_base(accel_dev);
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ enable_errsou_reporting(csr);
+ enable_ae_error_reporting(accel_dev, csr);
+ enable_cpp_error_reporting(accel_dev, csr);
+ enable_ti_ri_error_reporting(csr);
+ enable_rf_error_reporting(accel_dev, csr);
+ enable_ssm_error_reporting(accel_dev, csr);
+ enable_aram_error_reporting(aram_csr);
+}
+
+static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *aram_csr = adf_get_aram_base(accel_dev);
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+
+ disable_errsou_reporting(csr);
+ disable_ae_error_reporting(csr);
+ disable_cpp_error_reporting(csr);
+ disable_ti_ri_error_reporting(csr);
+ disable_rf_error_reporting(accel_dev, csr);
+ disable_ssm_error_reporting(accel_dev, csr);
+ disable_aram_error_reporting(aram_csr);
+}
+
+static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ u32 aecorrerr = ADF_CSR_RD(csr, ADF_GEN4_HIAECORERRLOG_CPP0);
+
+ aecorrerr &= GET_HW_DATA(accel_dev)->ae_mask;
+
+ dev_warn(&GET_DEV(accel_dev),
+ "Correctable error detected in AE: 0x%x\n",
+ aecorrerr);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+
+ /* Clear interrupt from ERRSOU0 */
+ ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr);
+}
+
+static bool adf_handle_cpp_aeunc(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 aeuncorerr;
+
+ if (!(errsou & ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT))
+ return false;
+
+ aeuncorerr = ADF_CSR_RD(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0);
+ aeuncorerr &= GET_HW_DATA(accel_dev)->ae_mask;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Uncorrectable error detected in AE: 0x%x\n",
+ aeuncorerr);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0, aeuncorerr);
+
+ return false;
+}
+
+static bool adf_handle_cppcmdparerr(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+ u32 cmdparerr;
+
+ if (!(errsou & ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT))
+ return false;
+
+ cmdparerr = ADF_CSR_RD(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG);
+ cmdparerr &= err_mask->cppagentcmdpar_mask;
+
+ dev_err(&GET_DEV(accel_dev),
+ "HI CPP agent command parity error: 0x%x\n",
+ cmdparerr);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG, cmdparerr);
+
+ return true;
+}
+
+static bool adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ bool reset_required = false;
+ u32 rimem_parerr_sts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT))
+ return false;
+
+ rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN4_RIMEM_PARERR_STS);
+ rimem_parerr_sts &= ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK |
+ ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK;
+
+ if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) {
+ dev_err(&GET_DEV(accel_dev),
+ "RI Memory Parity uncorrectable error: 0x%x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK) {
+ dev_err(&GET_DEV(accel_dev),
+ "RI Memory Parity fatal error: 0x%x\n",
+ rimem_parerr_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ reset_required = true;
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN4_RIMEM_PARERR_STS, rimem_parerr_sts);
+
+ return reset_required;
+}
+
+static bool adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 ti_ci_par_sts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
+ return false;
+
+ ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CI_PAR_STS);
+ ti_ci_par_sts &= ADF_GEN4_TI_CI_PAR_STS_BITMASK;
+
+ if (ti_ci_par_sts) {
+ dev_err(&GET_DEV(accel_dev),
+ "TI Memory Parity Error: 0x%x\n", ti_ci_par_sts);
+ ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_STS, ti_ci_par_sts);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ return false;
+}
+
+static bool adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 ti_pullfub_par_sts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
+ return false;
+
+ ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS);
+ ti_pullfub_par_sts &= ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK;
+
+ if (ti_pullfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev),
+ "TI Pull Parity Error: 0x%x\n", ti_pullfub_par_sts);
+
+ ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS,
+ ti_pullfub_par_sts);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ return false;
+}
+
+static bool adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 ti_pushfub_par_sts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
+ return false;
+
+ ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS);
+ ti_pushfub_par_sts &= ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK;
+
+ if (ti_pushfub_par_sts) {
+ dev_err(&GET_DEV(accel_dev),
+ "TI Push Parity Error: 0x%x\n", ti_pushfub_par_sts);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS,
+ ti_pushfub_par_sts);
+ }
+
+ return false;
+}
+
+static bool adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 ti_cd_par_sts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
+ return false;
+
+ ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CD_PAR_STS);
+ ti_cd_par_sts &= ADF_GEN4_TI_CD_PAR_STS_BITMASK;
+
+ if (ti_cd_par_sts) {
+ dev_err(&GET_DEV(accel_dev),
+ "TI CD Parity Error: 0x%x\n", ti_cd_par_sts);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_STS, ti_cd_par_sts);
+ }
+
+ return false;
+}
+
+static bool adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 ti_trnsb_par_sts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
+ return false;
+
+ ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_TRNSB_PAR_STS);
+ ti_trnsb_par_sts &= ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK;
+
+ if (ti_trnsb_par_sts) {
+ dev_err(&GET_DEV(accel_dev),
+ "TI TRNSB Parity Error: 0x%x\n", ti_trnsb_par_sts);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_STS, ti_trnsb_par_sts);
+ }
+
+ return false;
+}
+
+static bool adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 rimiscsts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU1_RIMISCSTS_BIT))
+ return false;
+
+ rimiscsts = ADF_CSR_RD(csr, ADF_GEN4_RIMISCSTS);
+ rimiscsts &= ADF_GEN4_RIMISCSTS_BIT;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Command Parity error detected on IOSFP: 0x%x\n",
+ rimiscsts);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_RIMISCSTS, rimiscsts);
+
+ return true;
+}
+
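+/*
+ * Walk all ERRSOU1 sources in one pass; each handler checks its own
+ * source bit and ORs its reset requirement into *reset_required.
+ */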
+static void adf_gen4_process_errsou1(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou,
+ bool *reset_required)
+{
+ *reset_required |= adf_handle_cpp_aeunc(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_cppcmdparerr(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_ri_mem_par_err(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_ti_ci_par_sts(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_ti_pullfub_par_sts(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_ti_pushfub_par_sts(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_ti_cd_par_sts(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_ti_trnsb_par_sts(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_iosfp_cmd_parerr(accel_dev, csr, errsou);
+}
+
+static bool adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT))
+ return false;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_UERRSSMSH);
+ reg &= ADF_GEN4_UERRSSMSH_BITMASK;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Uncorrectable error on ssm shared memory: 0x%x\n",
+ reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_UERRSSMSH, reg);
+
+ return false;
+}
+
+static bool adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT))
+ return false;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_CERRSSMSH);
+ reg &= ADF_GEN4_CERRSSMSH_ERROR_BIT;
+
+ dev_warn(&GET_DEV(accel_dev),
+ "Correctable error on ssm shared memory: 0x%x\n",
+ reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_CERRSSMSH, reg);
+
+ return false;
+}
+
+static bool adf_handle_pperr_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 iastatssm)
+{
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_PPERR_BIT))
+ return false;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_PPERR);
+ reg &= ADF_GEN4_PPERR_BITMASK;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Uncorrectable error CPP transaction on memory target: 0x%x\n",
+ reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_PPERR, reg);
+
+ return false;
+}
+
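+/* Report a hang on the given slice if its slice hang status CSR is non-zero */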
+static void adf_poll_slicehang_csr(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 slice_hang_offset,
+ char *slice_name)
+{
+ u32 slice_hang_reg = ADF_CSR_RD(csr, slice_hang_offset);
+
+ if (!slice_hang_reg)
+ return;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Slice %s hang error encountered\n", slice_name);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+}
+
+static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 iastatssm)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+
+ if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT))
+ return false;
+
+ adf_poll_slicehang_csr(accel_dev, csr,
+ ADF_GEN4_SLICEHANGSTATUS_ATH_CPH, "ath_cph");
+ adf_poll_slicehang_csr(accel_dev, csr,
+ ADF_GEN4_SLICEHANGSTATUS_CPR_XLT, "cpr_xlt");
+ adf_poll_slicehang_csr(accel_dev, csr,
+ ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS, "dcpr_ucs");
+ adf_poll_slicehang_csr(accel_dev, csr,
+ ADF_GEN4_SLICEHANGSTATUS_PKE, "pke");
+
+ if (err_mask->parerr_wat_wcp_mask)
+ adf_poll_slicehang_csr(accel_dev, csr,
+ ADF_GEN4_SLICEHANGSTATUS_WAT_WCP,
+ "ath_cph");
+
+ return false;
+}
+
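+/*
+ * SPP pull/push command parity errors are fatal and require a reset;
+ * pull/push data parity errors are counted as uncorrectable only.
+ */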
+static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+ bool reset_required = false;
+ u32 reg;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH);
+ reg &= err_mask->parerr_ath_cph_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull command fatal error ATH_CPH: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH, reg);
+
+ reset_required = true;
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT);
+ reg &= err_mask->parerr_cpr_xlt_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull command fatal error CPR_XLT: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT, reg);
+
+ reset_required = true;
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS);
+ reg &= err_mask->parerr_dcpr_ucs_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull command fatal error DCPR_UCS: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS, reg);
+
+ reset_required = true;
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE);
+ reg &= err_mask->parerr_pke_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull command fatal error PKE: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE, reg);
+
+ reset_required = true;
+ }
+
+ if (err_mask->parerr_wat_wcp_mask) {
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP);
+ reg &= err_mask->parerr_wat_wcp_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull command fatal error WAT_WCP: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP, reg);
+
+ reset_required = true;
+ }
+ }
+
+ return reset_required;
+}
+
+static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+ u32 reg;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH);
+ reg &= err_mask->parerr_ath_cph_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull data err ATH_CPH: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT);
+ reg &= err_mask->parerr_cpr_xlt_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull data err CPR_XLT: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS);
+ reg &= err_mask->parerr_dcpr_ucs_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull data err DCPR_UCS: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE);
+ reg &= err_mask->parerr_pke_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull data err PKE: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE, reg);
+ }
+
+ if (err_mask->parerr_wat_wcp_mask) {
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP);
+ reg &= err_mask->parerr_wat_wcp_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP pull data err WAT_WCP: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP, reg);
+ }
+ }
+
+ return false;
+}
+
+static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+ bool reset_required = false;
+ u32 reg;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH);
+ reg &= err_mask->parerr_ath_cph_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push command fatal error ATH_CPH: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH, reg);
+
+ reset_required = true;
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT);
+ reg &= err_mask->parerr_cpr_xlt_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push command fatal error CPR_XLT: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT, reg);
+
+ reset_required = true;
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS);
+ reg &= err_mask->parerr_dcpr_ucs_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push command fatal error DCPR_UCS: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS, reg);
+
+ reset_required = true;
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE);
+ reg &= err_mask->parerr_pke_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push command fatal error PKE: 0x%x\n",
+ reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE, reg);
+
+ reset_required = true;
+ }
+
+ if (err_mask->parerr_wat_wcp_mask) {
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP);
+ reg &= err_mask->parerr_wat_wcp_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push command fatal error WAT_WCP: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP, reg);
+
+ reset_required = true;
+ }
+ }
+
+ return reset_required;
+}
+
+static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+ u32 reg;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH);
+ reg &= err_mask->parerr_ath_cph_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push data err ATH_CPH: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT);
+ reg &= err_mask->parerr_cpr_xlt_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push data err CPR_XLT: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS);
+ reg &= err_mask->parerr_dcpr_ucs_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push data err DCPR_UCS: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE);
+ reg &= err_mask->parerr_pke_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push data err PKE: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE, reg);
+ }
+
+ if (err_mask->parerr_wat_wcp_mask) {
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP);
+ reg &= err_mask->parerr_wat_wcp_mask;
+ if (reg) {
+ dev_err(&GET_DEV(accel_dev),
+ "SPP push data err WAT_WCP: 0x%x\n", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP,
+ reg);
+ }
+ }
+
+ return false;
+}
+
+static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 iastatssm)
+{
+ bool reset_required;
+
+ if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT))
+ return false;
+
+ reset_required = adf_handle_spp_pullcmd_err(accel_dev, csr);
+ reset_required |= adf_handle_spp_pulldata_err(accel_dev, csr);
+ reset_required |= adf_handle_spp_pushcmd_err(accel_dev, csr);
+ reset_required |= adf_handle_spp_pushdata_err(accel_dev, csr);
+
+ return reset_required;
+}
+
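+/* Count one RAS error for each error bit set in the SSMCPPERR register */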
+static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 iastatssm)
+{
+ u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR);
+ u32 bits_num = BITS_PER_REG(reg);
+ bool reset_required = false;
+ unsigned long errs_bits;
+ u32 bit_iterator;
+
+ if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT))
+ return false;
+
+ reg &= ADF_GEN4_SSMCPPERR_FATAL_BITMASK | ADF_GEN4_SSMCPPERR_UNCERR_BITMASK;
+ if (reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal SSM CPP parity error: 0x%x\n", reg);
+
+ errs_bits = reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK;
+ for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+ reset_required = true;
+ }
+
+ if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) {
+ dev_err(&GET_DEV(accel_dev),
+ "non-Fatal SSM CPP parity error: 0x%x\n", reg);
+ errs_bits = reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK;
+
+ for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN4_SSMCPPERR, reg);
+
+ return reset_required;
+}
+
+static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 iastatssm)
+{
+ struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+ u32 reg;
+
+ if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT))
+ return false;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC);
+ reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT;
+ if (reg) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH);
+ reg &= err_mask->parerr_ath_cph_mask;
+ if (reg) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT);
+ reg &= err_mask->parerr_cpr_xlt_mask;
+ if (reg) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS);
+ reg &= err_mask->parerr_dcpr_ucs_mask;
+ if (reg) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg);
+ }
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE);
+ reg &= err_mask->parerr_pke_mask;
+ if (reg) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg);
+ }
+
+ if (err_mask->parerr_wat_wcp_mask) {
+ reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP);
+ reg &= err_mask->parerr_wat_wcp_mask;
+ if (reg) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP,
+ reg);
+ }
+ }
+
+ dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported");
+
+ return false;
+}
+
+static bool adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 iastatssm)
+{
+ u32 reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH);
+ u32 bits_num = BITS_PER_REG(reg);
+ bool reset_required = false;
+ unsigned long errs_bits;
+ u32 bit_iterator;
+
+ if (!(iastatssm & (ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT |
+ ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT)))
+ return false;
+
+ reg &= ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK |
+ ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK |
+ ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK;
+ if (reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK) {
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal SER_SSMSH_ERR: 0x%x\n", reg);
+
+ errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK;
+ for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+ }
+
+ reset_required = true;
+ }
+
+ if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) {
+ dev_err(&GET_DEV(accel_dev),
+ "non-fatal SER_SSMSH_ERR: 0x%x\n", reg);
+
+ errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK;
+ for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+ }
+
+ if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) {
+ dev_warn(&GET_DEV(accel_dev),
+ "Correctable SER_SSMSH_ERR: 0x%x\n", reg);
+
+ errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK;
+ for_each_set_bit(bit_iterator, &errs_bits, bits_num) {
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ }
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN4_SER_ERR_SSMSH, reg);
+
+ return reset_required;
+}
+
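+/*
+ * Dispatch each pending SSM interrupt source to its handler, then write
+ * the accumulated status back to IAINTSTATSSM to clear the handled bits.
+ */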
+static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN4_IAINTSTATSSM);
+ bool reset_required;
+
+ iastatssm &= ADF_GEN4_IAINTSTATSSM_BITMASK;
+ if (!iastatssm)
+ return false;
+
+ reset_required = adf_handle_uerrssmsh(accel_dev, csr, iastatssm);
+ reset_required |= adf_handle_cerrssmsh(accel_dev, csr, iastatssm);
+ reset_required |= adf_handle_pperr_err(accel_dev, csr, iastatssm);
+ reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm);
+ reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm);
+ reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm);
+ reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm);
+ reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm);
+
+ ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm);
+
+ return reset_required;
+}
+
+static bool adf_handle_exprpssmcpr(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMCPR);
+
+ reg &= ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK;
+ if (!reg)
+ return false;
+
+ dev_err(&GET_DEV(accel_dev),
+ "Uncorrectable error exception in SSM CMP: 0x%x", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMCPR, reg);
+
+ return false;
+}
+
+static bool adf_handle_exprpssmxlt(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMXLT);
+
+ reg &= ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK |
+ ADF_GEN4_EXPRPSSMXLT_CERR_BIT;
+ if (!reg)
+ return false;
+
+ if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) {
+ dev_err(&GET_DEV(accel_dev),
+ "Uncorrectable error exception in SSM XLT: 0x%x", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) {
+ dev_warn(&GET_DEV(accel_dev),
+ "Correctable error exception in SSM XLT: 0x%x", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMXLT, reg);
+
+ return false;
+}
+
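+/* Each decompression slice has its own EXPRPSSMDCPR exception CSR */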
+static bool adf_handle_exprpssmdcpr(struct adf_accel_dev *accel_dev,
+ void __iomem *csr)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < ADF_GEN4_DCPR_SLICES_NUM; i++) {
+ reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMDCPR(i));
+ reg &= ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK |
+ ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK;
+ if (!reg)
+ continue;
+
+ if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) {
+ dev_err(&GET_DEV(accel_dev),
+ "Uncorrectable error exception in SSM DCMP: 0x%x", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) {
+ dev_warn(&GET_DEV(accel_dev),
+ "Correctable error exception in SSM DCMP: 0x%x", reg);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMDCPR(i), reg);
+ }
+
+ return false;
+}
+
+static bool adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr,
+ u32 errsou)
+{
+ bool reset_required;
+
+ if (!(errsou & ADF_GEN4_ERRSOU2_SSM_ERR_BIT))
+ return false;
+
+ reset_required = adf_handle_iaintstatssm(accel_dev, csr);
+ reset_required |= adf_handle_exprpssmcpr(accel_dev, csr);
+ reset_required |= adf_handle_exprpssmxlt(accel_dev, csr);
+ reset_required |= adf_handle_exprpssmdcpr(accel_dev, csr);
+
+ return reset_required;
+}
+
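+/*
+ * CFC data parity errors are uncorrectable; command parity errors and
+ * multiple-error conditions are fatal and require a device reset.
+ */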
+static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ bool reset_required = false;
+ u32 reg;
+
+ if (!(errsou & ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT))
+ return false;
+
+ reg = ADF_CSR_RD(csr, ADF_GEN4_CPP_CFC_ERR_STATUS);
+ if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT) {
+ dev_err(&GET_DEV(accel_dev),
+ "CPP_CFC_ERR: data parity: 0x%x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT) {
+ dev_err(&GET_DEV(accel_dev),
+ "CPP_CFC_ERR: command parity: 0x%x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ reset_required = true;
+ }
+
+ if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT) {
+ dev_err(&GET_DEV(accel_dev),
+ "CPP_CFC_ERR: multiple errors: 0x%x", reg);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ reset_required = true;
+ }
+
+ ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_STATUS_CLR,
+ ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK);
+
+ return reset_required;
+}
+
+static void adf_gen4_process_errsou2(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou,
+ bool *reset_required)
+{
+ *reset_required |= adf_handle_ssm(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_cpp_cfc_err(accel_dev, csr, errsou);
+}
+
+static bool adf_handle_timiscsts(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 timiscsts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU3_TIMISCSTS_BIT))
+ return false;
+
+ timiscsts = ADF_CSR_RD(csr, ADF_GEN4_TIMISCSTS);
+
+ dev_err(&GET_DEV(accel_dev),
+ "Fatal error in Transmit Interface: 0x%x\n", timiscsts);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ return true;
+}
+
+static bool adf_handle_ricppintsts(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 ricppintsts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK))
+ return false;
+
+ ricppintsts = ADF_CSR_RD(csr, ADF_GEN4_RICPPINTSTS);
+ ricppintsts &= ADF_GEN4_RICPPINTSTS_BITMASK;
+
+ dev_err(&GET_DEV(accel_dev),
+ "RI CPP Uncorrectable Error: 0x%x\n", ricppintsts);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_RICPPINTSTS, ricppintsts);
+
+ return false;
+}
+
+static bool adf_handle_ticppintsts(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 ticppintsts;
+
+ if (!(errsou & ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK))
+ return false;
+
+ ticppintsts = ADF_CSR_RD(csr, ADF_GEN4_TICPPINTSTS);
+ ticppintsts &= ADF_GEN4_TICPPINTSTS_BITMASK;
+
+ dev_err(&GET_DEV(accel_dev),
+ "TI CPP Uncorrectable Error: 0x%x\n", ticppintsts);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_TICPPINTSTS, ticppintsts);
+
+ return false;
+}
+
+static bool adf_handle_aramcerr(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 aram_cerr;
+
+ if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT))
+ return false;
+
+ aram_cerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMCERR);
+ aram_cerr &= ADF_GEN4_REG_ARAMCERR_BIT;
+
+ dev_warn(&GET_DEV(accel_dev),
+ "ARAM correctable error : 0x%x\n", aram_cerr);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+
+ aram_cerr |= ADF_GEN4_REG_ARAMCERR_EN_BITMASK;
+
+ ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, aram_cerr);
+
+ return false;
+}
+
+static bool adf_handle_aramuerr(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ bool reset_required = false;
+ u32 aramuerr;
+
+ if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT))
+ return false;
+
+ aramuerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMUERR);
+ aramuerr &= ADF_GEN4_REG_ARAMUERR_ERROR_BIT |
+ ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT;
+
+ if (!aramuerr)
+ return false;
+
+ if (aramuerr & ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT) {
+ dev_err(&GET_DEV(accel_dev),
+ "ARAM multiple uncorrectable errors: 0x%x\n", aramuerr);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ reset_required = true;
+ } else {
+ dev_err(&GET_DEV(accel_dev),
+ "ARAM uncorrectable error: 0x%x\n", aramuerr);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ aramuerr |= ADF_GEN4_REG_ARAMUERR_EN_BITMASK;
+
+ ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, aramuerr);
+
+ return reset_required;
+}
+
+static bool adf_handle_reg_cppmemtgterr(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ bool reset_required = false;
+ u32 cppmemtgterr;
+
+ if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT))
+ return false;
+
+ cppmemtgterr = ADF_CSR_RD(csr, ADF_GEN4_REG_CPPMEMTGTERR);
+ cppmemtgterr &= ADF_GEN4_REG_CPPMEMTGTERR_BITMASK |
+ ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT;
+ if (!cppmemtgterr)
+ return false;
+
+ if (cppmemtgterr & ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT) {
+ dev_err(&GET_DEV(accel_dev),
+ "Misc memory target multiple uncorrectable errors: 0x%x\n",
+ cppmemtgterr);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+ reset_required = true;
+ } else {
+ dev_err(&GET_DEV(accel_dev),
+ "Misc memory target uncorrectable error: 0x%x\n", cppmemtgterr);
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ }
+
+ cppmemtgterr |= ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK;
+
+ ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, cppmemtgterr);
+
+ return reset_required;
+}
+
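+/* Check the per-ring-pair ATU fault status CSRs, one for each bank */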
+static bool adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, u32 errsou)
+{
+ u32 i;
+ u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks;
+
+ if (!(errsou & ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT))
+ return false;
+
+ for (i = 0; i < max_rp_num; i++) {
+ u32 atufaultstatus = ADF_CSR_RD(csr, ADF_GEN4_ATUFAULTSTATUS(i));
+
+ atufaultstatus &= ADF_GEN4_ATUFAULTSTATUS_BIT;
+
+ if (atufaultstatus) {
+ dev_err(&GET_DEV(accel_dev),
+ "Ring Pair (%u) ATU detected fault: 0x%x\n", i,
+ atufaultstatus);
+
+ ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+ ADF_CSR_WR(csr, ADF_GEN4_ATUFAULTSTATUS(i), atufaultstatus);
+ }
+ }
+
+ return false;
+}
+
+static void adf_gen4_process_errsou3(struct adf_accel_dev *accel_dev,
+ void __iomem *csr, void __iomem *aram_csr,
+ u32 errsou, bool *reset_required)
+{
+ *reset_required |= adf_handle_timiscsts(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_ricppintsts(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_ticppintsts(accel_dev, csr, errsou);
+ *reset_required |= adf_handle_aramcerr(accel_dev, aram_csr, errsou);
+ *reset_required |= adf_handle_aramuerr(accel_dev, aram_csr, errsou);
+ *reset_required |= adf_handle_reg_cppmemtgterr(accel_dev, aram_csr, errsou);
+ *reset_required |= adf_handle_atufaultstatus(accel_dev, csr, errsou);
+}
+
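+/*
+ * Top-level RAS interrupt handler: walk ERRSOU0-3, dispatch any pending
+ * error sources and report whether a fatal error requires a device reset.
+ */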
+static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev,
+ bool *reset_required)
+{
+ void __iomem *aram_csr = adf_get_aram_base(accel_dev);
+ void __iomem *csr = adf_get_pmisc_base(accel_dev);
+ u32 errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU0);
+ bool handled = false;
+
+ *reset_required = false;
+
+ if (errsou & ADF_GEN4_ERRSOU0_BIT) {
+ adf_gen4_process_errsou0(accel_dev, csr);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU1);
+ if (errsou & ADF_GEN4_ERRSOU1_BITMASK) {
+ adf_gen4_process_errsou1(accel_dev, csr, errsou, reset_required);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU2);
+ if (errsou & ADF_GEN4_ERRSOU2_BITMASK) {
+ adf_gen4_process_errsou2(accel_dev, csr, errsou, reset_required);
+ handled = true;
+ }
+
+ errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU3);
+ if (errsou & ADF_GEN4_ERRSOU3_BITMASK) {
+ adf_gen4_process_errsou3(accel_dev, csr, aram_csr, errsou, reset_required);
+ handled = true;
+ }
+
+ return handled;
+}
+
+void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops)
+{
+ ras_ops->enable_ras_errors = adf_gen4_enable_ras;
+ ras_ops->disable_ras_errors = adf_gen4_disable_ras;
+ ras_ops->handle_interrupt = adf_gen4_handle_interrupt;
+}
+EXPORT_SYMBOL_GPL(adf_gen4_init_ras_ops);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h
new file mode 100644
index 0000000000..53352083cd
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h
@@ -0,0 +1,825 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_GEN4_RAS_H_
+#define ADF_GEN4_RAS_H_
+
+#include <linux/bits.h>
+
+struct adf_ras_ops;
+
+/* ERRSOU0 Correctable error mask*/
+#define ADF_GEN4_ERRSOU0_BIT BIT(0)
+
+/* HI AE Correctable error log */
+#define ADF_GEN4_HIAECORERRLOG_CPP0 0x41A308
+
+/* HI AE Correctable error log enable */
+#define ADF_GEN4_HIAECORERRLOGENABLE_CPP0 0x41A318
+#define ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT BIT(0)
+#define ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT BIT(1)
+#define ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2)
+#define ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3)
+#define ADF_GEN4_ERRSOU1_RIMISCSTS_BIT BIT(4)
+
+#define ADF_GEN4_ERRSOU1_BITMASK ( \
+ (ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT) | \
+ (ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT) | \
+ (ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT) | \
+ (ADF_GEN4_ERRSOU1_RIMISCSTS_BIT))
+
+/* HI AE Uncorrectable error log */
+#define ADF_GEN4_HIAEUNCERRLOG_CPP0 0x41A300
+
+/* HI AE Uncorrectable error log enable */
+#define ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0 0x41A320
+
+/* HI CPP Agent Command parity error log */
+#define ADF_GEN4_HICPPAGENTCMDPARERRLOG 0x41A310
+
+/* HI CPP Agent Command parity error logging enable */
+#define ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE 0x41A314
+
+/* RI Memory parity error status register */
+#define ADF_GEN4_RIMEM_PARERR_STS 0x41B128
+
+/* RI Memory parity error reporting enable */
+#define ADF_GEN4_RI_MEM_PAR_ERR_EN0 0x41B12C
+
+/*
+ * RI Memory parity error mask
+ * BIT(0) - BIT(3) - ri_iosf_pdata_rxq[0:3] parity error
+ * BIT(4) - ri_tlq_phdr parity error
+ * BIT(5) - ri_tlq_pdata parity error
+ * BIT(6) - ri_tlq_nphdr parity error
+ * BIT(7) - ri_tlq_npdata parity error
+ * BIT(8) - BIT(9) - ri_tlq_cplhdr[0:1] parity error
+ * BIT(10) - BIT(17) - ri_tlq_cpldata[0:7] parity error
+ * BIT(18) - set this bit to 1 to enable logging status to ri_mem_par_err_sts0
+ * BIT(19) - ri_cds_cmd_fifo parity error
+ * BIT(20) - ri_obc_ricpl_fifo parity error
+ * BIT(21) - ri_obc_tiricpl_fifo parity error
+ * BIT(22) - ri_obc_cppcpl_fifo parity error
+ * BIT(23) - ri_obc_pendcpl_fifo parity error
+ * BIT(24) - ri_cpp_cmd_fifo parity error
+ * BIT(25) - ri_cds_ticmd_fifo parity error
+ * BIT(26) - riti_cmd_fifo parity error
+ * BIT(27) - ri_int_msixtbl parity error
+ * BIT(28) - ri_int_imstbl parity error
+ * BIT(30) - ri_kpt_fuses parity error
+ */
+#define ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | \
+ BIT(7) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \
+ BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | \
+ BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | \
+ BIT(26) | BIT(27) | BIT(28) | BIT(30))
+
+#define ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK \
+ (BIT(4) | BIT(6) | BIT(8) | BIT(9))
+
+/* TI CI parity status */
+#define ADF_GEN4_TI_CI_PAR_STS 0x50060C
+
+/* TI CI parity reporting mask */
+#define ADF_GEN4_TI_CI_PAR_ERR_MASK 0x500608
+
+/*
+ * TI CI parity status mask
+ * BIT(0) - CdCmdQ_sts parity error status
+ * BIT(1) - CdDataQ_sts parity error status
+ * BIT(3) - CPP_SkidQ_sts parity error status
+ * BIT(7) - CPP_SkidQ_sc_sts parity error status
+ */
+#define ADF_GEN4_TI_CI_PAR_STS_BITMASK \
+ (BIT(0) | BIT(1) | BIT(3) | BIT(7))
+
+/* TI PULLFUB parity status */
+#define ADF_GEN4_TI_PULL0FUB_PAR_STS 0x500618
+
+/* TI PULLFUB parity error reporting mask */
+#define ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK 0x500614
+
+/*
+ * TI PULLFUB parity status mask
+ * BIT(0) - TrnPullReqQ_sts parity status
+ * BIT(1) - TrnSharedDataQ_sts parity status
+ * BIT(2) - TrnPullReqDataQ_sts parity status
+ * BIT(4) - CPP_CiPullReqQ_sts parity status
+ * BIT(5) - CPP_TrnPullReqQ_sts parity status
+ * BIT(6) - CPP_PullidQ_sts parity status
+ * BIT(7) - CPP_WaitDataQ_sts parity status
+ * BIT(8) - CPP_CdDataQ_sts parity status
+ * BIT(9) - CPP_TrnDataQP0_sts parity status
+ * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status
+ * BIT(12) - CPP_TrnDataQP1_sts parity status
+ * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status
+ */
+#define ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \
+ BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14))
+
+/* TI PUSHFUB parity status */
+#define ADF_GEN4_TI_PUSHFUB_PAR_STS 0x500630
+
+/* TI PUSHFUB parity error reporting mask */
+#define ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK 0x50062C
+
+/*
+ * TI PUSHFUB parity status mask
+ * BIT(0) - SbPushReqQ_sts parity status
+ * BIT(1) - BIT(2) - SbPushDataQ[0:1]_sts parity status
+ * BIT(4) - CPP_CdPushReqQ_sts parity status
+ * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status
+ * BIT(7) - CPP_SbPushReqQ_sts parity status
+ * BIT(8) - CPP_SbPushDataQP_sts parity status
+ * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status
+ */
+#define ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \
+ BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10))
+
+/* TI CD parity status */
+#define ADF_GEN4_TI_CD_PAR_STS 0x50063C
+
+/* TI CD parity error mask */
+#define ADF_GEN4_TI_CD_PAR_ERR_MASK 0x500638
+
+/*
+ * TI CD parity status mask
+ * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status
+ * BIT(16) - Leaf2ClusterRam_sts parity status
+ * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status
+ * BIT(19) - VirtualQ_sts parity status
+ * BIT(20) - DtRdQ_sts parity status
+ * BIT(21) - DtWrQ_sts parity status
+ * BIT(22) - RiCmdQ_sts parity status
+ * BIT(23) - BypassQ_sts parity status
+ * BIT(24) - DtRdQ_sc_sts parity status
+ * BIT(25) - DtWrQ_sc_sts parity status
+ */
+#define ADF_GEN4_TI_CD_PAR_STS_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \
+ BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+ BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25))
+
+/* TI TRNSB parity status */
+#define ADF_GEN4_TI_TRNSB_PAR_STS 0x500648
+
+/* TI TRNSB Parity error reporting mask */
+#define ADF_GEN4_TI_TRNSB_PAR_ERR_MASK 0x500644
+
+/*
+ * TI TRNSB parity status mask
+ * BIT(0) - TrnPHdrQP_sts parity status
+ * BIT(1) - TrnPHdrQRF_sts parity status
+ * BIT(2) - TrnPDataQP_sts parity status
+ * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status
+ * BIT(7) - TrnNpHdrQP_sts parity status
+ * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status
+ * BIT(10) - TrnCplHdrQ_sts parity status
+ * BIT(11) - TrnPutObsReqQ_sts parity status
+ * BIT(12) - TrnPushReqQ_sts parity status
+ * BIT(13) - SbSplitIdRam_sts parity status
+ * BIT(14) - SbReqCountQ_sts parity status
+ * BIT(15) - SbCplTrkRam_sts parity status
+ * BIT(16) - SbGetObsReqQ_sts parity status
+ * BIT(17) - SbEpochIdQ_sts parity status
+ * BIT(18) - SbAtCplHdrQ_sts parity status
+ * BIT(19) - SbAtCplDataQ_sts parity status
+ * BIT(20) - SbReqCountRam_sts parity status
+ * BIT(21) - SbAtCplHdrQ_sc_sts parity status
+ */
+#define ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+ BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \
+ BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \
+ BIT(19) | BIT(20) | BIT(21))
+
+/* Status register to log misc error on RI */
+#define ADF_GEN4_RIMISCSTS 0x41B1B8
+
+/* Status control register to log misc RI error */
+#define ADF_GEN4_RIMISCCTL 0x41B1BC
+
+/*
+ * ERRSOU2 bit mask
+ * BIT(0) - SSM Interrupt Mask
+ * BIT(1) - CFC on CPP; logical OR of CFC push and pull errors
+ * BIT(2) - BIT(4) - CPP attention interrupts, deprecated on gen4 devices
+ * BIT(18) - PM interrupt
+ */
+#define ADF_GEN4_ERRSOU2_SSM_ERR_BIT BIT(0)
+#define ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1)
+#define ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK \
+ (BIT(2) | BIT(3) | BIT(4))
+
+#define ADF_GEN4_ERRSOU2_PM_INT_BIT BIT(18)
+
+#define ADF_GEN4_ERRSOU2_BITMASK \
+ (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)
+
+#define ADF_GEN4_ERRSOU2_DIS_BITMASK \
+ (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \
+ ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \
+ ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK)
+
+#define ADF_GEN4_IAINTSTATSSM 0x28
+
+/* IAINTSTATSSM error bit mask definitions */
+#define ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT BIT(0)
+#define ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT BIT(1)
+#define ADF_GEN4_IAINTSTATSSM_PPERR_BIT BIT(2)
+#define ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT BIT(3)
+#define ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT BIT(4)
+#define ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT BIT(5)
+#define ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT BIT(6)
+#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT BIT(7)
+#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT BIT(8)
+
+#define ADF_GEN4_IAINTSTATSSM_BITMASK \
+ (ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT | \
+ ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT | \
+ ADF_GEN4_IAINTSTATSSM_PPERR_BIT | \
+ ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT | \
+ ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT | \
+ ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT | \
+ ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT | \
+ ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | \
+ ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT)
+
+#define ADF_GEN4_UERRSSMSH 0x18
+
+/*
+ * UERRSSMSH error bit masks definitions
+ *
+ * BIT(0) - Indicates one uncorrectable error
+ * BIT(15) - Indicates multiple uncorrectable errors
+ * in device shared memory
+ */
+#define ADF_GEN4_UERRSSMSH_BITMASK (BIT(0) | BIT(15))
+
+#define ADF_GEN4_UERRSSMSHAD 0x1C
+
+#define ADF_GEN4_CERRSSMSH 0x10
+
+/*
+ * CERRSSMSH error bit
+ * BIT(0) - Indicates one correctable error
+ */
+#define ADF_GEN4_CERRSSMSH_ERROR_BIT BIT(0)
+
+#define ADF_GEN4_CERRSSMSHAD 0x14
+
+/* SSM error handling features enable register */
+#define ADF_GEN4_SSMFEATREN 0x198
+
+/*
+ * Disable the SSM error detection and reporting features that are
+ * enabled by the device driver on RAS initialization.
+ *
+ * The following bits should be cleared:
+ * BIT(4) - Disable CPP parity detection
+ * BIT(12) - Disable logging of push/pull data errors in the PPERR register
+ * BIT(16) - BIT(23) - Disable parity for SPPs
+ * BIT(24) - BIT(27) - Disable parity for SPPs, if supported on the device
+ */
+#define ADF_GEN4_SSMFEATREN_DIS_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | BIT(6) | BIT(7) | \
+ BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(13) | BIT(14) | BIT(15))
+
+#define ADF_GEN4_INTMASKSSM 0x0
+
+/*
+ * Error reporting mask in INTMASKSSM
+ * BIT(0) - Shared memory uncorrectable interrupt mask
+ * BIT(1) - Shared memory correctable interrupt mask
+ * BIT(2) - PPERR interrupt mask
+ * BIT(3) - CPP parity error Interrupt mask
+ * BIT(4) - SSM interrupt generated by SER correctable error mask
+ * BIT(5) - SSM interrupt generated by SER uncorrectable error
+ * (not stop and scream) mask
+ */
+#define ADF_GEN4_INTMASKSSM_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5))
+
+/* CPP push or pull error */
+#define ADF_GEN4_PPERR 0x8
+
+#define ADF_GEN4_PPERR_BITMASK (BIT(0) | BIT(1))
+
+#define ADF_GEN4_PPERRID 0xC
+
+/* Slice hang handling related registers */
+#define ADF_GEN4_SLICEHANGSTATUS_ATH_CPH 0x84
+#define ADF_GEN4_SLICEHANGSTATUS_CPR_XLT 0x88
+#define ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS 0x90
+#define ADF_GEN4_SLICEHANGSTATUS_WAT_WCP 0x8C
+#define ADF_GEN4_SLICEHANGSTATUS_PKE 0x94
+
+#define ADF_GEN4_SHINTMASKSSM_ATH_CPH 0xF0
+#define ADF_GEN4_SHINTMASKSSM_CPR_XLT 0xF4
+#define ADF_GEN4_SHINTMASKSSM_DCPR_UCS 0xFC
+#define ADF_GEN4_SHINTMASKSSM_WAT_WCP 0xF8
+#define ADF_GEN4_SHINTMASKSSM_PKE 0x100
+
+/* SPP pull cmd parity err_*slice* CSR */
+#define ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH 0x1A4
+#define ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT 0x1A8
+#define ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS 0x1B0
+#define ADF_GEN4_SPPPULLCMDPARERR_PKE 0x1B4
+#define ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP 0x1AC
+
+/* SPP pull data parity err_*slice* CSR */
+#define ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH 0x1BC
+#define ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT 0x1C0
+#define ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS 0x1C8
+#define ADF_GEN4_SPPPULLDATAPARERR_PKE 0x1CC
+#define ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP 0x1C4
+
+/* SPP push cmd parity err_*slice* CSR */
+#define ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH 0x1D4
+#define ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT 0x1D8
+#define ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS 0x1E0
+#define ADF_GEN4_SPPPUSHCMDPARERR_PKE 0x1E4
+#define ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP 0x1DC
+
+/* SPP push data parity err_*slice* CSR */
+#define ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH 0x1EC
+#define ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT 0x1F0
+#define ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS 0x1F8
+#define ADF_GEN4_SPPPUSHDATAPARERR_PKE 0x1FC
+#define ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP 0x1F4
+
+/* Accelerator SPP parity error mask registers */
+#define ADF_GEN4_SPPPARERRMSK_ATH_CPH 0x204
+#define ADF_GEN4_SPPPARERRMSK_CPR_XLT 0x208
+#define ADF_GEN4_SPPPARERRMSK_DCPR_UCS 0x210
+#define ADF_GEN4_SPPPARERRMSK_PKE 0x214
+#define ADF_GEN4_SPPPARERRMSK_WAT_WCP 0x20C
+
+#define ADF_GEN4_SSMCPPERR 0x224
+
+/*
+ * Uncorrectable error mask in SSMCPPERR
+ * BIT(0) - indicates CPP command parity error
+ * BIT(1) - indicates CPP Main Push PPID parity error
+ * BIT(2) - indicates CPP Main ePPID parity error
+ * BIT(3) - indicates CPP Main push data parity error
+ * BIT(4) - indicates CPP Main Pull PPID parity error
+ * BIT(5) - indicates CPP target pull data parity error
+ */
+#define ADF_GEN4_SSMCPPERR_FATAL_BITMASK \
+ (BIT(0) | BIT(1) | BIT(4))
+
+#define ADF_GEN4_SSMCPPERR_UNCERR_BITMASK \
+ (BIT(2) | BIT(3) | BIT(5))
+
+#define ADF_GEN4_SSMSOFTERRORPARITY_SRC 0x9C
+#define ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC 0xB8
+
+#define ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH 0xA0
+#define ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH 0xBC
+
+#define ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT 0xA4
+#define ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT 0xC0
+
+#define ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS 0xAC
+#define ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS 0xC8
+
+#define ADF_GEN4_SSMSOFTERRORPARITY_PKE 0xB0
+#define ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE 0xCC
+
+#define ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP 0xA8
+#define ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP 0xC4
+
+/* RF parity error detected in SharedRAM */
+#define ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT BIT(0)
+
+#define ADF_GEN4_SER_ERR_SSMSH 0x44C
+
+/*
+ * Fatal error mask in SER_ERR_SSMSH
+ * BIT(0) - Indicates an uncorrectable error has occurred in the
+ * accelerator controller command RFs
+ * BIT(2) - Parity error occurred in the bank SPP fifos
+ * BIT(3) - Indicates Parity error occurred in following fifos in
+ * the design
+ * BIT(4) - Parity error occurred in flops in the design
+ * BIT(5) - Uncorrectable error has occurred in the
+ * target push and pull data register flop
+ * BIT(7) - Indicates Parity error occurred in the Resource Manager
+ * pending lock request fifos
+ * BIT(8) - Indicates Parity error occurred in the Resource Manager
+ * MECTX command queues logic
+ * BIT(9) - Indicates Parity error occurred in the Resource Manager
+ * MECTX sigdone fifo flops
+ * BIT(10) - Indicates an uncorrectable error has occurred in the
+ * Resource Manager MECTX command RFs
+ * BIT(14) - Parity error occurred in Buffer Manager sigdone FIFO
+ */
+#define ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK \
+ (BIT(0) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(7) | \
+ BIT(8) | BIT(9) | BIT(10) | BIT(14))
+
+/*
+ * Uncorrectable error mask in SER_ERR_SSMSH
+ * BIT(12) Parity error occurred in Buffer Manager pool 0
+ * BIT(13) Parity error occurred in Buffer Manager pool 1
+ */
+#define ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK \
+ (BIT(12) | BIT(13))
+
+/*
+ * Correctable error mask in SER_ERR_SSMSH
+ * BIT(1) - Indicates a correctable error has occurred
+ * in the slice controller command RFs
+ * BIT(6) - Indicates a correctable error has occurred in
+ * the target push and pull data RFs
+ * BIT(11) - Indicates a correctable error has occurred in
+ * the Resource Manager MECTX command RFs
+ */
+#define ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK \
+ (BIT(1) | BIT(6) | BIT(11))
+
+/* SSM shared memory SER error reporting mask */
+#define ADF_GEN4_SER_EN_SSMSH 0x450
+
+/*
+ * SSM SER error reporting mask in SER_en_err_ssmsh
+ * BIT(0) - Enables uncorrectable Error detection in :
+ * 1) slice controller command RFs.
+ * 2) target push/pull data registers
+ * BIT(1) - Enables correctable Error detection in :
+ * 1) slice controller command RFs
+ * 2) target push/pull data registers
+ * BIT(2) - Enables Parity error detection in
+ * 1) bank SPP fifos
+ * 2) gen4_pull_id_queue
+ * 3) gen4_push_id_queue
+ * 4) AE_pull_sigdn_fifo
+ * 5) DT_push_sigdn_fifo
+ * 6) slx_push_sigdn_fifo
+ * 7) secure_push_cmd_fifo
+ * 8) secure_pull_cmd_fifo
+ * 9) Head register in FIFO wrapper
+ * 10) current_cmd in individual push queue
+ * 11) current_cmd in individual pull queue
+ * 12) push_command_rxp arbitrated in ssm_push_cmd_queues
+ * 13) pull_command_rxp arbitrated in ssm_pull_cmd_queues
+ * BIT(3) - Enables uncorrectable Error detection in
+ * the resource manager mectx cmd RFs.
+ * BIT(4) - Enables correctable error detection in the Resource Manager
+ * mectx command RFs
+ * BIT(5) - Enables Parity error detection in
+ * 1) resource manager lock request fifo
+ * 2) mectx cmdqueues logic
+ * 3) mectx sigdone fifo
+ * BIT(6) - Enables Parity error detection in Buffer Manager pools
+ * and sigdone fifo
+ */
+#define ADF_GEN4_SER_EN_SSMSH_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6))
+
+#define ADF_GEN4_CPP_CFC_ERR_STATUS 0x640C04
+
+/*
+ * BIT(1) - Indicates multiple CPP CFC errors
+ * BIT(7) - Indicates CPP CFC command parity error type
+ * BIT(8) - Indicates CPP CFC data parity error type
+ */
+#define ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1)
+#define ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7)
+#define ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8)
+
+/*
+ * BIT(0) - Enables CFC to detect and log push/pull data error
+ * BIT(1) - Enables CFC to generate interrupt to PCIEP for CPP error
+ * BIT(4) - When set to 1, parity detection is disabled
+ * BIT(5) - When set to 1, parity detection is disabled on the CPP command bus
+ * BIT(6) - When set to 1, parity detection is disabled on the CPP push/pull bus
+ * BIT(9) - When set to 1, RF parity error detection is disabled
+ */
+#define ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK (BIT(0) | BIT(1))
+
+#define ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK \
+ (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10))
+
+#define ADF_GEN4_CPP_CFC_ERR_CTRL 0x640C00
+
+/*
+ * BIT(0) - Clears bit(0) of ADF_GEN4_CPP_CFC_ERR_STATUS
+ * when an error is reported on CPP
+ * BIT(1) - Clears bit(1) of ADF_GEN4_CPP_CFC_ERR_STATUS
+ * when multiple errors are reported on CPP
+ * BIT(2) - Clears bit(2) of ADF_GEN4_CPP_CFC_ERR_STATUS
+ * when attention interrupt is reported
+ */
+#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK (BIT(0) | BIT(1) | BIT(2))
+#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR 0x640C08
+
+#define ADF_GEN4_CPP_CFC_ERR_PPID_LO 0x640C0C
+#define ADF_GEN4_CPP_CFC_ERR_PPID_HI 0x640C10
+
+/* Exception reporting in QAT SSM CMP */
+#define ADF_GEN4_EXPRPSSMCPR 0x2000
+
+/*
+ * Uncorrectable error mask in EXPRPSSMCPR
+ * BIT(2) - Hard fatal error
+ * BIT(16) - Parity error detected in CPR Push FIFO
+ * BIT(17) - Parity error detected in CPR Pull FIFO
+ * BIT(18) - Parity error detected in CPR Hash Table
+ * BIT(19) - Parity error detected in CPR History Buffer Copy 0
+ * BIT(20) - Parity error detected in CPR History Buffer Copy 1
+ * BIT(21) - Parity error detected in CPR History Buffer Copy 2
+ * BIT(22) - Parity error detected in CPR History Buffer Copy 3
+ * BIT(23) - Parity error detected in CPR History Buffer Copy 4
+ * BIT(24) - Parity error detected in CPR History Buffer Copy 5
+ * BIT(25) - Parity error detected in CPR History Buffer Copy 6
+ * BIT(26) - Parity error detected in CPR History Buffer Copy 7
+ */
+#define ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK \
+ (BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+ BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26))
+
+/* Exception reporting in QAT SSM XLT */
+#define ADF_GEN4_EXPRPSSMXLT 0xA000
+
+/*
+ * Uncorrectable error mask in EXPRPSSMXLT
+ * BIT(2) - If set, an Uncorrectable Error event occurred
+ * BIT(16) - Parity error detected in XLT Push FIFO
+ * BIT(17) - Parity error detected in XLT Pull FIFO
+ * BIT(18) - Parity error detected in XLT HCTB0
+ * BIT(19) - Parity error detected in XLT HCTB1
+ * BIT(20) - Parity error detected in XLT HCTB2
+ * BIT(21) - Parity error detected in XLT HCTB3
+ * BIT(22) - Parity error detected in XLT CBCL
+ * BIT(23) - Parity error detected in XLT LITPTR
+ */
+#define ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK \
+ (BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \
+ BIT(22) | BIT(23))
+
+/*
+ * Correctable error mask in EXPRPSSMXLT
+ * BIT(3) - Correctable error event occurred.
+ */
+#define ADF_GEN4_EXPRPSSMXLT_CERR_BIT BIT(3)
+
+/* Exception reporting in QAT SSM DCMP */
+#define ADF_GEN4_EXPRPSSMDCPR(_n_) (0x12000 + (_n_) * 0x80)
+
+/*
+ * Uncorrectable error mask in EXPRPSSMDCPR
+ * BIT(2) - Even hard fatal error
+ * BIT(4) - Odd hard fatal error
+ * BIT(6) - decode soft error
+ * BIT(16) - Parity error detected in CPR Push FIFO
+ * BIT(17) - Parity error detected in CPR Pull FIFO
+ * BIT(18) - Parity error detected in the Input Buffer
+ * BIT(19) - symbuf0parerr
+ * Parity error detected in CPR Push FIFO
+ * BIT(20) - symbuf1parerr
+ * Parity error detected in CPR Push FIFO
+ */
+#define ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK \
+ (BIT(2) | BIT(4) | BIT(6) | BIT(16) | BIT(17) | \
+ BIT(18) | BIT(19) | BIT(20))
+
+/*
+ * Correctable error mask in EXPRPSSMDCPR
+ * BIT(3) - Even ecc correctable error
+ * BIT(5) - Odd ecc correctable error
+ */
+#define ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK (BIT(3) | BIT(5))
+
+#define ADF_GEN4_DCPR_SLICES_NUM 3
+
+/*
+ * ERRSOU3 bit masks
+ * BIT(0) - indicates error Response Order Overflow and/or BME error
+ * BIT(1) - indicates RI push/pull error
+ * BIT(2) - indicates TI push/pull error
+ * BIT(3) - indicates ARAM correctable error
+ * BIT(4) - indicates ARAM uncorrectable error
+ * BIT(5) - indicates TI pull parity error
+ * BIT(6) - indicates RI push parity error
+ * BIT(7) - indicates VFLR interrupt
+ * BIT(8) - indicates ring pair interrupts for ATU detected fault
+ * BIT(9) - indicates error when accessing RLT block
+ */
+#define ADF_GEN4_ERRSOU3_TIMISCSTS_BIT BIT(0)
+#define ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK (BIT(1) | BIT(6))
+#define ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK (BIT(2) | BIT(5))
+#define ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT BIT(3)
+#define ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT BIT(4)
+#define ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT BIT(7)
+#define ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8)
+#define ADF_GEN4_ERRSOU3_RLTERROR_BIT BIT(9)
+
+#define ADF_GEN4_ERRSOU3_BITMASK ( \
+ (ADF_GEN4_ERRSOU3_TIMISCSTS_BIT) | \
+ (ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK) | \
+ (ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK) | \
+ (ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT) | \
+ (ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT) | \
+ (ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT) | \
+ (ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT) | \
+ (ADF_GEN4_ERRSOU3_RLTERROR_BIT))
+
+/* TI Misc status register */
+#define ADF_GEN4_TIMISCSTS 0x50054C
+
+/* TI Misc error reporting mask */
+#define ADF_GEN4_TIMISCCTL 0x500548
+
+/*
+ * TI Misc error reporting control mask
+ * BIT(0) - Enables error detection and logging in TIMISCSTS register
+ * BIT(1) - Takes effect only when SRIOV is enabled; this bit is 0 by default
+ * BIT(2) - Enables the D-F-x counter within the dispatch arbiter
+ * to start based on the command triggered from
+ * BIT(30) - Disables VFLR functionality;
+ * setting this bit reverts to CPM1.x functionality
+ * The values of bits 1, 2 and 30 should be preserved and are not meant
+ * to be changed within RAS.
+ */
+#define ADF_GEN4_TIMISCCTL_BIT BIT(0)
+#define ADF_GEN4_TIMSCCTL_RELAY_BITMASK (BIT(1) | BIT(2) | BIT(30))
+
+/* RI CPP interface status register */
+#define ADF_GEN4_RICPPINTSTS 0x41A330
+
+/*
+ * Uncorrectable error mask in RICPPINTSTS register
+ * BIT(0) - RI asserted the CPP error signal during a push
+ * BIT(1) - RI detected the CPP error signal asserted during a pull
+ * BIT(2) - RI detected a push data parity error
+ * BIT(3) - RI detected a push valid parity error
+ */
+#define ADF_GEN4_RICPPINTSTS_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+/* RI CPP interface status register control */
+#define ADF_GEN4_RICPPINTCTL 0x41A32C
+
+/*
+ * Control bit mask for RICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting
+ * on the RI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting
+ * on the RI CPP Pull interface
+ * BIT(2) - value of 1 enables error detection and reporting
+ * on the RI Parity
+ * BIT(3) - value of 1 enables parity checking on CPP
+ * BIT(4) - value of 1 enables the stop feature of stop and scream mode
+ *          for all RI CPP Command RFs
+ */
+#define ADF_GEN4_RICPPINTCTL_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* Push ID of the command which triggered the transaction error on RI */
+#define ADF_GEN4_RIERRPUSHID 0x41A334
+
+/* Pull ID of the command which triggered the transaction error on RI */
+#define ADF_GEN4_RIERRPULLID 0x41A338
+
+/* TI CPP interface status register */
+#define ADF_GEN4_TICPPINTSTS 0x50053C
+
+/*
+ * Uncorrectable error mask in TICPPINTSTS register
+ * BIT(0) - value of 1 indicates that the TI asserted
+ * the CPP error signal during a push
+ * BIT(1) - value of 1 indicates that the TI detected
+ * the CPP error signal asserted during a pull
+ * BIT(2) - value of 1 indicates that the TI detected
+ * a pull data parity error
+ */
+#define ADF_GEN4_TICPPINTSTS_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2))
+
+/* TI CPP interface status register control */
+#define ADF_GEN4_TICPPINTCTL 0x500538
+
+/*
+ * Control bit mask for TICPPINTCTL register
+ * BIT(0) - value of 1 enables error detection and reporting on
+ * the TI CPP Push interface
+ * BIT(1) - value of 1 enables error detection and reporting on
+ *          the TI CPP Pull interface
+ * BIT(2) - value of 1 enables parity error detection and logging on
+ * the TI CPP Pull interface
+ * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking
+ * BIT(4) - value of 1 enables TI stop part of stop and scream mode on
+ * CPP/RF Parity error
+ */
+#define ADF_GEN4_TICPPINTCTL_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4))
+
+/* Push ID of the command which triggered the transaction error on TI */
+#define ADF_GEN4_TIERRPUSHID 0x500540
+
+/* Pull ID of the command which triggered the transaction error on TI */
+#define ADF_GEN4_TIERRPULLID 0x500544
+
+/* Correctable error in ARAM agent register */
+#define ADF_GEN4_REG_ARAMCERR 0x1700
+
+#define ADF_GEN4_REG_ARAMCERR_BIT BIT(0)
+
+/*
+ * Correctable error enablement in ARAM bit mask
+ * BIT(3) - enables ARAM RAM to fix and log correctable errors
+ * BIT(26) - enables ARAM agent to generate interrupt for correctable error
+ */
+#define ADF_GEN4_REG_ARAMCERR_EN_BITMASK (BIT(3) | BIT(26))
+
+/* Correctable error address in ARAM agent register */
+#define ADF_GEN4_REG_ARAMCERRAD 0x1708
+
+/* Uncorrectable error in ARAM agent register */
+#define ADF_GEN4_REG_ARAMUERR 0x1704
+
+/*
+ * ARAM error bit mask
+ * BIT(0) - indicates error logged in ARAMCERR or ARAMUERR
+ * BIT(18) - indicates uncorrectable multiple errors in ARAM agent
+ */
+#define ADF_GEN4_REG_ARAMUERR_ERROR_BIT BIT(0)
+#define ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT BIT(18)
+
+/*
+ * Uncorrectable error enablement in ARAM bit mask
+ * BIT(3) - enables ARAM RAM to fix and log uncorrectable errors
+ * BIT(19) - enables ARAM agent to generate interrupt for uncorrectable error
+ */
+#define ADF_GEN4_REG_ARAMUERR_EN_BITMASK (BIT(3) | BIT(19))
+
+/* Uncorrectable error address in ARAM agent register */
+#define ADF_GEN4_REG_ARAMUERRAD 0x170C
+
+/* Uncorrectable error transaction push/pull ID registers */
+#define ADF_GEN4_REG_ERRPPID_LO 0x1714
+#define ADF_GEN4_REG_ERRPPID_HI 0x1718
+
+/* ARAM ECC block error enablement */
+#define ADF_GEN4_REG_ARAMCERRUERR_EN 0x1808
+
+/*
+ * ARAM ECC block error control bit masks
+ * BIT(0) - enable ARAM CD ECC block error detecting
+ * BIT(1) - enable ARAM pull request ECC error detecting
+ * BIT(2) - enable ARAM command dispatch ECC error detecting
+ * BIT(3) - enable ARAM read datapath push ECC error detecting
+ * BIT(4) - enable ARAM read datapath pull ECC error detecting
+ * BIT(5) - enable ARAM RMW ECC error detecting
+ * BIT(6) - enable ARAM write datapath RMW ECC error detecting
+ * BIT(7) - enable ARAM write datapath ECC error detecting
+ */
+#define ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK \
+ (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | \
+ BIT(5) | BIT(6) | BIT(7))
+
+/* ARAM misc memory target error registers */
+#define ADF_GEN4_REG_CPPMEMTGTERR 0x1710
+
+/*
+ * ARAM misc memory target error bit masks
+ * BIT(0) - indicates an error in ARAM target memory
+ * BIT(1) - indicates multiple errors in ARAM target memory
+ * BIT(4) - indicates pull error in ARAM target memory
+ * BIT(5) - indicates parity pull error in ARAM target memory
+ * BIT(6) - indicates push error in ARAM target memory
+ */
+#define ADF_GEN4_REG_CPPMEMTGTERR_BITMASK \
+ (BIT(0) | BIT(4) | BIT(5) | BIT(6))
+
+#define ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT BIT(1)
+
+/*
+ * ARAM misc memory target error enablement mask
+ * BIT(2) - enables CPP memory to detect and log push/pull data error
+ * BIT(7) - enables push/pull error to generate interrupts to RI
+ * BIT(8) - enables ARAM to check parity on pull data and CPP command buses
+ * BIT(9) - enables ARAM to autopush to AE when push/parity error is detected
+ * on lookaside DT
+ */
+#define ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK \
+ (BIT(2) | BIT(7) | BIT(8) | BIT(9))
+
+/* ATU fault status register */
+#define ADF_GEN4_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4))
+
+#define ADF_GEN4_ATUFAULTSTATUS_BIT BIT(0)
+
+/* Command Parity error detected on IOSFP Command to QAT */
+#define ADF_GEN4_RIMISCSTS_BIT BIT(0)
+
+void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops);
+
+#endif /* ADF_GEN4_RAS_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
index 646c57922f..35ccb91d6e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_gen4_timer.h"
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
index beef9a5f6c..13f48d2f6d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/errno.h>
#include "adf_accel_devices.h"
+#include "adf_admin.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_clock.h"
diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
index 803cbfd838..2661af6a2e 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/types.h>
+#include "adf_admin.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_heartbeat.h"
diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c
index 0f9e2d59ce..81c39f3d07 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_init.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_init.c
@@ -9,6 +9,8 @@
#include "adf_common_drv.h"
#include "adf_dbgfs.h"
#include "adf_heartbeat.h"
+#include "adf_rl.h"
+#include "adf_sysfs_ras_counters.h"
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
@@ -61,7 +63,6 @@ int adf_service_unregister(struct service_hndl *service)
static int adf_dev_init(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
- struct list_head *list_itr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
int ret;
@@ -120,6 +121,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
}
set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
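+ /* Enable device specific RAS error reporting, if implemented */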
+ if (hw_data->ras_ops.enable_ras_errors)
+ hw_data->ras_ops.enable_ras_errors(accel_dev);
+
hw_data->enable_ints(accel_dev);
hw_data->enable_error_correction(accel_dev);
@@ -134,14 +138,16 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev)
}
adf_heartbeat_init(accel_dev);
+ ret = adf_rl_init(accel_dev);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
/*
* Subservice initialisation is divided into two stages: init and start.
* This is to facilitate any ordering dependencies between services
* prior to starting any of the accelerators.
*/
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
+ list_for_each_entry(service, &service_table, list) {
if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
dev_err(&GET_DEV(accel_dev),
"Failed to initialise service %s\n",
@@ -168,7 +174,6 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
- struct list_head *list_itr;
int ret;
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
@@ -211,9 +216,11 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
}
adf_heartbeat_start(accel_dev);
+ ret = adf_rl_start(accel_dev);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
+ list_for_each_entry(service, &service_table, list) {
if (service->event_hld(accel_dev, ADF_EVENT_START)) {
dev_err(&GET_DEV(accel_dev),
"Failed to start service %s\n",
@@ -246,6 +253,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev)
set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
adf_dbgfs_add(accel_dev);
+ adf_sysfs_start_ras(accel_dev);
return 0;
}
@@ -264,7 +272,6 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
- struct list_head *list_itr;
bool wait = false;
int ret;
@@ -272,7 +279,9 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
!test_bit(ADF_STATUS_STARTING, &accel_dev->status))
return;
+ adf_rl_stop(accel_dev);
adf_dbgfs_rm(accel_dev);
+ adf_sysfs_stop_ras(accel_dev);
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -289,8 +298,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev)
qat_comp_algs_unregister();
clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
+ list_for_each_entry(service, &service_table, list) {
if (!test_bit(accel_dev->accel_id, service->start_status))
continue;
ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
@@ -327,7 +335,6 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
- struct list_head *list_itr;
if (!hw_data) {
dev_err(&GET_DEV(accel_dev),
@@ -349,8 +356,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
&accel_dev->status);
}
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
+ list_for_each_entry(service, &service_table, list) {
if (!test_bit(accel_dev->accel_id, service->init_status))
continue;
if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
@@ -361,6 +367,11 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
clear_bit(accel_dev->accel_id, service->init_status);
}
+ adf_rl_exit(accel_dev);
+
+ if (hw_data->ras_ops.disable_ras_errors)
+ hw_data->ras_ops.disable_ras_errors(accel_dev);
+
adf_heartbeat_shutdown(accel_dev);
hw_data->disable_iov(accel_dev);
@@ -387,10 +398,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
- struct list_head *list_itr;
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
+ list_for_each_entry(service, &service_table, list) {
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
@@ -402,10 +411,8 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
- struct list_head *list_itr;
- list_for_each(list_itr, &service_table) {
- service = list_entry(list_itr, struct service_hndl, list);
+ list_for_each_entry(service, &service_table, list) {
if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
dev_err(&GET_DEV(accel_dev),
"Failed to restart service %s.\n",
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index 2aba194a7c..3557a0d6de 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -132,6 +132,21 @@ static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev)
return false;
}
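+/*
+ * Dispatch a RAS interrupt to the device specific handler, if one is
+ * registered. The handler reports via the reset_required flag whether the
+ * detected error is fatal; this path only logs that a reset is needed and
+ * does not itself perform the recovery.
+ */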
+static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev)
+{
+ struct adf_ras_ops *ras_ops = &accel_dev->hw_device->ras_ops;
+ bool reset_required;
+
+ if (ras_ops->handle_interrupt &&
+ ras_ops->handle_interrupt(accel_dev, &reset_required)) {
+ if (reset_required)
+ dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n");
+ return true;
+ }
+
+ return false;
+}
+
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
struct adf_accel_dev *accel_dev = dev_ptr;
@@ -145,6 +160,9 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
if (adf_handle_pm_int(accel_dev))
return IRQ_HANDLED;
+ if (adf_handle_ras_int(accel_dev))
+ return IRQ_HANDLED;
+
dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
accel_dev->accel_id);
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c
new file mode 100644
index 0000000000..f0a13c1901
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+
+#include "adf_accel_devices.h"
+#include "adf_pm_dbgfs.h"
+
+static ssize_t pm_status_read(struct file *f, char __user *buf, size_t count,
+ loff_t *pos)
+{
+ struct adf_accel_dev *accel_dev = file_inode(f)->i_private;
+ struct adf_pm pm = accel_dev->power_management;
+
+ if (pm.print_pm_status)
+ return pm.print_pm_status(accel_dev, buf, count, pos);
+
+ return count;
+}
+
+static const struct file_operations pm_status_fops = {
+ .owner = THIS_MODULE,
+ .read = pm_status_read,
+};
+
+void adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_pm *pm = &accel_dev->power_management;
+
+ if (!pm->present || !pm->print_pm_status)
+ return;
+
+ pm->debugfs_pm_status = debugfs_create_file("pm_status", 0400,
+ accel_dev->debugfs_dir,
+ accel_dev, &pm_status_fops);
+}
+
+void adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_pm *pm = &accel_dev->power_management;
+
+ if (!pm->present)
+ return;
+
+ debugfs_remove(pm->debugfs_pm_status);
+ pm->debugfs_pm_status = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h
new file mode 100644
index 0000000000..83632e5aa0
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_PM_DBGFS_H_
+#define ADF_PM_DBGFS_H_
+
+struct adf_accel_dev;
+
+void adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev);
+void adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_PM_DBGFS_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c
new file mode 100644
index 0000000000..de1b214dba
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c
@@ -0,0 +1,1168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#define dev_fmt(fmt) "RateLimiting: " fmt
+
+#include <asm/errno.h>
+#include <asm/div64.h>
+
+#include <linux/dev_printk.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/units.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_rl_admin.h"
+#include "adf_rl.h"
+#include "adf_sysfs_rl.h"
+
+#define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET 0U
+#define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET 0U
+#define RL_TOKEN_PCIE_SIZE 64
+#define RL_TOKEN_ASYM_SIZE 1024
+#define RL_CSR_SIZE 4U
+#define RL_CAPABILITY_MASK GENMASK(6, 4)
+#define RL_CAPABILITY_VALUE 0x70
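+/* Evaluates to true when the input is zero, i.e. when validation fails */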
+#define RL_VALIDATE_NON_ZERO(input) ((input) == 0)
+#define ROOT_MASK GENMASK(1, 0)
+#define CLUSTER_MASK GENMASK(3, 0)
+#define LEAF_MASK GENMASK(5, 0)
+
+static int validate_user_input(struct adf_accel_dev *accel_dev,
+ struct adf_rl_sla_input_data *sla_in,
+ bool is_update)
+{
+ const unsigned long rp_mask = sla_in->rp_mask;
+ size_t rp_mask_size;
+ int i, cnt;
+
+ if (sla_in->pir < sla_in->cir) {
+ dev_notice(&GET_DEV(accel_dev),
+ "PIR must be >= CIR, setting PIR to CIR\n");
+ sla_in->pir = sla_in->cir;
+ }
+
+ if (!is_update) {
+ cnt = 0;
+ rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE;
+ for_each_set_bit(i, &rp_mask, rp_mask_size) {
+ if (++cnt > RL_RP_CNT_PER_LEAF_MAX) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Too many ring pairs selected for this SLA\n");
+ return -EINVAL;
+ }
+ }
+
+ if (sla_in->srv >= ADF_SVC_NONE) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Wrong service type\n");
+ return -EINVAL;
+ }
+
+ if (sla_in->type > RL_LEAF) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Wrong node type\n");
+ return -EINVAL;
+ }
+
+ if (sla_in->parent_id < RL_PARENT_DEFAULT_ID ||
+ sla_in->parent_id >= RL_NODES_CNT_MAX) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Wrong parent ID\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id)
+{
+ struct rl_sla *sla;
+
+ if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) {
+ dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n");
+ return -EINVAL;
+ }
+
+ sla = accel_dev->rate_limiting->sla[sla_id];
+
+ if (!sla) {
+ dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n");
+ return -EINVAL;
+ }
+
+ if (sla->type != RL_LEAF) {
+ dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * find_parent() - Find the parent for a new SLA
+ * @rl_data: pointer to ratelimiting data
+ * @sla_in: pointer to user input data for a new SLA
+ *
+ * Returns a pointer to the parent SLA. If a parent ID is provided in the
+ * user input data, that ID is validated and the corresponding parent SLA
+ * is returned.
+ * Otherwise, it returns the default parent SLA (root or cluster) for
+ * the new object.
+ *
+ * Return:
+ * * Pointer to the parent SLA object
+ * * NULL - when parent cannot be found
+ */
+static struct rl_sla *find_parent(struct adf_rl *rl_data,
+ struct adf_rl_sla_input_data *sla_in)
+{
+ int input_parent_id = sla_in->parent_id;
+ struct rl_sla *root = NULL;
+ struct rl_sla *parent_sla;
+ int i;
+
+ if (sla_in->type == RL_ROOT)
+ return NULL;
+
+ if (input_parent_id > RL_PARENT_DEFAULT_ID) {
+ parent_sla = rl_data->sla[input_parent_id];
+ /*
+ * SLA can be a parent if it has the same service as the child
+ * and its type is higher in the hierarchy,
+ * for example the parent type of a LEAF must be a CLUSTER.
+ */
+ if (parent_sla && parent_sla->srv == sla_in->srv &&
+ parent_sla->type == sla_in->type - 1)
+ return parent_sla;
+
+ return NULL;
+ }
+
+ /* If input_parent_id is not valid, get root for this service type. */
+ for (i = 0; i < RL_ROOT_MAX; i++) {
+ if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) {
+ root = rl_data->root[i];
+ break;
+ }
+ }
+
+ if (!root)
+ return NULL;
+
+ /*
+ * If the type of this SLA is cluster, then return the root.
+ * Otherwise, find the default (i.e. first) cluster for this service.
+ */
+ if (sla_in->type == RL_CLUSTER)
+ return root;
+
+ for (i = 0; i < RL_CLUSTER_MAX; i++) {
+ if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root)
+ return rl_data->cluster[i];
+ }
+
+ return NULL;
+}
+
+static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv)
+{
+ switch (rl_srv) {
+ case ADF_SVC_ASYM:
+ return ASYM;
+ case ADF_SVC_SYM:
+ return SYM;
+ case ADF_SVC_DC:
+ return COMP;
+ default:
+ return UNUSED;
+ }
+}
+
+/**
+ * get_sla_arr_of_type() - Returns a pointer to SLA type specific array
+ * @rl_data: pointer to ratelimiting data
+ * @type: SLA type
+ * @sla_arr: pointer to variable where requested pointer will be stored
+ *
+ * Return: Max number of elements allowed for the returned array
+ */
+static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
+ struct rl_sla ***sla_arr)
+{
+ switch (type) {
+ case RL_LEAF:
+ *sla_arr = rl_data->leaf;
+ return RL_LEAF_MAX;
+ case RL_CLUSTER:
+ *sla_arr = rl_data->cluster;
+ return RL_CLUSTER_MAX;
+ case RL_ROOT:
+ *sla_arr = rl_data->root;
+ return RL_ROOT_MAX;
+ default:
+ *sla_arr = NULL;
+ return 0;
+ }
+}
+
+static bool is_service_enabled(struct adf_accel_dev *accel_dev,
+ enum adf_base_services rl_srv)
+{
+ enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv);
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u8 rps_per_bundle = hw_data->num_banks_per_vf;
+ int i;
+
+ for (i = 0; i < rps_per_bundle; i++) {
+ if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask
+ * @accel_dev: pointer to acceleration device structure
+ * @sla: SLA object data where result will be written
+ * @rp_mask: bitmask of ring pair IDs
+ *
+ * The function converts the provided bitmask into an array of ring pair IDs.
+ * It checks that the RPs are not already in use, that they support the SLA
+ * service and that no more IDs are provided than supported. On success the
+ * result is written to sla->ring_pairs_ids and sla->ring_pairs_cnt.
+ *
+ * Return:
+ * * 0 - ok
+ * * -EINVAL - ring pairs array cannot be created from provided mask
+ */
+static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
+ const unsigned long rp_mask)
+{
+ enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv);
+ u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
+ bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
+ size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids);
+ u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks;
+ u16 cnt = 0;
+ u16 rp_id;
+
+ for_each_set_bit(rp_id, &rp_mask, rp_id_max) {
+ if (cnt >= rp_cnt_max) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Assigned more ring pairs than supported");
+ return -EINVAL;
+ }
+
+ if (rp_in_use[rp_id]) {
+ dev_notice(&GET_DEV(accel_dev),
+ "RP %u already assigned to other SLA", rp_id);
+ return -EINVAL;
+ }
+
+ if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) {
+ dev_notice(&GET_DEV(accel_dev),
+ "RP %u does not support SLA service", rp_id);
+ return -EINVAL;
+ }
+
+ sla->ring_pairs_ids[cnt++] = rp_id;
+ }
+
+ sla->ring_pairs_cnt = cnt;
+
+ return 0;
+}
+
+static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used)
+{
+ u16 rp_id;
+ int i;
+
+ for (i = 0; i < sla->ring_pairs_cnt; i++) {
+ rp_id = sla->ring_pairs_ids[i];
+ rp_in_use[rp_id] = used;
+ }
+}
+
+static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev,
+ struct rl_sla *sla, bool clear)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 base_offset = hw_data->rl_data.r2l_offset;
+ u32 node_id = clear ? 0U : (sla->node_id & LEAF_MASK);
+ u32 offset;
+ int i;
+
+ for (i = 0; i < sla->ring_pairs_cnt; i++) {
+ offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]);
+ ADF_CSR_WR(pmisc_addr, offset, node_id);
+ }
+}
+
+static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev,
+ struct rl_sla *sla, bool clear)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 base_offset = hw_data->rl_data.l2c_offset;
+ u32 node_id = sla->node_id & LEAF_MASK;
+ u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK);
+ u32 offset;
+
+ offset = base_offset + (RL_CSR_SIZE * node_id);
+ ADF_CSR_WR(pmisc_addr, offset, parent_id);
+}
+
+static void assign_cluster_to_root(struct adf_accel_dev *accel_dev,
+ struct rl_sla *sla, bool clear)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u32 base_offset = hw_data->rl_data.c2s_offset;
+ u32 node_id = sla->node_id & CLUSTER_MASK;
+ u32 parent_id = clear ? 0U : (sla->parent->node_id & ROOT_MASK);
+ u32 offset;
+
+ offset = base_offset + (RL_CSR_SIZE * node_id);
+ ADF_CSR_WR(pmisc_addr, offset, parent_id);
+}
+
+static void assign_node_to_parent(struct adf_accel_dev *accel_dev,
+ struct rl_sla *sla, bool clear_assignment)
+{
+ switch (sla->type) {
+ case RL_LEAF:
+ assign_rps_to_leaf(accel_dev, sla, clear_assignment);
+ assign_leaf_to_cluster(accel_dev, sla, clear_assignment);
+ break;
+ case RL_CLUSTER:
+ assign_cluster_to_root(accel_dev, sla, clear_assignment);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * can_parent_afford_sla() - Verifies if the parent allows creating an SLA
+ * @sla_in: pointer to user input data for a new SLA
+ * @sla_parent: pointer to parent SLA object
+ * @sla_cir: current child CIR value (only for update)
+ * @is_update: true if the request is an update
+ *
+ * The algorithm verifies that the parent has enough remaining budget to take
+ * on a child with the provided parameters. In the update case the current CIR
+ * value is first returned to the budget.
+ * The PIR value cannot exceed the PIR assigned to the parent.
+ *
+ * Return:
+ * * true - SLA can be created
+ * * false - SLA cannot be created
+ */
+static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in,
+ struct rl_sla *sla_parent, u32 sla_cir,
+ bool is_update)
+{
+ u32 rem_cir = sla_parent->rem_cir;
+
+ if (is_update)
+ rem_cir += sla_cir;
+
+ if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir)
+ return false;
+
+ return true;
+}
+
+/**
+ * can_node_afford_update() - Verifies if SLA can be updated with input data
+ * @sla_in: pointer to user input data for a new SLA
+ * @sla: pointer to SLA object selected for update
+ *
+ * The algorithm verifies that the new CIR value is large enough to satisfy
+ * the currently assigned child SLAs and that the PIR can be updated.
+ *
+ * Return:
+ * * true - SLA can be updated
+ * * false - SLA cannot be updated
+ */
+static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in,
+ struct rl_sla *sla)
+{
+ u32 cir_in_use = sla->cir - sla->rem_cir;
+
+ /* new CIR cannot be smaller than the currently consumed value */
+ if (cir_in_use > sla_in->cir)
+ return false;
+
+ /* PIR of a root/cluster cannot be reduced in a node with assigned children */
+ if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0)
+ return false;
+
+ return true;
+}
+
+static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla,
+ struct adf_rl_sla_input_data *sla_in,
+ bool is_update)
+{
+ u32 max_val = rl_data->device_data->scale_ref;
+ struct rl_sla *parent = sla->parent;
+ bool ret = true;
+
+ if (sla_in->cir > max_val || sla_in->pir > max_val)
+ ret = false;
+
+ switch (sla->type) {
+ case RL_LEAF:
+ ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
+ is_update);
+ break;
+ case RL_CLUSTER:
+ ret &= can_parent_afford_sla(sla_in, parent, sla->cir,
+ is_update);
+
+ if (is_update)
+ ret &= can_node_afford_update(sla_in, sla);
+
+ break;
+ case RL_ROOT:
+ if (is_update)
+ ret &= can_node_afford_update(sla_in, sla);
+
+ break;
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
+
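+/*
+ * Adjust the remaining CIR budgets after an SLA is added or updated. On
+ * update the old CIR is first returned to the parent budget before the new
+ * value is subtracted, so the accounting reflects only the rates that are
+ * currently committed.
+ */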
+static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update)
+{
+ switch (sla->type) {
+ case RL_LEAF:
+ if (is_update)
+ sla->parent->rem_cir += old_cir;
+
+ sla->parent->rem_cir -= sla->cir;
+ sla->rem_cir = 0;
+ break;
+ case RL_CLUSTER:
+ if (is_update) {
+ sla->parent->rem_cir += old_cir;
+ sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);
+ } else {
+ sla->rem_cir = sla->cir;
+ }
+
+ sla->parent->rem_cir -= sla->cir;
+ break;
+ case RL_ROOT:
+ if (is_update)
+ sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);
+ else
+ sla->rem_cir = sla->cir;
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * get_next_free_sla_id() - finds next free ID in the SLA array
+ * @rl_data: Pointer to ratelimiting data structure
+ *
+ * Return:
+ * * 0 .. (RL_NODES_CNT_MAX - 1) - correct ID
+ * * -ENOSPC - all SLA slots are in use
+ */
+static int get_next_free_sla_id(struct adf_rl *rl_data)
+{
+ int i;
+
+ for (i = 0; i < RL_NODES_CNT_MAX; i++) {
+ if (!rl_data->sla[i])
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+/**
+ * get_next_free_node_id() - finds next free ID in the array of that node type
+ * @rl_data: Pointer to ratelimiting data structure
+ * @sla: Pointer to SLA object for which the ID is searched
+ *
+ * Return:
+ * * 0 .. (RL_[NODE_TYPE]_MAX - 1) - correct ID
+ * * -ENOSPC - all slots of that type are in use
+ */
+static int get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla)
+{
+ struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev);
+ int max_id, i, step, rp_per_leaf;
+ struct rl_sla **sla_list;
+
+ rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf;
+
+ /*
+ * Static nodes mapping:
+ * root0 - cluster[0,4,8,12] - leaf[0-15]
+ * root1 - cluster[1,5,9,13] - leaf[16-31]
+ * root2 - cluster[2,6,10,14] - leaf[32-47]
+ */
+ switch (sla->type) {
+ case RL_LEAF:
+ i = sla->srv * rp_per_leaf;
+ step = 1;
+ max_id = i + rp_per_leaf;
+ sla_list = rl_data->leaf;
+ break;
+ case RL_CLUSTER:
+ i = sla->srv;
+ step = 4;
+ max_id = RL_CLUSTER_MAX;
+ sla_list = rl_data->cluster;
+ break;
+ case RL_ROOT:
+ return sla->srv;
+ default:
+ return -EINVAL;
+ }
+
+ while (i < max_id && sla_list[i])
+ i += step;
+
+ if (i >= max_id)
+ return -ENOSPC;
+
+ return i;
+}
+
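+/*
+ * Convert a permille SLA value into slice utilization tokens:
+ *
+ *	tokens = (clock_frequency * slice_cnt / scan_interval) * sla_val
+ *		 / scale_ref
+ *
+ * As a sketch with purely illustrative numbers: a 500 MHz clock, 4 slices,
+ * a scan interval of 1000 and a scale_ref of 1000 give 2000000 available
+ * slice cycles per interval, so sla_val == 100 (10% of the device) yields
+ * 200000 tokens.
+ */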
+u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
+ enum adf_base_services svc_type)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u64 avail_slice_cycles, allocated_tokens;
+
+ if (!sla_val)
+ return 0;
+
+ avail_slice_cycles = hw_data->clock_frequency;
+
+ switch (svc_type) {
+ case ADF_SVC_ASYM:
+ avail_slice_cycles *= device_data->slices.pke_cnt;
+ break;
+ case ADF_SVC_SYM:
+ avail_slice_cycles *= device_data->slices.cph_cnt;
+ break;
+ case ADF_SVC_DC:
+ avail_slice_cycles *= device_data->slices.dcpr_cnt;
+ break;
+ default:
+ break;
+ }
+
+ do_div(avail_slice_cycles, device_data->scan_interval);
+ allocated_tokens = avail_slice_cycles * sla_val;
+ do_div(allocated_tokens, device_data->scale_ref);
+
+ return allocated_tokens;
+}
+
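+/*
+ * Convert a permille SLA value into acceleration engine (AE) cycles. One AE
+ * is excluded from the cycle budget (hence the "- 1"); the available cycles
+ * per scan interval are then scaled by sla_val relative to scale_ref,
+ * analogously to the slice token calculation above.
+ */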
+u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
+ enum adf_base_services svc_type)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ u64 allocated_ae_cycles, avail_ae_cycles;
+
+ if (!sla_val)
+ return 0;
+
+ avail_ae_cycles = hw_data->clock_frequency;
+ avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1;
+ do_div(avail_ae_cycles, device_data->scan_interval);
+
+ sla_val *= device_data->max_tp[svc_type];
+ sla_val /= device_data->scale_ref;
+
+ allocated_ae_cycles = (sla_val * avail_ae_cycles);
+ do_div(allocated_ae_cycles, device_data->max_tp[svc_type]);
+
+ return allocated_ae_cycles;
+}
+
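+/*
+ * Convert a permille SLA value into PCIe bandwidth tokens. The SLA is first
+ * scaled to the service maximum throughput and expressed in bytes (asym uses
+ * a fixed token size instead of megabits), then corrected by the device
+ * specific pcie_scale_mul/pcie_scale_div factors and finally divided by the
+ * PCIe token size and the scan interval.
+ */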
+u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
+ enum adf_base_services svc_type, bool is_bw_out)
+{
+ struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
+ u64 sla_to_bytes, allocated_bw, sla_scaled;
+
+ if (!sla_val)
+ return 0;
+
+ sla_to_bytes = sla_val;
+ sla_to_bytes *= device_data->max_tp[svc_type];
+ do_div(sla_to_bytes, device_data->scale_ref);
+
+ sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE :
+ BYTES_PER_MBIT;
+ if (svc_type == ADF_SVC_DC && is_bw_out)
+ sla_to_bytes *= device_data->slices.dcpr_cnt -
+ device_data->dcpr_correction;
+
+ sla_scaled = sla_to_bytes * device_data->pcie_scale_mul;
+ do_div(sla_scaled, device_data->pcie_scale_div);
+ allocated_bw = sla_scaled;
+ do_div(allocated_bw, RL_TOKEN_PCIE_SIZE);
+ do_div(allocated_bw, device_data->scan_interval);
+
+ return allocated_bw;
+}
+
+/**
+ * add_new_sla_entry() - creates a new SLA object and fills it with user data
+ * @accel_dev: pointer to acceleration device structure
+ * @sla_in: pointer to user input data for a new SLA
+ * @sla_out: Pointer to variable that will contain the address of a new
+ * SLA object if the operation succeeds
+ *
+ * Return:
+ * * 0 - ok
+ * * -ENOMEM - memory allocation failed
+ * * -EINVAL - invalid user input
+ * * -ENOSPC - all available SLAs are in use
+ */
+static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
+ struct adf_rl_sla_input_data *sla_in,
+ struct rl_sla **sla_out)
+{
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ struct rl_sla *sla;
+ int ret = 0;
+
+ sla = kzalloc(sizeof(*sla), GFP_KERNEL);
+ if (!sla) {
+ ret = -ENOMEM;
+ goto ret_err;
+ }
+ *sla_out = sla;
+
+ if (!is_service_enabled(accel_dev, sla_in->srv)) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Provided service is not enabled\n");
+ ret = -EINVAL;
+ goto ret_err;
+ }
+
+ sla->srv = sla_in->srv;
+ sla->type = sla_in->type;
+ ret = get_next_free_node_id(rl_data, sla);
+ if (ret < 0) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Exceeded number of available nodes for that service\n");
+ goto ret_err;
+ }
+ sla->node_id = ret;
+
+ ret = get_next_free_sla_id(rl_data);
+ if (ret < 0) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Allocated maximum SLAs number\n");
+ goto ret_err;
+ }
+ sla->sla_id = ret;
+
+ sla->parent = find_parent(rl_data, sla_in);
+ if (!sla->parent && sla->type != RL_ROOT) {
+ if (sla_in->parent_id != RL_PARENT_DEFAULT_ID)
+ dev_notice(&GET_DEV(accel_dev),
+ "Provided parent ID does not exist or cannot be parent for this SLA.");
+ else
+ dev_notice(&GET_DEV(accel_dev),
+ "Unable to find parent node for this service. Is service enabled?");
+ ret = -EINVAL;
+ goto ret_err;
+ }
+
+ if (sla->type == RL_LEAF) {
+ ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask);
+ if (!sla->ring_pairs_cnt || ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Unable to find ring pairs to assign to the leaf");
+ if (!ret)
+ ret = -EINVAL;
+
+ goto ret_err;
+ }
+ }
+
+ return 0;
+
+ret_err:
+ kfree(sla);
+ *sla_out = NULL;
+
+ return ret;
+}
+
+static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
+{
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ struct adf_rl_hw_data *device_data = rl_data->device_data;
+ struct adf_rl_sla_input_data sla_in = { };
+ int ret = 0;
+ int i;
+
+ /* Init root for each enabled service */
+ sla_in.type = RL_ROOT;
+ sla_in.parent_id = RL_PARENT_DEFAULT_ID;
+
+ for (i = 0; i < ADF_SVC_NONE; i++) {
+ if (!is_service_enabled(accel_dev, i))
+ continue;
+
+ sla_in.cir = device_data->scale_ref;
+ sla_in.pir = sla_in.cir;
+ sla_in.srv = i;
+
+ ret = adf_rl_add_sla(accel_dev, &sla_in);
+ if (ret)
+ return ret;
+ }
+
+ /* Init default cluster for each root */
+ sla_in.type = RL_CLUSTER;
+ for (i = 0; i < ADF_SVC_NONE; i++) {
+ if (!rl_data->root[i])
+ continue;
+
+ sla_in.cir = rl_data->root[i]->cir;
+ sla_in.pir = sla_in.cir;
+ sla_in.srv = rl_data->root[i]->srv;
+
+ ret = adf_rl_add_sla(accel_dev, &sla_in);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)
+{
+ bool *rp_in_use = rl_data->rp_in_use;
+ struct rl_sla **sla_type_arr = NULL;
+ int i, sla_id, node_id;
+ u32 old_cir;
+
+ sla_id = sla->sla_id;
+ node_id = sla->node_id;
+ old_cir = sla->cir;
+ sla->cir = 0;
+ sla->pir = 0;
+
+ for (i = 0; i < sla->ring_pairs_cnt; i++)
+ rp_in_use[sla->ring_pairs_ids[i]] = false;
+
+ update_budget(sla, old_cir, true);
+ get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
+ assign_node_to_parent(rl_data->accel_dev, sla, true);
+ adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type);
+ mark_rps_usage(sla, rl_data->rp_in_use, false);
+
+ kfree(sla);
+ rl_data->sla[sla_id] = NULL;
+ sla_type_arr[node_id] = NULL;
+}
+
+/**
+ * add_update_sla() - handles the creation and the update of an SLA
+ * @accel_dev: pointer to acceleration device structure
+ * @sla_in: pointer to user input data for a new/updated SLA
+ * @is_update: flag to indicate if this is an update or an add operation
+ *
+ * Return:
+ * * 0 - ok
+ * * -ENOMEM - memory allocation failed
+ * * -EINVAL - user input data cannot be used to create SLA
+ * * -ENOSPC - all available SLAs are in use
+ */
+static int add_update_sla(struct adf_accel_dev *accel_dev,
+ struct adf_rl_sla_input_data *sla_in, bool is_update)
+{
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ struct rl_sla **sla_type_arr = NULL;
+ struct rl_sla *sla = NULL;
+ u32 old_cir = 0;
+ int ret;
+
+ if (!sla_in) {
+ dev_warn(&GET_DEV(accel_dev),
+ "SLA input data pointer is missing\n");
+ return -EFAULT;
+ }
+
+ mutex_lock(&rl_data->rl_lock);
+
+ /* Input validation */
+ ret = validate_user_input(accel_dev, sla_in, is_update);
+ if (ret)
+ goto ret_err;
+
+ if (is_update) {
+ ret = validate_sla_id(accel_dev, sla_in->sla_id);
+ if (ret)
+ goto ret_err;
+
+ sla = rl_data->sla[sla_in->sla_id];
+ old_cir = sla->cir;
+ } else {
+ ret = add_new_sla_entry(accel_dev, sla_in, &sla);
+ if (ret)
+ goto ret_err;
+ }
+
+ if (!is_enough_budget(rl_data, sla, sla_in, is_update)) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Input value exceeds the remaining budget%s\n",
+ is_update ? " or more budget is already in use" : "");
+ ret = -EINVAL;
+ goto ret_err;
+ }
+ sla->cir = sla_in->cir;
+ sla->pir = sla_in->pir;
+
+ /* Apply SLA */
+ assign_node_to_parent(accel_dev, sla, false);
+ ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "Failed to apply an SLA\n");
+ goto ret_err;
+ }
+ update_budget(sla, old_cir, is_update);
+
+ if (!is_update) {
+ mark_rps_usage(sla, rl_data->rp_in_use, true);
+ get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
+ sla_type_arr[sla->node_id] = sla;
+ rl_data->sla[sla->sla_id] = sla;
+ }
+
+ sla_in->sla_id = sla->sla_id;
+ goto ret_ok;
+
+ret_err:
+ if (!is_update) {
+ sla_in->sla_id = -1;
+ kfree(sla);
+ }
+ret_ok:
+ mutex_unlock(&rl_data->rl_lock);
+ return ret;
+}
+
+/**
+ * adf_rl_add_sla() - handles the creation of an SLA
+ * @accel_dev: pointer to acceleration device structure
+ * @sla_in: pointer to user input data required to add an SLA
+ *
+ * Return:
+ * * 0 - ok
+ * * -ENOMEM - memory allocation failed
+ * * -EINVAL - invalid user input
+ * * -ENOSPC - all available SLAs are in use
+ */
+int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
+ struct adf_rl_sla_input_data *sla_in)
+{
+ return add_update_sla(accel_dev, sla_in, false);
+}
+
+/**
+ * adf_rl_update_sla() - handles the update of an SLA
+ * @accel_dev: pointer to acceleration device structure
+ * @sla_in: pointer to user input data required to update an SLA
+ *
+ * Return:
+ * * 0 - ok
+ * * -EINVAL - user input data cannot be used to update SLA
+ */
+int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
+ struct adf_rl_sla_input_data *sla_in)
+{
+ return add_update_sla(accel_dev, sla_in, true);
+}
+
+/**
+ * adf_rl_get_sla() - returns an existing SLA data
+ * @accel_dev: pointer to acceleration device structure
+ * @sla_in: pointer to user data where SLA info will be stored
+ *
+ * The sla_id for which data is requested should be set in the sla_in structure
+ *
+ * Return:
+ * * 0 - ok
+ * * -EINVAL - provided sla_id does not exist
+ */
+int adf_rl_get_sla(struct adf_accel_dev *accel_dev,
+ struct adf_rl_sla_input_data *sla_in)
+{
+ struct rl_sla *sla;
+ int ret, i;
+
+ ret = validate_sla_id(accel_dev, sla_in->sla_id);
+ if (ret)
+ return ret;
+
+ sla = accel_dev->rate_limiting->sla[sla_in->sla_id];
+ sla_in->type = sla->type;
+ sla_in->srv = sla->srv;
+ sla_in->cir = sla->cir;
+ sla_in->pir = sla->pir;
+ sla_in->rp_mask = 0U;
+ if (sla->parent)
+ sla_in->parent_id = sla->parent->sla_id;
+ else
+ sla_in->parent_id = RL_PARENT_DEFAULT_ID;
+
+ for (i = 0; i < sla->ring_pairs_cnt; i++)
+ sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]);
+
+ return 0;
+}
+
+/**
+ * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR) for
+ * selected service or provided sla_id
+ * @accel_dev: pointer to acceleration device structure
+ * @srv: service ID for which capability is requested
+ * @sla_id: ID of the cluster or root to which we want to assign a new SLA
+ *
+ * Check if the provided SLA id is valid. If it is and the service matches
+ * the requested service and the type is cluster or root, return the remaining
+ * capacity.
+ * If the provided ID does not match the service or type, return the remaining
+ * capacity of the default cluster for that service.
+ *
+ * Return:
+ * * Positive value - correct remaining value
+ * * -EINVAL - algorithm cannot find a remaining value for provided data
+ */
+int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
+ enum adf_base_services srv, int sla_id)
+{
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ struct rl_sla *sla = NULL;
+ int i;
+
+ if (srv >= ADF_SVC_NONE)
+ return -EINVAL;
+
+ if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
+ sla = rl_data->sla[sla_id];
+
+ if (sla->srv == srv && sla->type <= RL_CLUSTER)
+ goto ret_ok;
+ }
+
+ for (i = 0; i < RL_CLUSTER_MAX; i++) {
+ if (!rl_data->cluster[i])
+ continue;
+
+ if (rl_data->cluster[i]->srv == srv) {
+ sla = rl_data->cluster[i];
+ goto ret_ok;
+ }
+ }
+
+ return -EINVAL;
+ret_ok:
+ return sla->rem_cir;
+}
+
+/**
+ * adf_rl_remove_sla() - removes the SLA with the provided sla_id
+ * @accel_dev: pointer to acceleration device structure
+ * @sla_id: ID of the SLA to be removed
+ *
+ * Return:
+ * * 0 - ok
+ * * -EINVAL - wrong sla_id or the SLA still has assigned children
+ */
+int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id)
+{
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ struct rl_sla *sla;
+ int ret = 0;
+
+ mutex_lock(&rl_data->rl_lock);
+ ret = validate_sla_id(accel_dev, sla_id);
+ if (ret)
+ goto err_ret;
+
+ sla = rl_data->sla[sla_id];
+
+ if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) {
+ dev_notice(&GET_DEV(accel_dev),
+ "To remove parent SLA all its children must be removed first");
+ ret = -EINVAL;
+ goto err_ret;
+ }
+
+ clear_sla(rl_data, sla);
+
+err_ret:
+ mutex_unlock(&rl_data->rl_lock);
+ return ret;
+}
+
+/**
+ * adf_rl_remove_sla_all() - removes all SLAs from device
+ * @accel_dev: pointer to acceleration device structure
+ * @incl_default: set to true if the default SLAs should also be removed
+ */
+void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default)
+{
+ struct adf_rl *rl_data = accel_dev->rate_limiting;
+ int end_type = incl_default ? RL_ROOT : RL_LEAF;
+ struct rl_sla **sla_type_arr = NULL;
+ u32 max_id;
+ int i, j;
+
+ mutex_lock(&rl_data->rl_lock);
+
+ /* Unregister and remove all SLAs */
+ for (j = RL_LEAF; j >= end_type; j--) {
+ max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr);
+
+ for (i = 0; i < max_id; i++) {
+ if (!sla_type_arr[i])
+ continue;
+
+ clear_sla(rl_data, sla_type_arr[i]);
+ }
+ }
+
+ mutex_unlock(&rl_data->rl_lock);
+}
+
+int adf_rl_init(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
+ struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data;
+ struct adf_rl *rl;
+ int ret = 0;
+
+ /* Validate device parameters */
+ if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) ||
+ RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) {
+ ret = -EOPNOTSUPP;
+ goto err_ret;
+ }
+
+ rl = kzalloc(sizeof(*rl), GFP_KERNEL);
+ if (!rl) {
+ ret = -ENOMEM;
+ goto err_ret;
+ }
+
+ mutex_init(&rl->rl_lock);
+ rl->device_data = &accel_dev->hw_device->rl_data;
+ rl->accel_dev = accel_dev;
+ accel_dev->rate_limiting = rl;
+
+err_ret:
+ return ret;
+}
+
+int adf_rl_start(struct adf_accel_dev *accel_dev)
+{
+ struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data;
+ void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
+ u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;
+ int ret;
+
+ if (!accel_dev->rate_limiting) {
+ ret = -EOPNOTSUPP;
+ goto ret_err;
+ }
+
+ if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) {
+ dev_info(&GET_DEV(accel_dev), "not supported\n");
+ ret = -EOPNOTSUPP;
+ goto ret_free;
+ }
+
+ ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset,
+ RL_TOKEN_GRANULARITY_PCIEIN_BUCKET);
+ ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset,
+ RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET);
+
+ ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "initialization failed\n");
+ goto ret_free;
+ }
+
+ ret = initialize_default_nodes(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "failed to initialize default SLAs\n");
+ goto ret_sla_rm;
+ }
+
+ ret = adf_sysfs_rl_add(accel_dev);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n");
+ goto ret_sysfs_rm;
+ }
+
+ return 0;
+
+ret_sysfs_rm:
+ adf_sysfs_rl_rm(accel_dev);
+ret_sla_rm:
+ adf_rl_remove_sla_all(accel_dev, true);
+ret_free:
+ kfree(accel_dev->rate_limiting);
+ accel_dev->rate_limiting = NULL;
+ret_err:
+ return ret;
+}
+
+void adf_rl_stop(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->rate_limiting)
+ return;
+
+ adf_sysfs_rl_rm(accel_dev);
+ adf_rl_remove_sla_all(accel_dev, true);
+}
+
+void adf_rl_exit(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->rate_limiting)
+ return;
+
+ kfree(accel_dev->rate_limiting);
+ accel_dev->rate_limiting = NULL;
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h
new file mode 100644
index 0000000000..269c6656fb
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_RL_H_
+#define ADF_RL_H_
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct adf_accel_dev;
+
+#define RL_ROOT_MAX 4
+#define RL_CLUSTER_MAX 16
+#define RL_LEAF_MAX 64
+#define RL_NODES_CNT_MAX (RL_ROOT_MAX + RL_CLUSTER_MAX + RL_LEAF_MAX)
+#define RL_RP_CNT_PER_LEAF_MAX 4U
+#define RL_RP_CNT_MAX 64
+#define RL_SLA_EMPTY_ID -1
+#define RL_PARENT_DEFAULT_ID -1
+
+enum rl_node_type {
+ RL_ROOT,
+ RL_CLUSTER,
+ RL_LEAF,
+};
+
+enum adf_base_services {
+ ADF_SVC_ASYM = 0,
+ ADF_SVC_SYM,
+ ADF_SVC_DC,
+ ADF_SVC_NONE,
+};
+
+/**
+ * struct adf_rl_sla_input_data - ratelimiting user input data structure
+ * @rp_mask: 64 bit bitmask of ring pair IDs which will be assigned to the SLA.
+ *           E.g. 0x5 -> RP0 and RP2 assigned; 0xA005 -> RP0,2,13,15 assigned.
+ * @sla_id: ID of the current SLA for the update, rm and get operations. For
+ *          the add operation this field is updated with the ID of the newly
+ *          added SLA
+ * @parent_id: ID of the SLA to which the current one should be assigned.
+ * Set to -1 to refer to the default parent.
+ * @cir: Committed information rate. Rate guaranteed to be achieved. Input value
+ * is expressed in permille scale, i.e. 1000 refers to the maximum
+ * device throughput for a selected service.
+ * @pir: Peak information rate. Maximum rate available that the SLA can achieve.
+ * Input value is expressed in permille scale, i.e. 1000 refers to
+ * the maximum device throughput for a selected service.
+ * @type: SLA type: root, cluster, leaf
+ * @srv: Service associated with the SLA: asym, sym, dc.
+ *
+ * This structure is used to perform operations on an SLA.
+ * Depending on the operation, some of the parameters are ignored.
+ * The following list reports which parameters should be set for each operation.
+ * - add: all except sla_id
+ * - update: cir, pir, sla_id
+ * - rm: sla_id
+ * - rm_all: -
+ * - get: sla_id
+ * - get_capability_rem: srv, sla_id
+ */
+struct adf_rl_sla_input_data {
+ u64 rp_mask;
+ int sla_id;
+ int parent_id;
+ unsigned int cir;
+ unsigned int pir;
+ enum rl_node_type type;
+ enum adf_base_services srv;
+};
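+
+/*
+ * Usage sketch, with illustrative values only: requesting a new leaf SLA on
+ * ring pairs 0 and 2, with half of the device throughput guaranteed and the
+ * full throughput allowed as peak, for the sym service:
+ *
+ *	struct adf_rl_sla_input_data in = {
+ *		.rp_mask = 0x5,
+ *		.parent_id = RL_PARENT_DEFAULT_ID,
+ *		.cir = 500,
+ *		.pir = 1000,
+ *		.type = RL_LEAF,
+ *		.srv = ADF_SVC_SYM,
+ *	};
+ *
+ *	ret = adf_rl_add_sla(accel_dev, &in);
+ */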
+
+struct rl_slice_cnt {
+ u8 dcpr_cnt;
+ u8 pke_cnt;
+ u8 cph_cnt;
+};
+
+struct adf_rl_interface_data {
+ struct adf_rl_sla_input_data input;
+ enum adf_base_services cap_rem_srv;
+ struct rw_semaphore lock;
+ bool sysfs_added;
+};
+
+struct adf_rl_hw_data {
+ u32 scale_ref;
+ u32 scan_interval;
+ u32 r2l_offset;
+ u32 l2c_offset;
+ u32 c2s_offset;
+ u32 pciin_tb_offset;
+ u32 pciout_tb_offset;
+ u32 pcie_scale_mul;
+ u32 pcie_scale_div;
+ u32 dcpr_correction;
+ u32 max_tp[RL_ROOT_MAX];
+ struct rl_slice_cnt slices;
+};
+
+/**
+ * struct adf_rl - ratelimiting data structure
+ * @accel_dev: pointer to acceleration device data
+ * @device_data: pointer to rate limiting data specific to a device type (or revision)
+ * @sla: array of pointers to SLA objects
+ * @root: array of pointers to root type SLAs, element number reflects node_id
+ * @cluster: array of pointers to cluster type SLAs, element number reflects node_id
+ * @leaf: array of pointers to leaf type SLAs, element number reflects node_id
+ * @rp_in_use: array of ring pair IDs already used in one of SLAs
+ * @rl_lock: mutex protecting the data in this structure
+ * @user_input: structure holding the data received from the user interface
+ */
+struct adf_rl {
+ struct adf_accel_dev *accel_dev;
+ struct adf_rl_hw_data *device_data;
+ /* mapping sla_id to SLA objects */
+ struct rl_sla *sla[RL_NODES_CNT_MAX];
+ struct rl_sla *root[RL_ROOT_MAX];
+ struct rl_sla *cluster[RL_CLUSTER_MAX];
+ struct rl_sla *leaf[RL_LEAF_MAX];
+ bool rp_in_use[RL_RP_CNT_MAX];
+ /* Mutex protecting writing to SLAs lists */
+ struct mutex rl_lock;
+ struct adf_rl_interface_data user_input;
+};
+
+/**
+ * struct rl_sla - SLA object data structure
+ * @parent: pointer to the parent SLA (root/cluster)
+ * @type: SLA type
+ * @srv: service associated with this SLA
+ * @sla_id: ID of the SLA, used as element number in SLA array and as identifier
+ * shared with the user
+ * @node_id: ID of the node; each SLA type has a separate ID list
+ * @cir: committed information rate
+ * @pir: peak information rate (PIR >= CIR)
+ * @rem_cir: if this SLA is a parent, this field represents the remaining CIR
+ *           value available to child SLAs.
+ * @ring_pairs_ids: array with numeric ring pairs IDs assigned to this SLA
+ * @ring_pairs_cnt: number of assigned ring pairs listed in the array above
+ */
+struct rl_sla {
+ struct rl_sla *parent;
+ enum rl_node_type type;
+ enum adf_base_services srv;
+ u32 sla_id;
+ u32 node_id;
+ u32 cir;
+ u32 pir;
+ u32 rem_cir;
+ u16 ring_pairs_ids[RL_RP_CNT_PER_LEAF_MAX];
+ u16 ring_pairs_cnt;
+};
+
+int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
+ struct adf_rl_sla_input_data *sla_in);
+int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
+ struct adf_rl_sla_input_data *sla_in);
+int adf_rl_get_sla(struct adf_accel_dev *accel_dev,
+ struct adf_rl_sla_input_data *sla_in);
+int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
+ enum adf_base_services srv, int sla_id);
+int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id);
+void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default);
+
+int adf_rl_init(struct adf_accel_dev *accel_dev);
+int adf_rl_start(struct adf_accel_dev *accel_dev);
+void adf_rl_stop(struct adf_accel_dev *accel_dev);
+void adf_rl_exit(struct adf_accel_dev *accel_dev);
+
+u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
+ enum adf_base_services svc_type, bool is_bw_out);
+u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
+ enum adf_base_services svc_type);
+u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
+ enum adf_base_services svc_type);
+
+#endif /* ADF_RL_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
new file mode 100644
index 0000000000..698a14f4ce
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+
+#include "adf_admin.h"
+#include "adf_accel_devices.h"
+#include "adf_rl_admin.h"
+
+static void
+prep_admin_req_msg(struct rl_sla *sla, dma_addr_t dma_addr,
+ struct icp_qat_fw_init_admin_sla_config_params *fw_params,
+ struct icp_qat_fw_init_admin_req *req, bool is_update)
+{
+ req->cmd_id = is_update ? ICP_QAT_FW_RL_UPDATE : ICP_QAT_FW_RL_ADD;
+ req->init_cfg_ptr = dma_addr;
+ req->init_cfg_sz = sizeof(*fw_params);
+ req->node_id = sla->node_id;
+ req->node_type = sla->type;
+ req->rp_count = sla->ring_pairs_cnt;
+ req->svc_type = sla->srv;
+}
+
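+/*
+ * Translate the permille CIR/PIR values of an SLA into the units consumed
+ * by the firmware: PCIe bandwidth tokens, slice utilization tokens and AE
+ * cycles, together with the list of assigned ring pairs.
+ */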
+static void
+prep_admin_req_params(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
+ struct icp_qat_fw_init_admin_sla_config_params *fw_params)
+{
+ fw_params->pcie_in_cir =
+ adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, false);
+ fw_params->pcie_in_pir =
+ adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, false);
+ fw_params->pcie_out_cir =
+ adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, true);
+ fw_params->pcie_out_pir =
+ adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, true);
+
+ fw_params->slice_util_cir =
+ adf_rl_calculate_slice_tokens(accel_dev, sla->cir, sla->srv);
+ fw_params->slice_util_pir =
+ adf_rl_calculate_slice_tokens(accel_dev, sla->pir, sla->srv);
+
+ fw_params->ae_util_cir =
+ adf_rl_calculate_ae_cycles(accel_dev, sla->cir, sla->srv);
+ fw_params->ae_util_pir =
+ adf_rl_calculate_ae_cycles(accel_dev, sla->pir, sla->srv);
+
+ memcpy(fw_params->rp_ids, sla->ring_pairs_ids,
+ sizeof(sla->ring_pairs_ids));
+}
+
+int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
+ struct rl_slice_cnt *slices_int)
+{
+ struct icp_qat_fw_init_admin_slice_cnt slices_resp = { };
+ int ret;
+
+ ret = adf_send_admin_rl_init(accel_dev, &slices_resp);
+ if (ret)
+ return ret;
+
+ slices_int->dcpr_cnt = slices_resp.dcpr_cnt;
+ slices_int->pke_cnt = slices_resp.pke_cnt;
+ /* For symmetric crypto, slice tokens are relative to the UCS slice */
+ slices_int->cph_cnt = slices_resp.ucs_cnt;
+
+ return 0;
+}
+
+int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev,
+ struct rl_sla *sla, bool is_update)
+{
+ struct icp_qat_fw_init_admin_sla_config_params *fw_params;
+ struct icp_qat_fw_init_admin_req req = { };
+ dma_addr_t dma_addr;
+ int ret;
+
+ fw_params = dma_alloc_coherent(&GET_DEV(accel_dev), sizeof(*fw_params),
+ &dma_addr, GFP_KERNEL);
+ if (!fw_params)
+ return -ENOMEM;
+
+ prep_admin_req_params(accel_dev, sla, fw_params);
+ prep_admin_req_msg(sla, dma_addr, fw_params, &req, is_update);
+ ret = adf_send_admin_rl_add_update(accel_dev, &req);
+
+ dma_free_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), fw_params,
+ dma_addr);
+
+ return ret;
+}
+
+int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id,
+ u8 node_type)
+{
+ return adf_send_admin_rl_delete(accel_dev, node_id, node_type);
+}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h
new file mode 100644
index 0000000000..dd5419b7e8
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_RL_ADMIN_H_
+#define ADF_RL_ADMIN_H_
+
+#include <linux/types.h>
+
+#include "adf_rl.h"
+
+int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
+ struct rl_slice_cnt *slices_int);
+int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev,
+ struct rl_sla *sla, bool is_update);
+int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id,
+ u8 node_type);
+
+#endif /* ADF_RL_ADMIN_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
index 8f04b0d3c5..d450dad32c 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c
@@ -8,6 +8,8 @@
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
+#define UNSET_RING_NUM -1
+
static const char * const state_operations[] = {
[DEV_DOWN] = "down",
[DEV_UP] = "up",
@@ -61,8 +63,8 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
}
ret = adf_dev_down(accel_dev, true);
- if (ret < 0)
- return -EINVAL;
+ if (ret)
+ return ret;
break;
case DEV_UP:
@@ -205,10 +207,90 @@ static DEVICE_ATTR_RW(pm_idle_enabled);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RW(cfg_services);
+static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_hw_device_data *hw_data;
+ struct adf_accel_dev *accel_dev;
+ enum adf_cfg_service_type svc;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ hw_data = GET_HW_DATA(accel_dev);
+
+ if (accel_dev->sysfs.ring_num == UNSET_RING_NUM)
+ return -EINVAL;
+
+ down_read(&accel_dev->sysfs.lock);
+ svc = GET_SRV_TYPE(accel_dev, accel_dev->sysfs.ring_num %
+ hw_data->num_banks_per_vf);
+ up_read(&accel_dev->sysfs.lock);
+
+ switch (svc) {
+ case COMP:
+ return sysfs_emit(buf, "%s\n", ADF_CFG_DC);
+ case SYM:
+ return sysfs_emit(buf, "%s\n", ADF_CFG_SYM);
+ case ASYM:
+ return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_accel_dev *accel_dev;
+ int num_rings, ret;
+ unsigned int ring;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ ret = kstrtouint(buf, 10, &ring);
+ if (ret)
+ return ret;
+
+ num_rings = GET_MAX_BANKS(accel_dev);
+ if (ring >= num_rings) {
+ dev_err(&GET_DEV(accel_dev),
+ "Device does not support more than %u ring pairs\n",
+ num_rings);
+ return -EINVAL;
+ }
+
+ down_write(&accel_dev->sysfs.lock);
+ accel_dev->sysfs.ring_num = ring;
+ up_write(&accel_dev->sysfs.lock);
+
+ return count;
+}
+static DEVICE_ATTR_RW(rp2srv);
+
+static ssize_t num_rps_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%u\n", GET_MAX_BANKS(accel_dev));
+}
+static DEVICE_ATTR_RO(num_rps);
+
static struct attribute *qat_attrs[] = {
&dev_attr_state.attr,
&dev_attr_cfg_services.attr,
&dev_attr_pm_idle_enabled.attr,
+ &dev_attr_rp2srv.attr,
+ &dev_attr_num_rps.attr,
NULL,
};
@@ -227,6 +309,8 @@ int adf_sysfs_init(struct adf_accel_dev *accel_dev)
"Failed to create qat attribute group: %d\n", ret);
}
+ accel_dev->sysfs.ring_num = UNSET_RING_NUM;
+
return ret;
}
EXPORT_SYMBOL_GPL(adf_sysfs_init);
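
For reference, a userspace sketch of the query flow these two attributes implement. The sysfs path, including the qat/ group directory and the device BDF, is an assumption; substitute the real device.

#include <stdio.h>

int main(void)
{
	/* path layout assumed; substitute the real device BDF */
	const char *base = "/sys/bus/pci/devices/0000:6b:00.0/qat";
	char path[256], svc[16];
	FILE *f;

	snprintf(path, sizeof(path), "%s/rp2srv", base);

	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("2", f);			/* select ring pair 2 */
	fclose(f);

	f = fopen(path, "r");		/* read back: sym, asym or dc */
	if (f && fgets(svc, sizeof(svc), f))
		printf("ring pair 2 -> %s", svc);
	if (f)
		fclose(f);
	return 0;
}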
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
new file mode 100644
index 0000000000..e97c67c87b
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#include <linux/sysfs.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+
+#include "adf_common_drv.h"
+#include "adf_sysfs_ras_counters.h"
+
+static ssize_t errors_correctable_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ unsigned long counter;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR);
+ return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
+}
+
+static ssize_t errors_nonfatal_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ unsigned long counter;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR);
+ return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
+}
+
+static ssize_t errors_fatal_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct adf_accel_dev *accel_dev;
+ unsigned long counter;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL);
+ return scnprintf(buf, PAGE_SIZE, "%ld\n", counter);
+}
+
+static ssize_t reset_error_counters_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ struct adf_accel_dev *accel_dev;
+
+ if (buf[0] != '1' || count != 2)
+ return -EINVAL;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors);
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(errors_correctable);
+static DEVICE_ATTR_RO(errors_nonfatal);
+static DEVICE_ATTR_RO(errors_fatal);
+static DEVICE_ATTR_WO(reset_error_counters);
+
+static struct attribute *qat_ras_attrs[] = {
+ &dev_attr_errors_correctable.attr,
+ &dev_attr_errors_nonfatal.attr,
+ &dev_attr_errors_fatal.attr,
+ &dev_attr_reset_error_counters.attr,
+ NULL,
+};
+
+static struct attribute_group qat_ras_group = {
+ .attrs = qat_ras_attrs,
+ .name = "qat_ras",
+};
+
+void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->ras_errors.enabled)
+ return;
+
+ ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors);
+
+ if (device_add_group(&GET_DEV(accel_dev), &qat_ras_group))
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to create qat_ras attribute group.\n");
+
+ accel_dev->ras_errors.sysfs_added = true;
+}
+
+void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->ras_errors.enabled)
+ return;
+
+ if (accel_dev->ras_errors.sysfs_added) {
+ device_remove_group(&GET_DEV(accel_dev), &qat_ras_group);
+ accel_dev->ras_errors.sysfs_added = false;
+ }
+
+ ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors);
+}
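
A userspace sketch of how these counters are consumed. The device BDF is assumed; the qat_ras group directory name comes from the attribute_group above.

#include <stdio.h>

int main(void)
{
	const char *base = "/sys/bus/pci/devices/0000:6b:00.0/qat_ras";
	const char *names[] = { "errors_correctable", "errors_nonfatal",
				"errors_fatal" };
	char path[300];
	unsigned long v;
	FILE *f;

	for (int i = 0; i < 3; i++) {
		snprintf(path, sizeof(path), "%s/%s", base, names[i]);
		f = fopen(path, "r");
		if (f && fscanf(f, "%lu", &v) == 1)
			printf("%s: %lu\n", names[i], v);
		if (f)
			fclose(f);
	}

	/* writing "1" resets all three counters, per the store handler */
	snprintf(path, sizeof(path), "%s/reset_error_counters", base);
	f = fopen(path, "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	return 0;
}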
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h
new file mode 100644
index 0000000000..99e9d9cf57
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+
+#ifndef ADF_RAS_H
+#define ADF_RAS_H
+
+#include <linux/bitops.h>
+#include <linux/atomic.h>
+
+struct adf_accel_dev;
+
+void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev);
+void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev);
+
+#define ADF_RAS_ERR_CTR_READ(ras_errors, ERR) \
+ atomic_read(&(ras_errors).counter[ERR])
+
+#define ADF_RAS_ERR_CTR_CLEAR(ras_errors) \
+ do { \
+ for (int err = 0; err < ADF_RAS_ERRORS; ++err) \
+ atomic_set(&(ras_errors).counter[err], 0); \
+ } while (0)
+
+#define ADF_RAS_ERR_CTR_INC(ras_errors, ERR) \
+ atomic_inc(&(ras_errors).counter[ERR])
+
+#endif /* ADF_RAS_H */
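
A kernel-side sketch of the producer half of these macros. The handler name and the error classification are hypothetical; only the macros and the ADF_RAS_* counter indices come from the code shown here.

/*
 * Sketch: error sources bump the atomics through the macro; the sysfs
 * side reads them with ADF_RAS_ERR_CTR_READ() without extra locking.
 */
static void example_count_ras_event(struct adf_accel_dev *accel_dev,
				    bool fatal, bool correctable)
{
	if (fatal)
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
	else if (correctable)
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
	else
		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
}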
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
new file mode 100644
index 0000000000..bedb514d4e
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c
@@ -0,0 +1,459 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation */
+
+#define dev_fmt(fmt) "RateLimiting: " fmt
+
+#include <linux/dev_printk.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include "adf_common_drv.h"
+#include "adf_rl.h"
+#include "adf_sysfs_rl.h"
+
+#define GET_RL_STRUCT(accel_dev) ((accel_dev)->rate_limiting->user_input)
+
+enum rl_ops {
+ ADD,
+ UPDATE,
+ RM,
+ RM_ALL,
+ GET,
+};
+
+enum rl_params {
+ RP_MASK,
+ ID,
+ CIR,
+ PIR,
+ SRV,
+ CAP_REM_SRV,
+};
+
+static const char *const rl_services[] = {
+ [ADF_SVC_ASYM] = "asym",
+ [ADF_SVC_SYM] = "sym",
+ [ADF_SVC_DC] = "dc",
+};
+
+static const char *const rl_operations[] = {
+ [ADD] = "add",
+ [UPDATE] = "update",
+ [RM] = "rm",
+ [RM_ALL] = "rm_all",
+ [GET] = "get",
+};
+
+static int set_param_u(struct device *dev, enum rl_params param, u64 set)
+{
+ struct adf_rl_interface_data *data;
+ struct adf_accel_dev *accel_dev;
+ int ret = 0;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ data = &GET_RL_STRUCT(accel_dev);
+
+ down_write(&data->lock);
+ switch (param) {
+ case RP_MASK:
+ data->input.rp_mask = set;
+ break;
+ case CIR:
+ data->input.cir = set;
+ break;
+ case PIR:
+ data->input.pir = set;
+ break;
+ case SRV:
+ data->input.srv = set;
+ break;
+ case CAP_REM_SRV:
+ data->cap_rem_srv = set;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ up_write(&data->lock);
+
+ return ret;
+}
+
+static int set_param_s(struct device *dev, enum rl_params param, int set)
+{
+ struct adf_rl_interface_data *data;
+ struct adf_accel_dev *accel_dev;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev || param != ID)
+ return -EINVAL;
+
+ data = &GET_RL_STRUCT(accel_dev);
+
+ down_write(&data->lock);
+ data->input.sla_id = set;
+ up_write(&data->lock);
+
+ return 0;
+}
+
+static int get_param_u(struct device *dev, enum rl_params param, u64 *get)
+{
+ struct adf_rl_interface_data *data;
+ struct adf_accel_dev *accel_dev;
+ int ret = 0;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ data = &GET_RL_STRUCT(accel_dev);
+
+ down_read(&data->lock);
+ switch (param) {
+ case RP_MASK:
+ *get = data->input.rp_mask;
+ break;
+ case CIR:
+ *get = data->input.cir;
+ break;
+ case PIR:
+ *get = data->input.pir;
+ break;
+ case SRV:
+ *get = data->input.srv;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ up_read(&data->lock);
+
+ return ret;
+}
+
+static int get_param_s(struct device *dev, enum rl_params param)
+{
+ struct adf_rl_interface_data *data;
+ struct adf_accel_dev *accel_dev;
+ int ret = 0;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ data = &GET_RL_STRUCT(accel_dev);
+
+ down_read(&data->lock);
+ if (param == ID)
+ ret = data->input.sla_id;
+ up_read(&data->lock);
+
+ return ret;
+}
+
+static ssize_t rp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u64 get;
+
+ ret = get_param_u(dev, RP_MASK, &get);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%#llx\n", get);
+}
+
+static ssize_t rp_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ u64 val;
+
+ err = kstrtou64(buf, 16, &val);
+ if (err)
+ return err;
+
+ err = set_param_u(dev, RP_MASK, val);
+ if (err)
+ return err;
+
+ return count;
+}
+static DEVICE_ATTR_RW(rp);
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%d\n", get_param_s(dev, ID));
+}
+
+static ssize_t id_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ int val;
+
+ err = kstrtoint(buf, 10, &val);
+ if (err)
+ return err;
+
+ err = set_param_s(dev, ID, val);
+ if (err)
+ return err;
+
+ return count;
+}
+static DEVICE_ATTR_RW(id);
+
+static ssize_t cir_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u64 get;
+
+ ret = get_param_u(dev, CIR, &get);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", get);
+}
+
+static ssize_t cir_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int err;
+
+ err = kstrtouint(buf, 10, &val);
+ if (err)
+ return err;
+
+ err = set_param_u(dev, CIR, val);
+ if (err)
+ return err;
+
+ return count;
+}
+static DEVICE_ATTR_RW(cir);
+
+static ssize_t pir_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u64 get;
+
+ ret = get_param_u(dev, PIR, &get);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", get);
+}
+
+static ssize_t pir_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int err;
+
+ err = kstrtouint(buf, 10, &val);
+ if (err)
+ return err;
+
+ err = set_param_u(dev, PIR, val);
+ if (err)
+ return err;
+
+ return count;
+}
+static DEVICE_ATTR_RW(pir);
+
+static ssize_t srv_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u64 get;
+
+ ret = get_param_u(dev, SRV, &get);
+ if (ret)
+ return ret;
+
+ if (get == ADF_SVC_NONE)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s\n", rl_services[get]);
+}
+
+static ssize_t srv_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int ret;
+
+ ret = sysfs_match_string(rl_services, buf);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+ ret = set_param_u(dev, SRV, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(srv);
+
+static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct adf_rl_interface_data *data;
+ struct adf_accel_dev *accel_dev;
+ int ret, rem_cap;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ data = &GET_RL_STRUCT(accel_dev);
+
+ down_read(&data->lock);
+ rem_cap = adf_rl_get_capability_remaining(accel_dev, data->cap_rem_srv,
+ RL_SLA_EMPTY_ID);
+ up_read(&data->lock);
+ if (rem_cap < 0)
+ return rem_cap;
+
+ ret = sysfs_emit(buf, "%u\n", rem_cap);
+
+ return ret;
+}
+
+static ssize_t cap_rem_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned int val;
+ int ret;
+
+ ret = sysfs_match_string(rl_services, buf);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+ ret = set_param_u(dev, CAP_REM_SRV, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+static DEVICE_ATTR_RW(cap_rem);
+
+static ssize_t sla_op_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct adf_rl_interface_data *data;
+ struct adf_accel_dev *accel_dev;
+ int ret;
+
+ accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
+ if (!accel_dev)
+ return -EINVAL;
+
+ data = &GET_RL_STRUCT(accel_dev);
+
+ ret = sysfs_match_string(rl_operations, buf);
+ if (ret < 0)
+ return ret;
+
+ down_write(&data->lock);
+ switch (ret) {
+ case ADD:
+ data->input.parent_id = RL_PARENT_DEFAULT_ID;
+ data->input.type = RL_LEAF;
+ data->input.sla_id = 0;
+ ret = adf_rl_add_sla(accel_dev, &data->input);
+ if (ret)
+ goto err_free_lock;
+ break;
+ case UPDATE:
+ ret = adf_rl_update_sla(accel_dev, &data->input);
+ if (ret)
+ goto err_free_lock;
+ break;
+ case RM:
+ ret = adf_rl_remove_sla(accel_dev, data->input.sla_id);
+ if (ret)
+ goto err_free_lock;
+ break;
+ case RM_ALL:
+ adf_rl_remove_sla_all(accel_dev, false);
+ break;
+ case GET:
+ ret = adf_rl_get_sla(accel_dev, &data->input);
+ if (ret)
+ goto err_free_lock;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free_lock;
+ }
+ up_write(&data->lock);
+
+ return count;
+
+err_free_lock:
+ up_write(&data->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(sla_op);
+
+static struct attribute *qat_rl_attrs[] = {
+ &dev_attr_rp.attr,
+ &dev_attr_id.attr,
+ &dev_attr_cir.attr,
+ &dev_attr_pir.attr,
+ &dev_attr_srv.attr,
+ &dev_attr_cap_rem.attr,
+ &dev_attr_sla_op.attr,
+ NULL,
+};
+
+static struct attribute_group qat_rl_group = {
+ .attrs = qat_rl_attrs,
+ .name = "qat_rl",
+};
+
+int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev)
+{
+ struct adf_rl_interface_data *data;
+ int ret;
+
+ data = &GET_RL_STRUCT(accel_dev);
+
+ ret = device_add_group(&GET_DEV(accel_dev), &qat_rl_group);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to create qat_rl attribute group\n");
+
+ data->cap_rem_srv = ADF_SVC_NONE;
+ data->input.srv = ADF_SVC_NONE;
+ data->sysfs_added = true;
+
+ return ret;
+}
+
+void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev)
+{
+ struct adf_rl_interface_data *data;
+
+ data = &GET_RL_STRUCT(accel_dev);
+ if (!data->sysfs_added)
+ return;
+
+ device_remove_group(&GET_DEV(accel_dev), &qat_rl_group);
+ data->sysfs_added = false;
+}
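
Putting the attributes together, a userspace sketch of one add operation. The device path is assumed; the attribute names and accepted strings are exactly those registered in the qat_rl group above.

#include <stdio.h>

static int wr(const char *base, const char *attr, const char *val)
{
	char path[300];
	FILE *f;

	snprintf(path, sizeof(path), "%s/qat_rl/%s", base, attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *dev = "/sys/bus/pci/devices/0000:6b:00.0"; /* assumed */

	/* stage the SLA parameters, then trigger the operation */
	if (wr(dev, "rp", "0x3"))	/* ring-pair mask, hex per rp_store */
		return 1;
	wr(dev, "srv", "sym");
	wr(dev, "cir", "500");
	wr(dev, "pir", "750");
	return wr(dev, "sla_op", "add") ? 1 : 0;
}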
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h
new file mode 100644
index 0000000000..22d36aa8a7
--- /dev/null
+++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright(c) 2023 Intel Corporation */
+#ifndef ADF_SYSFS_RL_H_
+#define ADF_SYSFS_RL_H_
+
+struct adf_accel_dev;
+
+int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev);
+void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev);
+
+#endif /* ADF_SYSFS_RL_H_ */
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
index 019a644383..cd418b51d9 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h
@@ -5,6 +5,8 @@
#include "icp_qat_fw.h"
+#define RL_MAX_RP_IDS 16
+
enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_INIT_AE = 0,
ICP_QAT_FW_TRNG_ENABLE = 1,
@@ -16,10 +18,17 @@ enum icp_qat_fw_init_admin_cmd_id {
ICP_QAT_FW_HEARTBEAT_SYNC = 7,
ICP_QAT_FW_HEARTBEAT_GET = 8,
ICP_QAT_FW_COMP_CAPABILITY_GET = 9,
+ ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10,
ICP_QAT_FW_DC_CHAIN_INIT = 11,
ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13,
+ ICP_QAT_FW_RL_INIT = 15,
ICP_QAT_FW_TIMER_GET = 19,
+ ICP_QAT_FW_CNV_STATS_GET = 20,
ICP_QAT_FW_PM_STATE_CONFIG = 128,
+ ICP_QAT_FW_PM_INFO = 129,
+ ICP_QAT_FW_RL_ADD = 134,
+ ICP_QAT_FW_RL_UPDATE = 135,
+ ICP_QAT_FW_RL_REMOVE = 136,
};
enum icp_qat_fw_init_admin_resp_status {
@@ -27,6 +36,30 @@ enum icp_qat_fw_init_admin_resp_status {
ICP_QAT_FW_INIT_RESP_STATUS_FAIL
};
+struct icp_qat_fw_init_admin_slice_cnt {
+ __u8 cpr_cnt;
+ __u8 xlt_cnt;
+ __u8 dcpr_cnt;
+ __u8 pke_cnt;
+ __u8 wat_cnt;
+ __u8 wcp_cnt;
+ __u8 ucs_cnt;
+ __u8 cph_cnt;
+ __u8 ath_cnt;
+};
+
+struct icp_qat_fw_init_admin_sla_config_params {
+ __u32 pcie_in_cir;
+ __u32 pcie_in_pir;
+ __u32 pcie_out_cir;
+ __u32 pcie_out_pir;
+ __u32 slice_util_cir;
+ __u32 slice_util_pir;
+ __u32 ae_util_cir;
+ __u32 ae_util_pir;
+ __u16 rp_ids[RL_MAX_RP_IDS];
+};
+
struct icp_qat_fw_init_admin_req {
__u16 init_cfg_sz;
__u8 resrvd1;
@@ -46,6 +79,13 @@ struct icp_qat_fw_init_admin_req {
struct {
__u32 heartbeat_ticks;
};
+ struct {
+ __u16 node_id;
+ __u8 node_type;
+ __u8 svc_type;
+ __u8 resrvd5[3];
+ __u8 rp_count;
+ };
__u32 idle_filter;
};
@@ -64,6 +104,10 @@ struct icp_qat_fw_init_admin_resp {
__u16 version_major_num;
};
__u32 extended_features;
+ struct {
+ __u16 error_count;
+ __u16 latest_error;
+ };
};
__u64 opaque_data;
union {
@@ -103,9 +147,46 @@ struct icp_qat_fw_init_admin_resp {
__u32 unsuccessful_count;
__u64 resrvd8;
};
+ struct icp_qat_fw_init_admin_slice_cnt slices;
+ __u16 fw_capabilities;
};
} __packed;
#define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC
+#define ICP_QAT_FW_CAPABILITIES_GET ICP_QAT_FW_CRYPTO_CAPABILITY_GET
+
+#define ICP_QAT_NUMBER_OF_PM_EVENTS 8
+
+struct icp_qat_fw_init_admin_pm_info {
+ __u16 max_pwrreq;
+ __u16 min_pwrreq;
+ __u16 resvrd1;
+ __u8 pwr_state;
+ __u8 resvrd2;
+ __u32 fusectl0;
+ struct_group(event_counters,
+ __u32 sys_pm;
+ __u32 host_msg;
+ __u32 unknown;
+ __u32 local_ssm;
+ __u32 timer;
+ );
+ __u32 event_log[ICP_QAT_NUMBER_OF_PM_EVENTS];
+ struct_group(pm,
+ __u32 fw_init;
+ __u32 pwrreq;
+ __u32 status;
+ __u32 main;
+ __u32 thread;
+ );
+ struct_group(ssm,
+ __u32 pm_enable;
+ __u32 pm_active_status;
+ __u32 pm_managed_status;
+ __u32 pm_domain_status;
+ __u32 active_constraint;
+ );
+ __u32 resvrd3[6];
+};
#endif
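
One note on the struct_group() wrappers above: they make each sub-block addressable as a single named member, so a caller can copy, say, just the event counters in one bounds-checked operation. A minimal sketch; the caller name is hypothetical.

#include <linux/string.h>

/* Sketch: snapshot only the five PM event counters declared above. */
static void example_snapshot_pm_events(
		const struct icp_qat_fw_init_admin_pm_info *info,
		void *dst, size_t dst_sz)
{
	if (dst_sz >= sizeof(info->event_counters))
		memcpy(dst, &info->event_counters,
		       sizeof(info->event_counters));
}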
diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
index 0c8883e2cc..eb2ef225bc 100644
--- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h
@@ -3,6 +3,8 @@
#ifndef _ICP_QAT_HW_H_
#define _ICP_QAT_HW_H_
+#include <linux/bits.h>
+
enum icp_qat_hw_ae_id {
ICP_QAT_HW_AE_0 = 0,
ICP_QAT_HW_AE_1 = 1,
diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
index b533984906..bf8c0ee629 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c
@@ -109,69 +109,6 @@ err:
acomp_request_complete(areq, ret);
}
-static int parse_zlib_header(u16 zlib_h)
-{
- int ret = -EINVAL;
- __be16 header;
- u8 *header_p;
- u8 cmf, flg;
-
- header = cpu_to_be16(zlib_h);
- header_p = (u8 *)&header;
-
- flg = header_p[0];
- cmf = header_p[1];
-
- if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
- return ret;
-
- if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
- return ret;
-
- if (flg & QAT_RFC_1950_DICT_MASK)
- return ret;
-
- return 0;
-}
-
-static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
- void *resp)
-{
- struct acomp_req *areq = qat_req->acompress_req;
- enum direction dir = qat_req->dir;
- __be32 qat_produced_adler;
-
- qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));
-
- if (dir == COMPRESSION) {
- __be16 zlib_header;
-
- zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
- scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
- areq->dlen += QAT_RFC_1950_HDR_SIZE;
-
- scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
- QAT_RFC_1950_FOOTER_SIZE, 1);
- areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
- } else {
- __be32 decomp_adler;
- int footer_offset;
- int consumed;
-
- consumed = qat_comp_get_consumed_ctr(resp);
- footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
- if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
- return -EBADMSG;
-
- scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
- QAT_RFC_1950_FOOTER_SIZE, 0);
-
- if (qat_produced_adler != decomp_adler)
- return -EBADMSG;
- }
- return 0;
-}
-
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
void *resp)
{
@@ -293,18 +230,6 @@ static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
memset(ctx, 0, sizeof(*ctx));
}
-static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
-{
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
- struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
- int ret;
-
- ret = qat_comp_alg_init_tfm(acomp_tfm);
- ctx->qat_comp_callback = &qat_comp_rfc1950_callback;
-
- return ret;
-}
-
static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
unsigned int shdr, unsigned int sftr,
unsigned int dhdr, unsigned int dftr)
@@ -400,43 +325,6 @@ static int qat_comp_alg_decompress(struct acomp_req *req)
return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}
-static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
-{
- if (!req->dst && req->dlen != 0)
- return -EINVAL;
-
- if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
- return -EINVAL;
-
- return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
- QAT_RFC_1950_HDR_SIZE,
- QAT_RFC_1950_FOOTER_SIZE);
-}
-
-static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
-{
- struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
- struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
- struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
- struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
- u16 zlib_header;
- int ret;
-
- if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
- return -EBADMSG;
-
- scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
-
- ret = parse_zlib_header(zlib_header);
- if (ret) {
- dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
- return ret;
- }
-
- return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
- QAT_RFC_1950_FOOTER_SIZE, 0, 0);
-}
-
static struct acomp_alg qat_acomp[] = { {
.base = {
.cra_name = "deflate",
@@ -452,22 +340,7 @@ static struct acomp_alg qat_acomp[] = { {
.decompress = qat_comp_alg_decompress,
.dst_free = sgl_free,
.reqsize = sizeof(struct qat_compression_req),
-}, {
- .base = {
- .cra_name = "zlib-deflate",
- .cra_driver_name = "qat_zlib_deflate",
- .cra_priority = 4001,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_ctxsize = sizeof(struct qat_compression_ctx),
- .cra_module = THIS_MODULE,
- },
- .init = qat_comp_alg_rfc1950_init_tfm,
- .exit = qat_comp_alg_exit_tfm,
- .compress = qat_comp_alg_rfc1950_compress,
- .decompress = qat_comp_alg_rfc1950_decompress,
- .dst_free = sgl_free,
- .reqsize = sizeof(struct qat_compression_req),
-} };
+}};
int qat_comp_algs_register(void)
{
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 4bd150d144..e27ea7e28c 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -200,7 +200,7 @@ static int qat_uclo_parse_num(char *str, unsigned int *num)
unsigned long ae = 0;
int i;
- strncpy(buf, str, 15);
+ strscpy(buf, str, sizeof(buf));
for (i = 0; i < 16; i++) {
if (!isdigit(buf[i])) {
buf[i] = '\0';
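
The strscpy() conversion above is the usual string hardening: unlike strncpy(), it bounds by the full destination size and always NUL-terminates. A minimal illustration:

#include <linux/string.h>

/*
 * Illustration: with a 16-byte buffer and a long source, strncpy(buf,
 * str, 15) can leave the buffer unterminated, while strscpy()
 * truncates, terminates, and returns -E2BIG on overflow.
 */
static char example_first_char(const char *str)
{
	char buf[16];

	strscpy(buf, str, sizeof(buf));	/* always NUL-terminated */
	return buf[0];
}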
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 09551f9491..af14090cc4 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2021 Intel Corporation */
#include <adf_accel_devices.h>
+#include <adf_admin.h>
#include <adf_common_drv.h>
#include <adf_gen2_config.h>
#include <adf_gen2_dc.h>
diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
index 1e748e8ce1..40b456b803 100644
--- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c
@@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_DH895XCC_FW);
MODULE_FIRMWARE(ADF_DH895XCC_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
index fefb85ceae..d59cb1ba2a 100644
--- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c
@@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);
+MODULE_IMPORT_NS(CRYPTO_QAT);
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index b61e35b932..5744df30c8 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -581,7 +581,7 @@ err_cleanup:
return ret;
}
-static int mv_cesa_remove(struct platform_device *pdev)
+static void mv_cesa_remove(struct platform_device *pdev)
{
struct mv_cesa_dev *cesa = platform_get_drvdata(pdev);
int i;
@@ -594,8 +594,6 @@ static int mv_cesa_remove(struct platform_device *pdev)
mv_cesa_put_sram(pdev, i);
irq_set_affinity_hint(cesa->engines[i].irq, NULL);
}
-
- return 0;
}
static const struct platform_device_id mv_cesa_plat_id_table[] = {
@@ -606,7 +604,7 @@ MODULE_DEVICE_TABLE(platform, mv_cesa_plat_id_table);
static struct platform_driver marvell_cesa = {
.probe = mv_cesa_probe,
- .remove = mv_cesa_remove,
+ .remove_new = mv_cesa_remove,
.id_table = mv_cesa_plat_id_table,
.driver = {
.name = "marvell-cesa",
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index 6edd27ff8c..e4bd3f030c 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -419,8 +419,8 @@ int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
return 0;
free_iq:
- otx2_cpt_free_instruction_queues(lfs);
cptlf_hw_cleanup(lfs);
+ otx2_cpt_free_instruction_queues(lfs);
detach_rsrcs:
otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
@@ -431,11 +431,13 @@ EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
- lfs->lfs_num = 0;
/* Cleanup LFs hardware side */
cptlf_hw_cleanup(lfs);
+ /* Free instruction queues */
+ otx2_cpt_free_instruction_queues(lfs);
/* Send request to detach LFs */
otx2_cpt_detach_rsrcs_msg(lfs);
+ lfs->lfs_num = 0;
}
EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index bac729c885..215a1b17b6 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -249,8 +249,11 @@ static void cptvf_lf_shutdown(struct otx2_cptlfs_info *lfs)
otx2_cptlf_unregister_interrupts(lfs);
/* Cleanup LFs software side */
lf_sw_cleanup(lfs);
+ /* Free instruction queues */
+ otx2_cpt_free_instruction_queues(lfs);
/* Send request to detach LFs */
otx2_cpt_detach_rsrcs_msg(lfs);
+ lfs->lfs_num = 0;
}
static int cptvf_lf_init(struct otx2_cptvf_dev *cptvf)
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index f6b7bce0e6..2b3ebe0db3 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -908,7 +908,6 @@ static struct ahash_alg dcp_sha1_alg = {
.cra_name = "sha1",
.cra_driver_name = "sha1-dcp",
.cra_priority = 400,
- .cra_alignmask = 63,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct dcp_async_ctx),
@@ -935,7 +934,6 @@ static struct ahash_alg dcp_sha256_alg = {
.cra_name = "sha256",
.cra_driver_name = "sha256-dcp",
.cra_priority = 400,
- .cra_alignmask = 63,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct dcp_async_ctx),
@@ -1131,7 +1129,7 @@ err_destroy_sha_thread:
return ret;
}
-static int mxs_dcp_remove(struct platform_device *pdev)
+static void mxs_dcp_remove(struct platform_device *pdev)
{
struct dcp *sdcp = platform_get_drvdata(pdev);
@@ -1150,8 +1148,6 @@ static int mxs_dcp_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
global_sdcp = NULL;
-
- return 0;
}
static const struct of_device_id mxs_dcp_dt_ids[] = {
@@ -1164,7 +1160,7 @@ MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
static struct platform_driver mxs_dcp_driver = {
.probe = mxs_dcp_probe,
- .remove = mxs_dcp_remove,
+ .remove_new = mxs_dcp_remove,
.driver = {
.name = "mxs-dcp",
.of_match_table = mxs_dcp_dt_ids,
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index d5a32d71a3..caea98622c 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -2011,7 +2011,7 @@ out_free_n2cp:
return err;
}
-static int n2_crypto_remove(struct platform_device *dev)
+static void n2_crypto_remove(struct platform_device *dev)
{
struct n2_crypto *np = dev_get_drvdata(&dev->dev);
@@ -2022,8 +2022,6 @@ static int n2_crypto_remove(struct platform_device *dev)
release_global_resources();
free_n2cp(np);
-
- return 0;
}
static struct n2_mau *alloc_ncp(void)
@@ -2109,7 +2107,7 @@ out_free_ncp:
return err;
}
-static int n2_mau_remove(struct platform_device *dev)
+static void n2_mau_remove(struct platform_device *dev)
{
struct n2_mau *mp = dev_get_drvdata(&dev->dev);
@@ -2118,8 +2116,6 @@ static int n2_mau_remove(struct platform_device *dev)
release_global_resources();
free_ncp(mp);
-
- return 0;
}
static const struct of_device_id n2_crypto_match[] = {
@@ -2146,7 +2142,7 @@ static struct platform_driver n2_crypto_driver = {
.of_match_table = n2_crypto_match,
},
.probe = n2_crypto_probe,
- .remove = n2_crypto_remove,
+ .remove_new = n2_crypto_remove,
};
static const struct of_device_id n2_mau_match[] = {
@@ -2173,7 +2169,7 @@ static struct platform_driver n2_mau_driver = {
.of_match_table = n2_mau_match,
},
.probe = n2_mau_probe,
- .remove = n2_mau_remove,
+ .remove_new = n2_mau_remove,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index ed83023dd7..bad1adacbc 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -1255,7 +1255,7 @@ err_data:
return err;
}
-static int omap_aes_remove(struct platform_device *pdev)
+static void omap_aes_remove(struct platform_device *pdev)
{
struct omap_aes_dev *dd = platform_get_drvdata(pdev);
struct aead_engine_alg *aalg;
@@ -1285,8 +1285,6 @@ static int omap_aes_remove(struct platform_device *pdev)
pm_runtime_disable(dd->dev);
sysfs_remove_group(&dd->dev->kobj, &omap_aes_attr_group);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1307,7 +1305,7 @@ static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
static struct platform_driver omap_aes_driver = {
.probe = omap_aes_probe,
- .remove = omap_aes_remove,
+ .remove_new = omap_aes_remove,
.driver = {
.name = "omap-aes",
.pm = &omap_aes_pm_ops,
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 089dd45eae..209d3dc03a 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -1072,7 +1072,7 @@ err_data:
return err;
}
-static int omap_des_remove(struct platform_device *pdev)
+static void omap_des_remove(struct platform_device *pdev)
{
struct omap_des_dev *dd = platform_get_drvdata(pdev);
int i, j;
@@ -1089,8 +1089,6 @@ static int omap_des_remove(struct platform_device *pdev)
tasklet_kill(&dd->done_task);
omap_des_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1117,7 +1115,7 @@ static SIMPLE_DEV_PM_OPS(omap_des_pm_ops, omap_des_suspend, omap_des_resume);
static struct platform_driver omap_des_driver = {
.probe = omap_des_probe,
- .remove = omap_des_remove,
+ .remove_new = omap_des_remove,
.driver = {
.name = "omap-des",
.pm = &omap_des_pm_ops,
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a6b4a0b3ac..5bcd9ab0f7 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -356,10 +356,10 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
if (big_endian)
for (i = 0; i < d; i++)
- hash[i] = be32_to_cpup((__be32 *)in + i);
+ put_unaligned(be32_to_cpup((__be32 *)in + i), &hash[i]);
else
for (i = 0; i < d; i++)
- hash[i] = le32_to_cpup((__le32 *)in + i);
+ put_unaligned(le32_to_cpup((__le32 *)in + i), &hash[i]);
}
static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
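
The put_unaligned() change above matters because req->result is a caller-supplied byte buffer with no alignment guarantee. A minimal illustration of the store pattern:

#include <asm/unaligned.h>

/*
 * Illustration: 32-bit store to a destination of unknown alignment;
 * a plain *(u32 *)dst = v could fault on strict-alignment CPUs.
 */
static void example_store_u32(u8 *dst, u32 v)
{
	put_unaligned(v, (u32 *)dst);
}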
@@ -1435,7 +1435,6 @@ static struct ahash_engine_alg algs_sha1_md5[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1458,7 +1457,6 @@ static struct ahash_engine_alg algs_sha1_md5[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1483,7 +1481,6 @@ static struct ahash_engine_alg algs_sha1_md5[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
sizeof(struct omap_sham_hmac_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha1_init,
.cra_exit = omap_sham_cra_exit,
@@ -1508,7 +1505,6 @@ static struct ahash_engine_alg algs_sha1_md5[] = {
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
sizeof(struct omap_sham_hmac_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_md5_init,
.cra_exit = omap_sham_cra_exit,
@@ -1535,7 +1531,6 @@ static struct ahash_engine_alg algs_sha224_sha256[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1558,7 +1553,6 @@ static struct ahash_engine_alg algs_sha224_sha256[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1583,7 +1577,6 @@ static struct ahash_engine_alg algs_sha224_sha256[] = {
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
sizeof(struct omap_sham_hmac_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha224_init,
.cra_exit = omap_sham_cra_exit,
@@ -1608,7 +1601,6 @@ static struct ahash_engine_alg algs_sha224_sha256[] = {
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
sizeof(struct omap_sham_hmac_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha256_init,
.cra_exit = omap_sham_cra_exit,
@@ -1634,7 +1626,6 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1657,7 +1648,6 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_init,
.cra_exit = omap_sham_cra_exit,
@@ -1682,7 +1672,6 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
sizeof(struct omap_sham_hmac_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha384_init,
.cra_exit = omap_sham_cra_exit,
@@ -1707,7 +1696,6 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct omap_sham_ctx) +
sizeof(struct omap_sham_hmac_ctx),
- .cra_alignmask = OMAP_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = omap_sham_cra_sha512_init,
.cra_exit = omap_sham_cra_exit,
@@ -2200,7 +2188,7 @@ data_err:
return err;
}
-static int omap_sham_remove(struct platform_device *pdev)
+static void omap_sham_remove(struct platform_device *pdev)
{
struct omap_sham_dev *dd;
int i, j;
@@ -2224,13 +2212,11 @@ static int omap_sham_remove(struct platform_device *pdev)
dma_release_channel(dd->dma_lch);
sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
-
- return 0;
}
static struct platform_driver omap_sham_driver = {
.probe = omap_sham_probe,
- .remove = omap_sham_remove,
+ .remove_new = omap_sham_remove,
.driver = {
.name = "omap-sham",
.of_match_table = omap_sham_of_match,
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index fce49c0dee..28b5fd8238 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -277,7 +277,7 @@ err_mem_path_disable:
return ret;
}
-static int qce_crypto_remove(struct platform_device *pdev)
+static void qce_crypto_remove(struct platform_device *pdev)
{
struct qce_device *qce = platform_get_drvdata(pdev);
@@ -287,7 +287,6 @@ static int qce_crypto_remove(struct platform_device *pdev)
clk_disable_unprepare(qce->bus);
clk_disable_unprepare(qce->iface);
clk_disable_unprepare(qce->core);
- return 0;
}
static const struct of_device_id qce_crypto_of_match[] = {
@@ -300,7 +299,7 @@ MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
static struct platform_driver qce_crypto_driver = {
.probe = qce_crypto_probe,
- .remove = qce_crypto_remove,
+ .remove_new = qce_crypto_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qce_crypto_of_match,
diff --git a/drivers/crypto/qcom-rng.c b/drivers/crypto/qcom-rng.c
index 825a729f20..c670d7d0c1 100644
--- a/drivers/crypto/qcom-rng.c
+++ b/drivers/crypto/qcom-rng.c
@@ -7,6 +7,7 @@
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/crypto.h>
+#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
@@ -28,17 +29,25 @@
#define WORD_SZ 4
+#define QCOM_TRNG_QUALITY 1024
+
struct qcom_rng {
struct mutex lock;
void __iomem *base;
struct clk *clk;
- unsigned int skip_init;
+ struct hwrng hwrng;
+ struct qcom_rng_of_data *of_data;
};
struct qcom_rng_ctx {
struct qcom_rng *rng;
};
+struct qcom_rng_of_data {
+ bool skip_init;
+ bool hwrng_support;
+};
+
static struct qcom_rng *qcom_rng_dev;
static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
@@ -66,11 +75,11 @@ static int qcom_rng_read(struct qcom_rng *rng, u8 *data, unsigned int max)
} else {
/* copy only remaining bytes */
memcpy(data, &val, max - currsize);
- break;
+ currsize = max;
}
} while (currsize < max);
- return 0;
+ return currsize;
}
static int qcom_rng_generate(struct crypto_rng *tfm,
@@ -92,6 +101,9 @@ static int qcom_rng_generate(struct crypto_rng *tfm,
mutex_unlock(&rng->lock);
clk_disable_unprepare(rng->clk);
+ if (ret >= 0)
+ ret = 0;
+
return ret;
}
@@ -101,6 +113,13 @@ static int qcom_rng_seed(struct crypto_rng *tfm, const u8 *seed,
return 0;
}
+static int qcom_hwrng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
+{
+ struct qcom_rng *qrng = container_of(hwrng, struct qcom_rng, hwrng);
+
+ return qcom_rng_read(qrng, data, max);
+}
+
static int qcom_rng_enable(struct qcom_rng *rng)
{
u32 val;
@@ -136,7 +155,7 @@ static int qcom_rng_init(struct crypto_tfm *tfm)
ctx->rng = qcom_rng_dev;
- if (!ctx->rng->skip_init)
+ if (!ctx->rng->of_data->skip_init)
return qcom_rng_enable(ctx->rng);
return 0;
@@ -177,27 +196,56 @@ static int qcom_rng_probe(struct platform_device *pdev)
if (IS_ERR(rng->clk))
return PTR_ERR(rng->clk);
- rng->skip_init = (unsigned long)device_get_match_data(&pdev->dev);
+ rng->of_data = (struct qcom_rng_of_data *)of_device_get_match_data(&pdev->dev);
qcom_rng_dev = rng;
ret = crypto_register_rng(&qcom_rng_alg);
if (ret) {
dev_err(&pdev->dev, "Register crypto rng failed: %d\n", ret);
qcom_rng_dev = NULL;
+ return ret;
}
+ if (rng->of_data->hwrng_support) {
+ rng->hwrng.name = "qcom_hwrng";
+ rng->hwrng.read = qcom_hwrng_read;
+ rng->hwrng.quality = QCOM_TRNG_QUALITY;
+ ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
+ if (ret) {
+ dev_err(&pdev->dev, "Register hwrng failed: %d\n", ret);
+ qcom_rng_dev = NULL;
+ goto fail;
+ }
+ }
+
+ return ret;
+fail:
+ crypto_unregister_rng(&qcom_rng_alg);
return ret;
}
-static int qcom_rng_remove(struct platform_device *pdev)
+static void qcom_rng_remove(struct platform_device *pdev)
{
crypto_unregister_rng(&qcom_rng_alg);
qcom_rng_dev = NULL;
-
- return 0;
}
+static struct qcom_rng_of_data qcom_prng_of_data = {
+ .skip_init = false,
+ .hwrng_support = false,
+};
+
+static struct qcom_rng_of_data qcom_prng_ee_of_data = {
+ .skip_init = true,
+ .hwrng_support = false,
+};
+
+static struct qcom_rng_of_data qcom_trng_of_data = {
+ .skip_init = true,
+ .hwrng_support = true,
+};
+
static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = {
{ .id = "QCOM8160", .driver_data = 1 },
{}
@@ -205,15 +253,16 @@ static const struct acpi_device_id __maybe_unused qcom_rng_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, qcom_rng_acpi_match);
static const struct of_device_id __maybe_unused qcom_rng_of_match[] = {
- { .compatible = "qcom,prng", .data = (void *)0},
- { .compatible = "qcom,prng-ee", .data = (void *)1},
+ { .compatible = "qcom,prng", .data = &qcom_prng_of_data },
+ { .compatible = "qcom,prng-ee", .data = &qcom_prng_ee_of_data },
+ { .compatible = "qcom,trng", .data = &qcom_trng_of_data },
{}
};
MODULE_DEVICE_TABLE(of, qcom_rng_of_match);
static struct platform_driver qcom_rng_driver = {
.probe = qcom_rng_probe,
- .remove = qcom_rng_remove,
+ .remove_new = qcom_rng_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = of_match_ptr(qcom_rng_of_match),
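
The driver-data change in this file replaces a casted flag with a proper per-compatible config struct. A sketch of how probe-time resolution works with this pattern; the probe body here is hypothetical and only names the two flags defined above.

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	const struct qcom_rng_of_data *cfg;

	cfg = of_device_get_match_data(&pdev->dev);
	if (!cfg)
		return -ENODEV;	/* no matching compatible entry */

	/* cfg->skip_init and cfg->hwrng_support now drive behaviour
	 * instead of a single casted integer */
	return 0;
}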
diff --git a/drivers/crypto/rockchip/rk3288_crypto.c b/drivers/crypto/rockchip/rk3288_crypto.c
index 77d5705a5d..70edf40bc5 100644
--- a/drivers/crypto/rockchip/rk3288_crypto.c
+++ b/drivers/crypto/rockchip/rk3288_crypto.c
@@ -405,7 +405,7 @@ err_crypto:
return err;
}
-static int rk_crypto_remove(struct platform_device *pdev)
+static void rk_crypto_remove(struct platform_device *pdev)
{
struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
struct rk_crypto_info *first;
@@ -424,12 +424,11 @@ static int rk_crypto_remove(struct platform_device *pdev)
}
rk_crypto_pm_exit(crypto_tmp);
crypto_engine_exit(crypto_tmp->engine);
- return 0;
}
static struct platform_driver crypto_driver = {
.probe = rk_crypto_probe,
- .remove = rk_crypto_remove,
+ .remove_new = rk_crypto_remove,
.driver = {
.name = "rk3288-crypto",
.pm = &rk_crypto_pm_ops,
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index 8c14318064..1b13b4aa16 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -393,7 +393,6 @@ struct rk_crypto_tmp rk_ahash_sha1 = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_ahash_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
}
@@ -426,7 +425,6 @@ struct rk_crypto_tmp rk_ahash_sha256 = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_ahash_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
}
@@ -459,7 +457,6 @@ struct rk_crypto_tmp rk_ahash_md5 = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct rk_ahash_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
}
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index fe8cf9ba80..8b6e3f5c94 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -224,9 +224,6 @@
/* HASH HW constants */
#define BUFLEN HASH_BLOCK_SIZE
-#define SSS_HASH_DMA_LEN_ALIGN 8
-#define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
-
#define SSS_HASH_QUEUE_LENGTH 10
/**
@@ -1746,7 +1743,6 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s5p_hash_ctx),
- .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = s5p_hash_cra_init,
.cra_exit = s5p_hash_cra_exit,
@@ -1771,7 +1767,6 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s5p_hash_ctx),
- .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = s5p_hash_cra_init,
.cra_exit = s5p_hash_cra_exit,
@@ -1796,7 +1791,6 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = HASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct s5p_hash_ctx),
- .cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
.cra_module = THIS_MODULE,
.cra_init = s5p_hash_cra_init,
.cra_exit = s5p_hash_cra_exit,
@@ -2315,7 +2309,7 @@ err_clk:
return err;
}
-static int s5p_aes_remove(struct platform_device *pdev)
+static void s5p_aes_remove(struct platform_device *pdev)
{
struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
int i;
@@ -2337,13 +2331,11 @@ static int s5p_aes_remove(struct platform_device *pdev)
clk_disable_unprepare(pdata->clk);
s5p_dev = NULL;
-
- return 0;
}
static struct platform_driver s5p_aes_crypto = {
.probe = s5p_aes_probe,
- .remove = s5p_aes_remove,
+ .remove_new = s5p_aes_remove,
.driver = {
.name = "s5p-secss",
.of_match_table = s5p_sss_dt_match,
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 94eb6f6afa..78a4930c64 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -2467,7 +2467,7 @@ destroy_dma_pool:
return ret;
}
-static int sa_ul_remove(struct platform_device *pdev)
+static void sa_ul_remove(struct platform_device *pdev)
{
struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
@@ -2485,13 +2485,11 @@ static int sa_ul_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static struct platform_driver sa_ul_driver = {
.probe = sa_ul_probe,
- .remove = sa_ul_remove,
+ .remove_new = sa_ul_remove,
.driver = {
.name = "saul-crypto",
.of_match_table = of_match,
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 8e84dd98a2..fabe4f381f 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1478,7 +1478,7 @@ clk_ipg_disable:
return err;
}
-static int sahara_remove(struct platform_device *pdev)
+static void sahara_remove(struct platform_device *pdev)
{
struct sahara_dev *dev = platform_get_drvdata(pdev);
@@ -1490,13 +1490,11 @@ static int sahara_remove(struct platform_device *pdev)
clk_disable_unprepare(dev->clk_ahb);
dev_ptr = NULL;
-
- return 0;
}
static struct platform_driver sahara_driver = {
.probe = sahara_probe,
- .remove = sahara_remove,
+ .remove_new = sahara_remove,
.driver = {
.name = SAHARA_NAME,
.of_match_table = sahara_dt_ids,
diff --git a/drivers/crypto/starfive/jh7110-cryp.c b/drivers/crypto/starfive/jh7110-cryp.c
index 3a67ddc4d9..4f5b681820 100644
--- a/drivers/crypto/starfive/jh7110-cryp.c
+++ b/drivers/crypto/starfive/jh7110-cryp.c
@@ -168,7 +168,7 @@ static int starfive_cryp_probe(struct platform_device *pdev)
ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name,
(void *)cryp);
if (ret)
- return dev_err_probe(&pdev->dev, irq,
+ return dev_err_probe(&pdev->dev, ret,
"Failed to register interrupt handler\n");
clk_prepare_enable(cryp->hclk);
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index cc7650198d..b6d1808012 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -209,7 +209,8 @@ static int starfive_hash_copy_hash(struct ahash_request *req)
data = (u32 *)req->result;
for (count = 0; count < mlen; count++)
- data[count] = readl(ctx->cryp->base + STARFIVE_HASH_SHARDR);
+ put_unaligned(readl(ctx->cryp->base + STARFIVE_HASH_SHARDR),
+ &data[count]);
return 0;
}
@@ -628,7 +629,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
@@ -658,7 +658,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
@@ -687,7 +686,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
@@ -717,7 +715,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
@@ -746,7 +743,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
@@ -776,7 +772,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
@@ -805,7 +800,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
@@ -835,7 +829,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
@@ -864,7 +857,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
@@ -894,7 +886,6 @@ static struct ahash_engine_alg algs_sha2_sm3[] = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct starfive_cryp_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
},
diff --git a/drivers/crypto/stm32/stm32-crc32.c b/drivers/crypto/stm32/stm32-crc32.c
index 90a920e7f6..b0cf6d2fd3 100644
--- a/drivers/crypto/stm32/stm32-crc32.c
+++ b/drivers/crypto/stm32/stm32-crc32.c
@@ -104,7 +104,7 @@ static struct stm32_crc *stm32_crc_get_next_crc(void)
struct stm32_crc *crc;
spin_lock_bh(&crc_list.lock);
- crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
+ crc = list_first_entry_or_null(&crc_list.dev_list, struct stm32_crc, list);
if (crc)
list_move_tail(&crc->list, &crc_list.dev_list);
spin_unlock_bh(&crc_list.lock);
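
The _or_null variant matters here because list_first_entry() on an empty list returns a pointer into the list head itself, never NULL. A minimal sketch of the guarded lookup:

#include <linux/list.h>

/* Sketch: returns NULL until at least one CRC instance is registered,
 * which is what the 'if (crc)' test above depends on. */
static struct stm32_crc *example_peek_crc(struct list_head *head)
{
	return list_first_entry_or_null(head, struct stm32_crc, list);
}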
@@ -283,7 +283,6 @@ static struct shash_alg algs[] = {
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
.cra_module = THIS_MODULE,
.cra_init = stm32_crc32_cra_init,
@@ -305,7 +304,6 @@ static struct shash_alg algs[] = {
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
- .cra_alignmask = 3,
.cra_ctxsize = sizeof(struct stm32_crc_ctx),
.cra_module = THIS_MODULE,
.cra_init = stm32_crc32c_cra_init,
@@ -379,16 +377,11 @@ static int stm32_crc_probe(struct platform_device *pdev)
return 0;
}
-static int stm32_crc_remove(struct platform_device *pdev)
+static void stm32_crc_remove(struct platform_device *pdev)
{
struct stm32_crc *crc = platform_get_drvdata(pdev);
int ret = pm_runtime_get_sync(crc->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(crc->dev);
- return ret;
- }
-
spin_lock(&crc_list.lock);
list_del(&crc->list);
spin_unlock(&crc_list.lock);
@@ -401,9 +394,9 @@ static int stm32_crc_remove(struct platform_device *pdev)
pm_runtime_disable(crc->dev);
pm_runtime_put_noidle(crc->dev);
- clk_disable_unprepare(crc->clk);
-
- return 0;
+ if (ret >= 0)
+ clk_disable(crc->clk);
+ clk_unprepare(crc->clk);
}
static int __maybe_unused stm32_crc_suspend(struct device *dev)
@@ -472,7 +465,7 @@ MODULE_DEVICE_TABLE(of, stm32_dt_ids);
static struct platform_driver stm32_crc_driver = {
.probe = stm32_crc_probe,
- .remove = stm32_crc_remove,
+ .remove_new = stm32_crc_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_crc_pm_ops,
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index f095f00654..c3cbc26733 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -2084,17 +2084,12 @@ err_rst:
return ret;
}
-static int stm32_cryp_remove(struct platform_device *pdev)
+static void stm32_cryp_remove(struct platform_device *pdev)
{
struct stm32_cryp *cryp = platform_get_drvdata(pdev);
int ret;
- if (!cryp)
- return -ENODEV;
-
- ret = pm_runtime_resume_and_get(cryp->dev);
- if (ret < 0)
- return ret;
+ ret = pm_runtime_get_sync(cryp->dev);
if (cryp->caps->aeads_support)
crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs));
@@ -2109,9 +2104,8 @@ static int stm32_cryp_remove(struct platform_device *pdev)
pm_runtime_disable(cryp->dev);
pm_runtime_put_noidle(cryp->dev);
- clk_disable_unprepare(cryp->clk);
-
- return 0;
+ if (ret >= 0)
+ clk_disable_unprepare(cryp->clk);
}
#ifdef CONFIG_PM
@@ -2148,7 +2142,7 @@ static const struct dev_pm_ops stm32_cryp_pm_ops = {
static struct platform_driver stm32_cryp_driver = {
.probe = stm32_cryp_probe,
- .remove = stm32_cryp_remove,
+ .remove_new = stm32_cryp_remove,
.driver = {
.name = DRIVER_NAME,
.pm = &stm32_cryp_pm_ops,
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index 2b2382d433..34e0d7e381 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -1283,7 +1283,6 @@ static struct ahash_engine_alg algs_md5[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1313,7 +1312,6 @@ static struct ahash_engine_alg algs_md5[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1345,7 +1343,6 @@ static struct ahash_engine_alg algs_sha1[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1375,7 +1372,6 @@ static struct ahash_engine_alg algs_sha1[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1407,7 +1403,6 @@ static struct ahash_engine_alg algs_sha224[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1437,7 +1432,6 @@ static struct ahash_engine_alg algs_sha224[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1469,7 +1463,6 @@ static struct ahash_engine_alg algs_sha256[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1499,7 +1492,6 @@ static struct ahash_engine_alg algs_sha256[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1531,7 +1523,6 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1561,7 +1552,6 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1590,7 +1580,6 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1620,7 +1609,6 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1652,7 +1640,6 @@ static struct ahash_engine_alg algs_sha3[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA3_224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha3_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1682,7 +1669,6 @@ static struct ahash_engine_alg algs_sha3[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA3_224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha3_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1711,7 +1697,6 @@ static struct ahash_engine_alg algs_sha3[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA3_256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha3_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1741,7 +1726,6 @@ static struct ahash_engine_alg algs_sha3[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA3_256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha3_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1770,7 +1754,6 @@ static struct ahash_engine_alg algs_sha3[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha3_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1800,7 +1783,6 @@ static struct ahash_engine_alg algs_sha3[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha3_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1829,7 +1811,6 @@ static struct ahash_engine_alg algs_sha3[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA3_512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha3_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
@@ -1859,7 +1840,6 @@ static struct ahash_engine_alg algs_sha3[] = {
CRYPTO_ALG_KERN_DRIVER_ONLY,
.cra_blocksize = SHA3_512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
- .cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha3_hmac_init,
.cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
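
Every hunk in this file drops .cra_alignmask = 3, leaving the default of 0, so the crypto API stops pre-aligning buffers on the driver's behalf. A minimal sketch of the access style that makes the mask unnecessary, assuming the kernel's unaligned accessors:

	#include <asm/unaligned.h>	/* linux/unaligned.h on newer trees */
	#include <linux/types.h>

	/* With a zero alignmask the API may pass arbitrarily aligned data,
	 * so word-sized loads go through the unaligned helpers rather than
	 * dereferencing a cast pointer.
	 */
	static u32 load_input_word(const u8 *buf)
	{
		return get_unaligned_le32(buf);
	}
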
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 4ca4fbd227..511ddcb0ef 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -2119,13 +2119,14 @@ static int ahash_finup(struct ahash_request *areq)
static int ahash_digest(struct ahash_request *areq)
{
- struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
- struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
-
- ahash->init(areq);
- req_ctx->last = 1;
+ ahash_init(areq);
+ return ahash_finup(areq);
+}
- return ahash_process_req(areq, areq->nbytes);
+static int ahash_digest_sha224_swinit(struct ahash_request *areq)
+{
+ ahash_init_sha224_swinit(areq);
+ return ahash_finup(areq);
}
static int ahash_export(struct ahash_request *areq, void *out)
@@ -3136,7 +3137,7 @@ static int hw_supports(struct device *dev, __be32 desc_hdr_template)
return ret;
}
-static int talitos_remove(struct platform_device *ofdev)
+static void talitos_remove(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
struct talitos_private *priv = dev_get_drvdata(dev);
@@ -3170,8 +3171,6 @@ static int talitos_remove(struct platform_device *ofdev)
tasklet_kill(&priv->done_task[0]);
if (priv->irq[1])
tasklet_kill(&priv->done_task[1]);
-
- return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
@@ -3242,6 +3241,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
(!strcmp(alg->cra_name, "sha224") ||
!strcmp(alg->cra_name, "hmac(sha224)"))) {
t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
+ t_alg->algt.alg.hash.digest =
+ ahash_digest_sha224_swinit;
t_alg->algt.desc_hdr_template =
DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
DESC_HDR_SEL0_MDEUA |
@@ -3259,7 +3260,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
alg->cra_priority = t_alg->algt.priority;
else
alg->cra_priority = TALITOS_CRA_PRIORITY;
- if (has_ftr_sec1(priv))
+ if (has_ftr_sec1(priv) && t_alg->algt.type != CRYPTO_ALG_TYPE_AHASH)
alg->cra_alignmask = 3;
else
alg->cra_alignmask = 0;
@@ -3559,7 +3560,7 @@ static struct platform_driver talitos_driver = {
.of_match_table = talitos_match,
},
.probe = talitos_probe,
- .remove = talitos_remove,
+ .remove_new = talitos_remove,
};
module_platform_driver(talitos_driver);
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index 2621ff8a93..de53eddf67 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -104,7 +104,8 @@ static void virtio_crypto_dataq_akcipher_callback(struct virtio_crypto_request *
}
static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher_ctx *ctx,
- struct virtio_crypto_ctrl_header *header, void *para,
+ struct virtio_crypto_ctrl_header *header,
+ struct virtio_crypto_akcipher_session_para *para,
const uint8_t *key, unsigned int keylen)
{
struct scatterlist outhdr_sg, key_sg, inhdr_sg, *sgs[3];
@@ -128,7 +129,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
ctrl = &vc_ctrl_req->ctrl;
memcpy(&ctrl->header, header, sizeof(ctrl->header));
- memcpy(&ctrl->u, para, sizeof(ctrl->u));
+ memcpy(&ctrl->u.akcipher_create_session.para, para, sizeof(*para));
input = &vc_ctrl_req->input;
input->status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
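
The virtio fix narrows both the parameter type and the copy: the old code copied sizeof(ctrl->u), the size of the whole control union, out of a buffer that only held the akcipher session parameters, reading past the end of the source object. A minimal sketch of the pattern, with hypothetical types:

	#include <string.h>

	struct session_para {
		unsigned int algo, keytype, keylen;
	};

	union ctrl_payload {
		struct { struct session_para para; } create_session;
		unsigned char other_requests[64];	/* other request types */
	};

	/* Overread: copies sizeof(union) bytes from an object that is only
	 * sizeof(struct session_para) long.
	 */
	static void fill_bad(union ctrl_payload *u, const struct session_para *p)
	{
		memcpy(u, p, sizeof(*u));
	}

	/* Fixed shape: copy exactly the member that p points at. */
	static void fill_good(union ctrl_payload *u, const struct session_para *p)
	{
		memcpy(&u->create_session.para, p, sizeof(*p));
	}
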
diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
index 50a0a18f35..f729589d79 100644
--- a/drivers/crypto/vmx/aesp8-ppc.pl
+++ b/drivers/crypto/vmx/aesp8-ppc.pl
@@ -132,11 +132,12 @@ rcon:
.long 0x1b000000, 0x1b000000, 0x1b000000, 0x1b000000 ?rev
.long 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c, 0x0d0e0f0c ?rev
.long 0,0,0,0 ?asis
+.long 0x0f102132, 0x43546576, 0x8798a9ba, 0xcbdcedfe
Lconsts:
mflr r0
bcl 20,31,\$+4
mflr $ptr #vvvvv "distance between . and rcon
- addi $ptr,$ptr,-0x48
+ addi $ptr,$ptr,-0x58
mtlr r0
blr
.long 0
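
The 16-byte word appended to the constant pool (with the matching -0x48 to -0x58 adjustment of the Lconsts displacement) is the permute mask that lets a single vpermxor combine the byte rotation and XOR previously done by the vsldoi/vxor pair in each tweak step. For reference, a scalar sketch of what each of those steps computes: the standard XTS multiply-by-x in GF(2^128) with feedback byte 0x87. The helper name is hypothetical.

	#include <stdint.h>

	/* Advance a little-endian 128-bit XTS tweak to the next block:
	 * shift left one bit and, if a bit fell off the top, fold it back
	 * in per the reducing polynomial x^128 + x^7 + x^2 + x + 1.
	 */
	static void xts_next_tweak(uint8_t t[16])
	{
		unsigned int carry = 0;

		for (int i = 0; i < 16; i++) {
			unsigned int msb = t[i] >> 7;

			t[i] = (uint8_t)((t[i] << 1) | carry);
			carry = msb;
		}
		if (carry)
			t[0] ^= 0x87;
	}
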
@@ -2495,6 +2496,17 @@ _aesp8_xts_encrypt6x:
li $x70,0x70
mtspr 256,r0
+ xxlor 2, 32+$eighty7, 32+$eighty7
+ vsldoi $eighty7,$tmp,$eighty7,1 # 0x010101..87
+ xxlor 1, 32+$eighty7, 32+$eighty7
+
+ # Load the XOR constant from Lconsts.
+ mr $x70, r6
+ bl Lconsts
+ lxvw4x 0, $x40, r6 # load XOR contents
+ mr r6, $x70
+ li $x70,0x70
+
subi $rounds,$rounds,3 # -4 in total
lvx $rndkey0,$x00,$key1 # load key schedule
@@ -2537,69 +2549,77 @@ Load_xts_enc_key:
?vperm v31,v31,$twk5,$keyperm
lvx v25,$x10,$key_ # pre-load round[2]
+ # Switch to the following sequence, using 0x010101..87 to generate the tweak.
+ # eighty7 = 0x010101..87
+ # vsrab tmp, tweak, seven # next tweak value, right shift 7 bits
+ # vand tmp, tmp, eighty7 # last byte with carry
+ # vaddubm tweak, tweak, tweak # left shift 1 bit (x2)
+ # xxlor vsx, 0, 0
+ # vpermxor tweak, tweak, tmp, vsx
+
vperm $in0,$inout,$inptail,$inpperm
subi $inp,$inp,31 # undo "caller"
vxor $twk0,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vand $tmp,$tmp,$eighty7
vxor $out0,$in0,$twk0
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in1, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in1
lvx_u $in1,$x10,$inp
vxor $twk1,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in1,$in1,$in1,$leperm
vand $tmp,$tmp,$eighty7
vxor $out1,$in1,$twk1
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in2, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in2
lvx_u $in2,$x20,$inp
andi. $taillen,$len,15
vxor $twk2,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in2,$in2,$in2,$leperm
vand $tmp,$tmp,$eighty7
vxor $out2,$in2,$twk2
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in3, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in3
lvx_u $in3,$x30,$inp
sub $len,$len,$taillen
vxor $twk3,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in3,$in3,$in3,$leperm
vand $tmp,$tmp,$eighty7
vxor $out3,$in3,$twk3
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in4, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in4
lvx_u $in4,$x40,$inp
subi $len,$len,0x60
vxor $twk4,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in4,$in4,$in4,$leperm
vand $tmp,$tmp,$eighty7
vxor $out4,$in4,$twk4
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in5, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in5
lvx_u $in5,$x50,$inp
addi $inp,$inp,0x60
vxor $twk5,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in5,$in5,$in5,$leperm
vand $tmp,$tmp,$eighty7
vxor $out5,$in5,$twk5
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in0, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in0
vxor v31,v31,$rndkey0
mtctr $rounds
@@ -2625,6 +2645,8 @@ Loop_xts_enc6x:
lvx v25,$x10,$key_ # round[4]
bdnz Loop_xts_enc6x
+ xxlor 32+$eighty7, 1, 1 # 0x010101..87
+
subic $len,$len,96 # $len-=96
vxor $in0,$twk0,v31 # xor with last round key
vcipher $out0,$out0,v24
@@ -2634,7 +2656,6 @@ Loop_xts_enc6x:
vaddubm $tweak,$tweak,$tweak
vcipher $out2,$out2,v24
vcipher $out3,$out3,v24
- vsldoi $tmp,$tmp,$tmp,15
vcipher $out4,$out4,v24
vcipher $out5,$out5,v24
@@ -2642,7 +2663,8 @@ Loop_xts_enc6x:
vand $tmp,$tmp,$eighty7
vcipher $out0,$out0,v25
vcipher $out1,$out1,v25
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in1, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in1
vcipher $out2,$out2,v25
vcipher $out3,$out3,v25
vxor $in1,$twk1,v31
@@ -2653,13 +2675,13 @@ Loop_xts_enc6x:
and r0,r0,$len
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vcipher $out0,$out0,v26
vcipher $out1,$out1,v26
vand $tmp,$tmp,$eighty7
vcipher $out2,$out2,v26
vcipher $out3,$out3,v26
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in2, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in2
vcipher $out4,$out4,v26
vcipher $out5,$out5,v26
@@ -2673,7 +2695,6 @@ Loop_xts_enc6x:
vaddubm $tweak,$tweak,$tweak
vcipher $out0,$out0,v27
vcipher $out1,$out1,v27
- vsldoi $tmp,$tmp,$tmp,15
vcipher $out2,$out2,v27
vcipher $out3,$out3,v27
vand $tmp,$tmp,$eighty7
@@ -2681,7 +2702,8 @@ Loop_xts_enc6x:
vcipher $out5,$out5,v27
addi $key_,$sp,$FRAME+15 # rewind $key_
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in3, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in3
vcipher $out0,$out0,v28
vcipher $out1,$out1,v28
vxor $in3,$twk3,v31
@@ -2690,7 +2712,6 @@ Loop_xts_enc6x:
vcipher $out2,$out2,v28
vcipher $out3,$out3,v28
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vcipher $out4,$out4,v28
vcipher $out5,$out5,v28
lvx v24,$x00,$key_ # re-pre-load round[1]
@@ -2698,7 +2719,8 @@ Loop_xts_enc6x:
vcipher $out0,$out0,v29
vcipher $out1,$out1,v29
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in4, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in4
vcipher $out2,$out2,v29
vcipher $out3,$out3,v29
vxor $in4,$twk4,v31
@@ -2708,14 +2730,14 @@ Loop_xts_enc6x:
vcipher $out5,$out5,v29
lvx v25,$x10,$key_ # re-pre-load round[2]
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vcipher $out0,$out0,v30
vcipher $out1,$out1,v30
vand $tmp,$tmp,$eighty7
vcipher $out2,$out2,v30
vcipher $out3,$out3,v30
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in5, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in5
vcipher $out4,$out4,v30
vcipher $out5,$out5,v30
vxor $in5,$twk5,v31
@@ -2725,7 +2747,6 @@ Loop_xts_enc6x:
vcipherlast $out0,$out0,$in0
lvx_u $in0,$x00,$inp # load next input block
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vcipherlast $out1,$out1,$in1
lvx_u $in1,$x10,$inp
vcipherlast $out2,$out2,$in2
@@ -2738,7 +2759,10 @@ Loop_xts_enc6x:
vcipherlast $out4,$out4,$in4
le?vperm $in2,$in2,$in2,$leperm
lvx_u $in4,$x40,$inp
- vxor $tweak,$tweak,$tmp
+ xxlor 10, 32+$in0, 32+$in0
+ xxlor 32+$in0, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in0
+ xxlor 32+$in0, 10, 10
vcipherlast $tmp,$out5,$in5 # last block might be needed
# in stealing mode
le?vperm $in3,$in3,$in3,$leperm
@@ -2771,6 +2795,8 @@ Loop_xts_enc6x:
mtctr $rounds
beq Loop_xts_enc6x # did $len-=96 borrow?
+ xxlor 32+$eighty7, 2, 2 # 0x010101..87
+
addic. $len,$len,0x60
beq Lxts_enc6x_zero
cmpwi $len,0x20
@@ -3147,6 +3173,17 @@ _aesp8_xts_decrypt6x:
li $x70,0x70
mtspr 256,r0
+ xxlor 2, 32+$eighty7, 32+$eighty7
+ vsldoi $eighty7,$tmp,$eighty7,1 # 0x010101..87
+ xxlor 1, 32+$eighty7, 32+$eighty7
+
+ # Load the XOR constant from Lconsts.
+ mr $x70, r6
+ bl Lconsts
+ lxvw4x 0, $x40, r6 # load XOR contents
+ mr r6, $x70
+ li $x70,0x70
+
subi $rounds,$rounds,3 # -4 in total
lvx $rndkey0,$x00,$key1 # load key schedule
@@ -3194,64 +3231,64 @@ Load_xts_dec_key:
vxor $twk0,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vand $tmp,$tmp,$eighty7
vxor $out0,$in0,$twk0
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in1, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in1
lvx_u $in1,$x10,$inp
vxor $twk1,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in1,$in1,$in1,$leperm
vand $tmp,$tmp,$eighty7
vxor $out1,$in1,$twk1
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in2, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in2
lvx_u $in2,$x20,$inp
andi. $taillen,$len,15
vxor $twk2,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in2,$in2,$in2,$leperm
vand $tmp,$tmp,$eighty7
vxor $out2,$in2,$twk2
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in3, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in3
lvx_u $in3,$x30,$inp
sub $len,$len,$taillen
vxor $twk3,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in3,$in3,$in3,$leperm
vand $tmp,$tmp,$eighty7
vxor $out3,$in3,$twk3
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in4, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in4
lvx_u $in4,$x40,$inp
subi $len,$len,0x60
vxor $twk4,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in4,$in4,$in4,$leperm
vand $tmp,$tmp,$eighty7
vxor $out4,$in4,$twk4
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in5, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in5
lvx_u $in5,$x50,$inp
addi $inp,$inp,0x60
vxor $twk5,$tweak,$rndkey0
vsrab $tmp,$tweak,$seven # next tweak value
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
le?vperm $in5,$in5,$in5,$leperm
vand $tmp,$tmp,$eighty7
vxor $out5,$in5,$twk5
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in0, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in0
vxor v31,v31,$rndkey0
mtctr $rounds
@@ -3277,6 +3314,8 @@ Loop_xts_dec6x:
lvx v25,$x10,$key_ # round[4]
bdnz Loop_xts_dec6x
+ xxlor 32+$eighty7, 1, 1 # 0x010101..87
+
subic $len,$len,96 # $len-=96
vxor $in0,$twk0,v31 # xor with last round key
vncipher $out0,$out0,v24
@@ -3286,7 +3325,6 @@ Loop_xts_dec6x:
vaddubm $tweak,$tweak,$tweak
vncipher $out2,$out2,v24
vncipher $out3,$out3,v24
- vsldoi $tmp,$tmp,$tmp,15
vncipher $out4,$out4,v24
vncipher $out5,$out5,v24
@@ -3294,7 +3332,8 @@ Loop_xts_dec6x:
vand $tmp,$tmp,$eighty7
vncipher $out0,$out0,v25
vncipher $out1,$out1,v25
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in1, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in1
vncipher $out2,$out2,v25
vncipher $out3,$out3,v25
vxor $in1,$twk1,v31
@@ -3305,13 +3344,13 @@ Loop_xts_dec6x:
and r0,r0,$len
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vncipher $out0,$out0,v26
vncipher $out1,$out1,v26
vand $tmp,$tmp,$eighty7
vncipher $out2,$out2,v26
vncipher $out3,$out3,v26
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in2, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in2
vncipher $out4,$out4,v26
vncipher $out5,$out5,v26
@@ -3325,7 +3364,6 @@ Loop_xts_dec6x:
vaddubm $tweak,$tweak,$tweak
vncipher $out0,$out0,v27
vncipher $out1,$out1,v27
- vsldoi $tmp,$tmp,$tmp,15
vncipher $out2,$out2,v27
vncipher $out3,$out3,v27
vand $tmp,$tmp,$eighty7
@@ -3333,7 +3371,8 @@ Loop_xts_dec6x:
vncipher $out5,$out5,v27
addi $key_,$sp,$FRAME+15 # rewind $key_
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in3, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in3
vncipher $out0,$out0,v28
vncipher $out1,$out1,v28
vxor $in3,$twk3,v31
@@ -3342,7 +3381,6 @@ Loop_xts_dec6x:
vncipher $out2,$out2,v28
vncipher $out3,$out3,v28
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vncipher $out4,$out4,v28
vncipher $out5,$out5,v28
lvx v24,$x00,$key_ # re-pre-load round[1]
@@ -3350,7 +3388,8 @@ Loop_xts_dec6x:
vncipher $out0,$out0,v29
vncipher $out1,$out1,v29
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in4, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in4
vncipher $out2,$out2,v29
vncipher $out3,$out3,v29
vxor $in4,$twk4,v31
@@ -3360,14 +3399,14 @@ Loop_xts_dec6x:
vncipher $out5,$out5,v29
lvx v25,$x10,$key_ # re-pre-load round[2]
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vncipher $out0,$out0,v30
vncipher $out1,$out1,v30
vand $tmp,$tmp,$eighty7
vncipher $out2,$out2,v30
vncipher $out3,$out3,v30
- vxor $tweak,$tweak,$tmp
+ xxlor 32+$in5, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in5
vncipher $out4,$out4,v30
vncipher $out5,$out5,v30
vxor $in5,$twk5,v31
@@ -3377,7 +3416,6 @@ Loop_xts_dec6x:
vncipherlast $out0,$out0,$in0
lvx_u $in0,$x00,$inp # load next input block
vaddubm $tweak,$tweak,$tweak
- vsldoi $tmp,$tmp,$tmp,15
vncipherlast $out1,$out1,$in1
lvx_u $in1,$x10,$inp
vncipherlast $out2,$out2,$in2
@@ -3390,7 +3428,10 @@ Loop_xts_dec6x:
vncipherlast $out4,$out4,$in4
le?vperm $in2,$in2,$in2,$leperm
lvx_u $in4,$x40,$inp
- vxor $tweak,$tweak,$tmp
+ xxlor 10, 32+$in0, 32+$in0
+ xxlor 32+$in0, 0, 0
+ vpermxor $tweak, $tweak, $tmp, $in0
+ xxlor 32+$in0, 10, 10
vncipherlast $out5,$out5,$in5
le?vperm $in3,$in3,$in3,$leperm
lvx_u $in5,$x50,$inp
@@ -3421,6 +3462,8 @@ Loop_xts_dec6x:
mtctr $rounds
beq Loop_xts_dec6x # did $len-=96 borrow?
+ xxlor 32+$eighty7, 2, 2 # 0x010101..87
+
addic. $len,$len,0x60
beq Lxts_dec6x_zero
cmpwi $len,0x20
diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index ce335578b7..3c205324b2 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -421,12 +421,10 @@ err_engine:
return err;
}
-static int zynqmp_aes_aead_remove(struct platform_device *pdev)
+static void zynqmp_aes_aead_remove(struct platform_device *pdev)
{
crypto_engine_exit(aes_drv_ctx.engine);
crypto_engine_unregister_aead(&aes_drv_ctx.alg.aead);
-
- return 0;
}
static const struct of_device_id zynqmp_aes_dt_ids[] = {
@@ -437,7 +435,7 @@ MODULE_DEVICE_TABLE(of, zynqmp_aes_dt_ids);
static struct platform_driver zynqmp_aes_driver = {
.probe = zynqmp_aes_aead_probe,
- .remove = zynqmp_aes_aead_remove,
+ .remove_new = zynqmp_aes_aead_remove,
.driver = {
.name = "zynqmp-aes",
.of_match_table = zynqmp_aes_dt_ids,
diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c
index 426bf1a72b..1bcec6f46c 100644
--- a/drivers/crypto/xilinx/zynqmp-sha.c
+++ b/drivers/crypto/xilinx/zynqmp-sha.c
@@ -182,7 +182,6 @@ static struct zynqmp_sha_drv_ctx sha3_drv_ctx = {
CRYPTO_ALG_NEED_FALLBACK,
.cra_blocksize = SHA3_384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct zynqmp_sha_tfm_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
}
}
@@ -238,20 +237,18 @@ err_shash:
return err;
}
-static int zynqmp_sha_remove(struct platform_device *pdev)
+static void zynqmp_sha_remove(struct platform_device *pdev)
{
sha3_drv_ctx.dev = platform_get_drvdata(pdev);
dma_free_coherent(sha3_drv_ctx.dev, ZYNQMP_DMA_ALLOC_FIXED_SIZE, ubuf, update_dma_addr);
dma_free_coherent(sha3_drv_ctx.dev, SHA3_384_DIGEST_SIZE, fbuf, final_dma_addr);
crypto_unregister_shash(&sha3_drv_ctx.sha3_384);
-
- return 0;
}
static struct platform_driver zynqmp_sha_driver = {
.probe = zynqmp_sha_probe,
- .remove = zynqmp_sha_remove,
+ .remove_new = zynqmp_sha_remove,
.driver = {
.name = "zynqmp-sha3-384",
},